
Periodic formatting cleanup

This is the semi-irregular comment and docs cleanup.

No functional changes should be here, although some code is moved in a
small attempt to sort functions and get things where they need to be.

Change-Id: Ib4a3e2590c6fbd016c391acc7aef6421e91c0dca

Dean Troyer authored on 2013/06/04 06:47:36
Showing 7 changed files
... ...
@@ -200,6 +200,7 @@ function _get_package_dir() {
200 200
     echo "$pkg_dir"
201 201
 }
202 202
 
203
+
203 204
 # get_packages() collects a list of package names of any type from the
204 205
 # prerequisite files in ``files/{apts|rpms}``.  The list is intended
205 206
 # to be passed to a package installer such as apt or yum.
... ...
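Aside on usage: ``get_packages`` echoes the collected package list so callers can feed it straight to a package installer. A minimal, hypothetical sketch (the argument list and service names are assumptions, not taken from this diff):

    # Hypothetical caller: gather prerequisite packages for the enabled
    # services and hand them to the platform's installer.
    PKGS=$(get_packages general $ENABLED_SERVICES)
    install_package $PKGS
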
@@ -390,42 +391,6 @@ GetOSVersion() {
390 390
     export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
391 391
 }
392 392
 
393
-# git update using reference as a branch.
394
-# git_update_branch ref
395
-function git_update_branch() {
396
-
397
-    GIT_BRANCH=$1
398
-
399
-    git checkout -f origin/$GIT_BRANCH
400
-    # a local branch might not exist
401
-    git branch -D $GIT_BRANCH || true
402
-    git checkout -b $GIT_BRANCH
403
-}
404
-
405
-
406
-# git update using reference as a tag. Be careful editing source at that repo
407
-# as working copy will be in a detached mode
408
-# git_update_tag ref
409
-function git_update_tag() {
410
-
411
-    GIT_TAG=$1
412
-
413
-    git tag -d $GIT_TAG
414
-    # fetching given tag only
415
-    git fetch origin tag $GIT_TAG
416
-    git checkout -f $GIT_TAG
417
-}
418
-
419
-
420
-# git update using reference as a branch.
421
-# git_update_remote_branch ref
422
-function git_update_remote_branch() {
423
-
424
-    GIT_BRANCH=$1
425
-
426
-    git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH
427
-}
428
-
429 393
 
430 394
 # Translate the OS version values into common nomenclature
431 395
 # Sets ``DISTRO`` from the ``os_*`` values
... ...
@@ -457,19 +422,8 @@ function GetDistro() {
457 457
 }
458 458
 
459 459
 
460
-# Determine if current distribution is an Ubuntu-based distribution.
461
-# It will also detect non-Ubuntu but Debian-based distros; this is not an issue
462
-# since Debian and Ubuntu should be compatible.
463
-# is_ubuntu
464
-function is_ubuntu {
465
-    if [[ -z "$os_PACKAGE" ]]; then
466
-        GetOSVersion
467
-    fi
468
-    [ "$os_PACKAGE" = "deb" ]
469
-}
470
-
471 460
 # Determine if current distribution is a Fedora-based distribution
472
-# (Fedora, RHEL, CentOS).
461
+# (Fedora, RHEL, CentOS, etc).
473 462
 # is_fedora
474 463
 function is_fedora {
475 464
     if [[ -z "$os_VENDOR" ]]; then
... ...
@@ -479,6 +433,7 @@ function is_fedora {
479 479
     [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ]
480 480
 }
481 481
 
482
+
482 483
 # Determine if current distribution is a SUSE-based distribution
483 484
 # (openSUSE, SLE).
484 485
 # is_suse
... ...
@@ -491,6 +446,17 @@ function is_suse {
491 491
 }
492 492
 
493 493
 
494
+# Determine if current distribution is an Ubuntu-based distribution
495
+# It will also detect non-Ubuntu but Debian-based distros
496
+# is_ubuntu
497
+function is_ubuntu {
498
+    if [[ -z "$os_PACKAGE" ]]; then
499
+        GetOSVersion
500
+    fi
501
+    [ "$os_PACKAGE" = "deb" ]
502
+}
503
+
504
+
494 505
 # Exit after outputting a message about the distribution not being supported.
495 506
 # exit_distro_not_supported [optional-string-telling-what-is-missing]
496 507
 function exit_distro_not_supported {
... ...
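Taken together, the detection helpers above (``is_fedora``, ``is_suse``, ``is_ubuntu``) lazily call ``GetOSVersion`` and then test the ``os_*`` variables, so callers can branch on distro family. A short sketch of the usual caller-side pattern (the package names are illustrative only):

    # Branch on distro family; fall back to a hard stop on anything unknown.
    if is_ubuntu; then
        install_package apache2 libapache2-mod-wsgi
    elif is_fedora || is_suse; then
        install_package httpd mod_wsgi
    else
        exit_distro_not_supported "apache2 installation"
    fi
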
@@ -565,6 +531,43 @@ function git_clone {
565 565
 }
566 566
 
567 567
 
568
+# git update using reference as a branch.
569
+# git_update_branch ref
570
+function git_update_branch() {
571
+
572
+    GIT_BRANCH=$1
573
+
574
+    git checkout -f origin/$GIT_BRANCH
575
+    # a local branch might not exist
576
+    git branch -D $GIT_BRANCH || true
577
+    git checkout -b $GIT_BRANCH
578
+}
579
+
580
+
581
+# git update using reference as a branch.
582
+# git_update_remote_branch ref
583
+function git_update_remote_branch() {
584
+
585
+    GIT_BRANCH=$1
586
+
587
+    git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH
588
+}
589
+
590
+
591
+# git update using reference as a tag. Be careful editing source at that repo
592
+# as working copy will be in a detached mode
593
+# git_update_tag ref
594
+function git_update_tag() {
595
+
596
+    GIT_TAG=$1
597
+
598
+    git tag -d $GIT_TAG
599
+    # fetching given tag only
600
+    git fetch origin tag $GIT_TAG
601
+    git checkout -f $GIT_TAG
602
+}
603
+
604
+
568 605
 # Comment an option in an INI file
569 606
 # inicomment config-file section option
570 607
 function inicomment() {
... ...
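The ``git_update_*`` helpers relocated in the hunk above each take a single ref and assume the current directory is already a checkout (such as one created by ``git_clone``). A hedged usage sketch, with the repo path and refs purely illustrative:

    # Update an existing checkout to track a branch, discarding local edits,
    # or pin it to a tag (which leaves the working copy in detached HEAD).
    cd $DEST/nova
    git_update_branch stable/grizzly
    # git_update_tag 2013.1.1
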
@@ -1020,6 +1023,7 @@ function screen_rc {
1020 1020
     fi
1021 1021
 }
1022 1022
 
1023
+
1023 1024
 # Helper to remove the *.failure files under $SERVICE_DIR/$SCREEN_NAME
1024 1025
 # This is used for service_check when all the screen_it are called finished
1025 1026
 # init_service_check
... ...
@@ -1034,6 +1038,7 @@ function init_service_check() {
1034 1034
     rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure
1035 1035
 }
1036 1036
 
1037
+
1037 1038
 # Helper to get the status of each running service
1038 1039
 # service_check
1039 1040
 function service_check() {
... ...
@@ -1062,6 +1067,7 @@ function service_check() {
1062 1062
     fi
1063 1063
 }
1064 1064
 
1065
+
1065 1066
 # ``pip install`` the dependencies of the package before ``setup.py develop``
1066 1067
 # so pip and not distutils processes the dependency chain
1067 1068
 # Uses globals ``TRACK_DEPENDES``, ``*_proxy`
... ...
@@ -1242,6 +1248,7 @@ function upload_image() {
1242 1242
     fi
1243 1243
 }
1244 1244
 
1245
+
1245 1246
 # Set the database backend to use
1246 1247
 # When called from stackrc/localrc DATABASE_BACKENDS has not been
1247 1248
 # initialized yet, just save the configuration selection and call back later
... ...
@@ -1259,6 +1266,7 @@ function use_database {
1259 1259
     fi
1260 1260
 }
1261 1261
 
1262
+
1262 1263
 # Toggle enable/disable_service for services that must run exclusive of each other
1263 1264
 #  $1 The name of a variable containing a space-separated list of services
1264 1265
 #  $2 The name of a variable in which to store the enabled service's name
... ...
@@ -1275,6 +1283,7 @@ function use_exclusive_service {
1275 1275
     return 0
1276 1276
 }
1277 1277
 
1278
+
1278 1279
 # Wait for an HTTP server to start answering requests
1279 1280
 # wait_for_service timeout url
1280 1281
 function wait_for_service() {
... ...
@@ -1283,6 +1292,7 @@ function wait_for_service() {
1283 1283
     timeout $timeout sh -c "while ! http_proxy= https_proxy= curl -s $url >/dev/null; do sleep 1; done"
1284 1284
 }
1285 1285
 
1286
+
1286 1287
 # Wrapper for ``yum`` to set proxy environment variables
1287 1288
 # Uses globals ``OFFLINE``, ``*_proxy`
1288 1289
 # yum_install package [package ...]
... ...
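``wait_for_service`` simply loops ``curl`` against the URL until it answers or ``timeout`` gives up, so its exit status reports whether the service came up. A sketch matching the ``timeout url`` signature in the docstring (the endpoint and timeout are illustrative):

    # Wait up to SERVICE_TIMEOUT seconds for the (illustrative) endpoint,
    # then fail loudly if it never answered.
    if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:5000/v2.0/; then
        die $LINENO "keystone did not start"
    fi
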
@@ -1295,8 +1305,21 @@ function yum_install() {
1295 1295
         yum install -y "$@"
1296 1296
 }
1297 1297
 
1298
+
1299
+# zypper wrapper to set arguments correctly
1300
+# zypper_install package [package ...]
1301
+function zypper_install() {
1302
+    [[ "$OFFLINE" = "True" ]] && return
1303
+    local sudo="sudo"
1304
+    [[ "$(id -u)" = "0" ]] && sudo="env"
1305
+    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
1306
+        zypper --non-interactive install --auto-agree-with-licenses "$@"
1307
+}
1308
+
1309
+
1298 1310
 # ping check
1299 1311
 # Uses globals ``ENABLED_SERVICES``
1312
+# ping_check from-net ip boot-timeout expected
1300 1313
 function ping_check() {
1301 1314
     if is_service_enabled quantum; then
1302 1315
         _ping_check_quantum  "$1" $2 $3 $4
... ...
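Per their comments, ``yum_install`` and the relocated ``zypper_install`` wrapper behave the same way: both return immediately when ``OFFLINE=True`` and both carry the ``*_proxy`` variables through ``sudo``. A short sketch of picking the matching wrapper per distro family (package names are illustrative):

    # Use the wrapper that matches the detected distro family.
    if is_fedora; then
        yum_install libvirt qemu-kvm
    elif is_suse; then
        zypper_install libvirt kvm
    fi
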
@@ -1333,8 +1356,10 @@ function _ping_check_novanet() {
1333 1333
     fi
1334 1334
 }
1335 1335
 
1336
+
1336 1337
 # ssh check
1337 1338
 
1339
+# ssh_check net-name key-file floating-ip default-user active-timeout
1338 1340
 function ssh_check() {
1339 1341
     if is_service_enabled quantum; then
1340 1342
         _ssh_check_quantum  "$1" $2 $3 $4 $5
... ...
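The new usage lines for ``ping_check`` and ``ssh_check`` spell out their positional arguments; both dispatch to the Quantum or nova-network variant depending on which service is enabled. A hedged sketch of calls matching those signatures (all values are illustrative):

    # ping_check from-net ip boot-timeout expected
    ping_check "$PRIVATE_NETWORK_NAME" $VM_IP $BOOT_TIMEOUT True
    # ssh_check net-name key-file floating-ip default-user active-timeout
    ssh_check "$PRIVATE_NETWORK_NAME" $KEY_FILE $FLOATING_IP $DEFAULT_INSTANCE_USER $ACTIVE_TIMEOUT
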
@@ -1356,17 +1381,6 @@ function _ssh_check_novanet() {
1356 1356
 }
1357 1357
 
1358 1358
 
1359
-# zypper wrapper to set arguments correctly
1360
-# zypper_install package [package ...]
1361
-function zypper_install() {
1362
-    [[ "$OFFLINE" = "True" ]] && return
1363
-    local sudo="sudo"
1364
-    [[ "$(id -u)" = "0" ]] && sudo="env"
1365
-    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
1366
-        zypper --non-interactive install --auto-agree-with-licenses "$@"
1367
-}
1368
-
1369
-
1370 1359
 # Add a user to a group.
1371 1360
 # add_user_to_group user group
1372 1361
 function add_user_to_group() {
... ...
@@ -1396,6 +1410,7 @@ function get_python_exec_prefix() {
1396 1396
     fi
1397 1397
 }
1398 1398
 
1399
+
1399 1400
 # Get the location of the $module-rootwrap executables, where module is cinder
1400 1401
 # or nova.
1401 1402
 # get_rootwrap_location module
... ...
@@ -1405,6 +1420,7 @@ function get_rootwrap_location() {
1405 1405
     echo "$(get_python_exec_prefix)/$module-rootwrap"
1406 1406
 }
1407 1407
 
1408
+
1408 1409
 # Get the path to the pip command.
1409 1410
 # get_pip_command
1410 1411
 function get_pip_command() {
... ...
@@ -1419,6 +1435,7 @@ function get_pip_command() {
1419 1419
     fi
1420 1420
 }
1421 1421
 
1422
+
1422 1423
 # Path permissions sanity check
1423 1424
 # check_path_perm_sanity path
1424 1425
 function check_path_perm_sanity() {
... ...
@@ -1448,6 +1465,7 @@ function check_path_perm_sanity() {
1448 1448
     done
1449 1449
 }
1450 1450
 
1451
+
1451 1452
 # Restore xtrace
1452 1453
 $XTRACE
1453 1454
 
... ...
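The ``$XTRACE`` at the end of each library restores whatever xtrace setting was captured when the file was sourced. The save/restore pair used by these libraries looks like this, as a sketch:

    # At the top of the library: remember the caller's xtrace state and
    # silence tracing while the functions are defined.
    XTRACE=$(set +o | grep xtrace)
    set +o xtrace

    # ... function definitions ...

    # Restore xtrace
    $XTRACE
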
@@ -74,13 +74,20 @@ function _horizon_config_set() {
74 74
 }
75 75
 
76 76
 
77
+
77 78
 # Entry Points
78 79
 # ------------
79 80
 
80 81
 # cleanup_horizon() - Remove residual data files, anything left over from previous
81 82
 # runs that a clean run would need to clean up
82 83
 function cleanup_horizon() {
83
-    :
84
+    if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
85
+        # If ``/usr/bin/node`` points into ``$DEST``
86
+        # we installed it via ``install_nodejs``
87
+        if [[ $(readlink -f /usr/bin/node) =~ ($DEST) ]]; then
88
+            sudo rm /usr/bin/node
89
+        fi
90
+    fi
84 91
 }
85 92
 
86 93
 # configure_horizon() - Set config files, create data dirs, etc
... ...
@@ -111,7 +118,6 @@ function init_horizon() {
111 111
     # Create an empty directory that apache uses as docroot
112 112
     sudo mkdir -p $HORIZON_DIR/.blackhole
113 113
 
114
-
115 114
     HORIZON_REQUIRE=''
116 115
     if is_ubuntu; then
117 116
         # Clean up the old config name
... ...
@@ -148,7 +154,6 @@ function init_horizon() {
148 148
         s,%DEST%,$DEST,g;
149 149
         s,%HORIZON_REQUIRE%,$HORIZON_REQUIRE,g;
150 150
     \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF"
151
-
152 151
 }
153 152
 
154 153
 # install_horizon() - Collect source and prepare
... ...
@@ -193,6 +198,7 @@ function stop_horizon() {
193 193
     fi
194 194
 }
195 195
 
196
+
196 197
 # Restore xtrace
197 198
 $XTRACE
198 199
 
... ...
@@ -178,7 +178,6 @@ function configure_keystone() {
178 178
     cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf
179 179
     iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
180 180
     iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production"
181
-
182 181
 }
183 182
 
184 183
 # create_keystone_accounts() - Sets up common required keystone accounts
... ...
@@ -254,25 +253,6 @@ create_keystone_accounts() {
254 254
             --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" \
255 255
             --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0"
256 256
     fi
257
-
258
-    # TODO(dtroyer): This is part of a series of changes...remove these when
259
-    #                complete if they are really unused
260
-#    KEYSTONEADMIN_ROLE=$(keystone role-create \
261
-#        --name KeystoneAdmin \
262
-#        | grep " id " | get_field 2)
263
-#    KEYSTONESERVICE_ROLE=$(keystone role-create \
264
-#        --name KeystoneServiceAdmin \
265
-#        | grep " id " | get_field 2)
266
-
267
-    # TODO(termie): these two might be dubious
268
-#    keystone user-role-add \
269
-#        --user_id $ADMIN_USER \
270
-#        --role_id $KEYSTONEADMIN_ROLE \
271
-#        --tenant_id $ADMIN_TENANT
272
-#    keystone user-role-add \
273
-#        --user_id $ADMIN_USER \
274
-#        --role_id $KEYSTONESERVICE_ROLE \
275
-#        --tenant_id $ADMIN_TENANT
276 257
 }
277 258
 
278 259
 # init_keystone() - Initialize databases, etc.
... ...
@@ -339,6 +319,7 @@ function stop_keystone() {
339 339
     screen -S $SCREEN_NAME -p key -X kill
340 340
 }
341 341
 
342
+
342 343
 # Restore xtrace
343 344
 $XTRACE
344 345
 
... ...
@@ -300,7 +300,7 @@ EOF
300 300
             fi
301 301
 
302 302
             if is_fedora || is_suse; then
303
-                if is_fedora && [[  $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then
303
+                if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then
304 304
                     sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
305 305
 [libvirt Management Access]
306 306
 Identity=unix-group:$LIBVIRT_GROUP
... ...
@@ -352,7 +352,6 @@ EOF"
352 352
             restart_service $LIBVIRT_DAEMON
353 353
         fi
354 354
 
355
-
356 355
         # Instance Storage
357 356
         # ----------------
358 357
 
... ...
@@ -494,7 +493,6 @@ function create_nova_conf() {
494 494
         iniset_multiline $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" "ceilometer.compute.nova_notifier"
495 495
     fi
496 496
 
497
-
498 497
     # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS``
499 498
     if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then
500 499
         EXTRA_OPTS=$EXTRA_FLAGS
... ...
@@ -112,18 +112,18 @@ if is_service_enabled quantum; then
112 112
     # The following variables control the Quantum openvswitch and
113 113
     # linuxbridge plugins' allocation of tenant networks and
114 114
     # availability of provider networks. If these are not configured
115
-    # in localrc, tenant networks will be local to the host (with no
115
+    # in ``localrc``, tenant networks will be local to the host (with no
116 116
     # remote connectivity), and no physical resources will be
117 117
     # available for the allocation of provider networks.
118 118
 
119 119
     # To use GRE tunnels for tenant networks, set to True in
120
-    # localrc. GRE tunnels are only supported by the openvswitch
120
+    # ``localrc``. GRE tunnels are only supported by the openvswitch
121 121
     # plugin, and currently only on Ubuntu.
122 122
     ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False}
123 123
 
124 124
     # If using GRE tunnels for tenant networks, specify the range of
125 125
     # tunnel IDs from which tenant networks are allocated. Can be
126
-    # overriden in localrc in necesssary.
126
+    # overriden in ``localrc`` in necesssary.
127 127
     TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000}
128 128
 
129 129
     # To use VLANs for tenant networks, set to True in localrc. VLANs
... ...
@@ -131,7 +131,7 @@ if is_service_enabled quantum; then
131 131
     # requiring additional configuration described below.
132 132
     ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
133 133
 
134
-    # If using VLANs for tenant networks, set in localrc to specify
134
+    # If using VLANs for tenant networks, set in ``localrc`` to specify
135 135
     # the range of VLAN VIDs from which tenant networks are
136 136
     # allocated. An external network switch must be configured to
137 137
     # trunk these VLANs between hosts for multi-host connectivity.
... ...
@@ -140,16 +140,16 @@ if is_service_enabled quantum; then
140 140
     TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
141 141
 
142 142
     # If using VLANs for tenant networks, or if using flat or VLAN
143
-    # provider networks, set in localrc to the name of the physical
144
-    # network, and also configure OVS_PHYSICAL_BRIDGE for the
145
-    # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge
143
+    # provider networks, set in ``localrc`` to the name of the physical
144
+    # network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the
145
+    # openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge
146 146
     # agent, as described below.
147 147
     #
148 148
     # Example: ``PHYSICAL_NETWORK=default``
149 149
     PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
150 150
 
151 151
     # With the openvswitch plugin, if using VLANs for tenant networks,
152
-    # or if using flat or VLAN provider networks, set in localrc to
152
+    # or if using flat or VLAN provider networks, set in ``localrc`` to
153 153
     # the name of the OVS bridge to use for the physical network. The
154 154
     # bridge will be created if it does not already exist, but a
155 155
     # physical interface must be manually added to the bridge as a
... ...
@@ -159,28 +159,29 @@ if is_service_enabled quantum; then
159 159
     OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-}
160 160
 
161 161
     # With the linuxbridge plugin, if using VLANs for tenant networks,
162
-    # or if using flat or VLAN provider networks, set in localrc to
162
+    # or if using flat or VLAN provider networks, set in ``localrc`` to
163 163
     # the name of the network interface to use for the physical
164 164
     # network.
165 165
     #
166 166
     # Example: ``LB_PHYSICAL_INTERFACE=eth1``
167 167
     LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
168 168
 
169
-    # With the openvswitch plugin, set to True in localrc to enable
169
+    # With the openvswitch plugin, set to True in ``localrc`` to enable
170 170
     # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
171 171
     #
172 172
     # Example: ``OVS_ENABLE_TUNNELING=True``
173 173
     OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
174 174
 fi
175 175
 
176
-
177 176
 # Quantum plugin specific functions
178 177
 # ---------------------------------
179
-# Please refer to lib/quantum_plugins/README.md for details.
178
+
179
+# Please refer to ``lib/quantum_plugins/README.md`` for details.
180 180
 source $TOP_DIR/lib/quantum_plugins/$Q_PLUGIN
181 181
 
182 182
 # Agent loadbalancer service plugin functions
183 183
 # -------------------------------------------
184
+
184 185
 # Hardcoding for 1 service plugin for now
185 186
 source $TOP_DIR/lib/quantum_plugins/services/agent_loadbalancer
186 187
 
... ...
@@ -191,7 +192,6 @@ else
191 191
     Q_USE_SECGROUP=False
192 192
 fi
193 193
 
194
-
195 194
 # Functions
196 195
 # ---------
197 196
 
... ...
@@ -423,7 +423,7 @@ function cleanup_quantum() {
423 423
 
424 424
 # _configure_quantum_common()
425 425
 # Set common config for all quantum server and agents.
426
-# This MUST be called before other _configure_quantum_* functions.
426
+# This MUST be called before other ``_configure_quantum_*`` functions.
427 427
 function _configure_quantum_common() {
428 428
     # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find
429 429
     if [[ ! -d $QUANTUM_CONF_DIR ]]; then
... ...
@@ -433,11 +433,11 @@ function _configure_quantum_common() {
433 433
 
434 434
     cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF
435 435
 
436
-    # Set plugin-specific variables Q_DB_NAME, Q_PLUGIN_CLASS.
437
-    # For main plugin config file, set Q_PLUGIN_CONF_PATH, Q_PLUGIN_CONF_FILENAME.
438
-    # For addition plugin config files, set Q_PLUGIN_EXTRA_CONF_PATH,
439
-    # Q_PLUGIN_EXTRA_CONF_FILES.  For example:
440
-    #    Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)
436
+    # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``.
437
+    # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``.
438
+    # For addition plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH``,
439
+    # ``Q_PLUGIN_EXTRA_CONF_FILES``.  For example:
440
+    #    ``Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)``
441 441
     quantum_plugin_configure_common
442 442
 
443 443
     if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then
... ...
@@ -543,8 +543,7 @@ function _configure_quantum_metadata_agent() {
543 543
     _quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url
544 544
 }
545 545
 
546
-function _configure_quantum_lbaas()
547
-{
546
+function _configure_quantum_lbaas() {
548 547
     quantum_agent_lbaas_install_agent_packages
549 548
     quantum_agent_lbaas_configure_common
550 549
     quantum_agent_lbaas_configure_agent
... ...
@@ -606,17 +605,17 @@ function _quantum_setup_rootwrap() {
606 606
         return
607 607
     fi
608 608
     # Deploy new rootwrap filters files (owned by root).
609
-    # Wipe any existing rootwrap.d files first
609
+    # Wipe any existing ``rootwrap.d`` files first
610 610
     Q_CONF_ROOTWRAP_D=$QUANTUM_CONF_DIR/rootwrap.d
611 611
     if [[ -d $Q_CONF_ROOTWRAP_D ]]; then
612 612
         sudo rm -rf $Q_CONF_ROOTWRAP_D
613 613
     fi
614
-    # Deploy filters to $QUANTUM_CONF_DIR/rootwrap.d
614
+    # Deploy filters to ``$QUANTUM_CONF_DIR/rootwrap.d``
615 615
     mkdir -p -m 755 $Q_CONF_ROOTWRAP_D
616 616
     cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
617 617
     sudo chown -R root:root $Q_CONF_ROOTWRAP_D
618 618
     sudo chmod 644 $Q_CONF_ROOTWRAP_D/*
619
-    # Set up rootwrap.conf, pointing to $QUANTUM_CONF_DIR/rootwrap.d
619
+    # Set up ``rootwrap.conf``, pointing to ``$QUANTUM_CONF_DIR/rootwrap.d``
620 620
     # location moved in newer versions, prefer new location
621 621
     if test -r $QUANTUM_DIR/etc/quantum/rootwrap.conf; then
622 622
       sudo cp -p $QUANTUM_DIR/etc/quantum/rootwrap.conf $Q_RR_CONF_FILE
... ...
@@ -626,7 +625,7 @@ function _quantum_setup_rootwrap() {
626 626
     sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
627 627
     sudo chown root:root $Q_RR_CONF_FILE
628 628
     sudo chmod 0644 $Q_RR_CONF_FILE
629
-    # Specify rootwrap.conf as first parameter to quantum-rootwrap
629
+    # Specify ``rootwrap.conf`` as first parameter to quantum-rootwrap
630 630
     ROOTWRAP_SUDOER_CMD="$QUANTUM_ROOTWRAP $Q_RR_CONF_FILE *"
631 631
 
632 632
     # Set up the rootwrap sudoers for quantum
... ...
@@ -743,7 +742,8 @@ function _ssh_check_quantum() {
743 743
 
744 744
 # Quantum 3rd party programs
745 745
 #---------------------------
746
-# please refer to lib/quantum_thirdparty/README.md for details
746
+
747
+# please refer to ``lib/quantum_thirdparty/README.md`` for details
747 748
 QUANTUM_THIRD_PARTIES=""
748 749
 for f in $TOP_DIR/lib/quantum_thirdparty/*; do
749 750
      third_party=$(basename $f)
... ...
@@ -3,7 +3,7 @@
3 3
 # ``stack.sh`` is an opinionated OpenStack developer installation.  It
4 4
 # installs and configures various combinations of **Ceilometer**, **Cinder**,
5 5
 # **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Quantum**
6
-# and **Swift**
6
+# and **Swift**.
7 7
 
8 8
 # This script allows you to specify configuration options of what git
9 9
 # repositories to use, enabled services, network configuration and various
... ...
@@ -12,9 +12,11 @@
12 12
 # developer install.
13 13
 
14 14
 # To keep this script simple we assume you are running on a recent **Ubuntu**
15
-# (12.04 Precise or newer) or **Fedora** (F16 or newer) machine.  It
16
-# should work in a VM or physical server.  Additionally we put the list of
17
-# ``apt`` and ``rpm`` dependencies and other configuration files in this repo.
15
+# (12.04 Precise or newer) or **Fedora** (F16 or newer) machine.  (It may work
16
+# on other platforms but support for those platforms is left to those who added
17
+# them to DevStack.)  It should work in a VM or physical server.  Additionally
18
+# we maintain a list of ``apt`` and ``rpm`` dependencies and other configuration
19
+# files in this repo.
18 20
 
19 21
 # Learn more and get the most recent version at http://devstack.org
20 22
 
... ...
@@ -33,55 +35,20 @@ source $TOP_DIR/functions
33 33
 GetDistro
34 34
 
35 35
 
36
-# Configure non-default repos
37
-# ===========================
38
-
39
-# Repo configuration needs to occur before package installation.
40
-
41
-# Some dependencies are not available in Debian Wheezy official
42
-# repositories. However, it's possible to run OpenStack from gplhost
43
-# repository.
44
-if [[ "$os_VENDOR" =~ (Debian) ]]; then
45
-    echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list
46
-    echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list
47
-    apt_get update
48
-    apt_get install --force-yes gplhost-archive-keyring
49
-fi
50
-
51
-# Installing Open vSwitch on RHEL6 requires enabling the RDO repo.
52
-RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-3.noarch.rpm"}
53
-RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-grizzly"}
54
-# RHEL6 requires EPEL for many Open Stack dependencies
55
-RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
56
-
57
-if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
58
-
59
-    if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
60
-        echo "RDO repo not detected; installing"
61
-        yum_install $RHEL6_RDO_REPO_RPM || \
62
-            die $LINENO "Error installing RDO repo, cannot continue"
63
-    fi
64
-
65
-    if ! yum repolist enabled epel | grep -q 'epel'; then
66
-        echo "EPEL not detected; installing"
67
-        yum_install ${RHEL6_EPEL_RPM} || \
68
-            die $LINENO "Error installing EPEL repo, cannot continue"
69
-    fi
70
-
71
-fi
72
-
73 36
 # Global Settings
74 37
 # ===============
75 38
 
76
-# ``stack.sh`` is customizable through setting environment variables.  If you
77
-# want to override a setting you can set and export it::
39
+# ``stack.sh`` is customizable by setting environment variables.  Override a
40
+# default setting via export::
78 41
 #
79 42
 #     export DATABASE_PASSWORD=anothersecret
80 43
 #     ./stack.sh
81 44
 #
82
-# You can also pass options on a single line ``DATABASE_PASSWORD=simple ./stack.sh``
45
+# or by setting the variable on the command line::
83 46
 #
84
-# Additionally, you can put any local variables into a ``localrc`` file::
47
+#     DATABASE_PASSWORD=simple ./stack.sh
48
+#
49
+# Persistent variables can be placed in a ``localrc`` file::
85 50
 #
86 51
 #     DATABASE_PASSWORD=anothersecret
87 52
 #     DATABASE_USER=hellaroot
... ...
@@ -166,6 +133,41 @@ fi
166 166
 VERBOSE=$(trueorfalse True $VERBOSE)
167 167
 
168 168
 
169
+# Additional repos
170
+# ================
171
+
172
+# Some distros need to add repos beyond the defaults provided by the vendor
173
+# to pick up required packages.
174
+
175
+# The Debian Wheezy official repositories do not contain all required packages,
176
+# add gplhost repository.
177
+if [[ "$os_VENDOR" =~ (Debian) ]]; then
178
+    echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list
179
+    echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list
180
+    apt_get update
181
+    apt_get install --force-yes gplhost-archive-keyring
182
+fi
183
+
184
+if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
185
+    # Installing Open vSwitch on RHEL6 requires enabling the RDO repo.
186
+    RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-3.noarch.rpm"}
187
+    RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-grizzly"}
188
+    if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
189
+        echo "RDO repo not detected; installing"
190
+        yum_install $RHEL6_RDO_REPO_RPM || \
191
+            die $LINENO "Error installing RDO repo, cannot continue"
192
+    fi
193
+
194
+    # RHEL6 requires EPEL for many Open Stack dependencies
195
+    RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
196
+    if ! yum repolist enabled epel | grep -q 'epel'; then
197
+        echo "EPEL not detected; installing"
198
+        yum_install ${RHEL6_EPEL_RPM} || \
199
+            die $LINENO "Error installing EPEL repo, cannot continue"
200
+    fi
201
+fi
202
+
203
+
169 204
 # root Access
170 205
 # -----------
171 206
 
... ...
@@ -296,7 +298,7 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
296 296
 # Configure Projects
297 297
 # ==================
298 298
 
299
-# Get project function libraries
299
+# Source project function libraries
300 300
 source $TOP_DIR/lib/tls
301 301
 source $TOP_DIR/lib/horizon
302 302
 source $TOP_DIR/lib/keystone
... ...
@@ -310,7 +312,7 @@ source $TOP_DIR/lib/quantum
310 310
 source $TOP_DIR/lib/baremetal
311 311
 source $TOP_DIR/lib/ldap
312 312
 
313
-# Set the destination directories for OpenStack projects
313
+# Set the destination directories for other OpenStack projects
314 314
 OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
315 315
 PBR_DIR=$DEST/pbr
316 316
 
... ...
@@ -565,6 +567,7 @@ failed() {
565 565
 # an error.  It is also useful for following along as the install occurs.
566 566
 set -o xtrace
567 567
 
568
+
568 569
 # Install Packages
569 570
 # ================
570 571
 
... ...
@@ -585,61 +588,51 @@ if is_service_enabled q-agt; then
585 585
     install_quantum_agent_packages
586 586
 fi
587 587
 
588
-#
588
+
589 589
 # System-specific preconfigure
590 590
 # ============================
591 591
 
592 592
 if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
593
-    # Avoid having to configure selinux to allow things like httpd to
594
-    # access horizion files or run binaries like nodejs (LP#1175444)
593
+    # Disable selinux to avoid configuring to allow Apache access
594
+    # to Horizon files or run nodejs (LP#1175444)
595 595
     if selinuxenabled; then
596 596
         sudo setenforce 0
597 597
     fi
598 598
 
599
-    # An old version (2.0.1) of python-crypto is probably installed on
600
-    # a fresh system, via the dependency chain
601
-    # cas->python-paramiko->python-crypto (related to anaconda).
602
-    # Unfortunately, "pip uninstall pycrypto" will remove the
603
-    # .egg-info file for this rpm-installed version, but leave most of
604
-    # the actual library files behind in /usr/lib64/python2.6/Crypto.
605
-    # When later "pip install pycrypto" happens, the built library
606
-    # will be installed over these existing files; the result is a
607
-    # useless mess of old, rpm-packaged files and pip-installed files.
608
-    # Unsurprisingly, the end result is it doesn't work.  Thus we have
609
-    # to get rid of it now so that any packages that pip-install
610
-    # pycrypto get a "clean slate".
611
-    # (note, we have to be careful about other RPM packages specified
612
-    # pulling in python-crypto as well.  That's why RHEL6 doesn't
613
-    # install python-paramiko packages for example...)
599
+    # An old version of ``python-crypto`` (2.0.1) may be installed on a
600
+    # fresh system via Anaconda and the dependency chain
601
+    # ``cas`` -> ``python-paramiko`` -> ``python-crypto``.
602
+    # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info`` file
603
+    # but leave most of the actual library files behind in ``/usr/lib64/python2.6/Crypto``.
604
+    # Later ``pip install pycrypto`` will install over the packaged files resulting
605
+    # in a useless mess of old, rpm-packaged files and pip-installed files.
606
+    # Remove the package so that ``pip install python-crypto`` installs cleanly.
607
+    # Note: other RPM packages may require ``python-crypto`` as well.  For example,
608
+    # RHEL6 does not install ``python-paramiko packages``.
614 609
     uninstall_package python-crypto
615 610
 
616
-    # A similar thing happens for python-lxml (a dependency of
617
-    # ipa-client, an auditing thing we don't care about).  We have the
618
-    # build-dependencies the lxml pip-install will need (gcc,
619
-    # libxml2-dev & libxslt-dev) in the "general" rpm lists
611
+    # A similar situation occurs with ``python-lxml``, which is required by
612
+    # ``ipa-client``, an auditing package we don't care about.  The
613
+    # build-dependencies needed for ``pip install lxml`` (``gcc``,
614
+    # ``libxml2-dev`` and ``libxslt-dev``) are present in ``files/rpms/general``.
620 615
     uninstall_package python-lxml
621 616
 
622
-    # If the dbus rpm was installed by the devstack rpm dependencies
623
-    # then you may hit a bug where the uuid isn't generated because
624
-    # the service was never started (PR#598200), causing issues for
625
-    # Nova stopping later on complaining that
626
-    # '/var/lib/dbus/machine-id' doesn't exist.
617
+    # If the ``dbus`` package was installed by DevStack dependencies the
618
+    # uuid may not be generated because the service was never started (PR#598200),
619
+    # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id``
620
+    # does not exist.
627 621
     sudo service messagebus restart
628 622
 
629
-    # In setup.py, a "setup_requires" package is supposed to
630
-    # transient.  However there is a bug with rhel6 distribute where
631
-    # setup_requires packages can register entry points that aren't
632
-    # cleared out properly after the setup-phase; the end result is
633
-    # installation failures (bz#924038).  Thus we pre-install the
634
-    # problem package here; this way the setup_requires dependency is
635
-    # already satisfied and it will not need to be installed
636
-    # transiently, meaning we avoid the issue of it not being cleaned
637
-    # out properly.  Note we do this before the track-depends below.
623
+    # ``setup.py`` contains a ``setup_requires`` package that is supposed
624
+    # to be transient.  However, RHEL6 distribute has a bug where
625
+    # ``setup_requires`` registers entry points that are not cleaned
626
+    # out properly after the setup-phase resulting in installation failures
627
+    # (bz#924038).  Pre-install the problem package so the ``setup_requires``
628
+    # dependency is satisfied and it will not be installed transiently.
629
+    # Note we do this before the track-depends below.
638 630
     pip_install hgtools
639 631
 
640
-    # The version of python-nose in the RHEL6 repo is incompatible
641
-    # with Tempest.  As a workaround:
642
-
632
+    # RHEL6's version of ``python-nose`` is incompatible with Tempest.
643 633
     # Install nose 1.1 (Tempest-compatible) from EPEL
644 634
     install_package python-nose1.1
645 635
     # Add a symlink for the new nosetests to allow tox for Tempest to
... ...
@@ -850,10 +843,10 @@ fi
850 850
 init_service_check
851 851
 
852 852
 
853
-# Kick off Sysstat
854
-# ------------------------
855
-# run sysstat if it is enabled, this has to be early as daemon
856
-# startup is one of the things to track.
853
+# Sysstat
854
+# -------
855
+
856
+# If enabled, systat has to start early to track OpenStack service startup.
857 857
 if is_service_enabled sysstat;then
858 858
     if [[ -n ${SCREEN_LOGDIR} ]]; then
859 859
         screen_it sysstat "sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL"
... ...
@@ -967,7 +960,7 @@ if is_service_enabled n-net q-dhcp; then
967 967
     rm -rf ${NOVA_STATE_PATH}/networks
968 968
     sudo mkdir -p ${NOVA_STATE_PATH}/networks
969 969
     sudo chown -R ${USER} ${NOVA_STATE_PATH}/networks
970
-    # Force IP forwarding on, just on case
970
+    # Force IP forwarding on, just in case
971 971
     sudo sysctl -w net.ipv4.ip_forward=1
972 972
 fi
973 973
 
... ...
@@ -1018,6 +1011,7 @@ if is_service_enabled nova; then
1018 1018
         XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
1019 1019
         iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER"
1020 1020
 
1021
+
1021 1022
     # OpenVZ
1022 1023
     # ------
1023 1024
 
... ...
@@ -1028,6 +1022,7 @@ if is_service_enabled nova; then
1028 1028
         LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
1029 1029
         iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
1030 1030
 
1031
+
1031 1032
     # Bare Metal
1032 1033
     # ----------
1033 1034
 
... ...
@@ -1050,6 +1045,7 @@ if is_service_enabled nova; then
1050 1050
            iniset $NOVA_CONF baremetal ${I/=/ }
1051 1051
         done
1052 1052
 
1053
+
1053 1054
    # PowerVM
1054 1055
    # -------
1055 1056
 
... ...
@@ -1069,8 +1065,9 @@ if is_service_enabled nova; then
1069 1069
         iniset $NOVA_CONF DEFAULT powervm_img_remote_path $POWERVM_IMG_REMOTE_PATH
1070 1070
         iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH
1071 1071
 
1072
+
1072 1073
     # vSphere API
1073
-    # -------
1074
+    # -----------
1074 1075
 
1075 1076
     elif [ "$VIRT_DRIVER" = 'vsphere' ]; then
1076 1077
         echo_summary "Using VMware vCenter driver"
... ...
@@ -1081,8 +1078,9 @@ if is_service_enabled nova; then
1081 1081
         iniset $NOVA_CONF DEFAULT vmwareapi_host_password "$VMWAREAPI_PASSWORD"
1082 1082
         iniset $NOVA_CONF DEFAULT vmwareapi_cluster_name "$VMWAREAPI_CLUSTER"
1083 1083
 
1084
+
1084 1085
     # fake
1085
-    # -----
1086
+    # ----
1086 1087
 
1087 1088
     elif [ "$VIRT_DRIVER" = 'fake' ]; then
1088 1089
         echo_summary "Using fake Virt driver"
... ...
@@ -1102,8 +1100,8 @@ if is_service_enabled nova; then
1102 1102
         iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter"
1103 1103
 
1104 1104
 
1105
-    # Default
1106
-    # -------
1105
+    # Default libvirt
1106
+    # ---------------
1107 1107
 
1108 1108
     else
1109 1109
         echo_summary "Using libvirt virtualization driver"
... ...
@@ -1296,7 +1294,6 @@ if is_service_enabled nova && is_baremetal; then
1296 1296
     screen_it baremetal "nova-baremetal-deploy-helper"
1297 1297
 fi
1298 1298
 
1299
-
1300 1299
 # Save some values we generated for later use
1301 1300
 CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT")
1302 1301
 echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv
... ...
@@ -73,20 +73,6 @@ CINDER_BRANCH=${CINDER_BRANCH:-master}
73 73
 CINDERCLIENT_REPO=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git}
74 74
 CINDERCLIENT_BRANCH=${CINDERCLIENT_BRANCH:-master}
75 75
 
76
-# compute service
77
-NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
78
-NOVA_BRANCH=${NOVA_BRANCH:-master}
79
-
80
-# storage service
81
-SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
82
-SWIFT_BRANCH=${SWIFT_BRANCH:-master}
83
-SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/fujita/swift3.git}
84
-SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
85
-
86
-# python swift client library
87
-SWIFTCLIENT_REPO=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git}
88
-SWIFTCLIENT_BRANCH=${SWIFTCLIENT_BRANCH:-master}
89
-
90 76
 # image catalog service
91 77
 GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git}
92 78
 GLANCE_BRANCH=${GLANCE_BRANCH:-master}
... ...
@@ -95,22 +81,30 @@ GLANCE_BRANCH=${GLANCE_BRANCH:-master}
95 95
 GLANCECLIENT_REPO=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git}
96 96
 GLANCECLIENT_BRANCH=${GLANCECLIENT_BRANCH:-master}
97 97
 
98
-# unified auth system (manages accounts/tokens)
99
-KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git}
100
-KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master}
101
-
102
-# a websockets/html5 or flash powered VNC console for vm instances
103
-NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git}
104
-NOVNC_BRANCH=${NOVNC_BRANCH:-master}
98
+# heat service
99
+HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git}
100
+HEAT_BRANCH=${HEAT_BRANCH:-master}
105 101
 
106
-# a websockets/html5 or flash powered SPICE console for vm instances
107
-SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
108
-SPICE_BRANCH=${SPICE_BRANCH:-master}
102
+# python heat client library
103
+HEATCLIENT_REPO=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git}
104
+HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master}
109 105
 
110 106
 # django powered web control panel for openstack
111 107
 HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git}
112 108
 HORIZON_BRANCH=${HORIZON_BRANCH:-master}
113 109
 
110
+# unified auth system (manages accounts/tokens)
111
+KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git}
112
+KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master}
113
+
114
+# python keystone client library to nova that horizon uses
115
+KEYSTONECLIENT_REPO=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git}
116
+KEYSTONECLIENT_BRANCH=${KEYSTONECLIENT_BRANCH:-master}
117
+
118
+# compute service
119
+NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
120
+NOVA_BRANCH=${NOVA_BRANCH:-master}
121
+
114 122
 # python client library to nova that horizon (and others) use
115 123
 NOVACLIENT_REPO=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git}
116 124
 NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master}
... ...
@@ -119,9 +113,9 @@ NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master}
119 119
 OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
120 120
 OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master}
121 121
 
122
-# python keystone client library to nova that horizon uses
123
-KEYSTONECLIENT_REPO=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git}
124
-KEYSTONECLIENT_BRANCH=${KEYSTONECLIENT_BRANCH:-master}
122
+# pbr drives the setuptools configs
123
+PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
124
+PBR_BRANCH=${PBR_BRANCH:-master}
125 125
 
126 126
 # quantum service
127 127
 QUANTUM_REPO=${QUANTUM_REPO:-${GIT_BASE}/openstack/quantum.git}
... ...
@@ -131,21 +125,20 @@ QUANTUM_BRANCH=${QUANTUM_BRANCH:-master}
131 131
 QUANTUMCLIENT_REPO=${QUANTUMCLIENT_REPO:-${GIT_BASE}/openstack/python-quantumclient.git}
132 132
 QUANTUMCLIENT_BRANCH=${QUANTUMCLIENT_BRANCH:-master}
133 133
 
134
+# storage service
135
+SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
136
+SWIFT_BRANCH=${SWIFT_BRANCH:-master}
137
+SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/fujita/swift3.git}
138
+SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
139
+
140
+# python swift client library
141
+SWIFTCLIENT_REPO=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git}
142
+SWIFTCLIENT_BRANCH=${SWIFTCLIENT_BRANCH:-master}
143
+
134 144
 # Tempest test suite
135 145
 TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git}
136 146
 TEMPEST_BRANCH=${TEMPEST_BRANCH:-master}
137 147
 
138
-# heat service
139
-HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git}
140
-HEAT_BRANCH=${HEAT_BRANCH:-master}
141
-
142
-# python heat client library
143
-HEATCLIENT_REPO=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git}
144
-HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master}
145
-
146
-# ryu service
147
-RYU_REPO=${RYU_REPO:-${GIT_BASE}/osrg/ryu.git}
148
-RYU_BRANCH=${RYU_BRANCH:-master}
149 148
 
150 149
 # diskimage-builder
151 150
 BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/stackforge/diskimage-builder.git}
... ...
@@ -157,10 +150,18 @@ BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master}
157 157
 BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git}
158 158
 BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master}
159 159
 
160
-# pbr
161
-# Used to drive the setuptools configs
162
-PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
163
-PBR_BRANCH=${PBR_BRANCH:-master}
160
+# a websockets/html5 or flash powered VNC console for vm instances
161
+NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git}
162
+NOVNC_BRANCH=${NOVNC_BRANCH:-master}
163
+
164
+# ryu service
165
+RYU_REPO=${RYU_REPO:-${GIT_BASE}/osrg/ryu.git}
166
+RYU_BRANCH=${RYU_BRANCH:-master}
167
+
168
+# a websockets/html5 or flash powered SPICE console for vm instances
169
+SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
170
+SPICE_BRANCH=${SPICE_BRANCH:-master}
171
+
164 172
 
165 173
 # Nova hypervisor configuration.  We default to libvirt with **kvm** but will
166 174
 # drop back to **qemu** if we are unable to load the kvm module.  ``stack.sh`` can
... ...
@@ -184,18 +185,22 @@ case "$VIRT_DRIVER" in
184 184
         ;;
185 185
 esac
186 186
 
187
-# Specify a comma-separated list of UEC images to download and install into glance.
188
-# supported urls here are:
187
+
188
+# Images
189
+# ------
190
+
191
+# Specify a comma-separated list of images to download and install into glance.
192
+# Supported urls here are:
189 193
 #  * "uec-style" images:
190 194
 #     If the file ends in .tar.gz, uncompress the tarball and and select the first
191 195
 #     .img file inside it as the image.  If present, use "*-vmlinuz*" as the kernel
192 196
 #     and "*-initrd*" as the ramdisk
193
-#     example: http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-amd64.tar.gz
197
+#     example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz
194 198
 #  * disk image (*.img,*.img.gz)
195 199
 #    if file ends in .img, then it will be uploaded and registered as a to
196 200
 #    glance as a disk image.  If it ends in .gz, it is uncompressed first.
197 201
 #    example:
198
-#      http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img
202
+#      http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img
199 203
 #      http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-rootfs.img.gz
200 204
 #  * OpenVZ image:
201 205
 #    OpenVZ uses its own format of image, and does not support UEC style images
... ...
@@ -222,11 +227,12 @@ case "$VIRT_DRIVER" in
222 222
         ;;
223 223
     vsphere)
224 224
         IMAGE_URLS="";;
225
-    *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
225
+    *) # Default to Cirros with kernel, ramdisk and disk image
226 226
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec}
227 227
         IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};;
228 228
 esac
229 229
 
230
+
230 231
 # 5Gb default volume backing file size
231 232
 VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M}
232 233