
fix whitespace in the rest of lib/*

This brings it in line with the bash8 checker.

Change-Id: Ib34a2292dd5bc259069457461041ec9cd4fd2957

Sean Dague authored on 2013/10/22 21:47:11
Showing 12 changed files
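
The change is a mechanical re-indent: continuation lines and block bodies that previously sat at odd widths (three, six, seven or nine spaces, or aligned under an earlier argument) are moved to multiples of four spaces, the indentation the bash8 checker expects. As a minimal illustration taken from the new side of the first hunk below (the variables come from the surrounding DevStack library code), the cleaned-up form of one glance call is:

    # each continuation line now sits at eight spaces, a multiple of four
    BM_DEPLOY_KERNEL_ID=$(glance \
        --os-auth-token $token \
        --os-image-url http://$GLANCE_HOSTPORT \
        image-create \
        --name $BM_DEPLOY_KERNEL \
        --is-public True --disk-format=aki \
        < $TOP_DIR/files/$BM_DEPLOY_KERNEL  | grep ' id ' | get_field 2)

Two hunks go slightly beyond pure whitespace: start_nova_compute() folds a stand-alone do onto its for line (10 lines become 9), and create_trove_accounts() reflows the keystone user-create call to one argument per line (14 lines become 15).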
@@ -256,19 +256,19 @@ function upload_baremetal_deploy() {
 
     # load them into glance
     BM_DEPLOY_KERNEL_ID=$(glance \
-         --os-auth-token $token \
-         --os-image-url http://$GLANCE_HOSTPORT \
-         image-create \
-         --name $BM_DEPLOY_KERNEL \
-         --is-public True --disk-format=aki \
-         < $TOP_DIR/files/$BM_DEPLOY_KERNEL  | grep ' id ' | get_field 2)
+        --os-auth-token $token \
+        --os-image-url http://$GLANCE_HOSTPORT \
+        image-create \
+        --name $BM_DEPLOY_KERNEL \
+        --is-public True --disk-format=aki \
+        < $TOP_DIR/files/$BM_DEPLOY_KERNEL  | grep ' id ' | get_field 2)
     BM_DEPLOY_RAMDISK_ID=$(glance \
-         --os-auth-token $token \
-         --os-image-url http://$GLANCE_HOSTPORT \
-         image-create \
-         --name $BM_DEPLOY_RAMDISK \
-         --is-public True --disk-format=ari \
-         < $TOP_DIR/files/$BM_DEPLOY_RAMDISK  | grep ' id ' | get_field 2)
+        --os-auth-token $token \
+        --os-image-url http://$GLANCE_HOSTPORT \
+        image-create \
+        --name $BM_DEPLOY_RAMDISK \
+        --is-public True --disk-format=ari \
+        < $TOP_DIR/files/$BM_DEPLOY_RAMDISK  | grep ' id ' | get_field 2)
 }
 
 # create a basic baremetal flavor, associated with deploy kernel & ramdisk
@@ -278,11 +278,11 @@ function create_baremetal_flavor() {
     aki=$1
     ari=$2
     nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \
-            $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU
+        $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU
     nova flavor-key $BM_FLAVOR_NAME set \
-            "cpu_arch"="$BM_FLAVOR_ARCH" \
-            "baremetal:deploy_kernel_id"="$aki" \
-            "baremetal:deploy_ramdisk_id"="$ari"
+        "cpu_arch"="$BM_FLAVOR_ARCH" \
+        "baremetal:deploy_kernel_id"="$aki" \
+        "baremetal:deploy_ramdisk_id"="$ari"
 
 }
 
@@ -311,19 +311,19 @@ function extract_and_upload_k_and_r_from_image() {
 
     # load them into glance
     KERNEL_ID=$(glance \
-         --os-auth-token $token \
-         --os-image-url http://$GLANCE_HOSTPORT \
-         image-create \
-         --name $image_name-kernel \
-         --is-public True --disk-format=aki \
-         < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2)
+        --os-auth-token $token \
+        --os-image-url http://$GLANCE_HOSTPORT \
+        image-create \
+        --name $image_name-kernel \
+        --is-public True --disk-format=aki \
+        < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2)
     RAMDISK_ID=$(glance \
-         --os-auth-token $token \
-         --os-image-url http://$GLANCE_HOSTPORT \
-         image-create \
-         --name $image_name-initrd \
-         --is-public True --disk-format=ari \
-         < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2)
+        --os-auth-token $token \
+        --os-image-url http://$GLANCE_HOSTPORT \
+        image-create \
+        --name $image_name-initrd \
+        --is-public True --disk-format=ari \
+        < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2)
 }
 
 
@@ -365,11 +365,11 @@ function upload_baremetal_image() {
             mkdir "$xdir"
             tar -zxf $FILES/$IMAGE_FNAME -C "$xdir"
             KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
-                     [ -f "$f" ] && echo "$f" && break; done; true)
+                [ -f "$f" ] && echo "$f" && break; done; true)
             RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
-                     [ -f "$f" ] && echo "$f" && break; done; true)
+                [ -f "$f" ] && echo "$f" && break; done; true)
             IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
-                     [ -f "$f" ] && echo "$f" && break; done; true)
+                [ -f "$f" ] && echo "$f" && break; done; true)
             if [[ -z "$IMAGE_NAME" ]]; then
                 IMAGE_NAME=$(basename "$IMAGE" ".img")
             fi
@@ -403,19 +403,19 @@ function upload_baremetal_image() {
             --container-format ari \
             --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
     else
-       # TODO(deva): add support for other image types
-       return
+        # TODO(deva): add support for other image types
+        return
     fi
 
     glance \
-       --os-auth-token $token \
-       --os-image-url http://$GLANCE_HOSTPORT \
-       image-create \
-       --name "${IMAGE_NAME%.img}" --is-public True \
-       --container-format $CONTAINER_FORMAT \
-       --disk-format $DISK_FORMAT \
-       ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
-       ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+        --os-auth-token $token \
+        --os-image-url http://$GLANCE_HOSTPORT \
+        image-create \
+        --name "${IMAGE_NAME%.img}" --is-public True \
+        --container-format $CONTAINER_FORMAT \
+        --disk-format $DISK_FORMAT \
+        ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
+        ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
 
     # override DEFAULT_IMAGE_NAME so that tempest can find the image
     # that we just uploaded in glance
@@ -439,15 +439,15 @@ function add_baremetal_node() {
     mac_2=${2:-$BM_SECOND_MAC}
 
     id=$(nova baremetal-node-create \
-       --pm_address="$BM_PM_ADDR" \
-       --pm_user="$BM_PM_USER" \
-       --pm_password="$BM_PM_PASS" \
-       "$BM_HOSTNAME" \
-       "$BM_FLAVOR_CPU" \
-       "$BM_FLAVOR_RAM" \
-       "$BM_FLAVOR_ROOT_DISK" \
-       "$mac_1" \
-       | grep ' id ' | get_field 2 )
+        --pm_address="$BM_PM_ADDR" \
+        --pm_user="$BM_PM_USER" \
+        --pm_password="$BM_PM_PASS" \
+        "$BM_HOSTNAME" \
+        "$BM_FLAVOR_CPU" \
+        "$BM_FLAVOR_RAM" \
+        "$BM_FLAVOR_ROOT_DISK" \
+        "$mac_1" \
+        | grep ' id ' | get_field 2 )
     [ $? -eq 0 ] || [ "$id" ] || die $LINENO "Error adding baremetal node"
     if [ -n "$mac_2" ]; then
         id2=$(nova baremetal-interface-add "$id" "$mac_2" )
@@ -194,7 +194,7 @@ function start_glance() {
     screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
-      die $LINENO "g-api did not start"
+        die $LINENO "g-api did not start"
     fi
 }
 
@@ -203,7 +203,7 @@ function start_ironic_api() {
     screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
     echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then
-      die $LINENO "ir-api did not start"
+        die $LINENO "ir-api did not start"
     fi
 }
 
@@ -373,7 +373,7 @@ function start_keystone() {
 
     echo "Waiting for keystone to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
-      die $LINENO "keystone did not start"
+        die $LINENO "keystone did not start"
     fi
 
     # Start proxies if enabled
@@ -66,8 +66,8 @@ function init_trema() {
 
     cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG
     sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \
-           -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \
-           $TREMA_SS_CONFIG
+        -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \
+        $TREMA_SS_CONFIG
 }
 
 function gem_install() {
@@ -465,27 +465,27 @@ function create_nova_conf() {
     fi
 
     if is_service_enabled n-novnc || is_service_enabled n-xvnc; then
-      # Address on which instance vncservers will listen on compute hosts.
-      # For multi-host, this should be the management ip of the compute host.
-      VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
-      VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
-      iniset $NOVA_CONF DEFAULT vnc_enabled true
-      iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
-      iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
+        # Address on which instance vncservers will listen on compute hosts.
+        # For multi-host, this should be the management ip of the compute host.
+        VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
+        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+        iniset $NOVA_CONF DEFAULT vnc_enabled true
+        iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
+        iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
     else
-      iniset $NOVA_CONF DEFAULT vnc_enabled false
+        iniset $NOVA_CONF DEFAULT vnc_enabled false
     fi
 
     if is_service_enabled n-spice; then
-      # Address on which instance spiceservers will listen on compute hosts.
-      # For multi-host, this should be the management ip of the compute host.
-      SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
-      SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
-      iniset $NOVA_CONF spice enabled true
-      iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
-      iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
+        # Address on which instance spiceservers will listen on compute hosts.
+        # For multi-host, this should be the management ip of the compute host.
+        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+        SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
+        iniset $NOVA_CONF spice enabled true
+        iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
+        iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
     else
-      iniset $NOVA_CONF spice enabled false
+        iniset $NOVA_CONF spice enabled false
     fi
 
     iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
@@ -602,7 +602,7 @@ function start_nova_api() {
     screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
     echo "Waiting for nova-api to start..."
     if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
-      die $LINENO "nova-api did not start"
+        die $LINENO "nova-api did not start"
     fi
 
     # Start proxies if enabled
@@ -620,10 +620,9 @@ function start_nova_compute() {
         # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
         screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'"
     elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
-       for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`
-       do
-           screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM"
-       done
+        for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
+            screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM"
+        done
     else
         if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
             start_nova_hypervisor
@@ -61,8 +61,8 @@ function configure_nova_hypervisor() {
 
     # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``.
     for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do
-       # Attempt to convert flags to options
-       iniset $NOVA_CONF baremetal ${I/=/ }
+        # Attempt to convert flags to options
+        iniset $NOVA_CONF baremetal ${I/=/ }
     done
 }
 
@@ -82,10 +82,10 @@ EOF"
             sudo mkdir -p $rules_dir
             sudo bash -c "cat <<EOF > $rules_dir/50-libvirt-$STACK_USER.rules
 polkit.addRule(function(action, subject) {
-     if (action.id == 'org.libvirt.unix.manage' &&
-         subject.user == '"$STACK_USER"') {
-         return polkit.Result.YES;
-     }
+    if (action.id == 'org.libvirt.unix.manage' &&
+        subject.user == '"$STACK_USER"') {
+        return polkit.Result.YES;
+    }
 });
 EOF"
             unset rules_dir
@@ -102,9 +102,9 @@ function install_rpc_backend() {
         if is_fedora; then
             install_package qpid-cpp-server
             if [[ $DISTRO =~ (rhel6) ]]; then
-               # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to
-               # be no or you get GSS authentication errors as it
-               # attempts to default to this.
+                # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to
+                # be no or you get GSS authentication errors as it
+                # attempts to default to this.
                 sudo sed -i.bak 's/^auth=yes$/auth=no/' /etc/qpidd.conf
             fi
         elif is_ubuntu; then
@@ -104,17 +104,17 @@ ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012}
 
 # cleanup_swift() - Remove residual data files
 function cleanup_swift() {
-   rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
-   if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
-      sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
-   fi
-   if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
-      rm ${SWIFT_DISK_IMAGE}
-   fi
-   rm -rf ${SWIFT_DATA_DIR}/run/
-   if is_apache_enabled_service swift; then
-       _cleanup_swift_apache_wsgi
-   fi
+    rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
+    if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
+        sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
+    fi
+    if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
+        rm ${SWIFT_DISK_IMAGE}
+    fi
+    rm -rf ${SWIFT_DATA_DIR}/run/
+    if is_apache_enabled_service swift; then
+        _cleanup_swift_apache_wsgi
+    fi
 }
 
 # _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
@@ -192,7 +192,7 @@ function _config_swift_apache_wsgi() {
 
         sudo cp ${SWIFT_DIR}/examples/apache2/account-server.template ${apache_vhost_dir}/account-server-${node_number}
         sudo sed -e "
-             /^#/d;/^$/d;
+            /^#/d;/^$/d;
             s/%PORT%/$account_port/g;
            s/%SERVICENAME%/account-server-${node_number}/g;
             s/%APACHE_NAME%/${APACHE_NAME}/g;
@@ -202,7 +202,7 @@ function _config_swift_apache_wsgi() {
 
         sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi
         sudo sed -e "
-             /^#/d;/^$/d;
+            /^#/d;/^$/d;
             s/%SERVICECONF%/account-server\/${node_number}.conf/g;
         " -i ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi
     done
@@ -577,26 +577,26 @@ function start_swift() {
         return 0
     fi
 
-   # By default with only one replica we are launching the proxy,
-   # container, account and object server in screen in foreground and
-   # other services in background. If we have SWIFT_REPLICAS set to something
-   # greater than one we first spawn all the swift services then kill the proxy
-   # service so we can run it in foreground in screen.  ``swift-init ...
-   # {stop|restart}`` exits with '1' if no servers are running, ignore it just
-   # in case
-   swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
-   if [[ ${SWIFT_REPLICAS} == 1 ]]; then
+    # By default with only one replica we are launching the proxy,
+    # container, account and object server in screen in foreground and
+    # other services in background. If we have SWIFT_REPLICAS set to something
+    # greater than one we first spawn all the swift services then kill the proxy
+    # service so we can run it in foreground in screen.  ``swift-init ...
+    # {stop|restart}`` exits with '1' if no servers are running, ignore it just
+    # in case
+    swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
+    if [[ ${SWIFT_REPLICAS} == 1 ]]; then
         todo="object container account"
-   fi
-   for type in proxy ${todo}; do
-       swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
-   done
-   screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
-   if [[ ${SWIFT_REPLICAS} == 1 ]]; then
-       for type in object container account; do
-           screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
-       done
-   fi
+    fi
+    for type in proxy ${todo}; do
+        swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
+    done
+    screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
+    if [[ ${SWIFT_REPLICAS} == 1 ]]; then
+        for type in object container account; do
+            screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
+        done
+    fi
 }
 
 # stop_swift() - Stop running processes (non-screen)
@@ -193,7 +193,7 @@ function configure_tempest() {
             # If namespaces are disabled, devstack will create a single
             # public router that tempest should be configured to use.
             public_router_id=$(neutron router-list | awk "/ $Q_ROUTER_NAME / \
-               { print \$2 }")
+                { print \$2 }")
         fi
     fi
 
@@ -328,15 +328,15 @@ function init_tempest() {
     local disk_image="$image_dir/${base_image_name}-blank.img"
     # if the cirros uec downloaded and the system is uec capable
     if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a  "$VIRT_DRIVER" != "openvz" \
-         -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then
-       echo "Prepare aki/ari/ami Images"
-       ( #new namespace
-           # tenant:demo ; user: demo
-           source $TOP_DIR/accrc/demo/demo
-           euca-bundle-image -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH"
-           euca-bundle-image -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH"
-           euca-bundle-image -i "$disk_image" -d "$BOTO_MATERIALS_PATH"
-       ) 2>&1 </dev/null | cat
+        -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then
+        echo "Prepare aki/ari/ami Images"
+        ( #new namespace
+            # tenant:demo ; user: demo
+            source $TOP_DIR/accrc/demo/demo
+            euca-bundle-image -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH"
+            euca-bundle-image -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH"
+            euca-bundle-image -i "$disk_image" -d "$BOTO_MATERIALS_PATH"
+        ) 2>&1 </dev/null | cat
     else
         echo "Boto materials are not prepared"
     fi
@@ -45,14 +45,15 @@ create_trove_accounts() {
     SERVICE_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
 
     if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then
-        TROVE_USER=$(keystone user-create --name=trove \
-                                          --pass="$SERVICE_PASSWORD" \
-                                          --tenant_id $SERVICE_TENANT \
-                                          --email=trove@example.com \
-                                          | grep " id " | get_field 2)
+        TROVE_USER=$(keystone user-create \
+            --name=trove \
+            --pass="$SERVICE_PASSWORD" \
+            --tenant_id $SERVICE_TENANT \
+            --email=trove@example.com \
+            | grep " id " | get_field 2)
         keystone user-role-add --tenant-id $SERVICE_TENANT \
-                               --user-id $TROVE_USER \
-                               --role-id $SERVICE_ROLE
+            --user-id $TROVE_USER \
+            --role-id $SERVICE_ROLE
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
            TROVE_SERVICE=$(keystone service-create \
                 --name=trove \