Browse code

Replace screen_it() with run_process() throughout

run_process will use screen if USE_SCREEN=True (the default),
otherwise it will simply start the requested service. Therefore
wherever screen_it was used, run_process can be used instead.

Where stop_screen was found it has been replaced with stop_process.

A tail_log function has been added which will tail a logfile in a
screen if USE_SCREEN is True.

lib/template has been updated to reflect the use of the new
functions.

When using sg the quoting in run_process gets very complicated.
To get around this run_process and the functions it calls accepts
an optional third argument. If set it is a group to be used with sg.

Conflicts:
functions-common
lib/ceilometer
lib/keystone
lib/neutron
lib/sahara
lib/zaqar
stack.sh

Change-Id: Ia3843818014f7c6c7526ef3aa9676bbddb8a85ca
(cherry picked from commit 2f27a0ed3c609bfcd6344a55c121e56d5569afc9)

Chris Dent authored on 2014/09/09 21:46:02
Showing 20 changed files
... ...
@@ -984,10 +984,13 @@ function zypper_install {
984 984
 # files to produce the same logs as screen_it().  The log filename is derived
985 985
 # from the service name and global-and-now-misnamed ``SCREEN_LOGDIR``
986 986
 # Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
987
-# _old_run_process service "command-line"
987
+# If an optional group is provided sg will be used to set the group of
988
+# the command.
989
+# _run_process service "command-line" [group]
988 990
 function _run_process {
989 991
     local service=$1
990 992
     local command="$2"
993
+    local group=$3
991 994
 
992 995
     # Undo logging redirections and close the extra descriptors
993 996
     exec 1>&3
... ...
@@ -996,8 +999,8 @@ function _run_process {
996 996
     exec 6>&-
997 997
 
998 998
     if [[ -n ${SCREEN_LOGDIR} ]]; then
999
-        exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1
1000
-        ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
999
+        exec 1>&${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log 2>&1
1000
+        ln -sf ${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${service}.log
1001 1001
 
1002 1002
         # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
1003 1003
         export PYTHONUNBUFFERED=1
... ...
@@ -1005,7 +1008,11 @@ function _run_process {
1005 1005
 
1006 1006
     # Run under ``setsid`` to force the process to become a session and group leader.
1007 1007
     # The pid saved can be used with pkill -g to get the entire process group.
1008
-    setsid $command & echo $! >$SERVICE_DIR/$SCREEN_NAME/$1.pid
1008
+    if [[ -n "$group" ]]; then
1009
+        setsid sg $group "$command" & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
1010
+    else
1011
+        setsid $command & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
1012
+    fi
1009 1013
 
1010 1014
     # Just silently exit this process
1011 1015
     exit 0
... ...
@@ -1037,17 +1044,20 @@ function is_running {
1037 1037
 
1038 1038
 # Run a single service under screen or directly
1039 1039
 # If the command includes shell metacharacters (;<>*) it must be run using a shell
1040
-# run_process service "command-line"
1040
+# If an optional group is provided sg will be used to run the
1041
+# command as that group.
1042
+# run_process service "command-line" [group]
1041 1043
 function run_process {
1042 1044
     local service=$1
1043 1045
     local command="$2"
1046
+    local group=$3
1044 1047
 
1045 1048
     if is_service_enabled $service; then
1046 1049
         if [[ "$USE_SCREEN" = "True" ]]; then
1047
-            screen_service "$service" "$command"
1050
+            screen_service "$service" "$command" "$group"
1048 1051
         else
1049 1052
             # Spawn directly without screen
1050
-            _run_process "$service" "$command" &
1053
+            _run_process "$service" "$command" "$group" &
1051 1054
         fi
1052 1055
     fi
1053 1056
 }
... ...
@@ -1055,11 +1065,13 @@ function run_process {
1055 1055
 # Helper to launch a service in a named screen
1056 1056
 # Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_NAME``, ``SCREEN_LOGDIR``,
1057 1057
 # ``SERVICE_DIR``, ``USE_SCREEN``
1058
-# screen_service service "command-line"
1059
-# Run a command in a shell in a screen window
1058
+# screen_service service "command-line" [group]
1059
+# Run a command in a shell in a screen window. If an optional group
1060
+# is provided, use sg to set the group of the command.
1060 1061
 function screen_service {
1061 1062
     local service=$1
1062 1063
     local command="$2"
1064
+    local group=$3
1063 1065
 
1064 1066
     SCREEN_NAME=${SCREEN_NAME:-stack}
1065 1067
     SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
... ...
@@ -1089,8 +1101,11 @@ function screen_service {
1089 1089
         # - the server process is brought back to the foreground
1090 1090
         # - if the server process exits prematurely the fg command errors
1091 1091
         #   and a message is written to stdout and the service failure file
1092
-        # The pid saved can be used in screen_stop() as a process group
1092
+        # The pid saved can be used in stop_process() as a process group
1093 1093
         # id to kill off all child processes
1094
+        if [[ -n "$group" ]]; then
1095
+            command="sg $group '$command'"
1096
+        fi
1094 1097
         screen -S $SCREEN_NAME -p $service -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${service}.pid; fg || echo \"$service failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${service}.failure\"$NL"
1095 1098
     fi
1096 1099
 }
... ...
@@ -1126,7 +1141,8 @@ function screen_rc {
1126 1126
 # If a PID is available use it, kill the whole process group via TERM
1127 1127
 # If screen is being used kill the screen window; this will catch processes
1128 1128
 # that did not leave a PID behind
1129
-# screen_stop service
1129
+# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``, ``USE_SCREEN``
1130
+# screen_stop_service service
1130 1131
 function screen_stop_service {
1131 1132
     local service=$1
1132 1133
 
... ...
@@ -1194,6 +1210,17 @@ function service_check {
1194 1194
     fi
1195 1195
 }
1196 1196
 
1197
+# Tail a log file in a screen if USE_SCREEN is true.
1198
+function tail_log {
1199
+    local service=$1
1200
+    local logfile=$2
1201
+
1202
+    USE_SCREEN=$(trueorfalse True $USE_SCREEN)
1203
+    if [[ "$USE_SCREEN" = "True" ]]; then
1204
+        screen_service "$service" "sudo tail -f $logfile"
1205
+    fi
1206
+}
1207
+
1197 1208
 
1198 1209
 # Deprecated Functions
1199 1210
 # --------------------
... ...
@@ -1532,6 +1559,7 @@ function is_service_enabled {
1532 1532
         #                are implemented
1533 1533
 
1534 1534
         [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0
1535
+        [[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0
1535 1536
         [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0
1536 1537
         [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0
1537 1538
         [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0
... ...
@@ -235,16 +235,19 @@ function install_ceilometerclient {
235 235
 
236 236
 # start_ceilometer() - Start running processes, including screen
237 237
 function start_ceilometer {
238
+    run_process ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF"
239
+    run_process ceilometer-anotification "ceilometer-agent-notification --config-file $CEILOMETER_CONF"
240
+    run_process ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF"
241
+    run_process ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
242
+
243
+    # Start the compute agent last to allow time for the collector to
244
+    # fully wake up and connect to the message bus. See bug #1355809
238 245
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
239
-        screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
246
+        run_process ceilometer-acompute "sg $LIBVIRT_GROUP 'ceilometer-agent-compute --config-file $CEILOMETER_CONF'"
240 247
     fi
241 248
     if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
242
-        screen_it ceilometer-acompute "cd ; ceilometer-agent-compute --config-file $CEILOMETER_CONF"
249
+        run_process ceilometer-acompute "ceilometer-agent-compute --config-file $CEILOMETER_CONF"
243 250
     fi
244
-    screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF"
245
-    screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF"
246
-    screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF"
247
-    screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
248 251
 
249 252
     # only die on API if it was actually intended to be turned on
250 253
     if is_service_enabled ceilometer-api; then
... ...
@@ -254,15 +257,15 @@ function start_ceilometer {
254 254
         fi
255 255
     fi
256 256
 
257
-    screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
258
-    screen_it ceilometer-alarm-evaluator "cd ; ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF"
257
+    run_process ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
258
+    run_process ceilometer-alarm-evaluator "ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF"
259 259
 }
260 260
 
261 261
 # stop_ceilometer() - Stop running processes
262 262
 function stop_ceilometer {
263 263
     # Kill the ceilometer screen windows
264 264
     for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
265
-        screen_stop $serv
265
+        stop_process $serv
266 266
     done
267 267
 }
268 268
 
... ...
@@ -538,7 +538,7 @@ function start_cinder {
538 538
 function stop_cinder {
539 539
     # Kill the cinder screen windows
540 540
     for serv in c-api c-bak c-sch c-vol; do
541
-        screen_stop $serv
541
+        stop_process $serv
542 542
     done
543 543
 
544 544
     if is_service_enabled c-vol; then
... ...
@@ -77,14 +77,14 @@ function install_ganttclient {
77 77
 # start_gantt() - Start running processes, including screen
78 78
 function start_gantt {
79 79
     if is_service_enabled gantt; then
80
-        screen_it gantt "cd $GANTT_DIR && $GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF"
80
+        run_process gantt "$GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF"
81 81
     fi
82 82
 }
83 83
 
84 84
 # stop_gantt() - Stop running processes
85 85
 function stop_gantt {
86 86
     echo "Stop Gantt"
87
-    screen_stop gantt
87
+    stop_process gantt
88 88
 }
89 89
 
90 90
 # Restore xtrace
... ...
@@ -246,8 +246,8 @@ function install_glance {
246 246
 
247 247
 # start_glance() - Start running processes, including screen
248 248
 function start_glance {
249
-    screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
250
-    screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
249
+    run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
250
+    run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
251 251
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
252 252
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
253 253
         die $LINENO "g-api did not start"
... ...
@@ -257,8 +257,8 @@ function start_glance {
257 257
 # stop_glance() - Stop running processes
258 258
 function stop_glance {
259 259
     # Kill the Glance screen windows
260
-    screen_stop g-api
261
-    screen_stop g-reg
260
+    stop_process g-api
261
+    stop_process g-reg
262 262
 }
263 263
 
264 264
 
... ...
@@ -177,17 +177,17 @@ function install_heat {
177 177
 
178 178
 # start_heat() - Start running processes, including screen
179 179
 function start_heat {
180
-    screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF"
181
-    screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF"
182
-    screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF"
183
-    screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-file=$HEAT_CONF"
180
+    run_process h-eng "$HEAT_DIR/bin/heat-engine --config-file=$HEAT_CONF"
181
+    run_process h-api "$HEAT_DIR/bin/heat-api --config-file=$HEAT_CONF"
182
+    run_process h-api-cfn "$HEAT_DIR/bin/heat-api-cfn --config-file=$HEAT_CONF"
183
+    run_process h-api-cw "$HEAT_DIR/bin/heat-api-cloudwatch --config-file=$HEAT_CONF"
184 184
 }
185 185
 
186 186
 # stop_heat() - Stop running processes
187 187
 function stop_heat {
188 188
     # Kill the screen windows
189 189
     for serv in h-eng h-api h-api-cfn h-api-cw; do
190
-        screen_stop $serv
190
+        stop_process $serv
191 191
     done
192 192
 }
193 193
 
... ...
@@ -128,6 +128,7 @@ function init_horizon {
128 128
 
129 129
     # Remove old log files that could mess with how devstack detects whether Horizon
130 130
     # has been successfully started (see start_horizon() and functions::screen_it())
131
+    # and run_process
131 132
     sudo rm -f /var/log/$APACHE_NAME/horizon_*
132 133
 
133 134
     # Configure apache to run horizon
... ...
@@ -158,7 +159,7 @@ function install_horizon {
158 158
 # start_horizon() - Start running processes, including screen
159 159
 function start_horizon {
160 160
     restart_apache_server
161
-    screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log"
161
+    tail_log horizon /var/log/$APACHE_NAME/horizon_error.log
162 162
 }
163 163
 
164 164
 # stop_horizon() - Stop running processes (non-screen)
... ...
@@ -266,7 +266,7 @@ function start_ironic {
266 266
 # start_ironic_api() - Used by start_ironic().
267 267
 # Starts Ironic API server.
268 268
 function start_ironic_api {
269
-    screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
269
+    run_process ir-api "$IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
270 270
     echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..."
271 271
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then
272 272
         die $LINENO "ir-api did not start"
... ...
@@ -276,7 +276,7 @@ function start_ironic_api {
276 276
 # start_ironic_conductor() - Used by start_ironic().
277 277
 # Starts Ironic conductor.
278 278
 function start_ironic_conductor {
279
-    screen_it ir-cond "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE"
279
+    run_process ir-cond "$IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE"
280 280
     # TODO(romcheg): Find a way to check whether the conductor has started.
281 281
 }
282 282
 
... ...
@@ -428,10 +428,10 @@ function start_keystone {
428 428
 
429 429
     if is_apache_enabled_service key; then
430 430
         restart_apache_server
431
-        screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone"
431
+        tail_log key /var/log/$APACHE_NAME/keystone
432 432
     else
433 433
         # Start Keystone in a screen window
434
-        screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug"
434
+        run_process key "$KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug"
435 435
     fi
436 436
 
437 437
     echo "Waiting for keystone to start..."
... ...
@@ -449,7 +449,9 @@ function start_keystone {
449 449
 # stop_keystone() - Stop running processes
450 450
 function stop_keystone {
451 451
     # Kill the Keystone screen window
452
-    screen_stop key
452
+    stop_process key
453
+    # Cleanup the WSGI files and VHOST
454
+    _cleanup_keystone_apache_wsgi
453 455
 }
454 456
 
455 457
 function is_keystone_enabled {
... ...
@@ -476,7 +476,7 @@ function start_neutron_service_and_check {
476 476
         CFG_FILE_OPTIONS+=" --config-file /$cfg_file"
477 477
     done
478 478
     # Start the Neutron service
479
-    screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $CFG_FILE_OPTIONS"
479
+    run_process q-svc "python $NEUTRON_BIN_DIR/neutron-server $CFG_FILE_OPTIONS"
480 480
     echo "Waiting for Neutron to start..."
481 481
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then
482 482
         die $LINENO "Neutron did not start"
... ...
@@ -486,8 +486,8 @@ function start_neutron_service_and_check {
486 486
 # Start running processes, including screen
487 487
 function start_neutron_agents {
488 488
     # Start up the neutron agents if enabled
489
-    screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
490
-    screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
489
+    run_process q-agt "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
490
+    run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
491 491
 
492 492
     L3_CONF_FILES="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE"
493 493
 
... ...
@@ -495,24 +495,24 @@ function start_neutron_agents {
495 495
         L3_CONF_FILES="$L3_CONF_FILES --config-file $Q_FWAAS_CONF_FILE"
496 496
     fi
497 497
     if is_service_enabled q-vpn; then
498
-        screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY $L3_CONF_FILES"
498
+        run_process q-vpn "$AGENT_VPN_BINARY $L3_CONF_FILES"
499 499
     else
500
-        screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY $L3_CONF_FILES"
500
+        run_process q-l3 "python $AGENT_L3_BINARY $L3_CONF_FILES"
501 501
     fi
502 502
 
503
-    screen_it q-meta "cd $NEUTRON_DIR && python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE"
503
+    run_process q-meta "python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE"
504 504
 
505 505
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
506 506
         # For XenServer, start an agent for the domU openvswitch
507
-        screen_it q-domua "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
507
+        run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
508 508
     fi
509 509
 
510 510
     if is_service_enabled q-lbaas; then
511
-        screen_it q-lbaas "cd $NEUTRON_DIR && python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
511
+        run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
512 512
     fi
513 513
 
514 514
     if is_service_enabled q-metering; then
515
-        screen_it q-metering "cd $NEUTRON_DIR && python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
515
+        run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
516 516
     fi
517 517
 }
518 518
 
... ...
@@ -28,12 +28,14 @@ functions to be implemented
28 28
   git clone xxx
29 29
 
30 30
 * ``start_<third_party>``:
31
-  start running processes, including screen
31
+  start running processes, including screen if USE_SCREEN=True
32 32
   e.g.
33
-  screen_it XXXX "cd $XXXXY_DIR && $XXXX_DIR/bin/XXXX-bin"
33
+  run_process XXXX "$XXXX_DIR/bin/XXXX-bin"
34 34
 
35 35
 * ``stop_<third_party>``:
36 36
   stop running processes (non-screen)
37
+  e.g.
38
+  stop_process XXXX
37 39
 
38 40
 * ``check_<third_party>``:
39 41
   verify that the integration between neutron server and third-party components is sane
... ...
@@ -63,7 +63,7 @@ function install_ryu {
63 63
 }
64 64
 
65 65
 function start_ryu {
66
-    screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --config-file $RYU_CONF"
66
+    run_process ryu "$RYU_DIR/bin/ryu-manager --config-file $RYU_CONF"
67 67
 }
68 68
 
69 69
 function stop_ryu {
... ...
@@ -39,6 +39,7 @@ NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova}
39 39
 NOVA_CONF_DIR=/etc/nova
40 40
 NOVA_CONF=$NOVA_CONF_DIR/nova.conf
41 41
 NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf
42
+NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf
42 43
 NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell}
43 44
 
44 45
 NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
... ...
@@ -697,7 +698,7 @@ function start_nova_api {
697 697
         service_port=$NOVA_SERVICE_PORT_INT
698 698
     fi
699 699
 
700
-    screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
700
+    run_process n-api "$NOVA_BIN_DIR/nova-api"
701 701
     echo "Waiting for nova-api to start..."
702 702
     if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
703 703
         die $LINENO "nova-api did not start"
... ...
@@ -719,17 +720,23 @@ function start_nova_compute {
719 719
 
720 720
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
721 721
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
722
-        # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
723
-        screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
722
+        # 'sg' will be used in run_process to execute nova-compute as a member of the
723
+        # **$LIBVIRT_GROUP** group.
724
+        run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP
724 725
     elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
725 726
         for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
726
-            screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file <(echo -e '[DEFAULT]\nhost=${HOSTNAME}${i}')"
727
+            # Avoid process redirection of fake host configurations by
728
+            # creating or modifying real configurations. Each fake
729
+            # gets its own configuration and own log file.
730
+            local fake_conf="${NOVA_FAKE_CONF}-${i}"
731
+            iniset $fake_conf DEFAULT nhost "${HOSTNAME}${i}"
732
+            run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file $fake_conf"
727 733
         done
728 734
     else
729 735
         if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
730 736
             start_nova_hypervisor
731 737
         fi
732
-        screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf"
738
+        run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf"
733 739
     fi
734 740
 }
735 741
 
... ...
@@ -742,25 +749,25 @@ function start_nova_rest {
742 742
         local compute_cell_conf=$NOVA_CONF
743 743
     fi
744 744
 
745
-    # ``screen_it`` checks ``is_service_enabled``, it is not needed here
746
-    screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf"
747
-    screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
748
-    screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
745
+    # ``run_process`` checks ``is_service_enabled``, it is not needed here
746
+    run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf"
747
+    run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
748
+    run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
749 749
 
750
-    screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf"
751
-    screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
752
-    screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
753
-    screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
750
+    run_process n-crt "$NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf"
751
+    run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
752
+    run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
753
+    run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
754 754
 
755
-    screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
756
-    screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
757
-    screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
758
-    screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
755
+    run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
756
+    run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
757
+    run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
758
+    run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
759 759
 
760 760
     # Starting the nova-objectstore only if swift3 service is not enabled.
761 761
     # Swift will act as s3 objectstore.
762 762
     is_service_enabled swift3 || \
763
-        screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf"
763
+        run_process n-obj "$NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf"
764 764
 }
765 765
 
766 766
 function start_nova {
... ...
@@ -769,7 +776,7 @@ function start_nova {
769 769
 }
770 770
 
771 771
 function stop_nova_compute {
772
-    screen_stop n-cpu
772
+    stop_process n-cpu
773 773
     if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
774 774
         stop_nova_hypervisor
775 775
     fi
... ...
@@ -780,7 +787,7 @@ function stop_nova_rest {
780 780
     # Some services are listed here twice since more than one instance
781 781
     # of a service may be running in certain configs.
782 782
     for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do
783
-        screen_stop $serv
783
+        stop_process $serv
784 784
     done
785 785
 }
786 786
 
... ...
@@ -148,6 +148,8 @@ function start_opendaylight {
148 148
     # The flags to ODL have the following meaning:
149 149
     #   -of13: runs ODL using OpenFlow 1.3 protocol support.
150 150
     #   -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support
151
+    # NOTE(chdent): Leaving this as screen_it instead of run_process until
152
+    # the right thing for this service is determined.
151 153
     screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
152 154
 
153 155
     # Sleep a bit to let OpenDaylight finish starting up
... ...
@@ -156,7 +158,7 @@ function start_opendaylight {
156 156
 
157 157
 # stop_opendaylight() - Stop running processes (non-screen)
158 158
 function stop_opendaylight {
159
-    screen_stop odl-server
159
+    stop_process odl-server
160 160
 }
161 161
 
162 162
 # stop_opendaylight-compute() - Remove OVS bridges
... ...
@@ -159,7 +159,7 @@ function install_sahara {
159 159
 
160 160
 # start_sahara() - Start running processes, including screen
161 161
 function start_sahara {
162
-    screen_it sahara "cd $SAHARA_DIR && $SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE"
162
+    run_process sahara "$SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE"
163 163
 }
164 164
 
165 165
 # stop_sahara() - Stop running processes
... ...
@@ -646,10 +646,10 @@ function start_swift {
646 646
     if is_apache_enabled_service swift; then
647 647
         restart_apache_server
648 648
         swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
649
-        screen_it s-proxy "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/proxy-server"
649
+        tail_log s-proxy /var/log/$APACHE_NAME/proxy-server
650 650
         if [[ ${SWIFT_REPLICAS} == 1 ]]; then
651 651
             for type in object container account; do
652
-                screen_it s-${type} "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/${type}-server-1"
652
+                tail_log s-${type} /var/log/$APACHE_NAME/${type}-server-1
653 653
             done
654 654
         fi
655 655
         return 0
... ...
@@ -669,10 +669,10 @@ function start_swift {
669 669
     for type in proxy ${todo}; do
670 670
         swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
671 671
     done
672
-    screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
672
+    run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
673 673
     if [[ ${SWIFT_REPLICAS} == 1 ]]; then
674 674
         for type in object container account; do
675
-            screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
675
+            run_process s-${type} "$SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
676 676
         done
677 677
     fi
678 678
 }
... ...
@@ -689,9 +689,9 @@ function stop_swift {
689 689
         swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
690 690
     fi
691 691
     # Dump all of the servers
692
-    # Maintain the iteration as screen_stop() has some desirable side-effects
692
+    # Maintain the iteration as stop_process() has some desirable side-effects
693 693
     for type in proxy object container account; do
694
-        screen_stop s-${type}
694
+        stop_process s-${type}
695 695
     done
696 696
     # Blast out any stragglers
697 697
     pkill -f swift-
... ...
@@ -75,13 +75,17 @@ function install_XXXX {
75 75
 
76 76
 # start_XXXX() - Start running processes, including screen
77 77
 function start_XXXX {
78
-    # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin"
78
+    # The quoted command must be a single command and not include any
79
+    # shell metacharacters, redirections or shell builtins.
80
+    # run_process XXXX "$XXXX_DIR/bin/XXXX-bin"
79 81
     :
80 82
 }
81 83
 
82 84
 # stop_XXXX() - Stop running processes (non-screen)
83 85
 function stop_XXXX {
84
-    # FIXME(dtroyer): stop only our screen screen window?
86
+    # for serv in serv-a serv-b; do
87
+    #     stop_process $serv
88
+    # done
85 89
     :
86 90
 }
87 91
 
... ...
@@ -208,16 +208,16 @@ function init_trove {
208 208
 
209 209
 # start_trove() - Start running processes, including screen
210 210
 function start_trove {
211
-    screen_it tr-api "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1"
212
-    screen_it tr-tmgr "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1"
213
-    screen_it tr-cond "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1"
211
+    run_process tr-api "$TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug"
212
+    run_process tr-tmgr "$TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug"
213
+    run_process tr-cond "$TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug"
214 214
 }
215 215
 
216 216
 # stop_trove() - Stop running processes
217 217
 function stop_trove {
218 218
     # Kill the trove screen windows
219 219
     for serv in tr-api tr-tmgr tr-cond; do
220
-        screen_stop $serv
220
+        stop_process $serv
221 221
     done
222 222
 }
223 223
 
... ...
@@ -1159,7 +1159,7 @@ fi
1159 1159
 
1160 1160
 if is_service_enabled zeromq; then
1161 1161
     echo_summary "Starting zermomq receiver"
1162
-    screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver"
1162
+    run_process zeromq "$OSLO_BIN_DIR/oslo-messaging-zmq-receiver"
1163 1163
 fi
1164 1164
 
1165 1165
 # Launch the nova-api and wait for it to answer before continuing
... ...
@@ -1262,7 +1262,7 @@ if is_service_enabled nova && is_baremetal; then
1262 1262
     fi
1263 1263
     # ensure callback daemon is running
1264 1264
     sudo pkill nova-baremetal-deploy-helper || true
1265
-    screen_it baremetal "cd ; nova-baremetal-deploy-helper"
1265
+    run_process baremetal "nova-baremetal-deploy-helper"
1266 1266
 fi
1267 1267
 
1268 1268
 # Save some values we generated for later use
... ...
@@ -1,9 +1,9 @@
1 1
 #!/bin/bash
2
-# tests/exec.sh - Test DevStack screen_it() and screen_stop()
2
+# tests/exec.sh - Test DevStack run_process() and stop_process()
3 3
 #
4 4
 # exec.sh start|stop|status
5 5
 #
6
-# Set USE_SCREEN to change the default
6
+# Set USE_SCREEN True|False to change use of screen.
7 7
 #
8 8
 # This script emulates the basic exec environment in ``stack.sh`` to test
9 9
 # the process spawn and kill operations.
... ...
@@ -94,12 +94,12 @@ fi
94 94
 if [[ "$1" == "start" ]]; then
95 95
     echo "Start service"
96 96
     setup_screen
97
-    screen_it fake-service "$TOP_DIR/tests/fake-service.sh"
97
+    run_process fake-service "$TOP_DIR/tests/fake-service.sh"
98 98
     sleep 1
99 99
     status
100 100
 elif [[ "$1" == "stop" ]]; then
101 101
     echo "Stop service"
102
-    screen_stop fake-service
102
+    stop_process fake-service
103 103
     status
104 104
 elif [[ "$1" == "status" ]]; then
105 105
     status