run_process will use screen if USE_SCREEN=True (the default),
otherwise it will simply start the requested service. Therefore
wherever screen_it was used, run_process can be used instead.
Where screen_stop was found it has been replaced with stop_process.
A tail_log function has been added which will tail a logfile in a
screen if USE_SCREEN is True.
lib/template has been updated to reflect the use of the new
functions.
When using sg the quoting in run_process gets very complicated.
To get around this run_process and the functions it calls accept
an optional third argument. If set it is a group to be used with sg.
Change-Id: Ia3843818014f7c6c7526ef3aa9676bbddb8a85ca
... | ... |
@@ -1136,10 +1136,13 @@ function zypper_install { |
1136 | 1136 |
# files to produce the same logs as screen_it(). The log filename is derived |
1137 | 1137 |
# from the service name and global-and-now-misnamed ``SCREEN_LOGDIR`` |
1138 | 1138 |
# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR`` |
1139 |
-# _old_run_process service "command-line" |
|
1139 |
+# If an optional group is provided sg will be used to set the group of |
|
1140 |
+# the command. |
|
1141 |
+# _run_process service "command-line" [group] |
|
1140 | 1142 |
function _run_process { |
1141 | 1143 |
local service=$1 |
1142 | 1144 |
local command="$2" |
1145 |
+ local group=$3 |
|
1143 | 1146 |
|
1144 | 1147 |
# Undo logging redirections and close the extra descriptors |
1145 | 1148 |
exec 1>&3 |
... | ... |
@@ -1148,8 +1151,8 @@ function _run_process { |
1148 | 1148 |
exec 6>&- |
1149 | 1149 |
|
1150 | 1150 |
if [[ -n ${SCREEN_LOGDIR} ]]; then |
1151 |
- exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1 |
|
1152 |
- ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log |
|
1151 |
+ exec 1>&${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log 2>&1 |
|
1152 |
+ ln -sf ${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${service}.log |
|
1153 | 1153 |
|
1154 | 1154 |
# TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. |
1155 | 1155 |
export PYTHONUNBUFFERED=1 |
... | ... |
@@ -1157,7 +1160,11 @@ function _run_process { |
1157 | 1157 |
|
1158 | 1158 |
# Run under ``setsid`` to force the process to become a session and group leader. |
1159 | 1159 |
# The pid saved can be used with pkill -g to get the entire process group. |
1160 |
- setsid $command & echo $! >$SERVICE_DIR/$SCREEN_NAME/$1.pid |
|
1160 |
+ if [[ -n "$group" ]]; then |
|
1161 |
+ setsid sg $group "$command" & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid |
|
1162 |
+ else |
|
1163 |
+ setsid $command & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid |
|
1164 |
+ fi |
|
1161 | 1165 |
|
1162 | 1166 |
# Just silently exit this process |
1163 | 1167 |
exit 0 |
... | ... |
@@ -1190,17 +1197,20 @@ function is_running { |
1190 | 1190 |
|
1191 | 1191 |
# Run a single service under screen or directly |
1192 | 1192 |
# If the command includes shell metacharacters (;<>*) it must be run using a shell |
1193 |
-# run_process service "command-line" |
|
1193 |
+# If an optional group is provided sg will be used to run the |
|
1194 |
+# command as that group. |
|
1195 |
+# run_process service "command-line" [group] |
|
1194 | 1196 |
function run_process { |
1195 | 1197 |
local service=$1 |
1196 | 1198 |
local command="$2" |
1199 |
+ local group=$3 |
|
1197 | 1200 |
|
1198 | 1201 |
if is_service_enabled $service; then |
1199 | 1202 |
if [[ "$USE_SCREEN" = "True" ]]; then |
1200 |
- screen_service "$service" "$command" |
|
1203 |
+ screen_service "$service" "$command" "$group" |
|
1201 | 1204 |
else |
1202 | 1205 |
# Spawn directly without screen |
1203 |
- _run_process "$service" "$command" & |
|
1206 |
+ _run_process "$service" "$command" "$group" & |
|
1204 | 1207 |
fi |
1205 | 1208 |
fi |
1206 | 1209 |
} |
... | ... |
@@ -1208,11 +1218,13 @@ function run_process { |
1208 | 1208 |
# Helper to launch a service in a named screen |
1209 | 1209 |
# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_NAME``, ``SCREEN_LOGDIR``, |
1210 | 1210 |
# ``SERVICE_DIR``, ``USE_SCREEN`` |
1211 |
-# screen_service service "command-line" |
|
1212 |
-# Run a command in a shell in a screen window |
|
1211 |
+# screen_service service "command-line" [group] |
|
1212 |
+# Run a command in a shell in a screen window, if an optional group |
|
1213 |
+# is provided, use sg to set the group of the command. |
|
1213 | 1214 |
function screen_service { |
1214 | 1215 |
local service=$1 |
1215 | 1216 |
local command="$2" |
1217 |
+ local group=$3 |
|
1216 | 1218 |
|
1217 | 1219 |
SCREEN_NAME=${SCREEN_NAME:-stack} |
1218 | 1220 |
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} |
... | ... |
@@ -1242,8 +1254,11 @@ function screen_service { |
1242 | 1242 |
# - the server process is brought back to the foreground |
1243 | 1243 |
# - if the server process exits prematurely the fg command errors |
1244 | 1244 |
# and a message is written to stdout and the service failure file |
1245 |
- # The pid saved can be used in screen_stop() as a process group |
|
1245 |
+ # The pid saved can be used in stop_process() as a process group |
|
1246 | 1246 |
# id to kill off all child processes |
1247 |
+ if [[ -n "$group" ]]; then |
|
1248 |
+ command="sg $group '$command'" |
|
1249 |
+ fi |
|
1247 | 1250 |
screen -S $SCREEN_NAME -p $service -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${service}.pid; fg || echo \"$service failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${service}.failure\"$NL" |
1248 | 1251 |
fi |
1249 | 1252 |
} |
... | ... |
@@ -1281,7 +1296,7 @@ function screen_rc { |
1281 | 1281 |
# If screen is being used kill the screen window; this will catch processes |
1282 | 1282 |
# that did not leave a PID behind |
1283 | 1283 |
# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``, ``USE_SCREEN`` |
1284 |
-# screen_stop service |
|
1284 |
+# screen_stop_service service |
|
1285 | 1285 |
function screen_stop_service { |
1286 | 1286 |
local service=$1 |
1287 | 1287 |
|
... | ... |
@@ -1350,6 +1365,17 @@ function service_check { |
1350 | 1350 |
fi |
1351 | 1351 |
} |
1352 | 1352 |
|
1353 |
+# Tail a log file in a screen if USE_SCREEN is true. |
|
1354 |
+function tail_log { |
|
1355 |
+ local service=$1 |
|
1356 |
+ local logfile=$2 |
|
1357 |
+ |
|
1358 |
+ USE_SCREEN=$(trueorfalse True $USE_SCREEN) |
|
1359 |
+ if [[ "$USE_SCREEN" = "True" ]]; then |
|
1360 |
+ screen_service "$service" "sudo tail -f $logfile" |
|
1361 |
+ fi |
|
1362 |
+} |
|
1363 |
+ |
|
1353 | 1364 |
|
1354 | 1365 |
# Deprecated Functions |
1355 | 1366 |
# -------------------- |
... | ... |
@@ -1707,6 +1733,7 @@ function is_service_enabled { |
1707 | 1707 |
# are implemented |
1708 | 1708 |
|
1709 | 1709 |
[[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0 |
1710 |
+ [[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0 |
|
1710 | 1711 |
[[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0 |
1711 | 1712 |
[[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0 |
1712 | 1713 |
[[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0 |
... | ... |
@@ -224,18 +224,18 @@ function install_ceilometerclient { |
224 | 224 |
|
225 | 225 |
# start_ceilometer() - Start running processes, including screen |
226 | 226 |
function start_ceilometer { |
227 |
- screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" |
|
228 |
- screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF" |
|
229 |
- screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" |
|
230 |
- screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" |
|
227 |
+ run_process ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF" |
|
228 |
+ run_process ceilometer-anotification "ceilometer-agent-notification --config-file $CEILOMETER_CONF" |
|
229 |
+ run_process ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF" |
|
230 |
+ run_process ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" |
|
231 | 231 |
|
232 | 232 |
# Start the compute agent last to allow time for the collector to |
233 | 233 |
# fully wake up and connect to the message bus. See bug #1355809 |
234 | 234 |
if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then |
235 |
- screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP 'ceilometer-agent-compute --config-file $CEILOMETER_CONF'" |
|
235 |
+ run_process ceilometer-acompute "sg $LIBVIRT_GROUP 'ceilometer-agent-compute --config-file $CEILOMETER_CONF'" |
|
236 | 236 |
fi |
237 | 237 |
if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then |
238 |
- screen_it ceilometer-acompute "cd ; ceilometer-agent-compute --config-file $CEILOMETER_CONF" |
|
238 |
+ run_process ceilometer-acompute "ceilometer-agent-compute --config-file $CEILOMETER_CONF" |
|
239 | 239 |
fi |
240 | 240 |
|
241 | 241 |
# only die on API if it was actually intended to be turned on |
... | ... |
@@ -246,15 +246,15 @@ function start_ceilometer { |
246 | 246 |
fi |
247 | 247 |
fi |
248 | 248 |
|
249 |
- screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" |
|
250 |
- screen_it ceilometer-alarm-evaluator "cd ; ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" |
|
249 |
+ run_process ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" |
|
250 |
+ run_process ceilometer-alarm-evaluator "ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" |
|
251 | 251 |
} |
252 | 252 |
|
253 | 253 |
# stop_ceilometer() - Stop running processes |
254 | 254 |
function stop_ceilometer { |
255 | 255 |
# Kill the ceilometer screen windows |
256 | 256 |
for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do |
257 |
- screen_stop $serv |
|
257 |
+ stop_process $serv |
|
258 | 258 |
done |
259 | 259 |
} |
260 | 260 |
|
... | ... |
@@ -77,14 +77,14 @@ function install_ganttclient { |
77 | 77 |
# start_gantt() - Start running processes, including screen |
78 | 78 |
function start_gantt { |
79 | 79 |
if is_service_enabled gantt; then |
80 |
- screen_it gantt "cd $GANTT_DIR && $GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF" |
|
80 |
+ run_process gantt "$GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF" |
|
81 | 81 |
fi |
82 | 82 |
} |
83 | 83 |
|
84 | 84 |
# stop_gantt() - Stop running processes |
85 | 85 |
function stop_gantt { |
86 | 86 |
echo "Stop Gantt" |
87 |
- screen_stop gantt |
|
87 |
+ stop_process gantt |
|
88 | 88 |
} |
89 | 89 |
|
90 | 90 |
# Restore xtrace |
... | ... |
@@ -269,8 +269,8 @@ function install_glance { |
269 | 269 |
|
270 | 270 |
# start_glance() - Start running processes, including screen |
271 | 271 |
function start_glance { |
272 |
- screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" |
|
273 |
- screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" |
|
272 |
+ run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" |
|
273 |
+ run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" |
|
274 | 274 |
echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." |
275 | 275 |
if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then |
276 | 276 |
die $LINENO "g-api did not start" |
... | ... |
@@ -280,8 +280,8 @@ function start_glance { |
280 | 280 |
# stop_glance() - Stop running processes |
281 | 281 |
function stop_glance { |
282 | 282 |
# Kill the Glance screen windows |
283 |
- screen_stop g-api |
|
284 |
- screen_stop g-reg |
|
283 |
+ stop_process g-api |
|
284 |
+ stop_process g-reg |
|
285 | 285 |
} |
286 | 286 |
|
287 | 287 |
|
... | ... |
@@ -189,10 +189,10 @@ function install_heat_other { |
189 | 189 |
|
190 | 190 |
# start_heat() - Start running processes, including screen |
191 | 191 |
function start_heat { |
192 |
- screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF" |
|
193 |
- screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF" |
|
194 |
- screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF" |
|
195 |
- screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-file=$HEAT_CONF" |
|
192 |
+ run_process h-eng "$HEAT_DIR/bin/heat-engine --config-file=$HEAT_CONF" |
|
193 |
+ run_process h-api "$HEAT_DIR/bin/heat-api --config-file=$HEAT_CONF" |
|
194 |
+ run_process h-api-cfn "$HEAT_DIR/bin/heat-api-cfn --config-file=$HEAT_CONF" |
|
195 |
+ run_process h-api-cw "$HEAT_DIR/bin/heat-api-cloudwatch --config-file=$HEAT_CONF" |
|
196 | 196 |
} |
197 | 197 |
|
198 | 198 |
# stop_heat() - Stop running processes |
... | ... |
@@ -200,7 +200,7 @@ function stop_heat { |
200 | 200 |
# Kill the screen windows |
201 | 201 |
local serv |
202 | 202 |
for serv in h-eng h-api h-api-cfn h-api-cw; do |
203 |
- screen_stop $serv |
|
203 |
+ stop_process $serv |
|
204 | 204 |
done |
205 | 205 |
} |
206 | 206 |
|
... | ... |
@@ -152,6 +152,7 @@ function init_horizon { |
152 | 152 |
|
153 | 153 |
# Remove old log files that could mess with how devstack detects whether Horizon |
154 | 154 |
# has been successfully started (see start_horizon() and functions::screen_it()) |
155 |
+ # and run_process |
|
155 | 156 |
sudo rm -f /var/log/$APACHE_NAME/horizon_* |
156 | 157 |
|
157 | 158 |
} |
... | ... |
@@ -173,7 +174,7 @@ function install_horizon { |
173 | 173 |
# start_horizon() - Start running processes, including screen |
174 | 174 |
function start_horizon { |
175 | 175 |
restart_apache_server |
176 |
- screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" |
|
176 |
+ tail_log horizon /var/log/$APACHE_NAME/horizon_error.log |
|
177 | 177 |
} |
178 | 178 |
|
179 | 179 |
# stop_horizon() - Stop running processes (non-screen) |
... | ... |
@@ -381,7 +381,7 @@ function start_ironic { |
381 | 381 |
# start_ironic_api() - Used by start_ironic(). |
382 | 382 |
# Starts Ironic API server. |
383 | 383 |
function start_ironic_api { |
384 |
- screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" |
|
384 |
+ run_process ir-api "$IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" |
|
385 | 385 |
echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..." |
386 | 386 |
if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then |
387 | 387 |
die $LINENO "ir-api did not start" |
... | ... |
@@ -391,7 +391,7 @@ function start_ironic_api { |
391 | 391 |
# start_ironic_conductor() - Used by start_ironic(). |
392 | 392 |
# Starts Ironic conductor. |
393 | 393 |
function start_ironic_conductor { |
394 |
- screen_it ir-cond "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE" |
|
394 |
+ run_process ir-cond "$IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE" |
|
395 | 395 |
# TODO(romcheg): Find a way to check whether the conductor has started. |
396 | 396 |
} |
397 | 397 |
|
... | ... |
@@ -474,11 +474,11 @@ function start_keystone { |
474 | 474 |
|
475 | 475 |
if [ "$KEYSTONE_USE_MOD_WSGI" == "True" ]; then |
476 | 476 |
restart_apache_server |
477 |
- screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone.log" |
|
478 |
- screen_it key-access "sudo tail -f /var/log/$APACHE_NAME/keystone_access.log" |
|
477 |
+ tail_log key /var/log/$APACHE_NAME/keystone.log |
|
478 |
+ tail_log key-access /var/log/$APACHE_NAME/keystone_access.log |
|
479 | 479 |
else |
480 | 480 |
# Start Keystone in a screen window |
481 |
- screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug" |
|
481 |
+ run_process key "$KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug" |
|
482 | 482 |
fi |
483 | 483 |
|
484 | 484 |
echo "Waiting for keystone to start..." |
... | ... |
@@ -499,7 +499,7 @@ function start_keystone { |
499 | 499 |
# stop_keystone() - Stop running processes |
500 | 500 |
function stop_keystone { |
501 | 501 |
# Kill the Keystone screen window |
502 |
- screen_stop key |
|
502 |
+ stop_process key |
|
503 | 503 |
# Cleanup the WSGI files and VHOST |
504 | 504 |
_cleanup_keystone_apache_wsgi |
505 | 505 |
} |
... | ... |
@@ -591,7 +591,7 @@ function install_neutron_agent_packages { |
591 | 591 |
function start_neutron_service_and_check { |
592 | 592 |
local cfg_file_options="$(determine_config_files neutron-server)" |
593 | 593 |
# Start the Neutron service |
594 |
- screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $cfg_file_options" |
|
594 |
+ run_process q-svc "python $NEUTRON_BIN_DIR/neutron-server $cfg_file_options" |
|
595 | 595 |
echo "Waiting for Neutron to start..." |
596 | 596 |
if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then |
597 | 597 |
die $LINENO "Neutron did not start" |
... | ... |
@@ -601,8 +601,8 @@ function start_neutron_service_and_check { |
601 | 601 |
# Start running processes, including screen |
602 | 602 |
function start_neutron_agents { |
603 | 603 |
# Start up the neutron agents if enabled |
604 |
- screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" |
|
605 |
- screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" |
|
604 |
+ run_process q-agt "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" |
|
605 |
+ run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" |
|
606 | 606 |
|
607 | 607 |
if is_provider_network; then |
608 | 608 |
sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE |
... | ... |
@@ -612,24 +612,24 @@ function start_neutron_agents { |
612 | 612 |
fi |
613 | 613 |
|
614 | 614 |
if is_service_enabled q-vpn; then |
615 |
- screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)" |
|
615 |
+ run_process q-vpn "$AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)" |
|
616 | 616 |
else |
617 |
- screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" |
|
617 |
+ run_process q-l3 "python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" |
|
618 | 618 |
fi |
619 | 619 |
|
620 |
- screen_it q-meta "cd $NEUTRON_DIR && python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE" |
|
620 |
+ run_process q-meta "python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE" |
|
621 | 621 |
|
622 | 622 |
if [ "$VIRT_DRIVER" = 'xenserver' ]; then |
623 | 623 |
# For XenServer, start an agent for the domU openvswitch |
624 |
- screen_it q-domua "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" |
|
624 |
+ run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" |
|
625 | 625 |
fi |
626 | 626 |
|
627 | 627 |
if is_service_enabled q-lbaas; then |
628 |
- screen_it q-lbaas "cd $NEUTRON_DIR && python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" |
|
628 |
+ run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" |
|
629 | 629 |
fi |
630 | 630 |
|
631 | 631 |
if is_service_enabled q-metering; then |
632 |
- screen_it q-metering "cd $NEUTRON_DIR && python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" |
|
632 |
+ run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" |
|
633 | 633 |
fi |
634 | 634 |
} |
635 | 635 |
|
... | ... |
@@ -28,12 +28,14 @@ functions to be implemented |
28 | 28 |
git clone xxx |
29 | 29 |
|
30 | 30 |
* ``start_<third_party>``: |
31 |
- start running processes, including screen |
|
31 |
+ start running processes, including screen if USE_SCREEN=True |
|
32 | 32 |
e.g. |
33 |
- screen_it XXXX "cd $XXXXY_DIR && $XXXX_DIR/bin/XXXX-bin" |
|
33 |
+ run_process XXXX "$XXXX_DIR/bin/XXXX-bin" |
|
34 | 34 |
|
35 | 35 |
* ``stop_<third_party>``: |
36 | 36 |
stop running processes (non-screen) |
37 |
+ e.g. |
|
38 |
+ stop_process XXXX |
|
37 | 39 |
|
38 | 40 |
* ``check_<third_party>``: |
39 | 41 |
verify that the integration between neutron server and third-party components is sane |
... | ... |
@@ -39,6 +39,7 @@ NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} |
39 | 39 |
NOVA_CONF_DIR=/etc/nova |
40 | 40 |
NOVA_CONF=$NOVA_CONF_DIR/nova.conf |
41 | 41 |
NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf |
42 |
+NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf |
|
42 | 43 |
NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} |
43 | 44 |
|
44 | 45 |
NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} |
... | ... |
@@ -648,7 +649,7 @@ function start_nova_api { |
648 | 648 |
service_port=$NOVA_SERVICE_PORT_INT |
649 | 649 |
fi |
650 | 650 |
|
651 |
- screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" |
|
651 |
+ run_process n-api "$NOVA_BIN_DIR/nova-api" |
|
652 | 652 |
echo "Waiting for nova-api to start..." |
653 | 653 |
if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then |
654 | 654 |
die $LINENO "nova-api did not start" |
... | ... |
@@ -670,18 +671,24 @@ function start_nova_compute { |
670 | 670 |
|
671 | 671 |
if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then |
672 | 672 |
# The group **$LIBVIRT_GROUP** is added to the current user in this script. |
673 |
- # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. |
|
674 |
- screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'" |
|
673 |
+ # sg' will be used in run_process to execute nova-compute as a member of the |
|
674 |
+ # **$LIBVIRT_GROUP** group. |
|
675 |
+ run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP |
|
675 | 676 |
elif [[ "$VIRT_DRIVER" = 'fake' ]]; then |
676 | 677 |
local i |
677 | 678 |
for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do |
678 |
- screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file <(echo -e '[DEFAULT]\nhost=${HOSTNAME}${i}')" |
|
679 |
+ # Avoid process redirection of fake host configurations by |
|
680 |
+ # creating or modifying real configurations. Each fake |
|
681 |
+ # gets its own configuration and own log file. |
|
682 |
+ local fake_conf="${NOVA_FAKE_CONF}-${i}" |
|
683 |
+ iniset $fake_conf DEFAULT nhost "${HOSTNAME}${i}" |
|
684 |
+ run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file $fake_conf" |
|
679 | 685 |
done |
680 | 686 |
else |
681 | 687 |
if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then |
682 | 688 |
start_nova_hypervisor |
683 | 689 |
fi |
684 |
- screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" |
|
690 |
+ run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" |
|
685 | 691 |
fi |
686 | 692 |
} |
687 | 693 |
|
... | ... |
@@ -694,25 +701,25 @@ function start_nova_rest { |
694 | 694 |
local compute_cell_conf=$NOVA_CONF |
695 | 695 |
fi |
696 | 696 |
|
697 |
- # ``screen_it`` checks ``is_service_enabled``, it is not needed here |
|
698 |
- screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf" |
|
699 |
- screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf" |
|
700 |
- screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf" |
|
697 |
+ # ``run_process`` checks ``is_service_enabled``, it is not needed here |
|
698 |
+ run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf" |
|
699 |
+ run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf" |
|
700 |
+ run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf" |
|
701 | 701 |
|
702 |
- screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf" |
|
703 |
- screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf" |
|
704 |
- screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" |
|
705 |
- screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" |
|
702 |
+ run_process n-crt "$NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf" |
|
703 |
+ run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf" |
|
704 |
+ run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" |
|
705 |
+ run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" |
|
706 | 706 |
|
707 |
- screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" |
|
708 |
- screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" |
|
709 |
- screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" |
|
710 |
- screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf" |
|
707 |
+ run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" |
|
708 |
+ run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" |
|
709 |
+ run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" |
|
710 |
+ run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf" |
|
711 | 711 |
|
712 | 712 |
# Starting the nova-objectstore only if swift3 service is not enabled. |
713 | 713 |
# Swift will act as s3 objectstore. |
714 | 714 |
is_service_enabled swift3 || \ |
715 |
- screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf" |
|
715 |
+ run_process n-obj "$NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf" |
|
716 | 716 |
} |
717 | 717 |
|
718 | 718 |
function start_nova { |
... | ... |
@@ -721,7 +728,7 @@ function start_nova { |
721 | 721 |
} |
722 | 722 |
|
723 | 723 |
function stop_nova_compute { |
724 |
- screen_stop n-cpu |
|
724 |
+ stop_process n-cpu |
|
725 | 725 |
if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then |
726 | 726 |
stop_nova_hypervisor |
727 | 727 |
fi |
... | ... |
@@ -732,7 +739,7 @@ function stop_nova_rest { |
732 | 732 |
# Some services are listed here twice since more than one instance |
733 | 733 |
# of a service may be running in certain configs. |
734 | 734 |
for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do |
735 |
- screen_stop $serv |
|
735 |
+ stop_process $serv |
|
736 | 736 |
done |
737 | 737 |
} |
738 | 738 |
|
... | ... |
@@ -139,6 +139,8 @@ function start_opendaylight { |
139 | 139 |
# The flags to ODL have the following meaning: |
140 | 140 |
# -of13: runs ODL using OpenFlow 1.3 protocol support. |
141 | 141 |
# -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support |
142 |
+ # NOTE(chdent): Leaving this as screen_it instead of run_process until |
|
143 |
+ # the right thing for this service is determined. |
|
142 | 144 |
screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb" |
143 | 145 |
|
144 | 146 |
# Sleep a bit to let OpenDaylight finish starting up |
... | ... |
@@ -147,7 +149,7 @@ function start_opendaylight { |
147 | 147 |
|
148 | 148 |
# stop_opendaylight() - Stop running processes (non-screen) |
149 | 149 |
function stop_opendaylight { |
150 |
- screen_stop odl-server |
|
150 |
+ stop_process odl-server |
|
151 | 151 |
} |
152 | 152 |
|
153 | 153 |
# stop_opendaylight-compute() - Remove OVS bridges |
... | ... |
@@ -168,7 +168,7 @@ function install_python_saharaclient { |
168 | 168 |
|
169 | 169 |
# start_sahara() - Start running processes, including screen |
170 | 170 |
function start_sahara { |
171 |
- screen_it sahara "cd $SAHARA_DIR && $SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE" |
|
171 |
+ run_process sahara "$SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE" |
|
172 | 172 |
} |
173 | 173 |
|
174 | 174 |
# stop_sahara() - Stop running processes |
... | ... |
@@ -659,10 +659,10 @@ function start_swift { |
659 | 659 |
if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then |
660 | 660 |
restart_apache_server |
661 | 661 |
swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start |
662 |
- screen_it s-proxy "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/proxy-server" |
|
662 |
+ tail_log s-proxy /var/log/$APACHE_NAME/proxy-server |
|
663 | 663 |
if [[ ${SWIFT_REPLICAS} == 1 ]]; then |
664 | 664 |
for type in object container account; do |
665 |
- screen_it s-${type} "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/${type}-server-1" |
|
665 |
+ tail_log s-${type} /var/log/$APACHE_NAME/${type}-server-1 |
|
666 | 666 |
done |
667 | 667 |
fi |
668 | 668 |
return 0 |
... | ... |
@@ -683,10 +683,10 @@ function start_swift { |
683 | 683 |
for type in proxy ${todo}; do |
684 | 684 |
swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true |
685 | 685 |
done |
686 |
- screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" |
|
686 |
+ run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" |
|
687 | 687 |
if [[ ${SWIFT_REPLICAS} == 1 ]]; then |
688 | 688 |
for type in object container account; do |
689 |
- screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" |
|
689 |
+ run_process s-${type} "$SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" |
|
690 | 690 |
done |
691 | 691 |
fi |
692 | 692 |
|
... | ... |
@@ -708,9 +708,9 @@ function stop_swift { |
708 | 708 |
swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true |
709 | 709 |
fi |
710 | 710 |
# Dump all of the servers |
711 |
- # Maintain the iteration as screen_stop() has some desirable side-effects |
|
711 |
+ # Maintain the iteration as stop_process() has some desirable side-effects |
|
712 | 712 |
for type in proxy object container account; do |
713 |
- screen_stop s-${type} |
|
713 |
+ stop_process s-${type} |
|
714 | 714 |
done |
715 | 715 |
# Blast out any stragglers |
716 | 716 |
pkill -f swift- |
... | ... |
@@ -75,13 +75,17 @@ function install_XXXX { |
75 | 75 |
|
76 | 76 |
# start_XXXX() - Start running processes, including screen |
77 | 77 |
function start_XXXX { |
78 |
- # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin" |
|
78 |
+ # The quoted command must be a single command and not include any |
|
79 |
+ # shell metacharacters, redirections or shell builtins. |
|
80 |
+ # run_process XXXX "$XXXX_DIR/bin/XXXX-bin" |
|
79 | 81 |
: |
80 | 82 |
} |
81 | 83 |
|
82 | 84 |
# stop_XXXX() - Stop running processes (non-screen) |
83 | 85 |
function stop_XXXX { |
84 |
- # FIXME(dtroyer): stop only our screen screen window? |
|
86 |
+ # for serv in serv-a serv-b; do |
|
87 |
+ # stop_process $serv |
|
88 |
+ # done |
|
85 | 89 |
: |
86 | 90 |
} |
87 | 91 |
|
... | ... |
@@ -228,9 +228,9 @@ function init_trove { |
228 | 228 |
|
229 | 229 |
# start_trove() - Start running processes, including screen |
230 | 230 |
function start_trove { |
231 |
- screen_it tr-api "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" |
|
232 |
- screen_it tr-tmgr "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" |
|
233 |
- screen_it tr-cond "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" |
|
231 |
+ run_process tr-api "$TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug" |
|
232 |
+ run_process tr-tmgr "$TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug" |
|
233 |
+ run_process tr-cond "$TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug" |
|
234 | 234 |
} |
235 | 235 |
|
236 | 236 |
# stop_trove() - Stop running processes |
... | ... |
@@ -238,7 +238,7 @@ function stop_trove { |
238 | 238 |
# Kill the trove screen windows |
239 | 239 |
local serv |
240 | 240 |
for serv in tr-api tr-tmgr tr-cond; do |
241 |
- screen_stop $serv |
|
241 |
+ stop_process $serv |
|
242 | 242 |
done |
243 | 243 |
} |
244 | 244 |
|
... | ... |
@@ -162,9 +162,9 @@ function install_zaqarclient { |
162 | 162 |
# start_zaqar() - Start running processes, including screen |
163 | 163 |
function start_zaqar { |
164 | 164 |
if [[ "$USE_SCREEN" = "False" ]]; then |
165 |
- screen_it zaqar-server "zaqar-server --config-file $ZAQAR_CONF --daemon" |
|
165 |
+ run_process zaqar-server "zaqar-server --config-file $ZAQAR_CONF --daemon" |
|
166 | 166 |
else |
167 |
- screen_it zaqar-server "zaqar-server --config-file $ZAQAR_CONF" |
|
167 |
+ run_process zaqar-server "zaqar-server --config-file $ZAQAR_CONF" |
|
168 | 168 |
fi |
169 | 169 |
|
170 | 170 |
echo "Waiting for Zaqar to start..." |
... | ... |
@@ -37,7 +37,6 @@ umask 022 |
37 | 37 |
# Keep track of the devstack directory |
38 | 38 |
TOP_DIR=$(cd $(dirname "$0") && pwd) |
39 | 39 |
|
40 |
- |
|
41 | 40 |
# Sanity Checks |
42 | 41 |
# ------------- |
43 | 42 |
|
... | ... |
@@ -74,7 +73,6 @@ if [[ $EUID -eq 0 ]]; then |
74 | 74 |
exit 1 |
75 | 75 |
fi |
76 | 76 |
|
77 |
- |
|
78 | 77 |
# Prepare the environment |
79 | 78 |
# ----------------------- |
80 | 79 |
|
... | ... |
@@ -1210,7 +1208,7 @@ fi |
1210 | 1210 |
|
1211 | 1211 |
if is_service_enabled zeromq; then |
1212 | 1212 |
echo_summary "Starting zermomq receiver" |
1213 |
- screen_it zeromq "cd $NOVA_DIR && $OSLO_BIN_DIR/oslo-messaging-zmq-receiver" |
|
1213 |
+ run_process zeromq "$OSLO_BIN_DIR/oslo-messaging-zmq-receiver" |
|
1214 | 1214 |
fi |
1215 | 1215 |
|
1216 | 1216 |
# Launch the nova-api and wait for it to answer before continuing |
... | ... |
@@ -1318,7 +1316,7 @@ if is_service_enabled nova && is_baremetal; then |
1318 | 1318 |
fi |
1319 | 1319 |
# ensure callback daemon is running |
1320 | 1320 |
sudo pkill nova-baremetal-deploy-helper || true |
1321 |
- screen_it baremetal "cd ; nova-baremetal-deploy-helper" |
|
1321 |
+ run_process baremetal "nova-baremetal-deploy-helper" |
|
1322 | 1322 |
fi |
1323 | 1323 |
|
1324 | 1324 |
# Save some values we generated for later use |
... | ... |
@@ -1,9 +1,9 @@ |
1 | 1 |
#!/bin/bash |
2 |
-# tests/exec.sh - Test DevStack screen_it() and screen_stop() |
|
2 |
+# tests/exec.sh - Test DevStack run_process() and stop_process() |
|
3 | 3 |
# |
4 | 4 |
# exec.sh start|stop|status |
5 | 5 |
# |
6 |
-# Set USE_SCREEN to change the default |
|
6 |
+ # Set USE_SCREEN to True|False to change the use of screen. |
|
7 | 7 |
# |
8 | 8 |
# This script emulates the basic exec envirnment in ``stack.sh`` to test |
9 | 9 |
# the process spawn and kill operations. |
... | ... |
@@ -94,12 +94,12 @@ fi |
94 | 94 |
if [[ "$1" == "start" ]]; then |
95 | 95 |
echo "Start service" |
96 | 96 |
setup_screen |
97 |
- screen_it fake-service "$TOP_DIR/tests/fake-service.sh" |
|
97 |
+ run_process fake-service "$TOP_DIR/tests/fake-service.sh" |
|
98 | 98 |
sleep 1 |
99 | 99 |
status |
100 | 100 |
elif [[ "$1" == "stop" ]]; then |
101 | 101 |
echo "Stop service" |
102 |
- screen_stop fake-service |
|
102 |
+ stop_process fake-service |
|
103 | 103 |
status |
104 | 104 |
elif [[ "$1" == "status" ]]; then |
105 | 105 |
status |