#!/bin/bash
#
# lib/nova
# Functions to control the configuration and operation of the **Nova** service

# Dependencies:
#
# - ``functions`` file
# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
# - ``FILES``
# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
# - ``LIBVIRT_TYPE`` must be defined
# - ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined
# - ``KEYSTONE_TOKEN_FORMAT`` must be defined

# ``stack.sh`` calls the entry points in this order:
#
# - install_nova
# - configure_nova
# - create_nova_conf
# - init_nova
# - start_nova
# - stop_nova
# - cleanup_nova

# Save trace setting
 _XTRACE_LIB_NOVA=$(set +o | grep xtrace)
bf67c19c
 set +o xtrace
 
 # Defaults
 # --------
 
 # Set up default directories
e08ab104
 GITDIR["python-novaclient"]=$DEST/python-novaclient
1258da64
 GITDIR["os-vif"]=$DEST/os-vif
4533eeec
 NOVA_DIR=$DEST/nova
5cb19069
 
4533eeec
 # Nova virtual environment
 if [[ ${USE_VENV} = True ]]; then
     PROJECT_VENV["nova"]=${NOVA_DIR}.venv
     NOVA_BIN_DIR=${PROJECT_VENV["nova"]}/bin
 else
     NOVA_BIN_DIR=$(get_python_exec_prefix)
 fi
5cb19069
 
bf67c19c
 NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova}
 # INSTANCES_PATH is the previous name for this
 NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}}
1f82f430
 NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova}
bf67c19c
 
 NOVA_CONF_DIR=/etc/nova
 NOVA_CONF=$NOVA_CONF_DIR/nova.conf
fb2a3ae3
 NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf
afc14c8e
 NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf
f3d53315
 NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf
2f27a0ed
 NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf
fb2a3ae3
 NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell}
03786b1c
 NOVA_API_DB=${NOVA_API_DB:-nova_api}
b90bb1a4
 NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi
 NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi
 NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini
 NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini
fb2a3ae3
 
f3d53315
 # The total number of cells we expect. Must be greater than one and doesn't
 # count cell0.
 NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1}
 # Our cell index, so we know what rabbit vhost to connect to.
 # This should be in the range of 1-$NOVA_NUM_CELLS
 NOVA_CPU_CELL=${NOVA_CPU_CELL:-1}
 
bf67c19c
 NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
 
b90bb1a4
 # Toggle for deploying Nova-API under a wsgi server. We default to
 # true to use UWSGI, but allow False so that fall back to the
 # eventlet server can happen for grenade runs.
 # NOTE(cdent): We can adjust to remove the eventlet-base api service
 # after pike, at which time we can stop using NOVA_USE_MOD_WSGI to
 # mean "use uwsgi" because we'll be always using uwsgi.
 NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True}
d5537c1d
 
f3b2f4c8
 if is_service_enabled tls-proxy; then
18d4778c
     NOVA_SERVICE_PROTOCOL="https"
 fi
 
e9870eb1
 # Whether to use TLS for comms between the VNC/SPICE/serial proxy
 # services and the compute node
 NOVA_CONSOLE_PROXY_COMPUTE_TLS=${NOVA_CONSOLE_PROXY_COMPUTE_TLS:-False}
 
3a3a2bac
 # Public facing bits
 NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
 NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
 NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
 NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
180f5eb6
 NOVA_SERVICE_LOCAL_HOST=${NOVA_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST}
dc7b4294
 NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
08abba00
 METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775}
3a3a2bac
 
24f6efad
 # Option to enable/disable config drive
dc97cb71
 # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
7682ea88
 FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"}
24f6efad
 
bf67c19c
 # Nova supports pluggable schedulers.  The default ``FilterScheduler``
 # should work in most cases.
b298e57c
 SCHEDULER=${SCHEDULER:-filter_scheduler}
bf67c19c
 
e0d61118
 # The following FILTERS contains SameHostFilter and DifferentHostFilter with
 # the default filters.
0629c4fe
 FILTERS="RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
e0d61118
 
bf67c19c
 QEMU_CONF=/etc/libvirt/qemu.conf
 
8c032d16
 # Set default defaults here as some hypervisor drivers override these
 PUBLIC_INTERFACE_DEFAULT=br100
 FLAT_NETWORK_BRIDGE_DEFAULT=br100
dc97cb71
 # Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that
 # the default isn't completely crazy. This will match ``eth*``, ``em*``, or
 # the new ``p*`` interfaces, then basically picks the first
11007177
 # alphabetically. It's probably wrong, however it's less wrong than
dc97cb71
 # always using ``eth0`` which doesn't exist on new Linux distros at all.
fdb920c3
 GUEST_INTERFACE_DEFAULT=$(ip link \
     | grep 'state UP' \
     | awk '{print $2}' \
     | sed 's/://' \
     | grep ^[ep] \
     | head -1)
8c032d16
 
dc97cb71
 # ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration.
 # In multi-node setups allows compute hosts to not run ``n-novnc``.
53753293
 NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED)
4e07fdcb
 
8c032d16
 # Get hypervisor configuration
 # ----------------------------
 
 NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins
 if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
     # Load plugin
     source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER
 fi
 
bf67c19c
 
b3236914
 # Nova Network Configuration
 # --------------------------
 
 NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}
6a008fa7
 
b3236914
 VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
 FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
 
 # If you are using the FlatDHCP network mode on multiple hosts, set the
 # ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
 # have an IP or you risk breaking things.
 #
 # **DHCP Warning**:  If your flat interface device uses DHCP, there will be a
 # hiccup while the network is moved from the flat interface to the flat network
 # bridge.  This will happen when you launch your first instance.  Upon launch
 # you will lose all connectivity to the node, and the VM launch will probably
 # fail.
 #
 # If you are running on a single node and don't need to access the VMs from
 # devices other than that node, you can set ``FLAT_INTERFACE=``
 # This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
b93b74ca
 FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
b3236914
 
 # ``MULTI_HOST`` is a mode where each compute node runs its own network node.  This
 # allows network operations and routing for a VM to occur on the server that is
 # running the VM - removing a SPOF and bandwidth bottleneck.
97524318
 MULTI_HOST=$(trueorfalse False MULTI_HOST)
b3236914
 
dc97cb71
 # ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack,
18d6298e
 # where there are at least two nova-computes.
53753293
 NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST)
18d6298e
 
b3236914
 # Test floating pool and range are used for testing.  They are defined
 # here until the admin APIs can replace nova-manage
 TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
 TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
 
14e16e42
 # Other Nova configurations
 # ----------------------------
 
 # ``NOVA_USE_SERVICE_TOKEN`` is a mode where service token is passed along with
 # user token while communicating to external RESP API's like Neutron, Cinder
 # and Glance.
 NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN)
 
cc6b4435
 # Functions
 # ---------
bf67c19c
 
e4fa7213
 # Test if any Nova services are enabled
 # is_nova_enabled
 function is_nova_enabled {
902158bb
     [[ ,${DISABLED_SERVICES} =~ ,"nova" ]] && return 1
e4fa7213
     [[ ,${ENABLED_SERVICES} =~ ,"n-" ]] && return 0
     return 1
 }
 
 # Test if any Nova Cell services are enabled
 # is_nova_enabled
 function is_n-cell_enabled {
6b2f2657
     [[ ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && return 0
e4fa7213
     return 1
 }
 
e9870eb1
 # is_nova_console_proxy_compute_tls_enabled() - Test if the Nova Console Proxy
 # service has TLS enabled
 function is_nova_console_proxy_compute_tls_enabled {
     [[ ${NOVA_CONSOLE_PROXY_COMPUTE_TLS} = "True" ]] && return 0
     return 1
 }
 
bf67c19c
 # Helper to clean iptables rules
aee18c74
 function clean_iptables {
bf67c19c
     # Delete rules
     sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" |  sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash
     # Delete nat rules
     sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" |  grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables -t nat",$0}' | bash
     # Delete chains
     sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" |  sed "s/-N/-X/g" | awk '{print "sudo iptables",$0}' | bash
     # Delete nat chains
     sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" |  grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash
 }
 
 # cleanup_nova() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
aee18c74
 function cleanup_nova {
bf67c19c
     if is_service_enabled n-cpu; then
         # Clean iptables from previous runs
         clean_iptables
 
         # Destroy old instances
ada886dd
         local instances
         instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
bf67c19c
         if [ ! "$instances" = "" ]; then
             echo $instances | xargs -n1 sudo virsh destroy || true
cfc3edc9
             if ! xargs -n1 sudo virsh undefine --managed-save --nvram <<< $instances; then
                 # Can't delete with nvram flags, then just try without this flag
                 xargs -n1 sudo virsh undefine --managed-save <<< $instances
             fi
bf67c19c
         fi
 
         # Logout and delete iscsi sessions
ada886dd
         local tgts
         tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
0038a1ac
         local target
c0fad2b6
         for target in $tgts; do
             sudo iscsiadm --mode node -T $target --logout || true
         done
         sudo iscsiadm --mode node --op delete || true
bf67c19c
 
         # Clean out the instances directory.
         sudo rm -rf $NOVA_INSTANCES_PATH/*
     fi
995eb927
 
1f82f430
     sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR
2aa2a89c
 
     # NOTE(dtroyer): This really should be called from here but due to the way
     #                nova abuses the _cleanup() function we're moving it
     #                directly into cleanup.sh until this can be fixed.
     #if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
     #    cleanup_nova_hypervisor
     #fi
13f65572
 
b90bb1a4
     stop_process "n-api"
     stop_process "n-api-meta"
     remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI"
     remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI"
d5537c1d
 }
 
bf67c19c
 # configure_nova() - Set config files, create data dirs, etc
aee18c74
 function configure_nova {
bf67c19c
     # Put config files in ``/etc/nova`` for everyone to find
8421c2b9
     sudo install -d -o $STACK_USER $NOVA_CONF_DIR
bf67c19c
 
c6782413
     configure_rootwrap nova
bf67c19c
 
ea70cc94
     if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
bf67c19c
         # Get the sample configuration file in place
         cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR
     fi
 
     if is_service_enabled n-cpu; then
         # Force IP forwarding on, just on case
         sudo sysctl -w net.ipv4.ip_forward=1
 
b1e49bfd
         if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
             # Check for kvm (hardware based virtualization).  If unable to initialize
             # kvm, we drop back to the slower emulation mode (qemu).  Note: many systems
             # come with hardware virtualization disabled in BIOS.
             if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then
                 sudo modprobe kvm || true
                 if [ ! -e /dev/kvm ]; then
                     echo "WARNING: Switching to QEMU"
                     LIBVIRT_TYPE=qemu
91e3c1ec
                     if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then
b1e49bfd
                         # https://bugzilla.redhat.com/show_bug.cgi?id=753589
                         sudo setsebool virt_use_execmem on
                     fi
bf67c19c
                 fi
             fi
 
b1e49bfd
             # Install and configure **LXC** if specified.  LXC is another approach to
             # splitting a system into many smaller parts.  LXC uses cgroups and chroot
             # to simulate multiple systems.
             if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
                 if is_ubuntu; then
dca06dc7
                     # enable nbd for lxc unless you're using an lvm backend
                     # otherwise you can't boot instances
                     if [[ "$NOVA_BACKEND" != "LVM" ]]; then
                         sudo modprobe nbd
                     fi
bf67c19c
                 fi
             fi
         fi
 
         # Instance Storage
         # ----------------
 
         # Nova stores each instance in its own directory.
8421c2b9
         sudo install -d -o $STACK_USER $NOVA_INSTANCES_PATH
bf67c19c
 
         # You can specify a different disk to be mounted and used for backing the
         # virtual machines.  If there is a partition labeled nova-instances we
         # mount it (ext filesystems can be labeled via e2label).
         if [ -L /dev/disk/by-label/nova-instances ]; then
             if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then
                 sudo mount -L nova-instances $NOVA_INSTANCES_PATH
91b8d13e
                 sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH
bf67c19c
             fi
         fi
65aaa183
         if is_suse; then
             # iscsid is not started by default
             start_service iscsid
         fi
bf67c19c
     fi
8c032d16
 
     # Rebuild the config file from scratch
     create_nova_conf
 
a99b869d
     if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
8c032d16
         # Configure hypervisor plugin
         configure_nova_hypervisor
     fi
bf67c19c
 }
 
a0dce264
 # create_nova_accounts() - Set up common required nova accounts
67bc8e8a
 #
42a59c2b
 # Project              User         Roles
a0dce264
 # ------------------------------------------------------------------
7580a0c3
 # SERVICE_PROJECT_NAME  nova         admin
 # SERVICE_PROJECT_NAME  nova         ResellerAdmin (if Swift is enabled)
67bc8e8a
 function create_nova_accounts {
a0dce264
 
     # Nova
     if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
0abde393
 
e8bc2b82
         # NOTE(jamielennox): Nova doesn't need the admin role here, however neutron uses
         # this service user when notifying nova of changes and that requires the admin role.
85ff5323
         create_service_user "nova" "admin"
0abde393
 
985e958d
         local nova_api_url
         if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then
             nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT"
         else
             nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute"
a0dce264
         fi
985e958d
 
         get_or_create_service "nova_legacy" "compute_legacy" "Nova Compute Service (Legacy 2.0)"
ae4578be
         get_or_create_endpoint \
985e958d
             "compute_legacy" \
             "$REGION_NAME" \
bd27cc2e
             "$nova_api_url/v2/\$(project_id)s"
985e958d
 
         get_or_create_service "nova" "compute" "Nova Compute Service"
ae4578be
         get_or_create_endpoint \
985e958d
             "compute" \
             "$REGION_NAME" \
7f87efdd
             "$nova_api_url/v2.1"
a0dce264
     fi
42a59c2b
 
     if is_service_enabled n-api; then
         # Swift
         if is_service_enabled swift; then
             # Nova needs ResellerAdmin role to download images when accessing
             # swift through the s3 api.
cbcbd8f3
             get_or_add_user_project_role ResellerAdmin nova $SERVICE_PROJECT_NAME $SERVICE_DOMAIN_NAME $SERVICE_DOMAIN_NAME
42a59c2b
         fi
     fi
 
     # S3
4a974e61
     if is_service_enabled swift3; then
985e958d
         get_or_create_service "s3" "s3" "S3"
ae4578be
         get_or_create_endpoint \
985e958d
             "s3" \
             "$REGION_NAME" \
             "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
             "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
             "http://$SERVICE_HOST:$S3_SERVICE_PORT"
42a59c2b
     fi
a0dce264
 }
 
da7b8091
 # create_nova_conf() - Create a new nova.conf file
aee18c74
 function create_nova_conf {
bf67c19c
     # Remove legacy ``nova.conf``
     rm -f $NOVA_DIR/bin/nova.conf
 
     # (Re)create ``nova.conf``
3cf1ffbc
     rm -f $NOVA_CONF
03997942
     iniset $NOVA_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
18d6298e
     if [ "$NOVA_ALLOW_MOVE_TO_SAME_HOST" == "True" ]; then
         iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True"
     fi
38880982
     iniset $NOVA_CONF wsgi api_paste_config "$NOVA_API_PASTE_INI"
9bc47db2
     iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
886d7dbe
     iniset $NOVA_CONF scheduler driver "$SCHEDULER"
1ade00da
     iniset $NOVA_CONF filter_scheduler enabled_filters "$FILTERS"
a3844240
     if [[ $SCHEDULER == "filter_scheduler" ]]; then
         iniset $NOVA_CONF scheduler workers "$API_WORKERS"
     fi
9bc47db2
     iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
180f5eb6
     if [[ $SERVICE_IP_VERSION == 6 ]]; then
         iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6"
         iniset $NOVA_CONF DEFAULT use_ipv6 "True"
     else
         iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
     fi
9bc47db2
     iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
180f5eb6
     iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
     iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
bf67c19c
 
8dd918c5
     iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager
a5b72b05
 
55dd68a6
     if is_fedora || is_suse; then
         # nova defaults to /usr/local/bin, but fedora and suse pip like to
00fd79d3
         # install things in /usr/bin
         iniset $NOVA_CONF DEFAULT bindir "/usr/bin"
     fi
 
1b457c9a
     # only setup database connections if there are services that
     # require them running on the host. The ensures that n-cpu doesn't
     # leak a need to use the db in a multinode scenario.
     if is_service_enabled n-api n-cond n-sched; then
5adfef0a
         # If we're in multi-tier cells mode, we want our control services pointing
         # at cell0 instead of cell1 to ensure isolation. If not, we point everything
         # at the main database like normal.
         if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then
             local db="nova_cell1"
         else
             local db="nova_cell0"
ab980ce5
             # When in superconductor mode, nova-compute can't send instance
             # info updates to the scheduler, so just disable it.
             iniset $NOVA_CONF filter_scheduler track_instance_changes False
5adfef0a
         fi
 
         iniset $NOVA_CONF database connection `database_connection_url $db`
1b457c9a
         iniset $NOVA_CONF api_database connection `database_connection_url nova_api`
     fi
 
bf67c19c
     if is_service_enabled n-api; then
d73af878
         if is_service_enabled n-api-meta; then
             # If running n-api-meta as a separate service
             NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
         fi
9bc47db2
         iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
b90bb1a4
         if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
3a3a2bac
             # Set the service port for a proxy to take the original
9bc47db2
             iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT"
6254d5fd
             iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT
3a3a2bac
         fi
741fc5c0
 
1f82f430
         configure_auth_token_middleware $NOVA_CONF nova $NOVA_AUTH_CACHE_DIR
bf67c19c
     fi
741fc5c0
 
18d4778c
     if is_service_enabled cinder; then
f3b2f4c8
         if is_service_enabled tls-proxy; then
18d4778c
             CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
             CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
55c7ebbd
             iniset $NOVA_CONF cinder cafile $SSL_BUNDLE_FILE
18d4778c
         fi
     fi
 
bf67c19c
     if [ -n "$NOVA_STATE_PATH" ]; then
9bc47db2
         iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH"
23d6d506
         iniset $NOVA_CONF oslo_concurrency lock_path "$NOVA_STATE_PATH"
bf67c19c
     fi
     if [ -n "$NOVA_INSTANCES_PATH" ]; then
9bc47db2
         iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH"
bf67c19c
     fi
     if [ "$MULTI_HOST" != "False" ]; then
9bc47db2
         iniset $NOVA_CONF DEFAULT multi_host "True"
         iniset $NOVA_CONF DEFAULT send_arp_for_ha "True"
bf67c19c
     fi
     if [ "$SYSLOG" != "False" ]; then
9bc47db2
         iniset $NOVA_CONF DEFAULT use_syslog "True"
bf67c19c
     fi
24f6efad
     if [ "$FORCE_CONFIG_DRIVE" != "False" ]; then
         iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE"
     fi
c114449b
 
     # nova defaults to genisoimage but only mkisofs is available for 15.0+
     if is_suse; then
         iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs
     fi
 
05ae833b
     # Format logging
b90bb1a4
     setup_logging $NOVA_CONF
9751be66
 
21221d1a
     iniset $NOVA_CONF upgrade_levels compute "auto"
 
b90bb1a4
     write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
dc7b4294
     write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}"
d5537c1d
 
1fcc6a1f
     if is_service_enabled ceilometer; then
9bc47db2
         iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
         iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour"
a9787d07
         iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state"
1fcc6a1f
     fi
 
14d86e84
     # Set the oslo messaging driver to the typical default. This does not
     # enable notifications, but it will allow them to function when enabled.
45da777d
     iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2"
b645904d
     iniset $NOVA_CONF oslo_messaging_notifications transport_url $(get_notification_url)
2dd110ce
     iniset_rpc_backend nova $NOVA_CONF
2f72050a
 
1f79bad7
     iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS"
05bd7b80
     iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS"
1ce19ab7
     # don't let the conductor get out of control now that we're using a pure python db driver
     iniset $NOVA_CONF conductor workers "$API_WORKERS"
18d4778c
 
679f395f
     iniset $NOVA_CONF cinder os_region_name "$REGION_NAME"
 
f3b2f4c8
     if is_service_enabled tls-proxy; then
18d4778c
         iniset $NOVA_CONF DEFAULT glance_protocol https
411c34da
         iniset $NOVA_CONF oslo_middleware enable_proxy_headers_parsing True
18d4778c
     fi
 
7159b4ba
     iniset $NOVA_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
ac8ff0f1
 
     # Setup logging for nova-dhcpbridge command line
     sudo cp "$NOVA_CONF" "$NOVA_CONF_DIR/nova-dhcpbridge.conf"
bd7a5120
 
ecbebd5d
     if is_service_enabled n-net; then
         local service="n-dhcp"
         local logfile="${service}.log.${CURRENT_LOG_TIME}"
         local real_logfile="${LOGDIR}/${logfile}"
         if [[ -n ${LOGDIR} ]]; then
             bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log"
             iniset "$NOVA_CONF_DIR/nova-dhcpbridge.conf" DEFAULT log_file "$real_logfile"
bd7a5120
         fi
 
ecbebd5d
         iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF_DIR/nova-dhcpbridge.conf"
     fi
14e16e42
 
     if [ "$NOVA_USE_SERVICE_TOKEN" == "True" ]; then
         init_nova_service_user_conf
     fi
f3d53315
 
     if is_service_enabled n-cond; then
         for i in $(seq 1 $NOVA_NUM_CELLS); do
             local conf
             local vhost
             conf=$(conductor_conf $i)
             vhost="nova_cell${i}"
1a2c86cf
             # clean old conductor conf
             rm -f $conf
f3d53315
             iniset $conf database connection `database_connection_url nova_cell${i}`
             iniset $conf conductor workers "$API_WORKERS"
             iniset $conf DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
afc14c8e
             # if we have a singleconductor, we don't have per host message queues.
             if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
                 iniset_rpc_backend nova $conf DEFAULT
             else
                 rpc_backend_add_vhost $vhost
                 iniset_rpc_backend nova $conf DEFAULT $vhost
f6d566c2
                 # When running in superconductor mode, the cell conductor
                 # must be configured to talk to the placement service for
                 # reschedules to work.
                 if is_service_enabled placement placement-client; then
                     configure_placement_nova_compute $conf
                 fi
afc14c8e
             fi
9d7e74e5
             # Format logging
             setup_logging $conf
f3d53315
         done
     fi
12579c3d
 
     # Console proxy configuration has to go after conductor configuration
     # because the per cell config file nova_cellN.conf is cleared out as part
     # of conductor configuration.
     if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
         configure_console_proxies
     else
         for i in $(seq 1 $NOVA_NUM_CELLS); do
             local conf
             conf=$(conductor_conf $i)
             configure_console_proxies $conf
         done
     fi
 }
 
65ad7940
 function configure_console_compute {
12579c3d
     # All nova-compute workers need to know the vnc configuration options
     # These settings don't hurt anything if n-xvnc and n-novnc are disabled
     if is_service_enabled n-cpu; then
         NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
65ad7940
         iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL"
12579c3d
         XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
65ad7940
         iniset $NOVA_CPU_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL"
12579c3d
         SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
65ad7940
         iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
12579c3d
     fi
 
     if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then
         # Address on which instance vncservers will listen on compute hosts.
         # For multi-host, this should be the management ip of the compute host.
         VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
         VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
65ad7940
         iniset $NOVA_CPU_CONF vnc server_listen "$VNCSERVER_LISTEN"
         iniset $NOVA_CPU_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
     else
         iniset $NOVA_CPU_CONF vnc enabled false
     fi
 
     if is_service_enabled n-spice; then
         # Address on which instance spiceservers will listen on compute hosts.
         # For multi-host, this should be the management ip of the compute host.
         SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
         SPICESERVER_LISTEN=${SPICESERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
         iniset $NOVA_CPU_CONF spice enabled true
         iniset $NOVA_CPU_CONF spice server_listen "$SPICESERVER_LISTEN"
         iniset $NOVA_CPU_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
     fi
 
     if is_service_enabled n-sproxy; then
         iniset $NOVA_CPU_CONF serial_console enabled True
     fi
 }
 
 function configure_console_proxies {
     # Use the provided config file path or default to $NOVA_CONF.
     local conf=${1:-$NOVA_CONF}
 
     if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then
12579c3d
         iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
         iniset $conf vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
 
         if is_nova_console_proxy_compute_tls_enabled ; then
             iniset $conf vnc auth_schemes "vencrypt"
             iniset $conf vnc vencrypt_client_key "/etc/pki/nova-novnc/client-key.pem"
             iniset $conf vnc vencrypt_client_cert "/etc/pki/nova-novnc/client-cert.pem"
             iniset $conf vnc vencrypt_ca_certs "/etc/pki/nova-novnc/ca-cert.pem"
 
             sudo mkdir -p /etc/pki/nova-novnc
             deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem
             deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem
         fi
     fi
 
     if is_service_enabled n-spice; then
         iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
     fi
 
     if is_service_enabled n-sproxy; then
         iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
     fi
14e16e42
 }
 
 function init_nova_service_user_conf {
     iniset $NOVA_CONF service_user send_service_user_token True
     iniset $NOVA_CONF service_user auth_type password
c2c89e4b
     iniset $NOVA_CONF service_user auth_url "$KEYSTONE_SERVICE_URI"
14e16e42
     iniset $NOVA_CONF service_user username nova
     iniset $NOVA_CONF service_user password "$SERVICE_PASSWORD"
     iniset $NOVA_CONF service_user user_domain_name "$SERVICE_DOMAIN_NAME"
     iniset $NOVA_CONF service_user project_name "$SERVICE_PROJECT_NAME"
     iniset $NOVA_CONF service_user project_domain_name "$SERVICE_DOMAIN_NAME"
     iniset $NOVA_CONF service_user auth_strategy keystone
da7b8091
 }
bf67c19c
 
f3d53315
 function conductor_conf {
     local cell="$1"
     echo "${NOVA_CONF_DIR}/nova_cell${cell}.conf"
 }
 
aee18c74
 function init_nova_cells {
fb2a3ae3
     if is_service_enabled n-cell; then
         cp $NOVA_CONF $NOVA_CELLS_CONF
23d6d506
         iniset $NOVA_CELLS_CONF database connection `database_connection_url $NOVA_CELLS_DB`
6f0205b0
         rpc_backend_add_vhost child_cell
6176ae68
         iniset_rpc_backend nova $NOVA_CELLS_CONF DEFAULT child_cell
fb2a3ae3
         iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF
         iniset $NOVA_CELLS_CONF cells enable True
c62c2b9b
         iniset $NOVA_CELLS_CONF cells cell_type compute
fb2a3ae3
         iniset $NOVA_CELLS_CONF cells name child
 
         iniset $NOVA_CONF cells enable True
c62c2b9b
         iniset $NOVA_CONF cells cell_type api
fb2a3ae3
         iniset $NOVA_CONF cells name region
 
         if is_service_enabled n-api-meta; then
             NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
             iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS
             iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata
         fi
 
afc14c8e
         # Cells v1 conductor should be the nova-cells.conf
         NOVA_COND_CONF=$NOVA_CELLS_CONF
 
633dbc3d
         time_start "dbsync"
fb2a3ae3
         $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync
633dbc3d
         time_stop "dbsync"
d5b74c68
         $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell create --name=region --cell_type=parent --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=/ --woffset=0 --wscale=1
         $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1
f15224c7
 
         # Creates the single cells v2 cell for the child cell (v1) nova db.
a62ede7d
         $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell_v2 create_cell \
f15224c7
             --transport-url $(get_transport_url child_cell) --name 'cell1'
fb2a3ae3
     fi
 }
 
1f82f430
 # create_nova_cache_dir() - Part of the init_nova() process
 function create_nova_cache_dir {
     # Create cache dir
     sudo install -d -o $STACK_USER $NOVA_AUTH_CACHE_DIR
     rm -f $NOVA_AUTH_CACHE_DIR/*
 }
 
aee18c74
 function create_nova_conf_nova_network {
6a008fa7
     local public_interface=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
b3236914
     iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
6a008fa7
     iniset $NOVA_CONF DEFAULT public_interface "$public_interface"
9bc47db2
     iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
     iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
66afb47c
     if [ -n "$FLAT_INTERFACE" ]; then
9bc47db2
         iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE"
66afb47c
     fi
2a7e909b
     iniset $NOVA_CONF DEFAULT use_neutron False
66afb47c
 }
 
f03bafeb
 # create_nova_keys_dir() - Part of the init_nova() process
aee18c74
 function create_nova_keys_dir {
f03bafeb
     # Create keys dir
8421c2b9
     sudo install -d -o $STACK_USER ${NOVA_STATE_PATH} ${NOVA_STATE_PATH}/keys
f03bafeb
 }
 
da7b8091
 # init_nova() - Initialize databases, etc.
aee18c74
 function init_nova {
f03bafeb
     # All nova components talk to a central database.
     # Only do this step once on the API node for an entire cluster.
c439b5df
     if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
f575aefd
         recreate_database $NOVA_API_DB
         $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync
 
29bb53fd
         recreate_database nova_cell0
bf67c19c
 
ac5fdb4c
         # map_cell0 will create the cell mapping record in the nova_api DB so
         # this needs to come after the api_db sync happens. We also want to run
         # this before the db sync below since that will migrate both the nova
         # and nova_cell0 databases.
a62ede7d
         $NOVA_BIN_DIR/nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0`
ac5fdb4c
 
f3d53315
         # (Re)create nova databases
         for i in $(seq 1 $NOVA_NUM_CELLS); do
             recreate_database nova_cell${i}
2b3bb30d
             $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync --local_cell
f3d53315
         done
 
5adfef0a
         # Migrate nova and nova_cell0 databases.
         $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
 
fb2a3ae3
         if is_service_enabled n-cell; then
157c84b8
             recreate_database $NOVA_CELLS_DB
fb2a3ae3
         fi
03786b1c
 
bb49d357
         # Run online migrations on the new databases
         # Needed for flavor conversion
4f55c2d5
         $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations
f15224c7
 
         # create the cell1 cell for the main nova db where the hosts live
f3d53315
         for i in $(seq 1 $NOVA_NUM_CELLS); do
a62ede7d
             $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF --config-file $(conductor_conf $i) cell_v2 create_cell --name "cell$i"
f3d53315
         done
bf67c19c
     fi
 
1f82f430
     create_nova_cache_dir
f03bafeb
     create_nova_keys_dir
c070a3db
 
     if [[ "$NOVA_BACKEND" == "LVM" ]]; then
         init_default_lvm_volume_group
     fi
bf67c19c
 }
 
 # install_novaclient() - Collect source and prepare
aee18c74
 function install_novaclient {
e08ab104
     if use_library_from_git "python-novaclient"; then
         git_clone_by_name "python-novaclient"
         setup_dev_lib "python-novaclient"
         sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-novaclient"]}/tools/,/etc/bash_completion.d/}nova.bash_completion
5cb19069
     fi
bf67c19c
 }
 
 # install_nova() - Collect source and prepare
aee18c74
 function install_nova {
1258da64
 
     # Install os-vif
     if use_library_from_git "os-vif"; then
         git_clone_by_name "os-vif"
         setup_dev_lib "os-vif"
     fi
 
8c032d16
     if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
         install_nova_hypervisor
bf67c19c
     fi
 
5a35e73b
     if is_service_enabled n-novnc; then
         # a websockets/html5 or flash powered VNC console for vm instances
53753293
         NOVNC_FROM_PACKAGE=$(trueorfalse False NOVNC_FROM_PACKAGE)
add4ca3e
         if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then
5a35e73b
             NOVNC_WEB_DIR=/usr/share/novnc
             install_package novnc
         else
             NOVNC_WEB_DIR=$DEST/noVNC
             git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH
         fi
     fi
 
     if is_service_enabled n-spice; then
         # a websockets/html5 or flash powered SPICE console for vm instances
53753293
         SPICE_FROM_PACKAGE=$(trueorfalse True SPICE_FROM_PACKAGE)
add4ca3e
         if [ "$SPICE_FROM_PACKAGE" = "True" ]; then
5a35e73b
             SPICE_WEB_DIR=/usr/share/spice-html5
             install_package spice-html5
         else
             SPICE_WEB_DIR=$DEST/spice-html5
             git_clone $SPICE_REPO $SPICE_WEB_DIR $SPICE_BRANCH
         fi
     fi
 
bf67c19c
     git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
253a1a35
     setup_develop $NOVA_DIR
fac533e3
     sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion
bf67c19c
 }
 
3a3a2bac
 # start_nova_api() - Start the API process ahead of other things
aee18c74
 function start_nova_api {
3a3a2bac
     # Get right service port for testing
     local service_port=$NOVA_SERVICE_PORT
18d4778c
     local service_protocol=$NOVA_SERVICE_PROTOCOL
b90bb1a4
     local nova_url
3a3a2bac
     if is_service_enabled tls-proxy; then
         service_port=$NOVA_SERVICE_PORT_INT
18d4778c
         service_protocol="http"
3a3a2bac
     fi
 
4533eeec
     # Hack to set the path for rootwrap
     local old_path=$PATH
     export PATH=$NOVA_BIN_DIR:$PATH
 
b90bb1a4
     if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
d5537c1d
         run_process n-api "$NOVA_BIN_DIR/nova-api"
b90bb1a4
         nova_url=$service_protocol://$SERVICE_HOST:$service_port
         # Start proxy if tsl enabled
         if is_service_enabled tls-proxy; then
             start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT
         fi
     else
aceb27e8
         run_process "n-api" "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api --ini $NOVA_UWSGI_CONF"
b90bb1a4
         nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/
d5537c1d
     fi
 
3a3a2bac
     echo "Waiting for nova-api to start..."
b90bb1a4
     if ! wait_for_service $SERVICE_TIMEOUT $nova_url; then
101b4248
         die $LINENO "nova-api did not start"
3a3a2bac
     fi
 
4533eeec
     export PATH=$old_path
3a3a2bac
 }
 
afc14c8e
 # Detect and setup conditions under which singleconductor setup is
 # needed. Notably cellsv1.
 function _set_singleconductor {
     # NOTE(danms): Don't setup conductor fleet for cellsv1
     if is_service_enabled n-cell; then
         CELLSV2_SETUP="singleconductor"
     fi
 }
 
 
2e159460
 # start_nova_compute() - Start the compute process
aee18c74
 function start_nova_compute {
4533eeec
     # Hack to set the path for rootwrap
     local old_path=$PATH
     export PATH=$NOVA_BIN_DIR:$PATH
 
6db29904
     if is_service_enabled n-cell; then
         local compute_cell_conf=$NOVA_CELLS_CONF
     else
         local compute_cell_conf=$NOVA_CONF
     fi
fb2a3ae3
 
5adfef0a
     if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
f3d53315
         # NOTE(danms): Grenade doesn't setup multi-cell rabbit, so
         # skip these bits and use the normal config.
         NOVA_CPU_CONF=$compute_cell_conf
         echo "Skipping multi-cell conductor fleet setup"
     else
5adfef0a
         # "${CELLSV2_SETUP}" is "superconductor"
f3d53315
         cp $compute_cell_conf $NOVA_CPU_CONF
         # FIXME(danms): Should this be configurable?
         iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True
ab980ce5
         # Since the nova-compute service cannot reach nova-scheduler over
         # RPC, we also disable track_instance_changes.
         iniset $NOVA_CPU_CONF filter_scheduler track_instance_changes False
f3d53315
         iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}"
7d0003ef
         # Make sure we nuke any database config
         inidelete $NOVA_CPU_CONF database connection
         inidelete $NOVA_CPU_CONF api_database connection
f3d53315
     fi
 
65ad7940
     # Console proxies were configured earlier in create_nova_conf. Now that the
     # nova-cpu.conf has been created, configure the console settings required
     # by the compute process.
     configure_console_compute
 
b1e49bfd
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
3324f19f
         # ``sg`` is used in run_process to execute nova-compute as a member of the
2f27a0ed
         # **$LIBVIRT_GROUP** group.
f3d53315
         run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LIBVIRT_GROUP
0ffdfbdb
     elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then
f3d53315
         run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LXD_GROUP
53a49d10
     elif [[ "$VIRT_DRIVER" = 'docker' || "$VIRT_DRIVER" = 'zun' ]]; then
f3d53315
         run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $DOCKER_GROUP
2c94ee52
     elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
0038a1ac
         local i
101b4248
         for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
2f27a0ed
             # Avoid process redirection of fake host configurations by
             # creating or modifying real configurations. Each fake
             # gets its own configuration and own log file.
             local fake_conf="${NOVA_FAKE_CONF}-${i}"
ac475bbb
             iniset $fake_conf DEFAULT host "${HOSTNAME}${i}"
f3d53315
             run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf"
101b4248
         done
b1e49bfd
     else
2aa2a89c
         if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
             start_nova_hypervisor
         fi
f3d53315
         run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF"
b1e49bfd
     fi
4533eeec
 
     export PATH=$old_path
2e159460
 }
 
0eebeb41
 # start_nova() - Start running processes
aee18c74
 function start_nova_rest {
4533eeec
     # Hack to set the path for rootwrap
     local old_path=$PATH
     export PATH=$NOVA_BIN_DIR:$PATH
 
86199fcd
     local api_cell_conf=$NOVA_CONF
2e159460
     if is_service_enabled n-cell; then
86199fcd
         local compute_cell_conf=$NOVA_CELLS_CONF
     else
         local compute_cell_conf=$NOVA_CONF
2e159460
     fi
 
2f27a0ed
     # ``run_process`` checks ``is_service_enabled``, it is not needed here
     run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
     run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
b3a210f6
 
     if is_service_enabled n-net; then
0bf75a47
         if ! running_in_container; then
             enable_kernel_bridge_firewall
         fi
b3a210f6
     fi
2f27a0ed
     run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
b3a210f6
 
2f27a0ed
     run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
b90bb1a4
     if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
         run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
     else
aceb27e8
         run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF"
b90bb1a4
     fi
fb2a3ae3
 
ed2d4919
     # nova-consoleauth always runs globally
2f27a0ed
     run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
ed2d4919
 
     export PATH=$old_path
 }
 
 function enable_nova_console_proxies {
     for i in $(seq 1 $NOVA_NUM_CELLS); do
         for srv in n-novnc n-xvnc n-spice n-sproxy; do
             if is_service_enabled $srv; then
                 enable_service ${srv}-cell${i}
             fi
         done
     done
 }
 
 function start_nova_console_proxies {
     # Hack to set the path for rootwrap
     local old_path=$PATH
     # This is needed to find the nova conf
     export PATH=$NOVA_BIN_DIR:$PATH
 
     local api_cell_conf=$NOVA_CONF
     # console proxies run globally for singleconductor, else they run per cell
     if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
         run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
         run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
         run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
         run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf"
     else
         enable_nova_console_proxies
         for i in $(seq 1 $NOVA_NUM_CELLS); do
             local conf
             conf=$(conductor_conf $i)
             run_process n-novnc-cell${i} "$NOVA_BIN_DIR/nova-novncproxy --config-file $conf --web $NOVNC_WEB_DIR"
             run_process n-xvnc-cell${i} "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $conf"
             run_process n-spice-cell${i} "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $conf --web $SPICE_WEB_DIR"
             run_process n-sproxy-cell${i} "$NOVA_BIN_DIR/nova-serialproxy --config-file $conf"
         done
     fi
1c6c1125
 
4533eeec
     export PATH=$old_path
bf67c19c
 }
 
f3d53315
 function enable_nova_fleet {
     if is_service_enabled n-cond; then
         enable_service n-super-cond
         for i in $(seq 1 $NOVA_NUM_CELLS); do
             enable_service n-cond-cell${i}
         done
     fi
 }
 
 function start_nova_conductor {
afc14c8e
     if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
f3d53315
         echo "Starting nova-conductor in a cellsv1-compatible way"
afc14c8e
         run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_COND_CONF"
f3d53315
         return
     fi
 
     enable_nova_fleet
     if is_service_enabled n-super-cond; then
afc14c8e
         run_process n-super-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_COND_CONF"
f3d53315
     fi
     for i in $(seq 1 $NOVA_NUM_CELLS); do
         if is_service_enabled n-cond-cell${i}; then
             local conf
             conf=$(conductor_conf $i)
             run_process n-cond-cell${i} "$NOVA_BIN_DIR/nova-conductor --config-file $conf"
         fi
     done
 }
 
c2fe916f
 function is_nova_ready {
     # NOTE(sdague): with cells v2 all the compute services must be up
     # and checked into the database before discover_hosts is run. This
     # happens in all in one installs by accident, because > 30 seconds
     # happen between here and the script ending. However, in multinode
     # tests this can very often not be the case. So ensure that the
     # compute is up before we move on.
     if is_service_enabled n-cell; then
         # cells v1 can't complete the check below because it munges
         # hostnames with cell information (grumble grumble).
         return
     fi
     # TODO(sdague): honestly, this probably should be a plug point for
     # an external system.
     if [[ "$VIRT_DRIVER" == 'xenserver' ]]; then
         # xenserver encodes information in the hostname of the compute
         # because of the dom0/domU split. Just ignore for now.
         return
     fi
730ce454
     wait_for_compute $NOVA_READY_TIMEOUT
c2fe916f
 }
 
aee18c74
 function start_nova {
afc14c8e
     # this catches the cells v1 case early
     _set_singleconductor
2e159460
     start_nova_rest
ed2d4919
     start_nova_console_proxies
f3d53315
     start_nova_conductor
26ac3d7e
     start_nova_compute
afc14c8e
     if is_service_enabled n-api; then
         # dump the cell mapping to ensure life is good
         echo "Dumping cells_v2 mapping"
a62ede7d
         $NOVA_BIN_DIR/nova-manage cell_v2 list_cells --verbose
afc14c8e
     fi
2e159460
 }
 
767b5a45
 function stop_nova_compute {
d29ca35e
     if [ "$VIRT_DRIVER" == "fake" ]; then
         local i
         for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
             stop_process n-cpu-${i}
         done
     else
         stop_process n-cpu
     fi
767b5a45
     if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
         stop_nova_hypervisor
     fi
 }
 
 function stop_nova_rest {
b90bb1a4
     # Kill the non-compute nova processes
ed2d4919
     for serv in n-api n-api-meta n-net n-sch n-cauth n-cell n-cell; do
2f27a0ed
         stop_process $serv
bf67c19c
     done
767b5a45
 }
 
ed2d4919
 function stop_nova_console_proxies {
     if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
         for srv in n-novnc n-xvnc n-spice n-sproxy; do
             stop_process $srv
         done
     else
         enable_nova_console_proxies
         for i in $(seq 1 $NOVA_NUM_CELLS); do
             for srv in n-novnc n-xvnc n-spice n-sproxy; do
                 stop_process ${srv}-cell${i}
             done
         done
     fi
 }
 
f3d53315
 function stop_nova_conductor {
98c95f4f
     if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
         stop_process n-cond
         return
     fi
 
f3d53315
     enable_nova_fleet
     for srv in n-super-cond $(seq -f n-cond-cell%0.f 1 $NOVA_NUM_CELLS); do
         if is_service_enabled $srv; then
             stop_process $srv
         fi
     done
 }
 
0eebeb41
 # stop_nova() - Stop running processes
767b5a45
 function stop_nova {
     stop_nova_rest
ed2d4919
     stop_nova_console_proxies
f3d53315
     stop_nova_conductor
767b5a45
     stop_nova_compute
bf67c19c
 }
 
4b205db4
 # create_instance_types(): Create default flavors
 function create_flavors {
d3d21394
     if is_service_enabled n-api; then
878d7d8f
         if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q ds512M; then
d3d21394
             # Note that danms hates these flavors and apologizes for sdague
878d7d8f
             openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 0 --vcpus 1 cirros256
             openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 ds512M
             openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 ds1G
             openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 ds2G
             openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 ds4G
d3d21394
         fi
 
878d7d8f
         if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q m1.tiny; then
             openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 m1.tiny
             openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 m1.small
             openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 m1.medium
             openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 m1.large
             openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 m1.xlarge
d3d21394
         fi
4b205db4
     fi
 }
cc6b4435
 
bf67c19c
 # Restore xtrace
523f4880
 $_XTRACE_LIB_NOVA
584d90ec
 
6a5aa7c6
 # Tell emacs to use shell-script-mode
 ## Local variables:
 ## mode: shell-script
 ## End: