# lib/nova
# Functions to control the configuration and operation of the **Nova** service

# Dependencies:
# ``functions`` file
# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
# ``LIBVIRT_TYPE`` must be defined
# ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined
# ``KEYSTONE_TOKEN_FORMAT`` must be defined

# ``stack.sh`` calls the entry points in this order:
#
# install_nova
# configure_nova
# create_nova_conf
# init_nova
# start_nova
# stop_nova
# cleanup_nova
# Save trace setting so it can be restored at the end of this file
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set up default directories
NOVA_DIR=$DEST/nova
NOVACLIENT_DIR=$DEST/python-novaclient
NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova}
# INSTANCES_PATH is the previous name for this
NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}}
NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova}

NOVA_CONF_DIR=/etc/nova
NOVA_CONF=$NOVA_CONF_DIR/nova.conf
NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}

# Public facing bits
NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}

# Support entry points installation of console scripts
if [[ -d $NOVA_DIR/bin ]]; then
    NOVA_BIN_DIR=$NOVA_DIR/bin
else
    NOVA_BIN_DIR=$(get_python_exec_prefix)
fi

# Set the paths of certain binaries
NOVA_ROOTWRAP=$(get_rootwrap_location nova)

# Allow rate limiting to be turned off for testing, like for Tempest
# NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting
API_RATE_LIMIT=${API_RATE_LIMIT:-"True"}

# Nova supports pluggable schedulers.  The default ``FilterScheduler``
# should work in most cases.
SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}

QEMU_CONF=/etc/libvirt/qemu.conf

NOVNC_DIR=$DEST/noVNC
SPICE_DIR=$DEST/spice-html5


# Nova Network Configuration
# --------------------------

# Set defaults according to the virt driver
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
    PUBLIC_INTERFACE_DEFAULT=eth3
    GUEST_INTERFACE_DEFAULT=eth1
    # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
    FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
    NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager}
    PUBLIC_INTERFACE_DEFAULT=eth0
    FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
    FLAT_NETWORK_BRIDGE_DEFAULT=br100
    STUB_NETWORK=${STUB_NETWORK:-False}
else
    PUBLIC_INTERFACE_DEFAULT=br100
    GUEST_INTERFACE_DEFAULT=eth0
    FLAT_NETWORK_BRIDGE_DEFAULT=br100
fi

NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}
PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}

# If you are using the FlatDHCP network mode on multiple hosts, set the
# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
# have an IP or you risk breaking things.
#
# **DHCP Warning**: If your flat interface device uses DHCP, there will be a
# hiccup while the network is moved from the flat interface to the flat network
# bridge.  This will happen when you launch your first instance.  Upon launch
# you will lose all connectivity to the node, and the VM launch will probably
# fail.
#
# If you are running on a single node and don't need to access the VMs from
# devices other than that node, you can set ``FLAT_INTERFACE=``
# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT}

# ``MULTI_HOST`` is a mode where each compute node runs its own network node.  This
# allows network operations and routing for a VM to occur on the server that is
# running the VM - removing a SPOF and bandwidth bottleneck.
MULTI_HOST=$(trueorfalse False $MULTI_HOST)

# Test floating pool and range are used for testing.  They are defined
# here until the admin APIs can replace nova-manage
TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}


# Functions
# ---------
bf67c19c |
# Append a single raw line to ``nova.conf``.
# Arguments: $1 - the exact line to append (e.g. "[DEFAULT]" or "key=value")
function add_nova_opt {
    local line=$1
    printf '%s\n' "$line" >>"$NOVA_CONF"
}
# Helper to clean iptables rules
# Removes every nova-related rule and chain from both the filter and nat
# tables.  All four pipelines follow the same pattern:
#   - ``iptables -S -v`` dumps rules in "-A ..."/"-N ..." form with
#     packet/byte counters; the first ``sed`` strips the ``-c <pkts> <bytes>``
#     counter tokens so the output is replayable
#   - ``grep "nova"`` keeps only nova-managed entries
#   - the second ``sed`` flips the action: ``-A`` -> ``-D`` (delete rule),
#     ``-N`` -> ``-X`` (delete chain)
#   - ``awk`` prefixes the appropriate ``sudo iptables [-t nat]`` invocation
#     and the generated commands are piped to ``bash`` for execution
# Rules are deleted before chains: a chain can only be removed once empty.
function clean_iptables() {
# Delete rules
sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash
# Delete nat rules
sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables -t nat",$0}' | bash
# Delete chains
sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables",$0}' | bash
# Delete nat chains
sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash
}
# cleanup_nova() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_nova() {
    if is_service_enabled n-cpu; then
        # Clean iptables from previous runs
        clean_iptables

        # Destroy old instances: extract the libvirt domain names that match
        # the configured instance name prefix and tear each one down.
        instances=$(sudo virsh list --all | grep "$INSTANCE_NAME_PREFIX" | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g")
        if [ ! "$instances" = "" ]; then
            # Best-effort: a domain may already be stopped or undefined
            echo $instances | xargs -n1 sudo virsh destroy || true
            echo $instances | xargs -n1 sudo virsh undefine || true
        fi

        # Logout and delete iscsi sessions left behind by attached volumes
        tgts=$(sudo iscsiadm --mode node | grep "$VOLUME_NAME_PREFIX" | cut -d ' ' -f2)
        for target in $tgts; do
            sudo iscsiadm --mode node -T "$target" --logout || true
        done
        sudo iscsiadm --mode node --op delete || true

        # Clean out the instances directory.
        sudo rm -rf $NOVA_INSTANCES_PATH/*
    fi

    sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR
}
# configure_nova_rootwrap() - configure Nova's rootwrap
# Installs root-owned rootwrap filter files and rootwrap.conf under
# $NOVA_CONF_DIR, then grants the current user passwordless sudo for
# nova-rootwrap (and only nova-rootwrap) via /etc/sudoers.d.
function configure_nova_rootwrap() {
    # Deploy new rootwrap filters files (owned by root).
    # Wipe any existing rootwrap.d files first
    if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then
        sudo rm -rf $NOVA_CONF_DIR/rootwrap.d
    fi

    # Deploy filters to /etc/nova/rootwrap.d
    sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d
    sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d
    sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d
    sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/*

    # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d
    sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/
    sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf
    sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf
    sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf

    # Specify rootwrap.conf as first parameter to nova-rootwrap
    ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *"

    # Set up the rootwrap sudoers for nova: stage the entry in a temp file,
    # fix ownership/mode, then atomically move it into /etc/sudoers.d
    TEMPFILE=$(mktemp)
    echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
    chmod 0440 $TEMPFILE
    sudo chown root:root $TEMPFILE
    sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap
}
# configure_nova() - Set config files, create data dirs, etc
function configure_nova() {
    # Put config files in ``/etc/nova`` for everyone to find
    if [[ ! -d $NOVA_CONF_DIR ]]; then
        sudo mkdir -p $NOVA_CONF_DIR
    fi
    sudo chown $STACK_USER $NOVA_CONF_DIR

    cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR

    configure_nova_rootwrap

    if is_service_enabled n-api; then
        # Use the sample http middleware configuration supplied in the
        # Nova sources.  This paste config adds the configuration required
        # for Nova to validate Keystone tokens.

        # Remove legacy paste config if present
        rm -f $NOVA_DIR/bin/nova-api-paste.ini

        # Get the sample configuration file in place
        cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR

        iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
        if is_service_enabled tls-proxy; then
            iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
        fi
        iniset $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
        iniset $NOVA_API_PASTE_INI filter:authtoken admin_user nova
        iniset $NOVA_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
    fi

    iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR

    if is_service_enabled n-cpu; then
        # Force IP forwarding on, just on case
        sudo sysctl -w net.ipv4.ip_forward=1

        # Attempt to load modules: network block device - used to manage qcow images
        sudo modprobe nbd || true

        # Check for kvm (hardware based virtualization).  If unable to initialize
        # kvm, we drop back to the slower emulation mode (qemu).  Note: many systems
        # come with hardware virtualization disabled in BIOS.
        if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then
            sudo modprobe kvm || true
            if [ ! -e /dev/kvm ]; then
                echo "WARNING: Switching to QEMU"
                LIBVIRT_TYPE=qemu
                if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then
                    # https://bugzilla.redhat.com/show_bug.cgi?id=753589
                    sudo setsebool virt_use_execmem on
                fi
            fi
        fi

        # Install and configure **LXC** if specified.  LXC is another approach to
        # splitting a system into many smaller parts.  LXC uses cgroups and chroot
        # to simulate multiple systems.
        if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
            if is_ubuntu; then
                if [[ ! "$DISTRO" > natty ]]; then
                    cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
                    sudo mkdir -p /cgroup
                    if ! grep -q cgroup /etc/fstab; then
                        echo "$cgline" | sudo tee -a /etc/fstab
                    fi
                    if ! mount -n | grep -q cgroup; then
                        sudo mount /cgroup
                    fi
                fi
            fi
        fi

        # Prepare directories and packages for baremetal driver
        if is_baremetal; then
            configure_baremetal_nova_dirs
        fi

        if is_service_enabled quantum && is_quantum_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then
            # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
            cat <<EOF | sudo tee -a $QEMU_CONF
cgroup_device_acl = [
    "/dev/null", "/dev/full", "/dev/zero",
    "/dev/random", "/dev/urandom",
    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
    "/dev/rtc", "/dev/hpet","/dev/net/tun",
]
EOF
        fi

        if is_ubuntu; then
            LIBVIRT_DAEMON=libvirt-bin
        else
            LIBVIRT_DAEMON=libvirtd
        fi

        if is_fedora; then
            # Starting with fedora 18 enable stack-user to virsh -c qemu:///system
            # by creating a policy-kit rule for stack-user
            if [[ "$os_RELEASE" -ge "18" ]]; then
                rules_dir=/etc/polkit-1/rules.d
                sudo mkdir -p $rules_dir
                sudo bash -c "cat <<EOF > $rules_dir/50-libvirt-$STACK_USER.rules
polkit.addRule(function(action, subject) {
    if (action.id == 'org.libvirt.unix.manage' &&
        subject.user == '"$STACK_USER"') {
        return polkit.Result.YES;
    }
});
EOF"
                unset rules_dir
            else
                sudo bash -c 'cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
[libvirt Management Access]
Identity=unix-group:libvirtd
Action=org.libvirt.unix.manage
ResultAny=yes
ResultInactive=yes
ResultActive=yes
EOF'
            fi
        elif is_suse; then
            # Work around the fact that polkit-default-privs overrules pklas
            # with 'unix-group:$group'.
            sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
[libvirt Management Access]
Identity=unix-user:$USER
Action=org.libvirt.unix.manage
ResultAny=yes
ResultInactive=yes
ResultActive=yes
EOF"
        fi

        # The user that nova runs as needs to be member of **libvirtd** group otherwise
        # nova-compute will be unable to use libvirt.
        if ! getent group libvirtd >/dev/null; then
            sudo groupadd libvirtd
        fi
        add_user_to_group $STACK_USER libvirtd

        # libvirt detects various settings on startup, as we potentially changed
        # the system configuration (modules, filesystems), we need to restart
        # libvirt to detect those changes.
        restart_service $LIBVIRT_DAEMON


        # Instance Storage
        # ----------------

        # Nova stores each instance in its own directory.
        sudo mkdir -p $NOVA_INSTANCES_PATH
        sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH

        # You can specify a different disk to be mounted and used for backing the
        # virtual machines.  If there is a partition labeled nova-instances we
        # mount it (ext filesystems can be labeled via e2label).
        if [ -L /dev/disk/by-label/nova-instances ]; then
            if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then
                sudo mount -L nova-instances $NOVA_INSTANCES_PATH
                sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH
            fi
        fi
    fi
}
|
a0dce264 |
# create_nova_accounts() - Set up common required nova accounts
# Tenant User Roles
# ------------------------------------------------------------------
# service nova admin, [ResellerAdmin (swift only)]
# Migrated from keystone_data.sh
create_nova_accounts() {
SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
# Nova
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
NOVA_USER=$(keystone user-create \
--name=nova \
--pass="$SERVICE_PASSWORD" \
--tenant_id $SERVICE_TENANT \
--email=nova@example.com \
| grep " id " | get_field 2)
keystone user-role-add \
--tenant_id $SERVICE_TENANT \
--user_id $NOVA_USER \
--role_id $ADMIN_ROLE
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
NOVA_SERVICE=$(keystone service-create \
--name=nova \
--type=compute \
--description="Nova Compute Service" \
| grep " id " | get_field 2)
keystone endpoint-create \
--region RegionOne \
--service_id $NOVA_SERVICE \ |
3a3a2bac |
--publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
--adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
--internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" |
a0dce264 |
fi
fi
}
|
# create_nova_conf() - Create a new nova.conf file
function create_nova_conf() {
    # Remove legacy ``nova.conf``
    rm -f $NOVA_DIR/bin/nova.conf

    # (Re)create ``nova.conf``
    rm -f $NOVA_CONF
    add_nova_opt "[DEFAULT]"
    iniset $NOVA_CONF DEFAULT verbose "True"
    iniset $NOVA_CONF DEFAULT debug "True"
    iniset $NOVA_CONF DEFAULT auth_strategy "keystone"
    iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True"
    iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI"
    iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
    iniset $NOVA_CONF DEFAULT compute_scheduler_driver "$SCHEDULER"
    iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF"
    iniset $NOVA_CONF DEFAULT force_dhcp_release "True"
    iniset $NOVA_CONF DEFAULT fixed_range ""
    iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
    iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST"
    iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT"
    iniset $NOVA_CONF DEFAULT osapi_compute_extension "nova.api.openstack.compute.contrib.standard_extensions"
    iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
    iniset $NOVA_CONF DEFAULT sql_connection "$(database_connection_url nova)"
    if is_baremetal; then
        iniset $NOVA_CONF baremetal sql_connection "$(database_connection_url nova_bm)"
    fi
    iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE"
    iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none"
    iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"

    if is_service_enabled n-api; then
        iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
        if is_service_enabled tls-proxy; then
            # Set the service port for a proxy to take the original
            iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT"
        fi
    fi
    if is_service_enabled cinder; then
        iniset $NOVA_CONF DEFAULT volume_api_class "nova.volume.cinder.API"
    fi
    if [ -n "$NOVA_STATE_PATH" ]; then
        iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH"
        iniset $NOVA_CONF DEFAULT lock_path "$NOVA_STATE_PATH"
    fi
    if [ -n "$NOVA_INSTANCES_PATH" ]; then
        iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH"
    fi
    if [ "$MULTI_HOST" != "False" ]; then
        iniset $NOVA_CONF DEFAULT multi_host "True"
        iniset $NOVA_CONF DEFAULT send_arp_for_ha "True"
    fi
    if [ "$SYSLOG" != "False" ]; then
        iniset $NOVA_CONF DEFAULT use_syslog "True"
    fi
    if [ "$API_RATE_LIMIT" != "True" ]; then
        iniset $NOVA_CONF DEFAULT api_rate_limit "False"
    fi
    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
        # Add color to logging output
        iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[01;36m%(request_id)s [00;36m%(user_name)s %(project_name)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
        iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
        iniset $NOVA_CONF DEFAULT logging_debug_format_suffix "[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d[00m"
        iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s [01;35m%(instance)s[00m"
    else
        # Show user_name and project_name instead of user_id and project_id
        iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
    fi
    if is_service_enabled ceilometer; then
        iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
        iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour"
        iniset_multiline $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" "ceilometer.compute.nova_notifier"
    fi

    # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS``
    if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then
        EXTRA_OPTS=$EXTRA_FLAGS
    fi

    # Define extra nova conf flags by defining the array ``EXTRA_OPTS``.
    # For Example: ``EXTRA_OPTS=(foo=true bar=2)``
    for I in "${EXTRA_OPTS[@]}"; do
        # Replace the first '=' with ' ' for iniset syntax
        iniset $NOVA_CONF DEFAULT ${I/=/ }
    done

    # All nova-compute workers need to know the vnc configuration options
    # These settings don't hurt anything if n-xvnc and n-novnc are disabled
    if is_service_enabled n-cpu; then
        NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
        iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
        iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
        iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
    fi
    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
    else
        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
    fi

    if is_service_enabled n-novnc || is_service_enabled n-xvnc; then
        # Address on which instance vncservers will listen on compute hosts.
        # For multi-host, this should be the management ip of the compute host.
        VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
        iniset $NOVA_CONF DEFAULT vnc_enabled true
        iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
        iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
    else
        iniset $NOVA_CONF DEFAULT vnc_enabled false
    fi

    if is_service_enabled n-spice; then
        # Address on which instance spiceservers will listen on compute hosts.
        # For multi-host, this should be the management ip of the compute host.
        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
        SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
        iniset $NOVA_CONF spice enabled true
        iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
        iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
    else
        iniset $NOVA_CONF spice enabled false
    fi

    iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
    iniset_rpc_backend nova $NOVA_CONF DEFAULT
    iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
}
bf67c19c |
|
f03bafeb |
# create_nova_cache_dir() - Part of the init_nova() process
# Creates the keystone auth-token cache directory (``NOVA_AUTH_CACHE_DIR``,
# wired up as the authtoken ``signing_dir`` in configure_nova) and hands
# ownership to ``STACK_USER`` so the nova services can write to it.
function create_nova_cache_dir() {
# Create cache dir
sudo mkdir -p $NOVA_AUTH_CACHE_DIR
sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR
# Wipe any stale cached files from a previous run
rm -f $NOVA_AUTH_CACHE_DIR/*
}
|
# create_nova_conf_nova_network() - Set the nova-network options in nova.conf
# Uses the network defaults computed at the top of this file
# (NETWORK_MANAGER, PUBLIC_INTERFACE, VLAN_INTERFACE, FLAT_NETWORK_BRIDGE,
# FLAT_INTERFACE).
function create_nova_conf_nova_network() {
    iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
    iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
    iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
    iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
    # An empty FLAT_INTERFACE is valid: it stops nova from bridging any
    # interface into FLAT_NETWORK_BRIDGE (see the note near its default)
    if [ -n "$FLAT_INTERFACE" ]; then
        iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE"
    fi
}
|
f03bafeb |
# create_nova_keys_dir() - Part of the init_nova() process
# Ensures ``$NOVA_STATE_PATH/keys`` exists and that the entire state path
# tree is owned by ``STACK_USER``.
function create_nova_keys_dir() {
# Create keys dir
sudo mkdir -p ${NOVA_STATE_PATH}/keys
# Recursive chown: nova writes elsewhere under the state path as well
sudo chown -R $STACK_USER ${NOVA_STATE_PATH}
}
|
# init_nova() - Initialize databases, etc.
function init_nova() {
    # All nova components talk to a central database.
    # Only do this step once on the API node for an entire cluster.
    if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
        # (Re)create nova database
        # Explicitly use latin1: to avoid lp#829209, nova expects the database to
        # use latin1 by default, and then upgrades the database to utf8 (see the
        # 082_essex.py in nova)
        recreate_database nova latin1

        # Migrate nova database
        $NOVA_BIN_DIR/nova-manage db sync

        # (Re)create nova baremetal database
        if is_baremetal; then
            recreate_database nova_bm latin1
            $NOVA_BIN_DIR/nova-baremetal-manage db sync
        fi
    fi

    create_nova_cache_dir
    create_nova_keys_dir
}
# install_novaclient() - Collect source and prepare
# Clones python-novaclient and installs it in development mode.
function install_novaclient() {
    git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
    setup_develop $NOVACLIENT_DIR
}
# install_nova() - Collect source and prepare
# Installs the hypervisor packages when n-cpu is enabled, then clones
# nova and installs it in development mode.
function install_nova() {
    if is_service_enabled n-cpu; then
        if is_ubuntu; then
            install_package libvirt-bin
        elif is_fedora || is_suse; then
            install_package libvirt
        else
            exit_distro_not_supported "libvirt installation"
        fi

        # Install and configure **LXC** if specified.  LXC is another approach to
        # splitting a system into many smaller parts.  LXC uses cgroups and chroot
        # to simulate multiple systems.
        if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
            if is_ubuntu; then
                if [[ "$DISTRO" > natty ]]; then
                    install_package cgroup-lite
                fi
            else
                ### FIXME(dtroyer): figure this out
                echo "RPM-based cgroup not implemented yet"
                yum_install libcgroup-tools
            fi
        fi
    fi

    git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
    setup_develop $NOVA_DIR
}
|
# start_nova_api() - Start the API process ahead of other things
# Waits for the API to answer before returning, and starts the TLS proxy
# in front of it when tls-proxy is enabled.
function start_nova_api() {
    # Get right service port for testing: when tls-proxy is enabled the API
    # itself listens on the internal port and the proxy owns the public one
    local service_port=$NOVA_SERVICE_PORT
    if is_service_enabled tls-proxy; then
        service_port=$NOVA_SERVICE_PORT_INT
    fi

    screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
    echo "Waiting for nova-api to start..."
    if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
        die $LINENO "nova-api did not start"
    fi

    # Start proxies if enabled
    if is_service_enabled tls-proxy; then
        start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT &
    fi
}
|
# start_nova() - Start running processes, including screen
function start_nova() {
    # The group **libvirtd** is added to the current user in this script.
    # Use 'sg' to execute nova-compute as a member of the **libvirtd** group.
    # ``screen_it`` checks ``is_service_enabled``, it is not needed here
    screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor"
    screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute"
    screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert"
    screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network"
    screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler"
    screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR"
    screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF"
    screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR"
    screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth"

    # Starting the nova-objectstore only if swift3 service is not enabled.
    # Swift will act as s3 objectstore.
    is_service_enabled swift3 || \
        screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore"
}
# stop_nova() - Stop running processes (non-screen)
function stop_nova() {
    # Kill the nova screen windows
    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond n-spice; do
        screen -S $SCREEN_NAME -p $serv -X kill
    done
}
# Restore xtrace to whatever it was when this file was sourced
$XTRACE

# Local variables:
# mode: shell-script
# End: