* Move shared volume configuration from stack.sh to stackrc
* Move Nova network and vnc/spice configuration settings from stack.sh
  into lib/nova
* Rename NET_MAN to NETWORK_MANAGER to match the nova.conf option name

Change-Id: I9bd2955def553499aa832eda1f0959afe494206a
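In practice the same knobs are simply read from stackrc/lib/nova now; a minimal, purely illustrative ``localrc`` using the renamed variable (every value below is an example, not a default introduced by this change):

    # Hypothetical localrc after this change; values are examples only.
    NETWORK_MANAGER=FlatDHCPManager    # formerly NET_MAN
    FLAT_INTERFACE=eth1                # interface bridged into FLAT_NETWORK_BRIDGE
    MULTI_HOST=True                    # each compute node runs its own network node
    VOLUME_GROUP=stack-volumes         # now defaulted in stackrc instead of stack.sh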
@@ -66,6 +66,59 @@ SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
 QEMU_CONF=/etc/libvirt/qemu.conf
 
 
+# Nova Network Configuration
+# --------------------------
+
+# Set defaults according to the virt driver
+if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+    PUBLIC_INTERFACE_DEFAULT=eth3
+    GUEST_INTERFACE_DEFAULT=eth1
+    # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
+    FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
+elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
+    NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager}
+    PUBLIC_INTERFACE_DEFAULT=eth0
+    FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
+    FLAT_NETWORK_BRIDGE_DEFAULT=br100
+    STUB_NETWORK=${STUB_NETWORK:-False}
+else
+    PUBLIC_INTERFACE_DEFAULT=br100
+    GUEST_INTERFACE_DEFAULT=eth0
+    FLAT_NETWORK_BRIDGE_DEFAULT=br100
+fi
+
+NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}
+PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
+VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
+FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
+EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
+
+# If you are using the FlatDHCP network mode on multiple hosts, set the
+# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
+# have an IP or you risk breaking things.
+#
+# **DHCP Warning**: If your flat interface device uses DHCP, there will be a
+# hiccup while the network is moved from the flat interface to the flat network
+# bridge. This will happen when you launch your first instance. Upon launch
+# you will lose all connectivity to the node, and the VM launch will probably
+# fail.
+#
+# If you are running on a single node and don't need to access the VMs from
+# devices other than that node, you can set ``FLAT_INTERFACE=``
+# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
+FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}
+
+# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This
+# allows network operations and routing for a VM to occur on the server that is
+# running the VM - removing a SPOF and bandwidth bottleneck.
+MULTI_HOST=`trueorfalse False $MULTI_HOST`
+
+# Test floating pool and range are used for testing. They are defined
+# here until the admin APIs can replace nova-manage
+TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
+TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
+
+
 # Entry Points
 # ------------
 
@@ -439,6 +492,49 @@ function create_nova_conf() {
         # Replace the first '=' with ' ' for iniset syntax
         iniset $NOVA_CONF DEFAULT ${I/=/ }
     done
+
+    # All nova-compute workers need to know the vnc configuration options
+    # These settings don't hurt anything if n-xvnc and n-novnc are disabled
+    if is_service_enabled n-cpu; then
+        NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
+        iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
+        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
+        iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
+        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
+        iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
+    fi
+    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
+    else
+        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+    fi
+
+    if is_service_enabled n-novnc || is_service_enabled n-xvnc; then
+        # Address on which instance vncservers will listen on compute hosts.
+        # For multi-host, this should be the management ip of the compute host.
+        VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
+        iniset $NOVA_CONF DEFAULT vnc_enabled true
+        iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
+        iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
+    else
+        iniset $NOVA_CONF DEFAULT vnc_enabled false
+    fi
+
+    if is_service_enabled n-spice; then
+        # Address on which instance spiceservers will listen on compute hosts.
+        # For multi-host, this should be the management ip of the compute host.
+        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+        SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
+        iniset $NOVA_CONF spice enabled true
+        iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
+        iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
+    else
+        iniset $NOVA_CONF spice enabled false
+    fi
+
+    iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
+    iniset_rpc_backend nova $NOVA_CONF DEFAULT
+    iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
 }
 
 # create_nova_cache_dir() - Part of the init_nova() process
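For reference, these ``iniset`` calls produce roughly the following ``nova.conf`` fragment, assuming the defaults above with n-novnc enabled, n-spice disabled, and an illustrative ``SERVICE_HOST=10.0.0.2``:

    [DEFAULT]
    novncproxy_base_url = http://10.0.0.2:6080/vnc_auto.html
    xvpvncproxy_base_url = http://10.0.0.2:6081/console
    vnc_enabled = true
    vncserver_listen = 127.0.0.1
    vncserver_proxyclient_address = 127.0.0.1

    [spice]
    html5proxy_base_url = http://10.0.0.2:6082/spice_auto.html
    enabled = false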
@@ -450,7 +546,7 @@ function create_nova_cache_dir() {
 }
 
 function create_nova_conf_nova_network() {
-    iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN"
+    iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
     iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
     iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
     iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
@@ -278,11 +278,6 @@ SWIFT3_DIR=$DEST/swift3
 # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
 CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
 
-# Name of the LVM volume group to use/create for iscsi volumes
-VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
-VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
-INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
-
 # Generic helper to configure passwords
 function read_password {
     XTRACE=$(set +o | grep xtrace)
@@ -326,64 +321,6 @@ function read_password {
 }
 
 
-# Nova Network Configuration
-# --------------------------
-
-# FIXME: more documentation about why these are important options. Also
-# we should make sure we use the same variable names as the option names.
-
-if [ "$VIRT_DRIVER" = 'xenserver' ]; then
-    PUBLIC_INTERFACE_DEFAULT=eth3
-    # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
-    FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
-    GUEST_INTERFACE_DEFAULT=eth1
-elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
-    PUBLIC_INTERFACE_DEFAULT=eth0
-    FLAT_NETWORK_BRIDGE_DEFAULT=br100
-    FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
-    FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-False}
-    NET_MAN=${NET_MAN:-FlatManager}
-    STUB_NETWORK=${STUB_NETWORK:-False}
-else
-    PUBLIC_INTERFACE_DEFAULT=br100
-    FLAT_NETWORK_BRIDGE_DEFAULT=br100
-    GUEST_INTERFACE_DEFAULT=eth0
-fi
-
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
-NET_MAN=${NET_MAN:-FlatDHCPManager}
-EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
-FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
-VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
-FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-True}
-
-# Test floating pool and range are used for testing. They are defined
-# here until the admin APIs can replace nova-manage
-TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
-TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
-
-# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This
-# allows network operations and routing for a VM to occur on the server that is
-# running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=`trueorfalse False $MULTI_HOST`
-
-# If you are using the FlatDHCP network mode on multiple hosts, set the
-# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
-# have an IP or you risk breaking things.
-#
-# **DHCP Warning**: If your flat interface device uses DHCP, there will be a
-# hiccup while the network is moved from the flat interface to the flat network
-# bridge. This will happen when you launch your first instance. Upon launch
-# you will lose all connectivity to the node, and the VM launch will probably
-# fail.
-#
-# If you are running on a single node and don't need to access the VMs from
-# devices other than that node, you can set ``FLAT_INTERFACE=``
-# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
-FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}
-
-## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
-
 # Database Configuration
 # ----------------------
 
@@ -980,48 +917,6 @@ if is_service_enabled nova; then
     elif is_service_enabled n-net; then
         create_nova_conf_nova_network
     fi
-    # All nova-compute workers need to know the vnc configuration options
-    # These settings don't hurt anything if n-xvnc and n-novnc are disabled
-    if is_service_enabled n-cpu; then
-        NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
-        iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
-        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
-        iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
-        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
-        iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
-    fi
-    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
-        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
-    else
-        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
-    fi
-
-    if is_service_enabled n-novnc || is_service_enabled n-xvnc; then
-        # Address on which instance vncservers will listen on compute hosts.
-        # For multi-host, this should be the management ip of the compute host.
-        VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
-        iniset $NOVA_CONF DEFAULT vnc_enabled true
-        iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
-        iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
-    else
-        iniset $NOVA_CONF DEFAULT vnc_enabled false
-    fi
-
-    if is_service_enabled n-spice; then
-        # Address on which instance spiceservers will listen on compute hosts.
-        # For multi-host, this should be the management ip of the compute host.
-        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
-        SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
-        iniset $NOVA_CONF spice enabled true
-        iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
-        iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
-    else
-        iniset $NOVA_CONF spice enabled false
-    fi
-
-    iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
-    iniset_rpc_backend nova $NOVA_CONF DEFAULT
-    iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
 
 
 # XenServer
@@ -196,5 +196,10 @@ esac
 # 5Gb default volume backing file size
 VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M}
 
+# Name of the LVM volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
+
 PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
 PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"}
@@ -6,7 +6,7 @@
 SHELL_AFTER_RUN=no
 
 # Variables common amongst all hosts in the cluster
-COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NET_MAN=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN"
+COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NETWORK_MANAGER=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN"
 
 # Helper to launch containers
 function run_bm {
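``COMMON_VARS`` is meant to be expanded into the environment of each cluster host's stack.sh run, which is why the rename has to land here too; one way such a helper might consume it (the ``ssh`` line and ``COMPUTE_HOST`` variable are hypothetical, not from this change):

    # Illustrative only: run stack.sh on a worker with the shared settings.
    ssh stack@$COMPUTE_HOST "cd devstack && env $COMMON_VARS ./stack.sh"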