Browse code

Move Glance data and Nova state dirs out of source dir

* allow NOVA_STATE_PATH to be overridden,
default is now /opt/stack/data/nova
* add NOVA_INSTANCES_PATH to move the instances dir separately
from the state dir
* allow GLANCE_CACHE_DIR to be overridden,
default is now /opt/stack/data/glance/cache
* allow GLANCE_IMAGE_DIR to be overridden,
default is now /opt/stack/data/glance/images
* set GLANCE_BIN_DIR to support entry points (future)
* allow CINDER_STATE_PATH to be overridden,
default is now /opt/stack/data/cinder

Change-Id: If95dc19b957ef5b9b14397835cd0543f82717f50

Dean Troyer authored on 2012/09/14 04:02:01
Showing 3 changed files
... ...
@@ -3,6 +3,7 @@
3 3
 
4 4
 # Dependencies:
5 5
 # - functions
6
+# - DEST, DATA_DIR must be defined
6 7
 # - KEYSTONE_AUTH_* must be defined
7 8
 # SERVICE_{TENANT_NAME|PASSWORD} must be defined
8 9
 
... ...
@@ -25,14 +26,17 @@ set -o xtrace
25 25
 
26 26
 # set up default directories
27 27
 CINDER_DIR=$DEST/cinder
28
-if [ -d $CINDER_DIR/bin ] ; then
28
+CINDERCLIENT_DIR=$DEST/python-cinderclient
29
+CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder}
30
+CINDER_CONF_DIR=/etc/cinder
31
+CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
32
+
33
+# Support entry points installation of console scripts
34
+if [[ -d $CINDER_DIR/bin ]]; then
29 35
     CINDER_BIN_DIR=$CINDER_DIR/bin
30 36
 else
31 37
     CINDER_BIN_DIR=/usr/local/bin
32 38
 fi
33
-CINDERCLIENT_DIR=$DEST/python-cinderclient
34
-CINDER_CONF_DIR=/etc/cinder
35
-CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
36 39
 
37 40
 # Name of the lvm volume group to use/create for iscsi volumes
38 41
 VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
... ...
@@ -112,6 +116,7 @@ function configure_cinder() {
112 112
     iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
113 113
     iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}"
114 114
     iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions
115
+    iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
115 116
 
116 117
     if is_service_enabled qpid ; then
117 118
         iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid
... ...
@@ -162,7 +167,7 @@ function init_cinder() {
162 162
             if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi
163 163
         fi
164 164
 
165
-        mkdir -p $CINDER_DIR/volumes
165
+        mkdir -p $CINDER_STATE_PATH/volumes
166 166
 
167 167
         if sudo vgs $VOLUME_GROUP; then
168 168
             if [[ "$os_PACKAGE" = "rpm" ]]; then
... ...
@@ -3,7 +3,9 @@
3 3
 
4 4
 # Dependencies:
5 5
 # - functions
6
+# - DATA_DIR must be defined
6 7
 # - KEYSTONE_AUTH_* must be defined
8
+# - NOVA_DIR, NOVA_BIN_DIR, NOVA_STATE_PATH must be defined
7 9
 # SERVICE_{TENANT_NAME|PASSWORD} must be defined
8 10
 # _configure_tgt_for_config_d() from lib/cinder
9 11
 
... ...
@@ -64,7 +66,7 @@ function init_nvol() {
64 64
         if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi
65 65
     fi
66 66
 
67
-    mkdir -p $NOVA_DIR/volumes
67
+    mkdir -p $NOVA_STATE_PATH/volumes
68 68
 
69 69
     if sudo vgs $VOLUME_GROUP; then
70 70
         if [[ "$os_PACKAGE" = "rpm" ]]; then
... ...
@@ -97,7 +99,7 @@ function start_nvol() {
97 97
     if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then
98 98
         _configure_tgt_for_config_d
99 99
        sudo mkdir -p /etc/tgt/conf.d
100
-       echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf
100
+       echo "include $NOVA_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf
101 101
     fi
102 102
 
103 103
     if [[ "$os_PACKAGE" = "deb" ]]; then
... ...
@@ -109,7 +111,7 @@ function start_nvol() {
109 109
         restart_service tgtd
110 110
     fi
111 111
 
112
-    screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume"
112
+    screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume"
113 113
 }
114 114
 
115 115
 # stop_nvol() - Stop running processes (non-screen)
... ...
@@ -317,11 +317,7 @@ source $TOP_DIR/lib/heat
317 317
 source $TOP_DIR/lib/quantum
318 318
 
319 319
 # Set the destination directories for OpenStack projects
320
-NOVA_DIR=$DEST/nova
321 320
 HORIZON_DIR=$DEST/horizon
322
-GLANCE_DIR=$DEST/glance
323
-GLANCECLIENT_DIR=$DEST/python-glanceclient
324
-NOVACLIENT_DIR=$DEST/python-novaclient
325 321
 OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
326 322
 NOVNC_DIR=$DEST/noVNC
327 323
 SWIFT_DIR=$DEST/swift
... ...
@@ -330,6 +326,33 @@ SWIFTCLIENT_DIR=$DEST/python-swiftclient
330 330
 QUANTUM_DIR=$DEST/quantum
331 331
 QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
332 332
 
333
+# Nova defaults
334
+NOVA_DIR=$DEST/nova
335
+NOVACLIENT_DIR=$DEST/python-novaclient
336
+NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova}
337
+# INSTANCES_PATH is the previous name for this
338
+NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}}
339
+
340
+# Support entry points installation of console scripts
341
+if [[ -d $NOVA_DIR/bin ]]; then
342
+    NOVA_BIN_DIR=$NOVA_DIR/bin
343
+else
344
+    NOVA_BIN_DIR=/usr/local/bin
345
+fi
346
+
347
+# Glance defaults
348
+GLANCE_DIR=$DEST/glance
349
+GLANCECLIENT_DIR=$DEST/python-glanceclient
350
+GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache}
351
+GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images}
352
+
353
+# Support entry points installation of console scripts
354
+if [[ -d $GLANCE_DIR/bin ]]; then
355
+    GLANCE_BIN_DIR=$GLANCE_DIR/bin
356
+else
357
+    GLANCE_BIN_DIR=/usr/local/bin
358
+fi
359
+
333 360
 # Default Quantum Plugin
334 361
 Q_PLUGIN=${Q_PLUGIN:-openvswitch}
335 362
 # Default Quantum Port
... ...
@@ -1062,13 +1085,11 @@ if is_service_enabled g-reg; then
1062 1062
     fi
1063 1063
     sudo chown `whoami` $GLANCE_CONF_DIR
1064 1064
 
1065
-    GLANCE_IMAGE_DIR=$DEST/glance/images
1066 1065
     # Delete existing images
1067 1066
     rm -rf $GLANCE_IMAGE_DIR
1068 1067
     mkdir -p $GLANCE_IMAGE_DIR
1069 1068
 
1070
-    GLANCE_CACHE_DIR=$DEST/glance/cache
1071
-    # Delete existing images
1069
+    # Delete existing cache
1072 1070
     rm -rf $GLANCE_CACHE_DIR
1073 1071
     mkdir -p $GLANCE_CACHE_DIR
1074 1072
 
... ...
@@ -1144,7 +1165,7 @@ if is_service_enabled g-reg; then
1144 1144
     GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
1145 1145
     cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
1146 1146
 
1147
-    $GLANCE_DIR/bin/glance-manage db_sync
1147
+    $GLANCE_BIN_DIR/glance-manage db_sync
1148 1148
 
1149 1149
 fi
1150 1150
 
... ...
@@ -1613,15 +1634,15 @@ EOF'
1613 1613
     # ~~~~~~~~~~~~~~~~
1614 1614
 
1615 1615
     # Nova stores each instance in its own directory.
1616
-    mkdir -p $NOVA_DIR/instances
1616
+    mkdir -p $NOVA_INSTANCES_PATH
1617 1617
 
1618 1618
     # You can specify a different disk to be mounted and used for backing the
1619 1619
     # virtual machines.  If there is a partition labeled nova-instances we
1620 1620
     # mount it (ext filesystems can be labeled via e2label).
1621 1621
     if [ -L /dev/disk/by-label/nova-instances ]; then
1622
-        if ! mount -n | grep -q $NOVA_DIR/instances; then
1623
-            sudo mount -L nova-instances $NOVA_DIR/instances
1624
-            sudo chown -R `whoami` $NOVA_DIR/instances
1622
+        if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then
1623
+            sudo mount -L nova-instances $NOVA_INSTANCES_PATH
1624
+            sudo chown -R `whoami` $NOVA_INSTANCES_PATH
1625 1625
         fi
1626 1626
     fi
1627 1627
 
... ...
@@ -1640,15 +1661,15 @@ EOF'
1640 1640
     sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | sudo iscsiadm --mode node --op delete || true
1641 1641
 
1642 1642
     # Clean out the instances directory.
1643
-    sudo rm -rf $NOVA_DIR/instances/*
1643
+    sudo rm -rf $NOVA_INSTANCES_PATH/*
1644 1644
 fi
1645 1645
 
1646 1646
 if is_service_enabled n-net q-dhcp; then
1647 1647
     # Delete traces of nova networks from prior runs
1648 1648
     sudo killall dnsmasq || true
1649 1649
     clean_iptables
1650
-    rm -rf $NOVA_DIR/networks
1651
-    mkdir -p $NOVA_DIR/networks
1650
+    rm -rf $NOVA_STATE_PATH/networks
1651
+    mkdir -p $NOVA_STATE_PATH/networks
1652 1652
 
1653 1653
     # Force IP forwarding on, just on case
1654 1654
     sudo sysctl -w net.ipv4.ip_forward=1
... ...
@@ -1918,13 +1939,6 @@ elif is_service_enabled n-vol; then
1918 1918
     init_nvol
1919 1919
 fi
1920 1920
 
1921
-# Support entry points installation of console scripts
1922
-if [ -d $NOVA_DIR/bin ] ; then
1923
-    NOVA_BIN_DIR=$NOVA_DIR/bin
1924
-else
1925
-    NOVA_BIN_DIR=/usr/local/bin
1926
-fi
1927
-
1928 1921
 NOVA_CONF=nova.conf
1929 1922
 function add_nova_opt {
1930 1923
     echo "$1" >> $NOVA_CONF_DIR/$NOVA_CONF
... ...
@@ -2016,8 +2030,11 @@ elif [ -n "$RABBIT_HOST" ] &&  [ -n "$RABBIT_PASSWORD" ]; then
2016 2016
 fi
2017 2017
 add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT"
2018 2018
 add_nova_opt "force_dhcp_release=True"
2019
-if [ -n "$INSTANCES_PATH" ]; then
2020
-    add_nova_opt "instances_path=$INSTANCES_PATH"
2019
+if [ -n "$NOVA_STATE_PATH" ]; then
2020
+    add_nova_opt "state_path=$NOVA_STATE_PATH"
2021
+fi
2022
+if [ -n "$NOVA_INSTANCES_PATH" ]; then
2023
+    add_nova_opt "instances_path=$NOVA_INSTANCES_PATH"
2021 2024
 fi
2022 2025
 if [ "$MULTI_HOST" != "False" ]; then
2023 2026
     add_nova_opt "multi_host=True"
... ...
@@ -2124,12 +2141,12 @@ fi
2124 2124
 
2125 2125
 # Launch the glance registry service
2126 2126
 if is_service_enabled g-reg; then
2127
-    screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
2127
+    screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
2128 2128
 fi
2129 2129
 
2130 2130
 # Launch the glance api and wait for it to answer before continuing
2131 2131
 if is_service_enabled g-api; then
2132
-    screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
2132
+    screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
2133 2133
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
2134 2134
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
2135 2135
       echo "g-api did not start"