Ability to use a remote Ceph cluster

Sometimes we want to run benchmarks on virtual machines that are backed
by a Ceph cluster. The first idea that comes to mind is to use DevStack
to quickly get an OpenStack environment up and running, but how should
DevStack be configured to use such a remote cluster?

Thanks to this commit it is now possible to use an already existing Ceph
cluster. In that case DevStack only needs two things:

* the location of the Ceph configuration file (by default DevStack will
  look for /etc/ceph/ceph.conf)
* the admin keyring of the remote Ceph cluster (by default DevStack will
  look for /etc/ceph/ceph.client.admin.keyring)
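
For example, both files can be copied from one of the remote cluster's
monitor nodes before running stack.sh. A minimal sketch (the ceph-mon1
hostname is only an illustration):

    # copy the cluster configuration and the admin keyring from a monitor node
    sudo mkdir -p /etc/ceph
    sudo scp root@ceph-mon1:/etc/ceph/ceph.conf /etc/ceph/ceph.conf
    sudo scp root@ceph-mon1:/etc/ceph/ceph.client.admin.keyring /etc/ceph/ceph.client.admin.keyring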

DevStack will then create the necessary pools, users and keys, and will
connect the OpenStack environment as usual. During the unstack phase all
pools, users and keys are deleted from the remote cluster, while the local
files and the ceph-common package are removed from the DevStack host.
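
Once stack.sh has finished, what was created on the remote cluster can be
inspected with the standard Ceph tools (a quick sanity check; pool and
user names depend on the *_CEPH_POOL and *_CEPH_USER settings):

    # run from any host that holds the admin keyring
    sudo ceph osd lspools    # pools created for Glance, Cinder and Nova
    sudo ceph auth list      # client users and their capabilities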

To enable this mode, simply add REMOTE_CEPH=True to your localrc file.
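
A minimal localrc sketch (assuming the ceph service is enabled as usual;
the keyring path shown is simply the built-in default and only needs to be
set when the admin keyring lives somewhere else):

    REMOTE_CEPH=True
    # optional: where DevStack looks for the remote cluster's admin keyring
    REMOTE_CEPH_ADMIN_KEY_PATH=/etc/ceph/ceph.client.admin.keyring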

Change-Id: I1a4b6fd676d50b6a41a09e7beba9b11f8d1478f7
Signed-off-by: Sébastien Han <sebastien.han@enovance.com>

Sébastien Han authored on 2014/12/05 00:22:41
Showing 3 changed files
... ...
@@ -6,14 +6,19 @@ if is_service_enabled ceph; then
         source $TOP_DIR/lib/ceph
     elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
         echo_summary "Installing Ceph"
-        install_ceph
-        echo_summary "Configuring Ceph"
-        configure_ceph
-        # NOTE (leseb): Do everything here because we need to have Ceph started before the main
-        # OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
-        echo_summary "Initializing Ceph"
-        init_ceph
-        start_ceph
+        check_os_support_ceph
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            install_ceph
+            echo_summary "Configuring Ceph"
+            configure_ceph
+            # NOTE (leseb): Do everything here because we need to have Ceph started before the main
+            # OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
+            echo_summary "Initializing Ceph"
+            init_ceph
+            start_ceph
+        else
+            install_ceph_remote
+        fi
     elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
         if is_service_enabled glance; then
             echo_summary "Configuring Glance for Ceph"
... ...
@@ -32,14 +37,39 @@ if is_service_enabled ceph; then
             echo_summary "Configuring libvirt secret"
             import_libvirt_secret_ceph
         fi
+
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            if is_service_enabled glance; then
+                echo_summary "Configuring Glance for Ceph"
+                configure_ceph_embedded_glance
+            fi
+            if is_service_enabled nova; then
+                echo_summary "Configuring Nova for Ceph"
+                configure_ceph_embedded_nova
+            fi
+            if is_service_enabled cinder; then
+                echo_summary "Configuring Cinder for Ceph"
+                configure_ceph_embedded_cinder
+            fi
+        fi
     fi

     if [[ "$1" == "unstack" ]]; then
-        stop_ceph
-        cleanup_ceph
+        if [ "$REMOTE_CEPH" = "True" ]; then
+            cleanup_ceph_remote
+        else
+            cleanup_ceph_embedded
+            stop_ceph
+        fi
+        cleanup_ceph_general
     fi

     if [[ "$1" == "clean" ]]; then
-        cleanup_ceph
+        if [ "$REMOTE_CEPH" = "True" ]; then
+            cleanup_ceph_remote
+        else
+            cleanup_ceph_embedded
+        fi
+        cleanup_ceph_general
     fi
 fi
... ...
@@ -68,6 +68,11 @@ CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
 CEPH_REPLICAS=${CEPH_REPLICAS:-1}
 CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})

+# Connect to an existing Ceph cluster
+REMOTE_CEPH=$(trueorfalse False $REMOTE_CEPH)
+REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
+
+
 # Functions
 # ------------

... ...
@@ -92,29 +97,69 @@ EOF
     sudo rm -f secret.xml
 }

+# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
+function undefine_virsh_secret {
+    if is_service_enabled cinder || is_service_enabled nova; then
+        local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
+        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
+    fi
+}
+
+
+# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
+function check_os_support_ceph {
+    if [[ ! ${DISTRO} =~ (trusty|f20|f21) ]]; then
+        echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
+        if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
+            die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
+        fi
+        NO_UPDATE_REPOS=False
+    fi
+}
+
 # cleanup_ceph() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_ceph {
+function cleanup_ceph_remote {
+    # do a proper cleanup from here to avoid leftover on the remote Ceph cluster
+    if is_service_enabled glance; then
+        sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+        sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
+    fi
+    if is_service_enabled cinder; then
+        sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+        sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
+    fi
+    if is_service_enabled c-bak; then
+        sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+        sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
+    fi
+    if is_service_enabled nova; then
+        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
+        sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+    fi
+}
+
+function cleanup_ceph_embedded {
     sudo pkill -f ceph-mon
     sudo pkill -f ceph-osd
     sudo rm -rf ${CEPH_DATA_DIR}/*/*
-    sudo rm -rf ${CEPH_CONF_DIR}/*
     if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
         sudo umount ${CEPH_DATA_DIR}
     fi
     if [[ -e ${CEPH_DISK_IMAGE} ]]; then
         sudo rm -f ${CEPH_DISK_IMAGE}
     fi
+}
+
+function cleanup_ceph_general {
+    undefine_virsh_secret
     uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
-    if is_service_enabled cinder || is_service_enabled nova; then
-        local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
-        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
-    fi
-    if is_service_enabled nova; then
-        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
-    fi
+
+    # purge ceph config file and keys
+    sudo rm -rf ${CEPH_CONF_DIR}/*
 }

+
 # configure_ceph() - Set config files, create data dirs, etc
 function configure_ceph {
     local count=0
... ...
@@ -130,7 +175,7 @@ function configure_ceph {
     sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)

     # create a default ceph configuration file
-    sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
+    sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
 [global]
 fsid = ${CEPH_FSID}
 mon_initial_members = $(hostname)
... ...
@@ -203,14 +248,17 @@ EOF
     done
 }

-# configure_ceph_glance() - Glance config needs to come after Glance is set up
-function configure_ceph_glance {
+function configure_ceph_embedded_glance {
     # configure Glance service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
     sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
     if [[ $CEPH_REPLICAS -ne 1 ]]; then
         sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
     fi
+}
+
+# configure_ceph_glance() - Glance config needs to come after Glance is set up
+function configure_ceph_glance {
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
     sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
     sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring

... ...
@@ -225,14 +273,17 @@ function configure_ceph_glance {
     iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
 }

-# configure_ceph_nova() - Nova config needs to come after Nova is set up
-function configure_ceph_nova {
+function configure_ceph_embedded_nova {
     # configure Nova service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
     sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
     if [[ $CEPH_REPLICAS -ne 1 ]]; then
         sudo -c ${CEPH_CONF_FILE} ceph osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
     fi
+}
+
+# configure_ceph_nova() - Nova config needs to come after Nova is set up
+function configure_ceph_nova {
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
     iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
     iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
     iniset $NOVA_CONF libvirt inject_key false
... ...
@@ -248,15 +299,17 @@ function configure_ceph_nova {
     fi
 }

-# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
-function configure_ceph_cinder {
+function configure_ceph_embedded_cinder {
     # Configure Cinder service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
     sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
     if [[ $CEPH_REPLICAS -ne 1 ]]; then
         sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
-
     fi
+}
+
+# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
+function configure_ceph_cinder {
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
     sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
     sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
 }
... ...
@@ -270,15 +323,12 @@ function init_ceph {
 }

 # install_ceph() - Collect source and prepare
+function install_ceph_remote {
+    install_package ceph-common
+}
+
 function install_ceph {
-    # NOTE(dtroyer): At some point it'll be easier to test for unsupported distros,
-    #                leveraging the list in stack.sh
-    if [[ ${os_CODENAME} =~ trusty ]] || [[ ${os_CODENAME} =~ Schrödinger’sCat ]] || [[ ${os_CODENAME} =~ Heisenbug ]]; then
-        NO_UPDATE_REPOS=False
-        install_package ceph
-    else
-        exit_distro_not_supported "Ceph since your distro doesn't provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 19/20"
-    fi
+    install_package ceph
 }

 # start_ceph() - Start running processes, including screen
... ...
@@ -52,11 +52,13 @@ function configure_cinder_backend_ceph {
     iniset $CINDER_CONF DEFAULT glance_api_version 2

     if is_service_enabled c-bak; then
-        # Configure Cinder backup service options, ceph pool, ceph user and ceph key
         sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
-        if [[ $CEPH_REPLICAS -ne 1 ]]; then
-            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            # Configure Cinder backup service options, ceph pool, ceph user and ceph key
+            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
+            if [[ $CEPH_REPLICAS -ne 1 ]]; then
+                sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+            fi
         fi
         sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
         sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring