
Merge "Ability to use a remote Ceph cluster"

Jenkins authored on 2015/01/14 03:16:02
Showing 3 changed files
... ...
@@ -6,14 +6,19 @@ if is_service_enabled ceph; then
         source $TOP_DIR/lib/ceph
     elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
         echo_summary "Installing Ceph"
-        install_ceph
-        echo_summary "Configuring Ceph"
-        configure_ceph
-        # NOTE (leseb): Do everything here because we need to have Ceph started before the main
-        # OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
-        echo_summary "Initializing Ceph"
-        init_ceph
-        start_ceph
+        check_os_support_ceph
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            install_ceph
+            echo_summary "Configuring Ceph"
+            configure_ceph
+            # NOTE (leseb): Do everything here because we need to have Ceph started before the main
+            # OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
+            echo_summary "Initializing Ceph"
+            init_ceph
+            start_ceph
+        else
+            install_ceph_remote
+        fi
     elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
         if is_service_enabled glance; then
             echo_summary "Configuring Glance for Ceph"
... ...
@@ -32,14 +37,39 @@ if is_service_enabled ceph; then
             echo_summary "Configuring libvirt secret"
             import_libvirt_secret_ceph
         fi
+
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            if is_service_enabled glance; then
+                echo_summary "Configuring Glance for Ceph"
+                configure_ceph_embedded_glance
+            fi
+            if is_service_enabled nova; then
+                echo_summary "Configuring Nova for Ceph"
+                configure_ceph_embedded_nova
+            fi
+            if is_service_enabled cinder; then
+                echo_summary "Configuring Cinder for Ceph"
+                configure_ceph_embedded_cinder
+            fi
+        fi
     fi
 
     if [[ "$1" == "unstack" ]]; then
-        stop_ceph
-        cleanup_ceph
+        if [ "$REMOTE_CEPH" = "True" ]; then
+            cleanup_ceph_remote
+        else
+            cleanup_ceph_embedded
+            stop_ceph
+        fi
+        cleanup_ceph_general
     fi
 
     if [[ "$1" == "clean" ]]; then
-        cleanup_ceph
+        if [ "$REMOTE_CEPH" = "True" ]; then
+            cleanup_ceph_remote
+        else
+            cleanup_ceph_embedded
+        fi
+        cleanup_ceph_general
     fi
 fi
... ...
@@ -70,6 +70,11 @@ CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
 CEPH_REPLICAS=${CEPH_REPLICAS:-1}
 CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
 
+# Connect to an existing Ceph cluster
+REMOTE_CEPH=$(trueorfalse False $REMOTE_CEPH)
+REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
+
+
 # Functions
 # ------------
 
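trueorfalse is DevStack's helper for normalizing boolean-ish user input to the literal strings "True"/"False", which is why the guards elsewhere in this change compare against those exact strings. A rough sketch of its behavior (an illustration only; the real helper lives in DevStack's functions-common):

    function trueorfalse {
        local default=$1 testval=$2
        # Unset or empty input falls back to the caller's default
        [[ -z "$testval" ]] && { echo "$default"; return; }
        [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
        [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
        echo "$default"
    }

So REMOTE_CEPH=yes, true, or 1 all normalize to "True", and leaving the variable unset yields the False default.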
... ...
@@ -94,29 +99,69 @@ EOF
     sudo rm -f secret.xml
 }
 
+# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
+function undefine_virsh_secret {
+    if is_service_enabled cinder || is_service_enabled nova; then
+        local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
+        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
+    fi
+}
+
+
+# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
+function check_os_support_ceph {
+    if [[ ! ${DISTRO} =~ (trusty|f20|f21) ]]; then
+        echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
+        if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
+            die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
+        fi
+        NO_UPDATE_REPOS=False
+    fi
+}
+
 # cleanup_ceph() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_ceph {
+function cleanup_ceph_remote {
+    # Do a proper cleanup from here to avoid leftovers on the remote Ceph cluster
+    if is_service_enabled glance; then
+        sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+        sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
+    fi
+    if is_service_enabled cinder; then
+        sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+        sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
+    fi
+    if is_service_enabled c-bak; then
+        sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+        sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
+    fi
+    if is_service_enabled nova; then
+        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
+        sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+    fi
+}
+
+function cleanup_ceph_embedded {
     sudo pkill -f ceph-mon
     sudo pkill -f ceph-osd
     sudo rm -rf ${CEPH_DATA_DIR}/*/*
-    sudo rm -rf ${CEPH_CONF_DIR}/*
     if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
         sudo umount ${CEPH_DATA_DIR}
     fi
     if [[ -e ${CEPH_DISK_IMAGE} ]]; then
         sudo rm -f ${CEPH_DISK_IMAGE}
     fi
+}
+
+function cleanup_ceph_general {
+    undefine_virsh_secret
     uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
-    if is_service_enabled cinder || is_service_enabled nova; then
-        local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
-        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
-    fi
-    if is_service_enabled nova; then
-        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
-    fi
+
+    # purge ceph config file and keys
+    sudo rm -rf ${CEPH_CONF_DIR}/*
 }
 
+
 # configure_ceph() - Set config files, create data dirs, etc
 function configure_ceph {
     local count=0
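
The remote variant deliberately touches no daemons or local state; it removes only the pools and client identities DevStack created on the shared cluster. After an unstack or clean run, the result can be checked with the standard Ceph CLI (the pool and user names referenced below are the usual DevStack defaults, assumed here):

    # Pools created for Glance/Cinder/Nova should no longer be listed:
    sudo ceph osd lspools
    # Nor should the per-service client keys (e.g. client.glance, client.cinder):
    sudo ceph auth list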
... ...
@@ -132,7 +177,7 @@ function configure_ceph {
     sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
 
     # create a default ceph configuration file
-    sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
+    sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
 [global]
 fsid = ${CEPH_FSID}
 mon_initial_members = $(hostname)
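
Dropping the -a flag makes configure_ceph idempotent: plain tee truncates ${CEPH_CONF_FILE} before writing, so a re-run replaces the configuration instead of appending a second [global] section. A quick illustration of the difference:

    echo one | sudo tee    /tmp/demo.conf > /dev/null   # file contains: one
    echo two | sudo tee -a /tmp/demo.conf > /dev/null   # file contains: one, two
    echo new | sudo tee    /tmp/demo.conf > /dev/null   # file contains: new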
... ...
@@ -205,14 +250,17 @@ EOF
     done
 }
 
-# configure_ceph_glance() - Glance config needs to come after Glance is set up
-function configure_ceph_glance {
+function configure_ceph_embedded_glance {
     # configure Glance service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
     sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
     if [[ $CEPH_REPLICAS -ne 1 ]]; then
         sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
     fi
+}
+
+# configure_ceph_glance() - Glance config needs to come after Glance is set up
+function configure_ceph_glance {
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
     sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
     sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
 
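The split follows one rule: commands that must run in both modes (pool and key creation) stay in configure_ceph_glance, while commands that tune a cluster DevStack owns (replica size, CRUSH ruleset) move to configure_ceph_embedded_glance, since a remote cluster's replication policy should be left alone. With the usual defaults (pool "images", 8 placement groups, and one replica are assumptions here), the two paths boil down to roughly:

    sudo ceph -c /etc/ceph/ceph.conf osd pool create images 8 8   # both modes
    sudo ceph -c /etc/ceph/ceph.conf osd pool set images size 1   # embedded only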
... ...
@@ -227,14 +275,17 @@ function configure_ceph_glance {
     iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
 }
 
-# configure_ceph_nova() - Nova config needs to come after Nova is set up
-function configure_ceph_nova {
+function configure_ceph_embedded_nova {
     # configure Nova service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
     sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
     if [[ $CEPH_REPLICAS -ne 1 ]]; then
         sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
     fi
+}
+
+# configure_ceph_nova() - Nova config needs to come after Nova is set up
+function configure_ceph_nova {
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
     iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
     iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
     iniset $NOVA_CONF libvirt inject_key false
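
iniset is DevStack's helper for writing a key into a section of an INI-style file. Assuming the default CINDER_CEPH_USER of "cinder", the calls above leave nova.conf looking roughly like this (an illustration, not verbatim output):

    [libvirt]
    rbd_user = cinder
    rbd_secret_uuid = <value of CINDER_CEPH_UUID>
    inject_key = false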
... ...
@@ -250,15 +301,17 @@ function configure_ceph_nova {
     fi
 }
 
-# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
-function configure_ceph_cinder {
+function configure_ceph_embedded_cinder {
     # Configure Cinder service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
     sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
     if [[ $CEPH_REPLICAS -ne 1 ]]; then
         sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
-
     fi
+}
+
+# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
+function configure_ceph_cinder {
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
     sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
     sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
 }
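
The capability string gives the Cinder key rwx on its own pool and Nova's (for attached volumes) plus read access to Glance's pool, so volumes can be cloned from images without copying data through the client. The generated key and its caps can be inspected with the standard CLI ("cinder" is the default CINDER_CEPH_USER, assumed here):

    sudo ceph auth get client.cinder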
... ...
@@ -272,15 +325,12 @@ function init_ceph {
 }
 
 # install_ceph() - Collect source and prepare
+function install_ceph_remote {
+    install_package ceph-common
+}
+
 function install_ceph {
-    # NOTE(dtroyer): At some point it'll be easier to test for unsupported distros,
-    #                leveraging the list in stack.sh
-    if [[ ${os_CODENAME} =~ trusty ]] || [[ ${os_CODENAME} =~ Schrödinger’sCat ]] || [[ ${os_CODENAME} =~ Heisenbug ]]; then
-        NO_UPDATE_REPOS=False
-        install_package ceph
-    else
-        exit_distro_not_supported "Ceph since your distro doesn't provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 19/20"
-    fi
+    install_package ceph
 }
 
 # start_ceph() - Start running processes, including screen
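
For the remote case only ceph-common is needed: it ships the ceph and rbd client tools, and the cluster daemons stay wherever the remote cluster runs. Assuming ceph.conf and the admin keyring have already been copied under /etc/ceph, a quick smoke test is:

    sudo ceph -s         # cluster status; fails fast if conf or keyring is missing
    sudo ceph osd tree   # confirms the client can reach the monitors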
... ...
@@ -54,11 +54,13 @@ function configure_cinder_backend_ceph {
     iniset $CINDER_CONF DEFAULT glance_api_version 2
 
     if is_service_enabled c-bak; then
-        # Configure Cinder backup service options, ceph pool, ceph user and ceph key
         sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
-        if [[ $CEPH_REPLICAS -ne 1 ]]; then
-            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+        if [ "$REMOTE_CEPH" = "False" ]; then
+            # Configure Cinder backup service options, ceph pool, ceph user and ceph key
+            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
+            if [[ $CEPH_REPLICAS -ne 1 ]]; then
+                sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+            fi
         fi
         sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
         sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
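
The backup pool follows the same pattern as the other services: osd pool create runs in both modes (against a remote cluster it simply goes over the network with the admin credentials), while replica sizing is reserved for the embedded cluster. After stacking with c-bak enabled, the pool should exist either way ("backups" is the usual default for CINDER_BAK_CEPH_POOL, assumed here):

    sudo ceph osd lspools | grep backups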