
Remove lib/ceph

All jobs using ceph as a storage backend have been moved over
to using the devstack-plugin-ceph repo in project-config, so it
should now be safe to remove the unused lib/ceph file.
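
For reference, a job or local setup would pull the same support in by
enabling the external plugin in local.conf, roughly like this (repo URL
shown for illustration only):

    [[local|localrc]]
    enable_plugin devstack-plugin-ceph https://git.openstack.org/openstack/devstack-plugin-ceph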

The files are left in place because the devstack plugin does not
install xfsprogs, which is used by the create_disk function.
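
For context, create_disk builds a loop-back, XFS-formatted backing store,
so something along these lines still needs xfsprogs on the host (a
simplified sketch using variable names from the removed lib/ceph, not the
actual implementation):

    truncate -s ${CEPH_LOOPBACK_DISK_SIZE} ${CEPH_DISK_IMAGE}
    mkfs.xfs -f ${CEPH_DISK_IMAGE}    # mkfs.xfs comes from xfsprogs
    sudo mount -o loop ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR}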

The ceph cinder backend file is also left in place since the
devstack-plugin-ceph repo uses it by setting
CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-ceph}.
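
Because that assignment uses the ${VAR:-default} idiom, it only falls
back to "ceph" when a job has not already chosen its own backend list,
e.g. (illustrative value):

    # pre-set by the job, so the plugin's default is ignored
    CINDER_ENABLED_BACKENDS=lvm:lvmdriver-1
    CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-ceph}   # stays lvm:lvmdriver-1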

Change-Id: I3fb09fc92bc6ab614e86d701ea46d5741a76b7a8

Matt Riedemann authored on 2016/08/31 06:21:30
Showing 3 changed files
deleted file mode 100644
@@ -1,75 +0,0 @@
-# ceph.sh - DevStack extras script to install Ceph
-
-if is_service_enabled ceph; then
-    if [[ "$1" == "source" ]]; then
-        # Initial source
-        source $TOP_DIR/lib/ceph
-    elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
-        echo_summary "Installing Ceph"
-        check_os_support_ceph
-        if [ "$REMOTE_CEPH" = "False" ]; then
-            install_ceph
-            echo_summary "Configuring Ceph"
-            configure_ceph
-            # NOTE (leseb): Do everything here because we need to have Ceph started before the main
-            # OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
-            echo_summary "Initializing Ceph"
-            init_ceph
-            start_ceph
-        else
-            install_ceph_remote
-        fi
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        if is_service_enabled glance; then
-            echo_summary "Configuring Glance for Ceph"
-            configure_ceph_glance
-        fi
-        if is_service_enabled nova; then
-            echo_summary "Configuring Nova for Ceph"
-            configure_ceph_nova
-        fi
-        if is_service_enabled cinder; then
-            echo_summary "Configuring Cinder for Ceph"
-            configure_ceph_cinder
-        fi
-        if is_service_enabled n-cpu; then
-            # NOTE (leseb): the part below is a requirement to attach Ceph block devices
-            echo_summary "Configuring libvirt secret"
-            import_libvirt_secret_ceph
-        fi
-
-        if [ "$REMOTE_CEPH" = "False" ]; then
-            if is_service_enabled glance; then
-                echo_summary "Configuring Glance for Ceph"
-                configure_ceph_embedded_glance
-            fi
-            if is_service_enabled nova; then
-                echo_summary "Configuring Nova for Ceph"
-                configure_ceph_embedded_nova
-            fi
-            if is_service_enabled cinder; then
-                echo_summary "Configuring Cinder for Ceph"
-                configure_ceph_embedded_cinder
-            fi
-        fi
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        if [ "$REMOTE_CEPH" = "True" ]; then
-            cleanup_ceph_remote
-        else
-            cleanup_ceph_embedded
-            stop_ceph
-        fi
-        cleanup_ceph_general
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        if [ "$REMOTE_CEPH" = "True" ]; then
-            cleanup_ceph_remote
-        else
-            cleanup_ceph_embedded
-        fi
-        cleanup_ceph_general
-    fi
-fi
@@ -1880,7 +1880,7 @@ function run_phase {
             # white listed elements in tree. We want these to move out
             # over time as well, but they are in tree, so we need to
             # manage that.
-            local exceptions="60-ceph.sh 80-tempest.sh"
+            local exceptions="80-tempest.sh"
             local extra
             extra=$(basename $extra_plugin_file_name)
             if [[ ! ( $exceptions =~ "$extra" ) ]]; then
deleted file mode 100644
@@ -1,381 +0,0 @@
-#!/bin/bash
-#
-# lib/ceph
-# Functions to control the configuration and operation of the **Ceph** storage service
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined
-
-# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
-#
-# - install_ceph
-# - configure_ceph
-# - init_ceph
-# - start_ceph
-# - stop_ceph
-# - cleanup_ceph
-
-# Save trace setting
-_XTRACE_LIB_CEPH=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
-# Default is the common DevStack data directory.
-CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
-CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img
-
-# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
-# Default is ``/etc/ceph``.
-CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
-
-# DevStack will create a loop-back disk formatted as XFS to store the
-# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the disk size in
-# kilobytes.
-# Default is 1 gigabyte.
-CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G
-CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
-
-# Common
-CEPH_FSID=$(uuidgen)
-CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf
-
-# Glance
-GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
-GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
-GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
-GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}
-
-# Nova
-NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
-NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
-NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}
-
-# Cinder
-CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
-CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
-CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
-CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
-CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
-
-# Set ``CEPH_REPLICAS`` to configure how many replicas are to be
-# configured for your Ceph cluster. By default we are configuring
-# only one replica since this is way less CPU and memory intensive. If
-# you are planning to test Ceph replication feel free to increase this value
-CEPH_REPLICAS=${CEPH_REPLICAS:-1}
-CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
-
-# Connect to an existing Ceph cluster
-REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
-REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
-
-# Cinder encrypted volume tests are not supported with a Ceph backend due to
-# bug 1463525.
-ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False
-
-
-# Functions
-# ------------
-
-function get_ceph_version {
-    local ceph_version_str
-    ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
-    echo $ceph_version_str
-}
-
-# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
-# so it can connect to the Ceph cluster while attaching a Cinder block device
-function import_libvirt_secret_ceph {
-    cat > secret.xml <<EOF
-<secret ephemeral='no' private='no'>
-   <uuid>${CINDER_CEPH_UUID}</uuid>
-   <usage type='ceph'>
-     <name>client.${CINDER_CEPH_USER} secret</name>
-   </usage>
-</secret>
-EOF
-    sudo virsh secret-define --file secret.xml
-    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
-    sudo rm -f secret.xml
-}
-
-# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
-function undefine_virsh_secret {
-    if is_service_enabled cinder || is_service_enabled nova; then
-        local virsh_uuid
-        virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
-        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
-    fi
-}
-
-
-# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
-function check_os_support_ceph {
-    if [[ ! ${DISTRO} =~ (trusty|f23|f24) ]]; then
-        echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
-        if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
-            die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
-        fi
-        NO_UPDATE_REPOS=False
-    fi
-}
-
-# cleanup_ceph() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_ceph_remote {
-    # do a proper cleanup from here to avoid leftover on the remote Ceph cluster
-    if is_service_enabled glance; then
-        sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
-        sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
-    fi
-    if is_service_enabled cinder; then
-        sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
-        sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
-    fi
-    if is_service_enabled c-bak; then
-        sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
-        sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
-    fi
-    if is_service_enabled nova; then
-        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
-        sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
-    fi
-}
-
-function cleanup_ceph_embedded {
-    sudo killall -w -9 ceph-mon
-    sudo killall -w -9 ceph-osd
-    sudo rm -rf ${CEPH_DATA_DIR}/*/*
-    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
-        sudo umount ${CEPH_DATA_DIR}
-    fi
-    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
-        sudo rm -f ${CEPH_DISK_IMAGE}
-    fi
-
-    # purge ceph config file and keys
-    sudo rm -rf ${CEPH_CONF_DIR}/*
-}
-
-function cleanup_ceph_general {
-    undefine_virsh_secret
-}
-
-
-# configure_ceph() - Set config files, create data dirs, etc
-function configure_ceph {
-    local count=0
-
-    # create a backing file disk
-    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}
-
-    # populate ceph directory
-    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}
-
-    # create ceph monitor initial key and directory
-    sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) \
-        --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) \
-        --cap mon 'allow *'
-    sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
-
-    # create a default ceph configuration file
-    sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
-[global]
-fsid = ${CEPH_FSID}
-mon_initial_members = $(hostname)
-mon_host = ${SERVICE_HOST}
-auth_cluster_required = cephx
-auth_service_required = cephx
-auth_client_required = cephx
-filestore_xattr_use_omap = true
-osd crush chooseleaf type = 0
-osd journal size = 100
-EOF
-
-    # bootstrap the ceph monitor
-    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) \
-        --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)
-
-    if is_ubuntu; then
-        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart
-        sudo initctl emit ceph-mon id=$(hostname)
-    else
-        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit
-        sudo service ceph start mon.$(hostname)
-    fi
-
-    # wait for the admin key to come up otherwise we will not be able to do the actions below
-    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
-        echo_summary "Waiting for the Ceph admin key to be ready..."
-
-        count=$(($count + 1))
-        if [ $count -eq 3 ]; then
-            die $LINENO "Maximum of 3 retries reached"
-        fi
-        sleep 5
-    done
-
-    # pools data and metadata were removed in the Giant release so depending on the version we apply different commands
-    local ceph_version
-    ceph_version=$(get_ceph_version)
-    # change pool replica size according to the CEPH_REPLICAS set by the user
-    if [[ ${ceph_version%%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
-    else
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
-    fi
-
-    # create a simple rule to take OSDs instead of host with CRUSH
-    # then apply this rules to the default pool
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
-        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
-    fi
-
-    # create the OSD(s)
-    for rep in ${CEPH_REPLICAS_SEQ}; do
-        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
-        sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
-        sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
-        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} \
-            mon 'allow profile osd ' osd 'allow *' | \
-            sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring
-
-        # ceph's init script is parsing ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ and looking for a file
-        # 'upstart' or 'sysinitv', thanks to these 'touches' we are able to control OSDs daemons
-        # from the init script.
-        if is_ubuntu; then
-            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
-        else
-            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
-        fi
-    done
-}
-
-function configure_ceph_embedded_glance {
-    # configure Glance service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
-    fi
-}
-
-# configure_ceph_glance() - Glance config needs to come after Glance is set up
-function configure_ceph_glance {
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
-    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} \
-        mon "allow r" \
-        osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | \
-        sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
-    sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
-
-    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
-    iniset $GLANCE_API_CONF glance_store default_store rbd
-    iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
-    iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
-    iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
-    iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
-}
-
-function configure_ceph_embedded_nova {
-    # configure Nova service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
-    fi
-}
-
-# configure_ceph_nova() - Nova config needs to come after Nova is set up
-function configure_ceph_nova {
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
-    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
-    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
-    iniset $NOVA_CONF libvirt inject_key false
-    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
-    iniset $NOVA_CONF libvirt images_type rbd
-    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
-    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}
-
-    if ! is_service_enabled cinder; then
-        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
-            mon "allow r" \
-            osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \
-            sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
-        sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
-    fi
-}
-
-function configure_ceph_embedded_cinder {
-    # Configure Cinder service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
-    fi
-}
-
-# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
-function configure_ceph_cinder {
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
-    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
-        mon "allow r" \
-        osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \
-        sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
-    sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
-}
-
-# init_ceph() - Initialize databases, etc.
-function init_ceph {
-    # clean up from previous (possibly aborted) runs
-    # make sure to kill all ceph processes first
-    sudo pkill -f ceph-mon || true
-    sudo pkill -f ceph-osd || true
-}
-
-# install_ceph() - Collect source and prepare
-function install_ceph_remote {
-    install_package ceph-common
-}
-
-function install_ceph {
-    install_package ceph
-}
-
-# start_ceph() - Start running processes, including screen
-function start_ceph {
-    if is_ubuntu; then
-        sudo initctl emit ceph-mon id=$(hostname)
-        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
-            sudo start ceph-osd id=${id}
-        done
-    else
-        sudo service ceph start
-    fi
-}
-
-# stop_ceph() - Stop running processes (non-screen)
-function stop_ceph {
-    if is_ubuntu; then
-        sudo service ceph-mon-all stop > /dev/null 2>&1
-        sudo service ceph-osd-all stop > /dev/null 2>&1
-    else
-        sudo service ceph stop > /dev/null 2>&1
-    fi
-}
-
-
-# Restore xtrace
-$_XTRACE_LIB_CEPH
-
-## Local variables:
-## mode: shell-script
-## End: