| 1 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,44 @@ |
| 0 |
+# ceph.sh - DevStack extras script to install Ceph |
|
| 1 |
+ |
|
| 2 |
+if is_service_enabled ceph; then |
|
| 3 |
+ if [[ "$1" == "source" ]]; then |
|
| 4 |
+ # Initial source |
|
| 5 |
+ source $TOP_DIR/lib/ceph |
|
| 6 |
+ elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then |
|
| 7 |
+ echo_summary "Installing Ceph" |
|
| 8 |
+ install_ceph |
|
| 9 |
+ echo_summary "Configuring Ceph" |
|
| 10 |
+ configure_ceph |
|
| 11 |
+ # NOTE (leseb): Do everything here because we need to have Ceph started before the main |
|
| 12 |
+ # OpenStack components. The Ceph OSDs must be started here, otherwise we can't upload any images. |
|
| 13 |
+ echo_summary "Initializing Ceph" |
|
| 14 |
+ init_ceph |
|
| 15 |
+ start_ceph |
|
| 16 |
+ elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then |
|
| 17 |
+ if is_service_enabled glance; then |
|
| 18 |
+ echo_summary "Configuring Glance for Ceph" |
|
| 19 |
+ configure_ceph_glance |
|
| 20 |
+ fi |
|
| 21 |
+ if is_service_enabled nova; then |
|
| 22 |
+ echo_summary "Configuring Nova for Ceph" |
|
| 23 |
+ configure_ceph_nova |
|
| 24 |
+ fi |
|
| 25 |
+ if is_service_enabled cinder; then |
|
| 26 |
+ echo_summary "Configuring Cinder for Ceph" |
|
| 27 |
+ configure_ceph_cinder |
|
| 28 |
+ # NOTE (leseb): Cinder requires the libvirt secret below in order to attach volumes, |
|
| 29 |
+ # so it has to be configured within this if statement. |
|
| 30 |
+ echo_summary "Configuring libvirt secret" |
|
| 31 |
+ import_libvirt_secret_ceph |
|
| 32 |
+ fi |
|
| 33 |
+ fi |
|
| 34 |
+ |
|
| 35 |
+ if [[ "$1" == "unstack" ]]; then |
|
| 36 |
+ stop_ceph |
|
| 37 |
+ cleanup_ceph |
|
| 38 |
+ fi |
|
| 39 |
+ |
|
| 40 |
+ if [[ "$1" == "clean" ]]; then |
|
| 41 |
+ cleanup_ceph |
|
| 42 |
+ fi |
|
| 43 |
+fi |
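The hook above only runs when the ``ceph`` service is enabled. A minimal ``localrc`` sketch to exercise it (illustrative only, not part of the change; the variable defaults shown come from ``lib/ceph`` below):

    enable_service ceph
    # optional overrides; the lib/ceph defaults are 2G and 1
    CEPH_LOOPBACK_DISK_SIZE=4G
    CEPH_REPLICAS=1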
| ... | ... |
@@ -546,6 +546,40 @@ if ! function_exists echo_nolog; then |
| 546 | 546 |
} |
| 547 | 547 |
fi |
| 548 | 548 |
|
| 549 |
+ |
|
| 550 |
+# create_disk - Create backing disk |
|
| 551 |
+function create_disk {
|
|
| 552 |
+ local node_number |
|
| 553 |
+ local disk_image=${1}
|
|
| 554 |
+ local storage_data_dir=${2}
|
|
| 555 |
+ local loopback_disk_size=${3}
|
|
| 556 |
+ |
|
| 557 |
+ # Create a loopback disk and format it to XFS. |
|
| 558 |
+ if [[ -e ${disk_image} ]]; then
|
|
| 559 |
+ if egrep -q ${storage_data_dir} /proc/mounts; then
|
|
| 560 |
+ sudo umount ${storage_data_dir}/drives/sdb1
|
|
| 561 |
+ sudo rm -f ${disk_image}
|
|
| 562 |
+ fi |
|
| 563 |
+ fi |
|
| 564 |
+ |
|
| 565 |
+ sudo mkdir -p ${storage_data_dir}/drives/images
|
|
| 566 |
+ |
|
| 567 |
+ sudo truncate -s ${loopback_disk_size} ${disk_image}
|
|
| 568 |
+ |
|
| 569 |
+ # Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in |
|
| 570 |
+ # a single inode. Keeping the default inode size (256) will result in multiple |
|
| 571 |
+ # inodes being used to store xattr. Retrieving the xattr will be slower |
|
| 572 |
+ # since we have to read multiple inodes. This statement is true for both |
|
| 573 |
+ # Swift and Ceph. |
|
| 574 |
+ sudo mkfs.xfs -f -i size=1024 ${disk_image}
|
|
| 575 |
+ |
|
| 576 |
+ # Mount the disk with mount options to make it as efficient as possible |
|
| 577 |
+ if ! egrep -q ${storage_data_dir} /proc/mounts; then
|
|
| 578 |
+ sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ |
|
| 579 |
+ ${disk_image} ${storage_data_dir}
|
|
| 580 |
+ fi |
|
| 581 |
+} |
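For reference, ``lib/ceph`` below invokes this helper as shown; the size argument is handed straight to ``truncate -s``, so suffixes such as ``G`` are accepted:

    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}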
|
| 582 |
+ |
|
| 549 | 583 |
# Restore xtrace |
| 550 | 584 |
$XTRACE |
| 551 | 585 |
|
| 552 | 586 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,286 @@ |
| 0 |
+# lib/ceph |
|
| 1 |
+# Functions to control the configuration and operation of the **Ceph** storage service |
|
| 2 |
+ |
|
| 3 |
+# Dependencies: |
|
| 4 |
+# |
|
| 5 |
+# - ``functions`` file |
|
| 6 |
+# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined |
|
| 7 |
+ |
|
| 8 |
+# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``): |
|
| 9 |
+# |
|
| 10 |
+# - install_ceph |
|
| 11 |
+# - configure_ceph |
|
| 12 |
+# - init_ceph |
|
| 13 |
+# - start_ceph |
|
| 14 |
+# - stop_ceph |
|
| 15 |
+# - cleanup_ceph |
|
| 16 |
+ |
|
| 17 |
+# Save trace setting |
|
| 18 |
+XTRACE=$(set +o | grep xtrace) |
|
| 19 |
+set +o xtrace |
|
| 20 |
+ |
|
| 21 |
+ |
|
| 22 |
+# Defaults |
|
| 23 |
+# -------- |
|
| 24 |
+ |
|
| 25 |
+# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects. |
|
| 26 |
+# Default is the common DevStack data directory. |
|
| 27 |
+CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
|
|
| 28 |
+CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img
|
|
| 29 |
+ |
|
| 30 |
+# Set ``CEPH_CONF_DIR`` to the location of the configuration files. |
|
| 31 |
+# Default is ``/etc/ceph``. |
|
| 32 |
+CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
|
|
| 33 |
+ |
|
| 34 |
+# DevStack will create a loop-back disk formatted as XFS to store the |
|
| 35 |
+# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the disk size in |
|
| 36 |
+# bytes (size suffixes such as ``G`` are accepted, as with ``truncate -s``). |
|
| 37 |
+# Default is 2 gigabytes. |
|
| 38 |
+CEPH_LOOPBACK_DISK_SIZE_DEFAULT=2G |
|
| 39 |
+CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
|
|
| 40 |
+ |
|
| 41 |
+# Common |
|
| 42 |
+CEPH_FSID=$(uuidgen) |
|
| 43 |
+CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf
|
|
| 44 |
+ |
|
| 45 |
+# Glance |
|
| 46 |
+GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
|
|
| 47 |
+GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
|
|
| 48 |
+GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
|
|
| 49 |
+GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}
|
|
| 50 |
+ |
|
| 51 |
+# Nova |
|
| 52 |
+NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
|
|
| 53 |
+NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
|
|
| 54 |
+NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}
|
|
| 55 |
+ |
|
| 56 |
+# Cinder |
|
| 57 |
+CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
|
|
| 58 |
+CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
|
|
| 59 |
+CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
|
|
| 60 |
+CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
|
|
| 61 |
+CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
|
|
| 62 |
+ |
|
| 63 |
+# Set ``CEPH_REPLICAS`` to configure how many replicas are to be |
|
| 64 |
+# configured for your Ceph cluster. By default we are configuring |
|
| 65 |
+# only one replica, since this is far less CPU- and memory-intensive. If |
|
| 66 |
+# you plan to test Ceph replication, feel free to increase this value. |
|
| 67 |
+CEPH_REPLICAS=${CEPH_REPLICAS:-1}
|
|
| 68 |
+CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
|
|
| 69 |
+ |
|
| 70 |
+# Functions |
|
| 71 |
+# ------------ |
|
| 72 |
+ |
|
| 73 |
+# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt |
|
| 74 |
+# so it can connect to the Ceph cluster while attaching a Cinder block device |
|
| 75 |
+function import_libvirt_secret_ceph {
|
|
| 76 |
+ cat > secret.xml <<EOF |
|
| 77 |
+<secret ephemeral='no' private='no'> |
|
| 78 |
+ <uuid>${CINDER_CEPH_UUID}</uuid>
|
|
| 79 |
+ <usage type='ceph'> |
|
| 80 |
+ <name>client.${CINDER_CEPH_USER} secret</name>
|
|
| 81 |
+ </usage> |
|
| 82 |
+</secret> |
|
| 83 |
+EOF |
|
| 84 |
+ sudo virsh secret-define --file secret.xml |
|
| 85 |
+ sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
|
|
| 86 |
+ sudo rm -f secret.xml |
|
| 87 |
+} |
|
| 88 |
+ |
|
| 89 |
+# cleanup_ceph() - Remove residual data files, anything left over from previous |
|
| 90 |
+# runs that a clean run would need to clean up |
|
| 91 |
+function cleanup_ceph {
|
|
| 92 |
+ sudo pkill -f ceph-mon |
|
| 93 |
+ sudo pkill -f ceph-osd |
|
| 94 |
+ sudo rm -rf ${CEPH_DATA_DIR}/*/*
|
|
| 95 |
+ sudo rm -rf ${CEPH_CONF_DIR}/*
|
|
| 96 |
+ if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
|
|
| 97 |
+ sudo umount ${CEPH_DATA_DIR}
|
|
| 98 |
+ fi |
|
| 99 |
+ if [[ -e ${CEPH_DISK_IMAGE} ]]; then
|
|
| 100 |
+ sudo rm -f ${CEPH_DISK_IMAGE}
|
|
| 101 |
+ fi |
|
| 102 |
+ uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1 |
|
| 103 |
+ VIRSH_UUID=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
|
|
| 104 |
+ sudo virsh secret-undefine ${VIRSH_UUID} >/dev/null 2>&1
|
|
| 105 |
+} |
|
| 106 |
+ |
|
| 107 |
+# configure_ceph() - Set config files, create data dirs, etc |
|
| 108 |
+function configure_ceph {
|
|
| 109 |
+ local count=0 |
|
| 110 |
+ |
|
| 111 |
+ # create a backing file disk |
|
| 112 |
+ create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}
|
|
| 113 |
+ |
|
| 114 |
+ # populate ceph directory |
|
| 115 |
+ sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}
|
|
| 116 |
+ |
|
| 117 |
+ # create ceph monitor initial key and directory |
|
| 118 |
+ sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) --cap mon 'allow *' |
|
| 119 |
+ sudo mkdir /var/lib/ceph/mon/ceph-$(hostname) |
|
| 120 |
+ |
|
| 121 |
+ # create a default ceph configuration file |
|
| 122 |
+ sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
|
|
| 123 |
+[global] |
|
| 124 |
+fsid = ${CEPH_FSID}
|
|
| 125 |
+mon_initial_members = $(hostname) |
|
| 126 |
+mon_host = ${SERVICE_HOST}
|
|
| 127 |
+auth_cluster_required = cephx |
|
| 128 |
+auth_service_required = cephx |
|
| 129 |
+auth_client_required = cephx |
|
| 130 |
+filestore_xattr_use_omap = true |
|
| 131 |
+osd crush chooseleaf type = 0 |
|
| 132 |
+osd journal size = 100 |
|
| 133 |
+EOF |
|
| 134 |
+ |
|
| 135 |
+ # bootstrap the ceph monitor |
|
| 136 |
+ sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)
|
|
| 137 |
+ if is_ubuntu; then |
|
| 138 |
+ sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart |
|
| 139 |
+ sudo initctl emit ceph-mon id=$(hostname) |
|
| 140 |
+ else |
|
| 141 |
+ sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit |
|
| 142 |
+ sudo service ceph start mon.$(hostname) |
|
| 143 |
+ fi |
|
| 144 |
+ |
|
| 145 |
+ # wait for the admin key to come up otherwise we will not be able to do the actions below |
|
| 146 |
+ until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
|
|
| 147 |
+ echo_summary "Waiting for the Ceph admin key to be ready..." |
|
| 148 |
+ |
|
| 149 |
+ count=$(($count + 1)) |
|
| 150 |
+ if [ $count -eq 3 ]; then |
|
| 151 |
+ die $LINENO "Maximum of 3 retries reached" |
|
| 152 |
+ fi |
|
| 153 |
+ sleep 5 |
|
| 154 |
+ done |
|
| 155 |
+ |
|
| 156 |
+ # change pool replica size according to the CEPH_REPLICAS set by the user |
|
| 157 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
|
|
| 158 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
|
|
| 159 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
|
|
| 160 |
+ |
|
| 161 |
+ # create a simple rule to take OSDs instead of host with CRUSH |
|
| 162 |
+ # then apply this rules to the default pool |
|
| 163 |
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then |
|
| 164 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
|
|
| 165 |
+ RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
|
|
| 166 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
|
|
| 167 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
|
|
| 168 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
|
|
| 169 |
+ fi |
|
| 170 |
+ |
|
| 171 |
+ # create the OSD(s) |
|
| 172 |
+ for rep in ${CEPH_REPLICAS_SEQ}; do
|
|
| 173 |
+ OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
|
|
| 174 |
+ sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
|
|
| 175 |
+ sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
|
|
| 176 |
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} mon 'allow profile osd ' osd 'allow *' | sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring
|
|
| 177 |
+ |
|
| 178 |
+ # ceph's init script is parsing ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ and looking for a file
|
|
| 179 |
+ # 'upstart' or 'sysinitv', thanks to these 'touches' we are able to control OSDs daemons |
|
| 180 |
+ # from the init script. |
|
| 181 |
+ if is_ubuntu; then |
|
| 182 |
+ sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
|
|
| 183 |
+ else |
|
| 184 |
+ sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
|
|
| 185 |
+ fi |
|
| 186 |
+ done |
|
| 187 |
+} |
|
| 188 |
+ |
|
| 189 |
+# configure_ceph_glance() - Glance config needs to come after Glance is set up |
|
| 190 |
+function configure_ceph_glance {
|
|
| 191 |
+ # configure Glance service options, ceph pool, ceph user and ceph key |
|
| 192 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
|
|
| 193 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
|
|
| 194 |
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then |
|
| 195 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
|
|
| 196 |
+ fi |
|
| 197 |
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
|
|
| 198 |
+ sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
|
|
| 199 |
+ iniset $GLANCE_API_CONF DEFAULT default_store rbd |
|
| 200 |
+ iniset $GLANCE_API_CONF DEFAULT rbd_store_ceph_conf $CEPH_CONF_FILE |
|
| 201 |
+ iniset $GLANCE_API_CONF DEFAULT rbd_store_user $GLANCE_CEPH_USER |
|
| 202 |
+ iniset $GLANCE_API_CONF DEFAULT rbd_store_pool $GLANCE_CEPH_POOL |
|
| 203 |
+ iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True |
|
| 204 |
+} |
|
| 205 |
+ |
|
| 206 |
+# configure_ceph_nova() - Nova config needs to come after Nova is set up |
|
| 207 |
+function configure_ceph_nova {
|
|
| 208 |
+ # configure Nova service options, ceph pool, ceph user and ceph key |
|
| 209 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
|
|
| 210 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
|
|
| 211 |
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then |
|
| 212 |
+ sudo -c ${CEPH_CONF_FILE} ceph osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
|
|
| 213 |
+ fi |
|
| 214 |
+ iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
|
|
| 215 |
+ iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
|
|
| 216 |
+ iniset $NOVA_CONF libvirt inject_key false |
|
| 217 |
+ iniset $NOVA_CONF libvirt inject_partition -2 |
|
| 218 |
+ iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback" |
|
| 219 |
+ iniset $NOVA_CONF libvirt images_type rbd |
|
| 220 |
+ iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
|
|
| 221 |
+ iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}
|
|
| 222 |
+} |
|
| 223 |
+ |
|
| 224 |
+# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up |
|
| 225 |
+function configure_ceph_cinder {
|
|
| 226 |
+ # Configure Cinder service options, ceph pool, ceph user and ceph key |
|
| 227 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
|
|
| 228 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
|
|
| 229 |
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then |
|
| 230 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
|
|
| 231 |
+ |
|
| 232 |
+ fi |
|
| 233 |
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
|
|
| 234 |
+ sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
|
|
| 235 |
+} |
|
| 236 |
+ |
|
| 237 |
+# init_ceph() - Initialize databases, etc. |
|
| 238 |
+function init_ceph {
|
|
| 239 |
+ # clean up from previous (possibly aborted) runs |
|
| 240 |
+ # make sure to kill all ceph processes first |
|
| 241 |
+ sudo pkill -f ceph-mon || true |
|
| 242 |
+ sudo pkill -f ceph-osd || true |
|
| 243 |
+} |
|
| 244 |
+ |
|
| 245 |
+# install_ceph() - Collect source and prepare |
|
| 246 |
+function install_ceph {
|
|
| 247 |
+ # NOTE(dtroyer): At some point it'll be easier to test for unsupported distros, |
|
| 248 |
+ # leveraging the list in stack.sh |
|
| 249 |
+ if [[ ${os_CODENAME} =~ trusty ]] || [[ ${os_CODENAME} =~ Schrödinger’sCat ]] || [[ ${os_CODENAME} =~ Heisenbug ]]; then
|
|
| 250 |
+ NO_UPDATE_REPOS=False |
|
| 251 |
+ install_package ceph |
|
| 252 |
+ else |
|
| 253 |
+ exit_distro_not_supported "Ceph since your distro doesn't provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 19/20" |
|
| 254 |
+ fi |
|
| 255 |
+} |
|
| 256 |
+ |
|
| 257 |
+# start_ceph() - Start running processes, including screen |
|
| 258 |
+function start_ceph {
|
|
| 259 |
+ if is_ubuntu; then |
|
| 260 |
+ sudo initctl emit ceph-mon id=$(hostname) |
|
| 261 |
+ for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
|
|
| 262 |
+ sudo start ceph-osd id=${id}
|
|
| 263 |
+ done |
|
| 264 |
+ else |
|
| 265 |
+ sudo service ceph start |
|
| 266 |
+ fi |
|
| 267 |
+} |
|
| 268 |
+ |
|
| 269 |
+# stop_ceph() - Stop running processes (non-screen) |
|
| 270 |
+function stop_ceph {
|
|
| 271 |
+ if is_ubuntu; then |
|
| 272 |
+ sudo service ceph-mon-all stop > /dev/null 2>&1 |
|
| 273 |
+ sudo service ceph-osd-all stop > /dev/null 2>&1 |
|
| 274 |
+ else |
|
| 275 |
+ sudo service ceph stop > /dev/null 2>&1 |
|
| 276 |
+ fi |
|
| 277 |
+} |
|
| 278 |
+ |
|
| 279 |
+ |
|
| 280 |
+# Restore xtrace |
|
| 281 |
+$XTRACE |
|
| 282 |
+ |
|
| 283 |
+## Local variables: |
|
| 284 |
+## mode: shell-script |
|
| 285 |
+## End: |
| 0 | 286 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,79 @@ |
| 0 |
+# lib/cinder_backends/ceph |
|
| 1 |
+# Configure the ceph backend |
|
| 2 |
+ |
|
| 3 |
+# Enable with: |
|
| 4 |
+# |
|
| 5 |
+# CINDER_ENABLED_BACKENDS+=,ceph:ceph |
|
| 6 |
+# |
|
| 7 |
+# Optional parameters: |
|
| 8 |
+# CINDER_BAK_CEPH_POOL=<pool-name> |
|
| 9 |
+# CINDER_BAK_CEPH_USER=<user> |
|
| 10 |
+# CINDER_BAK_CEPH_POOL_PG=<pg-num> |
|
| 11 |
+# CINDER_BAK_CEPH_POOL_PGP=<pgp-num> |
|
| 12 |
+ |
|
| 13 |
+# Dependencies: |
|
| 14 |
+# |
|
| 15 |
+# - ``functions`` file |
|
| 16 |
+# - ``cinder`` configurations |
|
| 17 |
+ |
|
| 18 |
+# configure_cinder_backend_ceph - called from configure_cinder() |
|
| 19 |
+ |
|
| 20 |
+ |
|
| 21 |
+# Save trace setting |
|
| 22 |
+MY_XTRACE=$(set +o | grep xtrace) |
|
| 23 |
+set +o xtrace |
|
| 24 |
+ |
|
| 25 |
+ |
|
| 26 |
+# Defaults |
|
| 27 |
+# -------- |
|
| 28 |
+ |
|
| 29 |
+CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
|
|
| 30 |
+CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
|
|
| 31 |
+CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
|
|
| 32 |
+CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
|
|
| 33 |
+ |
|
| 34 |
+ |
|
| 35 |
+# Entry Points |
|
| 36 |
+# ------------ |
|
| 37 |
+ |
|
| 38 |
+# configure_cinder_backend_ceph - Set config files, create data dirs, etc |
|
| 39 |
+# configure_cinder_backend_ceph $name |
|
| 40 |
+function configure_cinder_backend_ceph {
|
|
| 41 |
+ local be_name=$1 |
|
| 42 |
+ |
|
| 43 |
+ iniset $CINDER_CONF $be_name volume_backend_name $be_name |
|
| 44 |
+ iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver" |
|
| 45 |
+ iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE" |
|
| 46 |
+ iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL" |
|
| 47 |
+ iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER" |
|
| 48 |
+ iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID" |
|
| 49 |
+ iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False |
|
| 50 |
+ iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 |
|
| 51 |
+ iniset $CINDER_CONF DEFAULT glance_api_version 2 |
|
| 52 |
+ |
|
| 53 |
+ if is_service_enabled c-bak; then |
|
| 54 |
+ # Configure Cinder backup service options, ceph pool, ceph user and ceph key |
|
| 55 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
|
|
| 56 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
|
|
| 57 |
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then |
|
| 58 |
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
|
|
| 59 |
+ fi |
|
| 60 |
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
|
|
| 61 |
+ sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
|
|
| 62 |
+ |
|
| 63 |
+ iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph" |
|
| 64 |
+ iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" |
|
| 65 |
+ iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" |
|
| 66 |
+ iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" |
|
| 67 |
+ iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 |
|
| 68 |
+ iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0 |
|
| 69 |
+ iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True |
|
| 70 |
+ fi |
|
| 71 |
+} |
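With the backend enabled as ``CINDER_ENABLED_BACKENDS+=,ceph:ceph`` and the defaults above, the ``iniset`` calls render to roughly this ``cinder.conf`` fragment (backup options only when ``c-bak`` is enabled; the UUID is the generated ``CINDER_CEPH_UUID``):

    [ceph]
    volume_backend_name = ceph
    volume_driver = cinder.volume.drivers.rbd.RBDDriver
    rbd_ceph_conf = /etc/ceph/ceph.conf
    rbd_pool = volumes
    rbd_user = cinder
    rbd_uuid = <CINDER_CEPH_UUID>
    rbd_flatten_volume_from_snapshot = False
    rbd_max_clone_depth = 5

    [DEFAULT]
    glance_api_version = 2
    backup_driver = cinder.backup.drivers.ceph
    backup_ceph_conf = /etc/ceph/ceph.conf
    backup_ceph_pool = backups
    backup_ceph_user = cinder-bak
    backup_ceph_stripe_unit = 0
    backup_ceph_stripe_count = 0
    restore_discard_excess_bytes = True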
|
| 72 |
+ |
|
| 73 |
+# Restore xtrace |
|
| 74 |
+$MY_XTRACE |
|
| 75 |
+ |
|
| 76 |
+# Local variables: |
|
| 77 |
+# mode: shell-script |
|
| 78 |
+# End: |