# lib/swift
# Functions to control the configuration and operation of the **Swift** service

# Dependencies:
# ``functions`` file
# ``DEST``, ``SCREEN_NAME``, ``SWIFT_HASH`` must be defined
# ``STACK_USER`` must be defined
# ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined
# ``lib/keystone`` file

# ``stack.sh`` calls the entry points in this order:
#
# install_swift
# configure_swift
# init_swift
# start_swift
# stop_swift
# cleanup_swift

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set up default directories
SWIFT_DIR=$DEST/swift
SWIFTCLIENT_DIR=$DEST/python-swiftclient
SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
SWIFT3_DIR=$DEST/swift3

# TODO: add logging to different location.

# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects.
# Default is the common DevStack data directory.
SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift}

# Set ``SWIFT_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/swift``.
# TODO(dtroyer): remove SWIFT_CONFIG_DIR after cutting stable/grizzly
SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-${SWIFT_CONFIG_DIR:-/etc/swift}}

if is_service_enabled s-proxy && is_service_enabled swift3; then
    # If we are using swift3, we can default the S3 port to swift instead
    # of nova-objectstore
    S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
fi

# DevStack will create a loop-back disk formatted as XFS to store the
# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in
# kilobytes.
# Default is 1 gigabyte.
SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}

# Set ``SWIFT_EXTRAS_MIDDLEWARE`` to the list of extra middlewares to enable.
# Default is ``tempurl formpost staticweb bulk``.
SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb bulk}

# The ring uses a configurable number of bits from a path's MD5 hash as
# a partition index that designates a device. The number of bits kept
# from the hash is known as the partition power, and 2 to the partition
# power indicates the partition count. Partitioning the full MD5 hash
# ring allows other parts of the cluster to work in batches of items at
# once, which ends up either more efficient or at least less complex than
# working with each item separately or the entire cluster all at once.
# By default we use a partition power of 9 (which means 512 partitions).
SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}

# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be
# configured for your Swift cluster. By default we configure only one
# replica, since this is far less CPU and memory intensive. If you are
# planning to test swift replication you may want to set this to 3.
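# For example (an illustrative ``localrc`` override, assumed here rather
# than defined by this file), a replication test setup might set
#
#   SWIFT_REPLICAS=3
#   SWIFT_LOOPBACK_DISK_SIZE=2000000
#
# before this file is sourced.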
SWIFT_REPLICAS=${SWIFT_REPLICAS:-1}
SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS})

# Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE``
# Port bases used in port number calculation for the service "nodes".
# The base port number is used for the first node; each additional node
# uses base_port + 10 * (node_num - 1), e.g. object node 2 binds to 6023.
OBJECT_PORT_BASE=6013
CONTAINER_PORT_BASE=6011
ACCOUNT_PORT_BASE=6012


# Entry Points
# ------------

# cleanup_swift() - Remove residual data files
function cleanup_swift() {
    rm -f ${SWIFT_CONF_DIR}/{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
    if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
        sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
    fi
    if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
        rm ${SWIFT_DATA_DIR}/drives/images/swift.img
    fi
    rm -rf ${SWIFT_DATA_DIR}/run/
}

# configure_swift() - Set config files, create data dirs and loop image
function configure_swift() {
    local swift_pipeline=" "
    local node_number
    local swift_node_config
    local swift_log_dir

    setup_develop $SWIFT_DIR

    # Make sure to kill all swift processes first
    swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true

    # First do a bit of setup by creating the directories and
    # changing the permissions so we can run it as our user.
    USER_GROUP=$(id -g)
    sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}

    # Create a loopback disk and format it to XFS.
    if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
        if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
            sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
            sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img
        fi
    fi

    mkdir -p ${SWIFT_DATA_DIR}/drives/images
    sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img
    sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img

    dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \
        bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}

    # Make a fresh XFS filesystem
    mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img

    # Mount the disk with mount options to make it as efficient as possible
    mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
    if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
            ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1
    fi

    # Create a link to the above mount and
    # create all of the directories needed to emulate a few different servers
    for node_number in ${SWIFT_REPLICAS_SEQ}; do
        sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number
        drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
        node=${SWIFT_DATA_DIR}/${node_number}/node
        node_device=${node}/sdb1
        [[ -d $node ]] && continue
        [[ -d $drive ]] && continue
        sudo install -o ${USER} -g $USER_GROUP -d $drive
        sudo install -o ${USER} -g $USER_GROUP -d $node_device
        sudo chown -R $USER: ${node}
    done

    sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server
    sudo chown -R $USER: ${SWIFT_CONF_DIR}

    if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then
        # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed.
        # Create a symlink if the config dir is moved
        sudo ln -sf ${SWIFT_CONF_DIR} /etc/swift
    fi

    # Swift uses rsync to synchronize data between the different
    # partitions (which makes more sense in a multi-node setup);
    # configure it with our version of rsyncd.conf.
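    # For illustration (template contents paraphrased, not quoted): a line
    # such as ``uid = %USER%`` in ``$FILES/swift/rsyncd.conf`` would be
    # rewritten by the sed below to ``uid = stack`` when $USER is ``stack``;
    # the %GROUP% and %SWIFT_DATA_DIR% placeholders are substituted the same
    # way before the result is written to /etc/rsyncd.conf.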
    sed -e "
        s/%GROUP%/${USER_GROUP}/;
        s/%USER%/$USER/;
        s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
    " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
    # rsyncd.conf just prepared for 4 nodes
    if is_ubuntu; then
        sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
    else
        sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync
    fi

    if is_service_enabled swift3; then
        swift_auth_server="s3token "
    fi

    # By default Swift is installed with the tempauth middleware, which
    # ships with default usernames and passwords. If Keystone is enabled,
    # the Keystone auth middlewares are used instead.
    if is_service_enabled key; then
        swift_auth_server+="authtoken keystoneauth"
    else
        swift_auth_server=tempauth
    fi

    SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf
    cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER}

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER}

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONF_DIR}

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}

    # Build the proxy pipeline: with Keystone enabled use the authtoken and
    # keystoneauth middlewares (plus s3token/swift3 when swift3 is enabled),
    # otherwise fall back to tempauth and its default users.
    if is_service_enabled key; then
        if is_service_enabled swift3; then
            swift_pipeline=" s3token swift3 "
        fi
        swift_pipeline+=" authtoken keystoneauth "
    else
        if is_service_enabled swift3; then
            swift_pipeline=" swift3 "
        fi
        swift_pipeline+=" tempauth "
    fi

    sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER}

    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true

    # Configure Keystone
    sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER}
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use
    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin"

    if is_service_enabled swift3; then
        cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
# NOTE(chmou): s3token middleware is not updated yet to use only
#              username and password.
[filter:s3token]
paste.filter_factory = keystone.middleware.s3_token:filter_factory
auth_port = ${KEYSTONE_AUTH_PORT}
auth_host = ${KEYSTONE_AUTH_HOST}
auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
auth_token = ${SERVICE_TOKEN}
admin_token = ${SERVICE_TOKEN}

[filter:swift3]
use = egg:swift3#swift3
EOF
    fi

    cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf
    iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}

    # This function generates an object/container/account server configuration,
    # emulating multiple nodes on different ports
    function generate_swift_config() {
        local swift_node_config=$1
        local node_id=$2
        local bind_port=$3
        local server_type=$4

        log_facility=$[ node_id - 1 ]
        node_path=${SWIFT_DATA_DIR}/${node_number}

        iniuncomment ${swift_node_config} DEFAULT user
        iniset ${swift_node_config} DEFAULT user ${USER}

        iniuncomment ${swift_node_config} DEFAULT bind_port
        iniset ${swift_node_config} DEFAULT bind_port ${bind_port}

        iniuncomment ${swift_node_config} DEFAULT swift_dir
        iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR}

        iniuncomment ${swift_node_config} DEFAULT devices
        iniset ${swift_node_config} DEFAULT devices ${node_path}

        iniuncomment ${swift_node_config} DEFAULT log_facility
        iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}

        iniuncomment ${swift_node_config} DEFAULT mount_check
        iniset ${swift_node_config} DEFAULT mount_check false

        iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode
        iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes
    }

    for node_number in ${SWIFT_REPLICAS_SEQ}; do
        swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
        cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
        generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] object
        iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache
        # Use sed, and not iniset/iniuncomment, because we want to make a
        # global modification and make sure it works for new sections.
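        # Illustrative example (the sample default shown is an assumption):
        # a commented line like
        #   # recon_cache_path = /var/cache/swift
        # in any section of the file becomes
        #   recon_cache_path = ${SWIFT_DATA_DIR}/cache
        # after the substitution below.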
        sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}

        swift_node_config=${SWIFT_CONF_DIR}/container-server/${node_number}.conf
        cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config}
        generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] container
        iniuncomment ${swift_node_config} app:container-server allow_versions
        iniset ${swift_node_config} app:container-server allow_versions "true"
        sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}

        swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf
        cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
        generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] account
        sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
    done

    swift_log_dir=${SWIFT_DATA_DIR}/logs
    rm -rf ${swift_log_dir}
    mkdir -p ${swift_log_dir}/hourly
    sudo chown -R $USER:adm ${swift_log_dir}
    sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
        tee /etc/rsyslog.d/10-swift.conf
}

# configure_swiftclient() - Set config files, create data dirs, etc
function configure_swiftclient() {
    setup_develop $SWIFTCLIENT_DIR
}

# init_swift() - Initialize rings
function init_swift() {
    local node_number

    # Make sure to kill all swift processes first
    swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true

    # This is where we create three different rings for swift with
    # different object servers binding on different ports.
    pushd ${SWIFT_CONF_DIR} >/dev/null && {

        rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz

        swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
        swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
        swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1

        for node_number in ${SWIFT_REPLICAS_SEQ}; do
            swift-ring-builder object.builder add z${node_number}-127.0.0.1:$[OBJECT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
            swift-ring-builder container.builder add z${node_number}-127.0.0.1:$[CONTAINER_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
            swift-ring-builder account.builder add z${node_number}-127.0.0.1:$[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
        done

        swift-ring-builder object.builder rebalance
        swift-ring-builder container.builder rebalance
        swift-ring-builder account.builder rebalance

    } && popd >/dev/null

    # Create cache dir
    sudo mkdir -p $SWIFT_AUTH_CACHE_DIR
    sudo chown $STACK_USER $SWIFT_AUTH_CACHE_DIR
    rm -f $SWIFT_AUTH_CACHE_DIR/*
}

# install_swift() - Collect source and prepare
function install_swift() {
    git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
}

# install_swiftclient() - Collect source and prepare
function install_swiftclient() {
    git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH
}

# start_swift() - Start running processes, including screen
function start_swift() {
    # (re)start rsyslog
    restart_service rsyslog
    # (re)start memcached to make sure we have a clean memcache.
    restart_service memcached

    # Start rsync
    if is_ubuntu; then
        sudo /etc/init.d/rsync restart || :
    else
        sudo systemctl start xinetd.service
    fi

    # By default, with only one replica, we launch the proxy, container,
    # account and object servers in the foreground in screen and the other
    # services in the background. If SWIFT_REPLICAS is set to something
    # greater than one, we first spawn all the swift services and then kill
    # the proxy service so we can run it in the foreground in screen.
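    # For example (describing the effect of the commands below): with
    # SWIFT_REPLICAS=1, screen ends up with s-proxy, s-object, s-container
    # and s-account windows, each running one server in the foreground;
    # with SWIFT_REPLICAS=3, only s-proxy runs in screen while the
    # replicated object/container/account servers stay under swift-init
    # in the background.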
    # ``swift-init ... {stop|restart}`` exits with '1' if no servers are
    # running, ignore it just in case
    swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
    if [[ ${SWIFT_REPLICAS} == 1 ]]; then
        todo="object container account"
    fi
    for type in proxy ${todo}; do
        swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
    done
    screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
    if [[ ${SWIFT_REPLICAS} == 1 ]]; then
        for type in object container account; do
            screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
        done
    fi
}

# stop_swift() - Stop running processes (non-screen)
function stop_swift() {
    # screen normally killed by unstack.sh
    if type -p swift-init >/dev/null; then
        swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
    fi
    # Dump the proxy server
    sudo pkill -f swift-proxy-server
}

# Restore xtrace
$XTRACE