Based on github.com/cloudbuilders/deploy.sh (swift branch). This is a WIP branch.
new file mode 100644
@@ -0,0 +1,19 @@
+curl
+gcc
+memcached
+memcached
+python-configobj
+python-coverage
+python-dev
+python-eventlet
+python-greenlet
+python-netifaces
+python-nose
+python-nose
+python-pastedeploy
+python-setuptools
+python-simplejson
+python-webob
+python-xattr
+sqlite3
+xfsprogs
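This list follows the files/apts/* convention: stack.sh installs the packages listed there via apt-get. The exact install invocation is not part of this diff, so the following is only an approximate sketch of how such a list gets consumed:

    # Approximation -- the real apt-get line lives elsewhere in stack.sh
    sudo apt-get install -y $(cat files/apts/swift)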
new file mode 100644
@@ -0,0 +1,19 @@
+[DEFAULT]
+devices = %NODE_PATH%/node
+mount_check = false
+bind_port = %BIND_PORT%
+user = stack
+log_facility = LOG_LOCAL%LOG_FACILITY%
+
+[pipeline:main]
+pipeline = account-server
+
+[app:account-server]
+use = egg:swift#account
+
+[account-replicator]
+vm_test_mode = yes
+
+[account-auditor]
+
+[account-reaper]
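The %NODE_PATH%, %BIND_PORT% and %LOG_FACILITY% placeholders are filled in per node by the generate_swift_configuration helper added to stack.sh further down. As an illustrative smoke test only (assuming the rendered copy ends up where this patch writes it), a generated config can be started directly:

    # Illustrative only; path as written by generate_swift_configuration
    swift-account-server /etc/swift/account-server/1.conf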
new file mode 100644
@@ -0,0 +1,21 @@
+[DEFAULT]
+devices = %NODE_PATH%/node
+mount_check = false
+bind_port = %BIND_PORT%
+user = stack
+log_facility = LOG_LOCAL%LOG_FACILITY%
+
+[pipeline:main]
+pipeline = container-server
+
+[app:container-server]
+use = egg:swift#container
+
+[container-replicator]
+vm_test_mode = yes
+
+[container-updater]
+
+[container-auditor]
+
+[container-sync]
new file mode 100644
@@ -0,0 +1,19 @@
+[DEFAULT]
+devices = %NODE_PATH%/node
+mount_check = false
+bind_port = %BIND_PORT%
+user = stack
+log_facility = LOG_LOCAL%LOG_FACILITY%
+
+[pipeline:main]
+pipeline = object-server
+
+[app:object-server]
+use = egg:swift#object
+
+[object-replicator]
+vm_test_mode = yes
+
+[object-updater]
+
+[object-auditor]
new file mode 100644
@@ -0,0 +1,25 @@
+[DEFAULT]
+bind_port = 8080
+user = stack
+log_facility = LOG_LOCAL1
+
+[pipeline:main]
+pipeline = healthcheck cache tempauth proxy-server
+
+[app:proxy-server]
+use = egg:swift#proxy
+allow_account_management = true
+
+[filter:tempauth]
+use = egg:swift#tempauth
+user_admin_admin = admin .admin .reseller_admin
+user_test_tester = testing .admin
+user_test2_tester2 = testing2 .admin
+user_test_tester3 = testing3
+bind_ip = ${MY_IP}
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+
+[filter:cache]
+use = egg:swift#memcache
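Once the proxy is actually running, the healthcheck filter and the tempauth users defined above can be exercised with curl. This is only an illustrative sketch, assuming the proxy listens on 127.0.0.1:8080 as configured:

    # Should return "200 OK" from the healthcheck middleware
    curl -i http://127.0.0.1:8080/healthcheck
    # Fetch a tempauth token for account "test", user "tester", key "testing"
    curl -i -H 'X-Storage-User: test:tester' -H 'X-Storage-Pass: testing' \
        http://127.0.0.1:8080/auth/v1.0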
new file mode 100644
@@ -0,0 +1,79 @@
+uid = stack
+gid = stack
+log file = /var/log/rsyncd.log
+pid file = /var/run/rsyncd.pid
+address = 127.0.0.1
+
+[account6012]
+max connections = 25
+path = %SWIFT_DRIVE_LOCATION%/1/node/
+read only = false
+lock file = /var/lock/account6012.lock
+
+[account6022]
+max connections = 25
+path = %SWIFT_DRIVE_LOCATION%/2/node/
+read only = false
+lock file = /var/lock/account6022.lock
+
+[account6032]
+max connections = 25
+path = %SWIFT_DRIVE_LOCATION%/3/node/
+read only = false
+lock file = /var/lock/account6032.lock
+
+[account6042]
+max connections = 25
+path = %SWIFT_DRIVE_LOCATION%/4/node/
+read only = false
+lock file = /var/lock/account6042.lock
+
+
+[container6011]
+max connections = 25
+path = %SWIFT_DRIVE_LOCATION%/1/node/
+read only = false
+lock file = /var/lock/container6011.lock
+
+[container6021]
+max connections = 25
+path = %SWIFT_DRIVE_LOCATION%/2/node/
+read only = false
+lock file = /var/lock/container6021.lock
+
+[container6031]
+max connections = 25
+path = %SWIFT_DRIVE_LOCATION%/3/node/
+read only = false
+lock file = /var/lock/container6031.lock
+
+[container6041]
+max connections = 25
+path = %SWIFT_DRIVE_LOCATION%/4/node/
+read only = false
+lock file = /var/lock/container6041.lock
+
+
+[object6010]
+max connections = 25
+path = %SWIFT_DRIVE_LOCATION%/1/node/
+read only = false
+lock file = /var/lock/object6010.lock
+
+[object6020]
+max connections = 25
+path = %SWIFT_DRIVE_LOCATION%/2/node/
+read only = false
+lock file = /var/lock/object6020.lock
+
+[object6030]
+max connections = 25
+path = %SWIFT_DRIVE_LOCATION%/3/node/
+read only = false
+lock file = /var/lock/object6030.lock
+
+[object6040]
+max connections = 25
+path = %SWIFT_DRIVE_LOCATION%/4/node/
+read only = false
+lock file = /var/lock/object6040.lock
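The rsyncd.conf above is installed to /etc/rsyncd.conf by stack.sh further down, but nothing in this diff enables or restarts the rsync daemon; on Ubuntu that step would typically look like this (not part of the patch):

    # Assumption: Ubuntu's stock rsync packaging with /etc/default/rsync
    sudo sed -i 's/RSYNC_ENABLE=false/RSYNC_ENABLE=true/' /etc/default/rsync
    sudo service rsync restart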
@@ -150,9 +150,10 @@ KEYSTONE_DIR=$DEST/keystone
 NOVACLIENT_DIR=$DEST/python-novaclient
 OPENSTACKX_DIR=$DEST/openstackx
 NOVNC_DIR=$DEST/noVNC
+SWIFT_DIR=$DEST/swift
 
 # Specify which services to launch. These generally correspond to screen tabs
-ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit}
+ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,swift}
 
 # Nova hypervisor configuration. We default to libvirt whth **kvm** but will
 # drop back to **qemu** if we are unable to load the kvm module. Stack.sh can
@@ -270,6 +271,14 @@ read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
 # Glance connection info. Note the port must be specified.
 GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292}
 
+# SWIFT
+# -----
+#
+# Location of SWIFT drives
+SWIFT_DRIVE_LOCATION=${SWIFT_DRIVE_LOCATION:-/srv}
+
+# Size of the loopback disks
+SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
 
 # Keystone
 # --------
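Both settings use the usual ${VAR:-default} pattern, so they can be overridden from the environment before stack.sh is run. The size ends up as the seek count of a bs=1024 dd further down, so the default of 1000000 yields a sparse file of about 1 GB. An illustrative override:

    # Hypothetical values -- any location/size can be substituted
    SWIFT_DRIVE_LOCATION=/opt/swift SWIFT_LOOPBACK_DISK_SIZE=2000000 ./stack.sh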
@@ -349,6 +358,8 @@ function git_clone {
 
 # compute service
 git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
+# storage service
+git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
 # image catalog service
 git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
 # unified auth system (manages accounts/tokens)
@@ -370,6 +381,7 @@ git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH
 # setup our checkouts so they are installed into python path
 # allowing ``import nova`` or ``import glance.client``
 cd $KEYSTONE_DIR; sudo python setup.py develop
+cd $SWIFT_DIR; sudo python setup.py develop
 cd $GLANCE_DIR; sudo python setup.py develop
 cd $NOVACLIENT_DIR; sudo python setup.py develop
 cd $NOVA_DIR; sudo python setup.py develop
@@ -580,6 +592,75 @@ if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then
     mkdir -p $NOVA_DIR/networks
 fi
 
+# Storage Service
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+    mkdir -p ${SWIFT_DRIVE_LOCATION}/drives
+    s=${SWIFT_DRIVE_LOCATION}/drives/sdb1  # Shortcut variable
+
+    # Create a loopback disk and format it with XFS.
+    if [[ ! -e ${SWIFT_DRIVE_LOCATION}/swift-disk ]]; then
+        dd if=/dev/zero of=${SWIFT_DRIVE_LOCATION}/swift-disk bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
+        mkfs.xfs -f -i size=1024 ${SWIFT_DRIVE_LOCATION}/swift-disk
+    fi
+
+    # Add the mountpoint to fstab
+    if ! egrep -q "^${SWIFT_DRIVE_LOCATION}/swift-disk" /etc/fstab; then
+        echo "# Added by devstack" | tee -a /etc/fstab
+        echo "${SWIFT_DRIVE_LOCATION}/swift-disk ${s} xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" | \
+            tee -a /etc/fstab
+    fi
+
+    # Create and mount drives.
+    mkdir -p ${s}
+    mount ${s}
+    mkdir ${s}/{1..4}
+
+    # Create directories
+    install -g stack -o stack -d /etc/swift/{object,container,account}-server \
+        ${SWIFT_DRIVE_LOCATION}/{1..4}/node/sdb1 /var/run/swift
+
+    # Adjust rc.local so that /var/run/swift is recreated on reboot
+    # and chowned to our user.
+    # TODO (chmou): the file may not contain an "exit 0" line to strip.
+    sed -i '/^exit 0/d' /etc/rc.local
+cat <<EOF >>/etc/rc.local
+mkdir -p /var/run/swift
+chown stack: /var/run/swift
+exit 0
+EOF
+
+    # Add rsync file
+    sed -e "s,%SWIFT_DRIVE_LOCATION%,$SWIFT_DRIVE_LOCATION," $FILES/swift-rsyncd.conf > /etc/rsyncd.conf
+
+    # Copy proxy-server configuration
+    cp $FILES/swift-proxy-server.conf /etc/swift/
+
+    # Generate swift.conf; the swift hash needs to be random
+    # and unique.
+    SWIFT_HASH=$(od -t x8 -N 8 -A n </dev/random)
+    sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift.conf > /etc/swift/swift.conf
+
+    # We need to generate an object/container/account configuration for
+    # each of the 4 emulated nodes on different ports; a little helper
+    # function does that for us.
+    function generate_swift_configuration() {
+        local server_type=$1
+        local bind_port=$2
+        local log_facility=$3
+        for node_number in {1..4}; do
+            node_path=${SWIFT_DRIVE_LOCATION}/${node_number}/node
+            sed -e "s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
+                $FILES/swift-${server_type}-server.conf > /etc/swift/${server_type}-server/${node_number}.conf
+            bind_port=$(( ${bind_port} + 10 ))
+            log_facility=$(( ${log_facility} + 1 ))
+        done
+    }
+    generate_swift_configuration object 6010 2
+    generate_swift_configuration container 6011 2
+    generate_swift_configuration account 6012 2
+
+fi
+
 # Volume Service
 # --------------
 
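For reference, the three generate_swift_configuration calls above expand to the following per-node bind ports and syslog facilities (bind_port += 10 and log_facility += 1 per node), which is also why the rsyncd.conf modules are named object6010, account6012 and so on:

    object:    6010/LOG_LOCAL2  6020/LOG_LOCAL3  6030/LOG_LOCAL4  6040/LOG_LOCAL5
    container: 6011/LOG_LOCAL2  6021/LOG_LOCAL3  6031/LOG_LOCAL4  6041/LOG_LOCAL5
    account:   6012/LOG_LOCAL2  6022/LOG_LOCAL3  6032/LOG_LOCAL4  6042/LOG_LOCAL5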
@@ -2,6 +2,10 @@
 NOVA_REPO=https://github.com/cloudbuilders/nova.git
 NOVA_BRANCH=diablo
 
+# storage service
+SWIFT_REPO=https://github.com/openstack/swift.git
+SWIFT_BRANCH=diablo
+
 # image catalog service
 GLANCE_REPO=https://github.com/cloudbuilders/glance.git
 GLANCE_BRANCH=diablo