
Merge pull request #137 from cloudbuilders/new-swift-support

Add swift support

Chmouel Boudjnah authored on 2011/11/03 17:12:06
Showing 13 changed files
... ...
@@ -191,3 +191,22 @@ nova secgroup-delete $SECGROUP
 
 # make sure that we can describe instances
 euca-describe-instances
+
+# Testing Swift
+# =============
+
+# Check that we can reach swift via keystone
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat
+
+# We start by creating a test container
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer
+
+# add some files into it.
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue
+
+# list them
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer
+
+# And we may want to delete them now that we have tested that
+# everything works.
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer
new file mode 100644
... ...
@@ -0,0 +1,17 @@
+curl
+gcc
+memcached
+python-configobj
+python-coverage
+python-dev
+python-eventlet
+python-greenlet
+python-netifaces
+python-nose
+python-pastedeploy
+python-setuptools
+python-simplejson
+python-webob
+python-xattr
+sqlite3
+xfsprogs
... ...
@@ -30,12 +30,13 @@ $BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin
 $BIN_DIR/keystone-manage $* service add nova compute "Nova Compute Service"
 $BIN_DIR/keystone-manage $* service add glance image "Glance Image Service"
 $BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service"
+$BIN_DIR/keystone-manage $* service add swift object-store "Swift Service"
 
 #endpointTemplates
 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1
 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1
 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1
-# $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1
+$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1
 
 # Tokens
 $BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00
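
The URL arguments to endpointTemplates add are the public, admin and internal URLs (the trailing 1 1 are enabled/global flags). A sketch of how the now-enabled swift entry expands, with hypothetical values %HOST_IP% = 192.168.1.10 and a tenant id of 42:

    public:   http://192.168.1.10:8080/v1/AUTH_42
    admin:    http://192.168.1.10:8080/
    internal: http://192.168.1.10:8080/v1/AUTH_42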
new file mode 100644
... ...
@@ -0,0 +1,19 @@
+[DEFAULT]
+devices = %NODE_PATH%/node
+mount_check = false
+bind_port = %BIND_PORT%
+user = %USER%
+log_facility = LOG_LOCAL%LOG_FACILITY%
+
+[pipeline:main]
+pipeline = account-server
+
+[app:account-server]
+use = egg:swift#account
+
+[account-replicator]
+vm_test_mode = yes
+
+[account-auditor]
+
+[account-reaper]
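
For reference, the generate_swift_configuration helper in the stack.sh hunk below renders four numbered copies of this template. A sketch of node 1's account config, assuming a stack user and SWIFT_LOCATION expanding to /opt/stack/swift/data (hypothetical values; the port and log facility follow from `generate_swift_configuration account 6012 2`):

    [DEFAULT]
    devices = /opt/stack/swift/data/1/node
    mount_check = false
    bind_port = 6012
    user = stack
    log_facility = LOG_LOCAL2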
new file mode 100644
... ...
@@ -0,0 +1,21 @@
+[DEFAULT]
+devices = %NODE_PATH%/node
+mount_check = false
+bind_port = %BIND_PORT%
+user = %USER%
+log_facility = LOG_LOCAL%LOG_FACILITY%
+
+[pipeline:main]
+pipeline = container-server
+
+[app:container-server]
+use = egg:swift#container
+
+[container-replicator]
+vm_test_mode = yes
+
+[container-updater]
+
+[container-auditor]
+
+[container-sync]
new file mode 100644
... ...
@@ -0,0 +1,19 @@
+[DEFAULT]
+devices = %NODE_PATH%/node
+mount_check = false
+bind_port = %BIND_PORT%
+user = %USER%
+log_facility = LOG_LOCAL%LOG_FACILITY%
+
+[pipeline:main]
+pipeline = object-server
+
+[app:object-server]
+use = egg:swift#object
+
+[object-replicator]
+vm_test_mode = yes
+
+[object-updater]
+
+[object-auditor]
new file mode 100644
... ...
@@ -0,0 +1,31 @@
+[DEFAULT]
+bind_port = 8080
+user = %USER%
+log_facility = LOG_LOCAL1
+
+[pipeline:main]
+pipeline = healthcheck cache %AUTH_SERVER% proxy-server
+
+[app:proxy-server]
+use = egg:swift#proxy
+allow_account_management = true
+account_autocreate = true
+
+[filter:keystone]
+use = egg:swiftkeystone2#keystone2
+keystone_admin_token = %SERVICE_TOKEN%
+keystone_url = http://localhost:35357/v2.0
+
+[filter:tempauth]
+use = egg:swift#tempauth
+user_admin_admin = admin .admin .reseller_admin
+user_test_tester = testing .admin
+user_test2_tester2 = testing2 .admin
+user_test_tester3 = testing3
+bind_ip = 0.0.0.0
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+
+[filter:cache]
+use = egg:swift#memcache
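
Note the %AUTH_SERVER% placeholder in the pipeline: stack.sh (below) substitutes it with either keystone or tempauth depending on whether keystone is enabled, so the rendered line becomes one of:

    pipeline = healthcheck cache keystone proxy-server
    pipeline = healthcheck cache tempauth proxy-server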
new file mode 100644
... ...
@@ -0,0 +1,79 @@
+uid = %USER%
+gid = %GROUP%
+log file = /var/log/rsyncd.log
+pid file = /var/run/rsyncd.pid
+address = 127.0.0.1
+
+[account6012]
+max connections = 25
+path = %SWIFT_LOCATION%/1/node/
+read only = false
+lock file = /var/lock/account6012.lock
+
+[account6022]
+max connections = 25
+path = %SWIFT_LOCATION%/2/node/
+read only = false
+lock file = /var/lock/account6022.lock
+
+[account6032]
+max connections = 25
+path = %SWIFT_LOCATION%/3/node/
+read only = false
+lock file = /var/lock/account6032.lock
+
+[account6042]
+max connections = 25
+path = %SWIFT_LOCATION%/4/node/
+read only = false
+lock file = /var/lock/account6042.lock
+
+
+[container6011]
+max connections = 25
+path = %SWIFT_LOCATION%/1/node/
+read only = false
+lock file = /var/lock/container6011.lock
+
+[container6021]
+max connections = 25
+path = %SWIFT_LOCATION%/2/node/
+read only = false
+lock file = /var/lock/container6021.lock
+
+[container6031]
+max connections = 25
+path = %SWIFT_LOCATION%/3/node/
+read only = false
+lock file = /var/lock/container6031.lock
+
+[container6041]
+max connections = 25
+path = %SWIFT_LOCATION%/4/node/
+read only = false
+lock file = /var/lock/container6041.lock
+
+
+[object6010]
+max connections = 25
+path = %SWIFT_LOCATION%/1/node/
+read only = false
+lock file = /var/lock/object6010.lock
+
+[object6020]
+max connections = 25
+path = %SWIFT_LOCATION%/2/node/
+read only = false
+lock file = /var/lock/object6020.lock
+
+[object6030]
+max connections = 25
+path = %SWIFT_LOCATION%/3/node/
+read only = false
+lock file = /var/lock/object6030.lock
+
+[object6040]
+max connections = 25
+path = %SWIFT_LOCATION%/4/node/
+read only = false
+lock file = /var/lock/object6040.lock
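
Note the SAIO-style module naming: account6012 is node 1's account server (port 6012), account6022 node 2's, and so on. Together with vm_test_mode = yes in the server configs above, this is what lets four emulated nodes replicate through a single rsync daemon on 127.0.0.1; a replicator then targets a port-suffixed module along the lines of (illustrative):

    127.0.0.1::object6010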
new file mode 100755
... ...
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+cd /etc/swift
+
+rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
+
+swift-ring-builder object.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
+swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1
+swift-ring-builder object.builder add z2-127.0.0.1:6020/sdb2 1
+swift-ring-builder object.builder add z3-127.0.0.1:6030/sdb3 1
+swift-ring-builder object.builder add z4-127.0.0.1:6040/sdb4 1
+swift-ring-builder object.builder rebalance
+
+swift-ring-builder container.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
+swift-ring-builder container.builder add z1-127.0.0.1:6011/sdb1 1
+swift-ring-builder container.builder add z2-127.0.0.1:6021/sdb2 1
+swift-ring-builder container.builder add z3-127.0.0.1:6031/sdb3 1
+swift-ring-builder container.builder add z4-127.0.0.1:6041/sdb4 1
+swift-ring-builder container.builder rebalance
+
+swift-ring-builder account.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
+swift-ring-builder account.builder add z1-127.0.0.1:6012/sdb1 1
+swift-ring-builder account.builder add z2-127.0.0.1:6022/sdb2 1
+swift-ring-builder account.builder add z3-127.0.0.1:6032/sdb3 1
+swift-ring-builder account.builder add z4-127.0.0.1:6042/sdb4 1
+swift-ring-builder account.builder rebalance
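
For reference, the swift-ring-builder invocations above follow the usual syntax (swift 1.4 era):

    swift-ring-builder <builder_file> create <part_power> <replicas> <min_part_hours>
    swift-ring-builder <builder_file> add z<zone>-<ip>:<port>/<device> <weight>

so each ring keeps 3 replicas spread across the 4 single-device zones and will not move a given partition more than once per hour.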
new file mode 100755
... ...
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+swift-init all restart
new file mode 100644
... ...
@@ -0,0 +1,3 @@
+[swift-hash]
+# random unique string that can never change (DO NOT LOSE)
+swift_hash_path_suffix = %SWIFT_HASH%
... ...
@@ -70,7 +70,7 @@ fi
 # called ``localrc``
 #
 # If ``localrc`` exists, then ``stackrc`` will load those settings.  This is
-# useful for changing a branch or repostiory to test other versions.  Also you
+# useful for changing a branch or repository to test other versions.  Also you
 # can store your other settings like **MYSQL_PASSWORD** or **ADMIN_PASSWORD** instead
 # of letting devstack generate random ones for you.
 source ./stackrc
... ...
@@ -121,7 +121,7 @@ if [[ $EUID -eq 0 ]]; then
     echo "Copying files to stack user"
     STACK_DIR="$DEST/${PWD##*/}"
     cp -r -f "$PWD" "$STACK_DIR"
-    chown -R stack "$STACK_DIR"
+    chown -R $USER "$STACK_DIR"
     if [[ "$SHELL_AFTER_RUN" != "no" ]]; then
         exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack
     else
... ...
@@ -150,6 +150,8 @@ KEYSTONE_DIR=$DEST/keystone
 NOVACLIENT_DIR=$DEST/python-novaclient
 OPENSTACKX_DIR=$DEST/openstackx
 NOVNC_DIR=$DEST/noVNC
+SWIFT_DIR=$DEST/swift
+SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2
 QUANTUM_DIR=$DEST/quantum
 
 # Default Quantum Plugin
... ...
@@ -243,7 +245,7 @@ MULTI_HOST=${MULTI_HOST:-0}
 # If you are running on a single node and don't need to access the VMs from
 # devices other than that node, you can set the flat interface to the same
 # value as ``FLAT_NETWORK_BRIDGE``.  This will stop the network hiccup from
-# occuring.
+# occurring.
 FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
 
 ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
... ...
@@ -285,6 +287,34 @@ read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
 # Glance connection info.  Note the port must be specified.
 GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292}
 
+# SWIFT
+# -----
+# TODO: implement glance support
+# TODO: add logging to different location.
+
+# By default the swift drives and objects are located inside the
+# swift source directory. The SWIFT_LOCATION variable allows you to
+# redefine this.
+SWIFT_LOCATION=${SWIFT_LOCATION:-${SWIFT_DIR}/data}
+
+# devstack will create a loop-back disk formatted as XFS to store the
+# swift data. By default the disk size is about 1 gigabyte. The
+# SWIFT_LOOPBACK_DISK_SIZE variable, given in KB (it is used as a dd
+# seek= count with bs=1024), allows you to change that.
+SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
+
+# The ring uses a configurable number of bits from a path's MD5 hash as
+# a partition index that designates a device. The number of bits kept
+# from the hash is known as the partition power, and 2 to the partition
+# power indicates the partition count. Partitioning the full MD5 hash
+# ring allows other parts of the cluster to work in batches of items at
+# once, which ends up either more efficient or at least less complex than
+# working with each item separately or the entire cluster all at once.
+# By default we use a partition power of 9 (which means 512 partitions).
+SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
+
+# SWIFT_HASH is a random unique string for a swift cluster that can never change.
+read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
 
 # Keystone
 # --------
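
These defaults can be overridden via the ``localrc`` mechanism described earlier in this file; a sketch with hypothetical values:

    # localrc
    SWIFT_LOCATION=/srv/swift-data
    SWIFT_LOOPBACK_DISK_SIZE=2000000    # ~2 GB (2000000 KB blocks)
    SWIFT_PARTITION_POWER_SIZE=10       # 2^10 = 1024 partitions
    SWIFT_HASH=0123456789abcdef         # e.g. from: openssl rand -hex 16

SWIFT_HASH can be pre-seeded the same way, in which case read_password should skip the prompt.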
... ...
@@ -298,7 +328,7 @@ read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (
 LOGFILE=${LOGFILE:-"$PWD/stack.sh.$$.log"}
 (
 # So that errors don't compound we exit on any errors so you see only the
-# first error that occured.
+# first error that occurred.
 trap failed ERR
 failed() {
     local r=$?
... ...
@@ -364,6 +394,10 @@ function git_clone {
 
 # compute service
 git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
+# storage service
+git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
+# swift + keystone middleware
+git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH
 # image catalog service
 git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
 # unified auth system (manages accounts/tokens)
... ...
@@ -387,6 +421,8 @@ git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
 # setup our checkouts so they are installed into python path
 # allowing ``import nova`` or ``import glance.client``
 cd $KEYSTONE_DIR; sudo python setup.py develop
+cd $SWIFT_DIR; sudo python setup.py develop
+cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop
 cd $GLANCE_DIR; sudo python setup.py develop
 cd $NOVACLIENT_DIR; sudo python setup.py develop
 cd $NOVA_DIR; sudo python setup.py develop
... ...
@@ -598,6 +634,122 @@ if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then
     mkdir -p $NOVA_DIR/networks
 fi
 
+# Storage Service
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+    # We first do a bit of setup by creating the directories and
+    # changing the permissions so we can run it as our user.
+
+    USER_GROUP=$(id -g)
+    sudo mkdir -p ${SWIFT_LOCATION}/drives
+    sudo chown -R $USER:${USER_GROUP} ${SWIFT_LOCATION}/drives
+
+    # We then create a loopback disk and format it to XFS.
+    if [[ ! -e ${SWIFT_LOCATION}/drives/images/swift.img ]];then
+        mkdir -p ${SWIFT_LOCATION}/drives/images
+        sudo touch ${SWIFT_LOCATION}/drives/images/swift.img
+        sudo chown $USER: ${SWIFT_LOCATION}/drives/images/swift.img
+
+        dd if=/dev/zero of=${SWIFT_LOCATION}/drives/images/swift.img \
+            bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
+        mkfs.xfs -f -i size=1024 ${SWIFT_LOCATION}/drives/images/swift.img
+    fi
+
+    # Once the drive is created we mount the disk with a few mount
+    # options to make it as efficient as possible for swift.
+    mkdir -p ${SWIFT_LOCATION}/drives/sdb1
+    if ! egrep -q ${SWIFT_LOCATION}/drives/sdb1 /proc/mounts;then
+        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
+            ${SWIFT_LOCATION}/drives/images/swift.img ${SWIFT_LOCATION}/drives/sdb1
+    fi
+
+    # We then create links to that mounted location so swift knows
+    # where to go.
+    for x in {1..4}; do sudo ln -sf ${SWIFT_LOCATION}/drives/sdb1/$x ${SWIFT_LOCATION}/$x; done
+
+    # We now have to emulate a few different servers on a single one;
+    # create all the directories needed for swift.
+    tmpd=""
+    for d in ${SWIFT_LOCATION}/drives/sdb1/{1..4} /etc/swift /etc/swift/{object,container,account}-server \
+        ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift ;do
+        [[ -d $d ]] && continue
+        sudo install -o ${USER} -g $USER_GROUP -d $d
+    done
+
+    sudo chown -R $USER: ${SWIFT_LOCATION}/{1..4}/node
+
+    # Swift uses rsync to synchronize data between the different
+    # partitions (which makes more sense when you have a multi-node
+    # setup); we configure it with our own rsyncd.conf.
+    sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_LOCATION%,$SWIFT_LOCATION," $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
+    sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
+
+    # By default Swift is installed with the tempauth middleware, which
+    # ships some default usernames and passwords; if keystone is
+    # enabled we use the keystone middleware instead.
+    if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
+        swift_auth_server=keystone
+        # We need a special version of bin/swift that understands the
+        # OpenStack 2.0 auth API; we download it until this gets
+        # integrated into swift.
+        sudo curl -s -o/usr/local/bin/swift \
+            'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e'
+    else
+        swift_auth_server=tempauth
+    fi
+
+    # We install the proxy-server and swift configuration, replacing
+    # a few directives to match our setup.
+    sed "s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \
+        $FILES/swift/proxy-server.conf|sudo tee /etc/swift/proxy-server.conf
+
+    sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > /etc/swift/swift.conf
+
+    # We need to generate object/container/account configurations,
+    # emulating 4 nodes on different ports; we have a little function
+    # to help us do that.
+    function generate_swift_configuration() {
+        local server_type=$1
+        local bind_port=$2
+        local log_facility=$3
+        local node_number
+
+        for node_number in {1..4};do
+            node_path=${SWIFT_LOCATION}/${node_number}
+            sed -e "s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
+                $FILES/swift/${server_type}-server.conf > /etc/swift/${server_type}-server/${node_number}.conf
+            bind_port=$(( ${bind_port} + 10 ))
+            log_facility=$(( ${log_facility} + 1 ))
+        done
+    }
+    generate_swift_configuration object 6010 2
+    generate_swift_configuration container 6011 2
+    generate_swift_configuration account 6012 2
+
+    # We create two helper scripts:
+    #
+    # - swift-remakerings
+    #   Recreates the rings from scratch.
+    # - swift-startmain
+    #   Restarts your full cluster.
+    #
+    sed -e "s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift/swift-remakerings | \
+        sudo tee /usr/local/bin/swift-remakerings
+    sudo install -m755 $FILES/swift/swift-startmain /usr/local/bin/
+    sudo chmod +x /usr/local/bin/swift-*
+
+    # We can now start rsync.
+    sudo /etc/init.d/rsync restart || :
+
+    # Create our rings for object/container/account.
+    /usr/local/bin/swift-remakerings
+
+    # And now we launch swift-startmain to get our cluster running,
+    # ready to be tested.
+    /usr/local/bin/swift-startmain || :
+
+    unset s swift_hash swift_auth_server tmpd
+fi
+
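
After stack.sh completes, a quick sanity check of what this block set up (a sketch; paths assume the default SWIFT_LOCATION):

    grep ${SWIFT_LOCATION}/drives/sdb1 /proc/mounts    # loopback XFS disk mounted
    ls /etc/swift/{object,container,account}-server    # four rendered .conf files each
    ls /etc/swift/*.ring.gz                            # rings built by swift-remakerings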
 # Volume Service
 # --------------
 
... ...
@@ -741,16 +893,16 @@ fi
 function screen_it {
     NL=`echo -ne '\015'`
     if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then
-        screen -S nova -X screen -t $1
-        screen -S nova -p $1 -X stuff "$2$NL"
+        screen -S stack -X screen -t $1
+        screen -S stack -p $1 -X stuff "$2$NL"
     fi
 }
 
 # create a new named screen to run processes in
-screen -d -m -S nova -t nova
+screen -d -m -S stack -t stack
 sleep 1
 
-# launch the glance registery service
+# launch the glance registry service
 if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
     screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf"
 fi
756 756
 fi
... ...
@@ -858,7 +1010,7 @@ screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log"
858 858
 # TTY also uses cloud-init, supporting login via keypair and sending scripts as
859 859
 # userdata.  See https://help.ubuntu.com/community/CloudInit for more on cloud-init
860 860
 #
861
-# Override ``IMAGE_URLS`` with a comma-seperated list of uec images.
861
+# Override ``IMAGE_URLS`` with a comma-separated list of uec images.
862 862
 #
863 863
 #  * **natty**: http://uec-images.ubuntu.com/natty/current/natty-server-cloudimg-amd64.tar.gz
864 864
 #  * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz
... ...
@@ -2,6 +2,14 @@
 NOVA_REPO=https://github.com/cloudbuilders/nova.git
 NOVA_BRANCH=diablo
 
+# storage service
+SWIFT_REPO=https://github.com/openstack/swift.git
+SWIFT_BRANCH=1.4.3
+
+# swift and keystone integration
+SWIFT_KEYSTONE_REPO=https://github.com/cloudbuilders/swift-keystone2.git
+SWIFT_KEYSTONE_BRANCH=master
+
 # image catalog service
 GLANCE_REPO=https://github.com/cloudbuilders/glance.git
 GLANCE_BRANCH=diablo