Browse code

Add documentation and fixes.

- Fix some spelling mistakes in the documentation.
- Add swift documentation.
- Try to make the code more explicit (i.e. remove shortcut variables).

Chmouel Boudjnah authored on 2011/11/03 01:57:11
Showing 1 changed files
... ...
@@ -70,7 +70,7 @@ fi
70 70
 # called ``localrc``
71 71
 #
72 72
 # If ``localrc`` exists, then ``stackrc`` will load those settings.  This is
73
-# useful for changing a branch or repostiory to test other versions.  Also you
73
+# useful for changing a branch or repository to test other versions.  Also you
74 74
 # can store your other settings like **MYSQL_PASSWORD** or **ADMIN_PASSWORD** instead
75 75
 # of letting devstack generate random ones for you.
76 76
 source ./stackrc
... ...
@@ -241,7 +241,7 @@ MULTI_HOST=${MULTI_HOST:-0}
241 241
 # If you are running on a single node and don't need to access the VMs from
242 242
 # devices other than that node, you can set the flat interface to the same
243 243
 # value as ``FLAT_NETWORK_BRIDGE``.  This will stop the network hiccup from
244
-# occuring.
244
+# occurring.
245 245
 FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
246 246
 
247 247
 ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
... ...
@@ -274,17 +274,31 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292}
274 274
 
275 275
 # SWIFT
276 276
 # -----
277
+# TODO: implement glance support
278
+# TODO: add logging to a different location.
277 279
 
278
-# Location of SWIFT drives
280
+# By default the swift drives and objects are stored inside the
281
+# swift source directory. The SWIFT_LOCATION variable allows you to
282
+# redefine this.
279 283
 SWIFT_LOCATION=${SWIFT_LOCATION:-${SWIFT_DIR}/data}
280 284
 
281
-# Size of the loopback disks
285
+# devstack will create a loop-back disk formatted as XFS to store the
286
+# swift data. By default the disk size is 1 gigabyte. The variable
287
+# SWIFT_LOOPBACK_DISK_SIZE, specified in bytes, allows you to change
288
+# that.
282 289
 SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
283 290
 
284
-# Default partition power size (bigger is slower)
291
+# The ring uses a configurable number of bits from a path’s MD5 hash as
292
+# a partition index that designates a device. The number of bits kept
293
+# from the hash is known as the partition power, and 2 to the partition
294
+# power indicates the partition count. Partitioning the full MD5 hash
295
+# ring allows other parts of the cluster to work in batches of items at
296
+# once which ends up either more efficient or at least less complex than
297
+# working with each item separately or the entire cluster all at once.
298
+# By default we define 9 for the partition power (which means 512 partitions).
285 299
 SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
286 300
 
287
-# Swift hash, this must be unique
301
+# SWIFT_HASH is a random unique string for a swift cluster that can never change.
288 302
 read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
289 303
 
290 304
 # Keystone
... ...
@@ -299,7 +313,7 @@ read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (
299 299
 LOGFILE=${LOGFILE:-"$PWD/stack.sh.$$.log"}
300 300
 (
301 301
 # So that errors don't compound we exit on any errors so you see only the
302
-# first error that occured.
302
+# first error that occurred.
303 303
 trap failed ERR
304 304
 failed() {
305 305
     local r=$?
... ...
@@ -604,13 +618,14 @@ fi
604 604
 
605 605
 # Storage Service
606 606
 if [[ "$ENABLED_SERVICES" =~ "swift" ]];then
607
+    # We first do a bit of setup by creating the directories and
608
+    # changing the permissions so we can run it as our user.
609
+
607 610
     USER_GROUP=$(id -g)
608
-    
609 611
     sudo mkdir -p ${SWIFT_LOCATION}/drives
610
-    sudo chown -R $USER: ${SWIFT_LOCATION}/drives
611
-    s=${SWIFT_LOCATION}/drives/sdb1 # Shortcut variable
612
+    sudo chown -R $USER:${USER_GROUP} ${SWIFT_LOCATION}/drives
612 613
     
613
-    # Create a loopback disk and format it with XFS.
614
+    # We then create a loopback disk and format it as XFS.
614 615
     if [[ ! -e ${SWIFT_LOCATION}/drives/images/swift.img ]];then
615 616
         mkdir -p  ${SWIFT_LOCATION}/drives/images
616 617
         sudo touch  ${SWIFT_LOCATION}/drives/images/swift.img
... ...
@@ -621,18 +636,22 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then
621 621
         mkfs.xfs -f -i size=1024  ${SWIFT_LOCATION}/drives/images/swift.img
622 622
     fi
623 623
 
624
-    # Create and mount drives.
625
-    mkdir -p ${s} 
626
-    if ! egrep -q "$s" /proc/mounts;then
624
+    # After the drive is created we mount the disk with a few mount
625
+    # options to make it as efficient as possible for swift.
626
+    mkdir -p ${SWIFT_LOCATION}/drives/sdb1
627
+    if ! egrep -q ${SWIFT_LOCATION}/drives/sdb1 /proc/mounts;then
627 628
         sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
628
-            ${SWIFT_LOCATION}/drives/images/swift.img ${s}
629
+            ${SWIFT_LOCATION}/drives/images/swift.img ${SWIFT_LOCATION}/drives/sdb1
629 630
     fi
630 631
 
632
+    # We then create links to that mounted location so swift knows
633
+    # where to go.
631 634
     for x in {1..4}; do sudo ln -sf $s/$x ${SWIFT_LOCATION}/$x; done
632 635
     
633
-    # Create directories
636
+    # Since we emulate a few different servers on a single node, we
637
+    # create all the directories needed for swift.
634 638
     tmpd=""
635
-    for d in ${s}/{1..4} /etc/swift /etc/swift/{object,container,account}-server \
639
+    for d in ${SWIFT_LOCATION}/drives/sdb1/{1..4} /etc/swift /etc/swift/{object,container,account}-server \
636 640
         ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift ;do
637 641
         [[ -d $d ]] && continue
638 642
         sudo install -o ${USER} -g $USER_GROUP -d $d
... ...
@@ -640,28 +659,35 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then
640 640
 
641 641
     sudo chown -R $USER: ${SWIFT_LOCATION}/{1..4}/node
642 642
 
643
-   # Add rsync file
643
+   # Swift uses rsync to synchronize between all the different
644
+   # partitions (which makes more sense when you have a multi-node
645
+   # setup); we configure it with our own rsync configuration.
644 646
    sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_LOCATION%,$SWIFT_LOCATION," $FILES/swift-rsyncd.conf | sudo tee /etc/rsyncd.conf
645 647
    sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
646 648
 
649
+   # By default Swift will be installed with the tempauth middleware,
650
+   # which has some default usernames and passwords. If keystone is
651
+   # enabled we use it as the auth server instead.
647 652
    if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
648 653
        swift_auth_server=keystone
649
-       # Temporary until we get this integrated in swift.
654
+       # We need a special version of bin/swift which understands the
655
+       # OpenStack api 2.0; we download it until this gets
656
+       # integrated into swift.
650 657
        sudo curl -s -o/usr/local/bin/swift \
651 658
            'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e'
652 659
    else
653 660
        swift_auth_server=tempauth
654 661
    fi
655 662
 
663
+   # We install the proxy-server and swift configuration files,
664
+   # replacing a few directives to match our setup.
656 665
    sed "s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \
657 666
        $FILES/swift-proxy-server.conf|sudo tee  /etc/swift/proxy-server.conf
658 667
 
659
-   # Generate swift.conf, we need to have the swift-hash being random
660
-   # and unique.
661 668
    sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift.conf > /etc/swift/swift.conf
662 669
 
663 670
    # We need to generate a object/account/proxy configuration
664
-   # emulating 4 nodes on different ports we have a litle function
671
+   # emulating 4 nodes on different ports we have a little function
665 672
    # that help us doing that.
666 673
    function generate_swift_configuration() {
667 674
        local server_type=$1
... ...
@@ -681,23 +707,28 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then
681 681
    generate_swift_configuration container 6011 2
682 682
    generate_swift_configuration account 6012 2
683 683
 
684
-   # Install swift helper scripts to remake the rings and start all services.
684
+   # We create two helper scripts:
685
+   #
686
+   # - swift-remakerings
687
+   #   Allows recreating the rings from scratch.
688
+   # - swift-startmain
689
+   #   Restart your full cluster.
690
+   #
685 691
    sed -e "s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift-remakerings | \
686 692
        sudo tee /usr/local/bin/swift-remakerings
687 693
    sudo install -m755 $FILES/swift-startmain /usr/local/bin/
688 694
    sudo chmod +x /usr/local/bin/swift-*
689 695
 
690
-   # Start rsync
696
+   # We can then start rsync.
691 697
    sudo /etc/init.d/rsync restart || :
692 698
       
693
-   # Create ring
699
+   # Create our ring for the object/container/account.
694 700
    /usr/local/bin/swift-remakerings
695 701
 
696
-   # Start everything
702
+   # And now we launch swift-startmain to get our cluster running
703
+   # and ready to be tested.
697 704
    /usr/local/bin/swift-startmain || :
698 705
    
699
-   # This should work (tempauth)
700
-   # swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K testing stat
701 706
    unset s swift_hash swift_auth_server tmpd
702 707
 fi
703 708
 
... ...
@@ -851,7 +882,7 @@ function screen_it {
851 851
 screen -d -m -S stack -t stack
852 852
 sleep 1
853 853
 
854
-# launch the glance registery service
854
+# launch the glance registry service
855 855
 if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
856 856
     screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf"
857 857
 fi
... ...
@@ -908,7 +939,7 @@ screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log"
908 908
 # TTY also uses cloud-init, supporting login via keypair and sending scripts as
909 909
 # userdata.  See https://help.ubuntu.com/community/CloudInit for more on cloud-init
910 910
 #
911
-# Override ``IMAGE_URLS`` with a comma-seperated list of uec images.
911
+# Override ``IMAGE_URLS`` with a comma-separated list of uec images.
912 912
 #
913 913
 #  * **natty**: http://uec-images.ubuntu.com/natty/current/natty-server-cloudimg-amd64.tar.gz
914 914
 #  * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz