Browse code

Merge branch 'master' into syslog

James E. Blair authored on 2011/10/27 04:45:52
Showing 13 changed files
... ...
@@ -19,44 +19,8 @@ set -o xtrace
19 19
 # Settings
20 20
 # ========
21 21
 
22
-# Use stackrc and localrc for settings
23
-source ./stackrc
24
-
25
-HOST=${HOST:-localhost}
26
-
27
-# Nova original used project_id as the *account* that owned resources (servers,
28
-# ip address, ...)   With the addition of Keystone we have standardized on the
29
-# term **tenant** as the entity that owns the resources.  **novaclient** still
30
-# uses the old deprecated terms project_id.  Note that this field should now be
31
-# set to tenant_name, not tenant_id.
32
-export NOVA_PROJECT_ID=${TENANT:-demo}
33
-
34
-# In addition to the owning entity (tenant), nova stores the entity performing
35
-# the action as the **user**.
36
-export NOVA_USERNAME=${USERNAME:-demo}
37
-
38
-# With Keystone you pass the keystone password instead of an api key.
39
-export NOVA_API_KEY=${ADMIN_PASSWORD:-secrete}
40
-
41
-# With the addition of Keystone, to use an openstack cloud you should
42
-# authenticate against keystone, which returns a **Token** and **Service
43
-# Catalog**.  The catalog contains the endpoint for all services the user/tenant
44
-# has access to - including nova, glance, keystone, swift, ...  We currently
45
-# recommend using the 2.0 *auth api*.
46
-#
47
-# *NOTE*: Using the 2.0 *auth api* does mean that compute api is 2.0.  We will
48
-# use the 1.1 *compute api*
49
-export NOVA_URL=${NOVA_URL:-http://$HOST:5000/v2.0/}
50
-
51
-# Currently novaclient needs you to specify the *compute api* version.  This
52
-# needs to match the config of your catalog returned by Keystone.
53
-export NOVA_VERSION=1.1
54
-
55
-# FIXME - why does this need to be specified?
56
-export NOVA_REGION_NAME=RegionOne
57
-
58
-# set log level to DEBUG (helps debug issues)
59
-export NOVACLIENT_DEBUG=1
22
+# Use openrc + stackrc + localrc for settings
23
+source ./openrc
60 24
 
61 25
 # Get a token for clients that don't support service catalog
62 26
 # ==========================================================
... ...
@@ -65,7 +29,7 @@ export NOVACLIENT_DEBUG=1
65 65
 # returns a token and catalog of endpoints.  We use python to parse the token
66 66
 # and save it.
67 67
 
68
-TOKEN=`curl -s -d  "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
68
+TOKEN=`curl -s -d  "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
69 69
 
70 70
 # Launching a server
71 71
 # ==================
... ...
@@ -95,8 +59,8 @@ nova secgroup-list
95 95
 # Create a secgroup
96 96
 nova secgroup-create $SECGROUP "test_secgroup description"
97 97
 
98
-# Flavors
99
-# -------
98
+# determine flavor
99
+# ----------------
100 100
 
101 101
 # List of flavors:
102 102
 nova flavor-list
... ...
@@ -108,6 +72,16 @@ NAME="myserver"
108 108
 
109 109
 nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
110 110
 
111
+# Testing
112
+# =======
113
+
114
+# First check if it spins up (becomes active and responds to ping on
115
+# internal ip).  If you run this script from a nova node, you should
116
+# bypass security groups and have direct access to the server.
117
+
118
+# Waiting for boot
119
+# ----------------
120
+
111 121
 # let's give it 10 seconds to launch
112 122
 sleep 10
113 123
 
... ...
@@ -117,15 +91,23 @@ nova show $NAME | grep status | grep -q ACTIVE
117 117
 # get the IP of the server
118 118
 IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`
119 119
 
120
-# ping it once (timeout of a second)
121
-ping -c1 -w1 $IP || true
120
+# for single node deployments, we can ping private ips
121
+MULTI_HOST=${MULTI_HOST:-0}
122
+if [ "$MULTI_HOST" = "0" ]; then
123
+    # ping it once (timeout of a second)
124
+    ping -c1 -w1 $IP || true
122 125
 
123
-# sometimes the first ping fails (10 seconds isn't enough time for the VM's
124
-# network to respond?), so let's wait 5 seconds and really test ping
125
-sleep 5
126
+    # sometimes the first ping fails (10 seconds isn't enough time for the VM's
127
+    # network to respond?), so let's wait 5 seconds and really test ping
128
+    sleep 5
126 129
 
127
-ping -c1 -w1 $IP
128
-# allow icmp traffic
130
+    ping -c1 -w1 $IP
131
+fi
132
+
133
+# Security Groups & Floating IPs
134
+# ------------------------------
135
+
136
+# allow icmp traffic (ping)
129 137
 nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
130 138
 
131 139
 # List rules for a secgroup
... ...
@@ -135,31 +117,31 @@ nova secgroup-list-rules $SECGROUP
135 135
 nova floating-ip-create
136 136
 
137 137
 # store  floating address
138
-FIP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
138
+FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
139 139
 
140 140
 # add floating ip to our server
141
-nova add-floating-ip $NAME $FIP
141
+nova add-floating-ip $NAME $FLOATING_IP
142 142
 
143 143
 # sleep for a smidge
144
-sleep 1
144
+sleep 5
145 145
 
146
-# ping our fip
147
-ping -c1 -w1 $FIP
146
+# ping our floating ip
147
+ping -c1 -w1 $FLOATING_IP
148 148
 
149
-# dis-allow icmp traffic
149
+# dis-allow icmp traffic (ping)
150 150
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
151 151
 
152 152
 # sleep for a smidge
153
-sleep 1
153
+sleep 5
154 154
 
155
-# ping our fip
156
-if ( ping -c1 -w1 $FIP); then
155
+# ping our floating ip
156
+if ( ping -c1 -w1 $FLOATING_IP ); then
157 157
     print "Security group failure - ping should not be allowed!"
158 158
     exit 1
159 159
 fi
160 160
 
161 161
 # de-allocate the floating ip
162
-nova floating-ip-delete $FIP
162
+nova floating-ip-delete $FLOATING_IP
163 163
 
164 164
 # shutdown the server
165 165
 nova delete $NAME
... ...
@@ -169,3 +151,9 @@ nova secgroup-delete $SECGROUP
169 169
 
170 170
 # FIXME: validate shutdown within 5 seconds
171 171
 # (nova show $NAME returns 1 or status != ACTIVE)?
172
+
173
+# Testing Euca2ools
174
+# ==================
175
+
176
+# make sure that we can describe instances
177
+euca-describe-instances
... ...
@@ -16,3 +16,4 @@ iputils-ping
16 16
 wget
17 17
 curl
18 18
 tcpdump
19
+euca2ools # only for testing client
... ...
@@ -33,3 +33,7 @@ python-suds
33 33
 python-lockfile
34 34
 python-m2crypto
35 35
 python-boto
36
+
37
+# Stuff for diablo volumes
38
+iscsitarget
39
+lvm2
... ...
@@ -36,8 +36,8 @@ $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOS
36 36
 # Tokens
37 37
 $BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00
38 38
 
39
-# EC2 related creds - note we are setting the token to user_password
39
+# EC2 related creds - note we are setting the secret key to ADMIN_PASSWORD
40 40
 # but keystone doesn't parse them - it is just a blob from keystone's
41 41
 # point of view
42
-$BIN_DIR/keystone-manage $* credentials add admin EC2 'admin_%ADMIN_PASSWORD%' admin admin || echo "no support for adding credentials"
43
-$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo_%ADMIN_PASSWORD%' demo demo || echo "no support for adding credentials"
42
+$BIN_DIR/keystone-manage $* credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials"
43
+$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials"
44 44
new file mode 100644
... ...
@@ -0,0 +1,51 @@
0
+#!/usr/bin/env bash
1
+
2
+# Load local configuration
3
+source ./stackrc
4
+
5
+# Set api host endpoint
6
+HOST_IP=${HOST_IP:-127.0.0.1}
7
+
8
+# Nova originally used project_id as the *account* that owned resources (servers,
9
+# ip address, ...)   With the addition of Keystone we have standardized on the
10
+# term **tenant** as the entity that owns the resources.  **novaclient** still
11
+# uses the old deprecated term project_id.  Note that this field should now be
12
+# set to tenant_name, not tenant_id.
13
+export NOVA_PROJECT_ID=${TENANT:-demo}
14
+
15
+# In addition to the owning entity (tenant), nova stores the entity performing
16
+# the action as the **user**.
17
+export NOVA_USERNAME=${USERNAME:-demo}
18
+
19
+# With Keystone you pass the keystone password instead of an api key.
20
+export NOVA_API_KEY=${ADMIN_PASSWORD:-secrete}
21
+
22
+# With the addition of Keystone, to use an openstack cloud you should
23
+# authenticate against keystone, which returns a **Token** and **Service
24
+# Catalog**.  The catalog contains the endpoint for all services the user/tenant
25
+# has access to - including nova, glance, keystone, swift, ...  We currently
26
+# recommend using the 2.0 *auth api*.
27
+#
28
+# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0.  We
29
+# will use the 1.1 *compute api*
30
+export NOVA_URL=${NOVA_URL:-http://$HOST_IP:5000/v2.0/}
31
+
32
+# Currently novaclient needs you to specify the *compute api* version.  This
33
+# needs to match the config of your catalog returned by Keystone.
34
+export NOVA_VERSION=${NOVA_VERSION:-1.1}
35
+
36
+# FIXME - why does this need to be specified?
37
+export NOVA_REGION_NAME=${NOVA_REGION_NAME:-RegionOne}
38
+
39
+# Set the ec2 url so euca2ools works
40
+export EC2_URL=${EC2_URL:-http://$HOST_IP:8773/services/Cloud}
41
+
42
+# Access key is set in the initial keystone data to be the same as username
43
+export EC2_ACCESS_KEY=${USERNAME:-demo}
44
+
45
+# Secret key is set in the initial keystone data to the admin password
46
+export EC2_SECRET_KEY=${ADMIN_PASSWORD:-secrete}
47
+
48
+# set log level to DEBUG (helps debug issues)
49
+# export NOVACLIENT_DEBUG=1
50
+
... ...
@@ -305,20 +305,31 @@ sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install `cat $FILES/pips/*`
305 305
 # be owned by the installation user, we create the directory and change the
306 306
 # ownership to the proper user.
307 307
 function git_clone {
308
-    # if there is an existing checkout, move it out of the way
309
-    if [[ "$RECLONE" == "yes" ]]; then
310
-        # FIXME(ja): if we were smarter we could speed up RECLONE by
311
-        # using the old git repo as the basis of our new clone...
312
-        if [ -d $2 ]; then
313
-            mv $2 /tmp/stack.`date +%s`
314
-        fi
315
-    fi
316 308
 
317
-    if [ ! -d $2 ]; then
318
-        git clone $1 $2
309
+    GIT_REMOTE=$1
310
+    GIT_DEST=$2
311
+    GIT_BRANCH=$3
312
+
313
+    # do a full clone only if the directory doesn't exist
314
+    if [ ! -d $GIT_DEST ]; then
315
+        git clone $GIT_REMOTE $GIT_DEST
319 316
         cd $2
320 317
         # This checkout syntax works for both branches and tags
321
-        git checkout $3
318
+        git checkout $GIT_BRANCH
319
+    elif [[ "$RECLONE" == "yes" ]]; then
320
+        # if it does exist then simulate what clone does if asked to RECLONE
321
+        cd $GIT_DEST
322
+        # set the url to pull from and fetch
323
+        git remote set-url origin $GIT_REMOTE
324
+        git fetch origin
325
+        # remove the existing ignored files (like pyc) as they cause breakage
326
+        # (due to the py files having older timestamps than our pyc, so python
327
+        # thinks the pyc files are correct using them)
328
+        sudo git clean -f -d
329
+        git checkout -f origin/$GIT_BRANCH
330
+        # a local branch might not exist
331
+        git branch -D $GIT_BRANCH || true
332
+        git checkout -b $GIT_BRANCH
322 333
     fi
323 334
 }
324 335
 
... ...
@@ -475,11 +486,15 @@ fi
475 475
 # Nova
476 476
 # ----
477 477
 
478
-# We are going to use the sample http middleware configuration from the keystone
479
-# project to launch nova.  This paste config adds the configuration required
480
-# for nova to validate keystone tokens - except we need to switch the config
481
-# to use our service token instead (instead of the invalid token 999888777666).
482
-sudo sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $KEYSTONE_DIR/examples/paste/nova-api-paste.ini
478
+if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
479
+    # We are going to use the sample http middleware configuration from the
480
+    # keystone project to launch nova.  This paste config adds the configuration
481
+    # required for nova to validate keystone tokens - except we need to switch
482
+    # the config to use our service token instead (instead of the invalid token
483
+    # 999888777666).
484
+    cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin
485
+    sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
486
+fi
483 487
 
484 488
 if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
485 489
 
... ...
@@ -552,6 +567,31 @@ if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then
552 552
     mkdir -p $NOVA_DIR/networks
553 553
 fi
554 554
 
555
+# Volume Service
556
+# --------------
557
+
558
+if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
559
+    #
560
+    # Configure a default volume group called 'nova-volumes' for the nova-volume
561
+    # service if it does not yet exist.  If you don't wish to use a file backed
562
+    # volume group, create your own volume group called 'nova-volumes' before
563
+    # invoking stack.sh.
564
+    #
565
+    # By default, the backing file is 2G in size, and is stored in /opt/stack.
566
+    #
567
+    if ! sudo vgdisplay | grep -q nova-volumes; then
568
+        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-/opt/stack/nova-volumes-backing-file}
569
+        VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
570
+        truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
571
+        DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
572
+        sudo vgcreate nova-volumes $DEV
573
+    fi
574
+
575
+    # Configure iscsitarget
576
+    sudo sed 's/ISCSITARGET_ENABLE=false/ISCSITARGET_ENABLE=true/' -i /etc/default/iscsitarget
577
+    sudo /etc/init.d/iscsitarget restart
578
+fi
579
+
555 580
 function add_nova_flag {
556 581
     echo "$1" >> $NOVA_DIR/bin/nova.conf
557 582
 }
... ...
@@ -571,7 +611,7 @@ add_nova_flag "--libvirt_type=$LIBVIRT_TYPE"
571 571
 add_nova_flag "--osapi_extensions_path=$OPENSTACKX_DIR/extensions"
572 572
 add_nova_flag "--vncproxy_url=http://$HOST_IP:6080"
573 573
 add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/"
574
-add_nova_flag "--api_paste_config=$KEYSTONE_DIR/examples/paste/nova-api-paste.ini"
574
+add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini"
575 575
 add_nova_flag "--image_service=nova.image.glance.GlanceImageService"
576 576
 add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST"
577 577
 add_nova_flag "--rabbit_host=$RABBIT_HOST"
... ...
@@ -583,6 +623,7 @@ if [ -n "$FLAT_INTERFACE" ]; then
583 583
 fi
584 584
 if [ -n "$MULTI_HOST" ]; then
585 585
     add_nova_flag "--multi_host=$MULTI_HOST"
586
+    add_nova_flag "--send_arp_for_ha=1"
586 587
 fi
587 588
 if [ "$SYSLOG" != "False" ]; then
588 589
     add_nova_flag "--use_syslog=1"
... ...
@@ -695,6 +736,7 @@ fi
695 695
 # within the context of our original shell (so our groups won't be updated).
696 696
 # Use 'sg' to execute nova-compute as a member of the libvirtd group.
697 697
 screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute"
698
+screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume"
698 699
 screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network"
699 700
 screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler"
700 701
 screen_it n-vnc "cd $NOVNC_DIR && ./utils/nova-wsproxy.py 6080 --web . --flagfile=../nova/bin/nova.conf"
... ...
@@ -722,8 +764,8 @@ if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
722 722
 
723 723
     for image_url in ${IMAGE_URLS//,/ }; do
724 724
         # Downloads the image (uec ami+aki style), then extracts it.
725
-        IMAGE_FNAME=`echo "$image_url" | python -c "import sys; print sys.stdin.read().split('/')[-1]"`
726
-        IMAGE_NAME=`echo "$IMAGE_FNAME" | python -c "import sys; print sys.stdin.read().split('.tar.gz')[0].split('.tgz')[0]"`
725
+        IMAGE_FNAME=`basename "$image_url"`
726
+        IMAGE_NAME=`basename "$IMAGE_FNAME" .tar.gz`
727 727
         if [ ! -f $FILES/$IMAGE_FNAME ]; then
728 728
             wget -c $image_url -O $FILES/$IMAGE_FNAME
729 729
         fi
... ...
@@ -12,14 +12,14 @@ KEYSTONE_BRANCH=diablo
12 12
 
13 13
 # a websockets/html5 or flash powered VNC console for vm instances
14 14
 NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git
15
-NOVNC_BRANCH=master
15
+NOVNC_BRANCH=diablo
16 16
 
17 17
 # django powered web control panel for openstack
18 18
 DASH_REPO=https://github.com/cloudbuilders/openstack-dashboard.git
19 19
 DASH_BRANCH=diablo
20 20
 
21 21
 # python client library to nova that dashboard (and others) use
22
-NOVACLIENT_REPO=https://github.com/cloudbuilders/python-novaclient.git
22
+NOVACLIENT_REPO=https://github.com/rackspace/python-novaclient.git
23 23
 NOVACLIENT_BRANCH=master
24 24
 
25 25
 # openstackx is a collection of extensions to openstack.compute & nova
... ...
@@ -1,5 +1,8 @@
1 1
 #!/usr/bin/env bash
2 2
 
3
+# exit on error to stop unexpected errors
4
+set -o errexit
5
+
3 6
 # Make sure that we have the proper version of ubuntu
4 7
 UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'`
5 8
 if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then
... ...
@@ -16,9 +19,6 @@ set -o xtrace
16 16
 TOOLS_DIR=$(cd $(dirname "$0") && pwd)
17 17
 TOP_DIR=$TOOLS_DIR/..
18 18
 
19
-# Configure the root password of the vm
20
-ROOT_PASSWORD=${ROOT_PASSWORD:password}
21
-
22 19
 # Where to store files and instances
23 20
 KVMSTACK_DIR=${KVMSTACK_DIR:-/opt/kvmstack}
24 21
 
... ...
@@ -41,24 +41,28 @@ fi
41 41
 # Source params
42 42
 source ./stackrc
43 43
 
44
+# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
45
+ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
46
+
47
+
44 48
 # Base image (natty by default)
45 49
 DIST_NAME=${DIST_NAME:-natty}
46 50
 IMAGE_FNAME=$DIST_NAME.raw
47 51
 
52
+# Name of our instance, used by libvirt
53
+GUEST_NAME=${GUEST_NAME:-kvmstack}
54
+
48 55
 # Original version of built image
49
-BASE_IMAGE=$KVMSTACK_DIR/images/natty.raw
56
+BASE_IMAGE=$KVMSTACK_DIR/images/$DIST_NAME.raw
50 57
 
51 58
 # Copy of base image, which we pre-install with tasty treats
52
-BASE_IMAGE_COPY=$IMAGES_DIR/$DIST_NAME.raw.copy
53
-
54
-# Name of our instance, used by libvirt
55
-CONTAINER_NAME=${CONTAINER_NAME:-kvmstack}
59
+VM_IMAGE=$IMAGES_DIR/$DIST_NAME.$GUEST_NAME.raw
56 60
 
57 61
 # Mop up after previous runs
58
-virsh destroy $CONTAINER_NAME
62
+virsh destroy $GUEST_NAME || true
59 63
 
60 64
 # Where this vm is stored
61
-VM_DIR=$KVMSTACK_DIR/instances/$CONTAINER_NAME
65
+VM_DIR=$KVMSTACK_DIR/instances/$GUEST_NAME
62 66
 
63 67
 # Create vm dir
64 68
 mkdir -p $VM_DIR
... ...
@@ -70,14 +74,14 @@ mkdir -p $COPY_DIR
70 70
 # Create the base image if it does not yet exist
71 71
 if [ ! -e $IMAGES_DIR/$IMAGE_FNAME ]; then
72 72
     cd $TOOLS_DIR
73
-    ./make_image.sh -m -r 5000  natty raw
74
-    mv natty.raw $BASE_IMAGE
73
+    ./make_image.sh -m -r 5000  $DIST_NAME raw
74
+    mv $DIST_NAME.raw $BASE_IMAGE
75 75
     cd $TOP_DIR
76 76
 fi
77 77
 
78 78
 # Create a copy of the base image
79
-if [ ! -e $BASE_IMAGE_COPY ]; then
80
-    cp -p $BASE_IMAGE $BASE_IMAGE_COPY
79
+if [ ! -e $VM_IMAGE ]; then
80
+    cp -p $BASE_IMAGE $VM_IMAGE
81 81
 fi
82 82
 
83 83
 # Unmount the copied base image
... ...
@@ -98,8 +102,8 @@ function kill_unmount() {
98 98
     exit 1
99 99
 }
100 100
 
101
-# Install deps
102
-apt-get install -y --force-yes kvm libvirt-bin kpartx
101
+# Install deps if needed
102
+dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin kpartx
103 103
 
104 104
 # Let Ctrl-c kill tail and exit
105 105
 trap kill_unmount SIGINT
... ...
@@ -108,7 +112,7 @@ trap kill_unmount SIGINT
108 108
 DEST=${DEST:-/opt/stack}
109 109
 
110 110
 # Mount the file system
111
-mount -o loop,offset=32256 $BASE_IMAGE_COPY  $COPY_DIR
111
+mount -o loop,offset=32256 $VM_IMAGE  $COPY_DIR
112 112
 
113 113
 # git clone only if directory doesn't exist already.  Since ``DEST`` might not
114 114
 # be owned by the installation user, we create the directory and change the
... ...
@@ -148,29 +152,28 @@ git_clone $OPENSTACKX_REPO $COPY_DIR/$DEST/openstackx $OPENSTACKX_BRANCH
148 148
 git_clone $KEYSTONE_REPO $COPY_DIR/$DEST/keystone $KEYSTONE_BRANCH
149 149
 git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH
150 150
 
151
-# Unmount the filesystems
152
-unmount_images
153
-
154 151
 # Back to devstack
155 152
 cd $TOP_DIR
156 153
 
154
+# Unmount the filesystems
155
+unmount_images
156
+
157 157
 # Network configuration variables
158 158
 BRIDGE=${BRIDGE:-br0}
159
-CONTAINER=${CONTAINER:-STACK}
160
-CONTAINER_IP=${CONTAINER_IP:-192.168.1.50}
161
-CONTAINER_CIDR=${CONTAINER_CIDR:-$CONTAINER_IP/24}
162
-CONTAINER_NETMASK=${CONTAINER_NETMASK:-255.255.255.0}
163
-CONTAINER_GATEWAY=${CONTAINER_GATEWAY:-192.168.1.1}
164
-CONTAINER_MAC=${CONTAINER_MAC:-"02:16:3e:07:69:`printf '%02X' $(echo $CONTAINER_IP | sed "s/.*\.//")`"}
165
-CONTAINER_RAM=${CONTAINER_RAM:-1524288}
166
-CONTAINER_CORES=${CONTAINER_CORES:-1}
159
+GUEST_IP=${GUEST_IP:-192.168.1.50}
160
+GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
161
+GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
162
+GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.1.1}
163
+GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $(echo $GUEST_IP | sed "s/.*\.//")`"}
164
+GUEST_RAM=${GUEST_RAM:-1524288}
165
+GUEST_CORES=${GUEST_CORES:-1}
167 166
 
168 167
 # libvirt.xml configuration
169
-LIBVIRT_XML=libvirt.xml
168
+LIBVIRT_XML=$VM_DIR/libvirt.xml
170 169
 cat > $LIBVIRT_XML <<EOF
171 170
 <domain type='kvm'>
172
-    <name>$CONTAINER_NAME</name>
173
-    <memory>$CONTAINER_RAM</memory>
171
+    <name>$GUEST_NAME</name>
172
+    <memory>$GUEST_RAM</memory>
174 173
     <os>
175 174
         <type>hvm</type>
176 175
         <bootmenu enable='yes'/>
... ...
@@ -178,7 +181,7 @@ cat > $LIBVIRT_XML <<EOF
178 178
     <features>
179 179
         <acpi/>
180 180
     </features>
181
-    <vcpu>$CONTAINER_CORES</vcpu>
181
+    <vcpu>$GUEST_CORES</vcpu>
182 182
     <devices>
183 183
         <disk type='file'>
184 184
             <driver type='qcow2'/>
... ...
@@ -188,7 +191,7 @@ cat > $LIBVIRT_XML <<EOF
188 188
 
189 189
         <interface type='bridge'>
190 190
             <source bridge='$BRIDGE'/>
191
-            <mac address='$CONTAINER_MAC'/>
191
+            <mac address='$GUEST_MAC'/>
192 192
         </interface>
193 193
 
194 194
         <!-- The order is significant here.  File must be defined first -->
... ...
@@ -231,13 +234,15 @@ cd $VM_DIR
231 231
 rm -f $VM_DIR/disk
232 232
 
233 233
 # Create our instance fs
234
-qemu-img create -f qcow2 -b $BASE_IMAGE_COPY disk
235
-
236
-sleep 5
234
+qemu-img create -f qcow2 -b $VM_IMAGE disk
237 235
 
236
+# Connect our nbd and wait till it is mountable
238 237
 qemu-nbd -c $NBD disk
239
-
240
-sleep 5
238
+NBD_DEV=`basename $NBD`
239
+if ! timeout 60 sh -c "while ! [ -e /sys/block/$NBD_DEV/pid ]; do sleep 1; done"; then
240
+    echo "Couldn't connect $NBD"
241
+    exit 1
242
+fi
241 243
 
242 244
 # Mount the instance
243 245
 mount $NBD $ROOTFS -o offset=32256 -t ext4
... ...
@@ -250,13 +255,13 @@ iface lo inet loopback
250 250
 
251 251
 auto eth0
252 252
 iface eth0 inet static
253
-        address $CONTAINER_IP
254
-        netmask $CONTAINER_NETMASK
255
-        gateway $CONTAINER_GATEWAY
253
+        address $GUEST_IP
254
+        netmask $GUEST_NETMASK
255
+        gateway $GUEST_GATEWAY
256 256
 EOF
257 257
 
258 258
 # User configuration for the instance
259
-chroot $ROOTFS groupadd libvirtd
259
+chroot $ROOTFS groupadd libvirtd || true
260 260
 chroot $ROOTFS useradd stack -s /bin/bash -d $DEST -G libvirtd
261 261
 cp -pr $TOOLS_DIR/.. $ROOTFS/$DEST/devstack
262 262
 echo "root:$ROOT_PASSWORD" | chroot $ROOTFS chpasswd
... ...
@@ -280,6 +285,15 @@ if [ "$COPYENV" = "1" ]; then
280 280
     cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc
281 281
 fi
282 282
 
283
+# pre-cache uec images
284
+for image_url in ${IMAGE_URLS//,/ }; do
285
+    IMAGE_FNAME=`basename "$image_url"`
286
+    if [ ! -f $IMAGES_DIR/$IMAGE_FNAME ]; then
287
+        wget -c $image_url -O $IMAGES_DIR/$IMAGE_FNAME
288
+    fi
289
+    cp $IMAGES_DIR/$IMAGE_FNAME $ROOTFS/$DEST/devstack/files
290
+done
291
+
283 292
 # Configure the runner
284 293
 RUN_SH=$ROOTFS/$DEST/run.sh
285 294
 cat > $RUN_SH <<EOF
... ...
@@ -306,7 +320,7 @@ chmod 755 $RUN_SH
306 306
 RC_LOCAL=$ROOTFS/etc/init.d/local
307 307
 cat > $RC_LOCAL <<EOF
308 308
 #!/bin/sh -e
309
-# Reboot if this is our first run to enable console log on natty :(
309
+# Reboot if this is our first run to enable console log on $DIST_NAME :(
310 310
 if [ ! -e /root/firstlaunch ]; then
311 311
     touch /root/firstlaunch
312 312
     reboot -f
... ...
@@ -372,6 +386,10 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
372 372
     done
373 373
 
374 374
     kill $TAIL_PID
375
+
376
+    if grep -q "stack.sh failed" $VM_DIR/console.log; then
377
+        exit 1
378
+    fi
375 379
     echo ""
376 380
     echo "Finished - Zip-a-dee Doo-dah!"
377 381
 fi
... ...
@@ -27,18 +27,19 @@ CWD=`pwd`
27 27
 
28 28
 # Configurable params
29 29
 BRIDGE=${BRIDGE:-br0}
30
-CONTAINER=${CONTAINER:-STACK}
31
-CONTAINER_IP=${CONTAINER_IP:-192.168.1.50}
32
-CONTAINER_CIDR=${CONTAINER_CIDR:-$CONTAINER_IP/24}
33
-CONTAINER_NETMASK=${CONTAINER_NETMASK:-255.255.255.0}
34
-CONTAINER_GATEWAY=${CONTAINER_GATEWAY:-192.168.1.1}
30
+GUEST_NAME=${GUEST_NAME:-STACK}
31
+GUEST_IP=${GUEST_IP:-192.168.1.50}
32
+GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
33
+GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
34
+GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.1.1}
35 35
 NAMESERVER=${NAMESERVER:-`cat /etc/resolv.conf | grep nameserver | head -1 | cut -d " " -f2`}
36 36
 COPYENV=${COPYENV:-1}
37 37
 DEST=${DEST:-/opt/stack}
38 38
 WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
39 39
 
40 40
 # Param string to pass to stack.sh.  Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova"
41
-STACKSH_PARAMS=${STACKSH_PARAMS:-}
41
+# By default, n-vol is disabled for lxc, as iscsitarget doesn't work properly in lxc
42
+STACKSH_PARAMS=${STACKSH_PARAMS:-"ENABLED_SERVICES=g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,dash,mysql,rabbit"}
42 43
 
43 44
 # Option to use the version of devstack on which we are currently working
44 45
 USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1}
... ...
@@ -59,22 +60,22 @@ if ! which cgdelete | grep -q cgdelete; then
59 59
 fi
60 60
 
61 61
 # Create lxc configuration
62
-LXC_CONF=/tmp/$CONTAINER.conf
62
+LXC_CONF=/tmp/$GUEST_NAME.conf
63 63
 cat > $LXC_CONF <<EOF
64 64
 lxc.network.type = veth
65 65
 lxc.network.link = $BRIDGE
66 66
 lxc.network.flags = up
67
-lxc.network.ipv4 = $CONTAINER_CIDR
67
+lxc.network.ipv4 = $GUEST_CIDR
68 68
 # allow tap/tun devices
69 69
 lxc.cgroup.devices.allow = c 10:200 rwm
70 70
 EOF
71 71
 
72 72
 # Shutdown any existing container
73
-lxc-stop -n $CONTAINER
73
+lxc-stop -n $GUEST_NAME
74 74
 
75 75
 # This kills zombie containers
76
-if [ -d /cgroup/$CONTAINER ]; then
77
-    cgdelete -r cpu,net_cls:$CONTAINER
76
+if [ -d /cgroup/$GUEST_NAME ]; then
77
+    cgdelete -r cpu,net_cls:$GUEST_NAME
78 78
 fi
79 79
 
80 80
 # git clone only if directory doesn't exist already.  Since ``DEST`` might not
... ...
@@ -94,9 +95,9 @@ function git_clone {
94 94
 # Helper to create the container
95 95
 function create_lxc {
96 96
     if [ "natty" = "$UBUNTU_VERSION" ]; then
97
-        lxc-create -n $CONTAINER -t natty -f $LXC_CONF
97
+        lxc-create -n $GUEST_NAME -t natty -f $LXC_CONF
98 98
     else
99
-        lxc-create -n $CONTAINER -t ubuntu -f $LXC_CONF
99
+        lxc-create -n $GUEST_NAME -t ubuntu -f $LXC_CONF
100 100
     fi
101 101
 }
102 102
 
... ...
@@ -116,7 +117,7 @@ fi
116 116
 if [ ! -f $CACHEDIR/bootstrapped ]; then
117 117
     # by deleting the container, we force lxc-create to re-bootstrap (lxc is
118 118
     # lazy and doesn't do anything if a container already exists)
119
-    lxc-destroy -n $CONTAINER
119
+    lxc-destroy -n $GUEST_NAME
120 120
     # trigger the initial debootstrap
121 121
     create_lxc
122 122
     touch $CACHEDIR/bootstrapped
... ...
@@ -152,7 +153,7 @@ if [ "$USE_CURRENT_DEVSTACK" = "1" ]; then
152 152
 fi
153 153
 
154 154
 # Destroy the old container
155
-lxc-destroy -n $CONTAINER
155
+lxc-destroy -n $GUEST_NAME
156 156
 
157 157
 # If this call is to TERMINATE the container then exit
158 158
 if [ "$TERMINATE" = "1" ]; then
... ...
@@ -163,7 +164,7 @@ fi
163 163
 create_lxc
164 164
 
165 165
 # Specify where our container rootfs lives
166
-ROOTFS=/var/lib/lxc/$CONTAINER/rootfs/
166
+ROOTFS=/var/lib/lxc/$GUEST_NAME/rootfs/
167 167
 
168 168
 # Create a stack user that is a member of the libvirtd group so that stack
169 169
 # is able to interact with libvirt.
... ...
@@ -213,9 +214,9 @@ iface lo inet loopback
213 213
 
214 214
 auto eth0
215 215
 iface eth0 inet static
216
-        address $CONTAINER_IP
217
-        netmask $CONTAINER_NETMASK
218
-        gateway $CONTAINER_GATEWAY
216
+        address $GUEST_IP
217
+        netmask $GUEST_NETMASK
218
+        gateway $GUEST_GATEWAY
219 219
 EOF
220 220
 
221 221
 # Configure the runner
... ...
@@ -226,7 +227,7 @@ cat > $RUN_SH <<EOF
226 226
 echo "nameserver $NAMESERVER" | sudo resolvconf -a eth0
227 227
 # Make sure there is a default route - needed for natty
228 228
 if ! route | grep -q default; then
229
-    sudo ip route add default via $CONTAINER_GATEWAY
229
+    sudo ip route add default via $GUEST_GATEWAY
230 230
 fi
231 231
 sleep 1
232 232
 
... ...
@@ -264,7 +265,7 @@ if ! mount | grep -q cgroup; then
264 264
 fi
265 265
 
266 266
 # Start our container
267
-lxc-start -d -n $CONTAINER
267
+lxc-start -d -n $GUEST_NAME
268 268
 
269 269
 if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
270 270
     # Done creating the container, let's tail the log
... ...
@@ -300,6 +301,11 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
300 300
     done
301 301
 
302 302
     kill $TAIL_PID
303
+
304
+    if grep -q "stack.sh failed" $ROOTFS/$DEST/run.sh.log; then
305
+        exit 1
306
+    fi
307
+
303 308
     echo ""
304 309
     echo "Finished - Zip-a-dee Doo-dah!"
305 310
 fi
... ...
@@ -18,7 +18,7 @@ COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_
18 18
 # Helper to launch containers
19 19
 function run_lxc {
20 20
     # For some reason container names with periods can cause issues :/
21
-    CONTAINER=$1 CONTAINER_IP=$2 CONTAINER_NETMASK=$NETMASK CONTAINER_GATEWAY=$GATEWAY NAMESERVER=$NAMESERVER TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $3" ./build_lxc.sh
21
+    GUEST_NAME=$1 GUEST_IP=$2 GUEST_NETMASK=$NETMASK GUEST_GATEWAY=$GATEWAY NAMESERVER=$NAMESERVER TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $3" ./build_lxc.sh
22 22
 }
23 23
 
24 24
 # Launch the head node - headnode uses a non-ip domain name,
... ...
@@ -99,11 +99,10 @@ git_clone $DASH_REPO $DEST/dash $DASH_BRANCH
99 99
 git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH
100 100
 git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH
101 101
 
102
-# Use this version of devstack?
103
-if [ "$USE_CURRENT_DEVSTACK" = "1" ]; then
104
-    rm -rf $CHROOTCACHE/natty-stack/$DEST/devstack
105
-    cp -pr $CWD $CHROOTCACHE/natty-stack/$DEST/devstack
106
-fi
102
+# Use this version of devstack
103
+rm -rf $CHROOTCACHE/natty-stack/$DEST/devstack
104
+cp -pr $CWD $CHROOTCACHE/natty-stack/$DEST/devstack
105
+chroot $CHROOTCACHE/natty-stack chown -R stack $DEST/devstack
107 106
 
108 107
 # Configure host network for DHCP
109 108
 mkdir -p $CHROOTCACHE/natty-stack/etc/network
110 109
new file mode 100755
... ...
@@ -0,0 +1,159 @@
0
+#!/bin/bash
1
+# get_uec_image.sh - Prepare Ubuntu images in various formats
2
+#
3
+# Supported formats: qcow (kvm), vmdk (vmserver), vdi (vbox), vhd (vpc), raw
4
+#
5
+# Required to run as root
6
+
7
+CACHEDIR=${CACHEDIR:-/var/cache/devstack}
8
+FORMAT=${FORMAT:-qcow2}
9
+ROOTSIZE=${ROOTSIZE:-2000}
10
+MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"}
11
+
12
+usage() {
13
+    echo "Usage: $0 - Prepare Ubuntu images"
14
+    echo ""
15
+    echo "$0 [-f format] [-r rootsize] release imagefile"
16
+    echo ""
17
+    echo "-f format - image format: qcow2 (default), vmdk, vdi, vhd, xen, raw, fs"
18
+    echo "-r size   - root fs size in MB (min 2000MB)"
19
+    echo "release   - Ubuntu release: jaunty - oneiric"
20
+    echo "imagefile - output image file"
21
+    exit 1
22
+}
23
+
24
+while getopts f:hmr: c; do
25
+    case $c in
26
+        f)  FORMAT=$OPTARG
27
+            ;;
28
+        h)  usage
29
+            ;;
30
+        m)  MINIMAL=1
31
+            ;;
32
+        r)  ROOTSIZE=$OPTARG
33
+            if (( ROOTSIZE < 2000 )); then
34
+                echo "root size must be at least 2000MB"
35
+                exit 1
36
+            fi
37
+            ;;
38
+    esac
39
+done
40
+shift `expr $OPTIND - 1`
41
+
42
+if [ ! "$#" -eq "2" ]; then
43
+    usage
44
+fi
45
+
46
+# Default args
47
+DIST_NAME=$1
48
+IMG_FILE=$2
49
+
50
+case $FORMAT in
51
+    kvm|qcow2)  FORMAT=qcow2
52
+                QFORMAT=qcow2
53
+                ;;
54
+    vmserver|vmdk)
55
+                FORMAT=vmdk
56
+                QFORMAT=vmdk
57
+                ;;
58
+    vbox|vdi)   FORMAT=vdi
59
+                QFORMAT=vdi
60
+                ;;
61
+    vhd|vpc)    FORMAT=vhd
62
+                QFORMAT=vpc
63
+                ;;
64
+    xen)        FORMAT=raw
65
+                QFORMAT=raw
66
+                ;;
67
+    raw)        FORMAT=raw
68
+                QFORMAT=raw
69
+                ;;
70
+    *)          echo "Unknown format: $FORMAT"
71
+                usage
72
+esac
73
+
74
+case $DIST_NAME in
75
+    oneiric)    ;;
76
+    natty)      ;;
77
+    maverick)   ;;
78
+    lucid)      ;;
79
+    karmic)     ;;
80
+    jaunty)     ;;
81
+    *)          echo "Unknown release: $DIST_NAME"
82
+                usage
83
+                ;;
84
+esac
85
+
86
+# Set up nbd
87
+modprobe nbd max_part=63
88
+NBD=${NBD:-/dev/nbd9}
89
+NBD_DEV=`basename $NBD`
90
+
91
+# Prepare the base image
92
+
93
+# Get the UEC image
94
+UEC_NAME=$DIST_NAME-server-cloudimg-amd64
95
+if [ ! -e $CACHEDIR/$UEC_NAME-disk1.img ]; then
96
+    (cd $CACHEDIR; wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME-disk1.img)
97
+
98
+
99
+    # Connect to nbd and wait till it is ready
100
+    qemu-nbd -d $NBD
101
+    qemu-nbd -c $NBD $CACHEDIR/$UEC_NAME-disk1.img
102
+    if ! timeout 60 sh -c "while ! [ -e /sys/block/$NBD_DEV/pid ]; do sleep 1; done"; then
103
+        echo "Couldn't connect $NBD"
104
+        exit 1
105
+    fi
106
+    MNTDIR=`mktemp -d mntXXXXXXXX`
107
+    mount -t ext4 ${NBD}p1 $MNTDIR
108
+
109
+    # Install our required packages
110
+    cp -p files/sources.list $MNTDIR/etc/apt/sources.list
111
+    cp -p /etc/resolv.conf $MNTDIR/etc/resolv.conf
112
+    chroot $MNTDIR apt-get update
113
+    chroot $MNTDIR apt-get install -y $MIN_PKGS
114
+    rm -f $MNTDIR/etc/resolv.conf
115
+
116
+    umount $MNTDIR
117
+    rmdir $MNTDIR
118
+    qemu-nbd -d $NBD
119
+fi
120
+
121
+if [ "$FORMAT" = "qcow2" ]; then
122
+    # Just copy image
123
+    cp -p $CACHEDIR/$UEC_NAME-disk1.img $IMG_FILE
124
+else
125
+    # Convert image
126
+    qemu-img convert -O $QFORMAT $CACHEDIR/$UEC_NAME-disk1.img $IMG_FILE
127
+fi
128
+
129
+# Resize the image if necessary
130
+if [ $ROOTSIZE -gt 2000 ]; then
131
+    # Resize the container
132
+    qemu-img resize $IMG_FILE +$((ROOTSIZE - 2000))M
133
+
134
+    # Connect to nbd and wait till it is ready
135
+    qemu-nbd -c $NBD $IMG_FILE
136
+    if ! timeout 60 sh -c "while ! [ -e /sys/block/$NBD_DEV/pid ]; do sleep 1; done"; then
137
+        echo "Couldn't connect $NBD"
138
+        exit 1
139
+    fi
140
+
141
+    # Resize partition 1 to full size of the disk image
142
+    echo "d
143
+n
144
+p
145
+1
146
+2
147
+
148
+t
149
+83
150
+a
151
+1
152
+w
153
+" | fdisk $NBD
154
+    fsck -t ext4 -f ${NBD}p1
155
+    resize2fs ${NBD}p1
156
+
157
+    qemu-nbd -d $NBD
158
+fi
... ...
@@ -65,6 +65,13 @@ if [ -n "$IMAGEONLY" ]; then
65 65
     RELEASE="pass"
66 66
 fi
67 67
 
68
+# Make sure that we have the proper version of ubuntu
69
+UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'`
70
+if [ "$UBUNTU_VERSION" = "natty" -a "$RELEASE" = "oneiric" ]; then
71
+    echo "natty installs can't build oneiric images"
72
+    exit 1
73
+fi
74
+
68 75
 case $FORMAT in
69 76
     kvm|qcow2)  FORMAT=qcow2
70 77
                 QFORMAT=qcow2
... ...
@@ -97,6 +104,7 @@ case $FORMAT in
97 97
 esac
98 98
 
99 99
 case $RELEASE in
100
+    oneiric)    ;;
100 101
     natty)      ;;
101 102
     maverick)   ;;
102 103
     lucid)      ;;