Browse code

Merge remote-tracking branch 'origin/master' into jenkins

Anthony Young authored on 2011/11/09 08:24:58
Showing 9 changed files
... ...
@@ -165,7 +165,7 @@ ping -c1 -w1 $IP
165 165
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
166 166
 
167 167
 # FIXME (anthony): make xs support security groups
168
-if [ "$VIRT_DRIVER" != "xenserver"]; then
168
+if [ "$VIRT_DRIVER" != "xenserver" ]; then
169 169
     # test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
170 170
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
171 171
         print "Security group failure - ping should not be allowed!"
172 172
new file mode 100644
... ...
@@ -0,0 +1,127 @@
0
+#######
1
+# EC2 #
2
+#######
3
+
4
+[composite:ec2]
5
+use = egg:Paste#urlmap
6
+/: ec2versions
7
+/services/Cloud: ec2cloud
8
+/services/Admin: ec2admin
9
+/latest: ec2metadata
10
+/2007-01-19: ec2metadata
11
+/2007-03-01: ec2metadata
12
+/2007-08-29: ec2metadata
13
+/2007-10-10: ec2metadata
14
+/2007-12-15: ec2metadata
15
+/2008-02-01: ec2metadata
16
+/2008-09-01: ec2metadata
17
+/2009-04-04: ec2metadata
18
+/1.0: ec2metadata
19
+
20
+[pipeline:ec2cloud]
21
+pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
22
+
23
+[pipeline:ec2admin]
24
+pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
25
+
26
+[pipeline:ec2metadata]
27
+pipeline = logrequest ec2md
28
+
29
+[pipeline:ec2versions]
30
+pipeline = logrequest ec2ver
31
+
32
+[filter:logrequest]
33
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
34
+
35
+[filter:ec2lockout]
36
+paste.filter_factory = nova.api.ec2:Lockout.factory
37
+
38
+[filter:totoken]
39
+paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory
40
+
41
+[filter:ec2noauth]
42
+paste.filter_factory = nova.api.ec2:NoAuth.factory
43
+
44
+[filter:authenticate]
45
+paste.filter_factory = nova.api.ec2:Authenticate.factory
46
+
47
+[filter:cloudrequest]
48
+controller = nova.api.ec2.cloud.CloudController
49
+paste.filter_factory = nova.api.ec2:Requestify.factory
50
+
51
+[filter:adminrequest]
52
+controller = nova.api.ec2.admin.AdminController
53
+paste.filter_factory = nova.api.ec2:Requestify.factory
54
+
55
+[filter:authorizer]
56
+paste.filter_factory = nova.api.ec2:Authorizer.factory
57
+
58
+[app:ec2executor]
59
+paste.app_factory = nova.api.ec2:Executor.factory
60
+
61
+[app:ec2ver]
62
+paste.app_factory = nova.api.ec2:Versions.factory
63
+
64
+[app:ec2md]
65
+paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory
66
+
67
+#############
68
+# Openstack #
69
+#############
70
+
71
+[composite:osapi]
72
+use = egg:Paste#urlmap
73
+/: osversions
74
+/v1.0: openstackapi10
75
+/v1.1: openstackapi11
76
+
77
+[pipeline:openstackapi10]
78
+pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
79
+
80
+[pipeline:openstackapi11]
81
+pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
82
+
83
+[filter:faultwrap]
84
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
85
+
86
+[filter:auth]
87
+paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
88
+
89
+[filter:noauth]
90
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
91
+
92
+[filter:ratelimit]
93
+paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
94
+
95
+[filter:extensions]
96
+paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory
97
+
98
+[app:osapiapp10]
99
+paste.app_factory = nova.api.openstack:APIRouterV10.factory
100
+
101
+[app:osapiapp11]
102
+paste.app_factory = nova.api.openstack:APIRouterV11.factory
103
+
104
+[pipeline:osversions]
105
+pipeline = faultwrap osversionapp
106
+
107
+[app:osversionapp]
108
+paste.app_factory = nova.api.openstack.versions:Versions.factory
109
+
110
+##########
111
+# Shared #
112
+##########
113
+
114
+[filter:keystonecontext]
115
+paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory
116
+
117
+[filter:authtoken]
118
+paste.filter_factory = keystone.middleware.auth_token:filter_factory
119
+service_protocol = http
120
+service_host = 127.0.0.1
121
+service_port = 5000
122
+auth_host = 127.0.0.1
123
+auth_port = 35357
124
+auth_protocol = http
125
+auth_uri = http://127.0.0.1:5000/
126
+admin_token = %SERVICE_TOKEN%
... ...
@@ -41,7 +41,7 @@ Cmnd_Alias NOVACMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \
41 41
                       /usr/bin/socat,                           \
42 42
                       /sbin/parted,                             \
43 43
                       /usr/sbin/dnsmasq,                        \
44
-                      /usr/bin/arping
44
+                      /usr/sbin/arping
45 45
 
46 46
 %USER% ALL = (root) NOPASSWD: SETENV: NOVACMDS
47 47
 
... ...
@@ -103,8 +103,7 @@ if [[ $EUID -eq 0 ]]; then
103 103
 
104 104
     # since this script runs as a normal user, we need to give that user
105 105
     # the ability to run sudo
106
-    apt_get update
107
-    apt_get install sudo
106
+    dpkg -l sudo || apt_get update && apt_get install sudo
108 107
 
109 108
     if ! getent passwd stack >/dev/null; then
110 109
         echo "Creating a user called stack"
... ...
@@ -121,7 +120,7 @@ if [[ $EUID -eq 0 ]]; then
121 121
     echo "Copying files to stack user"
122 122
     STACK_DIR="$DEST/${PWD##*/}"
123 123
     cp -r -f "$PWD" "$STACK_DIR"
124
-    chown -R $USER "$STACK_DIR"
124
+    chown -R stack "$STACK_DIR"
125 125
     if [[ "$SHELL_AFTER_RUN" != "no" ]]; then
126 126
         exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack
127 127
     else
... ...
@@ -175,6 +174,9 @@ if [ ! -n "$HOST_IP" ]; then
175 175
     HOST_IP=`LC_ALL=C /sbin/ifconfig  | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
176 176
 fi
177 177
 
178
+# Service startup timeout
179
+SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
180
+
178 181
 # Generic helper to configure passwords
179 182
 function read_password {
180 183
     set +o xtrace
... ...
@@ -230,7 +232,7 @@ VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE}
230 230
 # Multi-host is a mode where each compute node runs its own network node.  This
231 231
 # allows network operations and routing for a VM to occur on the server that is
232 232
 # running the VM - removing a SPOF and bandwidth bottleneck.
233
-MULTI_HOST=${MULTI_HOST:-0}
233
+MULTI_HOST=${MULTI_HOST:-False}
234 234
 
235 235
 # If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE``
236 236
 # variable but make sure that the interface doesn't already have an
... ...
@@ -323,7 +325,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
323 323
     # can never change.
324 324
     read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
325 325
 fi
326
-    
326
+
327 327
 # Keystone
328 328
 # --------
329 329
 
... ...
@@ -562,13 +564,12 @@ fi
562 562
 # ----
563 563
 
564 564
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
565
-    # We are going to use the sample http middleware configuration from the
566
-    # keystone project to launch nova.  This paste config adds the configuration
567
-    # required for nova to validate keystone tokens - except we need to switch
568
-    # the config to use our service token instead (instead of the invalid token
569
-    # 999888777666).
570
-    cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin
571
-    sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
565
+    # We are going to use a sample http middleware configuration based on the
566
+    # one from the keystone project to launch nova.  This paste config adds
567
+    # the configuration required for nova to validate keystone tokens. We add
568
+    # our own service token to the configuration.
569
+    cp $FILES/nova-api-paste.ini $NOVA_DIR/bin
570
+    sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
572 571
 fi
573 572
 
574 573
 if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
... ...
@@ -650,13 +651,13 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
650 650
     USER_GROUP=$(id -g)
651 651
     sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives
652 652
     sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives
653
-    
653
+
654 654
     # We then create a loopback disk and format it to XFS.
655 655
     if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]];then
656 656
         mkdir -p  ${SWIFT_DATA_LOCATION}/drives/images
657 657
         sudo touch  ${SWIFT_DATA_LOCATION}/drives/images/swift.img
658 658
         sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img
659
-        
659
+
660 660
         dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \
661 661
             bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
662 662
         mkfs.xfs -f -i size=1024  ${SWIFT_DATA_LOCATION}/drives/images/swift.img
... ...
@@ -673,9 +674,9 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
673 673
     # We then create link to that mounted location so swift would know
674 674
     # where to go.
675 675
     for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done
676
-    
676
+
677 677
     # We now have to emulate a few different servers into one; we
678
-    # create all the directories needed for swift 
678
+    # create all the directories needed for swift
679 679
     tmpd=""
680 680
     for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \
681 681
         ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \
... ...
@@ -691,7 +692,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
691 691
    # swift-init has a bug using /etc/swift until bug #885595 is fixed
692 692
    # we have to create a link
693 693
    sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift
694
-   
694
+
695 695
   # Swift uses rsync to synchronize between all the different
696 696
    # partitions (which make more sense when you have a multi-node
697 697
    # setup) we configure it with our version of rsync.
... ...
@@ -727,7 +728,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
727 727
        local bind_port=$2
728 728
        local log_facility=$3
729 729
        local node_number
730
-       
730
+
731 731
        for node_number in {1..4};do
732 732
            node_path=${SWIFT_DATA_LOCATION}/${node_number}
733 733
            sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
... ...
@@ -754,14 +755,14 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
754 754
 
755 755
    # We then can start rsync.
756 756
    sudo /etc/init.d/rsync restart || :
757
-      
757
+
758 758
    # Create our ring for the object/container/account.
759 759
    /usr/local/bin/swift-remakerings
760 760
 
761 761
    # And now we launch swift-startmain to get our cluster running
762 762
    # ready to be tested.
763 763
    /usr/local/bin/swift-startmain || :
764
-   
764
+
765 765
    unset s swift_hash swift_auth_server tmpd
766 766
 fi
767 767
 
... ...
@@ -828,12 +829,12 @@ add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
828 828
 if [ -n "$INSTANCES_PATH" ]; then
829 829
     add_nova_flag "--instances_path=$INSTANCES_PATH"
830 830
 fi
831
-if [ -n "$MULTI_HOST" ]; then
832
-    add_nova_flag "--multi_host=$MULTI_HOST"
833
-    add_nova_flag "--send_arp_for_ha=1"
831
+if [ "$MULTI_HOST" != "False" ]; then
832
+    add_nova_flag "--multi_host"
833
+    add_nova_flag "--send_arp_for_ha"
834 834
 fi
835 835
 if [ "$SYSLOG" != "False" ]; then
836
-    add_nova_flag "--use_syslog=1"
836
+    add_nova_flag "--use_syslog"
837 837
 fi
838 838
 
839 839
 # XenServer
... ...
@@ -909,6 +910,10 @@ function screen_it {
909 909
     NL=`echo -ne '\015'`
910 910
     if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then
911 911
         screen -S stack -X screen -t $1
912
+        # sleep to allow bash to be ready to be sent the command - we are
913
+        # creating a new window in screen and then sending characters, so if
914
+        # bash isn't running by the time we send the command, nothing happens
915
+        sleep 1
912 916
         screen -S stack -p $1 -X stuff "$2$NL"
913 917
     fi
914 918
 }
... ...
@@ -926,7 +931,7 @@ fi
926 926
 if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
927 927
     screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf"
928 928
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
929
-    if ! timeout 60 sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
929
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
930 930
       echo "g-api did not start"
931 931
       exit 1
932 932
     fi
... ...
@@ -936,7 +941,7 @@ fi
936 936
 if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
937 937
     screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF -d"
938 938
     echo "Waiting for keystone to start..."
939
-    if ! timeout 60 sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then
939
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then
940 940
       echo "keystone did not start"
941 941
       exit 1
942 942
     fi
... ...
@@ -946,7 +951,7 @@ fi
946 946
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
947 947
     screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"
948 948
     echo "Waiting for nova-api to start..."
949
-    if ! timeout 60 sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
949
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
950 950
       echo "nova-api did not start"
951 951
       exit 1
952 952
     fi
953 953
new file mode 100755
... ...
@@ -0,0 +1,248 @@
0
+#!/usr/bin/env bash
1
+
2
+# Make sure that we have the proper version of ubuntu (only works on natty/oneiric)
3
+if ! egrep -q "oneiric|natty" /etc/lsb-release; then
4
+    echo "This script only works with ubuntu oneiric and natty"
5
+    exit 1
6
+fi
7
+
8
+# Keep track of the current directory
9
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
10
+TOP_DIR=`cd $TOOLS_DIR/..; pwd`
11
+
12
+cd $TOP_DIR
13
+
14
+# Source params
15
+source ./stackrc
16
+
17
+# Ubuntu distro to install
18
+DIST_NAME=${DIST_NAME:-oneiric}
19
+
20
+# Configure how large the VM should be
21
+GUEST_SIZE=${GUEST_SIZE:-10G}
22
+
23
+# exit on error to stop unexpected errors
24
+set -o errexit
25
+set -o xtrace
26
+
27
+# Abort if localrc is not set
28
+if [ ! -e $TOP_DIR/localrc ]; then
29
+    echo "You must have a localrc with ALL necessary passwords defined before proceeding."
30
+    echo "See stack.sh for required passwords."
31
+    exit 1
32
+fi
33
+
34
+# Install deps if needed
35
+DEPS="kvm libvirt-bin kpartx"
36
+dpkg -l $DEPS || apt-get install -y --force-yes $DEPS
37
+
38
+# Where to store files and instances
39
+WORK_DIR=${WORK_DIR:-/opt/kvmstack}
40
+
41
+# Where to store images
42
+image_dir=$WORK_DIR/images/$DIST_NAME
43
+mkdir -p $image_dir
44
+
45
+# Original version of built image
46
+uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz
47
+tarball=$image_dir/$(basename $uec_url)
48
+
49
+# download the base uec image if we haven't already
50
+if [ ! -f $tarball ]; then
51
+    curl $uec_url -o $tarball
52
+    (cd $image_dir && tar -Sxvzf $tarball)
53
+    resize-part-image $image_dir/*.img $GUEST_SIZE $image_dir/disk
54
+    cp $image_dir/*-vmlinuz-virtual $image_dir/kernel
55
+fi
56
+
57
+
58
+# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
59
+ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
60
+
61
+# Name of our instance, used by libvirt
62
+GUEST_NAME=${GUEST_NAME:-devstack}
63
+
64
+# Mop up after previous runs
65
+virsh destroy $GUEST_NAME || true
66
+
67
+# Where this vm is stored
68
+vm_dir=$WORK_DIR/instances/$GUEST_NAME
69
+
70
+# Create vm dir and remove old disk
71
+mkdir -p $vm_dir
72
+rm -f $vm_dir/disk
73
+
74
+# Create a copy of the base image
75
+qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk
76
+
77
+# Back to devstack
78
+cd $TOP_DIR
79
+
80
+GUEST_NETWORK=${GUEST_NETWORK:-1}
81
+GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes}
82
+GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50}
83
+GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
84
+GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
85
+GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1}
86
+GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"}
87
+GUEST_RAM=${GUEST_RAM:-1524288}
88
+GUEST_CORES=${GUEST_CORES:-1}
89
+
90
+# libvirt.xml configuration
91
+NET_XML=$vm_dir/net.xml
92
+cat > $NET_XML <<EOF
93
+<network>
94
+  <name>devstack-$GUEST_NETWORK</name>
95
+  <bridge name="stackbr%d" />
96
+  <forward/>
97
+  <ip address="$GUEST_GATEWAY" netmask="$GUEST_NETMASK">
98
+    <dhcp>
99
+      <range start='192.168.$GUEST_NETWORK.2' end='192.168.$GUEST_NETWORK.127' />
100
+    </dhcp>
101
+  </ip>
102
+</network>
103
+EOF
104
+
105
+if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
106
+    virsh net-destroy devstack-$GUEST_NETWORK || true
107
+    # destroying the network isn't enough to delete the leases
108
+    rm -f /var/lib/libvirt/dnsmasq/devstack-$GUEST_NETWORK.leases
109
+    virsh net-create $vm_dir/net.xml
110
+fi
111
+
112
+# libvirt.xml configuration
113
+LIBVIRT_XML=$vm_dir/libvirt.xml
114
+cat > $LIBVIRT_XML <<EOF
115
+<domain type='kvm'>
116
+  <name>$GUEST_NAME</name>
117
+  <memory>$GUEST_RAM</memory>
118
+  <os>
119
+    <type>hvm</type>
120
+    <kernel>$image_dir/kernel</kernel>
121
+    <cmdline>root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu</cmdline>
122
+  </os>
123
+  <features>
124
+    <acpi/>
125
+  </features>
126
+  <clock offset='utc'/>
127
+  <vcpu>$GUEST_CORES</vcpu>
128
+  <devices>
129
+    <disk type='file'>
130
+      <driver type='qcow2'/>
131
+      <source file='$vm_dir/disk'/>
132
+      <target dev='vda' bus='virtio'/>
133
+    </disk>
134
+
135
+    <interface type='network'>
136
+      <source network='devstack-$GUEST_NETWORK'/>
137
+    </interface>
138
+        
139
+    <!-- The order is significant here.  File must be defined first -->
140
+    <serial type="file">
141
+      <source path='$vm_dir/console.log'/>
142
+      <target port='1'/>
143
+    </serial>
144
+
145
+    <console type='pty' tty='/dev/pts/2'>
146
+      <source path='/dev/pts/2'/>
147
+      <target port='0'/>
148
+    </console>
149
+
150
+    <serial type='pty'>
151
+      <source path='/dev/pts/2'/>
152
+      <target port='0'/>
153
+    </serial>
154
+
155
+    <graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
156
+  </devices>
157
+</domain>
158
+EOF
159
+
160
+
161
+rm -rf $vm_dir/uec
162
+cp -r $TOOLS_DIR/uec $vm_dir/uec
163
+
164
+# set metadata
165
+cat > $vm_dir/uec/meta-data<<EOF
166
+hostname: $GUEST_NAME
167
+instance-id: i-hop
168
+instance-type: m1.ignore
169
+local-hostname: $GUEST_NAME.local
170
+EOF
171
+
172
+# set metadata
173
+cat > $vm_dir/uec/user-data<<EOF
174
+#!/bin/bash
175
+# hostname needs to resolve for rabbit
176
+sed -i "s/127.0.0.1/127.0.0.1 \`hostname\`/" /etc/hosts
177
+apt-get update
178
+apt-get install git sudo -y
179
+git clone https://github.com/cloudbuilders/devstack.git
180
+cd devstack
181
+git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'`
182
+git fetch
183
+git checkout `git rev-parse HEAD`
184
+cat > localrc <<LOCAL_EOF
185
+ROOTSLEEP=0
186
+`cat $TOP_DIR/localrc`
187
+LOCAL_EOF
188
+./stack.sh
189
+EOF
190
+
191
+# (re)start a metadata service
192
+(
193
+  pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1`
194
+  [ -z "$pid" ] || kill -9 $pid
195
+)
196
+cd $vm_dir/uec
197
+python meta.py 192.168.$GUEST_NETWORK.1:4567 &
198
+
199
+# Create the instance
200
+virsh create $vm_dir/libvirt.xml
201
+
202
+# Tail the console log till we are done
203
+WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
204
+if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
205
+    set +o xtrace
206
+    # Done creating the container, let's tail the log
207
+    echo
208
+    echo "============================================================="
209
+    echo "                          -- YAY! --"
210
+    echo "============================================================="
211
+    echo
212
+    echo "We're done launching the vm, about to start tailing the"
213
+    echo "stack.sh log. It will take a second or two to start."
214
+    echo
215
+    echo "Just CTRL-C at any time to stop tailing."
216
+
217
+    while [ ! -e "$vm_dir/console.log" ]; do
218
+      sleep 1
219
+    done
220
+
221
+    tail -F $vm_dir/console.log &
222
+
223
+    TAIL_PID=$!
224
+
225
+    function kill_tail() {
226
+        kill $TAIL_PID
227
+        exit 1
228
+    }
229
+
230
+    # Let Ctrl-c kill tail and exit
231
+    trap kill_tail SIGINT
232
+
233
+    echo "Waiting stack.sh to finish..."
234
+    while ! egrep -q '^stack.sh (completed|failed)' $vm_dir/console.log ; do
235
+        sleep 1
236
+    done
237
+
238
+    set -o xtrace
239
+
240
+    kill $TAIL_PID
241
+
242
+    if ! grep -q "^stack.sh completed in" $vm_dir/console.log; then
243
+        exit 1
244
+    fi
245
+    echo ""
246
+    echo "Finished - Zip-a-dee Doo-dah!"
247
+fi
... ...
@@ -14,6 +14,9 @@ MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"}
14 14
 TOOLS_DIR=$(cd $(dirname "$0") && pwd)
15 15
 TOP_DIR=`cd $TOOLS_DIR/..; pwd`
16 16
 
17
+# exit on error to stop unexpected errors
18
+set -o errexit
19
+
17 20
 usage() {
18 21
     echo "Usage: $0 - Prepare Ubuntu images"
19 22
     echo ""
... ...
@@ -44,6 +47,14 @@ cleanup() {
44 44
     trap 2; kill -2 $$
45 45
 }
46 46
 
47
+# apt-get wrapper to just get arguments set correctly
48
+function apt_get() {
49
+    local sudo="sudo"
50
+    [ "$(id -u)" = "0" ] && sudo="env"
51
+    $sudo DEBIAN_FRONTEND=noninteractive apt-get \
52
+        --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
53
+}
54
+
47 55
 while getopts f:hmr: c; do
48 56
     case $c in
49 57
         f)  FORMAT=$OPTARG
... ...
@@ -107,7 +118,14 @@ case $DIST_NAME in
107 107
                 ;;
108 108
 esac
109 109
 
110
-trap cleanup SIGHUP SIGINT SIGTERM
110
+trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT
111
+
112
+# Check for dependencies
113
+
114
+if [ ! -x "`which qemu-img`" -o ! -x "`which qemu-nbd`" ]; then
115
+    # Missing KVM?
116
+    apt_get install qemu-kvm
117
+fi
111 118
 
112 119
 # Prepare the base image
113 120
 
114 121
new file mode 100644
... ...
@@ -0,0 +1,29 @@
0
+import sys
1
+from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
2
+from SimpleHTTPServer import SimpleHTTPRequestHandler
3
+
4
+def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
5
+         ServerClass = HTTPServer, protocol="HTTP/1.0"):
6
+    """simple http server that listens on a given address:port"""
7
+
8
+    server_address = (host, port)
9
+
10
+    HandlerClass.protocol_version = protocol
11
+    httpd = ServerClass(server_address, HandlerClass)
12
+
13
+    sa = httpd.socket.getsockname()
14
+    print "Serving HTTP on", sa[0], "port", sa[1], "..."
15
+    httpd.serve_forever()
16
+
17
+if __name__ == '__main__':
18
+    if sys.argv[1:]:
19
+        address = sys.argv[1]
20
+    else:
21
+        address = '0.0.0.0'
22
+    if ':' in address:
23
+        host, port = address.split(':')
24
+    else:
25
+        host = address
26
+        port = 8080
27
+
28
+    main(host, int(port))
... ...
@@ -226,16 +226,21 @@ mkdir -p /boot/guest
226 226
 SR_UUID=`xe sr-list --minimal name-label="Local storage"`
227 227
 xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage
228 228
 
229
-# Uninstall previous runs
230
-xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh
231
-
232
-# Destroy any instances that were launched
233
-for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
234
-    echo "Shutting down nova instance $uuid"
235
-    xe vm-unpause uuid=$uuid || true
236
-    xe vm-shutdown uuid=$uuid
237
-    xe vm-destroy uuid=$uuid
238
-done
229
+
230
+# Shutdown previous runs
231
+DO_SHUTDOWN=${DO_SHUTDOWN:-1}
232
+if [ "$DO_SHUTDOWN" = "1" ]; then
233
+    # Shut down all domU's that were created previously
234
+    xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh
235
+
236
+    # Destroy any instances that were launched
237
+    for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
238
+        echo "Shutting down nova instance $uuid"
239
+        xe vm-unpause uuid=$uuid || true
240
+        xe vm-shutdown uuid=$uuid
241
+        xe vm-destroy uuid=$uuid
242
+    done
243
+fi
239 244
 
240 245
 # Path to head xva.  By default keep overwriting the same one to save space
241 246
 USE_SEPARATE_XVAS=${USE_SEPARATE_XVAS:-0}
242 247
new file mode 100755
... ...
@@ -0,0 +1,35 @@
0
+#!/usr/bin/env bash
1
+
2
+# Echo commands
3
+set -o xtrace
4
+
5
+# Head node host, which runs glance, api, keystone
6
+HEAD_PUB_IP=${HEAD_PUB_IP:-192.168.1.57}
7
+HEAD_MGT_IP=${HEAD_MGT_IP:-172.16.100.57}
8
+
9
+COMPUTE_PUB_IP=${COMPUTE_PUB_IP:-192.168.1.58}
10
+COMPUTE_MGT_IP=${COMPUTE_MGT_IP:-172.16.100.58}
11
+
12
+# Networking params
13
+FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30}
14
+
15
+# Variables common amongst all hosts in the cluster
16
+COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE"
17
+
18
+# Helper to launch containers
19
+function build_domU {
20
+    GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_domU.sh
21
+}
22
+
23
+# Launch the head node - headnode uses a non-ip domain name,
24
+# because rabbit won't launch with an ip addr hostname :(
25
+build_domU HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit"
26
+
27
+# Wait till the head node is up
28
+while ! curl -L http://$HEAD_PUB_IP | grep -q username; do
29
+    echo "Waiting for head node ($HEAD_PUB_IP) to start..."
30
+    sleep 5
31
+done
32
+
33
+# Build the HA compute host
34
+build_domU $COMPUTE_PUB_IP $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api"