
Merge branch 'master' of https://github.com/cloudbuilders/devstack into test

James E. Blair authored on 2011/11/11 06:06:11
Showing 15 changed files
... ...
@@ -1,214 +1,46 @@
 #!/usr/bin/env bash
 
-# **exercise.sh** - using the cloud can be fun
-
-# we will use the ``nova`` cli tool provided by the ``python-novaclient``
-# package
-#
-
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following along as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Use openrc + stackrc + localrc for settings
-source ./openrc
-
-# Get a token for clients that don't support service catalog
-# ==========================================================
-
-# manually create a token by querying keystone (sending JSON data).  Keystone
-# returns a token and catalog of endpoints.  We use python to parse the token
-# and save it.
-
-TOKEN=`curl -s -d  "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
-
-# Launching a server
-# ==================
-
-# List servers for tenant:
-nova list
-
-# Images
-# ------
-
-# Nova has a **deprecated** way of listing images.
-nova image-list
-
-# But we recommend using glance directly
-glance -A $TOKEN index
-
-# Let's grab the id of the first AMI image to launch
-IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1`
-
-# Security Groups
-# ---------------
-SECGROUP=test_secgroup
-
-# List of secgroups:
-nova secgroup-list
-
-# Create a secgroup
-nova secgroup-create $SECGROUP "test_secgroup description"
-
-# Determine flavor
-# ----------------
-
-# List of flavors:
-nova flavor-list
-
-# and grab the first flavor in the list to launch
-FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
-
-NAME="myserver"
-
-nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
-
-# Testing
-# =======
-
-# First check if it spins up (becomes active and responds to ping on
-# internal ip).  If you run this script from a nova node, you should
-# bypass security groups and have direct access to the server.
-
-# Waiting for boot
-# ----------------
-
-# Max time to wait while vm goes from build to active state
-ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
-
-# Max time till the vm is bootable
-BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
-
-# Max time to wait for proper association and dis-association.
-ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
-
-# check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    echo "server didn't become active!"
-    exit 1
-fi
-
-# get the IP of the server
-IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`
-
-# for single node deployments, we can ping private ips
-MULTI_HOST=${MULTI_HOST:-0}
-if [ "$MULTI_HOST" = "0" ]; then
-    # sometimes the first ping fails (10 seconds isn't enough time for the VM's
-    # network to respond?), so let's ping for a default of 15 seconds with a
-    # timeout of a second for each ping.
-    if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
-        echo "Couldn't ping server"
-        exit 1
+# Run everything in the exercises/ directory that isn't explicitly disabled
+
+# comma separated list of script basenames to skip
+# to refrain from exercising euca.sh use SKIP_EXERCISES=euca
+SKIP_EXERCISES=${SKIP_EXERCISES:-""}
+
+# Locate the scripts we should run
+EXERCISE_DIR=$(dirname "$0")/exercises
+basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
+
+# Track the state of each script
+passes=""
+failures=""
+skips=""
+
+# Loop over each possible script (by basename)
+for script in $basenames; do
+    if [[ "$SKIP_EXERCISES" =~ $script ]] ; then
+        skips="$skips $script"
+    else
+        echo =========================
+        echo Running $script
+        echo =========================
+        $EXERCISE_DIR/$script.sh
+        if [[ $? -ne 0 ]] ; then
+            failures="$failures $script"
+        else
+            passes="$passes $script"
+        fi
     fi
-else
-    # On a multi-host system, without vm net access, do a sleep to wait for the boot
-    sleep $BOOT_TIMEOUT
-fi
-
-# Security Groups & Floating IPs
-# ------------------------------
-
-# allow icmp traffic (ping)
-nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-
-# List rules for a secgroup
-nova secgroup-list-rules $SECGROUP
-
-# allocate a floating ip
-nova floating-ip-create
-
-# store floating address
-FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
-
-# add floating ip to our server
-nova add-floating-ip $NAME $FLOATING_IP
-
-# test that we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
-    echo "Couldn't ping server with floating ip"
-    exit 1
-fi
-
-# pause the VM and verify we can't ping it anymore
-nova pause $NAME
-
-sleep 2
-
-if ( ping -c1 -w1 $IP); then
-    echo "Pause failure - ping shouldn't work"
-    exit 1
-fi
-
-if ( ping -c1 -w1 $FLOATING_IP); then
-    echo "Pause failure - ping floating ips shouldn't work"
-    exit 1
-fi
-
-# unpause the VM and verify we can ping it again
-nova unpause $NAME
-
-sleep 2
-
-ping -c1 -w1 $IP
-
-# dis-allow icmp traffic (ping)
-nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-
-# FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "xenserver" ]; then
-    # test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
-    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
-        echo "Security group failure - ping should not be allowed!"
-        echo "Couldn't ping server with floating ip"
-        exit 1
-    fi
-fi
-
-# de-allocate the floating ip
-nova floating-ip-delete $FLOATING_IP
-
-# shutdown the server
-nova delete $NAME
-
-# Delete a secgroup
-nova secgroup-delete $SECGROUP
-
-# FIXME: validate shutdown within 5 seconds
-# (nova show $NAME returns 1 or status != ACTIVE)?
-
-# Testing Euca2ools
-# ==================
-
-# make sure that we can describe instances
-euca-describe-instances
-
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
-    # Testing Swift
-    # =============
-
-    # Check that we have access to swift via keystone
-    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat
-
-    # We start by creating a test container
-    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer
-
-    # add some files into it.
-    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue
-
-    # list them
-    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer
-
-    # And we may want to delete them now that we have tested that
-    # everything works.
-    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer
-fi
+done
+
+# output status of exercise run
+echo =========================
+echo =========================
+for script in $skips; do
+    echo SKIP $script
+done
+for script in $passes; do
+    echo PASS $script
+done
+for script in $failures; do
+    echo FAILED $script
+done
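
A note on the skip mechanism above: ``[[ "$SKIP_EXERCISES" =~ $script ]]`` is a
regex match, so any basename that appears as a substring of SKIP_EXERCISES is
skipped. A minimal sketch of the behavior (the exercise names here are
illustrative assumptions):

    SKIP_EXERCISES="euca,swift" ./exercise.sh
    # inside the loop, [[ "euca,swift" =~ euca ]] succeeds, so euca is skipped;
    # because this is a substring match rather than an exact list lookup,
    # SKIP_EXERCISES=euca would also skip a hypothetical euca_extra exercise.
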
new file mode 100755
... ...
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# we will use the ``euca2ools`` cli tool that wraps the python boto
+# library to test ec2 compatibility
+#
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Use openrc + stackrc + localrc for settings
+pushd $(cd $(dirname "$0")/.. && pwd)
+source ./openrc
+popd
+
+# find a machine image to boot
+IMAGE=`euca-describe-images | grep machine | cut -f2`
+
+# launch it
+INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2`
+
+# ensure it has booted within a reasonable time
+if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
+    echo "server didn't become active within $RUNNING_TIMEOUT seconds"
+    exit 1
+fi
+
+euca-terminate-instances $INSTANCE
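
The boot wait above uses a polling idiom that recurs throughout these scripts:
timeout(1) bounds a shell loop that retries a condition once per second. A
generic sketch of the pattern (wait_for is an illustrative helper, not part of
this commit):

    # wait up to $1 seconds for the command in $2 to succeed
    wait_for() {
        local limit=$1
        local cond=$2
        if ! timeout "$limit" sh -c "while ! $cond; do sleep 1; done"; then
            echo "condition not met within $limit seconds"
            return 1
        fi
    }
    # e.g.: wait_for "$RUNNING_TIMEOUT" "euca-describe-instances $INSTANCE | grep -q running"
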
new file mode 100755
... ...
@@ -0,0 +1,190 @@
+#!/usr/bin/env bash
+
+# **exercise.sh** - using the cloud can be fun
+
+# we will use the ``nova`` cli tool provided by the ``python-novaclient``
+# package
+#
+
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Use openrc + stackrc + localrc for settings
+pushd $(cd $(dirname "$0")/.. && pwd)
+source ./openrc
+popd
+
+# Get a token for clients that don't support service catalog
+# ==========================================================
+
+# manually create a token by querying keystone (sending JSON data).  Keystone
+# returns a token and catalog of endpoints.  We use python to parse the token
+# and save it.
+
+TOKEN=`curl -s -d  "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
+
+# Launching a server
+# ==================
+
+# List servers for tenant:
+nova list
+
+# Images
+# ------
+
+# Nova has a **deprecated** way of listing images.
+nova image-list
+
+# But we recommend using glance directly
+glance -A $TOKEN index
+
+# Let's grab the id of the first AMI image to launch
+IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1`
+
+# Security Groups
+# ---------------
+SECGROUP=test_secgroup
+
+# List of secgroups:
+nova secgroup-list
+
+# Create a secgroup
+nova secgroup-create $SECGROUP "test_secgroup description"
+
+# Determine flavor
+# ----------------
+
+# List of flavors:
+nova flavor-list
+
+# and grab the first flavor in the list to launch
+FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
+
+NAME="myserver"
+
+nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
+
+# Testing
+# =======
+
+# First check if it spins up (becomes active and responds to ping on
+# internal ip).  If you run this script from a nova node, you should
+# bypass security groups and have direct access to the server.
+
+# Waiting for boot
+# ----------------
+
+# Max time to wait while vm goes from build to active state
+ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
+
+# Max time till the vm is bootable
+BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
+
+# Max time to wait for proper association and dis-association.
+ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
+
+# check that the status is active within ACTIVE_TIMEOUT seconds
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
+    echo "server didn't become active!"
+    exit 1
+fi
+
+# get the IP of the server
+IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`
+
+# for single node deployments, we can ping private ips
+MULTI_HOST=${MULTI_HOST:-0}
+if [ "$MULTI_HOST" = "0" ]; then
+    # sometimes the first ping fails (10 seconds isn't enough time for the VM's
+    # network to respond?), so let's ping for a default of 15 seconds with a
+    # timeout of a second for each ping.
+    if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
+        echo "Couldn't ping server"
+        exit 1
+    fi
+else
+    # On a multi-host system, without vm net access, do a sleep to wait for the boot
+    sleep $BOOT_TIMEOUT
+fi
+
+# Security Groups & Floating IPs
+# ------------------------------
+
+# allow icmp traffic (ping)
+nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+
+# List rules for a secgroup
+nova secgroup-list-rules $SECGROUP
+
+# allocate a floating ip
+nova floating-ip-create
+
+# store floating address
+FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
+
+# add floating ip to our server
+nova add-floating-ip $NAME $FLOATING_IP
+
+# test that we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
+    echo "Couldn't ping server with floating ip"
+    exit 1
+fi
+
+# pause the VM and verify we can't ping it anymore
+nova pause $NAME
+
+sleep 2
+
+if ( ping -c1 -w1 $IP); then
+    echo "Pause failure - ping shouldn't work"
+    exit 1
+fi
+
+if ( ping -c1 -w1 $FLOATING_IP); then
+    echo "Pause failure - ping floating ips shouldn't work"
+    exit 1
+fi
+
+# unpause the VM and verify we can ping it again
+nova unpause $NAME
+
+sleep 2
+
+ping -c1 -w1 $IP
+
+# dis-allow icmp traffic (ping)
+nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+
+# FIXME (anthony): make xs support security groups
+if [ "$VIRT_DRIVER" != "xenserver" ]; then
+    # test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
+    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
+        echo "Security group failure - ping should not be allowed!"
+        echo "Couldn't ping server with floating ip"
+        exit 1
+    fi
+fi
+
+# de-allocate the floating ip
+nova floating-ip-delete $FLOATING_IP
+
+# shutdown the server
+nova delete $NAME
+
+# Delete a secgroup
+nova secgroup-delete $SECGROUP
+
+# FIXME: validate shutdown within 5 seconds
+# (nova show $NAME returns 1 or status != ACTIVE)?
+
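
The TOKEN one-liner above packs a keystone auth request and its parsing into a
single line. An equivalent, step-by-step sketch of the same request (same
endpoint and fields; the intermediate variable names are illustrative):

    # POST passwordCredentials to keystone and pull access.token.id out of
    # the JSON response
    AUTH_JSON="{\"auth\": {\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}"
    RESPONSE=`curl -s -H "Content-type: application/json" -d "$AUTH_JSON" http://$HOST_IP:5000/v2.0/tokens`
    TOKEN=`echo "$RESPONSE" | python -c "import sys, json; print json.load(sys.stdin)['access']['token']['id']"`
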
new file mode 100755
... ...
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+
+# Test swift via the command line tools that ship with it.
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Use openrc + stackrc + localrc for settings
+pushd $(cd $(dirname "$0")/.. && pwd)
+source ./openrc
+popd
+
+
+# Testing Swift
+# =============
+
+# Check that we have access to swift via keystone
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat
+
+# We start by creating a test container
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer
+
+# add some files into it.
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue
+
+# list them
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer
+
+# And we may want to delete them now that we have tested that
+# everything works.
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer
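
Each of the five swift calls above repeats the same keystone auth flags; an
equivalent formulation with a small wrapper (illustrative, not part of the
commit) makes the actual operations easier to read:

    function swift_cmd {
        swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD "$@"
    }
    swift_cmd stat
    swift_cmd post testcontainer
    swift_cmd upload testcontainer /etc/issue
    swift_cmd list testcontainer
    swift_cmd delete testcontainer
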
... ...
@@ -1,4 +1,5 @@
 dnsmasq-base
+dnsmasq-utils # for dhcp_release
 kpartx
 parted
 arping # used for send_arp_for_ha option in nova-network
deleted file mode 100644
... ...
@@ -1,18 +0,0 @@
-# a collection of packages that speed up installation as they are dependencies
-# of packages we can't install during bootstrapping (rabbitmq-server,
-# mysql-server, libvirt-bin)
-#
-# NOTE: only add packages to this file that aren't needed directly
-mysql-common
-mysql-client-5.1
-erlang-base
-erlang-ssl
-erlang-nox
-erlang-inets
-erlang-mnesia
-libhtml-template-perl
-gettext-base
-libavahi-client3
-libxml2-utils
-libpciaccess0
-libparted0debian1
@@ -24,7 +24,7 @@ registry_port = 9191
 
 # Log to this file. Make sure you do not set the same log
 # file for both the API and registry servers!
-log_file = %DEST%/glance/api.log
+#log_file = %DEST%/glance/api.log
 
 # Send logs to syslog (/dev/log) instead of to file specified by `log_file`
 use_syslog = %SYSLOG%
... ...
@@ -13,7 +13,7 @@ bind_port = 9191
 
 # Log to this file. Make sure you do not set the same log
 # file for both the API and registry servers!
-log_file = %DEST%/glance/registry.log
+#log_file = %DEST%/glance/registry.log
 
 # Where to store images
 filesystem_store_datadir = %DEST%/glance/images
new file mode 100644
... ...
@@ -0,0 +1,127 @@
+#######
+# EC2 #
+#######
+
+[composite:ec2]
+use = egg:Paste#urlmap
+/: ec2versions
+/services/Cloud: ec2cloud
+/services/Admin: ec2admin
+/latest: ec2metadata
+/2007-01-19: ec2metadata
+/2007-03-01: ec2metadata
+/2007-08-29: ec2metadata
+/2007-10-10: ec2metadata
+/2007-12-15: ec2metadata
+/2008-02-01: ec2metadata
+/2008-09-01: ec2metadata
+/2009-04-04: ec2metadata
+/1.0: ec2metadata
+
+[pipeline:ec2cloud]
+pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
+
+[pipeline:ec2admin]
+pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
+
+[pipeline:ec2metadata]
+pipeline = logrequest ec2md
+
+[pipeline:ec2versions]
+pipeline = logrequest ec2ver
+
+[filter:logrequest]
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
+
+[filter:ec2lockout]
+paste.filter_factory = nova.api.ec2:Lockout.factory
+
+[filter:totoken]
+paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory
+
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
+[filter:authenticate]
+paste.filter_factory = nova.api.ec2:Authenticate.factory
+
+[filter:cloudrequest]
+controller = nova.api.ec2.cloud.CloudController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:adminrequest]
+controller = nova.api.ec2.admin.AdminController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:Authorizer.factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:Executor.factory
+
+[app:ec2ver]
+paste.app_factory = nova.api.ec2:Versions.factory
+
+[app:ec2md]
+paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory
+
+#############
+# Openstack #
+#############
+
+[composite:osapi]
+use = egg:Paste#urlmap
+/: osversions
+/v1.0: openstackapi10
+/v1.1: openstackapi11
+
+[pipeline:openstackapi10]
+pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
+
+[pipeline:openstackapi11]
+pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:auth]
+paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
+
+[filter:noauth]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:ratelimit]
+paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
+
+[filter:extensions]
+paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory
+
+[app:osapiapp10]
+paste.app_factory = nova.api.openstack:APIRouterV10.factory
+
+[app:osapiapp11]
+paste.app_factory = nova.api.openstack:APIRouterV11.factory
+
+[pipeline:osversions]
+pipeline = faultwrap osversionapp
+
+[app:osversionapp]
+paste.app_factory = nova.api.openstack.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystone.middleware.auth_token:filter_factory
+service_protocol = http
+service_host = 127.0.0.1
+service_port = 5000
+auth_host = 127.0.0.1
+auth_port = 35357
+auth_protocol = http
+auth_uri = http://127.0.0.1:5000/
+admin_token = %SERVICE_TOKEN%
... ...
@@ -49,3 +49,14 @@ export EC2_SECRET_KEY=${ADMIN_PASSWORD:-secrete}
 # set log level to DEBUG (helps debug issues)
 # export NOVACLIENT_DEBUG=1
 
+# Max time till the vm is bootable
+export BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
+
+# Max time to wait while vm goes from build to active state
+export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
+
+# Max time from run instance command until it is running
+export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))}
+
+# Max time to wait for proper IP association and dis-association.
+export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
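
RUNNING_TIMEOUT composes the other two timeouts with shell arithmetic, so the
defaults give $((15 + 10)) = 25 seconds, and overriding either input before
sourcing openrc propagates through; for example (illustrative, run from the
devstack checkout):

    BOOT_TIMEOUT=30 bash -c 'source ./openrc; echo $RUNNING_TIMEOUT'   # prints 40
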
... ...
@@ -103,8 +103,7 @@ if [[ $EUID -eq 0 ]]; then
 
     # since this script runs as a normal user, we need to give that user
     # ability to run sudo
-    apt_get update
-    apt_get install sudo
+    dpkg -l sudo || apt_get update && apt_get install sudo
 
     if ! getent passwd stack >/dev/null; then
         echo "Creating a user called stack"
... ...
@@ -121,7 +120,7 @@ if [[ $EUID -eq 0 ]]; then
     echo "Copying files to stack user"
     STACK_DIR="$DEST/${PWD##*/}"
     cp -r -f "$PWD" "$STACK_DIR"
-    chown -R $USER "$STACK_DIR"
+    chown -R stack "$STACK_DIR"
     if [[ "$SHELL_AFTER_RUN" != "no" ]]; then
         exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack
     else
... ...
@@ -233,7 +232,7 @@ VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE}
 # Multi-host is a mode where each compute node runs its own network node.  This
 # allows network operations and routing for a VM to occur on the server that is
 # running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=${MULTI_HOST:-0}
+MULTI_HOST=${MULTI_HOST:-False}
 
 # If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE``
 # variable but make sure that the interface doesn't already have an
... ...
@@ -326,7 +325,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
     # can never change.
     read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
 fi
-    
+
 # Keystone
 # --------
 
... ...
@@ -591,13 +590,12 @@ fi
 # ----
 
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    # We are going to use the sample http middleware configuration from the
-    # keystone project to launch nova.  This paste config adds the configuration
-    # required for nova to validate keystone tokens - except we need to switch
-    # the config to use our service token instead (instead of the invalid token
-    # 999888777666).
-    cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin
-    sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
+    # We are going to use a sample http middleware configuration based on the
+    # one from the keystone project to launch nova.  This paste config adds
+    # the configuration required for nova to validate keystone tokens. We add
+    # our own service token to the configuration.
+    cp $FILES/nova-api-paste.ini $NOVA_DIR/bin
+    sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
 fi
 
 if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
... ...
@@ -679,13 +677,13 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
     USER_GROUP=$(id -g)
     sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives
     sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives
-    
+
     # We then create a loopback disk and format it to XFS.
     if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]];then
         mkdir -p  ${SWIFT_DATA_LOCATION}/drives/images
         sudo touch  ${SWIFT_DATA_LOCATION}/drives/images/swift.img
         sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img
-        
+
         dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \
             bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
         mkfs.xfs -f -i size=1024  ${SWIFT_DATA_LOCATION}/drives/images/swift.img
... ...
@@ -702,9 +700,9 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
     # We then create link to that mounted location so swift would know
     # where to go.
     for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done
-    
+
     # We now have to emulate a few different servers into one we
-    # create all the directories needed for swift 
+    # create all the directories needed for swift
     tmpd=""
     for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \
         ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \
... ...
@@ -720,7 +718,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
    # swift-init has a bug using /etc/swift until bug #885595 is fixed
    # we have to create a link
    sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift
-   
+
    # Swift uses rsync to synchronize between all the different
    # partitions (which makes more sense when you have a multi-node
    # setup) we configure it with our version of rsync.
... ...
@@ -756,7 +754,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
        local bind_port=$2
        local log_facility=$3
        local node_number
-       
+
        for node_number in {1..4};do
            node_path=${SWIFT_DATA_LOCATION}/${node_number}
            sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
... ...
@@ -783,14 +781,14 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
 
    # We then can start rsync.
    sudo /etc/init.d/rsync restart || :
-      
+
    # Create our ring for the object/container/account.
    /usr/local/bin/swift-remakerings
 
    # And now we launch swift-startmain to get our cluster running
    # ready to be tested.
    /usr/local/bin/swift-startmain || :
-   
+
    unset s swift_hash swift_auth_server tmpd
 fi
 
... ...
@@ -858,15 +856,16 @@ add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST"
 add_nova_flag "--rabbit_host=$RABBIT_HOST"
 add_nova_flag "--rabbit_password=$RABBIT_PASSWORD"
 add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
+add_nova_flag "--force_dhcp_release"
 if [ -n "$INSTANCES_PATH" ]; then
     add_nova_flag "--instances_path=$INSTANCES_PATH"
 fi
-if [ -n "$MULTI_HOST" ]; then
-    add_nova_flag "--multi_host=$MULTI_HOST"
-    add_nova_flag "--send_arp_for_ha=1"
+if [ "$MULTI_HOST" != "False" ]; then
+    add_nova_flag "--multi_host"
+    add_nova_flag "--send_arp_for_ha"
 fi
 if [ "$SYSLOG" != "False" ]; then
-    add_nova_flag "--use_syslog=1"
+    add_nova_flag "--use_syslog"
 fi
 
 # XenServer
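
The switch from --multi_host=$MULTI_HOST and --use_syslog=1 to bare flags
reflects gflags-style boolean options, which are enabled by presence.
add_nova_flag is defined elsewhere in stack.sh; a sketch of its shape (the
flagfile path here is an assumption for illustration):

    function add_nova_flag {
        # append one flag per line to nova's flagfile
        echo "$1" >> $NOVA_DIR/bin/nova.conf   # path is an assumption
    }
    add_nova_flag "--multi_host"                 # boolean: presence enables it
    add_nova_flag "--rabbit_host=$RABBIT_HOST"   # valued flag
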
... ...
@@ -942,6 +941,10 @@ function screen_it {
     NL=`echo -ne '\015'`
     if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then
         screen -S stack -X screen -t $1
+        # sleep to allow bash to be ready to be sent the command - we are
+        # creating a new window in screen and then sending characters, so if
+        # bash isn't running by the time we send the command, nothing happens
+        sleep 1
         screen -S stack -p $1 -X stuff "$2$NL"
     fi
 }
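
screen_it only starts a service if it appears in ENABLED_SERVICES; the added
sleep closes the race between creating the screen window and stuffing the
command into it. A typical call looks like this (the service name and command
here are illustrative):

    screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"
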
new file mode 100755
... ...
@@ -0,0 +1,248 @@
+#!/usr/bin/env bash
+
+# Make sure that we have the proper version of ubuntu (only works on oneiric)
+if ! egrep -q "oneiric" /etc/lsb-release; then
+    echo "This script only works with ubuntu oneiric."
+    exit 1
+fi
+
+# Keep track of the current directory
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=`cd $TOOLS_DIR/..; pwd`
+
+cd $TOP_DIR
+
+# Source params
+source ./stackrc
+
+# Ubuntu distro to install
+DIST_NAME=${DIST_NAME:-oneiric}
+
+# Configure how large the VM should be
+GUEST_SIZE=${GUEST_SIZE:-10G}
+
+# exit on error to stop unexpected errors
+set -o errexit
+set -o xtrace
+
+# Abort if localrc is not set
+if [ ! -e $TOP_DIR/localrc ]; then
+    echo "You must have a localrc with ALL necessary passwords defined before proceeding."
+    echo "See stack.sh for required passwords."
+    exit 1
+fi
+
+# Install deps if needed
+DEPS="kvm libvirt-bin kpartx cloud-utils"
+dpkg -l $DEPS || apt-get install -y --force-yes $DEPS
+
+# Where to store files and instances
+WORK_DIR=${WORK_DIR:-/opt/kvmstack}
+
+# Where to store images
+image_dir=$WORK_DIR/images/$DIST_NAME
+mkdir -p $image_dir
+
+# Original version of built image
+uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz
+tarball=$image_dir/$(basename $uec_url)
+
+# download the base uec image if we haven't already
+if [ ! -f $tarball ]; then
+    curl $uec_url -o $tarball
+    (cd $image_dir && tar -Sxvzf $tarball)
+    resize-part-image $image_dir/*.img $GUEST_SIZE $image_dir/disk
+    cp $image_dir/*-vmlinuz-virtual $image_dir/kernel
+fi
+
+
+# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
+ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
+
+# Name of our instance, used by libvirt
+GUEST_NAME=${GUEST_NAME:-devstack}
+
+# Mop up after previous runs
+virsh destroy $GUEST_NAME || true
+
+# Where this vm is stored
+vm_dir=$WORK_DIR/instances/$GUEST_NAME
+
+# Create vm dir and remove old disk
+mkdir -p $vm_dir
+rm -f $vm_dir/disk
+
+# Create a copy of the base image
+qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk
+
+# Back to devstack
+cd $TOP_DIR
+
+GUEST_NETWORK=${GUEST_NETWORK:-1}
+GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes}
+GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50}
+GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
+GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
+GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1}
+GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"}
+GUEST_RAM=${GUEST_RAM:-1524288}
+GUEST_CORES=${GUEST_CORES:-1}
+
+# libvirt network configuration
+NET_XML=$vm_dir/net.xml
+cat > $NET_XML <<EOF
+<network>
+  <name>devstack-$GUEST_NETWORK</name>
+  <bridge name="stackbr%d" />
+  <forward/>
+  <ip address="$GUEST_GATEWAY" netmask="$GUEST_NETMASK">
+    <dhcp>
+      <range start='192.168.$GUEST_NETWORK.2' end='192.168.$GUEST_NETWORK.127' />
+    </dhcp>
+  </ip>
+</network>
+EOF
+
+if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
+    virsh net-destroy devstack-$GUEST_NETWORK || true
+    # destroying the network isn't enough to delete the leases
+    rm -f /var/lib/libvirt/dnsmasq/devstack-$GUEST_NETWORK.leases
+    virsh net-create $vm_dir/net.xml
+fi
+
+# libvirt.xml configuration
+LIBVIRT_XML=$vm_dir/libvirt.xml
+cat > $LIBVIRT_XML <<EOF
+<domain type='kvm'>
+  <name>$GUEST_NAME</name>
+  <memory>$GUEST_RAM</memory>
+  <os>
+    <type>hvm</type>
+    <kernel>$image_dir/kernel</kernel>
+    <cmdline>root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu</cmdline>
+  </os>
+  <features>
+    <acpi/>
+  </features>
+  <clock offset='utc'/>
+  <vcpu>$GUEST_CORES</vcpu>
+  <devices>
+    <disk type='file'>
+      <driver type='qcow2'/>
+      <source file='$vm_dir/disk'/>
+      <target dev='vda' bus='virtio'/>
+    </disk>
+
+    <interface type='network'>
+      <source network='devstack-$GUEST_NETWORK'/>
+    </interface>
+
+    <!-- The order is significant here.  File must be defined first -->
+    <serial type="file">
+      <source path='$vm_dir/console.log'/>
+      <target port='1'/>
+    </serial>
+
+    <console type='pty' tty='/dev/pts/2'>
+      <source path='/dev/pts/2'/>
+      <target port='0'/>
+    </console>
+
+    <serial type='pty'>
+      <source path='/dev/pts/2'/>
+      <target port='0'/>
+    </serial>
+
+    <graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
+  </devices>
+</domain>
+EOF
+
+
+rm -rf $vm_dir/uec
+cp -r $TOOLS_DIR/uec $vm_dir/uec
+
+# set metadata
+cat > $vm_dir/uec/meta-data<<EOF
+hostname: $GUEST_NAME
+instance-id: i-hop
+instance-type: m1.ignore
+local-hostname: $GUEST_NAME.local
+EOF
+
+# set user data
+cat > $vm_dir/uec/user-data<<EOF
+#!/bin/bash
+# hostname needs to resolve for rabbit
+sed -i "s/127.0.0.1/127.0.0.1 \`hostname\`/" /etc/hosts
+apt-get update
+apt-get install git sudo -y
+git clone https://github.com/cloudbuilders/devstack.git
+cd devstack
+git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'`
+git fetch
+git checkout `git rev-parse HEAD`
+cat > localrc <<LOCAL_EOF
+ROOTSLEEP=0
+`cat $TOP_DIR/localrc`
+LOCAL_EOF
+./stack.sh
+EOF
+
+# (re)start a metadata service
+(
+  pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1`
+  [ -z "$pid" ] || kill -9 $pid
+)
+cd $vm_dir/uec
+python meta.py 192.168.$GUEST_NETWORK.1:4567 &
+
+# Create the instance
+virsh create $vm_dir/libvirt.xml
+
+# Tail the console log till we are done
+WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
+if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
+    set +o xtrace
+    # Done creating the vm, let's tail the log
+    echo
+    echo "============================================================="
+    echo "                          -- YAY! --"
+    echo "============================================================="
+    echo
+    echo "We're done launching the vm, about to start tailing the"
+    echo "stack.sh log. It will take a second or two to start."
+    echo
+    echo "Just CTRL-C at any time to stop tailing."
+
+    while [ ! -e "$vm_dir/console.log" ]; do
+      sleep 1
+    done
+
+    tail -F $vm_dir/console.log &
+
+    TAIL_PID=$!
+
+    function kill_tail() {
+        kill $TAIL_PID
+        exit 1
+    }
+
+    # Let Ctrl-c kill tail and exit
+    trap kill_tail SIGINT
+
+    echo "Waiting for stack.sh to finish..."
+    while ! egrep -q '^stack.sh (completed|failed)' $vm_dir/console.log ; do
+        sleep 1
+    done
+
+    set -o xtrace
+
+    kill $TAIL_PID
+
+    if ! grep -q "^stack.sh completed in" $vm_dir/console.log; then
+        exit 1
+    fi
+    echo ""
+    echo "Finished - Zip-a-dee Doo-dah!"
+fi
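
The guest MAC above embeds the network number as two uppercase hex digits via
printf, which keeps MACs unique per devstack network:

    printf '%02X\n' 1    # -> 01
    printf '%02X\n' 15   # -> 0F
    # so GUEST_NETWORK=15 yields GUEST_MAC=02:16:3e:07:69:0F
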
new file mode 100755
... ...
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+
+# Echo commands
+set -o xtrace
+
+# Exit on error to stop unexpected errors
+set -o errexit
+
+# Keep track of the current directory
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=`cd $TOOLS_DIR/..; pwd`
+
+# Change dir to top of devstack
+cd $TOP_DIR
+
+# Echo usage
+usage() {
+    echo "Add stack user and keys"
+    echo ""
+    echo "Usage: $0 [full path to raw uec base image]"
+}
+
+# Make sure this is a raw image
+if ! qemu-img info $1 | grep -q "file format: raw"; then
+    usage
+    exit 1
+fi
+
+# Mount the image
+DEST=/opt/stack
+STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage.user
+mkdir -p $STAGING_DIR
+umount $STAGING_DIR || true
+sleep 1
+mount -t ext4 -o loop $1 $STAGING_DIR
+mkdir -p $STAGING_DIR/$DEST
+
+# Create a stack user that is a member of the libvirtd group so that stack
+# is able to interact with libvirt.
+chroot $STAGING_DIR groupadd libvirtd || true
+chroot $STAGING_DIR useradd stack -s /bin/bash -d $DEST -G libvirtd || true
+
+# Add a simple password - pass
+echo stack:pass | chroot $STAGING_DIR chpasswd
+
+# Configure sudo
+grep -q "^#includedir.*/etc/sudoers.d" $STAGING_DIR/etc/sudoers ||
+    echo "#includedir /etc/sudoers.d" | sudo tee -a $STAGING_DIR/etc/sudoers
+cp $TOP_DIR/files/sudo/* $STAGING_DIR/etc/sudoers.d/
+sed -e "s,%USER%,$USER,g" -i $STAGING_DIR/etc/sudoers.d/*
+
+# and give stack sudo ability (in the future this should be limited to only
+# what stack requires)
+echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers
+
+# Gracefully cp only if source file/dir exists
+function cp_it {
+    if [ -e $1 ] || [ -d $1 ]; then
+        cp -pRL $1 $2
+    fi
+}
+
+# Copy over your ssh keys and env if desired
+cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh
+cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys
+cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig
+cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc
+cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc
+
+# Give stack ownership over $DEST so it may do the work needed
+chroot $STAGING_DIR chown -R stack $DEST
+
+# Unmount
+umount $STAGING_DIR
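
The raw-image guard at the top greps qemu-img output for its "file format"
line; a quick way to see what it matches (illustrative):

    qemu-img create -f raw /tmp/test.img 1M
    qemu-img info /tmp/test.img | grep "file format"   # -> file format: raw
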
new file mode 100644
... ...
@@ -0,0 +1,29 @@
+import sys
+from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+from SimpleHTTPServer import SimpleHTTPRequestHandler
+
+def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
+         ServerClass = HTTPServer, protocol="HTTP/1.0"):
+    """simple http server that listens on a given address:port"""
+
+    server_address = (host, port)
+
+    HandlerClass.protocol_version = protocol
+    httpd = ServerClass(server_address, HandlerClass)
+
+    sa = httpd.socket.getsockname()
+    print "Serving HTTP on", sa[0], "port", sa[1], "..."
+    httpd.serve_forever()
+
+if __name__ == '__main__':
+    if sys.argv[1:]:
+        address = sys.argv[1]
+    else:
+        address = '0.0.0.0'
+    if ':' in address:
+        host, port = address.split(':')
+    else:
+        host = address
+        port = 8080
+
+    main(host, int(port))
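
meta.py is the tiny metadata server that the build script above backgrounds
with "python meta.py 192.168.$GUEST_NETWORK.1:4567 &"; being a
SimpleHTTPRequestHandler, it serves the meta-data and user-data files out of
its working directory over plain HTTP. Run standalone (illustrative):

    cd $vm_dir/uec
    python meta.py 192.168.1.1:4567
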
new file mode 100755
... ...
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+# Echo commands
+set -o xtrace
+
+# Exit on error to stop unexpected errors
+set -o errexit
+
+# Keep track of the current directory
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=`cd $TOOLS_DIR/..; pwd`
+
+# Change dir to top of devstack
+cd $TOP_DIR
+
+# Echo usage
+usage() {
+    echo "Cache OpenStack dependencies on a uec image to speed up performance."
+    echo ""
+    echo "Usage: $0 [full path to raw uec base image]"
+}
+
+# Make sure this is a raw image
+if ! qemu-img info $1 | grep -q "file format: raw"; then
+    usage
+    exit 1
+fi
+
+# Make sure we are in the correct dir
+if [ ! -d files/apts ]; then
+    echo "Please run this script from devstack/tools/"
+    exit 1
+fi
+
+# Mount the image
+STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage
+mkdir -p $STAGING_DIR
+umount $STAGING_DIR || true
+sleep 1
+mount -t ext4 -o loop $1 $STAGING_DIR
+
+# Make sure that base requirements are installed
+cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf
+
+# Perform caching on the base image to speed up subsequent runs
+chroot $STAGING_DIR apt-get update
+chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
+chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true
+mkdir -p $STAGING_DIR/var/cache/pip
+PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true
+
+# Unmount
+umount $STAGING_DIR
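
The two install passes above split files/apts entries on the NOPRIME tag:
tagged packages are only downloaded into the apt cache, untagged ones are
installed outright. Entries are "package # comment" lines, and cut -d\# -f1
strips the comment; for example (the package line is illustrative):

    echo "rabbitmq-server # NOPRIME" | cut -d\# -f1    # -> "rabbitmq-server "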