
merge origin/master

Jesse Andrews authored on 2011/11/11 04:46:18
Showing 34 changed files
... ...
@@ -1,3 +1,5 @@
1 1
 proto
2 2
 *~
3
+*.log
4
+src
3 5
 localrc
4 6
old mode 100755
5 7
new mode 100644
... ...
@@ -84,6 +84,15 @@ nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
84 84
 # Waiting for boot
85 85
 # ----------------
86 86
 
87
+# Max time to wait while vm goes from build to active state
88
+ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
89
+
90
+# Max time till the vm is bootable
91
+BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
92
+
93
+# Max time to wait for proper association and disassociation.
94
+ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
95
+
87 96
 # check that the status is active within ACTIVE_TIMEOUT seconds
88 97
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
89 98
     echo "server didn't become active!"
... ...
@@ -158,7 +167,7 @@ ping -c1 -w1 $IP
158 158
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
159 159
 
160 160
 # FIXME (anthony): make xs support security groups
161
-if [ "$VIRT_DRIVER" != "xenserver"]; then
161
+if [ "$VIRT_DRIVER" != "xenserver" ]; then
162 162
    # test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
163 163
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
164 164
         print "Security group failure - ping should not be allowed!"
... ...
@@ -1,7 +1,9 @@
1 1
 dnsmasq-base
2
+dnsmasq-utils # for dhcp_release
2 3
 kpartx
3 4
 parted
4
-mysql-server
5
+arping # used for send_arp_for_ha option in nova-network
6
+mysql-server # NOPRIME
5 7
 python-mysqldb
6 8
 kvm
7 9
 gawk
... ...
@@ -10,10 +12,10 @@ ebtables
10 10
 sqlite3
11 11
 sudo
12 12
 kvm
13
-libvirt-bin
13
+libvirt-bin # NOPRIME
14 14
 vlan
15 15
 curl
16
-rabbitmq-server
16
+rabbitmq-server # NOPRIME
17 17
 socat # used by ajaxterm
18 18
 python-mox
19 19
 python-paste
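Review note: the new "# NOPRIME" tags mark packages that the image-prime tools should only download, leaving the actual install to stack.sh. A sketch of the filter, as used by the chroot hunks further down (cut -d\# -f1 strips the trailing comments from the package lists):

    apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
    apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1`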
20 20
deleted file mode 100644
... ...
@@ -1,18 +0,0 @@
1
-# a collection of packages that speed up installation as they are dependencies
2
-# of packages we can't install during bootstraping (rabbitmq-server, 
3
-# mysql-server, libvirt-bin)
4
-#
5
-# NOTE: only add packages to this file that aren't needed directly
6
-mysql-common
7
-mysql-client-5.1
8
-erlang-base
9
-erlang-ssl 
10
-erlang-nox
11
-erlang-inets
12
-erlang-mnesia
13
-libhtml-template-perl
14
-gettext-base
15
-libavahi-client3
16
-libxml2-utils
17
-libpciaccess0
18
-libparted0debian1
19 1
new file mode 100644
... ...
@@ -0,0 +1,17 @@
0
+curl
1
+gcc
2
+memcached # NOPRIME
3
+python-configobj
4
+python-coverage
5
+python-dev
6
+python-eventlet
7
+python-greenlet
8
+python-netifaces
9
+python-nose
10
+python-pastedeploy
11
+python-setuptools
12
+python-simplejson
13
+python-webob
14
+python-xattr
15
+sqlite3
16
+xfsprogs
... ...
@@ -24,7 +24,7 @@ registry_port = 9191
24 24
 
25 25
 # Log to this file. Make sure you do not set the same log
26 26
 # file for both the API and registry servers!
27
-log_file = %DEST%/glance/api.log
27
+#log_file = %DEST%/glance/api.log
28 28
 
29 29
 # Send logs to syslog (/dev/log) instead of to file specified by `log_file`
30 30
 use_syslog = %SYSLOG%
... ...
@@ -13,7 +13,7 @@ bind_port = 9191
13 13
 
14 14
 # Log to this file. Make sure you do not set the same log
15 15
 # file for both the API and registry servers!
16
-log_file = %DEST%/glance/registry.log
16
+#log_file = %DEST%/glance/registry.log
17 17
 
18 18
 # Where to store images
19 19
 filesystem_store_datadir = %DEST%/glance/images
... ...
@@ -30,12 +30,13 @@ $BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin
30 30
 $BIN_DIR/keystone-manage $* service add nova compute "Nova Compute Service"
31 31
 $BIN_DIR/keystone-manage $* service add glance image "Glance Image Service"
32 32
 $BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service"
33
+$BIN_DIR/keystone-manage $* service add swift object-store "Swift Service"
33 34
 
34 35
 #endpointTemplates
35 36
 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id%  http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1
36 37
 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1
37 38
 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1
38
-# $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1
39
+$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1
39 40
 
40 41
 # Tokens
41 42
 $BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00
42 43
new file mode 100644
... ...
@@ -0,0 +1,127 @@
0
+#######
1
+# EC2 #
2
+#######
3
+
4
+[composite:ec2]
5
+use = egg:Paste#urlmap
6
+/: ec2versions
7
+/services/Cloud: ec2cloud
8
+/services/Admin: ec2admin
9
+/latest: ec2metadata
10
+/2007-01-19: ec2metadata
11
+/2007-03-01: ec2metadata
12
+/2007-08-29: ec2metadata
13
+/2007-10-10: ec2metadata
14
+/2007-12-15: ec2metadata
15
+/2008-02-01: ec2metadata
16
+/2008-09-01: ec2metadata
17
+/2009-04-04: ec2metadata
18
+/1.0: ec2metadata
19
+
20
+[pipeline:ec2cloud]
21
+pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
22
+
23
+[pipeline:ec2admin]
24
+pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
25
+
26
+[pipeline:ec2metadata]
27
+pipeline = logrequest ec2md
28
+
29
+[pipeline:ec2versions]
30
+pipeline = logrequest ec2ver
31
+
32
+[filter:logrequest]
33
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
34
+
35
+[filter:ec2lockout]
36
+paste.filter_factory = nova.api.ec2:Lockout.factory
37
+
38
+[filter:totoken]
39
+paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory
40
+
41
+[filter:ec2noauth]
42
+paste.filter_factory = nova.api.ec2:NoAuth.factory
43
+
44
+[filter:authenticate]
45
+paste.filter_factory = nova.api.ec2:Authenticate.factory
46
+
47
+[filter:cloudrequest]
48
+controller = nova.api.ec2.cloud.CloudController
49
+paste.filter_factory = nova.api.ec2:Requestify.factory
50
+
51
+[filter:adminrequest]
52
+controller = nova.api.ec2.admin.AdminController
53
+paste.filter_factory = nova.api.ec2:Requestify.factory
54
+
55
+[filter:authorizer]
56
+paste.filter_factory = nova.api.ec2:Authorizer.factory
57
+
58
+[app:ec2executor]
59
+paste.app_factory = nova.api.ec2:Executor.factory
60
+
61
+[app:ec2ver]
62
+paste.app_factory = nova.api.ec2:Versions.factory
63
+
64
+[app:ec2md]
65
+paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory
66
+
67
+#############
68
+# Openstack #
69
+#############
70
+
71
+[composite:osapi]
72
+use = egg:Paste#urlmap
73
+/: osversions
74
+/v1.0: openstackapi10
75
+/v1.1: openstackapi11
76
+
77
+[pipeline:openstackapi10]
78
+pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
79
+
80
+[pipeline:openstackapi11]
81
+pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
82
+
83
+[filter:faultwrap]
84
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
85
+
86
+[filter:auth]
87
+paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
88
+
89
+[filter:noauth]
90
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
91
+
92
+[filter:ratelimit]
93
+paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
94
+
95
+[filter:extensions]
96
+paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory
97
+
98
+[app:osapiapp10]
99
+paste.app_factory = nova.api.openstack:APIRouterV10.factory
100
+
101
+[app:osapiapp11]
102
+paste.app_factory = nova.api.openstack:APIRouterV11.factory
103
+
104
+[pipeline:osversions]
105
+pipeline = faultwrap osversionapp
106
+
107
+[app:osversionapp]
108
+paste.app_factory = nova.api.openstack.versions:Versions.factory
109
+
110
+##########
111
+# Shared #
112
+##########
113
+
114
+[filter:keystonecontext]
115
+paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory
116
+
117
+[filter:authtoken]
118
+paste.filter_factory = keystone.middleware.auth_token:filter_factory
119
+service_protocol = http
120
+service_host = 127.0.0.1
121
+service_port = 5000
122
+auth_host = 127.0.0.1
123
+auth_port = 35357
124
+auth_protocol = http
125
+auth_uri = http://127.0.0.1:5000/
126
+admin_token = %SERVICE_TOKEN%
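Review note: the %SERVICE_TOKEN% placeholder in this new paste config is not literal; stack.sh substitutes it at install time, as the hunk below shows:

    sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini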
... ...
@@ -41,7 +41,7 @@ Cmnd_Alias NOVACMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \
41 41
                       /usr/bin/socat,                           \
42 42
                       /sbin/parted,                             \
43 43
                       /usr/sbin/dnsmasq,                        \
44
-                      /usr/bin/arping
44
+                      /usr/sbin/arping
45 45
 
46 46
 %USER% ALL = (root) NOPASSWD: SETENV: NOVACMDS
47 47
 
48 48
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+[DEFAULT]
1
+devices = %NODE_PATH%/node
2
+mount_check = false
3
+bind_port = %BIND_PORT%
4
+user = %USER%
5
+log_facility = LOG_LOCAL%LOG_FACILITY%
6
+swift_dir = %SWIFT_CONFIG_LOCATION%
7
+
8
+[pipeline:main]
9
+pipeline = account-server
10
+
11
+[app:account-server]
12
+use = egg:swift#account
13
+
14
+[account-replicator]
15
+vm_test_mode = yes
16
+
17
+[account-auditor]
18
+
19
+[account-reaper]
0 20
new file mode 100644
... ...
@@ -0,0 +1,22 @@
0
+[DEFAULT]
1
+devices = %NODE_PATH%/node
2
+mount_check = false
3
+bind_port = %BIND_PORT%
4
+user = %USER%
5
+log_facility = LOG_LOCAL%LOG_FACILITY%
6
+swift_dir = %SWIFT_CONFIG_LOCATION%
7
+
8
+[pipeline:main]
9
+pipeline = container-server
10
+
11
+[app:container-server]
12
+use = egg:swift#container
13
+
14
+[container-replicator]
15
+vm_test_mode = yes
16
+
17
+[container-updater]
18
+
19
+[container-auditor]
20
+
21
+[container-sync]
0 22
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+[DEFAULT]
1
+devices = %NODE_PATH%/node
2
+mount_check = false
3
+bind_port = %BIND_PORT%
4
+user = %USER%
5
+log_facility = LOG_LOCAL%LOG_FACILITY%
6
+swift_dir = %SWIFT_CONFIG_LOCATION%
7
+
8
+[pipeline:main]
9
+pipeline = object-server
10
+
11
+[app:object-server]
12
+use = egg:swift#object
13
+
14
+[object-replicator]
15
+vm_test_mode = yes
16
+
17
+[object-updater]
18
+
19
+[object-auditor]
0 20
new file mode 100644
... ...
@@ -0,0 +1,32 @@
0
+[DEFAULT]
1
+bind_port = 8080
2
+user = %USER%
3
+log_facility = LOG_LOCAL1
4
+swift_dir = %SWIFT_CONFIG_LOCATION%
5
+
6
+[pipeline:main]
7
+pipeline = healthcheck cache %AUTH_SERVER% proxy-server
8
+
9
+[app:proxy-server]
10
+use = egg:swift#proxy
11
+allow_account_management = true
12
+account_autocreate = true
13
+
14
+[filter:keystone]
15
+use = egg:swiftkeystone2#keystone2
16
+keystone_admin_token = %SERVICE_TOKEN%
17
+keystone_url = http://localhost:35357/v2.0
18
+
19
+[filter:tempauth]
20
+use = egg:swift#tempauth
21
+user_admin_admin = admin .admin .reseller_admin
22
+user_test_tester = testing .admin
23
+user_test2_tester2 = testing2 .admin
24
+user_test_tester3 = testing3
25
+bind_ip = 0.0.0.0
26
+
27
+[filter:healthcheck]
28
+use = egg:swift#healthcheck
29
+
30
+[filter:cache]
31
+use = egg:swift#memcache
0 32
new file mode 100644
... ...
@@ -0,0 +1,79 @@
0
+uid = %USER%
1
+gid = %GROUP%
2
+log file = /var/log/rsyncd.log
3
+pid file = /var/run/rsyncd.pid
4
+address = 127.0.0.1
5
+
6
+[account6012]
7
+max connections = 25
8
+path = %SWIFT_DATA_LOCATION%/1/node/
9
+read only = false
10
+lock file = /var/lock/account6012.lock
11
+
12
+[account6022]
13
+max connections = 25
14
+path = %SWIFT_DATA_LOCATION%/2/node/
15
+read only = false
16
+lock file = /var/lock/account6022.lock
17
+
18
+[account6032]
19
+max connections = 25
20
+path = %SWIFT_DATA_LOCATION%/3/node/
21
+read only = false
22
+lock file = /var/lock/account6032.lock
23
+
24
+[account6042]
25
+max connections = 25
26
+path = %SWIFT_DATA_LOCATION%/4/node/
27
+read only = false
28
+lock file = /var/lock/account6042.lock
29
+
30
+
31
+[container6011]
32
+max connections = 25
33
+path = %SWIFT_DATA_LOCATION%/1/node/
34
+read only = false
35
+lock file = /var/lock/container6011.lock
36
+
37
+[container6021]
38
+max connections = 25
39
+path = %SWIFT_DATA_LOCATION%/2/node/
40
+read only = false
41
+lock file = /var/lock/container6021.lock
42
+
43
+[container6031]
44
+max connections = 25
45
+path = %SWIFT_DATA_LOCATION%/3/node/
46
+read only = false
47
+lock file = /var/lock/container6031.lock
48
+
49
+[container6041]
50
+max connections = 25
51
+path = %SWIFT_DATA_LOCATION%/4/node/
52
+read only = false
53
+lock file = /var/lock/container6041.lock
54
+
55
+
56
+[object6010]
57
+max connections = 25
58
+path = %SWIFT_DATA_LOCATION%/1/node/
59
+read only = false
60
+lock file = /var/lock/object6010.lock
61
+
62
+[object6020]
63
+max connections = 25
64
+path = %SWIFT_DATA_LOCATION%/2/node/
65
+read only = false
66
+lock file = /var/lock/object6020.lock
67
+
68
+[object6030]
69
+max connections = 25
70
+path = %SWIFT_DATA_LOCATION%/3/node/
71
+read only = false
72
+lock file = /var/lock/object6030.lock
73
+
74
+[object6040]
75
+max connections = 25
76
+path = %SWIFT_DATA_LOCATION%/4/node/
77
+read only = false
78
+lock file = /var/lock/object6040.lock
0 79
new file mode 100755
... ...
@@ -0,0 +1,26 @@
0
+#!/bin/bash
1
+
2
+cd %SWIFT_CONFIG_LOCATION%
3
+
4
+rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
5
+
6
+swift-ring-builder object.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
7
+swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1
8
+swift-ring-builder object.builder add z2-127.0.0.1:6020/sdb2 1
9
+swift-ring-builder object.builder add z3-127.0.0.1:6030/sdb3 1
10
+swift-ring-builder object.builder add z4-127.0.0.1:6040/sdb4 1
11
+swift-ring-builder object.builder rebalance
12
+
13
+swift-ring-builder container.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
14
+swift-ring-builder container.builder add z1-127.0.0.1:6011/sdb1 1
15
+swift-ring-builder container.builder add z2-127.0.0.1:6021/sdb2 1
16
+swift-ring-builder container.builder add z3-127.0.0.1:6031/sdb3 1
17
+swift-ring-builder container.builder add z4-127.0.0.1:6041/sdb4 1
18
+swift-ring-builder container.builder rebalance
19
+
20
+swift-ring-builder account.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
21
+swift-ring-builder account.builder add z1-127.0.0.1:6012/sdb1 1
22
+swift-ring-builder account.builder add z2-127.0.0.1:6022/sdb2 1
23
+swift-ring-builder account.builder add z3-127.0.0.1:6032/sdb3 1
24
+swift-ring-builder account.builder add z4-127.0.0.1:6042/sdb4 1
25
+swift-ring-builder account.builder rebalance
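Review note: a quick sanity check on the result (not part of the script) — running swift-ring-builder with only a builder file prints the ring's partition power, replica count and device table:

    swift-ring-builder object.builder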
0 26
new file mode 100755
... ...
@@ -0,0 +1,3 @@
0
+#!/bin/bash
1
+
2
+swift-init all restart
0 3
new file mode 100644
... ...
@@ -0,0 +1,3 @@
0
+[swift-hash]
1
+# random unique string that can never change (DO NOT LOSE)
2
+swift_hash_path_suffix = %SWIFT_HASH%
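Review note: SWIFT_HASH is prompted for by read_password in stack.sh. One hedged way to produce a suitable value — any stable random string works; openssl here is an assumption, not something devstack requires:

    openssl rand -hex 16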
... ...
@@ -70,7 +70,7 @@ fi
70 70
 # called ``localrc``
71 71
 #
72 72
 # If ``localrc`` exists, then ``stackrc`` will load those settings.  This is
73
-# useful for changing a branch or repostiory to test other versions.  Also you
73
+# useful for changing a branch or repository to test other versions.  Also you
74 74
 # can store your other settings like **MYSQL_PASSWORD** or **ADMIN_PASSWORD** instead
75 75
 # of letting devstack generate random ones for you.
76 76
 source ./stackrc
... ...
@@ -103,8 +103,7 @@ if [[ $EUID -eq 0 ]]; then
103 103
 
104 104
     # since this script runs as a normal user, we need to give that user
105 105
     # ability to run sudo
106
-    apt_get update
107
-    apt_get install sudo
106
+    dpkg -l sudo || { apt_get update && apt_get install sudo; }
108 107
 
109 108
     if ! getent passwd stack >/dev/null; then
110 109
         echo "Creating a user called stack"
... ...
@@ -150,6 +149,12 @@ KEYSTONE_DIR=$DEST/keystone
150 150
 NOVACLIENT_DIR=$DEST/python-novaclient
151 151
 OPENSTACKX_DIR=$DEST/openstackx
152 152
 NOVNC_DIR=$DEST/noVNC
153
+SWIFT_DIR=$DEST/swift
154
+SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2
155
+QUANTUM_DIR=$DEST/quantum
156
+
157
+# Default Quantum Plugin
158
+Q_PLUGIN=${Q_PLUGIN:-openvswitch}
153 159
 
154 160
 # Specify which services to launch.  These generally correspond to screen tabs
155 161
 ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit}
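Review note: a hedged localrc example for opting into the services this commit adds; the swift, q-svc and q-agt tags are taken from the hunks below (enabling swift also triggers the SWIFT_HASH prompt):

    # in localrc
    ENABLED_SERVICES=g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,swift,q-svc,q-agt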
... ...
@@ -169,6 +174,9 @@ if [ ! -n "$HOST_IP" ]; then
169 169
     HOST_IP=`LC_ALL=C /sbin/ifconfig  | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
170 170
 fi
171 171
 
172
+# Service startup timeout
173
+SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
174
+
172 175
 # Generic helper to configure passwords
173 176
 function read_password {
174 177
     set +o xtrace
... ...
@@ -224,7 +232,7 @@ VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE}
224 224
 # Multi-host is a mode where each compute node runs its own network node.  This
225 225
 # allows network operations and routing for a VM to occur on the server that is
226 226
 # running the VM - removing a SPOF and bandwidth bottleneck.
227
-MULTI_HOST=${MULTI_HOST:-0}
227
+MULTI_HOST=${MULTI_HOST:-False}
228 228
 
229 229
 # If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE``
230 230
 # variable but make sure that the interface doesn't already have an
... ...
@@ -239,11 +247,22 @@ MULTI_HOST=${MULTI_HOST:-0}
239 239
 # If you are running on a single node and don't need to access the VMs from
240 240
 # devices other than that node, you can set the flat interface to the same
241 241
 # value as ``FLAT_NETWORK_BRIDGE``.  This will stop the network hiccup from
242
-# occuring.
242
+# occurring.
243 243
 FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
244 244
 
245 245
 ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
246 246
 
247
+# Using Quantum networking:
248
+#
249
+# Make sure that q-svc is enabled in ENABLED_SERVICES.  If it is, the network
250
+# manager will be set to the QuantumManager.
251
+#
252
+# If you're planning to use the Quantum openvswitch plugin, set Q_PLUGIN to
253
+# "openvswitch" and make sure the q-agt service is enabled in
254
+# ENABLED_SERVICES.
255
+#
256
+# With Quantum networking the NET_MAN variable is ignored.
257
+
247 258
 
248 259
 # MySQL & RabbitMQ
249 260
 # ----------------
... ...
@@ -270,6 +289,42 @@ read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
270 270
 # Glance connection info.  Note the port must be specified.
271 271
 GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292}
272 272
 
273
+# SWIFT
274
+# -----
275
+# TODO: implement glance support
276
+# TODO: add logging to different location.
277
+
278
+# By default the swift drives and objects are located inside
279
+# the swift source directory. The SWIFT_DATA_LOCATION variable allows you to redefine
280
+# this.
281
+SWIFT_DATA_LOCATION=${SWIFT_DATA_LOCATION:-${SWIFT_DIR}/data}
282
+
283
+# We are going to have the configuration files inside the source
284
+# directory; change SWIFT_CONFIG_LOCATION if you want to adjust that.
285
+SWIFT_CONFIG_LOCATION=${SWIFT_CONFIG_LOCATION:-${SWIFT_DIR}/config}
286
+
287
+# devstack will create a loop-back disk formatted as XFS to store the
288
+# swift data. By default the disk size is 1 gigabyte. The variable
289
+# SWIFT_LOOPBACK_DISK_SIZE (counted in 1K blocks by dd) allows you to change
290
+# that.
291
+SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
292
+
293
+# The ring uses a configurable number of bits from a path’s MD5 hash as
294
+# a partition index that designates a device. The number of bits kept
295
+# from the hash is known as the partition power, and 2 to the partition
296
+# power indicates the partition count. Partitioning the full MD5 hash
297
+# ring allows other parts of the cluster to work in batches of items at
298
+# once, which ends up either more efficient or at least less complex than
299
+# working with each item separately or the entire cluster all at once.
300
+# By default we use a partition power of 9 (which means 512 partitions).
301
+SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
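Review note: worked arithmetic for the comment above — the partition count is 2 raised to the partition power:

    echo $(( 2 ** ${SWIFT_PARTITION_POWER_SIZE:-9} ))  # -> 512 partitions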
302
+
303
+# We only ask for Swift Hash if we have enabled swift service.
304
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
305
+    # SWIFT_HASH is a random unique string for a swift cluster that
306
+    # can never change.
307
+    read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
308
+fi
273 309
 
274 310
 # Keystone
275 311
 # --------
... ...
@@ -283,7 +338,7 @@ read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (
283 283
 LOGFILE=${LOGFILE:-"$PWD/stack.sh.$$.log"}
284 284
 (
285 285
 # So that errors don't compound we exit on any errors so you see only the
286
-# first error that occured.
286
+# first error that occurred.
287 287
 trap failed ERR
288 288
 failed() {
289 289
     local r=$?
... ...
@@ -310,7 +365,7 @@ fi
310 310
 
311 311
 # install apt requirements
312 312
 apt_get update
313
-apt_get install `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server"`
313
+apt_get install `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server|memcached"`
314 314
 
315 315
 # install python requirements
316 316
 sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install `cat $FILES/pips/*`
... ...
@@ -349,6 +404,10 @@ function git_clone {
349 349
 
350 350
 # compute service
351 351
 git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
352
+# storage service
353
+git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
354
+# swift + keystone middleware
355
+git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH
352 356
 # image catalog service
353 357
 git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
354 358
 # unified auth system (manages accounts/tokens)
... ...
@@ -362,6 +421,8 @@ git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
362 362
 # openstackx is a collection of extensions to openstack.compute & nova
363 363
 # that is *deprecated*.  The code is being moved into python-novaclient & nova.
364 364
 git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH
365
+# quantum
366
+git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
365 367
 
366 368
 # Initialization
367 369
 # ==============
... ...
@@ -370,12 +431,15 @@ git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH
370 370
 # setup our checkouts so they are installed into python path
371 371
 # allowing ``import nova`` or ``import glance.client``
372 372
 cd $KEYSTONE_DIR; sudo python setup.py develop
373
+cd $SWIFT_DIR; sudo python setup.py develop
374
+cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop
373 375
 cd $GLANCE_DIR; sudo python setup.py develop
374 376
 cd $NOVACLIENT_DIR; sudo python setup.py develop
375 377
 cd $NOVA_DIR; sudo python setup.py develop
376 378
 cd $OPENSTACKX_DIR; sudo python setup.py develop
377 379
 cd $HORIZON_DIR/django-openstack; sudo python setup.py develop
378 380
 cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop
381
+cd $QUANTUM_DIR; sudo python setup.py develop
379 382
 
380 383
 # Add a useful screenrc.  This isn't required to run openstack but we do
381 384
 # it since we are going to run the services in screen for simple
... ...
@@ -500,13 +564,12 @@ fi
500 500
 # ----
501 501
 
502 502
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
503
-    # We are going to use the sample http middleware configuration from the
504
-    # keystone project to launch nova.  This paste config adds the configuration
505
-    # required for nova to validate keystone tokens - except we need to switch
506
-    # the config to use our service token instead (instead of the invalid token
507
-    # 999888777666).
508
-    cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin
509
-    sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
503
+    # We are going to use a sample http middleware configuration based on the
504
+    # one from the keystone project to launch nova.  This paste config adds
505
+    # the configuration required for nova to validate keystone tokens. We add
506
+    # our own service token to the configuration.
507
+    cp $FILES/nova-api-paste.ini $NOVA_DIR/bin
508
+    sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
510 509
 fi
511 510
 
512 511
 if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
... ...
@@ -580,6 +643,129 @@ if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then
580 580
     mkdir -p $NOVA_DIR/networks
581 581
 fi
582 582
 
583
+# Storage Service
584
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
585
+    # We first do a bit of setup by creating the directories and
586
+    # changing the permissions so we can run it as our user.
587
+
588
+    USER_GROUP=$(id -g)
589
+    sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives
590
+    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives
591
+
592
+    # We then create a loopback disk and format it to XFS.
593
+    if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]];then
594
+        mkdir -p  ${SWIFT_DATA_LOCATION}/drives/images
595
+        sudo touch  ${SWIFT_DATA_LOCATION}/drives/images/swift.img
596
+        sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img
597
+
598
+        dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \
599
+            bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
600
+        mkfs.xfs -f -i size=1024  ${SWIFT_DATA_LOCATION}/drives/images/swift.img
601
+    fi
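Review note: dd with count=0 and seek=N allocates a sparse file, so the ~1 GB image costs almost no disk up front. A hedged way to compare apparent vs. allocated size:

    ls -lsh ${SWIFT_DATA_LOCATION}/drives/images/swift.img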
602
+
603
+    # After the drive is created we mount the disk with a few mount
604
+    # options to make it as efficient as possible for swift.
605
+    mkdir -p ${SWIFT_DATA_LOCATION}/drives/sdb1
606
+    if ! egrep -q ${SWIFT_DATA_LOCATION}/drives/sdb1 /proc/mounts;then
607
+        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
608
+            ${SWIFT_DATA_LOCATION}/drives/images/swift.img ${SWIFT_DATA_LOCATION}/drives/sdb1
609
+    fi
610
+
611
+    # We then create links to that mounted location so swift knows
612
+    # where to go.
613
+    for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done
614
+
615
+    # We now have to emulate a few different servers on one machine, so we
616
+    # create all the directories needed for swift
617
+    tmpd=""
618
+    for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \
619
+        ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \
620
+        ${SWIFT_DATA_LOCATION}/{1..4}/node/sdb1 /var/run/swift ;do
621
+        [[ -d $d ]] && continue
622
+        sudo install -o ${USER} -g $USER_GROUP -d $d
623
+    done
624
+
625
+   # We do want to make sure this is all owned by our user.
626
+   sudo chown -R $USER: ${SWIFT_DATA_LOCATION}/{1..4}/node
627
+   sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION}
628
+
629
+   # swift-init has a bug using /etc/swift until bug #885595 is fixed
630
+   # we have to create a link
631
+   sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift
632
+
633
+   # Swift uses rsync to synchronize between all the different
634
+   # partitions (which makes more sense when you have a multi-node
635
+   # setup); we configure it with our own rsyncd.conf.
636
+   sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_DATA_LOCATION%,$SWIFT_DATA_LOCATION," $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
637
+   sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
638
+
639
+   # By default Swift will be installed with the tempauth middleware
640
+   # which has some default usernames and passwords; if you have
641
+   # configured keystone we use the keystone auth server instead.
642
+   if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
643
+       swift_auth_server=keystone
644
+       # We need a special version of bin/swift which understands the
645
+       # OpenStack api 2.0; we download it until this gets
646
+       # integrated into swift.
647
+       sudo curl -s -o/usr/local/bin/swift \
648
+           'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e'
649
+   else
650
+       swift_auth_server=tempauth
651
+   fi
652
+
653
+   # We install the proxy-server and swift configuration files,
654
+   # replacing a few directives to match our configuration.
655
+   sed "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \
656
+       $FILES/swift/proxy-server.conf|sudo tee  ${SWIFT_CONFIG_LOCATION}/proxy-server.conf
657
+
658
+   sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf
659
+
660
+   # We need to generate object/container/account configurations
661
+   # emulating 4 nodes on different ports; we have a little function
662
+   # that helps us do that.
663
+   function generate_swift_configuration() {
664
+       local server_type=$1
665
+       local bind_port=$2
666
+       local log_facility=$3
667
+       local node_number
668
+
669
+       for node_number in {1..4};do
670
+           node_path=${SWIFT_DATA_LOCATION}/${node_number}
671
+           sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
672
+               $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_LOCATION}/${server_type}-server/${node_number}.conf
673
+           bind_port=$(( ${bind_port} + 10 ))
674
+           log_facility=$(( ${log_facility} + 1 ))
675
+       done
676
+   }
677
+   generate_swift_configuration object 6010 2
678
+   generate_swift_configuration container 6011 2
679
+   generate_swift_configuration account 6012 2
680
+
681
+   # We create two helper scripts:
682
+   #
683
+   # - swift-remakerings
684
+   #   Recreates the rings from scratch.
685
+   # - swift-startmain
686
+   #   Restarts your full cluster.
687
+   #
688
+   sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift/swift-remakerings | \
689
+       sudo tee /usr/local/bin/swift-remakerings
690
+   sudo install -m755 $FILES/swift/swift-startmain /usr/local/bin/
691
+   sudo chmod +x /usr/local/bin/swift-*
692
+
693
+   # We can then start rsync.
694
+   sudo /etc/init.d/rsync restart || :
695
+
696
+   # Create our rings for the object/container/account servers.
697
+   /usr/local/bin/swift-remakerings
698
+
699
+   # And now we launch swift-startmain to get our cluster running and
700
+   # ready to be tested.
701
+   /usr/local/bin/swift-startmain || :
702
+
703
+   unset s swift_hash swift_auth_server tmpd
704
+fi
705
+
583 706
 # Volume Service
584 707
 # --------------
585 708
 
... ...
@@ -593,7 +779,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
593 593
     # By default, the backing file is 2G in size, and is stored in /opt/stack.
594 594
     #
595 595
     if ! sudo vgdisplay | grep -q nova-volumes; then
596
-        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-/opt/stack/nova-volumes-backing-file}
596
+        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file}
597 597
         VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
598 598
         truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
599 599
         DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
... ...
@@ -616,8 +802,16 @@ add_nova_flag "--nodaemon"
616 616
 add_nova_flag "--allow_admin_api"
617 617
 add_nova_flag "--scheduler_driver=$SCHEDULER"
618 618
 add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf"
619
-add_nova_flag "--network_manager=nova.network.manager.$NET_MAN"
620 619
 add_nova_flag "--fixed_range=$FIXED_RANGE"
620
+if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
621
+    add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager"
622
+    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
623
+        add_nova_flag "--libvirt_vif_type=ethernet"
624
+        add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
625
+    fi
626
+else
627
+    add_nova_flag "--network_manager=nova.network.manager.$NET_MAN"
628
+fi
621 629
 add_nova_flag "--my_ip=$HOST_IP"
622 630
 add_nova_flag "--public_interface=$PUBLIC_INTERFACE"
623 631
 add_nova_flag "--vlan_interface=$VLAN_INTERFACE"
... ...
@@ -632,15 +826,16 @@ add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST"
632 632
 add_nova_flag "--rabbit_host=$RABBIT_HOST"
633 633
 add_nova_flag "--rabbit_password=$RABBIT_PASSWORD"
634 634
 add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
635
+add_nova_flag "--force_dhcp_release"
635 636
 if [ -n "$INSTANCES_PATH" ]; then
636 637
     add_nova_flag "--instances_path=$INSTANCES_PATH"
637 638
 fi
638
-if [ -n "$MULTI_HOST" ]; then
639
-    add_nova_flag "--multi_host=$MULTI_HOST"
640
-    add_nova_flag "--send_arp_for_ha=1"
639
+if [ "$MULTI_HOST" != "False" ]; then
640
+    add_nova_flag "--multi_host"
641
+    add_nova_flag "--send_arp_for_ha"
641 642
 fi
642 643
 if [ "$SYSLOG" != "False" ]; then
643
-    add_nova_flag "--use_syslog=1"
644
+    add_nova_flag "--use_syslog"
644 645
 fi
645 646
 
646 647
 # XenServer
... ...
@@ -676,12 +871,6 @@ if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
676 676
 
677 677
     # (re)create nova database
678 678
     $NOVA_DIR/bin/nova-manage db sync
679
-
680
-    # create a small network
681
-    $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE
682
-
683
-    # create some floating ips
684
-    $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
685 679
 fi
686 680
 
687 681
 
... ...
@@ -721,16 +910,20 @@ fi
721 721
 function screen_it {
722 722
     NL=`echo -ne '\015'`
723 723
     if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then
724
-        screen -S nova -X screen -t $1
725
-        screen -S nova -p $1 -X stuff "$2$NL"
724
+        screen -S stack -X screen -t $1
725
+        # sleep to allow bash to be ready to be sent the command - we are
726
+        # creating a new window in screen and then sending characters, so if
727
+        # bash isn't running by the time we send the command, nothing happens
728
+        sleep 1
729
+        screen -S stack -p $1 -X stuff "$2$NL"
726 730
     fi
727 731
 }
728 732
 
729 733
 # create a new named screen to run processes in
730
-screen -d -m -S nova -t nova
734
+screen -d -m -S stack -t stack
731 735
 sleep 1
732 736
 
733
-# launch the glance registery service
737
+# launch the glance registry service
734 738
 if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
735 739
     screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf"
736 740
 fi
... ...
@@ -739,7 +932,7 @@ fi
739 739
 if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
740 740
     screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf"
741 741
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
742
-    if ! timeout 60 sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
742
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
743 743
       echo "g-api did not start"
744 744
       exit 1
745 745
     fi
... ...
@@ -749,7 +942,7 @@ fi
749 749
 if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
750 750
     screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF -d"
751 751
     echo "Waiting for keystone to start..."
752
-    if ! timeout 60 sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then
752
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then
753 753
       echo "keystone did not start"
754 754
       exit 1
755 755
     fi
... ...
@@ -759,11 +952,62 @@ fi
759 759
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
760 760
     screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"
761 761
     echo "Waiting for nova-api to start..."
762
-    if ! timeout 60 sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
762
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
763 763
       echo "nova-api did not start"
764 764
       exit 1
765 765
     fi
766 766
 fi
767
+
768
+# Quantum
769
+if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
770
+    # Install deps
771
+    # FIXME add to files/apts/quantum, but don't install if not needed!
772
+    apt_get install openvswitch-switch openvswitch-datapath-dkms
773
+
774
+    # Create database for the plugin/agent
775
+    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
776
+        if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
777
+            mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;'
778
+        else
779
+            echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
780
+            exit 1
781
+        fi
782
+    fi
783
+
784
+    QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/quantum/plugins.ini
785
+    # Make sure we're using the openvswitch plugin
786
+    sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE
787
+    screen_it q-svc "cd $QUANTUM_DIR && export PYTHONPATH=.:$PYTHONPATH; python $QUANTUM_DIR/bin/quantum $QUANTUM_DIR/etc/quantum.conf"
788
+fi
789
+
790
+# Quantum agent (for compute nodes)
791
+if [[ "$ENABLED_SERVICES" =~ "q-agt" ]]; then
792
+    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
793
+        # Set up integration bridge
794
+        OVS_BRIDGE=${OVS_BRIDGE:-br-int}
795
+        sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
796
+        sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
797
+        sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
798
+    fi
799
+
800
+    # Start up the quantum <-> openvswitch agent
801
+    screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v"
802
+fi
803
+
804
+# If we're using Quantum (i.e. q-svc is enabled), network creation has to
805
+# happen after we've started the Quantum service.
806
+if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
807
+    # create a small network
808
+    $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE
809
+
810
+    if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
811
+        echo "Not creating floating IPs (not supported by QuantumManager)"
812
+    else
813
+        # create some floating ips
814
+        $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
815
+    fi
816
+fi
817
+
767 818
 # Launching nova-compute should be as simple as running ``nova-compute`` but
768 819
 # we have to do a little more than that in our script.  Since we add the group
769 820
 # ``libvirtd`` to our user in this script, when nova-compute is run it is
... ...
@@ -787,7 +1031,7 @@ screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log"
787 787
 # TTY also uses cloud-init, supporting login via keypair and sending scripts as
788 788
 # userdata.  See https://help.ubuntu.com/community/CloudInit for more on cloud-init
789 789
 #
790
-# Override ``IMAGE_URLS`` with a comma-seperated list of uec images.
790
+# Override ``IMAGE_URLS`` with a comma-separated list of uec images.
791 791
 #
792 792
 #  * **natty**: http://uec-images.ubuntu.com/natty/current/natty-server-cloudimg-amd64.tar.gz
793 793
 #  * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz
... ...
@@ -843,6 +1087,10 @@ for ret in "${PIPESTATUS[@]}"; do [ $ret -eq 0 ] || exit $ret; done
843 843
 # Using the cloud
844 844
 # ===============
845 845
 
846
+echo ""
847
+echo ""
848
+echo ""
849
+
846 850
 # If you installed the horizon on this server, then you should be able
847 851
 # to access the site using your browser.
848 852
 if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then
... ...
@@ -2,6 +2,14 @@
2 2
 NOVA_REPO=https://github.com/cloudbuilders/nova.git
3 3
 NOVA_BRANCH=diablo
4 4
 
5
+# storage service
6
+SWIFT_REPO=https://github.com/openstack/swift.git
7
+SWIFT_BRANCH=stable/diablo
8
+
9
+# swift and keystone integration
10
+SWIFT_KEYSTONE_REPO=https://github.com/cloudbuilders/swift-keystone2.git
11
+SWIFT_KEYSTONE_BRANCH=master
12
+
5 13
 # image catalog service
6 14
 GLANCE_REPO=https://github.com/cloudbuilders/glance.git
7 15
 GLANCE_BRANCH=diablo
... ...
@@ -27,6 +35,10 @@ NOVACLIENT_BRANCH=master
27 27
 OPENSTACKX_REPO=https://github.com/cloudbuilders/openstackx.git
28 28
 OPENSTACKX_BRANCH=diablo
29 29
 
30
+# quantum service
31
+QUANTUM_REPO=https://github.com/openstack/quantum
32
+QUANTUM_BRANCH=stable/diablo
33
+
30 34
 # Specify a comma-separated list of uec images to download and install into glance.
31 35
 IMAGE_URLS=http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz
32 36
 
33 37
deleted file mode 100755
... ...
@@ -1,402 +0,0 @@
1
-#!/usr/bin/env bash
2
-
3
-# exit on error to stop unexpected errors
4
-set -o errexit
5
-
6
-# Make sure that we have the proper version of ubuntu
7
-UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'`
8
-if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then
9
-    if [ ! "natty" = "$UBUNTU_VERSION" ]; then
10
-        echo "This script only works with oneiric and natty"
11
-        exit 1
12
-    fi
13
-fi
14
-
15
-# Echo commands
16
-set -o xtrace
17
-
18
-# Keep track of the current directory
19
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
20
-TOP_DIR=$TOOLS_DIR/..
21
-
22
-# Where to store files and instances
23
-KVMSTACK_DIR=${KVMSTACK_DIR:-/opt/kvmstack}
24
-
25
-# Where to store images
26
-IMAGES_DIR=$KVMSTACK_DIR/images
27
-
28
-# Create images dir
29
-mkdir -p $IMAGES_DIR
30
-
31
-# Move to top devstack dir
32
-cd $TOP_DIR
33
-
34
-# Abort if localrc is not set
35
-if [ ! -e ./localrc ]; then
36
-    echo "You must have a localrc with ALL necessary passwords defined before proceeding."
37
-    echo "See stack.sh for required passwords."
38
-    exit 1
39
-fi
40
-
41
-# Source params
42
-source ./stackrc
43
-
44
-# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
45
-ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
46
-
47
-
48
-# Base image (natty by default)
49
-DIST_NAME=${DIST_NAME:-natty}
50
-IMAGE_FNAME=$DIST_NAME.raw
51
-
52
-# Name of our instance, used by libvirt
53
-GUEST_NAME=${GUEST_NAME:-kvmstack}
54
-
55
-# Original version of built image
56
-BASE_IMAGE=$KVMSTACK_DIR/images/$DIST_NAME.raw
57
-
58
-# Copy of base image, which we pre-install with tasty treats
59
-VM_IMAGE=$IMAGES_DIR/$DIST_NAME.$GUEST_NAME.raw
60
-
61
-# Mop up after previous runs
62
-virsh destroy $GUEST_NAME || true
63
-
64
-# Where this vm is stored
65
-VM_DIR=$KVMSTACK_DIR/instances/$GUEST_NAME
66
-
67
-# Create vm dir
68
-mkdir -p $VM_DIR
69
-
70
-# Mount point into copied base image
71
-COPY_DIR=$VM_DIR/copy
72
-mkdir -p $COPY_DIR
73
-
74
-# Create the base image if it does not yet exist
75
-if [ ! -e $IMAGES_DIR/$IMAGE_FNAME ]; then
76
-    cd $TOOLS_DIR
77
-    ./make_image.sh -m -r 5000  $DIST_NAME raw
78
-    mv $DIST_NAME.raw $BASE_IMAGE
79
-    cd $TOP_DIR
80
-fi
81
-
82
-# Create a copy of the base image
83
-if [ ! -e $VM_IMAGE ]; then
84
-    cp -p $BASE_IMAGE $VM_IMAGE
85
-fi
86
-
87
-# Unmount the copied base image
88
-function unmount_images() {
89
-    # unmount the filesystem
90
-    while df | grep -q $COPY_DIR; do
91
-        umount $COPY_DIR || echo 'ok'
92
-        sleep 1
93
-    done
94
-}
95
-
96
-# Unmount from failed runs
97
-unmount_images
98
-
99
-# Ctrl-c catcher
100
-function kill_unmount() {
101
-    unmount_images
102
-    exit 1
103
-}
104
-
105
-# Install deps if needed
106
-dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin kpartx
107
-
108
-# Let Ctrl-c kill tail and exit
109
-trap kill_unmount SIGINT
110
-
111
-# Where Openstack code will live in image
112
-DEST=${DEST:-/opt/stack}
113
-
114
-# Mount the file system
115
-mount -o loop,offset=32256 $VM_IMAGE  $COPY_DIR
116
-
117
-# git clone only if directory doesn't exist already.  Since ``DEST`` might not
118
-# be owned by the installation user, we create the directory and change the
119
-# ownership to the proper user.
120
-function git_clone {
121
-    if [ ! -d $2 ]; then
122
-        sudo mkdir $2
123
-        sudo chown `whoami` $2
124
-        git clone $1 $2
125
-        cd $2
126
-        # This checkout syntax works for both branches and tags
127
-        git checkout $3
128
-    fi
129
-}
130
-
131
-# Make sure that base requirements are installed
132
-cp /etc/resolv.conf $COPY_DIR/etc/resolv.conf
133
-chroot $COPY_DIR apt-get update
134
-chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
135
-chroot $COPY_DIR apt-get install -y --download-only rabbitmq-server libvirt-bin mysql-server
136
-chroot $COPY_DIR pip install `cat files/pips/*`
137
-
138
-# Clean out code repos if directed to do so
139
-if [ "$CLEAN" = "1" ]; then
140
-    rm -rf $COPY_DIR/$DEST
141
-fi
142
-
143
-# Cache openstack code
144
-mkdir -p $COPY_DIR/$DEST
145
-git_clone $NOVA_REPO $COPY_DIR/$DEST/nova $NOVA_BRANCH
146
-git_clone $GLANCE_REPO $COPY_DIR/$DEST/glance $GLANCE_BRANCH
147
-git_clone $KEYSTONE_REPO $COPY_DIR/$DESTkeystone $KEYSTONE_BRANCH
148
-git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH
149
-git_clone $HORIZON_REPO $COPY_DIR/$DEST/horizon $HORIZON_BRANCH $HORIZON_TAG
150
-git_clone $NOVACLIENT_REPO $COPY_DIR/$DEST/python-novaclient $NOVACLIENT_BRANCH
151
-git_clone $OPENSTACKX_REPO $COPY_DIR/$DEST/openstackx $OPENSTACKX_BRANCH
152
-git_clone $KEYSTONE_REPO $COPY_DIR/$DEST/keystone $KEYSTONE_BRANCH
153
-git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH
154
-
155
-# Back to devstack
156
-cd $TOP_DIR
157
-
158
-# Unmount the filesystems
159
-unmount_images
160
-
161
-# Network configuration variables
162
-BRIDGE=${BRIDGE:-br0}
163
-GUEST_IP=${GUEST_IP:-192.168.1.50}
164
-GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
165
-GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
166
-GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.1.1}
167
-GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $(echo $GUEST_IP | sed "s/.*\.//")`"}
168
-GUEST_RAM=${GUEST_RAM:-1524288}
169
-GUEST_CORES=${GUEST_CORES:-1}
170
-
171
-# libvirt.xml configuration
172
-LIBVIRT_XML=$VM_DIR/libvirt.xml
173
-cat > $LIBVIRT_XML <<EOF
174
-<domain type='kvm'>
175
-    <name>$GUEST_NAME</name>
176
-    <memory>$GUEST_RAM</memory>
177
-    <os>
178
-        <type>hvm</type>
179
-        <bootmenu enable='yes'/>
180
-    </os>
181
-    <features>
182
-        <acpi/>
183
-    </features>
184
-    <vcpu>$GUEST_CORES</vcpu>
185
-    <devices>
186
-        <disk type='file'>
187
-            <driver type='qcow2'/>
188
-            <source file='$VM_DIR/disk'/>
189
-            <target dev='vda' bus='virtio'/>
190
-        </disk>
191
-
192
-        <interface type='bridge'>
193
-            <source bridge='$BRIDGE'/>
194
-            <mac address='$GUEST_MAC'/>
195
-        </interface>
196
-
197
-        <!-- The order is significant here.  File must be defined first -->
198
-        <serial type="file">
199
-            <source path='$VM_DIR/console.log'/>
200
-            <target port='1'/>
201
-        </serial>
202
-
203
-        <console type='pty' tty='/dev/pts/2'>
204
-            <source path='/dev/pts/2'/>
205
-            <target port='0'/>
206
-        </console>
207
-
208
-        <serial type='pty'>
209
-            <source path='/dev/pts/2'/>
210
-            <target port='0'/>
211
-        </serial>
212
-
213
-        <graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
214
-    </devices>
215
-</domain>
216
-EOF
217
-
218
-# Mount point for instance fs
219
-ROOTFS=$VM_DIR/root
220
-mkdir -p $ROOTFS
221
-
222
-# Make sure we have nbd-ness
223
-modprobe nbd max_part=63
224
-
225
-# Which NBD device to use?
226
-NBD=${NBD:-/dev/nbd5}
227
-
228
-# Clean up from previous runs
229
-umount $ROOTFS || echo 'ok'
230
-qemu-nbd -d $NBD || echo 'ok'
231
-
232
-# Clean up old runs
233
-cd $VM_DIR
234
-rm -f $VM_DIR/disk
235
-
236
-# Create our instance fs
237
-qemu-img create -f qcow2 -b $VM_IMAGE disk
238
-
239
-# Connect our nbd and wait till it is mountable
240
-qemu-nbd -c $NBD disk
241
-NBD_DEV=`basename $NBD`
242
-if ! timeout 60 sh -c "while ! [ -e /sys/block/$NBD_DEV/pid ]; do sleep 1; done"; then
243
-    echo "Couldn't connect $NBD"
244
-    exit 1
245
-fi
246
-
247
-# Mount the instance
248
-mount $NBD $ROOTFS -o offset=32256 -t ext4
249
-
250
-# Configure instance network
251
-INTERFACES=$ROOTFS/etc/network/interfaces
252
-cat > $INTERFACES <<EOF
253
-auto lo
254
-iface lo inet loopback
255
-
256
-auto eth0
257
-iface eth0 inet static
258
-        address $GUEST_IP
259
-        netmask $GUEST_NETMASK
260
-        gateway $GUEST_GATEWAY
261
-EOF
262
-
263
-# User configuration for the instance
264
-chroot $ROOTFS groupadd libvirtd || true
265
-chroot $ROOTFS useradd stack -s /bin/bash -d $DEST -G libvirtd
266
-cp -pr $TOOLS_DIR/.. $ROOTFS/$DEST/devstack
267
-echo "root:$ROOT_PASSWORD" | chroot $ROOTFS chpasswd
268
-echo "stack:pass" | chroot $ROOTFS chpasswd
269
-echo "stack ALL=(ALL) NOPASSWD: ALL" >> $ROOTFS/etc/sudoers
270
-
271
-# Gracefully cp only if source file/dir exists
272
-function cp_it {
273
-    if [ -e $1 ] || [ -d $1 ]; then
274
-        cp -pRL $1 $2
275
-    fi
276
-}
277
-
278
-# Copy over your ssh keys and env if desired
279
-COPYENV=${COPYENV:-1}
280
-if [ "$COPYENV" = "1" ]; then
281
-    cp_it ~/.ssh $ROOTFS/$DEST/.ssh
282
-    cp_it ~/.ssh/id_rsa.pub $ROOTFS/$DEST/.ssh/authorized_keys
283
-    cp_it ~/.gitconfig $ROOTFS/$DEST/.gitconfig
284
-    cp_it ~/.vimrc $ROOTFS/$DEST/.vimrc
285
-    cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc
286
-fi
287
-
288
-# pre-cache uec images
289
-for image_url in ${IMAGE_URLS//,/ }; do
290
-    IMAGE_FNAME=`basename "$image_url"`
291
-    if [ ! -f $IMAGES_DIR/$IMAGE_FNAME ]; then
292
-        wget -c $image_url -O $IMAGES_DIR/$IMAGE_FNAME
293
-    fi
294
-    cp $IMAGES_DIR/$IMAGE_FNAME $ROOTFS/$DEST/devstack/files
295
-done
296
-
297
-# Configure the runner
298
-RUN_SH=$ROOTFS/$DEST/run.sh
299
-cat > $RUN_SH <<EOF
300
-#!/usr/bin/env bash
301
-
302
-# Kill any existing screens
303
-killall screen
304
-
305
-# Install and run stack.sh
306
-sudo apt-get update
307
-sudo apt-get -y --force-yes install git-core vim-nox sudo
308
-if [ ! -d "$DEST/devstack" ]; then
309
-    git clone git://github.com/cloudbuilders/devstack.git $DEST/devstack
310
-fi
311
-cd $DEST/devstack && $STACKSH_PARAMS FORCE=yes ./stack.sh > /$DEST/run.sh.log
312
-echo >> /$DEST/run.sh.log
313
-echo >> /$DEST/run.sh.log
314
-echo "All done! Time to start clicking." >> /$DEST/run.sh.log
315
-cat $DEST/run.sh.log
316
-EOF
317
-chmod 755 $RUN_SH
318
-
319
-# Make runner launch on boot
320
-RC_LOCAL=$ROOTFS/etc/init.d/local
321
-cat > $RC_LOCAL <<EOF
322
-#!/bin/sh -e
323
-# Reboot if this is our first run to enable console log on $DIST_NAME :(
324
-if [ ! -e /root/firstlaunch ]; then
325
-    touch /root/firstlaunch
326
-    reboot -f
327
-    exit 0
328
-fi
329
-su -c "$DEST/run.sh" stack
330
-EOF
331
-chmod +x $RC_LOCAL
332
-chroot $ROOTFS sudo update-rc.d local defaults 80
333
-
334
-# Make our ip address hostnames look nice at the command prompt
335
-echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/$DEST/.bashrc
336
-echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/etc/profile
337
-
338
-# Give stack ownership over $DEST so it may do the work needed
339
-chroot $ROOTFS chown -R stack $DEST
340
-
341
-# Change boot params so that we get a console log
342
-sudo sed -e "s/quiet splash/splash console=ttyS0 console=ttyS1,19200n8/g" -i $ROOTFS/boot/grub/menu.lst
343
-sudo sed -e "s/^hiddenmenu//g" -i $ROOTFS/boot/grub/menu.lst
344
-
345
-# Set the hostname
346
-echo $GUEST_NAME > $ROOTFS/etc/hostname
347
-
348
-# We need the hostname to resolve for rabbit to launch
349
-if ! grep -q $GUEST_NAME $ROOTFS/etc/hosts; then
350
-    echo "$GUEST_IP $GUEST_NAME" >> $ROOTFS/etc/hosts
351
-fi
352
-
353
-# Unmount
354
-umount $ROOTFS || echo 'ok'
355
-qemu-nbd -d $NBD
356
-
357
-# Create the instance
358
-cd $VM_DIR && virsh create libvirt.xml
359
-
360
-# Tail the console log till we are done
361
-WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
362
-if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
363
-    # Done creating the container, let's tail the log
364
-    echo
365
-    echo "============================================================="
366
-    echo "                          -- YAY! --"
367
-    echo "============================================================="
368
-    echo
369
-    echo "We're done launching the vm, about to start tailing the"
370
-    echo "stack.sh log. It will take a second or two to start."
371
-    echo
372
-    echo "Just CTRL-C at any time to stop tailing."
373
-
374
-    while [ ! -e "$VM_DIR/console.log" ]; do
375
-      sleep 1
376
-    done
377
-
378
-    tail -F $VM_DIR/console.log &
379
-
380
-    TAIL_PID=$!
381
-
382
-    function kill_tail() {
383
-        kill $TAIL_PID
384
-        exit 1
385
-    }
386
-
387
-    # Let Ctrl-c kill tail and exit
388
-    trap kill_tail SIGINT
389
-
390
-    echo "Waiting stack.sh to finish..."
391
-    while ! cat $VM_DIR/console.log | grep -q 'All done' ; do
392
-        sleep 5
393
-    done
394
-
395
-    kill $TAIL_PID
396
-
397
-    if grep -q "stack.sh failed" $VM_DIR/console.log; then
398
-        exit 1
399
-    fi
400
-    echo ""
401
-    echo "Finished - Zip-a-dee Doo-dah!"
402
-fi
... ...
@@ -12,6 +12,27 @@ if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then
12 12
     fi
13 13
 fi
14 14
 
15
+# Clean up any resources that may be in use
16
+cleanup() {
17
+    set +o errexit
18
+    unmount_images
19
+
20
+    if [ -n "$ROOTFS" ]; then
21
+        umount $ROOTFS/dev
22
+        umount $ROOTFS
23
+    fi
24
+
25
+    # Release NBD devices
26
+    if [ -n "$NBD" ]; then
27
+        qemu-nbd -d $NBD
28
+    fi
29
+
30
+    # Kill ourselves to signal any calling process
31
+    trap 2; kill -2 $$
32
+}
33
+
34
+trap cleanup SIGHUP SIGINT SIGTERM
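Review note: the "trap 2; kill -2 $$" tail of cleanup() resets the default SIGINT handler and then re-raises the signal on the script itself, so any calling process still observes the interrupt. A minimal standalone sketch of the idiom:

    #!/bin/bash
    cleanup() {
        echo "cleaning up"
        trap 2          # restore default SIGINT behaviour
        kill -2 $$      # re-raise SIGINT so the parent sees it
    }
    trap cleanup SIGINT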
35
+
15 36
 # Echo commands
16 37
 set -o xtrace
17 38
 
... ...
@@ -100,9 +121,6 @@ function kill_unmount() {
100 100
 # Install deps if needed
101 101
 dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin kpartx
102 102
 
103
-# Let Ctrl-c kill tail and exit
104
-trap kill_unmount SIGINT
105
-
106 103
 # Where Openstack code will live in image
107 104
 DEST=${DEST:-/opt/stack}
108 105
 
... ...
@@ -127,8 +145,8 @@ function git_clone {
127 127
 # Make sure that base requirements are installed
128 128
 cp /etc/resolv.conf $COPY_DIR/etc/resolv.conf
129 129
 chroot $COPY_DIR apt-get update
130
-chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
131
-chroot $COPY_DIR apt-get install -y --download-only rabbitmq-server libvirt-bin mysql-server
130
+chroot $COPY_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
131
+chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1`
132 132
 chroot $COPY_DIR pip install `cat files/pips/*`
133 133
 
134 134
 # Clean out code repos if directed to do so
... ...
@@ -156,6 +174,7 @@ unmount_images
156 156
 
157 157
 # Network configuration variables
158 158
 GUEST_NETWORK=${GUEST_NETWORK:-1}
159
+GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes}
159 160
 
160 161
 GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50}
161 162
 GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
... ...
@@ -176,8 +195,10 @@ cat > $NET_XML <<EOF
176 176
 </network>
177 177
 EOF
178 178
 
179
-virsh net-destroy devstack-$GUEST_NETWORK || true
180
-virsh net-create $VM_DIR/net.xml
179
+if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
180
+    virsh net-destroy devstack-$GUEST_NETWORK || true
181
+    virsh net-create $VM_DIR/net.xml
182
+fi
181 183
 
182 184
 # libvirt.xml configuration
183 185
 LIBVIRT_XML=$VM_DIR/libvirt.xml
... ...
@@ -239,26 +260,35 @@ rm -f $VM_DIR/disk
239 239
 # Create our instance fs
240 240
 qemu-img create -f qcow2 -b $VM_IMAGE disk
241 241
 
242
+# Finds the next available NBD device
243
+# Exits script if error connecting or none free
244
+# map_nbd image
245
+# returns full nbd device path
246
+function map_nbd {
247
+    for i in `seq 0 15`; do
248
+        if [ ! -e /sys/block/nbd$i/pid ]; then
249
+            NBD=/dev/nbd$i
250
+            # Connect to nbd and wait till it is ready
251
+            qemu-nbd -c $NBD $1
252
+            if ! timeout 60 sh -c "while ! [ -e ${NBD}p1 ]; do sleep 1; done"; then
253
+                echo "Couldn't connect $NBD"
254
+                exit 1
255
+            fi
256
+            break
257
+        fi
258
+    done
259
+    if [ -z "$NBD" ]; then
260
+        echo "No free NBD slots"
261
+        exit 1
262
+    fi
263
+    echo $NBD
264
+}
265
+
242 266
 # Make sure we have nbd-ness
243 267
 modprobe nbd max_part=63
244 268
 
245 269
 # Set up nbd
246
-for i in `seq 0 15`; do
247
-    if [ ! -e /sys/block/nbd$i/pid ]; then
248
-        NBD=/dev/nbd$i
249
-        # Connect to nbd and wait till it is ready
250
-        qemu-nbd -c $NBD disk
251
-        if ! timeout 60 sh -c "while ! [ -e ${NBD}p1 ]; do sleep 1; done"; then
252
-            echo "Couldn't connect $NBD"
253
-            exit 1
254
-        fi
255
-        break
256
-    fi
257
-done
258
-if [ -z "$NBD" ]; then
259
-    echo "No free NBD slots"
260
-    exit 1
261
-fi
270
+NBD=`map_nbd disk`
262 271
 NBD_DEV=`basename $NBD`
263 272
 
264 273
 # Mount the instance
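map_nbd records the claimed device in $NBD and also echoes it, so callers grab the path with command substitution; note that its error text would be captured the same way, so the subsequent use of the device is what actually surfaces a failure. A short usage sketch (image path and mount point are illustrative):

    modprobe nbd max_part=63       # device nodes must exist before probing
    NBD=`map_nbd disk.qcow2`       # e.g. /dev/nbd3
    mount ${NBD}p1 /mnt/image
    # ... modify the guest filesystem ...
    umount /mnt/image
    qemu-nbd -d $NBD               # always disconnect when finished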
... ...
@@ -381,7 +411,9 @@ sed -e 's/^PasswordAuthentication.*$/PasswordAuthentication yes/' -i $ROOTFS/etc
381 381
 
382 382
 # Unmount
383 383
 umount $ROOTFS || echo 'ok'
384
+ROOTFS=""
384 385
 qemu-nbd -d $NBD
386
+NBD=""
385 387
 
386 388
 # Create the instance
387 389
 cd $VM_DIR && virsh create libvirt.xml
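Blanking ROOTFS and NBD right after the orderly teardown is deliberate: the cleanup trap frees only resources still recorded in those globals, so clearing them prevents a late signal from unmounting or disconnecting twice. The idiom in miniature (names illustrative):

    TMPFILE=$(mktemp)
    trap '[ -n "$TMPFILE" ] && rm -f "$TMPFILE"' SIGINT SIGTERM
    # ... use $TMPFILE ...
    rm -f "$TMPFILE"
    TMPFILE=""                     # trap is now a no-op for this resource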
... ...
@@ -125,8 +125,8 @@ fi
125 125
 
126 126
 # Make sure that base requirements are installed
127 127
 chroot $CACHEDIR apt-get update
128
-chroot $CACHEDIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
129
-chroot $CACHEDIR apt-get install -y --download-only rabbitmq-server libvirt-bin mysql-server
128
+chroot $CACHEDIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
129
+chroot $CACHEDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1`
130 130
 chroot $CACHEDIR pip install `cat files/pips/*`
131 131
 
132 132
 # Clean out code repos if directed to do so
... ...
@@ -32,8 +32,9 @@ fi
32 32
 # prime natty with as many apt/pips as we can
33 33
 if [ ! -d $CHROOTCACHE/natty-dev ]; then
34 34
     rsync -azH $CHROOTCACHE/natty-base/ $CHROOTCACHE/natty-dev/
35
-    chroot $CHROOTCACHE/natty-dev apt-get install -y `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
36
-    chroot $CHROOTCACHE/natty-dev pip install `cat files/pips/*`
35
+    chroot $CHROOTCACHE/natty-dev apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
36
+    chroot $CHROOTCACHE/natty-dev apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1`
37
+    chroot $CHROOTCACHE/natty-dev pip install `cat files/pips/*`
37 38
 
38 39
     # Create a stack user that is a member of the libvirtd group so that stack
39 40
     # is able to interact with libvirt.
... ...
@@ -11,6 +11,22 @@ PXEDIR=${PXEDIR:-/var/cache/devstack/pxe}
11 11
 OPWD=`pwd`
12 12
 PROGDIR=`dirname $0`
13 13
 
14
+# Clean up any resources that may be in use
15
+cleanup() {
16
+    set +o errexit
17
+
18
+    # Mop up temporary files
19
+    if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then
20
+        umount $MNTDIR
21
+        rmdir $MNTDIR
22
+    fi
23
+
24
+    # Kill ourselves to signal any calling process
25
+    trap 2; kill -2 $$
26
+}
27
+
28
+trap cleanup SIGHUP SIGINT SIGTERM
29
+
14 30
 mkdir -p $DEST_DIR/pxelinux.cfg
15 31
 cd $DEST_DIR
16 32
 for i in memdisk menu.c32 pxelinux.0; do
... ...
@@ -1,60 +1,120 @@
1 1
 #!/bin/bash
2 2
 # build_ramdisk.sh - Build RAM disk images
3 3
 
4
+# exit on error to stop unexpected errors
5
+set -o errexit
6
+
4 7
 if [ ! "$#" -eq "1" ]; then
5
-    echo "$0 builds a gziped natty openstack install"
8
+    echo "$0 builds a gziped Ubuntu OpenStack install"
6 9
     echo "usage: $0 dest"
7 10
     exit 1
8 11
 fi
9 12
 
13
+# Clean up any resources that may be in use
14
+cleanup() {
15
+    set +o errexit
16
+
17
+    # Mop up temporary files
18
+    if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then
19
+        umount $MNTDIR
20
+        rmdir $MNTDIR
21
+    fi
22
+    if [ -n "$DEV_FILE_TMP" -a -e "$DEV_FILE_TMP "]; then
23
+        rm -f $DEV_FILE_TMP
24
+    fi
25
+    if [ -n "$IMG_FILE_TMP" -a -e "$IMG_FILE_TMP" ]; then
26
+        rm -f $IMG_FILE_TMP
27
+    fi
28
+
29
+    # Release NBD devices
30
+    if [ -n "$NBD" ]; then
31
+        qemu-nbd -d $NBD
32
+    fi
33
+
34
+    # Kill ourselves to signal any calling process
35
+    trap 2; kill -2 $$
36
+}
37
+
38
+trap cleanup SIGHUP SIGINT SIGTERM
39
+
40
+# Set up nbd
41
+modprobe nbd max_part=63
42
+
43
+# Echo commands
44
+set -o xtrace
45
+
10 46
 IMG_FILE=$1
11 47
 
12
-PROGDIR=`dirname $0`
13
-CHROOTCACHE=${CHROOTCACHE:-/var/cache/devstack}
48
+# Keep track of the current directory
49
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
50
+TOP_DIR=`cd $TOOLS_DIR/..; pwd`
51
+
52
+# Store cwd
53
+CWD=`pwd`
54
+
55
+cd $TOP_DIR
14 56
 
15 57
 # Source params
16 58
 source ./stackrc
17 59
 
18
-# Store cwd
19
-CWD=`pwd`
60
+CACHEDIR=${CACHEDIR:-/var/cache/devstack}
20 61
 
21 62
 DEST=${DEST:-/opt/stack}
22 63
 
64
+# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
65
+ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
66
+
67
+# Base image (natty by default)
68
+DIST_NAME=${DIST_NAME:-natty}
69
+
23 70
 # Param string to pass to stack.sh.  Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova"
24 71
 STACKSH_PARAMS=${STACKSH_PARAMS:-}
25 72
 
26 73
 # Option to use the version of devstack on which we are currently working
27 74
 USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1}
28 75
 
29
-# Set up nbd
30
-modprobe nbd max_part=63
31
-NBD=${NBD:-/dev/nbd9}
32
-NBD_DEV=`basename $NBD`
33
-
34
-# clean install of natty
35
-if [ ! -r $CHROOTCACHE/natty-base.img ]; then
36
-    $PROGDIR/get_uec_image.sh natty $CHROOTCACHE/natty-base.img
37
-#    # copy kernel modules...
38
-#    # NOTE(ja): is there a better way to do this?
39
-#    cp -pr /lib/modules/`uname -r` $CHROOTCACHE/natty-base/lib/modules
40
-#    # a simple password - pass
41
-#    echo root:pass | chroot $CHROOTCACHE/natty-base chpasswd
76
+# clean install
77
+if [ ! -r $CACHEDIR/$DIST_NAME-base.img ]; then
78
+    $TOOLS_DIR/get_uec_image.sh $DIST_NAME $CACHEDIR/$DIST_NAME-base.img
42 79
 fi
43 80
 
44
-# prime natty with as many apt/pips as we can
45
-if [ ! -r $CHROOTCACHE/natty-dev.img ]; then
46
-    cp -p $CHROOTCACHE/natty-base.img $CHROOTCACHE/natty-dev.img
47
-
48
-    qemu-nbd -c $NBD $CHROOTCACHE/natty-dev.img
49
-    if ! timeout 60 sh -c "while ! [ -e /sys/block/$NBD_DEV/pid ]; do sleep 1; done"; then
50
-        echo "Couldn't connect $NBD"
81
+# Finds the next available NBD device
82
+# Exits script if error connecting or none free
83
+# map_nbd image
84
+# returns full nbd device path
85
+function map_nbd {
86
+    for i in `seq 0 15`; do
87
+        if [ ! -e /sys/block/nbd$i/pid ]; then
88
+            NBD=/dev/nbd$i
89
+            # Connect to nbd and wait till it is ready
90
+            qemu-nbd -c $NBD $1
91
+            if ! timeout 60 sh -c "while ! [ -e ${NBD}p1 ]; do sleep 1; done"; then
92
+                echo "Couldn't connect $NBD"
93
+                exit 1
94
+            fi
95
+            break
96
+        fi
97
+    done
98
+    if [ -z "$NBD" ]; then
99
+        echo "No free NBD slots"
51 100
         exit 1
52 101
     fi
102
+    echo $NBD
103
+}
104
+
105
+# prime image with as many apt/pips as we can
106
+DEV_FILE=$CACHEDIR/$DIST_NAME-dev.img
107
+DEV_FILE_TMP=`mktemp $DEV_FILE.XXXXXX`
108
+if [ ! -r $DEV_FILE ]; then
109
+    cp -p $CACHEDIR/$DIST_NAME-base.img $DEV_FILE_TMP
110
+
111
+    NBD=`map_nbd $DEV_FILE_TMP`
53 112
     MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX`
54 113
     mount -t ext4 ${NBD}p1 $MNTDIR
55 114
     cp -p /etc/resolv.conf $MNTDIR/etc/resolv.conf
56 115
 
57
-    chroot $MNTDIR apt-get install -y `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
116
+    chroot $MNTDIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
117
+    chroot $MNTDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1`
58 118
     chroot $MNTDIR pip install `cat files/pips/*`
59 119
 
60 120
     # Create a stack user that is a member of the libvirtd group so that stack
... ...
@@ -66,6 +126,7 @@ if [ ! -r $CHROOTCACHE/natty-dev.img ]; then
66 66
 
67 67
     # a simple password - pass
68 68
     echo stack:pass | chroot $MNTDIR chpasswd
69
+    echo root:$ROOT_PASSWORD | chroot $MNTDIR chpasswd
69 70
 
70 71
     # and has sudo ability (in the future this should be limited to only what
71 72
     # stack requires)
... ...
@@ -74,27 +135,31 @@ if [ ! -r $CHROOTCACHE/natty-dev.img ]; then
74 74
     umount $MNTDIR
75 75
     rmdir $MNTDIR
76 76
     qemu-nbd -d $NBD
77
+    NBD=""
78
+    mv $DEV_FILE_TMP $DEV_FILE
77 79
 fi
80
+rm -f $DEV_FILE_TMP
78 81
 
79 82
 # clone git repositories onto the system
80 83
 # ======================================
81 84
 
85
+IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX`
86
+
82 87
 if [ ! -r $IMG_FILE ]; then
83
-    qemu-nbd -c $NBD $CHROOTCACHE/natty-dev.img
84
-    if ! timeout 60 sh -c "while ! [ -e ${NBD}p1 ]; do sleep 1; done"; then
85
-        echo "Couldn't connect $NBD"
86
-        exit 1
87
-    fi
88
+    NBD=`map_nbd $DEV_FILE`
88 89
 
89 90
     # Pre-create the image file
90 91
     # FIXME(dt): This should really get the partition size to
91 92
     #            pre-create the image file
92
-    dd if=/dev/zero of=$IMG_FILE bs=1 count=1 seek=$((2*1024*1024*1024))
93
+    dd if=/dev/zero of=$IMG_FILE_TMP bs=1 count=1 seek=$((2*1024*1024*1024))
93 94
     # Create filesystem image for RAM disk
94
-    dd if=${NBD}p1 of=$IMG_FILE bs=1M
95
+    dd if=${NBD}p1 of=$IMG_FILE_TMP bs=1M
95 96
 
96 97
     qemu-nbd -d $NBD
98
+    NBD=""
99
+    mv $IMG_FILE_TMP $IMG_FILE
97 100
 fi
101
+rm -f $IMG_FILE_TMP
98 102
 
99 103
 MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX`
100 104
 mount -t ext4 -o loop $IMG_FILE $MNTDIR
101 105
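The *_TMP files above implement write-to-temp-then-rename: an interrupted build leaves at worst a mktemp-suffixed leftover (which cleanup removes) rather than a truncated cache image, and mv publishes the finished file atomically since source and target share a filesystem. Reduced to its core (path and build step are illustrative):

    IMG=/var/cache/devstack/foo.img
    IMG_TMP=`mktemp $IMG.XXXXXX`       # temp file beside the target
    build_image_somehow $IMG_TMP       # hypothetical build step
    mv $IMG_TMP $IMG                   # atomic publish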
new file mode 100755
... ...
@@ -0,0 +1,248 @@
0
+#!/usr/bin/env bash
1
+
2
+# Make sure that we have the proper version of ubuntu (only works on oneiric)
3
+if ! egrep -q "oneiric" /etc/lsb-release; then
4
+    echo "This script only works with ubuntu oneiric."
5
+    exit 1
6
+fi
7
+
8
+# Keep track of the current directory
9
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
10
+TOP_DIR=`cd $TOOLS_DIR/..; pwd`
11
+
12
+cd $TOP_DIR
13
+
14
+# Source params
15
+source ./stackrc
16
+
17
+# Ubuntu distro to install
18
+DIST_NAME=${DIST_NAME:-oneiric}
19
+
20
+# Configure how large the VM should be
21
+GUEST_SIZE=${GUEST_SIZE:-10G}
22
+
23
+# exit on error to stop unexpected errors
24
+set -o errexit
25
+set -o xtrace
26
+
27
+# Abort if localrc is not set
28
+if [ ! -e $TOP_DIR/localrc ]; then
29
+    echo "You must have a localrc with ALL necessary passwords defined before proceeding."
30
+    echo "See stack.sh for required passwords."
31
+    exit 1
32
+fi
33
+
34
+# Install deps if needed
35
+DEPS="kvm libvirt-bin kpartx cloud-utils"
36
+dpkg -l $DEPS || apt-get install -y --force-yes $DEPS
37
+
38
+# Where to store files and instances
39
+WORK_DIR=${WORK_DIR:-/opt/kvmstack}
40
+
41
+# Where to store images
42
+image_dir=$WORK_DIR/images/$DIST_NAME
43
+mkdir -p $image_dir
44
+
45
+# Original version of built image
46
+uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz
47
+tarball=$image_dir/$(basename $uec_url)
48
+
49
+# download the base uec image if we haven't already
50
+if [ ! -f $tarball ]; then
51
+    curl $uec_url -o $tarball
52
+    (cd $image_dir && tar -Sxvzf $tarball)
53
+    resize-part-image $image_dir/*.img $GUEST_SIZE $image_dir/disk
54
+    cp $image_dir/*-vmlinuz-virtual $image_dir/kernel
55
+fi
56
+
57
+
58
+# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
59
+ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
60
+
61
+# Name of our instance, used by libvirt
62
+GUEST_NAME=${GUEST_NAME:-devstack}
63
+
64
+# Mop up after previous runs
65
+virsh destroy $GUEST_NAME || true
66
+
67
+# Where this vm is stored
68
+vm_dir=$WORK_DIR/instances/$GUEST_NAME
69
+
70
+# Create vm dir and remove old disk
71
+mkdir -p $vm_dir
72
+rm -f $vm_dir/disk
73
+
74
+# Create a copy of the base image
75
+qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk
76
+
77
+# Back to devstack
78
+cd $TOP_DIR
79
+
80
+GUEST_NETWORK=${GUEST_NETWORK:-1}
81
+GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes}
82
+GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50}
83
+GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
84
+GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
85
+GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1}
86
+GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"}
87
+GUEST_RAM=${GUEST_RAM:-1524288}
88
+GUEST_CORES=${GUEST_CORES:-1}
89
+
90
+# libvirt.xml configuration
91
+NET_XML=$vm_dir/net.xml
92
+cat > $NET_XML <<EOF
93
+<network>
94
+  <name>devstack-$GUEST_NETWORK</name>
95
+  <bridge name="stackbr%d" />
96
+  <forward/>
97
+  <ip address="$GUEST_GATEWAY" netmask="$GUEST_NETMASK">
98
+    <dhcp>
99
+      <range start='192.168.$GUEST_NETWORK.2' end='192.168.$GUEST_NETWORK.127' />
100
+    </dhcp>
101
+  </ip>
102
+</network>
103
+EOF
104
+
105
+if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
106
+    virsh net-destroy devstack-$GUEST_NETWORK || true
107
+    # destroying the network isn't enough to delete the leases
108
+    rm -f /var/lib/libvirt/dnsmasq/devstack-$GUEST_NETWORK.leases
109
+    virsh net-create $vm_dir/net.xml
110
+fi
111
+
112
+# libvirt.xml configuration
113
+LIBVIRT_XML=$vm_dir/libvirt.xml
114
+cat > $LIBVIRT_XML <<EOF
115
+<domain type='kvm'>
116
+  <name>$GUEST_NAME</name>
117
+  <memory>$GUEST_RAM</memory>
118
+  <os>
119
+    <type>hvm</type>
120
+    <kernel>$image_dir/kernel</kernel>
121
+    <cmdline>root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu</cmdline>
122
+  </os>
123
+  <features>
124
+    <acpi/>
125
+  </features>
126
+  <clock offset='utc'/>
127
+  <vcpu>$GUEST_CORES</vcpu>
128
+  <devices>
129
+    <disk type='file'>
130
+      <driver type='qcow2'/>
131
+      <source file='$vm_dir/disk'/>
132
+      <target dev='vda' bus='virtio'/>
133
+    </disk>
134
+
135
+    <interface type='network'>
136
+      <source network='devstack-$GUEST_NETWORK'/>
137
+    </interface>
138
+        
139
+    <!-- The order is significant here.  File must be defined first -->
140
+    <serial type="file">
141
+      <source path='$vm_dir/console.log'/>
142
+      <target port='1'/>
143
+    </serial>
144
+
145
+    <console type='pty' tty='/dev/pts/2'>
146
+      <source path='/dev/pts/2'/>
147
+      <target port='0'/>
148
+    </console>
149
+
150
+    <serial type='pty'>
151
+      <source path='/dev/pts/2'/>
152
+      <target port='0'/>
153
+    </serial>
154
+
155
+    <graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
156
+  </devices>
157
+</domain>
158
+EOF
159
+
160
+
161
+rm -rf $vm_dir/uec
162
+cp -r $TOOLS_DIR/uec $vm_dir/uec
163
+
164
+# set metadata
165
+cat > $vm_dir/uec/meta-data<<EOF
166
+hostname: $GUEST_NAME
167
+instance-id: i-hop
168
+instance-type: m1.ignore
169
+local-hostname: $GUEST_NAME.local
170
+EOF
171
+
172
+# set metadata
173
+cat > $vm_dir/uec/user-data<<EOF
174
+#!/bin/bash
175
+# hostname needs to resolve for rabbit
176
+sed -i "s/127.0.0.1/127.0.0.1 \`hostname\`/" /etc/hosts
177
+apt-get update
178
+apt-get install git sudo -y
179
+git clone https://github.com/cloudbuilders/devstack.git
180
+cd devstack
181
+git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'`
182
+git fetch
183
+git checkout `git rev-parse HEAD`
184
+cat > localrc <<LOCAL_EOF
185
+ROOTSLEEP=0
186
+`cat $TOP_DIR/localrc`
187
+LOCAL_EOF
188
+./stack.sh
189
+EOF
190
+
191
+# (re)start a metadata service
192
+(
193
+  pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1`
194
+  [ -z "$pid" ] || kill -9 $pid
195
+)
196
+cd $vm_dir/uec
197
+python meta.py 192.168.$GUEST_NETWORK.1:4567 &
198
+
199
+# Create the instance
200
+virsh create $vm_dir/libvirt.xml
201
+
202
+# Tail the console log till we are done
203
+WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
204
+if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
205
+    set +o xtrace
206
+    # Done creating the container, let's tail the log
207
+    echo
208
+    echo "============================================================="
209
+    echo "                          -- YAY! --"
210
+    echo "============================================================="
211
+    echo
212
+    echo "We're done launching the vm, about to start tailing the"
213
+    echo "stack.sh log. It will take a second or two to start."
214
+    echo
215
+    echo "Just CTRL-C at any time to stop tailing."
216
+
217
+    while [ ! -e "$vm_dir/console.log" ]; do
218
+      sleep 1
219
+    done
220
+
221
+    tail -F $vm_dir/console.log &
222
+
223
+    TAIL_PID=$!
224
+
225
+    function kill_tail() {
226
+        kill $TAIL_PID
227
+        exit 1
228
+    }
229
+
230
+    # Let Ctrl-c kill tail and exit
231
+    trap kill_tail SIGINT
232
+
233
+    echo "Waiting stack.sh to finish..."
234
+    while ! egrep -q '^stack.sh (completed|failed)' $vm_dir/console.log ; do
235
+        sleep 1
236
+    done
237
+
238
+    set -o xtrace
239
+
240
+    kill $TAIL_PID
241
+
242
+    if ! grep -q "^stack.sh completed in" $vm_dir/console.log; then
243
+        exit 1
244
+    fi
245
+    echo ""
246
+    echo "Finished - Zip-a-dee Doo-dah!"
247
+fi
... ...
@@ -11,6 +11,26 @@ PXEDIR=${PXEDIR:-/var/cache/devstack/pxe}
11 11
 OPWD=`pwd`
12 12
 PROGDIR=`dirname $0`
13 13
 
14
+# Clean up any resources that may be in use
15
+cleanup() {
16
+    set +o errexit
17
+
18
+    # Mop up temporary files
19
+    if [ -n "$DEST_DEV" ]; then
20
+        umount $DEST_DIR
21
+        rmdir $DEST_DIR
22
+    fi
23
+    if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then
24
+        umount $MNTDIR
25
+        rmdir $MNTDIR
26
+    fi
27
+
28
+    # Kill ourselves to signal any calling process
29
+    trap 2; kill -2 $$
30
+}
31
+
32
+trap cleanup SIGHUP SIGINT SIGTERM
33
+
14 34
 if [ -b $DEST_DIR ]; then
15 35
     # We have a block device, install syslinux and mount it
16 36
     DEST_DEV=$DEST_DIR
... ...
@@ -14,6 +14,9 @@ MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"}
14 14
 TOOLS_DIR=$(cd $(dirname "$0") && pwd)
15 15
 TOP_DIR=`cd $TOOLS_DIR/..; pwd`
16 16
 
17
+# exit on error to stop unexpected errors
18
+set -o errexit
19
+
17 20
 usage() {
18 21
     echo "Usage: $0 - Prepare Ubuntu images"
19 22
     echo ""
... ...
@@ -26,6 +29,32 @@ usage() {
26 26
     exit 1
27 27
 }
28 28
 
29
+# Clean up any resources that may be in use
30
+cleanup() {
31
+    set +o errexit
32
+
33
+    # Mop up temporary files
34
+    if [ -n "$IMG_FILE_TMP" -a -e "$IMG_FILE_TMP" ]; then
35
+        rm -f $IMG_FILE_TMP
36
+    fi
37
+
38
+    # Release NBD devices
39
+    if [ -n "$NBD" ]; then
40
+        qemu-nbd -d $NBD
41
+    fi
42
+
43
+    # Kill ourselves to signal any calling process
44
+    trap 2; kill -2 $$
45
+}
46
+
47
+# apt-get wrapper to just get arguments set correctly
48
+function apt_get() {
49
+    local sudo="sudo"
50
+    [ "$(id -u)" = "0" ] && sudo="env"
51
+    $sudo DEBIAN_FRONTEND=noninteractive apt-get \
52
+        --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
53
+}
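apt_get centralizes the flags unattended installs need - noninteractive frontend, keep existing conffiles, assume yes - and only prefixes sudo when not already root. A hedged call site:

    apt_get update
    apt_get install qemu-kvm    # no prompts, old conffiles preserved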
54
+
29 55
 while getopts f:hmr: c; do
30 56
     case $c in
31 57
         f)  FORMAT=$OPTARG
... ...
@@ -89,11 +118,21 @@ case $DIST_NAME in
89 89
                 ;;
90 90
 esac
91 91
 
92
+trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT
93
+
94
+# Check for dependencies
95
+
96
+if [ ! -x "`which qemu-img`" -o ! -x "`which qemu-nbd`" ]; then
97
+    # Missing KVM?
98
+    apt_get install qemu-kvm
99
+fi
100
+
92 101
 # Prepare the base image
93 102
 
94 103
 # Get the UEC image
95 104
 UEC_NAME=$DIST_NAME-server-cloudimg-amd64
96 105
 if [ ! -e $CACHEDIR/$UEC_NAME-disk1.img ]; then
106
+    mkdir -p $CACHEDIR
97 107
     (cd $CACHEDIR && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME-disk1.img)
98 108
 fi
99 109
 
... ...
@@ -111,25 +150,33 @@ if [ $ROOTSIZE -gt 2000 ]; then
111 111
     qemu-img resize $IMG_FILE_TMP +$((ROOTSIZE - 2000))M
112 112
 fi
113 113
 
114
-# Set up nbd
115
-modprobe nbd max_part=63
116
-for i in `seq 1 15`; do
117
-    if [ ! -e /sys/block/nbd$i/pid ]; then
118
-        NBD=/dev/nbd$i
119
-        # Connect to nbd and wait till it is ready
120
-        qemu-nbd -c $NBD $IMG_FILE_TMP
121
-        if ! timeout 60 sh -c "while ! [ -e ${NBD}p1 ]; do sleep 1; done"; then
122
-            echo "Couldn't connect $NBD"
123
-            exit 1
114
+# Finds the next available NBD device
115
+# Exits script if error connecting or none free
116
+# map_nbd image
117
+# returns full nbd device path
118
+function map_nbd {
119
+    for i in `seq 0 15`; do
120
+        if [ ! -e /sys/block/nbd$i/pid ]; then
121
+            NBD=/dev/nbd$i
122
+            # Connect to nbd and wait till it is ready
123
+            qemu-nbd -c $NBD $1
124
+            if ! timeout 60 sh -c "while ! [ -e ${NBD}p1 ]; do sleep 1; done"; then
125
+                echo "Couldn't connect $NBD"
126
+                exit 1
127
+            fi
128
+            break
124 129
         fi
125
-        break
130
+    done
131
+    if [ -z "$NBD" ]; then
132
+        echo "No free NBD slots"
133
+        exit 1
126 134
     fi
127
-done
128
-if [ -z "$NBD" ]; then
129
-    echo "No free NBD slots"
130
-    exit 1
131
-fi
132
-NBD_DEV=`basename $NBD`
135
+    echo $NBD
136
+}
137
+
138
+# Set up nbd
139
+modprobe nbd max_part=63
140
+NBD=`map_nbd $IMG_FILE_TMP`
133 141
 
134 142
 # Resize partition 1 to full size of the disk image
135 143
 echo "d
... ...
@@ -162,5 +209,6 @@ rm -f $MNTDIR/etc/resolv.conf
162 162
 umount $MNTDIR
163 163
 rmdir $MNTDIR
164 164
 qemu-nbd -d $NBD
165
+NBD=""
165 166
 
166 167
 mv $IMG_FILE_TMP $IMG_FILE
167 168
new file mode 100755
... ...
@@ -0,0 +1,74 @@
0
+#!/usr/bin/env bash
1
+
2
+# Echo commands
3
+set -o xtrace
4
+
5
+# Exit on error to stop unexpected errors
6
+set -o errexit
7
+
8
+# Keep track of the current directory
9
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
10
+TOP_DIR=`cd $TOOLS_DIR/..; pwd`
11
+
12
+# Change dir to top of devstack
13
+cd $TOP_DIR
14
+
15
+# Echo usage
16
+usage() {
17
+    echo "Add stack user and keys"
18
+    echo ""
19
+    echo "Usage: $0 [full path to raw uec base image]"
20
+}
21
+
22
+# Make sure this is a raw image
23
+if ! qemu-img info $1 | grep -q "file format: raw"; then
24
+    usage
25
+    exit 1
26
+fi
27
+
28
+# Mount the image
29
+DEST=/opt/stack
30
+STAGING_DIR=/tmp/`echo $1 | sed  "s/\//_/g"`.stage.user
31
+mkdir -p $STAGING_DIR
32
+umount $STAGING_DIR || true
33
+sleep 1
34
+mount -t ext4 -o loop $1 $STAGING_DIR
35
+mkdir -p $STAGING_DIR/$DEST
36
+
37
+# Create a stack user that is a member of the libvirtd group so that stack
38
+# is able to interact with libvirt.
39
+chroot $STAGING_DIR groupadd libvirtd || true
40
+chroot $STAGING_DIR useradd stack -s /bin/bash -d $DEST -G libvirtd || true
41
+
42
+# Add a simple password - pass
43
+echo stack:pass | chroot $STAGING_DIR chpasswd
44
+
45
+# Configure sudo
46
+grep -q "^#includedir.*/etc/sudoers.d" $STAGING_DIR/etc/sudoers ||
47
+    echo "#includedir /etc/sudoers.d" | sudo tee -a $STAGING_DIR/etc/sudoers
48
+cp $TOP_DIR/files/sudo/* $STAGING_DIR/etc/sudoers.d/
49
+sed -e "s,%USER%,$USER,g" -i $STAGING_DIR/etc/sudoers.d/*
50
+
51
+# and has sudo ability (in the future this should be limited to only what
52
+# stack requires)
53
+echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers
54
+
55
+# Gracefully cp only if source file/dir exists
56
+function cp_it {
57
+    if [ -e $1 ] || [ -d $1 ]; then
58
+        cp -pRL $1 $2
59
+    fi
60
+}
61
+
62
+# Copy over your ssh keys and env if desired
63
+cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh
64
+cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys
65
+cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig
66
+cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc
67
+cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc
68
+
69
+# Give stack ownership over $DEST so it may do the work needed
70
+chroot $STAGING_DIR chown -R stack $DEST
71
+
72
+# Unmount
73
+umount $STAGING_DIR
0 74
new file mode 100644
... ...
@@ -0,0 +1,29 @@
0
+import sys
1
+from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
2
+from SimpleHTTPServer import SimpleHTTPRequestHandler
3
+
4
+def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
5
+         ServerClass = HTTPServer, protocol="HTTP/1.0"):
6
+    """simple http server that listens on a give address:port"""
7
+
8
+    server_address = (host, port)
9
+
10
+    HandlerClass.protocol_version = protocol
11
+    httpd = ServerClass(server_address, HandlerClass)
12
+
13
+    sa = httpd.socket.getsockname()
14
+    print "Serving HTTP on", sa[0], "port", sa[1], "..."
15
+    httpd.serve_forever()
16
+
17
+if __name__ == '__main__':
18
+    if sys.argv[1:]:
19
+        address = sys.argv[1]
20
+    else:
21
+        address = '0.0.0.0'
22
+    if ':' in address:
23
+        host, port = address.split(':')
24
+    else:
25
+        host = address
26
+        port = 8080
27
+
28
+    main(host, int(port))
0 29
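meta.py is a plain Python 2 SimpleHTTPServer bound to a specific host:port, serving whatever sits in its working directory; with the meta-data and user-data files written by the build script, a quick smoke test looks like (addresses match the GUEST_NETWORK=1 defaults above):

    cd $vm_dir/uec
    python meta.py 192.168.1.1:4567 &       # same seed URL as the kernel cmdline
    curl http://192.168.1.1:4567/meta-data  # should print the hostname block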
new file mode 100755
... ...
@@ -0,0 +1,53 @@
0
+#!/usr/bin/env bash
1
+
2
+# Echo commands
3
+set -o xtrace
4
+
5
+# Exit on error to stop unexpected errors
6
+set -o errexit
7
+
8
+# Keep track of the current directory
9
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
10
+TOP_DIR=`cd $TOOLS_DIR/..; pwd`
11
+
12
+# Change dir to top of devstack
13
+cd $TOP_DIR
14
+
15
+# Echo usage
16
+usage() {
17
+    echo "Cache OpenStack dependencies on a uec image to speed up performance."
18
+    echo ""
19
+    echo "Usage: $0 [full path to raw uec base image]"
20
+}
21
+
22
+# Make sure this is a raw image
23
+if ! qemu-img info $1 | grep -q "file format: raw"; then
24
+    usage
25
+    exit 1
26
+fi
27
+
28
+# Make sure we are in the correct dir
29
+if [ ! -d files/apts ]; then
30
+    echo "Please run this script from devstack/tools/"
31
+    exit 1
32
+fi 
33
+
34
+# Mount the image
35
+STAGING_DIR=/tmp/`echo $1 | sed  "s/\//_/g"`.stage
36
+mkdir -p $STAGING_DIR
37
+umount $STAGING_DIR || true
38
+sleep 1
39
+mount -t ext4 -o loop $1 $STAGING_DIR
40
+
41
+# Make sure that base requirements are installed
42
+cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf
43
+
44
+# Perform caching on the base image to speed up subsequent runs
45
+chroot $STAGING_DIR apt-get update
46
+chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
47
+chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true
48
+mkdir -p $STAGING_DIR/var/cache/pip
49
+PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true
50
+
51
+# Unmount
52
+umount $STAGING_DIR
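Pointing PIP_DOWNLOAD_CACHE at /var/cache/pip while installing into the chroot leaves the fetched archives inside the image, so later pip runs in the booted guest can reuse them instead of re-downloading (package name illustrative):

    PIP_DOWNLOAD_CACHE=/var/cache/pip pip install python-novaclient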
... ...
@@ -7,6 +7,12 @@ if [ ! -e ../../localrc ]; then
7 7
     exit 1
8 8
 fi
9 9
 
10
+# This directory
11
+TOP_DIR=$(cd $(dirname "$0") && pwd)
12
+
13
+# Source params
14
+cd ../.. && source ./stackrc && cd $TOP_DIR
15
+
10 16
 # Echo commands
11 17
 set -o xtrace
12 18
 
... ...
@@ -41,9 +47,6 @@ GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}
41 41
 # Size of image
42 42
 VDI_MB=${VDI_MB:-2500}
43 43
 
44
-# This directory
45
-TOP_DIR=$(cd $(dirname "$0") && pwd)
46
-
47 44
 # Make sure we have git
48 45
 if ! which git; then
49 46
     GITDIR=/tmp/git-1.7.7
... ...
@@ -223,15 +226,21 @@ mkdir -p /boot/guest
223 223
 SR_UUID=`xe sr-list --minimal name-label="Local storage"`
224 224
 xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage
225 225
 
226
-# Uninstall previous runs
227
-xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh
228 226
 
229
-# Destroy any instances that were launched
230
-for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
231
-    echo "Shutting down nova instance $uuid"
232
-    xe vm-shutdown uuid=$uuid
233
-    xe vm-destroy uuid=$uuid
234
-done
227
+# Shutdown previous runs
228
+DO_SHUTDOWN=${DO_SHUTDOWN:-1}
229
+if [ "$DO_SHUTDOWN" = "1" ]; then
230
+    # Shut down all domUs that were created previously
231
+    xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh
232
+
233
+    # Destroy any instances that were launched
234
+    for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
235
+        echo "Shutting down nova instance $uuid"
236
+        xe vm-unpause uuid=$uuid || true
237
+        xe vm-shutdown uuid=$uuid
238
+        xe vm-destroy uuid=$uuid
239
+    done
240
+fi
235 241
 
236 242
 # Path to head xva.  By default keep overwriting the same one to save space
237 243
 USE_SEPARATE_XVAS=${USE_SEPARATE_XVAS:-0}
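DO_SHUTDOWN defaults to tearing down previously launched domUs and instances; the cluster script below turns it off for the second node so the head node keeps running. The same switch works by hand (assuming this file is the build_domU.sh that script invokes):

    DO_SHUTDOWN=0 ./build_domU.sh   # leave existing domUs running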
238 244
new file mode 100755
... ...
@@ -0,0 +1,35 @@
0
+#!/usr/bin/env bash
1
+
2
+# Echo commands
3
+set -o xtrace
4
+
5
+# Head node host, which runs glance, api, keystone
6
+HEAD_PUB_IP=${HEAD_PUB_IP:-192.168.1.57}
7
+HEAD_MGT_IP=${HEAD_MGT_IP:-172.16.100.57}
8
+
9
+COMPUTE_PUB_IP=${COMPUTE_PUB_IP:-192.168.1.58}
10
+COMPUTE_MGT_IP=${COMPUTE_MGT_IP:-172.16.100.58}
11
+
12
+# Networking params
13
+FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30}
14
+
15
+# Variables common amongst all hosts in the cluster
16
+COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE"
17
+
18
+# Helper to launch containers
19
+function build_domU {
20
+    GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_domU.sh
21
+}
22
+
23
+# Launch the head node - the head node uses a non-IP domain name,
24
+# because rabbit won't launch with an IP address as its hostname :(
25
+build_domU HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit"
26
+
27
+# Wait till the head node is up
28
+while ! curl -L http://$HEAD_PUB_IP | grep -q username; do
29
+    echo "Waiting for head node ($HEAD_PUB_IP) to start..."
30
+    sleep 5
31
+done
32
+
33
+# Build the HA compute host
34
+build_domU $COMPUTE_PUB_IP $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api"