Merge remote-tracking branch 'upstream/master'

Dean Troyer authored on 2011/09/21 00:41:16
Showing 7 changed files
... ...
@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+
 # Configurable params
 BRIDGE=${BRIDGE:-br0}
 CONTAINER=${CONTAINER:-STACK}
... ...
@@ -18,11 +19,11 @@ if ! grep -q natty /etc/lsb-release; then
 fi
 
 # Install deps
-apt-get install lxc debootstrap
+apt-get install -y lxc debootstrap
 
 # Install cgroup-bin from source, since the packaging is buggy and possibly incompatible with our setup
 if ! which cgdelete | grep -q cgdelete; then
-    apt-get install g++ bison flex libpam0g-dev
+    apt-get install -y g++ bison flex libpam0g-dev
     wget http://sourceforge.net/projects/libcg/files/libcgroup/v0.37.1/libcgroup-0.37.1.tar.bz2/download -O /tmp/libcgroup-0.37.1.tar.bz2
     cd /tmp && bunzip2 libcgroup-0.37.1.tar.bz2 && tar xfv libcgroup-0.37.1.tar
     cd libcgroup-0.37.1
... ...
@@ -49,15 +50,21 @@ if [ -d /cgroup/$CONTAINER ]; then
     cgdelete -r cpu,net_cls:$CONTAINER
 fi
 
+
 # Warm the base image on first install
 CACHEDIR=/var/cache/lxc/natty/rootfs-amd64
 if [ ! -d $CACHEDIR ]; then
+    # by deleting the container, we force lxc-create to re-bootstrap (lxc is
+    # lazy and doesn't do anything if a container already exists)
+    lxc-destroy -n $CONTAINER
     # trigger the initial debootstrap
     lxc-create -n $CONTAINER -t natty -f $LXC_CONF
     chroot $CACHEDIR apt-get update
-    chroot $CACHEDIR apt-get install -y `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
+    chroot $CACHEDIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
     chroot $CACHEDIR pip install `cat files/pips/*`
-    git clone https://github.com/cloudbuilders/nova.git $CACHEDIR/opt/nova
+    # FIXME (anthony) - provide ability to vary source locations
+    #git clone https://github.com/cloudbuilders/nova.git $CACHEDIR/opt/nova
+    bzr clone lp:~hudson-openstack/nova/milestone-proposed/ $CACHEDIR/opt/nova
     git clone https://github.com/cloudbuilders/openstackx.git $CACHEDIR/opt/openstackx
     git clone https://github.com/cloudbuilders/noVNC.git $CACHEDIR/opt/noVNC
     git clone https://github.com/cloudbuilders/openstack-dashboard.git $CACHEDIR/opt/dash
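
The FIXME above could be addressed with a small override hook. A sketch (editor's suggestion; NOVA_REPO is a hypothetical variable, not part of this commit):

    # NOVA_REPO is hypothetical: default to the branch this commit pins,
    # but let the caller point the clone elsewhere
    NOVA_REPO=${NOVA_REPO:-lp:~hudson-openstack/nova/milestone-proposed/}
    bzr clone $NOVA_REPO $CACHEDIR/opt/nova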
... ...
@@ -61,14 +61,8 @@ QUANTUM_PORT = '9696'
 QUANTUM_TENANT = '1234'
 QUANTUM_CLIENT_VERSION='0.1'
 
-# If you have external monitoring links
-EXTERNAL_MONITORING = [
-    ['Nagios','http://foo.com'],
-    ['Ganglia','http://bar.com'],
-]
-
-# If you do not have external monitoring links
-# EXTERNAL_MONITORING = []
+# We use nixon to embed instead of external monitoring links
+EXTERNAL_MONITORING = []
 
 # Uncomment the following segment to silence most logging
 # django.db and boto DEBUG logging is extremely verbose.
new file mode 100644
@@ -0,0 +1,178 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose = True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug = True
+
+# Which backend store should Glance use by default if not specified
+# in a request to add a new image to Glance? Default: 'file'
+# Available choices are 'file', 'swift', and 's3'
+default_store = file
+
+# Address to bind the API server
+bind_host = 0.0.0.0
+
+# Port to bind the API server to
+bind_port = 9292
+
+# Address to find the registry server
+registry_host = 0.0.0.0
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# Log to this file. Make sure you do not set the same log
+# file for both the API and registry servers!
+log_file = /var/log/glance/api.log
+
+# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
+use_syslog = False
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when images are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rabbit (via a rabbitmq queue) or noop (no
+# notifications sent, the default)
+notifier_strategy = noop
+
+# Configuration options if sending notifications via rabbitmq (these are
+# the defaults)
+rabbit_host = localhost
+rabbit_port = 5672
+rabbit_use_ssl = false
+rabbit_userid = guest
+rabbit_password = guest
+rabbit_virtual_host = /
+rabbit_notification_topic = glance_notifications
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+filesystem_store_datadir = /var/lib/glance/images/
+
+# ============ Swift Store Options =============================
+
+# Address where the Swift authentication service lives
+swift_store_auth_address = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the Swift authentication service
+swift_store_user = jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that Glance should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# At what size, in MB, should Glance start chunking image files
+# and doing a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Bucket within the account that Glance should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is to append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# ============ Image Cache Options ========================
+
+image_cache_enabled = False
+
+# Directory that the Image Cache writes data to
+# Make sure this is also set in glance-pruner.conf
+image_cache_datadir = /var/lib/glance/image-cache/
+
+# Number of seconds after which we should consider an incomplete image to be
+# stalled and eligible for reaping
+image_cache_stall_timeout = 86400
+
+# ============ Delayed Delete Options =============================
+
+# Turn on/off delayed delete
+delayed_delete = False
+
+# Delayed delete time in seconds
+scrub_time = 43200
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-scrubber.conf
+scrubber_datadir = /var/lib/glance/scrubber
+
+[pipeline:glance-api]
+#pipeline = versionnegotiation context apiv1app
+# NOTE: use the following pipeline for keystone
+pipeline = versionnegotiation authtoken context apiv1app
+
+# To enable the Image Cache Management API, replace the pipeline with:
+# pipeline = versionnegotiation context imagecache apiv1app
+# NOTE: use the following pipeline for keystone auth (with caching)
+# pipeline = versionnegotiation authtoken context imagecache apiv1app
+
+[pipeline:versions]
+pipeline = versionsapp
+
+[app:versionsapp]
+paste.app_factory = glance.api.versions:app_factory
+
+[app:apiv1app]
+paste.app_factory = glance.api.v1:app_factory
+
+[filter:versionnegotiation]
+paste.filter_factory = glance.api.middleware.version_negotiation:filter_factory
+
+[filter:imagecache]
+paste.filter_factory = glance.api.middleware.image_cache:filter_factory
+
+[filter:context]
+paste.filter_factory = glance.common.context:filter_factory
+
+[filter:authtoken]
+paste.filter_factory = keystone.middleware.auth_token:filter_factory
+service_protocol = http
+service_host = 127.0.0.1
+service_port = 5000
+auth_host = 127.0.0.1
+auth_port = 5001
+auth_protocol = http
+auth_uri = http://127.0.0.1:5000/
+admin_token = 999888777666
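
With authtoken in the default pipeline, unauthenticated requests to the API are rejected. A quick smoke test (editor's sketch: X-Auth-Token is the standard keystone middleware header, and 999888777666 is the admin_token configured above):

    # expects glance-api to be listening on bind_port 9292
    curl -H "X-Auth-Token: 999888777666" http://127.0.0.1:9292/v1/images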
... ...
@@ -41,9 +41,9 @@ api_limit_max = 1000
 limit_param_default = 25
 
 [pipeline:glance-registry]
-pipeline = context registryapp
+#pipeline = context registryapp
 # NOTE: use the following pipeline for keystone
-# pipeline = authtoken keystone_shim context registryapp
+pipeline = authtoken keystone_shim context registryapp
 
 [app:registryapp]
 paste.app_factory = glance.registry.server:app_factory
... ...
@@ -42,7 +42,8 @@ admin_port = 5001
 keystone-admin-role = Admin
 
 # Role that allows performing service admin operations.
-keystone-service-admin-role = KeystoneServiceAdmin
+# FIXME: need to separate this into a different role like KeystoneServiceAdmin
+keystone-service-admin-role = Admin
 
 [keystone.backends.sqlalchemy]
 # SQLAlchemy connection string for the reference implementation registry
... ...
@@ -5,8 +5,8 @@ $BIN_DIR/keystone-manage $* tenant add admin
 $BIN_DIR/keystone-manage $* tenant add demo
 
 # Users
-$BIN_DIR/keystone-manage $* user add demo secrete demo
-$BIN_DIR/keystone-manage $* user add admin secrete admin
+$BIN_DIR/keystone-manage $* user add admin secrete 1
+$BIN_DIR/keystone-manage $* user add demo secrete 2
 
 # Roles
 $BIN_DIR/keystone-manage $* role add Admin
... ...
@@ -21,21 +21,21 @@ $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_
 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne identity http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:5001/v2.0 http://%HOST_IP%:5000/v2.0 1 1
 
 # Tokens
-$BIN_DIR/keystone-manage $* token add 999888777666 admin admin 2015-02-05T00:00
+$BIN_DIR/keystone-manage $* token add 999888777666 1 1 2015-02-05T00:00
 
 # Tenant endpoints
-$BIN_DIR/keystone-manage $* endpoint add admin 1
-$BIN_DIR/keystone-manage $* endpoint add admin 2
-$BIN_DIR/keystone-manage $* endpoint add admin 3
-$BIN_DIR/keystone-manage $* endpoint add admin 4
-$BIN_DIR/keystone-manage $* endpoint add admin 5
-$BIN_DIR/keystone-manage $* endpoint add admin 6
+$BIN_DIR/keystone-manage $* endpoint add 1 1
+$BIN_DIR/keystone-manage $* endpoint add 1 2
+$BIN_DIR/keystone-manage $* endpoint add 1 3
+$BIN_DIR/keystone-manage $* endpoint add 1 4
+$BIN_DIR/keystone-manage $* endpoint add 1 5
+$BIN_DIR/keystone-manage $* endpoint add 1 6
 
-$BIN_DIR/keystone-manage $* endpoint add demo 1
-$BIN_DIR/keystone-manage $* endpoint add demo 2
-$BIN_DIR/keystone-manage $* endpoint add demo 3
-$BIN_DIR/keystone-manage $* endpoint add demo 4
-$BIN_DIR/keystone-manage $* endpoint add demo 5
-$BIN_DIR/keystone-manage $* endpoint add demo 6
+$BIN_DIR/keystone-manage $* endpoint add 2 1
+$BIN_DIR/keystone-manage $* endpoint add 2 2
+$BIN_DIR/keystone-manage $* endpoint add 2 3
+$BIN_DIR/keystone-manage $* endpoint add 2 4
+$BIN_DIR/keystone-manage $* endpoint add 2 5
+$BIN_DIR/keystone-manage $* endpoint add 2 6
 
 $BIN_DIR/keystone-manage $* credentials add admin EC2 'admin:admin' admin admin || echo "no support for adding credentials"
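
The endpoint calls now pass tenant ids rather than names (1 = admin, 2 = demo, matching the user add lines earlier in the script). A hypothetical tightening (editor's sketch, not in this commit) loops over the same ids:

    # tenants 1 (admin) and 2 (demo), endpoint templates 1-6
    for tenant in 1 2; do
        for template in 1 2 3 4 5 6; do
            $BIN_DIR/keystone-manage $* endpoint add $tenant $template
        done
    done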
... ...
@@ -281,6 +281,9 @@ if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
     GLANCE_CONF=$GLANCE_DIR/etc/glance-registry.conf
     cp $FILES/glance-registry.conf $GLANCE_CONF
     sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/glance,g" -i $GLANCE_CONF
+
+    GLANCE_API_CONF=$GLANCE_DIR/etc/glance-api.conf
+    cp $FILES/glance-api.conf $GLANCE_API_CONF
 fi
 
 # Nova
... ...
@@ -293,7 +296,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
     # qcow images) and kvm (hardware based virtualization).  If unable to
     # load kvm, set the libvirt type to qemu.
     sudo modprobe nbd || true
-    if ! sudo modprobe kvm; then
+    if [ ! -e /dev/kvm ]; then
         LIBVIRT_TYPE=qemu
     fi
     # User needs to be member of libvirtd group for nova-compute to use libvirt.
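
Checking /dev/kvm assumes the kvm module has already been loaded. A belt-and-braces variant (editor's sketch combining the old modprobe with the new device test):

    # try to load kvm, then trust the device node as the real signal
    sudo modprobe kvm || true
    if [ ! -e /dev/kvm ]; then
        LIBVIRT_TYPE=qemu
    fi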
... ...
@@ -318,7 +321,7 @@ fi
 
 if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then
     # delete traces of nova networks from prior runs
-    killall dnsmasq || true
+    sudo killall dnsmasq || true
     rm -rf $NOVA_DIR/networks
     mkdir -p $NOVA_DIR/networks
 fi
... ...
@@ -408,10 +411,33 @@ function screen_it {
 screen -d -m -S nova -t nova
 sleep 1
 
-screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf"
-screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf"
-screen_it key "$KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF"
-screen_it n-api "$NOVA_DIR/bin/nova-api"
+if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
+    screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf"
+fi
+
+if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
+    screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf"
+    while ! wget -q -O- http://$GLANCE_HOSTPORT; do
+        echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
+        sleep 1
+    done
+fi
+
+if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
+    screen_it key "$KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF"
+    while ! wget -q -O- http://127.0.0.1:5000; do
+        echo "Waiting for keystone to start..."
+        sleep 1
+    done
+fi
+
+if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
+    screen_it n-api "$NOVA_DIR/bin/nova-api"
+    while ! wget -q -O- http://127.0.0.1:8774; do
+        echo "Waiting for nova-api to start..."
+        sleep 1
+    done
+fi
 # Launching nova-compute should be as simple as running ``nova-compute`` but
 # have to do a little more than that in our script.  Since we add the group
 # ``libvirtd`` to our user in this script, when nova-compute is run it is
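
The three wait loops above repeat one polling pattern; a hypothetical helper (editor's sketch, not defined in this commit) would factor it out:

    # poll an HTTP endpoint until it answers; url and name are positional
    wait_for_http() {
        local url=$1 name=$2
        while ! wget -q -O- "$url" >/dev/null; do
            echo "Waiting for $name ($url) to start..."
            sleep 1
        done
    }
    # usage: wait_for_http http://127.0.0.1:5000 keystone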
... ...
@@ -441,9 +467,9 @@ if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
 
     # add images to glance
     # FIXME: kernel/ramdisk is hardcoded - use return result from add
-    glance add name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image
-    glance add name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image
-    glance add name="tty" is_public=true container_format=ami disk_format=ami kernel_id=1 ramdisk_id=2 < $FILES/images/ami-tty/image
+    glance add -A 999888777666 name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image
+    glance add -A 999888777666 name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image
+    glance add -A 999888777666 name="tty" is_public=true container_format=ami disk_format=ami kernel_id=1 ramdisk_id=2 < $FILES/images/ami-tty/image
 fi
 
 # Using the cloud
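
To confirm the three uploads registered under the shared token, the same -A flag used by glance add can be passed to the client's listing command (editor's sketch; assumes this era's glance CLI supports index):

    # list registered images, authenticating with the shared admin token
    glance index -A 999888777666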