
Merge pull request #167 from cloudbuilders/trunk-fixes

make some changes prepping for trunk branch

sleepsonthefloor authored on 2011/11/08 04:02:55
Showing 2 changed files
new file mode 100644
@@ -0,0 +1,127 @@
+#######
+# EC2 #
+#######
+
+[composite:ec2]
+use = egg:Paste#urlmap
+/: ec2versions
+/services/Cloud: ec2cloud
+/services/Admin: ec2admin
+/latest: ec2metadata
+/2007-01-19: ec2metadata
+/2007-03-01: ec2metadata
+/2007-08-29: ec2metadata
+/2007-10-10: ec2metadata
+/2007-12-15: ec2metadata
+/2008-02-01: ec2metadata
+/2008-09-01: ec2metadata
+/2009-04-04: ec2metadata
+/1.0: ec2metadata
+
+[pipeline:ec2cloud]
+pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
+
+[pipeline:ec2admin]
+pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
+
+[pipeline:ec2metadata]
+pipeline = logrequest ec2md
+
+[pipeline:ec2versions]
+pipeline = logrequest ec2ver
+
+[filter:logrequest]
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
+
+[filter:ec2lockout]
+paste.filter_factory = nova.api.ec2:Lockout.factory
+
+[filter:totoken]
+paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory
+
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
+[filter:authenticate]
+paste.filter_factory = nova.api.ec2:Authenticate.factory
+
+[filter:cloudrequest]
+controller = nova.api.ec2.cloud.CloudController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:adminrequest]
+controller = nova.api.ec2.admin.AdminController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:Authorizer.factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:Executor.factory
+
+[app:ec2ver]
+paste.app_factory = nova.api.ec2:Versions.factory
+
+[app:ec2md]
+paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory
+
+#############
+# Openstack #
+#############
+
+[composite:osapi]
+use = egg:Paste#urlmap
+/: osversions
+/v1.0: openstackapi10
+/v1.1: openstackapi11
+
+[pipeline:openstackapi10]
+pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
+
+[pipeline:openstackapi11]
+pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:auth]
+paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
+
+[filter:noauth]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:ratelimit]
+paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
+
+[filter:extensions]
+paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory
+
+[app:osapiapp10]
+paste.app_factory = nova.api.openstack:APIRouterV10.factory
+
+[app:osapiapp11]
+paste.app_factory = nova.api.openstack:APIRouterV11.factory
+
+[pipeline:osversions]
+pipeline = faultwrap osversionapp
+
+[app:osversionapp]
+paste.app_factory = nova.api.openstack.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystone.middleware.auth_token:filter_factory
+service_protocol = http
+service_host = 127.0.0.1
+service_port = 5000
+auth_host = 127.0.0.1
+auth_port = 35357
+auth_protocol = http
+auth_uri = http://127.0.0.1:5000/
+admin_token = %SERVICE_TOKEN%
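
For reference, this new file (evidently the nova-api-paste.ini that stack.sh copies below) uses Paste's urlmap composite to dispatch requests by URL prefix to a named pipeline, and each pipeline runs its filters in order before handing off to the final app. A rough illustration of the EC2 routing, assuming the API is served on nova's conventional EC2 port 8773 (the port is not part of this config):

    # assumption: nova-api exposes the [composite:ec2] section on port 8773
    curl http://127.0.0.1:8773/                    # '/'               -> ec2versions pipeline
    curl http://127.0.0.1:8773/services/Cloud      # '/services/Cloud' -> ec2cloud pipeline
    curl http://127.0.0.1:8773/latest/meta-data/   # '/latest'         -> ec2metadata pipeline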
... ...
@@ -232,7 +232,7 @@ VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE}
 # Multi-host is a mode where each compute node runs its own network node.  This
 # allows network operations and routing for a VM to occur on the server that is
 # running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=${MULTI_HOST:-0}
+MULTI_HOST=${MULTI_HOST:-False}
 
 # If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE``
 # variable but make sure that the interface doesn't already have an
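
The 0 -> False switch matters because MULTI_HOST is later compared as a string against "False" (mirroring the script's existing SYSLOG convention) rather than tested for emptiness. A minimal sketch of the ${VAR:-default} expansion in play here:

    # ${MULTI_HOST:-False} uses the caller's value if set, else "False"
    unset MULTI_HOST
    echo "${MULTI_HOST:-False}"   # prints: False
    MULTI_HOST=1
    echo "${MULTI_HOST:-False}"   # prints: 1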
... ...
@@ -325,7 +325,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
     # can never change.
     read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
 fi
-    
+
 # Keystone
 # --------
 
... ...
@@ -564,13 +564,12 @@ fi
 # ----
 
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    # We are going to use the sample http middleware configuration from the
-    # keystone project to launch nova.  This paste config adds the configuration
-    # required for nova to validate keystone tokens - except we need to switch
-    # the config to use our service token instead (instead of the invalid token
-    # 999888777666).
-    cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin
-    sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
+    # We are going to use a sample http middleware configuration based on the
+    # one from the keystone project to launch nova.  This paste config adds
+    # the configuration required for nova to validate keystone tokens. We add
+    # our own service token to the configuration.
+    cp $FILES/nova-api-paste.ini $NOVA_DIR/bin
+    sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
 fi
 
 if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
... ...
@@ -652,13 +651,13 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
     USER_GROUP=$(id -g)
     sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives
     sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives
-    
+
     # We then create a loopback disk and format it to XFS.
     if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]];then
         mkdir -p  ${SWIFT_DATA_LOCATION}/drives/images
         sudo touch  ${SWIFT_DATA_LOCATION}/drives/images/swift.img
         sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img
-        
+
         dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \
             bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
         mkfs.xfs -f -i size=1024  ${SWIFT_DATA_LOCATION}/drives/images/swift.img
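
In the dd invocation above, count=0 with seek=N writes no data but extends the file to N blocks, producing a sparse image that mkfs.xfs can format without consuming real disk space up front. A quick way to see the effect, with a throwaway path:

    # sparse 1 GiB file: apparent size is large, actual usage near zero
    dd if=/dev/zero of=/tmp/sparse-demo.img bs=1024 count=0 seek=1048576
    du -h --apparent-size /tmp/sparse-demo.img   # ~1.0G
    du -h /tmp/sparse-demo.img                   # ~0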
... ...
@@ -675,9 +674,9 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
     # We then create link to that mounted location so swift would know
     # where to go.
     for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done
-    
+
     # We now have to emulate a few different servers into one we
-    # create all the directories needed for swift 
+    # create all the directories needed for swift
     tmpd=""
     for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \
         ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \
... ...
@@ -693,7 +692,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
    # swift-init has a bug using /etc/swift until bug #885595 is fixed
    # we have to create a link
    sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift
-   
+
    # Swift use rsync to syncronize between all the different
    # partitions (which make more sense when you have a multi-node
    # setup) we configure it with our version of rsync.
... ...
@@ -729,7 +728,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
       local bind_port=$2
       local log_facility=$3
       local node_number
-       
+
       for node_number in {1..4};do
          node_path=${SWIFT_DATA_LOCATION}/${node_number}
          sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
... ...
@@ -756,14 +755,14 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
 
    # We then can start rsync.
    sudo /etc/init.d/rsync restart || :
-      
+
    # Create our ring for the object/container/account.
    /usr/local/bin/swift-remakerings
 
    # And now we launch swift-startmain to get our cluster running
    # ready to be tested.
    /usr/local/bin/swift-startmain || :
-   
+
    unset s swift_hash swift_auth_server tmpd
 fi
 
... ...
@@ -830,12 +829,12 @@ add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
 if [ -n "$INSTANCES_PATH" ]; then
     add_nova_flag "--instances_path=$INSTANCES_PATH"
 fi
-if [ -n "$MULTI_HOST" ]; then
-    add_nova_flag "--multi_host=$MULTI_HOST"
-    add_nova_flag "--send_arp_for_ha=1"
+if [ "$MULTI_HOST" != "False" ]; then
+    add_nova_flag "--multi_host"
+    add_nova_flag "--send_arp_for_ha"
 fi
 if [ "$SYSLOG" != "False" ]; then
-    add_nova_flag "--use_syslog=1"
+    add_nova_flag "--use_syslog"
 fi
 
 # XenServer
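
The guard change here fixes a logic bug: with MULTI_HOST defaulting to "0", [ -n "$MULTI_HOST" ] always succeeded because the string was non-empty, so the multi-host flags were added unconditionally. The new string comparison only fires when the operator opts in; a quick standalone check:

    MULTI_HOST=${MULTI_HOST:-False}
    [ -n "$MULTI_HOST" ] && echo "old test: fires even for the default"
    [ "$MULTI_HOST" != "False" ] && echo "new test: fires only when enabled"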