@@ -245,7 +245,6 @@ sudo chown `whoami` $DATA_DIR
 # Get project function libraries
 source $TOP_DIR/lib/cinder
 
-
 # Set the destination directories for openstack projects
 NOVA_DIR=$DEST/nova
 HORIZON_DIR=$DEST/horizon
@@ -463,9 +462,9 @@ fi
 GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292}
 
 
-# SWIFT
+# Swift
 # -----
-# TODO: implement glance support
+
 # TODO: add logging to different location.
 
 # By default the location of swift drives and objects is located inside
@@ -477,7 +476,7 @@ SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DEST}/data/swift}
 # directory, change SWIFT_CONFIG_DIR if you want to adjust that.
 SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift}
 
-# devstack will create a loop-back disk formatted as XFS to store the
+# DevStack will create a loop-back disk formatted as XFS to store the
 # swift data. By default the disk size is 1 gigabyte. The variable
 # SWIFT_LOOPBACK_DISK_SIZE specified in bytes allow you to change
 # that.
@@ -512,6 +511,7 @@ fi
 # Set default port for nova-objectstore
 S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
 
+
 # Keystone
 # --------
 
@@ -609,10 +609,10 @@ set -o xtrace
 
 # Install Packages
 # ================
-#
+
 # Openstack uses a fair number of other projects.
 
-# install package requirements
+# Install package requirements
 if [[ "$os_PACKAGE" = "deb" ]]; then
     apt_get update
     install_package $(get_packages $FILES/apts)
@@ -620,12 +620,13 @@ else
     install_package $(get_packages $FILES/rpms)
 fi
 
-# install python requirements
+# Install python requirements
 pip_install $(get_packages $FILES/pips | sort -u)
 
-# compute service
+# Check out OpenStack sources
 git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
-# python client library to nova that horizon (and others) use
+
+# Check out the client libs that are used most
 git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH
 git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
 git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH
@@ -678,7 +679,7 @@ fi
 # Initialization
 # ==============
 
-# setup our checkouts so they are installed into python path
+# Set up our checkouts so they are installed into python path
 # allowing ``import nova`` or ``import glance.client``
 setup_develop $KEYSTONECLIENT_DIR
 setup_develop $NOVACLIENT_DIR
@@ -874,16 +875,17 @@ function screen_it {
     fi
 }
 
-# create a new named screen to run processes in
+# Create a new named screen to run processes in
 screen -d -m -S stack -t stack -s /bin/bash
 sleep 1
-# set a reasonable statusbar
+# Set a reasonable statusbar
 screen -r stack -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
 
+
 # Horizon
 # -------
 
-# Setup the django horizon application to serve via apache/wsgi
+# Set up the django horizon application to serve via apache/wsgi
 
 if is_service_enabled horizon; then
 
@@ -900,7 +902,7 @@ if is_service_enabled horizon; then
     python manage.py syncdb
     cd $TOP_DIR
 
-    # create an empty directory that apache uses as docroot
+    # Create an empty directory that apache uses as docroot
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
     if [[ "$os_PACKAGE" = "deb" ]]; then
@@ -1007,8 +1009,10 @@ if is_service_enabled g-reg; then
 
 fi
 
-# Quantum (for controller or agent nodes)
+
+# Quantum
 # -------
+
 if is_service_enabled quantum; then
     # Put config files in /etc/quantum for everyone to find
     if [[ ! -d /etc/quantum ]]; then
@@ -1034,7 +1038,7 @@ if is_service_enabled quantum; then
         exit 1
     fi
 
-    # if needed, move config file from $QUANTUM_DIR/etc/quantum to /etc/quantum
+    # If needed, move config file from $QUANTUM_DIR/etc/quantum to /etc/quantum
     mkdir -p /$Q_PLUGIN_CONF_PATH
     Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
     if [[ -e $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE ]]; then
@@ -1143,7 +1147,6 @@ if is_service_enabled m-svc; then
 fi
 
 
-
 # Nova
 # ----
 
@@ -1279,7 +1282,7 @@ if is_service_enabled n-cpu; then
 
     QEMU_CONF=/etc/libvirt/qemu.conf
     if is_service_enabled quantum && [[ $Q_PLUGIN = "openvswitch" ]] && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then
-        # add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
+        # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
         sudo chmod 666 $QEMU_CONF
         sudo cat <<EOF >> /etc/libvirt/qemu.conf
 cgroup_device_acl = [
@@ -1363,7 +1366,10 @@ if is_service_enabled n-net; then
     sudo sysctl -w net.ipv4.ip_forward=1
 fi
 
+
 # Storage Service
+# ---------------
+
 if is_service_enabled swift; then
     # Install memcached for swift.
     install_package memcached
@@ -1663,7 +1669,7 @@ function add_nova_opt {
     echo "$1" >> $NOVA_CONF_DIR/$NOVA_CONF
 }
 
-# remove legacy nova.conf
+# Remove legacy nova.conf
 rm -f $NOVA_DIR/bin/nova.conf
 
 # (re)create nova.conf
@@ -1924,7 +1930,7 @@ if is_service_enabled key; then
     iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
     iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production"
 
-    # Set up the keystone database
+    # Initialize keystone database
     $KEYSTONE_DIR/bin/keystone-manage db_sync
 
     # launch keystone and wait for it to answer before continuing
@@ -1950,7 +1956,7 @@ if is_service_enabled key; then
     export OS_USERNAME=admin
     export OS_PASSWORD=$ADMIN_PASSWORD
 
-    # create an access key and secret key for nova ec2 register image
+    # Create an access key and secret key for nova ec2 register image
     if is_service_enabled swift && is_service_enabled nova; then
         NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
         NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1)
@@ -1963,7 +1969,7 @@ if is_service_enabled key; then
     fi
 fi
 
-# launch the nova-api and wait for it to answer before continuing
+# Launch the nova-api and wait for it to answer before continuing
 if is_service_enabled n-api; then
     add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS"
     screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"
@@ -1977,13 +1983,13 @@ fi
 # If we're using Quantum (i.e. q-svc is enabled), network creation has to
 # happen after we've started the Quantum service.
 if is_service_enabled mysql && is_service_enabled nova; then
-    # create a small network
+    # Create a small network
     $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
 
-    # create some floating ips
+    # Create some floating ips
     $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
 
-    # create a second pool
+    # Create a second pool
     $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
 fi
 
@@ -2012,6 +2018,7 @@ screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF
 is_service_enabled swift || \
     screen_it n-obj "cd $NOVA_DIR && $NOVA_DIR/bin/nova-objectstore"
 
+
 # Install Images
 # ==============
 