@@ -56,7 +56,7 @@ exit_if_aggregate_present() {
     if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then
         echo "SUCCESS $aggregate_name not present"
     else
-        echo "ERROR found aggregate: $aggregate_name"
+        die $LINENO "found aggregate: $aggregate_name"
         exit -1
     fi
 }
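
The idiom these hunks introduce is `command || die $LINENO "message"`: when the command fails, `die` aborts the exercise and reports the failing script and line. A minimal sketch of the pattern (the `nova` call is illustrative only):

    # $LINENO expands at the call site, so the error report points at this line
    nova aggregate-list || die $LINENO "could not list aggregates"
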
@@ -67,15 +67,14 @@ AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1)
 AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
 
 # check aggregate created
-nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created"
+nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created"
 
 
 # Ensure creating a duplicate fails
 # =================================
 
 if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then
-    echo "ERROR could create duplicate aggregate"
-    exit -1
+    die $LINENO "could create duplicate aggregate"
 fi
 
 
@@ -113,7 +112,7 @@ nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=78
 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY
 
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared"
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "ERROR metadata was not cleared"
 
 nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY
 nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}"
@@ -129,8 +128,7 @@ FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1)
 nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST
 nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST
 if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then
-    echo "ERROR could add duplicate host to single aggregate"
-    exit -1
+    die $LINENO "could add duplicate host to single aggregate"
 fi
 nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST
 nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST
@@ -72,7 +72,7 @@ glance image-list
 
 # Grab the id of the image to launch
 IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
 # ---------------
@@ -140,7 +140,7 @@ fi
 # Create the bootable volume
 start_time=$(date +%s)
 cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
-    die "Failure creating volume $VOL_NAME"
+    die $LINENO "Failure creating volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
     echo "Volume $VOL_NAME not created"
     exit 1
@@ -150,7 +150,7 @@ echo "Completed cinder create in $((end_time - start_time)) seconds"
 
 # Get volume ID
 VOL_ID=$(cinder list | grep $VOL_NAME | get_field 1)
-die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
+die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
 
 # Boot instance
 # -------------
@@ -159,7 +159,7 @@ die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
 # <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate>
 # Leaving the middle two fields blank appears to do-the-right-thing
 VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set VM_UUID "Failure launching $VM_NAME"
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
 
 # Check that the status is active within ACTIVE_TIMEOUT seconds
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
@@ -169,7 +169,7 @@ fi
 
 # Get the instance IP
 IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set IP "Failure retrieving IP address"
+die_if_not_set $LINENO IP "Failure retrieving IP address"
 
 # Private IPs can be pinged in single node deployments
 ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
@@ -178,7 +178,7 @@ ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
 # --------
 
 # Delete volume backed instance
-nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
 if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
     echo "Server $VM_NAME not deleted"
     exit 1
@@ -192,7 +192,7 @@ fi
 
 # Delete volume
 start_time=$(date +%s)
-cinder delete $VOL_ID || die "Failure deleting volume $VOLUME_NAME"
+cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOLUME_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
     echo "Volume $VOL_NAME not deleted"
     exit 1
@@ -201,7 +201,7 @@ end_time=$(date +%s)
 echo "Completed cinder delete in $((end_time - start_time)) seconds"
 
 # Delete secgroup
-nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
+nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
@@ -49,21 +49,20 @@ REGISTER_TIMEOUT=${REGISTER_TIMEOUT:-15}
 BUCKET=testbucket
 IMAGE=bundle.img
 truncate -s 5M /tmp/$IMAGE
-euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE"
+euca-bundle-image -i /tmp/$IMAGE || die $LINENO "Failure bundling image $IMAGE"
 
-euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET"
+euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die $LINENO "Failure uploading bundle $IMAGE to $BUCKET"
 
 AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2`
-die_if_not_set AMI "Failure registering $BUCKET/$IMAGE"
+die_if_not_set $LINENO AMI "Failure registering $BUCKET/$IMAGE"
 
 # Wait for the image to become available
 if ! timeout $REGISTER_TIMEOUT sh -c "while ! euca-describe-images | grep $AMI | grep -q available; do sleep 1; done"; then
-    echo "Image $AMI not available within $REGISTER_TIMEOUT seconds"
-    exit 1
+    die $LINENO "Image $AMI not available within $REGISTER_TIMEOUT seconds"
 fi
 
 # Clean up
-euca-deregister $AMI || die "Failure deregistering $AMI"
+euca-deregister $AMI || die $LINENO "Failure deregistering $AMI"
 
 set +o xtrace
 echo "*********************************************************************"
@@ -56,68 +56,62 @@ SECGROUP=${SECGROUP:-euca_secgroup}
 
 # Find a machine image to boot
 IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1`
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Add a secgroup
 if ! euca-describe-groups | grep -q $SECGROUP; then
     euca-add-group -d "$SECGROUP description" $SECGROUP
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then
-        echo "Security group not created"
-        exit 1
+        die $LINENO "Security group not created"
     fi
 fi
 
 # Launch it
 INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2`
-die_if_not_set INSTANCE "Failure launching instance"
+die_if_not_set $LINENO INSTANCE "Failure launching instance"
 
 # Assure it has booted within a reasonable time
 if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
-    echo "server didn't become active within $RUNNING_TIMEOUT seconds"
-    exit 1
+    die $LINENO "server didn't become active within $RUNNING_TIMEOUT seconds"
 fi
 
 # Volumes
 # -------
 if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then
     VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2`
-    die_if_not_set VOLUME_ZONE "Failure to find zone for volume"
+    die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume"
 
     VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2`
-    die_if_not_set VOLUME "Failure to create volume"
+    die_if_not_set $LINENO VOLUME "Failure to create volume"
 
     # Test that volume has been created
     VOLUME=`euca-describe-volumes | cut -f2`
-    die_if_not_set VOLUME "Failure to get volume"
+    die_if_not_set $LINENO VOLUME "Failure to get volume"
 
     # Test volume has become available
     if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then
-        echo "volume didnt become available within $RUNNING_TIMEOUT seconds"
-        exit 1
+        die $LINENO "volume didnt become available within $RUNNING_TIMEOUT seconds"
    fi
 
    # Attach volume to an instance
    euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \
-        die "Failure attaching volume $VOLUME to $INSTANCE"
+        die $LINENO "Failure attaching volume $VOLUME to $INSTANCE"
    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q in-use; do sleep 1; done"; then
-        echo "Could not attach $VOLUME to $INSTANCE"
-        exit 1
+        die $LINENO "Could not attach $VOLUME to $INSTANCE"
    fi
 
    # Detach volume from an instance
    euca-detach-volume $VOLUME || \
-        die "Failure detaching volume $VOLUME to $INSTANCE"
+        die $LINENO "Failure detaching volume $VOLUME to $INSTANCE"
    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then
-        echo "Could not detach $VOLUME to $INSTANCE"
-        exit 1
+        die $LINENO "Could not detach $VOLUME to $INSTANCE"
    fi
 
    # Remove volume
    euca-delete-volume $VOLUME || \
-        die "Failure to delete volume"
+        die $LINENO "Failure to delete volume"
    if ! timeout $ACTIVE_TIMEOUT sh -c "while euca-describe-volumes | grep $VOLUME; do sleep 1; done"; then
-        echo "Could not delete $VOLUME"
-        exit 1
+        die $LINENO "Could not delete $VOLUME"
    fi
 else
     echo "Volume Tests Skipped"
@@ -125,58 +119,55 @@ fi
 
 # Allocate floating address
 FLOATING_IP=`euca-allocate-address | cut -f2`
-die_if_not_set FLOATING_IP "Failure allocating floating IP"
+die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP"
 
 # Associate floating address
 euca-associate-address -i $INSTANCE $FLOATING_IP || \
-    die "Failure associating address $FLOATING_IP to $INSTANCE"
+    die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE"
 
 # Authorize pinging
 euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
-    die "Failure authorizing rule in $SECGROUP"
+    die $LINENO "Failure authorizing rule in $SECGROUP"
 
 # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
 ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
 
 # Revoke pinging
 euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
-    die "Failure revoking rule in $SECGROUP"
+    die $LINENO "Failure revoking rule in $SECGROUP"
 
 # Release floating address
 euca-disassociate-address $FLOATING_IP || \
-    die "Failure disassociating address $FLOATING_IP"
+    die $LINENO "Failure disassociating address $FLOATING_IP"
 
 # Wait just a tick for everything above to complete so release doesn't fail
 if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then
-    echo "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
-    exit 1
+    die $LINENO "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
 fi
 
 # Release floating address
 euca-release-address $FLOATING_IP || \
-    die "Failure releasing address $FLOATING_IP"
+    die $LINENO "Failure releasing address $FLOATING_IP"
 
 # Wait just a tick for everything above to complete so terminate doesn't fail
 if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then
-    echo "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
-    exit 1
+    die $LINENO "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
 fi
 
 # Terminate instance
 euca-terminate-instances $INSTANCE || \
-    die "Failure terminating instance $INSTANCE"
+    die $LINENO "Failure terminating instance $INSTANCE"
 
 # Assure it has terminated within a reasonable time. The behaviour of this
 # case changed with bug/836978. Requesting the status of an invalid instance
 # will now return an error message including the instance id, so we need to
 # filter that out.
 if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceID\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then
-    echo "server didn't terminate within $TERMINATE_TIMEOUT seconds"
-    exit 1
+    die $LINENO "server didn't terminate within $TERMINATE_TIMEOUT seconds"
 fi
 
 # Delete secgroup
-euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP"
+euca-delete-group $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
@@ -71,7 +71,7 @@ glance image-list
 
 # Grab the id of the image to launch
 IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
 # ---------------
@@ -83,8 +83,7 @@ nova secgroup-list
 if ! nova secgroup-list | grep -q $SECGROUP; then
     nova secgroup-create $SECGROUP "$SECGROUP description"
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
-        echo "Security group not created"
-        exit 1
+        die $LINENO "Security group not created"
     fi
 fi
 
@@ -115,7 +114,7 @@ fi
 # Clean-up from previous runs
 nova delete $VM_NAME || true
 if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    echo "server didn't terminate!"
+    die $LINENO "server didn't terminate!"
     exit 1
 fi
 
@@ -123,17 +122,16 @@ fi
 # -------------
 
 VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set VM_UUID "Failure launching $VM_NAME"
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
 
 # Check that the status is active within ACTIVE_TIMEOUT seconds
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    echo "server didn't become active!"
-    exit 1
+    die $LINENO "server didn't become active!"
 fi
 
 # Get the instance IP
 IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set IP "Failure retrieving IP address"
+die_if_not_set $LINENO IP "Failure retrieving IP address"
 
 # Private IPs can be pinged in single node deployments
 ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
@@ -143,17 +141,16 @@ ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
 
 # Allocate a floating IP from the default pool
 FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1)
-die_if_not_set FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL"
+die_if_not_set $LINENO FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL"
 
 # List floating addresses
 if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then
-    echo "Floating IP not allocated"
-    exit 1
+    die $LINENO "Floating IP not allocated"
 fi
 
 # Add floating IP to our server
 nova add-floating-ip $VM_UUID $FLOATING_IP || \
-    die "Failure adding floating IP $FLOATING_IP to $VM_NAME"
+    die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME"
 
 # Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds
 ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
@@ -161,18 +158,17 @@ ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
 if ! is_service_enabled quantum; then
     # Allocate an IP from second floating pool
     TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1)
-    die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
+    die_if_not_set $LINENO TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
 
     # list floating addresses
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then
-        echo "Floating IP not allocated"
-        exit 1
+        die $LINENO "Floating IP not allocated"
     fi
 fi
 
 # Dis-allow icmp traffic (ping)
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
-    die "Failure deleting security group rule from $SECGROUP"
+    die $LINENO "Failure deleting security group rule from $SECGROUP"
 
 # FIXME (anthony): make xs support security groups
 if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
@@ -186,24 +182,23 @@ fi
 if ! is_service_enabled quantum; then
     # Delete second floating IP
     nova floating-ip-delete $TEST_FLOATING_IP || \
-        die "Failure deleting floating IP $TEST_FLOATING_IP"
+        die $LINENO "Failure deleting floating IP $TEST_FLOATING_IP"
 fi
 
 # Delete the floating ip
 nova floating-ip-delete $FLOATING_IP || \
-    die "Failure deleting floating IP $FLOATING_IP"
+    die $LINENO "Failure deleting floating IP $FLOATING_IP"
 
 # Delete instance
-nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
 # Wait for termination
 if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    echo "Server $VM_NAME not deleted"
-    exit 1
+    die $LINENO "Server $VM_NAME not deleted"
 fi
 
 # Delete secgroup
 nova secgroup-delete $SECGROUP || \
-    die "Failure deleting security group $SECGROUP"
+    die $LINENO "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
@@ -36,7 +36,7 @@ source $TOP_DIR/exerciserc
 is_service_enabled horizon || exit 55
 
 # can we get the front page
-curl http://$SERVICE_HOST 2>/dev/null | grep -q '<h3>Log In</h3>' || die "Horizon front page not functioning!"
+curl http://$SERVICE_HOST 2>/dev/null | grep -q '<h3>Log In</h3>' || die $LINENO "Horizon front page not functioning!"
 
 set +o xtrace
 echo "*********************************************************************"
@@ -264,7 +264,7 @@ function create_vm {
         --image $(get_image_id) \
         $NIC \
         $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
-    die_if_not_set VM_UUID "Failure launching $TENANT-server$NUM" VM_UUID
+    die_if_not_set $LINENO VM_UUID "Failure launching $TENANT-server$NUM"
    confirm_server_active $VM_UUID
 }
 
@@ -309,8 +309,7 @@ function shutdown_vm {
 function shutdown_vms {
     foreach_tenant_vm 'shutdown_vm ${%TENANT%_NAME} %NUM%'
     if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then
-        echo "Some VMs failed to shutdown"
-        false
+        die $LINENO "Some VMs failed to shutdown"
     fi
 }
 
@@ -68,7 +68,7 @@ done
 
 # Delete secgroup
 nova secgroup-delete $SEC_GROUP_NAME || \
-    die "Failure deleting security group $SEC_GROUP_NAME"
+    die $LINENO "Failure deleting security group $SEC_GROUP_NAME"
 
 set +o xtrace
 echo "*********************************************************************"
@@ -45,20 +45,20 @@ CONTAINER=ex-swift
 # =============
 
 # Check if we have to swift via keystone
-swift stat || die "Failure geting status"
+swift stat || die $LINENO "Failure geting status"
 
 # We start by creating a test container
-swift post $CONTAINER || die "Failure creating container $CONTAINER"
+swift post $CONTAINER || die $LINENO "Failure creating container $CONTAINER"
 
 # add some files into it.
-swift upload $CONTAINER /etc/issue || die "Failure uploading file to container $CONTAINER"
+swift upload $CONTAINER /etc/issue || die $LINENO "Failure uploading file to container $CONTAINER"
 
 # list them
-swift list $CONTAINER || die "Failure listing contents of container $CONTAINER"
+swift list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER"
 
 # And we may want to delete them now that we have tested that
 # everything works.
-swift delete $CONTAINER || die "Failure deleting container $CONTAINER"
+swift delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER"
 
 set +o xtrace
 echo "*********************************************************************"
@@ -70,7 +70,7 @@ glance image-list
 
 # Grab the id of the image to launch
 IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
 # ---------------
@@ -114,25 +114,23 @@ fi
 # Clean-up from previous runs
 nova delete $VM_NAME || true
 if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    echo "server didn't terminate!"
-    exit 1
+    die $LINENO "server didn't terminate!"
 fi
 
 # Boot instance
 # -------------
 
 VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set VM_UUID "Failure launching $VM_NAME"
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
 
 # Check that the status is active within ACTIVE_TIMEOUT seconds
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    echo "server didn't become active!"
-    exit 1
+    die $LINENO "server didn't become active!"
 fi
 
 # Get the instance IP
 IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set IP "Failure retrieving IP address"
+die_if_not_set $LINENO IP "Failure retrieving IP address"
 
 # Private IPs can be pinged in single node deployments
 ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
@@ -142,42 +140,38 @@ ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
 
 # Verify it doesn't exist
 if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then
-    echo "Volume $VOL_NAME already exists"
-    exit 1
+    die $LINENO "Volume $VOL_NAME already exists"
 fi
 
 # Create a new volume
 start_time=$(date +%s)
 cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
-    die "Failure creating volume $VOL_NAME"
+    die $LINENO "Failure creating volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not created"
-    exit 1
+    die $LINENO "Volume $VOL_NAME not created"
 fi
 end_time=$(date +%s)
 echo "Completed cinder create in $((end_time - start_time)) seconds"
 
 # Get volume ID
 VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1)
-die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
+die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
 
 # Attach to server
 DEVICE=/dev/vdb
 start_time=$(date +%s)
 nova volume-attach $VM_UUID $VOL_ID $DEVICE || \
-    die "Failure attaching volume $VOL_NAME to $VM_NAME"
+    die $LINENO "Failure attaching volume $VOL_NAME to $VM_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not attached to $VM_NAME"
-    exit 1
+    die $LINENO "Volume $VOL_NAME not attached to $VM_NAME"
 fi
 end_time=$(date +%s)
 echo "Completed volume-attach in $((end_time - start_time)) seconds"
 
 VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1)
-die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status"
+die_if_not_set $LINENO VOL_ATTACH "Failure retrieving $VOL_NAME status"
 if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
-    echo "Volume not attached to correct instance"
-    exit 1
+    die $LINENO "Volume not attached to correct instance"
 fi
 
 # Clean up
@@ -185,33 +179,30 @@ fi
 
 # Detach volume
 start_time=$(date +%s)
-nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $VM_NAME"
+nova volume-detach $VM_UUID $VOL_ID || die $LINENO "Failure detaching volume $VOL_NAME from $VM_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not detached from $VM_NAME"
-    exit 1
+    die $LINENO "Volume $VOL_NAME not detached from $VM_NAME"
 fi
 end_time=$(date +%s)
 echo "Completed volume-detach in $((end_time - start_time)) seconds"
 
 # Delete volume
 start_time=$(date +%s)
-cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME"
+cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not deleted"
-    exit 1
+    die $LINENO "Volume $VOL_NAME not deleted"
 fi
 end_time=$(date +%s)
 echo "Completed cinder delete in $((end_time - start_time)) seconds"
 
 # Delete instance
-nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
 if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    echo "Server $VM_NAME not deleted"
-    exit 1
+    die $LINENO "Server $VM_NAME not deleted"
 fi
 
 # Delete secgroup
-nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
+nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
@@ -57,8 +57,15 @@ function cp_it {
 # die "message"
 function die() {
     local exitcode=$?
+    if [ $exitcode == 0 ]; then
+        exitcode=1
+    fi
     set +o xtrace
-    echo $@
+    local msg="[ERROR] $0:$1 $2"
+    echo $msg 1>&2;
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        echo $msg >> "${SCREEN_LOGDIR}/error.log"
+    fi
     exit $exitcode
 }
 
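
With this change `die` takes the caller's line number as its first argument and the message as its second: it keeps the failing command's exit code (coercing 0 to 1 so the caller still aborts), prints `[ERROR] $0:line message` to stderr, and appends the same line to `${SCREEN_LOGDIR}/error.log` when screen logging is configured. A short usage sketch (script name and message are illustrative):

    source functions
    # on failure prints e.g.: [ERROR] ./example.sh:3 glance did not respond
    glance image-list || die $LINENO "glance did not respond"
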
@@ -71,10 +78,9 @@ function die_if_not_set() {
     (
         local exitcode=$?
         set +o xtrace
-        local evar=$1; shift
+        local evar=$2; shift
         if ! is_set $evar || [ $exitcode != 0 ]; then
-            echo $@
-            exit -1
+            die $@
         fi
     )
 }
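
`die_if_not_set` keeps its old contract but shifts its arguments by one: callers now pass `$LINENO` first, then the variable name (without a `$`), then the message. A sketch matching the updated call sites in this change (the variable and message are illustrative):

    VOL_ID=$(cinder list | grep $VOL_NAME | get_field 1)
    # aborts with an [ERROR] line if VOL_ID is empty or the pipeline failed
    die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
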
@@ -418,12 +424,10 @@ function exit_distro_not_supported {
     fi
 
     if [ $# -gt 0 ]; then
-        echo "Support for $DISTRO is incomplete: no support for $@"
+        die $LINENO "Support for $DISTRO is incomplete: no support for $@"
     else
-        echo "Support for $DISTRO is incomplete."
+        die $LINENO "Support for $DISTRO is incomplete."
     fi
-
-    exit 1
 }
 
 
@@ -1105,9 +1109,9 @@ function _ping_check_novanet() {
     fi
     if ! timeout $boot_timeout sh -c "$check_command"; then
         if [[ "$expected" = "True" ]]; then
-            echo "[Fail] Couldn't ping server"
+            die $LINENO "[Fail] Couldn't ping server"
         else
-            echo "[Fail] Could ping server"
+            die $LINENO "[Fail] Could ping server"
         fi
         exit 1
     fi
@@ -1131,8 +1135,7 @@ function _ssh_check_novanet() {
     local ACTIVE_TIMEOUT=$5
     local probe_cmd=""
     if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then
-        echo "server didn't become ssh-able!"
-        exit 1
+        die $LINENO "server didn't become ssh-able!"
     fi
 }
 
@@ -186,8 +186,7 @@ function start_glance() {
     screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
-        echo "g-api did not start"
-        exit 1
+        die $LINENO "g-api did not start"
     fi
 }
 
@@ -323,8 +323,7 @@ function start_keystone() {
     screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
     echo "Waiting for keystone to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v2.0/ >/dev/null; do sleep 1; done"; then
-        echo "keystone did not start"
-        exit 1
+        die $LINENO "keystone did not start"
     fi
 
     # Start proxies if enabled
@@ -542,8 +542,7 @@ function start_nova_api() {
     screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
     echo "Waiting for nova-api to start..."
     if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
-        echo "nova-api did not start"
-        exit 1
+        die $LINENO "nova-api did not start"
     fi
 
     # Start proxies if enabled
@@ -351,8 +351,7 @@ function start_quantum_service_and_check() {
|
| 351 | 351 |
screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" |
| 352 | 352 |
echo "Waiting for Quantum to start..." |
| 353 | 353 |
if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then |
| 354 |
- echo "Quantum did not start" |
|
| 355 |
- exit 1 |
|
| 354 |
+ die $LINENO "Quantum did not start" |
|
| 356 | 355 |
fi |
| 357 | 356 |
} |
| 358 | 357 |
|
| ... | ... |
@@ -396,8 +395,7 @@ function _configure_quantum_common() {
|
| 396 | 396 |
quantum_plugin_configure_common |
| 397 | 397 |
|
| 398 | 398 |
if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then |
| 399 |
- echo "Quantum plugin not set.. exiting" |
|
| 400 |
- exit 1 |
|
| 399 |
+ die $LINENO "Quantum plugin not set.. exiting" |
|
| 401 | 400 |
fi |
| 402 | 401 |
|
| 403 | 402 |
# If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR`` |
| ... | ... |
@@ -508,8 +506,7 @@ function _configure_quantum_service() {
|
| 508 | 508 |
if is_service_enabled $DATABASE_BACKENDS; then |
| 509 | 509 |
recreate_database $Q_DB_NAME utf8 |
| 510 | 510 |
else |
| 511 |
- echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." |
|
| 512 |
- exit 1 |
|
| 511 |
+ die $LINENO "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." |
|
| 513 | 512 |
fi |
| 514 | 513 |
|
| 515 | 514 |
# Update either configuration file with plugin |
| ... | ... |
@@ -659,11 +656,10 @@ function _ping_check_quantum() {
|
| 659 | 659 |
fi |
| 660 | 660 |
if ! timeout $timeout_sec sh -c "$check_command"; then |
| 661 | 661 |
if [[ "$expected" = "True" ]]; then |
| 662 |
- echo "[Fail] Couldn't ping server" |
|
| 662 |
+ die $LINENO "[Fail] Couldn't ping server" |
|
| 663 | 663 |
else |
| 664 |
- echo "[Fail] Could ping server" |
|
| 664 |
+ die $LINENO "[Fail] Could ping server" |
|
| 665 | 665 |
fi |
| 666 |
- exit 1 |
|
| 667 | 666 |
fi |
| 668 | 667 |
} |
| 669 | 668 |
|
| ... | ... |
@@ -677,8 +673,7 @@ function _ssh_check_quantum() {
|
| 677 | 677 |
local probe_cmd = "" |
| 678 | 678 |
probe_cmd=`_get_probe_cmd_prefix $from_net` |
| 679 | 679 |
if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success ; do sleep 1; done"; then
|
| 680 |
- echo "server didn't become ssh-able!" |
|
| 681 |
- exit 1 |
|
| 680 |
+ die $LINENO "server didn't become ssh-able!" |
|
| 682 | 681 |
fi |
| 683 | 682 |
} |
| 684 | 683 |
|
| ... | ... |
@@ -19,8 +19,7 @@ function setup_integration_bridge() {
|
| 19 | 19 |
conn=(${NVP_CONTROLLER_CONNECTION//\:/ })
|
| 20 | 20 |
OVS_MGR_IP=${conn[0]}
|
| 21 | 21 |
else |
| 22 |
- echo "Error - No controller specified. Unable to set a manager for OVS" |
|
| 23 |
- exit 1 |
|
| 22 |
+ die $LINENO "Error - No controller specified. Unable to set a manager for OVS" |
|
| 24 | 23 |
fi |
| 25 | 24 |
sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP |
| 26 | 25 |
} |
| ... | ... |
@@ -63,14 +62,12 @@ function quantum_plugin_configure_dhcp_agent() {
|
| 63 | 63 |
|
| 64 | 64 |
function quantum_plugin_configure_l3_agent() {
|
| 65 | 65 |
# Nicira plugin does not run L3 agent |
| 66 |
- echo "ERROR - q-l3 should must not be executed with Nicira plugin!" |
|
| 67 |
- exit 1 |
|
| 66 |
+ die $LINENO "q-l3 should must not be executed with Nicira plugin!" |
|
| 68 | 67 |
} |
| 69 | 68 |
|
| 70 | 69 |
function quantum_plugin_configure_plugin_agent() {
|
| 71 | 70 |
# Nicira plugin does not run L2 agent |
| 72 |
- echo "ERROR - q-agt must not be executed with Nicira plugin!" |
|
| 73 |
- exit 1 |
|
| 71 |
+ die $LINENO "q-agt must not be executed with Nicira plugin!" |
|
| 74 | 72 |
} |
| 75 | 73 |
|
| 76 | 74 |
function quantum_plugin_configure_service() {
|
| ... | ... |
@@ -93,8 +90,7 @@ function quantum_plugin_configure_service() {
|
| 93 | 93 |
if [[ "$DEFAULT_TZ_UUID" != "" ]]; then |
| 94 | 94 |
iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_tz_uuid $DEFAULT_TZ_UUID |
| 95 | 95 |
else |
| 96 |
- echo "ERROR - The nicira plugin won't work without a default transport zone." |
|
| 97 |
- exit 1 |
|
| 96 |
+ die $LINENO "The nicira plugin won't work without a default transport zone." |
|
| 98 | 97 |
fi |
| 99 | 98 |
if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then |
| 100 | 99 |
iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID |
| ... | ... |
@@ -114,8 +110,7 @@ function quantum_plugin_configure_service() {
|
| 114 | 114 |
# Only 1 controller can be specified in this case |
| 115 | 115 |
iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_controller_connection $NVP_CONTROLLER_CONNECTION |
| 116 | 116 |
else |
| 117 |
- echo "ERROR - The nicira plugin needs at least an NVP controller." |
|
| 118 |
- exit 1 |
|
| 117 |
+ die $LINENO "The nicira plugin needs at least an NVP controller." |
|
| 119 | 118 |
fi |
| 120 | 119 |
if [[ "$NVP_USER" != "" ]]; then |
| 121 | 120 |
iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_user $NVP_USER |
@@ -50,9 +50,7 @@ function quantum_plugin_configure_plugin_agent() {
     # REVISIT - also check kernel module support for GRE and patch ports
     OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
     if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then
-        echo "You are running OVS version $OVS_VERSION."
-        echo "OVS 1.4+ is required for tunneling between multiple hosts."
-        exit 1
+        die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts."
     fi
     iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
     iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP
@@ -39,8 +39,7 @@ function check_rpc_backend() {
     fi
 
     if is_service_enabled qpid && ! qpid_is_supported; then
-        echo "Qpid support is not available for this version of your distribution."
-        exit 1
+        die $LINENO "Qpid support is not available for this version of your distribution."
     fi
 }
 
@@ -55,8 +55,7 @@ GetDistro
 # allow you to safely override those settings.
 
 if [[ ! -r $TOP_DIR/stackrc ]]; then
-    echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
-    exit 1
+    log_error $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
 fi
 source $TOP_DIR/stackrc
 
@@ -93,8 +92,7 @@ disable_negated_services
 if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
-        echo "If you wish to run this script anyway run with FORCE=yes"
-        exit 1
+        die $LINENO "If you wish to run this script anyway run with FORCE=yes"
     fi
 fi
 
@@ -105,16 +103,14 @@ check_rpc_backend
 # ``stack.sh`` keeps function libraries here
 # Make sure ``$TOP_DIR/lib`` directory is present
 if [ ! -d $TOP_DIR/lib ]; then
-    echo "ERROR: missing devstack/lib"
-    exit 1
+    log_error $LINENO "missing devstack/lib"
 fi
 
 # ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config
 # templates and other useful files in the ``files`` subdirectory
 FILES=$TOP_DIR/files
 if [ ! -d $FILES ]; then
-    echo "ERROR: missing devstack/files"
-    exit 1
+    log_error $LINENO "missing devstack/files"
 fi
 
 SCREEN_NAME=${SCREEN_NAME:-stack}
@@ -248,9 +244,7 @@ if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then
         fi
     done
    if [ "$HOST_IP" == "" ]; then
-        echo "Could not determine host ip address."
-        echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted"
-        exit 1
+        die $LINENO "Could not determine host ip address. Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted"
    fi
 fi
 