Browse code

extended tests for setting forcePull in the 3 strategies; changes stemming from Ben's comments; some debug improvements; Michal's comments; address merge conflicts; adjust to extended test refactor

gabemontero authored on 2015/08/11 04:55:53
Showing 14 changed files
... ...
@@ -16,3 +16,4 @@ origin.iml
16 16
 *.pyc
17 17
 .tag*
18 18
 .project
19
+*.go~
... ...
@@ -385,6 +385,7 @@ oc exec -p ${registry_pod} du /registry > ${LOG_DIR}/prune-images.after.txt
385 385
 # make sure there were changes to the registry's storage
386 386
 [ -n "$(diff ${LOG_DIR}/prune-images.before.txt ${LOG_DIR}/prune-images.after.txt)" ]
387 387
 
388
+
388 389
 # UI e2e tests can be found in assets/test/e2e
389 390
 if [[ "$TEST_ASSETS" == "true" ]]; then
390 391
 
... ...
@@ -394,8 +395,13 @@ if [[ "$TEST_ASSETS" == "true" ]]; then
394 394
 		Xvfb :10 -screen 0 1024x768x24 -ac &
395 395
 	fi
396 396
 
397
-	echo "[INFO] Running UI e2e tests..."
397
+	echo "[INFO] Running UI e2e tests at time..."
398
+	echo `date`
398 399
 	pushd ${OS_ROOT}/assets > /dev/null
399 400
 		grunt test-e2e
401
+	echo "UI  e2e done at time "
402
+	echo `date`
403
+
400 404
 	popd > /dev/null
405
+
401 406
 fi
... ...
@@ -14,136 +14,38 @@ source ${OS_ROOT}/hack/util.sh
14 14
 source ${OS_ROOT}/hack/common.sh
15 15
 
16 16
 
17
-cleanup() {
18
-	out=$?
19
-	set +e
20
-	
21
-	echo "[INFO] Tearing down test"
22
-	kill_all_processes
23
-	rm -rf ${ETCD_DIR-}
24
-	echo "[INFO] Stopping k8s docker containers"; docker ps | awk 'index($NF,"k8s_")==1 { print $1 }' | xargs -l -r docker stop
25
-	if [[ -z "${SKIP_IMAGE_CLEANUP-}" ]]; then
26
-		echo "[INFO] Removing k8s docker containers"; docker ps -a | awk 'index($NF,"k8s_")==1 { print $1 }' | xargs -l -r docker rm
27
-	fi
28
-
29
-	set -e
30
-	echo "[INFO] Cleanup complete"
31
-	echo "[INFO] Exiting"
32
-	exit $out
33
-}
34
-
35
-# Check if we have ginkgo command
36 17
 set -e
37
-which ginkgo &>/dev/null || (echo 'Run: "go get github.com/onsi/ginkgo/ginkgo"' && exit 1)
18
+ginkgo_check_extended
38 19
 set +e
39 20
 
40
-# Compile the extended tests first to avoid waiting for OpenShift server to
41
-# start and fail sooner on compilation errors.
42
-echo "[INFO] Compiling test/extended package ..."
43
-GOPATH="${OS_ROOT}/Godeps/_workspace:${GOPATH}" \
44
-  go test -c ./test/extended -o ${OS_OUTPUT_BINPATH}/extended.test || exit 1
21
+compile_extended
45 22
 
46 23
 test_privileges
47 24
 
48 25
 echo "[INFO] Starting 'default' extended tests"
49 26
 
50
-TIME_SEC=1000
51
-TIME_MIN=$((60 * $TIME_SEC))
27
+dirs_image_env_setup_extended
52 28
 
53
-TEST_TYPE="openshift-extended-tests"
54
-TMPDIR="${TMPDIR:-"/tmp"}"
55
-BASETMPDIR="${TMPDIR}/${TEST_TYPE}"
56
-
57
-if [[ -d "${BASETMPDIR}" ]]; then
58
-	remove_tmp_dir $TEST_TYPE
59
-fi
60
-
61
-mkdir -p ${BASETMPDIR}
62
-
63
-# Use either the latest release built images, or latest.
64
-if [[ -z "${USE_IMAGES-}" ]]; then
65
-	USE_IMAGES='openshift/origin-${component}:latest'
66
-	if [[ -e "${OS_ROOT}/_output/local/releases/.commit" ]]; then
67
-		COMMIT="$(cat "${OS_ROOT}/_output/local/releases/.commit")"
68
-		USE_IMAGES="openshift/origin-\${component}:${COMMIT}"
69
-	fi
70
-fi
71
-
72
-LOG_DIR="${LOG_DIR:-${BASETMPDIR}/logs}"
73
-ARTIFACT_DIR="${ARTIFACT_DIR:-${BASETMPDIR}/artifacts}"
74
-DEFAULT_SERVER_IP=`ifconfig | grep -Ev "(127.0.0.1|172.17.42.1)" | grep "inet " | head -n 1 | sed 's/adr://' | awk '{print $2}'`
75
-API_HOST="${API_HOST:-${DEFAULT_SERVER_IP}}"
76 29
 setup_env_vars
77
-mkdir -p $LOG_DIR $ARTIFACT_DIR
78
-
79
-# use the docker bridge ip address until there is a good way to get the auto-selected address from master
80
-# this address is considered stable
81
-# used as a resolve IP to test routing
82
-CONTAINER_ACCESSIBLE_API_HOST="${CONTAINER_ACCESSIBLE_API_HOST:-172.17.42.1}"
83
-
84
-STI_CONFIG_FILE="${LOG_DIR}/stiAppConfig.json"
85
-DOCKER_CONFIG_FILE="${LOG_DIR}/dockerAppConfig.json"
86
-CUSTOM_CONFIG_FILE="${LOG_DIR}/customAppConfig.json"
87 30
 
88 31
 trap "exit" INT TERM
89
-trap "cleanup" EXIT
90
-
91
-# Setup
92
-echo "[INFO] `openshift version`"
93
-echo "[INFO] Server logs will be at:    ${LOG_DIR}/openshift.log"
94
-echo "[INFO] Test artifacts will be in: ${ARTIFACT_DIR}"
95
-echo "[INFO] Volumes dir is:            ${VOLUME_DIR}"
96
-echo "[INFO] Config dir is:             ${SERVER_CONFIG_DIR}"
97
-echo "[INFO] Using images:              ${USE_IMAGES}"
98
-
99
-# Start All-in-one server and wait for health
100
-echo "[INFO] Create certificates for the OpenShift server"
101
-# find the same IP that openshift start will bind to.  This allows access from pods that have to talk back to master
102
-ALL_IP_ADDRESSES=`ifconfig | grep "inet " | sed 's/adr://' | awk '{print $2}'`
103
-SERVER_HOSTNAME_LIST="${PUBLIC_MASTER_HOST},localhost"
104
-while read -r IP_ADDRESS
105
-do
106
-	SERVER_HOSTNAME_LIST="${SERVER_HOSTNAME_LIST},${IP_ADDRESS}"
107
-done <<< "${ALL_IP_ADDRESSES}"
32
+trap "cleanup_extended" EXIT
108 33
 
109
-configure_os_server
110
-
111
-export HOME="${FAKE_HOME_DIR}"
112
-# This directory must exist so Docker can store credentials in $HOME/.dockercfg
113
-mkdir -p ${FAKE_HOME_DIR}
114
-
115
-export ADMIN_KUBECONFIG="${MASTER_CONFIG_DIR}/admin.kubeconfig"
116
-CLUSTER_ADMIN_CONTEXT=$(oc config view --flatten -o template -t '{{index . "current-context"}}')
34
+info_msgs_ip_host_setup_extended
117 35
 
118
-if [[ "${API_SCHEME}" == "https" ]]; then
119
-	export CURL_CA_BUNDLE="${MASTER_CONFIG_DIR}/ca.crt"
120
-	export CURL_CERT="${MASTER_CONFIG_DIR}/admin.crt"
121
-	export CURL_KEY="${MASTER_CONFIG_DIR}/admin.key"
36
+configure_os_server
122 37
 
123
-	# Make oc use ${MASTER_CONFIG_DIR}/admin.kubeconfig, and ignore anything in the running user's $HOME dir
124
-	sudo chmod -R a+rwX "${ADMIN_KUBECONFIG}"
125
-	echo "[INFO] To debug: export ADMIN_KUBECONFIG=$ADMIN_KUBECONFIG"
126
-fi
38
+auth_setup_extended
127 39
 
128 40
 start_os_server
129 41
 
130
-# install the router
131
-echo "[INFO] Installing the router"
132
-echo '{"kind":"ServiceAccount","apiVersion":"v1","metadata":{"name":"router"}}' | oc create -f - --config="${ADMIN_KUBECONFIG}"
133
-oc get scc privileged -o json --config="${ADMIN_KUBECONFIG}" | sed '/\"users\"/a \"system:serviceaccount:default:router\",' | oc replace scc privileged -f - --config="${ADMIN_KUBECONFIG}"
134
-openshift admin router --create --credentials="${MASTER_CONFIG_DIR}/openshift-router.kubeconfig" --config="${ADMIN_KUBECONFIG}" --images="${USE_IMAGES}" --service-account=router
42
+install_router_extended
135 43
 
136
-# install the registry. The --mount-host option is provided to reuse local storage.
137
-echo "[INFO] Installing the registry"
138
-openshift admin registry --create --credentials="${MASTER_CONFIG_DIR}/openshift-registry.kubeconfig" --config="${ADMIN_KUBECONFIG}" --images="${USE_IMAGES}"
44
+install_registry_extended
139 45
 
140 46
 wait_for_command '[[ "$(oc get endpoints docker-registry --output-version=v1 -t "{{ if .subsets }}{{ len .subsets }}{{ else }}0{{ end }}" --config=/tmp/openshift-extended-tests/openshift.local.config/master/admin.kubeconfig || echo "0")" != "0" ]]' $((5*TIME_MIN))
141 47
 
142
-echo "[INFO] Creating image streams"
143
-oc create -n openshift -f examples/image-streams/image-streams-centos7.json --config="${ADMIN_KUBECONFIG}"
144
-
145
-registry="$(dig @${API_HOST} "docker-registry.default.svc.cluster.local." +short A | head -n 1)"
146
-echo "[INFO] Registry IP - ${registry}"
48
+create_image_streams_extended
147 49
 
148 50
 echo "[INFO] MASTER IP - ${MASTER_ADDR}"
149 51
 echo "[INFO] SERVER CONFIG PATH - ${SERVER_CONFIG_DIR}"
150 52
new file mode 100755
... ...
@@ -0,0 +1,64 @@
0
+#!/bin/bash
1
+#
2
+# This scripts starts the OpenShift server where
3
+# the OpenShift Docker registry and router are installed,
4
+# and then the forcepull tests are launched.
5
+# We intentionally do not run the force pull tests in parallel
6
+# given the tagging based image corruption that occurs - do not
7
+# want 2 tests corrupting an image differently at the same time.
8
+
9
+set -o nounset
10
+set -o pipefail
11
+
12
+OS_ROOT=$(dirname "${BASH_SOURCE}")/../../..
13
+cd "${OS_ROOT}"
14
+
15
+source ${OS_ROOT}/hack/util.sh
16
+source ${OS_ROOT}/hack/common.sh
17
+
18
+
19
+set -e
20
+ginkgo_check_extended
21
+set +e
22
+
23
+compile_extended
24
+
25
+test_privileges
26
+
27
+echo "[INFO] Starting 'forcepull' extended tests"
28
+
29
+dirs_image_env_setup_extended
30
+
31
+setup_env_vars
32
+
33
+trap "exit" INT TERM
34
+trap "cleanup_extended" EXIT
35
+
36
+info_msgs_ip_host_setup_extended
37
+
38
+configure_os_server
39
+
40
+auth_setup_extended
41
+
42
+start_os_server
43
+
44
+install_router_extended
45
+
46
+install_registry_extended
47
+
48
+# I've seen this hang, even though when I run it manually concurrently it works ... simply bypassing this check has simply accelerated the test ... so far, not seeing value in this verification ... the registry seems to come up consistently
49
+#wait_for_command '[[ "$(oc get endpoints docker-registry --output-version=v1 -t "{{ if .subsets }}{{ len .subsets }}{{ else }}0{{ end }}" --config=/tmp/openshift-extended-tests/openshift.local.config/master/admin.kubeconfig || echo "0")" != "0" ]]' $((5*TIME_MIN))
50
+
51
+create_image_streams_extended
52
+
53
+echo "[INFO] MASTER IP - ${MASTER_ADDR}"
54
+echo "[INFO] SERVER CONFIG PATH - ${SERVER_CONFIG_DIR}"
55
+echo "[INFO] Starting extended tests for forcepull ..."
56
+
57
+# Run the tests
58
+pushd ${OS_ROOT}/test/extended >/dev/null
59
+export KUBECONFIG="${ADMIN_KUBECONFIG}"
60
+export EXTENDED_TEST_PATH="${OS_ROOT}/test/extended"
61
+# we don't run in parallel with this suite - do not want different tests tagging the same image in different was at the same time
62
+ginkgo -progress -stream -v -focus="forcepull:" ${OS_OUTPUT_BINPATH}/extended.test
63
+popd >/dev/null
... ...
@@ -71,18 +71,26 @@ function configure_os_server {
71 71
 # start_os_server starts the OS server, exports the PID of the OS server
72 72
 # and waits until OS server endpoints are available
73 73
 function start_os_server {
74
-  echo "[INFO] Starting OpenShift server"
75
-  sudo env "PATH=${PATH}" OPENSHIFT_PROFILE=web OPENSHIFT_ON_PANIC=crash openshift start \
76
-    --master-config=${MASTER_CONFIG_DIR}/master-config.yaml \
77
-    --node-config=${NODE_CONFIG_DIR}/node-config.yaml \
78
-    --loglevel=4 \
79
-    &> "${BASETMPDIR}/openshift.log" &
80
-  export OS_PID=$!
81
-
82
-  wait_for_url "${KUBELET_SCHEME}://${KUBELET_HOST}:${KUBELET_PORT}/healthz" "[INFO] kubelet: " 0.5 60
83
-  wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz" "apiserver: " 0.25 80
84
-  wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz/ready" "apiserver(ready): " 0.25 80
85
-  wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/api/v1/nodes/${KUBELET_HOST}" "apiserver(nodes): " 0.25 80
74
+    echo "[INFO] Scan of OpenShift related processes already up via ps -ef  | grep openshift : "
75
+    ps -ef | grep openshift
76
+    echo "[INFO] Starting OpenShift server"
77
+    sudo env "PATH=${PATH}" OPENSHIFT_PROFILE=web OPENSHIFT_ON_PANIC=crash openshift start \
78
+	 --master-config=${MASTER_CONFIG_DIR}/master-config.yaml \
79
+	 --node-config=${NODE_CONFIG_DIR}/node-config.yaml \
80
+	 --loglevel=4 \
81
+    &> "${LOG_DIR}/openshift.log" &
82
+    export OS_PID=$!
83
+
84
+    echo "[INFO] OpenShift server start at: "
85
+    echo `date`
86
+  
87
+    wait_for_url "${KUBELET_SCHEME}://${KUBELET_HOST}:${KUBELET_PORT}/healthz" "[INFO] kubelet: " 0.5 60
88
+    wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz" "apiserver: " 0.25 80
89
+    wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz/ready" "apiserver(ready): " 0.25 80
90
+    wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/api/v1/nodes/${KUBELET_HOST}" "apiserver(nodes): " 0.25 80
91
+    
92
+    echo "[INFO] OpenShift server health checks done at: "
93
+    echo `date`
86 94
 }
87 95
 
88 96
 # test_privileges tests if the testing machine has iptables available
... ...
@@ -217,7 +225,7 @@ function wait_for_url {
217 217
 
218 218
   set +e
219 219
   cmd="env -i CURL_CA_BUNDLE=${CURL_CA_BUNDLE:-} $(which curl) ${clientcert_args} -fs ${url}"
220
-  #echo "run: ${cmd}"
220
+  echo "wait_for_url - run: ${cmd}"
221 221
   for i in $(seq 1 $times); do
222 222
     out=$(${cmd})
223 223
     if [ $? -eq 0 ]; then
... ...
@@ -225,6 +233,7 @@ function wait_for_url {
225 225
       echo "${prefix}${out}"
226 226
       return 0
227 227
     fi
228
+    echo " wait_for_url: ${cmd} non zero rc: ${out}"
228 229
     sleep $wait
229 230
   done
230 231
   echo "ERROR: gave up waiting for ${url}"
... ...
@@ -403,6 +412,146 @@ function delete_large_and_empty_logs()
403 403
   find ${LOG_DIR} -name *.log -size 0 -exec echo Deleting {} because it is empty. \; -exec rm -f {} \;
404 404
 }
405 405
 
406
+######
407
+# start of common functions for extended test group's run.sh scripts
408
+######
409
+
410
+# exit run if ginkgo not installed
411
+function ginkgo_check_extended {
412
+    which ginkgo &>/dev/null || (echo 'Run: "go get github.com/onsi/ginkgo/ginkgo"' && exit 1)
413
+}
414
+
415
+# create extended.test binary to run extended tests
416
+function compile_extended {
417
+    # Compile the extended tests first to avoid waiting for OpenShift server to
418
+    # start and fail sooner on compilation errors.
419
+    echo "[INFO] Compiling test/extended package ..."
420
+    GOPATH="${OS_ROOT}/Godeps/_workspace:${GOPATH}" \
421
+	  go test -c ./test/extended -o ${OS_OUTPUT_BINPATH}/extended.test || exit 1
422
+    export GOPATH
423
+}
424
+
425
+# various env var and setup for directories and image related information
426
+function dirs_image_env_setup_extended {
427
+    export TIME_SEC=1000
428
+    export TIME_MIN=$((60 * $TIME_SEC))
429
+
430
+    export TEST_TYPE="openshift-extended-tests"
431
+    export TMPDIR="${TMPDIR:-"/tmp"}"
432
+    export BASETMPDIR="${TMPDIR}/${TEST_TYPE}"
433
+
434
+    if [[ -d "${BASETMPDIR}" ]]; then
435
+	remove_tmp_dir $TEST_TYPE
436
+    fi
437
+
438
+    mkdir -p ${BASETMPDIR}
439
+
440
+    # Use either the latest release built images, or latest.
441
+    if [[ -z "${USE_IMAGES-}" ]]; then
442
+	export USE_IMAGES='openshift/origin-${component}:latest'
443
+	if [[ -e "${OS_ROOT}/_output/local/releases/.commit" ]]; then
444
+	    export COMMIT="$(cat "${OS_ROOT}/_output/local/releases/.commit")"
445
+	    export USE_IMAGES="openshift/origin-\${component}:${COMMIT}"
446
+	fi
447
+    fi
448
+
449
+    export LOG_DIR="${LOG_DIR:-${BASETMPDIR}/logs}"
450
+    export ARTIFACT_DIR="${ARTIFACT_DIR:-${BASETMPDIR}/artifacts}"
451
+    export DEFAULT_SERVER_IP=`ifconfig | grep -Ev "(127.0.0.1|172.17.42.1)" | grep "inet " | head -n 1 | sed 's/adr://' | awk '{print $2}'`
452
+    export API_HOST="${API_HOST:-${DEFAULT_SERVER_IP}}"
453
+    mkdir -p $LOG_DIR $ARTIFACT_DIR
454
+}
455
+
456
+# cleanup function for the extended tests
457
+function cleanup_extended {
458
+	out=$?
459
+	set +e
460
+	
461
+	echo "[INFO] Tearing down test"
462
+	kill_all_processes
463
+	rm -rf ${ETCD_DIR-}
464
+	echo "[INFO] Stopping k8s docker containers"; docker ps | awk 'index($NF,"k8s_")==1 { print $1 }' | xargs -l -r docker stop
465
+	if [[ -z "${SKIP_IMAGE_CLEANUP-}" ]]; then
466
+		echo "[INFO] Removing k8s docker containers"; docker ps -a | awk 'index($NF,"k8s_")==1 { print $1 }' | xargs -l -r docker rm
467
+	fi
468
+
469
+	set -e
470
+	echo "[INFO] Cleanup complete"
471
+	echo "[INFO] Exiting"
472
+	exit $out
473
+}    
474
+
475
+# ip/host setup function for the extended tests, along with some debug
476
+function info_msgs_ip_host_setup_extended {
477
+    echo "[INFO] `openshift version`"
478
+    echo "[INFO] Server logs will be at:    ${LOG_DIR}/openshift.log"
479
+    echo "[INFO] Test artifacts will be in: ${ARTIFACT_DIR}"
480
+    echo "[INFO] Volumes dir is:            ${VOLUME_DIR}"
481
+    echo "[INFO] Config dir is:             ${SERVER_CONFIG_DIR}"
482
+    echo "[INFO] Using images:              ${USE_IMAGES}"
483
+
484
+    # Start All-in-one server and wait for health
485
+    echo "[INFO] Create certificates for the OpenShift server"
486
+    # find the same IP that openshift start will bind to.  This allows access from pods that have to talk back to master
487
+    ALL_IP_ADDRESSES=`ifconfig | grep "inet " | sed 's/adr://' | awk '{print $2}'`
488
+    SERVER_HOSTNAME_LIST="${PUBLIC_MASTER_HOST},localhost"
489
+    while read -r IP_ADDRESS
490
+    do
491
+	SERVER_HOSTNAME_LIST="${SERVER_HOSTNAME_LIST},${IP_ADDRESS}"
492
+    done <<< "${ALL_IP_ADDRESSES}"
493
+    export ALL_IP_ADDRESSES
494
+    export SERVER_HOSTNAME_LIST
495
+}
496
+
497
+# auth set up for extended tests
498
+function auth_setup_extended {
499
+    export HOME="${FAKE_HOME_DIR}"
500
+    # This directory must exist so Docker can store credentials in $HOME/.dockercfg
501
+    mkdir -p ${FAKE_HOME_DIR}
502
+
503
+    export ADMIN_KUBECONFIG="${MASTER_CONFIG_DIR}/admin.kubeconfig"
504
+    export CLUSTER_ADMIN_CONTEXT=$(oc config view --flatten -o template -t '{{index . "current-context"}}')
505
+
506
+    if [[ "${API_SCHEME}" == "https" ]]; then
507
+	export CURL_CA_BUNDLE="${MASTER_CONFIG_DIR}/ca.crt"
508
+	export CURL_CERT="${MASTER_CONFIG_DIR}/admin.crt"
509
+	export CURL_KEY="${MASTER_CONFIG_DIR}/admin.key"
510
+
511
+	# Make oc use ${MASTER_CONFIG_DIR}/admin.kubeconfig, and ignore anything in the running user's $HOME dir
512
+	sudo chmod -R a+rwX "${ADMIN_KUBECONFIG}"
513
+	echo "[INFO] To debug: export ADMIN_KUBECONFIG=$ADMIN_KUBECONFIG"
514
+    fi
515
+
516
+}
517
+
518
+# install the router for the extended tests
519
+function install_router_extended {
520
+    echo "[INFO] Installing the router"
521
+    echo '{"kind":"ServiceAccount","apiVersion":"v1","metadata":{"name":"router"}}' | oc create -f - --config="${ADMIN_KUBECONFIG}"
522
+    oc get scc privileged -o json --config="${ADMIN_KUBECONFIG}" | sed '/\"users\"/a \"system:serviceaccount:default:router\",' | oc replace scc privileged -f - --config="${ADMIN_KUBECONFIG}"
523
+    openshift admin router --create --credentials="${MASTER_CONFIG_DIR}/openshift-router.kubeconfig" --config="${ADMIN_KUBECONFIG}" --images="${USE_IMAGES}" --service-account=router
524
+}
525
+
526
+# install registry for the extended tests
527
+function install_registry_extended {
528
+    # The --mount-host option is provided to reuse local storage.
529
+    echo "[INFO] Installing the registry"
530
+    openshift admin registry --create --credentials="${MASTER_CONFIG_DIR}/openshift-registry.kubeconfig" --config="${ADMIN_KUBECONFIG}" --images="${USE_IMAGES}"
531
+}
532
+
533
+# create the images streams for the extended tests suites
534
+function create_image_streams_extended {
535
+    echo "[INFO] Creating image streams"
536
+    oc create -n openshift -f examples/image-streams/image-streams-centos7.json --config="${ADMIN_KUBECONFIG}"
537
+
538
+    registry="$(dig @${API_HOST} "docker-registry.default.svc.cluster.local." +short A | head -n 1)"
539
+    echo "[INFO] Registry IP - ${registry}"
540
+}
541
+
542
+######
543
+# end of common functions for extended test group's run.sh scripts
544
+######
545
+
406 546
 # Handler for when we exit automatically on an error.
407 547
 # Borrowed from https://gist.github.com/ahendrix/7030300
408 548
 os::log::errexit() {
... ...
@@ -539,4 +688,4 @@ find_files() {
539 539
         -o -wholename '*/Godeps/*' \
540 540
       \) -prune \
541 541
     \) -name '*.go' | sort -u
542
-}
543 542
\ No newline at end of file
543
+}
544 544
new file mode 100644
... ...
@@ -0,0 +1,190 @@
0
+package builds
1
+
2
+/*
3
+This particular builds test suite is not part of the "default" group,  because its testing
4
+centers around manipulation of images tags to confirm whether the `docker pull` invocation occurs
5
+correctly based on `forcePull` setting in the BuildConfig, and rather than spend time creating / pulling down separate, test only,
6
+images for each test scenario, we reuse existing images and ensure that the tests do not run in parallel, and step on each
7
+others toes by tagging the same, existing images in ways which conflict.
8
+*/
9
+
10
+import (
11
+	"fmt"
12
+
13
+	g "github.com/onsi/ginkgo"
14
+	o "github.com/onsi/gomega"
15
+
16
+	exutil "github.com/openshift/origin/test/extended/util"
17
+	"time"
18
+)
19
+
20
+var (
21
+	resetData map[string]string
22
+)
23
+
24
+const (
25
+	buildPrefix = "ruby-sample-build"
26
+	buildName   = buildPrefix + "-1"
27
+	s2iDockBldr = "docker.io/openshift/ruby-20-centos7"
28
+	custBldr    = "docker.io/openshift/origin-custom-docker-builder"
29
+)
30
+
31
+/*
32
+If docker.io is not responding to requests in a timely manner, this test suite will be adversely affected.
33
+
34
+If you suspect such a situation, attempt pulling some openshift images other than ruby-20-centos7 or origin-custom-docker-builder
35
+while this test is running and compare results.  Restarting your docker daemon, assuming you can ping docker.io quickly, could
36
+be a quick fix.
37
+*/
38
+
39
+var _ = g.BeforeSuite(func() {
40
+	// do a pull initially just to insure have the latest version
41
+	exutil.PullImage(s2iDockBldr)
42
+	exutil.PullImage(custBldr)
43
+	// save hex image IDs for image reset after corruption
44
+	tags := []string{s2iDockBldr + ":latest", custBldr + ":latest"}
45
+	hexIDs, ierr := exutil.GetImageIDForTags(tags)
46
+	o.Expect(ierr).NotTo(o.HaveOccurred())
47
+	for _, hexID := range hexIDs {
48
+		g.By(fmt.Sprintf("\n%s FORCE PULL TEST:  hex id %s ", time.Now().Format(time.RFC850), hexID))
49
+	}
50
+	o.Expect(len(hexIDs)).To(o.Equal(2))
51
+	resetData = map[string]string{s2iDockBldr: hexIDs[0], custBldr: hexIDs[1]}
52
+	g.By(fmt.Sprintf("\n%s FORCE PULL TEST:  hex id for s2i/docker %s and for custom %s ", time.Now().Format(time.RFC850), hexIDs[0], hexIDs[1]))
53
+})
54
+
55
+var _ = g.Describe("forcepull: ForcePull from OpenShift induced builds (vs. sti)", func() {
56
+	defer g.GinkgoRecover()
57
+
58
+	g.Describe("\n FORCE PULL TEST:  Force pull and s2i builder", func() {
59
+		// corrupt the s2i builder image
60
+		g.BeforeEach(func() {
61
+			exutil.CorruptImage(s2iDockBldr, custBldr, "s21")
62
+		})
63
+
64
+		g.AfterEach(func() {
65
+			exutil.ResetImage(resetData)
66
+		})
67
+
68
+		g.Context("\n FORCE PULL TEST:  when s2i force pull is false and the image is bad", func() {
69
+			var (
70
+				oc = exutil.NewCLI("force-pull-s2i-false-env", exutil.KubeConfigPath())
71
+			)
72
+			g.It("\n FORCE PULL TEST s2i false", func() {
73
+				fpFalseS2I := exutil.FixturePath("fixtures", "forcepull-false-s2i.json")
74
+				g.By(fmt.Sprintf("\n%s FORCE PULL TEST s2i false:  calling create on %s", time.Now().Format(time.RFC850), fpFalseS2I))
75
+				exutil.StartBuild(fpFalseS2I, buildPrefix, oc)
76
+
77
+				exutil.WaitForBuild("FORCE PULL TEST s2i false:  ", buildName, oc)
78
+
79
+				exutil.VerifyImagesSame(s2iDockBldr, custBldr, "FORCE PULL TEST s2i false:  ")
80
+
81
+			})
82
+		})
83
+
84
+		g.Context("\n FORCE PULL TEST:  when s2i force pull is true and the image is bad", func() {
85
+			var (
86
+				oc = exutil.NewCLI("force-pull-s2i-true-env", exutil.KubeConfigPath())
87
+			)
88
+			g.It("\n FORCE PULL TEST s2i true", func() {
89
+				fpTrueS2I := exutil.FixturePath("fixtures", "forcepull-true-s2i.json")
90
+				g.By(fmt.Sprintf("\n%s FORCE PULL TEST s2i true:  calling create on %s", time.Now().Format(time.RFC850), fpTrueS2I))
91
+				exutil.StartBuild(fpTrueS2I, buildPrefix, oc)
92
+
93
+				exutil.WaitForBuild("FORCE PULL TEST s2i true: ", buildName, oc)
94
+
95
+				exutil.VerifyImagesDifferent(s2iDockBldr, custBldr, "FORCE PULL TEST s2i true:  ")
96
+			})
97
+		})
98
+	})
99
+
100
+	g.Describe("\n FORCE PULL TEST:  Force pull and docker builder", func() {
101
+		// corrupt the docker builder image
102
+		g.BeforeEach(func() {
103
+			exutil.CorruptImage(s2iDockBldr, custBldr, "docker")
104
+		})
105
+
106
+		g.AfterEach(func() {
107
+			exutil.ResetImage(resetData)
108
+		})
109
+
110
+		g.Context("\n FORCE PULL TEST:  when docker force pull is false and the image is bad", func() {
111
+			var (
112
+				oc = exutil.NewCLI("force-pull-dock-false-env", exutil.KubeConfigPath())
113
+			)
114
+			g.It("\n FORCE PULL TEST dock false", func() {
115
+				fpFalseDock := exutil.FixturePath("fixtures", "forcepull-false-dock.json")
116
+				g.By(fmt.Sprintf("\n%s FORCE PULL TEST dock false:  calling create on %s", time.Now().Format(time.RFC850), fpFalseDock))
117
+				exutil.StartBuild(fpFalseDock, buildPrefix, oc)
118
+
119
+				exutil.WaitForBuild("FORCE PULL TEST dock false", buildName, oc)
120
+
121
+				exutil.VerifyImagesSame(s2iDockBldr, custBldr, "FORCE PULL TEST docker false:  ")
122
+
123
+			})
124
+		})
125
+
126
+		g.Context("\n FORCE PULL TEST:  docker when force pull is true and the image is bad", func() {
127
+			var (
128
+				oc = exutil.NewCLI("force-pull-dock-true-env", exutil.KubeConfigPath())
129
+			)
130
+			g.It("\n FORCE PULL TEST dock true", func() {
131
+				fpTrueDock := exutil.FixturePath("fixtures", "forcepull-true-dock.json")
132
+				g.By(fmt.Sprintf("\n%s FORCE PULL TEST dock true:  calling create on %s", time.Now().Format(time.RFC850), fpTrueDock))
133
+				exutil.StartBuild(fpTrueDock, buildPrefix, oc)
134
+
135
+				exutil.WaitForBuild("FORCE PULL TEST dock true", buildName, oc)
136
+
137
+				exutil.VerifyImagesDifferent(s2iDockBldr, custBldr, "FORCE PULL TEST docker true:  ")
138
+
139
+			})
140
+		})
141
+	})
142
+
143
+	g.Describe("\n FORCE PULL TEST:  Force pull and custom builder", func() {
144
+		// corrupt the custom builder image
145
+		g.BeforeEach(func() {
146
+			exutil.CorruptImage(custBldr, s2iDockBldr, "custom")
147
+		})
148
+
149
+		g.AfterEach(func() {
150
+			exutil.ResetImage(resetData)
151
+		})
152
+
153
+		g.Context("\n FORCE PULL TEST:  when custom force pull is false and the image is bad", func() {
154
+			var (
155
+				oc = exutil.NewCLI("force-pull-cust-false-env", exutil.KubeConfigPath())
156
+			)
157
+			g.It("\nFORCE PULL TEST cust false", func() {
158
+				fpFalseCust := exutil.FixturePath("fixtures", "forcepull-false-cust.json")
159
+				g.By(fmt.Sprintf("\n%s FORCE PULL TEST cust false:  calling create on %s", time.Now().Format(time.RFC850), fpFalseCust))
160
+				exutil.StartBuild(fpFalseCust, buildPrefix, oc)
161
+
162
+				g.By("\nFORCE PULL TEST cust false:  expecting the image is not refreshed")
163
+
164
+				exutil.WaitForBuild("FORCE PULL TEST cust false", buildName, oc)
165
+
166
+				exutil.VerifyImagesSame(s2iDockBldr, custBldr, "FORCE PULL TEST custom false:  ")
167
+			})
168
+		})
169
+
170
+		g.Context("\n FORCE PULL TEST:  when custom force pull is true and the image is bad", func() {
171
+			var (
172
+				oc = exutil.NewCLI("force-pull-cust-true-env", exutil.KubeConfigPath())
173
+			)
174
+			g.It("\n FORCE PULL TEST cust true", func() {
175
+				fpTrueCust := exutil.FixturePath("fixtures", "forcepull-true-cust.json")
176
+				g.By(fmt.Sprintf("\n%s FORCE PULL TEST cust true:  calling create on %s", time.Now().Format(time.RFC850), fpTrueCust))
177
+				exutil.StartBuild(fpTrueCust, buildPrefix, oc)
178
+
179
+				g.By("\n FORCE PULL TEST cust true:  expecting the image is refreshed")
180
+
181
+				exutil.WaitForBuild("FORCE PULL TEST cust true", buildName, oc)
182
+
183
+				exutil.VerifyImagesDifferent(s2iDockBldr, custBldr, "FORCE PULL TEST custom true:  ")
184
+
185
+			})
186
+		})
187
+
188
+	})
189
+})
0 190
new file mode 100644
... ...
@@ -0,0 +1,37 @@
0
+{
1
+    "kind": "BuildConfig",
2
+    "apiVersion": "v1",
3
+    "metadata": {
4
+        "name": "ruby-sample-build",
5
+        "creationTimestamp": null,
6
+        "labels": {
7
+            "name": "ruby-sample-build"
8
+        }
9
+    },
10
+    "spec": {
11
+        "triggers": [],
12
+        "source": {
13
+            "type": "Git",
14
+            "git": {
15
+		"uri": "https://github.com/openshift/ruby-hello-world.git"
16
+            }
17
+        },
18
+        "strategy": {
19
+            "type": "Custom",
20
+            "customStrategy": {
21
+		"from": {
22
+		    "kind": "DockerImage",
23
+		    "name": "docker.io/openshift/origin-custom-docker-builder"
24
+		},
25
+		"env": [
26
+		    {
27
+			"name": "OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE",
28
+			"value": "docker.io/openshift/origin-custom-docker-builder"
29
+		    }
30
+		],
31
+		"exposeDockerSocket": true,
32
+		"forcePull":false
33
+            }
34
+        }
35
+    }
36
+}
0 37
new file mode 100644
... ...
@@ -0,0 +1,30 @@
0
+{
1
+    "kind": "BuildConfig",
2
+    "apiVersion": "v1",
3
+    "metadata": {
4
+        "name": "ruby-sample-build",
5
+        "creationTimestamp": null,
6
+        "labels": {
7
+            "name": "ruby-sample-build"
8
+        }
9
+    },
10
+    "spec": {
11
+        "triggers": [],
12
+        "source": {
13
+            "type": "Git",
14
+            "git": {
15
+		"uri": "https://github.com/openshift/ruby-hello-world.git"
16
+            }
17
+        },
18
+        "strategy": {
19
+            "type": "Docker",
20
+            "dockerStrategy": {
21
+		"from": {
22
+		    "kind": "DockerImage",
23
+		    "name": "docker.io/openshift/ruby-20-centos7"
24
+		},
25
+		"forcePull": false
26
+            }
27
+        }
28
+    }
29
+}
0 30
new file mode 100644
... ...
@@ -0,0 +1,30 @@
0
+{
1
+    "kind": "BuildConfig",
2
+    "apiVersion": "v1",
3
+    "metadata": {
4
+        "name": "ruby-sample-build",
5
+        "creationTimestamp": null,
6
+        "labels": {
7
+            "name": "ruby-sample-build"
8
+        }
9
+    },
10
+    "spec": {
11
+        "triggers": [],
12
+        "source": {
13
+            "type": "Git",
14
+            "git": {
15
+		"uri": "https://github.com/openshift/ruby-hello-world.git"
16
+            }
17
+        },
18
+        "strategy": {
19
+            "type": "Source",
20
+            "sourceStrategy": {
21
+		"from": {
22
+		    "kind": "DockerImage",
23
+		    "name": "docker.io/openshift/ruby-20-centos7"
24
+		},
25
+		"forcePull": false
26
+            }
27
+        }
28
+    }
29
+}
0 30
new file mode 100644
... ...
@@ -0,0 +1,37 @@
0
+{
1
+    "kind": "BuildConfig",
2
+    "apiVersion": "v1",
3
+    "metadata": {
4
+        "name": "ruby-sample-build",
5
+        "creationTimestamp": null,
6
+        "labels": {
7
+            "name": "ruby-sample-build"
8
+        }
9
+    },
10
+    "spec": {
11
+        "triggers": [],
12
+        "source": {
13
+            "type": "Git",
14
+            "git": {
15
+		"uri": "https://github.com/openshift/ruby-hello-world.git"
16
+            }
17
+        },
18
+        "strategy": {
19
+            "type": "Custom",
20
+            "customStrategy": {
21
+		"from": {
22
+		    "kind": "DockerImage",
23
+		    "name": "docker.io/openshift/origin-custom-docker-builder"
24
+		},
25
+		"env": [
26
+		    {
27
+			"name": "OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE",
28
+			"value": "docker.io/openshift/origin-custom-docker-builder"
29
+		    }
30
+		],
31
+		"exposeDockerSocket": true,
32
+		"forcePull":true
33
+            }
34
+        }
35
+    }
36
+}
0 37
new file mode 100644
... ...
@@ -0,0 +1,30 @@
0
+{
1
+    "kind": "BuildConfig",
2
+    "apiVersion": "v1",
3
+    "metadata": {
4
+        "name": "ruby-sample-build",
5
+        "creationTimestamp": null,
6
+        "labels": {
7
+            "name": "ruby-sample-build"
8
+        }
9
+    },
10
+    "spec": {
11
+        "triggers": [],
12
+        "source": {
13
+            "type": "Git",
14
+            "git": {
15
+		"uri": "https://github.com/openshift/ruby-hello-world.git"
16
+            }
17
+        },
18
+        "strategy": {
19
+            "type": "Docker",
20
+            "dockerStrategy": {
21
+		"from": {
22
+		    "kind": "DockerImage",
23
+		    "name": "docker.io/openshift/ruby-20-centos7"
24
+		},
25
+		"forcePull": true
26
+            }
27
+        }
28
+    }
29
+}
0 30
new file mode 100644
... ...
@@ -0,0 +1,30 @@
0
+{
1
+    "kind": "BuildConfig",
2
+    "apiVersion": "v1",
3
+    "metadata": {
4
+        "name": "ruby-sample-build",
5
+        "creationTimestamp": null,
6
+        "labels": {
7
+            "name": "ruby-sample-build"
8
+        }
9
+    },
10
+    "spec": {
11
+        "triggers": [],
12
+        "source": {
13
+            "type": "Git",
14
+            "git": {
15
+		"uri": "https://github.com/openshift/ruby-hello-world.git"
16
+            }
17
+        },
18
+        "strategy": {
19
+            "type": "Source",
20
+            "sourceStrategy": {
21
+		"from": {
22
+		    "kind": "DockerImage",
23
+		    "name": "docker.io/openshift/ruby-20-centos7"
24
+		},
25
+		"forcePull": true
26
+            }
27
+        }
28
+    }
29
+}
0 30
new file mode 100644
... ...
@@ -0,0 +1,86 @@
0
+package util
1
+
2
+import (
3
+	"fmt"
4
+	dockerClient "github.com/fsouza/go-dockerclient"
5
+	tutil "github.com/openshift/origin/test/util"
6
+)
7
+
8
+//TagImage, as the name implies, will apply the "tagor" tag string to the image current tagged by "tagee"
9
+func TagImage(tagee, tagor string) error {
10
+	client, dcerr := tutil.NewDockerClient()
11
+	if dcerr != nil {
12
+		return dcerr
13
+	}
14
+	opts := dockerClient.TagImageOptions{
15
+		Repo:  tagee,
16
+		Tag:   "latest",
17
+		Force: true,
18
+	}
19
+	return client.TagImage(tagor, opts)
20
+}
21
+
22
+//PullImage, as the name implies, initiates the equivalent of a `docker pull` for the "name" parameter
23
+func PullImage(name string) error {
24
+	client, err := tutil.NewDockerClient()
25
+	if err != nil {
26
+		return err
27
+	}
28
+	opts := dockerClient.PullImageOptions{
29
+		Repository: name,
30
+		Tag:        "latest",
31
+	}
32
+	return client.PullImage(opts, dockerClient.AuthConfiguration{})
33
+}
34
+
35
// MissingTagError records the image tags that could not be resolved against
// the list of images returned from docker (see GetImageIDForTags).
type MissingTagError struct {
	Tags []string
}

// Error implements the error interface, naming the tags that were not found.
func (mte MissingTagError) Error() string {
	return fmt.Sprintf("the tag %s passed in was invalid, and not found in the list of images returned from docker", mte.Tags)
}
42
+
43
+//GetImageIDForTags will obtain the hexadecimal IDs for the array of human readible image tags IDs provided
44
+func GetImageIDForTags(comps []string) ([]string, error) {
45
+	client, dcerr := tutil.NewDockerClient()
46
+	if dcerr != nil {
47
+		return nil, dcerr
48
+	}
49
+	imageList, serr := client.ListImages(dockerClient.ListImagesOptions{})
50
+	if serr != nil {
51
+		return nil, serr
52
+	}
53
+
54
+	returnTags := make([]string, 0)
55
+	missingTags := make([]string, 0)
56
+	for _, comp := range comps {
57
+		var found bool
58
+		for _, image := range imageList {
59
+			for _, repTag := range image.RepoTags {
60
+				if repTag == comp {
61
+					found = true
62
+					returnTags = append(returnTags, image.ID)
63
+					break
64
+				}
65
+			}
66
+			if found {
67
+				break
68
+			}
69
+		}
70
+
71
+		if !found {
72
+			returnTags = append(returnTags, "")
73
+			missingTags = append(missingTags, comp)
74
+		}
75
+	}
76
+
77
+	if len(missingTags) == 0 {
78
+		return returnTags, nil
79
+	} else {
80
+		mte := MissingTagError{
81
+			Tags: missingTags,
82
+		}
83
+		return returnTags, mte
84
+	}
85
+}
0 86
new file mode 100644
... ...
@@ -0,0 +1,89 @@
0
+package util
1
+
2
+import (
3
+	"fmt"
4
+	g "github.com/onsi/ginkgo"
5
+	o "github.com/onsi/gomega"
6
+	buildapi "github.com/openshift/origin/pkg/build/api"
7
+	"time"
8
+)
9
+
10
+//CorruptImage is a helper that tags the image to be corrupted, the corruptee, as the corruptor string, resulting in the wrong image being used when corruptee is referenced later on;  strategy is for ginkgo debug; ginkgo error checking leveraged
11
+func CorruptImage(corruptee, corruptor, strategy string) {
12
+	g.By(fmt.Sprintf("\n%s  Calling docker tag to corrupt %s builder image %s by tagging %s", time.Now().Format(time.RFC850), strategy, corruptee, corruptor))
13
+
14
+	cerr := TagImage(corruptee, corruptor)
15
+
16
+	g.By(fmt.Sprintf("\n%s  Tagging %s to %s complete with err %v", time.Now().Format(time.RFC850), corruptor, corruptee, cerr))
17
+	o.Expect(cerr).NotTo(o.HaveOccurred())
18
+}
19
+
20
+//ResetImage is a helper the allows the programmer to undo any corruption performed by CorruptImage; ginkgo error checking leveraged
21
+func ResetImage(tags map[string]string) {
22
+	g.By(fmt.Sprintf("\n%s Calling docker tag to reset images", time.Now().Format(time.RFC850)))
23
+
24
+	for corruptedTag, goodTag := range tags {
25
+		err := TagImage(corruptedTag, goodTag)
26
+		g.By(fmt.Sprintf("\n%s  Reset for %s to %s complete with err %v", time.Now().Format(time.RFC850), corruptedTag, goodTag, err))
27
+		o.Expect(err).NotTo(o.HaveOccurred())
28
+	}
29
+
30
+}
31
+
32
+//VerifyImagesSame will take the two supplied image tags and see if they reference the same hexadecimal image ID;  strategy is for debug
33
+func VerifyImagesSame(comp1, comp2, strategy string) {
34
+	tag1 := comp1 + ":latest"
35
+	tag2 := comp2 + ":latest"
36
+
37
+	comps := []string{tag1, tag2}
38
+	retIDs, gerr := GetImageIDForTags(comps)
39
+
40
+	o.Expect(gerr).NotTo(o.HaveOccurred())
41
+	g.By(fmt.Sprintf("\n%s %s  compare image - %s, %s, %s, %s", time.Now().Format(time.RFC850), strategy, tag1, tag2, retIDs[0], retIDs[1]))
42
+	o.Ω(len(retIDs[0])).Should(o.BeNumerically(">", 0))
43
+	o.Ω(len(retIDs[1])).Should(o.BeNumerically(">", 0))
44
+	o.Ω(retIDs[0]).Should(o.Equal(retIDs[1]))
45
+}
46
+
47
+//VerifyImagesDifferent will that the two supplied image tags and see if they reference different hexadecimal image IDs; strategy is for ginkgo debug, also leverage ginkgo error checking
48
+func VerifyImagesDifferent(comp1, comp2, strategy string) {
49
+	tag1 := comp1 + ":latest"
50
+	tag2 := comp2 + ":latest"
51
+
52
+	comps := []string{tag1, tag2}
53
+	retIDs, gerr := GetImageIDForTags(comps)
54
+
55
+	o.Expect(gerr).NotTo(o.HaveOccurred())
56
+	g.By(fmt.Sprintf("\n%s %s  compare image - %s, %s, %s, %s", time.Now().Format(time.RFC850), strategy, tag1, tag2, retIDs[0], retIDs[1]))
57
+	o.Ω(len(retIDs[0])).Should(o.BeNumerically(">", 0))
58
+	o.Ω(len(retIDs[1])).Should(o.BeNumerically(">", 0))
59
+	o.Ω(retIDs[0] != retIDs[1]).Should(o.BeTrue())
60
+}
61
+
62
+//WaitForBuild is a wrapper for WaitForABuild in this package that takes in an oc/cli client; some ginkgo based debug along with ginkgo error checking
63
+func WaitForBuild(context, buildName string, oc *CLI) {
64
+	g.By(fmt.Sprintf("\n%s %s:   waiting for %s", time.Now().Format(time.RFC850), context, buildName))
65
+	WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName,
66
+		// The build passed
67
+		func(b *buildapi.Build) bool {
68
+			return b.Name == buildName && b.Status.Phase == buildapi.BuildPhaseComplete
69
+		},
70
+		// The build failed
71
+		func(b *buildapi.Build) bool {
72
+			if b.Name != buildName {
73
+				return false
74
+			}
75
+			return b.Status.Phase == buildapi.BuildPhaseFailed || b.Status.Phase == buildapi.BuildPhaseError
76
+		},
77
+	)
78
+	// do not care if build returned an error ... entirely possible ... we only check if the image was updated or left the same appropriately
79
+	g.By(fmt.Sprintf("\n%s %s   done waiting for %s", time.Now().Format(time.RFC850), context, buildName))
80
+}
81
+
82
+//StartBuild creates a build config from the supplied json file (not a template) and then starts a build, using the supplied oc/cli client for both operations; ginkgo error checking included
83
+func StartBuild(jsonFile, buildPrefix string, oc *CLI) {
84
+	err := oc.Run("create").Args("-f", jsonFile).Execute()
85
+	o.Expect(err).NotTo(o.HaveOccurred())
86
+	_, berr := oc.Run("start-build").Args(buildPrefix).Output()
87
+	o.Expect(berr).NotTo(o.HaveOccurred())
88
+}