
Extended previous version compatibility tests

Adds tests (tagged with [Compatibility]) that can be run with:
- current version API server + current version Controllers,
- current version API server + previous version Controllers,
- previous version API server + previous version Controllers (current
client)
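
For illustration, a minimal sketch of how these combinations might be exercised with the os::start helpers added below; the "v1.2.0" strings are only example version tags taken from the function documentation, and the environment is assumed to have been prepared with os::util::environment::setup_all_server_vars:

os::start::configure_server               # write master certs, node config, and master config
os::start::server                         # current API server + current controllers
os::start::server "" "v1.2.0"             # current API server + previous-version controllers
os::start::server "v1.2.0" "v1.2.0"       # previous-version API server + controllers (current client)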

Cesar Wong authored on 2016/08/12 07:41:25
Showing 10 changed files
1 1
new file mode 100644
... ...
@@ -0,0 +1,587 @@
0
+#!/bin/bash
1
+#
2
+# This library holds functions for configuring and starting an OpenShift server
3
+
4
+# os::start::configure_server will create and write OS master certificates, node configurations, and OpenShift configurations.
5
+# It is recommended to run the following environment setup functions before configuring the OpenShift server:
6
+#  - os::util::environment::setup_all_server_vars
7
+#  - os::util::environment::use_sudo -- if your script should be using root privileges
8
+#
9
+# Globals:
10
+#  - ALL_IP_ADDRESSES
11
+#  - PUBLIC_MASTER_HOST
12
+#  - MASTER_CONFIG_DIR
13
+#  - SERVER_CONFIG_DIR
14
+#  - MASTER_ADDR
15
+#  - API_SCHEME
16
+#  - PUBLIC_MASTER_HOST
17
+#  - API_PORT
18
+#  - KUBELET_SCHEME
19
+#  - KUBELET_BIND_HOST
20
+#  - KUBELET_PORT
21
+#  - NODE_CONFIG_DIR
22
+#  - KUBELET_HOST
23
+#  - API_BIND_HOST
24
+#  - VOLUME_DIR
25
+#  - ETCD_DATA_DIR
26
+#  - USE_IMAGES
27
+#  - USE_SUDO
28
+# Arguments:
29
+#  1 - alternate version for the config
30
+# Returns:
31
+#  - export ADMIN_KUBECONFIG
32
+#  - export CLUSTER_ADMIN_CONTEXT
33
+function os::start::configure_server() {
34
+	local version="${1:-}"
35
+	local current_user
36
+	current_user="$( whoami )"
37
+
38
+	os::start::internal::create_master_certs     "${version}"
39
+	os::start::internal::configure_node          "${version}"
40
+	os::start::internal::create_bootstrap_policy "${version}"
41
+	os::start::internal::configure_master        "${version}"
42
+
43
+	# fix up owner after creating initial config
44
+	${USE_SUDO:+sudo} chown -R "${current_user}" "${SERVER_CONFIG_DIR}"
45
+
46
+	os::start::internal::patch_master_config
47
+}
48
+readonly -f os::start::configure_server
49
+
50
+# os::start::internal::create_master_certs creates master certificates for the OpenShift server
51
+#
52
+# Globals:
53
+#  - PUBLIC_MASTER_HOST
54
+#  - MASTER_CONFIG_DIR
55
+#  - MASTER_ADDR
56
+#  - API_SCHEME
57
+#  - PUBLIC_MASTER_HOST
58
+#  - API_PORT
59
+# Arguments:
60
+#  1 - alternate version for the config
61
+function os::start::internal::create_master_certs() {
62
+	local version="${1:-}"
63
+	local openshift_volumes=( "${MASTER_CONFIG_DIR}" )
64
+	local openshift_executable
65
+	openshift_executable="$(os::start::internal::openshift_executable "${version}")"
66
+
67
+	os::log::info "Creating certificates for the OpenShift server"
68
+	${openshift_executable} admin create-master-certs                                   \
69
+	                        --overwrite=false                                           \
70
+	                        --master="${MASTER_ADDR}"                                   \
71
+	                        --cert-dir="${MASTER_CONFIG_DIR}"                           \
72
+	                        --hostnames="$( os::start::internal::determine_hostnames )" \
73
+	                        --public-master="${API_SCHEME}://${PUBLIC_MASTER_HOST}:${API_PORT}"
74
+}
75
+readonly -f os::start::internal::create_master_certs
76
+
77
+# os::start::internal::configure_node creates a node configuration
78
+#
79
+# Globals:
80
+#  - NODE_CONFIG_DIR
81
+#  - KUBELET_SCHEME
82
+#  - KUBELET_BIND_HOST
83
+#  - KUBELET_PORT
84
+#  - KUBELET_HOST
85
+#  - MASTER_ADDR
86
+#  - MASTER_CONFIG_DIR
87
+# Arguments:
88
+#  1 - alternate version for the config
89
+function os::start::internal::configure_node() {
90
+	local version="${1:-}"
91
+	local openshift_volumes=( "${MASTER_CONFIG_DIR}" "${NODE_CONFIG_DIR}" )
92
+	local openshift_executable
93
+	openshift_executable="$(os::start::internal::openshift_executable "${version}")"
94
+
95
+	os::log::info "Creating node configuration for the OpenShift server"
96
+	${openshift_executable} admin create-node-config                                          \
97
+	                        --node-dir="${NODE_CONFIG_DIR}"                                   \
98
+	                        --node="${KUBELET_HOST}"                                          \
99
+	                        --hostnames="${KUBELET_HOST}"                                     \
100
+	                        --master="${MASTER_ADDR}"                                         \
101
+	                        --signer-cert="${MASTER_CONFIG_DIR}/ca.crt"                       \
102
+	                        --signer-key="${MASTER_CONFIG_DIR}/ca.key"                        \
103
+	                        --signer-serial="${MASTER_CONFIG_DIR}/ca.serial.txt"              \
104
+	                        --certificate-authority="${MASTER_CONFIG_DIR}/ca.crt"             \
105
+	                        --node-client-certificate-authority="${MASTER_CONFIG_DIR}/ca.crt" \
106
+	                        --listen="${KUBELET_SCHEME}://${KUBELET_BIND_HOST}:${KUBELET_PORT}"
107
+}
108
+readonly -f os::start::internal::configure_node
109
+
110
+# os::start::internal::create_bootstrap_policy creates bootstrap policy files
111
+#
112
+# Globals:
113
+#  - MASTER_CONFIG_DIR
114
+# Arguments:
115
+#  1 - alternate version for the config
116
+function os::start::internal::create_bootstrap_policy() {
117
+	local version="${1:-}"
118
+	local openshift_volumes=( "${MASTER_CONFIG_DIR}" )
119
+	local openshift_executable
120
+	openshift_executable="$(os::start::internal::openshift_executable "${version}")"
121
+
122
+	os::log::info "Creating boostrap policy files for the OpenShift server"
123
+	${openshift_executable} admin create-bootstrap-policy-file --filename="${MASTER_CONFIG_DIR}/policy.json"
124
+}
125
+readonly -f os::start::internal::create_bootstrap_policy
126
+
127
+# os::start::internal::configure_master creates the configuration for the OpenShift master
128
+#
129
+# Globals:
130
+#  - MASTER_CONFIG_DIR
131
+#  - USE_IMAGES
132
+#  - USE_SUDO
133
+#  - API_HOST
134
+#  - KUBELET_HOST
135
+#  - VOLUME_DIR
136
+#  - ETCD_DATA_DIR
137
+#  - SERVER_CONFIG_DIR
138
+#  - API_SCHEME
139
+#  - API_BIND_HOST
140
+#  - API_PORT
141
+#  - PUBLIC_MASTER_HOST
142
+# Arguments:
143
+#  1 - alternate version for the config
145
+function os::start::internal::configure_master() {
146
+	local version="${1:-}"
147
+	local openshift_volumes=( "${MASTER_CONFIG_DIR}" )
148
+	local openshift_executable
149
+	openshift_executable="$(os::start::internal::openshift_executable "${version}")"
150
+
151
+	os::log::info "Creating master configuration for the OpenShift server"
152
+	${openshift_executable} start                                                   \
153
+	                        --create-certs=false                                    \
154
+	                        --images="${USE_IMAGES}"                                \
155
+	                        --master="${MASTER_ADDR}"                               \
156
+	                        --dns="tcp://${API_HOST}:53"                            \
157
+	                        --hostname="${KUBELET_HOST}"                            \
158
+	                        --volume-dir="${VOLUME_DIR}"                            \
159
+	                        --etcd-dir="${ETCD_DATA_DIR}"                           \
160
+	                        --write-config="${SERVER_CONFIG_DIR}"                   \
161
+	                        --listen="${API_SCHEME}://${API_BIND_HOST}:${API_PORT}" \
162
+	                        --public-master="${API_SCHEME}://${PUBLIC_MASTER_HOST}:${API_PORT}"
163
+
164
+}
165
+readonly -f os::start::internal::configure_master
166
+
167
+# os::start::internal::patch_master_config patches the master configuration
168
+#
169
+# Globals:
170
+#  - MASTER_CONFIG_DIR
171
+#  - SERVER_CONFIG_DIR
172
+#  - API_HOST
173
+#  - ETCD_PORT
174
+#  - ETCD_PEER_PORT
175
+#  - USE_SUDO
176
+#  - MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY
177
+# Returns:
178
+#  - export ADMIN_KUBECONFIG
179
+#  - export CLUSTER_ADMIN_CONTEXT
180
+function os::start::internal::patch_master_config() {
181
+	local sudo=${USE_SUDO:+sudo}
182
+	cp "${SERVER_CONFIG_DIR}/master/master-config.yaml" "${SERVER_CONFIG_DIR}/master/master-config.orig.yaml"
183
+	openshift ex config patch ${SERVER_CONFIG_DIR}/master/master-config.orig.yaml --patch="{\"etcdConfig\": {\"address\": \"${API_HOST}:${ETCD_PORT}\"}}" | \
184
+	openshift ex config patch - --patch="{\"etcdConfig\": {\"servingInfo\": {\"bindAddress\": \"${API_HOST}:${ETCD_PORT}\"}}}" | \
185
+	openshift ex config patch - --type json --patch="[{\"op\": \"replace\", \"path\": \"/etcdClientInfo/urls\", \"value\": [\"${API_SCHEME}://${API_HOST}:${ETCD_PORT}\"]}]" | \
186
+	openshift ex config patch - --patch="{\"etcdConfig\": {\"peerAddress\": \"${API_HOST}:${ETCD_PEER_PORT}\"}}" | \
187
+	openshift ex config patch - --patch="{\"etcdConfig\": {\"peerServingInfo\": {\"bindAddress\": \"${API_HOST}:${ETCD_PEER_PORT}\"}}}" | \
188
+	openshift ex config patch - --patch="{\"imagePolicyConfig\": {\"maxImagesBulkImportedPerRepository\": ${MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY:-5}}}" > "${SERVER_CONFIG_DIR}/master/master-config.yaml"
189
+
190
+	# Make oc use ${MASTER_CONFIG_DIR}/admin.kubeconfig, and ignore anything in the running user's $HOME dir
191
+	export ADMIN_KUBECONFIG="${MASTER_CONFIG_DIR}/admin.kubeconfig"
192
+	CLUSTER_ADMIN_CONTEXT=$(oc config view --config="${ADMIN_KUBECONFIG}" --flatten -o template --template='{{index . "current-context"}}'); export CLUSTER_ADMIN_CONTEXT
193
+	${sudo} chmod -R a+rwX "${ADMIN_KUBECONFIG}"
194
+	echo "[INFO] To debug: export KUBECONFIG=$ADMIN_KUBECONFIG"
195
+}
196
+readonly -f os::start::internal::patch_master_config
197
+
198
+# os::start::server starts the OpenShift server, exports the PID of the OpenShift server and waits until OpenShift server endpoints are available
199
+# It is advised to use this function after a successful run of 'os::start::configure_server'
200
+#
201
+# Globals:
202
+#  - USE_SUDO
203
+#  - LOG_DIR
204
+#  - ARTIFACT_DIR
205
+#  - VOLUME_DIR
206
+#  - SERVER_CONFIG_DIR
207
+#  - USE_IMAGES
208
+#  - MASTER_ADDR
209
+#  - MASTER_CONFIG_DIR
210
+#  - NODE_CONFIG_DIR
211
+#  - API_SCHEME
212
+#  - API_HOST
213
+#  - API_PORT
214
+#  - KUBELET_SCHEME
215
+#  - KUBELET_HOST
216
+#  - KUBELET_PORT
217
+# Arguments:
218
+#  1 - API server version (e.g. "v1.2.0")
219
+#  2 - Controllers version (e.g. "v1.2.0")
220
+#  3 - Skip node start ("1" to skip node start)
221
+# Returns:
222
+#  - export OS_PID
223
+#  - export ETCD_PID
224
+#  - export API_SERVER_PID
225
+#  - export CONTROLLERS_PID
226
+#  - export NODE_PID
227
+function os::start::server() {
228
+	local api_server_version="${1:-}"
229
+	local controllers_version="${2:-}"
230
+	local skip_node="${3:-}"
231
+
232
+	echo "[INFO] Scan of OpenShift related processes already up via ps -ef	| grep openshift : "
233
+	ps -ef | grep openshift
234
+
235
+	mkdir -p "${LOG_DIR}"
236
+
237
+	if [[ -z "${api_server_version}" && -z "${controllers_version}" ]]; then
238
+		if [[ -z "${skip_node}" ]]; then
239
+			os::start::internal::print_server_info
240
+			os::start::all_in_one
241
+		else
242
+			os::start::master
243
+		fi
244
+	else
245
+		os::start::internal::print_server_info
246
+		os::start::etcd
247
+		os::start::api_server "${api_server_version}"
248
+		os::start::controllers "${controllers_version}"
249
+		if [[ -z "${skip_node}" ]]; then
250
+			os::start::node
251
+		fi
252
+	fi
253
+}
254
+readonly -f os::start::server
255
+
256
+# os::start::master starts the OpenShift master, exports the PID of the OpenShift master and waits until OpenShift master endpoints are available
257
+# It is advised to use this function after a successful run of 'os::start::configure_server'
258
+#
259
+# Globals:
260
+#  - USE_SUDO
261
+#  - LOG_DIR
262
+#  - ARTIFACT_DIR
263
+#  - SERVER_CONFIG_DIR
264
+#  - USE_IMAGES
265
+#  - MASTER_ADDR
266
+#  - MASTER_CONFIG_DIR
267
+#  - API_SCHEME
268
+#  - API_HOST
269
+#  - API_PORT
270
+# Arguments:
271
+#  None
272
+# Returns:
273
+#  - export OS_PID
274
+function os::start::master() {
275
+
276
+	os::start::internal::print_server_info
277
+
278
+	mkdir -p "${LOG_DIR}"
279
+
280
+	echo "[INFO] Scan of OpenShift related processes already up via ps -ef	| grep openshift : "
281
+	ps -ef | grep openshift
282
+
283
+	echo "[INFO] Starting OpenShift server"
284
+	local openshift_env=( "OPENSHIFT_PROFILE=web" "OPENSHIFT_ON_PANIC=crash" )
285
+	$(os::start::internal::openshift_executable) start master \
286
+		--config="${MASTER_CONFIG_DIR}/master-config.yaml" \
287
+		--loglevel=4 --logspec='*importer=5' \
288
+	&>"${LOG_DIR}/openshift.log" &
289
+	export OS_PID=$!
290
+
291
+	echo "[INFO] OpenShift server start at: "
292
+	date
293
+
294
+	wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz" "apiserver: " 0.25 160
295
+	wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz/ready" "apiserver(ready): " 0.25 160
296
+
297
+	echo "[INFO] OpenShift server health checks done at: "
298
+	date
299
+}
300
+readonly -f os::start::master
301
+
302
+# os::start::all_in_one starts the OpenShift server all in one.
303
+# It is advised to use this function after a successful run of 'os::start::configure_server'
304
+#
305
+# Globals:
306
+#  - USE_SUDO
307
+#  - LOG_DIR
308
+#  - ARTIFACT_DIR
309
+#  - VOLUME_DIR
310
+#  - SERVER_CONFIG_DIR
311
+#  - USE_IMAGES
312
+#  - MASTER_ADDR
313
+#  - MASTER_CONFIG_DIR
314
+#  - NODE_CONFIG_DIR
315
+#  - API_SCHEME
316
+#  - API_HOST
317
+#  - API_PORT
318
+#  - KUBELET_SCHEME
319
+#  - KUBELET_HOST
320
+#  - KUBELET_PORT
321
+# Returns:
322
+#  - export OS_PID
323
+function os::start::all_in_one() {
324
+	local use_latest_images
325
+	if [[ -n "${USE_LATEST_IMAGES:-}" ]]; then
326
+		use_latest_images="true"
327
+	else
328
+		use_latest_images="false"
329
+	fi
330
+
331
+	echo "[INFO] Starting OpenShift server"
332
+	local openshift_env=( "OPENSHIFT_PROFILE=web" "OPENSHIFT_ON_PANIC=crash" )
333
+	local openshift_executable
334
+	openshift_executable="$(os::start::internal::openshift_executable)"
335
+	${openshift_executable} start                                                       \
336
+		                      --loglevel=4                                              \
337
+		                      --logspec='*importer=5'                                   \
338
+		                      --latest-images="${use_latest_images}"                    \
339
+		                      --node-config="${NODE_CONFIG_DIR}/node-config.yaml"       \
340
+		                      --master-config="${MASTER_CONFIG_DIR}/master-config.yaml" \
341
+		                      &>"${LOG_DIR}/openshift.log" &
342
+	export OS_PID=$!
343
+
344
+	echo "[INFO] OpenShift server start at: "
345
+	date
346
+
347
+	wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz" "apiserver: " 0.25 80
348
+	wait_for_url "${KUBELET_SCHEME}://${KUBELET_HOST}:${KUBELET_PORT}/healthz" "[INFO] kubelet: " 0.5 120
349
+	wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz/ready" "apiserver(ready): " 0.25 80
350
+	wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/api/v1/nodes/${KUBELET_HOST}" "apiserver(nodes): " 0.25 80
351
+
352
+	echo "[INFO] OpenShift server health checks done at: "
353
+	date
354
+}
355
+readonly -f os::start::all_in_one
356
+
357
+# os::start::etcd starts etcd for OpenShift
358
+# Globals:
359
+#  - USE_SUDO
360
+#  - LOG_DIR
361
+#  - MASTER_CONFIG_DIR
362
+#  - API_SCHEME
363
+#  - API_HOST
364
+#  - ETCD_PORT
365
+# Arguments:
366
+#  None
367
+# Returns:
368
+#  - export ETCD_PID
369
+function os::start::etcd() {
370
+	echo "[INFO] Starting etcd"
371
+	local openshift_env=( "OPENSHIFT_ON_PANIC=crash" )
372
+	local openshift_executable
373
+	openshift_executable="$(os::start::internal::openshift_executable)"
374
+	${openshift_executable} start etcd \
375
+		--config="${MASTER_CONFIG_DIR}/master-config.yaml" &>"${LOG_DIR}/etcd.log" &
376
+	export ETCD_PID=$!
377
+
378
+	echo "[INFO] etcd server start at: "
379
+	date 
380
+
381
+	wait_for_url "${API_SCHEME}://${API_HOST}:${ETCD_PORT}/version" "etcd: " 0.25 80
382
+
383
+	echo "[INFO] etcd server health checks done at: "
384
+	date
385
+}
386
+readonly -f os::start::etcd
387
+
388
+# os::start::api_server starts the OpenShift API server
389
+# Globals:
390
+#  - USE_SUDO
391
+#  - LOG_DIR
392
+#  - ARTIFACT_DIR
393
+#  - MASTER_CONFIG_DIR
394
+#  - API_SCHEME
395
+#  - API_HOST
396
+#  - API_PORT
397
+#  - KUBELET_SCHEME
398
+#  - KUBELET_HOST
399
+#  - KUBELET_PORT
400
+# Arguments:
401
+#  1 - API server version
402
+# Returns:
403
+#  - export API_SERVER_PID
404
+function os::start::api_server() {
405
+	local api_server_version=${1:-}
406
+	local openshift_volumes=( "${MASTER_CONFIG_DIR}" )
407
+	local openshift_env=( "OPENSHIFT_PROFILE=web" "OPENSHIFT_ON_PANIC=crash" )
408
+	local openshift_executable
409
+	openshift_executable="$(os::start::internal::openshift_executable "${api_server_version}")"
410
+
411
+	${openshift_executable} start master api \
412
+		--config="${MASTER_CONFIG_DIR}/master-config.yaml" \
413
+	&>"${LOG_DIR}/apiserver.log" &
414
+
415
+	export API_SERVER_PID=$!
416
+
417
+	echo "[INFO] OpenShift API server start at: "
418
+	date
419
+
420
+	wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz" "apiserver: " 0.25 80
421
+	wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz/ready" "apiserver(ready): " 0.25 160
422
+
423
+	echo "[INFO] OpenShift API server health checks done at: "
424
+	date
425
+}
426
+readonly -f os::start::api_server
427
+
428
+# os::start::controllers starts the OpenShift controllers
429
+# Globals:
430
+#  - USE_SUDO
431
+#  - LOG_DIR
432
+#  - MASTER_CONFIG_DIR
433
+# Arguments:
434
+# 1 - controllers version
435
+# Returns:
436
+#  - export CONTROLLERS_PID
437
+function os::start::controllers() {
438
+	local controllers_version=${1:-}
439
+	local openshift_volumes=( "${MASTER_CONFIG_DIR}" )
440
+	local openshift_env=( "OPENSHIFT_ON_PANIC=crash" )
441
+	local openshift_executable
442
+	openshift_executable="$(os::start::internal::openshift_executable "${controllers_version}")"
443
+
444
+	${openshift_executable} start master controllers \
445
+		--config="${MASTER_CONFIG_DIR}/master-config.yaml" \
446
+	&>"${LOG_DIR}/controllers.log" &
447
+
448
+	export CONTROLLERS_PID=$!
449
+
450
+	echo "[INFO] OpenShift controllers start at: "
451
+	date
452
+}
453
+readonly -f os::start::controllers
454
+
455
+
456
+# os::start::internal::start_node starts the OpenShift node
457
+# Globals:
458
+#  - USE_SUDO
459
+#  - LOG_DIR
460
+#  - USE_LATEST_IMAGES
461
+#  - NODE_CONFIG_DIR
462
+#  - KUBELET_SCHEME
463
+#  - KUBELET_HOST
464
+#  - KUBELET_PORT
465
+# Arguments:
466
+#    none
467
+# Returns:
468
+#  - export NODE_PID
469
+function os::start::internal::start_node() {
470
+	local use_latest_images
471
+	if [[ -n "${USE_LATEST_IMAGES:-}" ]]; then
472
+		use_latest_images="true"
473
+	else
474
+		use_latest_images="false"
475
+	fi
476
+
477
+	mkdir -p "${LOG_DIR}"
478
+
479
+	echo "[INFO] Starting OpenShift node"
480
+	local openshift_env=( "OPENSHIFT_ON_PANIC=crash" )
481
+	$(os::start::internal::openshift_executable) start node \
482
+		--config="${NODE_CONFIG_DIR}/node-config.yaml" \
483
+		--loglevel=4 --logspec='*importer=5' \
484
+		--latest-images="${use_latest_images}" \
485
+	&>"${LOG_DIR}/node.log" &
486
+	export NODE_PID=$!
487
+
488
+	echo "[INFO] OpenShift node start at: "
489
+	date
490
+
491
+	wait_for_url "${KUBELET_SCHEME}://${KUBELET_HOST}:${KUBELET_PORT}/healthz" "[INFO] kubelet: " 0.5 120
492
+
493
+	echo "[INFO] OpenShift node health checks done at: "
494
+	date
495
+}
496
+readonly -f os::start::internal::start_node
497
+
498
+# os::start::internal::openshift_executable returns an openshift executable
499
+# Vars:
500
+#  - openshift_volumes - array of volumes to mount to openshift container (if previous version)
501
+#  - openshift_env - array of environment variables to use when running the openshift executable
502
+# Arguments:
503
+#  1 - version - the version of openshift to run. If empty, execute current version
504
+# Returns:
505
+#  - openshift executable
506
+function os::start::internal::openshift_executable() {
507
+	local sudo="${USE_SUDO:+sudo}"
508
+	local version="${1:-}"
509
+	local openshift_executable
510
+	if [[ -n "${version}" ]]; then
511
+		local docker_options="--rm --privileged --net=host"
512
+		local volumes=""
513
+		local envvars=""
514
+
515
+		if [[ -n "${openshift_volumes:-}" ]]; then
516
+			for volume in "${openshift_volumes[@]}"; do
517
+				volumes+=" -v ${volume}:${volume}"
518
+			done
519
+		fi
520
+
521
+		if [[ -n "${openshift_env:-}" ]]; then
522
+			for envvar in "${openshift_env[@]}"; do
523
+				envvars+=" -e ${envvar}"
524
+			done
525
+		fi
526
+
527
+		openshift_executable="${sudo} docker run ${docker_options} ${volumes} ${envvars} openshift/origin:${version}"
528
+	else
529
+		local envvars=""
530
+		if [[ -n "${ENV:-}" ]]; then
531
+			envvars="env "
532
+			for envvar in "${ENV[@]}"; do
533
+				envvars+="${envvar} "
534
+			done
535
+		fi
536
+
537
+		openshift_executable="${sudo} ${envvars} $(which openshift)"
538
+	fi
539
+
540
+	echo "${openshift_executable}"
541
+}
542
+readonly -f os::start::internal::openshift_executable
543
+
544
+# os::start::internal::determine_hostnames determines host names to add to tls cert
545
+#
546
+# Globals:
547
+#  - PUBLIC_MASTER_HOST
548
+# Returns:
549
+#  - hostnames - list of hostnames to add to tls cert
550
+function os::start::internal::determine_hostnames() {
551
+	local hostnames
552
+	hostnames="${PUBLIC_MASTER_HOST},"
553
+	hostnames+="localhost,172.30.0.1,"
554
+	for address in $(openshift start --print-ip); do
555
+		hostnames+="${address},"
556
+	done
557
+	hostnames+="kubernetes.default.svc.cluster.local,"
558
+	hostnames+="kubernetes.default.svc,"
559
+	hostnames+="kubernetes.default,"
560
+	hostnames+="kubernetes,"
561
+	hostnames+="openshift.default.svc.cluster.local,"
562
+	hostnames+="openshift.default.svc,"
563
+	hostnames+="openshift.default,"
564
+	hostnames+="openshift"
565
+
566
+	echo "${hostnames}"
567
+}
568
+readonly -f os::start::internal::determine_hostnames
569
+
570
+
571
+# os::start::internal::print_server_info prints information about the OpenShift server being started
572
+#
573
+# Globals:
574
+#  - LOG_DIR
575
+#  - SERVER_CONFIG_DIR
576
+#  - USE_IMAGES
577
+#  - MASTER_ADDR
578
+function os::start::internal::print_server_info() {
579
+	local openshift_executable
580
+	openshift_executable="$(os::start::internal::openshift_executable)"
581
+	echo "[INFO] $(${openshift_executable} version)"
582
+	echo "[INFO] Server logs will be at:   ${LOG_DIR}"
583
+	echo "[INFO] Config dir is:            ${SERVER_CONFIG_DIR}"
584
+	echo "[INFO] Using images:             ${USE_IMAGES}"
585
+	echo "[INFO] MasterIP is:              ${MASTER_ADDR}"
586
+}
... ...
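As a reference for how the version handling above works, a rough sketch of the command that os::start::internal::openshift_executable produces in each case, assuming USE_SUDO is set and using the illustrative "v1.2.0" tag:

# with a version, the matching origin image is used; e.g. os::start::api_server "v1.2.0"
# roughly runs:
sudo docker run --rm --privileged --net=host \
	-v ${MASTER_CONFIG_DIR}:${MASTER_CONFIG_DIR} \
	-e OPENSHIFT_PROFILE=web -e OPENSHIFT_ON_PANIC=crash \
	openshift/origin:v1.2.0 start master api --config="${MASTER_CONFIG_DIR}/master-config.yaml"
# with no version, the locally built binary is used (entries from the ENV array,
# if any, are prepended via env):
sudo $(which openshift) start master api --config="${MASTER_CONFIG_DIR}/master-config.yaml"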
@@ -2,221 +2,20 @@
2 2
 
3 3
 # Provides simple utility functions
4 4
 
5
-# configure_os_server will create and write OS master certificates, node configurations, and OpenShift configurations.
6
-# It is recommended to run the following environment setup functions before configuring the OpenShift server:
7
-#  - os::util::environment::setup_all_server_vars
8
-#  - os::util::environment::use_sudo -- if your script should be using root privileges
9
-#
10
-# Globals:
11
-#  - ALL_IP_ADDRESSES
12
-#  - PUBLIC_MASTER_HOST
13
-#  - MASTER_CONFIG_DIR
14
-#  - MASTER_ADDR
15
-#  - API_SCHEME
16
-#  - PUBLIC_MASTER_HOST
17
-#  - API_PORT
18
-#  - KUBELET_SCHEME
19
-#  - KUBELET_BIND_HOST
20
-#  - KUBELET_PORT
21
-#  - NODE_CONFIG_DIR
22
-#  - KUBELET_HOST
23
-#  - API_BIND_HOST
24
-#  - VOLUME_DIR
25
-#  - ETCD_DATA_DIR
26
-#  - USE_IMAGES
27
-#  - USE_SUDO
28
-# Arguments:
29
-#  None
30
-# Returns:
31
-#  - export ADMIN_KUBECONFIG
32
-#  - export CLUSTER_ADMIN_CONTEXT
33
-function configure_os_server() {
34
-	# find the same IP that openshift start will bind to.	This allows access from pods that have to talk back to master
35
-	if [[ -z "${ALL_IP_ADDRESSES-}" ]]; then
36
-		ALL_IP_ADDRESSES="$(openshift start --print-ip)"
37
-		SERVER_HOSTNAME_LIST="${PUBLIC_MASTER_HOST},localhost,172.30.0.1"
38
-                SERVER_HOSTNAME_LIST="${SERVER_HOSTNAME_LIST},kubernetes.default.svc.cluster.local,kubernetes.default.svc,kubernetes.default,kubernetes"
39
-                SERVER_HOSTNAME_LIST="${SERVER_HOSTNAME_LIST},openshift.default.svc.cluster.local,openshift.default.svc,openshift.default,openshift"
40
-
41
-		while read -r IP_ADDRESS
42
-		do
43
-			SERVER_HOSTNAME_LIST="${SERVER_HOSTNAME_LIST},${IP_ADDRESS}"
44
-		done <<< "${ALL_IP_ADDRESSES}"
45
-
46
-		export ALL_IP_ADDRESSES
47
-		export SERVER_HOSTNAME_LIST
48
-	fi
49
-
50
-	echo "[INFO] Creating certificates for the OpenShift server"
51
-	openshift admin ca create-master-certs \
52
-	--overwrite=false \
53
-	--cert-dir="${MASTER_CONFIG_DIR}" \
54
-	--hostnames="${SERVER_HOSTNAME_LIST}" \
55
-	--master="${MASTER_ADDR}" \
56
-	--public-master="${API_SCHEME}://${PUBLIC_MASTER_HOST}:${API_PORT}"
57
-
58
-	echo "[INFO] Creating OpenShift node config"
59
-	openshift admin create-node-config \
60
-	--listen="${KUBELET_SCHEME}://${KUBELET_BIND_HOST}:${KUBELET_PORT}" \
61
-	--node-dir="${NODE_CONFIG_DIR}" \
62
-	--node="${KUBELET_HOST}" \
63
-	--hostnames="${KUBELET_HOST}" \
64
-	--master="${MASTER_ADDR}" \
65
-	--node-client-certificate-authority="${MASTER_CONFIG_DIR}/ca.crt" \
66
-	--certificate-authority="${MASTER_CONFIG_DIR}/ca.crt" \
67
-	--signer-cert="${MASTER_CONFIG_DIR}/ca.crt" \
68
-	--signer-key="${MASTER_CONFIG_DIR}/ca.key" \
69
-	--signer-serial="${MASTER_CONFIG_DIR}/ca.serial.txt"
70
-
71
-	oadm create-bootstrap-policy-file --filename="${MASTER_CONFIG_DIR}/policy.json"
72
-
73
-	echo "[INFO] Creating OpenShift config"
74
-	openshift start \
75
-	--write-config=${SERVER_CONFIG_DIR} \
76
-	--create-certs=false \
77
-	--dns="tcp://${API_HOST}:53" \
78
-	--listen="${API_SCHEME}://${API_BIND_HOST}:${API_PORT}" \
79
-	--master="${MASTER_ADDR}" \
80
-	--public-master="${API_SCHEME}://${PUBLIC_MASTER_HOST}:${API_PORT}" \
81
-	--hostname="${KUBELET_HOST}" \
82
-	--volume-dir="${VOLUME_DIR}" \
83
-	--etcd-dir="${ETCD_DATA_DIR}" \
84
-	--images="${USE_IMAGES}"
85
-
86
-
87
-	cp ${SERVER_CONFIG_DIR}/master/master-config.yaml ${SERVER_CONFIG_DIR}/master/master-config.orig.yaml
88
-	openshift ex config patch ${SERVER_CONFIG_DIR}/master/master-config.orig.yaml --patch="{\"etcdConfig\": {\"address\": \"${API_HOST}:${ETCD_PORT}\"}}" | \
89
-	openshift ex config patch - --patch="{\"etcdConfig\": {\"servingInfo\": {\"bindAddress\": \"${API_HOST}:${ETCD_PORT}\"}}}" | \
90
-	openshift ex config patch - --type json --patch="[{\"op\": \"replace\", \"path\": \"/etcdClientInfo/urls\", \"value\": [\"${API_SCHEME}://${API_HOST}:${ETCD_PORT}\"]}]" | \
91
-	openshift ex config patch - --patch="{\"etcdConfig\": {\"peerAddress\": \"${API_HOST}:${ETCD_PEER_PORT}\"}}" | \
92
-	openshift ex config patch - --patch="{\"etcdConfig\": {\"peerServingInfo\": {\"bindAddress\": \"${API_HOST}:${ETCD_PEER_PORT}\"}}}" | \
93
-	openshift ex config patch - --patch="{\"imagePolicyConfig\": {\"maxImagesBulkImportedPerRepository\": ${MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY:-5}}}" > ${SERVER_CONFIG_DIR}/master/master-config.yaml
94
-
95
-	# Make oc use ${MASTER_CONFIG_DIR}/admin.kubeconfig, and ignore anything in the running user's $HOME dir
96
-	export ADMIN_KUBECONFIG="${MASTER_CONFIG_DIR}/admin.kubeconfig"
97
-	export CLUSTER_ADMIN_CONTEXT=$(oc config view --config=${ADMIN_KUBECONFIG} --flatten -o template --template='{{index . "current-context"}}')
98
-	local sudo="${USE_SUDO:+sudo}"
99
-	${sudo} chmod -R a+rwX "${ADMIN_KUBECONFIG}"
100
-	echo "[INFO] To debug: export KUBECONFIG=$ADMIN_KUBECONFIG"
101
-}
102
-readonly -f configure_os_server
103
-
104
-# start_os_server starts the OpenShift server, exports the PID of the OpenShift server and waits until openshift server endpoints are available
105
-# It is advised to use this function after a successful run of 'configure_os_server'
106
-#
107
-# Globals:
108
-#  - USE_SUDO
109
-#  - LOG_DIR
110
-#  - ARTIFACT_DIR
111
-#  - VOLUME_DIR
112
-#  - SERVER_CONFIG_DIR
113
-#  - USE_IMAGES
114
-#  - MASTER_ADDR
115
-#  - MASTER_CONFIG_DIR
116
-#  - NODE_CONFIG_DIR
117
-#  - API_SCHEME
118
-#  - API_HOST
119
-#  - API_PORT
120
-#  - KUBELET_SCHEME
121
-#  - KUBELET_HOST
122
-#  - KUBELET_PORT
123
-# Arguments:
124
-#  None
125
-# Returns:
126
-#  - export OS_PID
127 5
 function start_os_server() {
128
-	local sudo="${USE_SUDO:+sudo}"
129
-
130
-	local use_latest_images
131
-	if [[ -n "${USE_LATEST_IMAGES:-}" ]]; then
132
-		use_latest_images="true"
133
-	else
134
-		use_latest_images="false"
135
-	fi
136
-
137
-	echo "[INFO] `openshift version`"
138
-	echo "[INFO] Server logs will be at:    ${LOG_DIR}/openshift.log"
139
-	echo "[INFO] Test artifacts will be in: ${ARTIFACT_DIR}"
140
-	echo "[INFO] Volumes dir is:            ${VOLUME_DIR}"
141
-	echo "[INFO] Config dir is:             ${SERVER_CONFIG_DIR}"
142
-	echo "[INFO] Using images:              ${USE_IMAGES}"
143
-	echo "[INFO] MasterIP is:               ${MASTER_ADDR}"
144
-
145
-	mkdir -p ${LOG_DIR}
146
-
147
-	echo "[INFO] Scan of OpenShift related processes already up via ps -ef	| grep openshift : "
148
-	ps -ef | grep openshift
149
-	echo "[INFO] Starting OpenShift server"
150
-	${sudo} env "PATH=${PATH}" OPENSHIFT_PROFILE=web OPENSHIFT_ON_PANIC=crash openshift start \
151
-	 --master-config=${MASTER_CONFIG_DIR}/master-config.yaml \
152
-	 --node-config=${NODE_CONFIG_DIR}/node-config.yaml \
153
-	 --loglevel=4 --logspec='*importer=5' \
154
-	 --latest-images="${use_latest_images}" \
155
-	&>"${LOG_DIR}/openshift.log" &
156
-	export OS_PID=$!
157
-
158
-	echo "[INFO] OpenShift server start at: "
159
-	date
160
-
161
-	wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz" "apiserver: " 0.25 80
162
-	wait_for_url "${KUBELET_SCHEME}://${KUBELET_HOST}:${KUBELET_PORT}/healthz" "[INFO] kubelet: " 0.5 120
163
-	wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz/ready" "apiserver(ready): " 0.25 80
164
-	wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/api/v1/nodes/${KUBELET_HOST}" "apiserver(nodes): " 0.25 80
6
+	os::log::warn "start_os_server is deprecated, please use os::start::server"
7
+	os::start::server "$@"
8
+}
165 9
 
166
-	echo "[INFO] OpenShift server health checks done at: "
167
-	date
10
+function configure_os_server() {
11
+	os::log::warn "configure_os_server is deprecated, please use os::start::configure_server"
12
+	os::start::configure_server "$@"
168 13
 }
169
-readonly -f start_os_server
170 14
 
171
-# start_os_master starts the OpenShift master, exports the PID of the OpenShift master and waits until OpenShift master endpoints are available
172
-# It is advised to use this function after a successful run of 'configure_os_server'
173
-#
174
-# Globals:
175
-#  - USE_SUDO
176
-#  - LOG_DIR
177
-#  - ARTIFACT_DIR
178
-#  - SERVER_CONFIG_DIR
179
-#  - USE_IMAGES
180
-#  - MASTER_ADDR
181
-#  - MASTER_CONFIG_DIR
182
-#  - API_SCHEME
183
-#  - API_HOST
184
-#  - API_PORT
185
-# Arguments:
186
-#  None
187
-# Returns:
188
-#  - export OS_PID
189 15
 function start_os_master() {
190
-	local sudo="${USE_SUDO:+sudo}"
191
-
192
-	echo "[INFO] `openshift version`"
193
-	echo "[INFO] Server logs will be at:    ${LOG_DIR}/openshift.log"
194
-	echo "[INFO] Test artifacts will be in: ${ARTIFACT_DIR}"
195
-	echo "[INFO] Config dir is:             ${SERVER_CONFIG_DIR}"
196
-	echo "[INFO] Using images:              ${USE_IMAGES}"
197
-	echo "[INFO] MasterIP is:               ${MASTER_ADDR}"
198
-
199
-	mkdir -p ${LOG_DIR}
200
-
201
-	echo "[INFO] Scan of OpenShift related processes already up via ps -ef	| grep openshift : "
202
-	ps -ef | grep openshift
203
-	echo "[INFO] Starting OpenShift server"
204
-	${sudo} env "PATH=${PATH}" OPENSHIFT_PROFILE=web OPENSHIFT_ON_PANIC=crash openshift start master \
205
-	 --config=${MASTER_CONFIG_DIR}/master-config.yaml \
206
-	 --loglevel=4 --logspec='*importer=5' \
207
-	&>"${LOG_DIR}/openshift.log" &
208
-	export OS_PID=$!
209
-
210
-	echo "[INFO] OpenShift server start at: "
211
-	date
212
-
213
-	wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz" "apiserver: " 0.25 160
214
-	wait_for_url "${API_SCHEME}://${API_HOST}:${API_PORT}/healthz/ready" "apiserver(ready): " 0.25 160
215
-
216
-	echo "[INFO] OpenShift server health checks done at: "
217
-	date
16
+	os::log::warn "start_os_master is deprecated, please use os::start::master"
17
+	os::start::master "$@"
218 18
 }
219
-readonly -f start_os_master
220 19
 
221 20
 # ensure_iptables_or_die tests if the testing machine has iptables available
222 21
 # and in PATH. Also test whether current user has sudo privileges.
... ...
@@ -322,15 +121,15 @@ function wait_for_url_timed() {
322 322
 	expire=$(($(time_now) + $max_wait))
323 323
 	set +e
324 324
 	while [[ $(time_now) -lt $expire ]]; do
325
-	out=$(curl --max-time 2 -fs $url 2>/dev/null)
326
-	if [ $? -eq 0 ]; then
327
-		set -e
328
-		echo ${prefix}${out}
329
-		ENDTIME=$(date +%s)
330
-		echo "[INFO] Success accessing '$url' after $(($ENDTIME - $STARTTIME)) seconds"
331
-		return 0
332
-	fi
333
-	sleep $wait
325
+		out=$(curl --max-time 2 -fs $url 2>/dev/null)
326
+		if [ $? -eq 0 ]; then
327
+			set -e
328
+			echo ${prefix}${out}
329
+			ENDTIME=$(date +%s)
330
+			echo "[INFO] Success accessing '$url' after $(($ENDTIME - $STARTTIME)) seconds"
331
+			return 0
332
+		fi
333
+		sleep $wait
334 334
 	done
335 335
 	echo "ERROR: gave up waiting for $url"
336 336
 	set -e
... ...
@@ -348,10 +147,10 @@ function wait_for_file() {
348 348
 	wait=${2:-0.2}
349 349
 	times=${3:-10}
350 350
 	for i in $(seq 1 $times); do
351
-	if [ -f "${file}" ]; then
352
-		return 0
353
-	fi
354
-	sleep $wait
351
+		if [ -f "${file}" ]; then
352
+			return 0
353
+		fi
354
+		sleep $wait
355 355
 	done
356 356
 	echo "ERROR: gave up waiting for file ${file}"
357 357
 	return 1
... ...
@@ -407,22 +206,22 @@ function set_curl_args() {
407 407
 	clientcert_args="${CURL_EXTRA:-} "
408 408
 
409 409
 	if [ -n "${CURL_CERT}" ]; then
410
-	 if [ -n "${CURL_KEY}" ]; then
411
-	 if [[ `curl -V` == *"SecureTransport"* ]]; then
412
-		 # Convert to a p12 cert for SecureTransport
413
-		 export CURL_CERT_DIR=$(dirname "${CURL_CERT}")
414
-		 export CURL_CERT_P12=${CURL_CERT_P12:-${CURL_CERT_DIR}/cert.p12}
415
-		 export CURL_CERT_P12_PASSWORD=${CURL_CERT_P12_PASSWORD:-password}
416
-		 if [ ! -f "${CURL_CERT_P12}" ]; then
417
-		 wait_for_file "${CURL_CERT}" $wait $times
418
-		 wait_for_file "${CURL_KEY}" $wait $times
419
-		 openssl pkcs12 -export -inkey "${CURL_KEY}" -in "${CURL_CERT}" -out "${CURL_CERT_P12}" -password "pass:${CURL_CERT_P12_PASSWORD}"
420
-		 fi
421
-		 clientcert_args="--cert ${CURL_CERT_P12}:${CURL_CERT_P12_PASSWORD} ${CURL_EXTRA:-}"
422
-	 else
423
-		 clientcert_args="--cert ${CURL_CERT} --key ${CURL_KEY} ${CURL_EXTRA:-}"
424
-	 fi
425
-	 fi
410
+		if [ -n "${CURL_KEY}" ]; then
411
+			if [[ `curl -V` == *"SecureTransport"* ]]; then
412
+				# Convert to a p12 cert for SecureTransport
413
+				export CURL_CERT_DIR=$(dirname "${CURL_CERT}")
414
+				export CURL_CERT_P12=${CURL_CERT_P12:-${CURL_CERT_DIR}/cert.p12}
415
+				export CURL_CERT_P12_PASSWORD=${CURL_CERT_P12_PASSWORD:-password}
416
+				if [ ! -f "${CURL_CERT_P12}" ]; then
417
+					wait_for_file "${CURL_CERT}" $wait $times
418
+					wait_for_file "${CURL_KEY}" $wait $times
419
+					openssl pkcs12 -export -inkey "${CURL_KEY}" -in "${CURL_CERT}" -out "${CURL_CERT_P12}" -password "pass:${CURL_CERT_P12_PASSWORD}"
420
+				fi
421
+				clientcert_args="--cert ${CURL_CERT_P12}:${CURL_CERT_P12_PASSWORD} ${CURL_EXTRA:-}"
422
+			else
423
+				clientcert_args="--cert ${CURL_CERT} --key ${CURL_KEY} ${CURL_EXTRA:-}"
424
+			fi
425
+		fi
426 426
 	fi
427 427
 	export CURL_ARGS="${clientcert_args}"
428 428
 }
... ...
@@ -440,14 +239,14 @@ function validate_response() {
440 440
 
441 441
 	set +e
442 442
 	for i in $(seq 1 $times); do
443
-	response=`curl $url`
444
-	echo $response | grep -q "$expected_response"
445
-	if [ $? -eq 0 ]; then
446
-		echo "[INFO] Response is valid."
447
-		set -e
448
-		return 0
449
-	fi
450
-	sleep $wait
443
+		response=`curl $url`
444
+		echo $response | grep -q "$expected_response"
445
+		if [ $? -eq 0 ]; then
446
+			echo "[INFO] Response is valid."
447
+			set -e
448
+			return 0
449
+		fi
450
+		sleep $wait
451 451
 	done
452 452
 
453 453
 	echo "[INFO] Response is invalid: $response"
... ...
@@ -559,7 +358,7 @@ function cleanup_openshift() {
559 559
 	set +e
560 560
 	dump_container_logs
561 561
 
562
- 	# pull information out of the server log so that we can get failure management in jenkins to highlight it and
562
+	# pull information out of the server log so that we can get failure management in jenkins to highlight it and
563 563
 	# really have it smack people in their logs.  This is a severe correctness problem
564 564
 	grep -a5 "CACHE.*ALTERED" ${LOG_DIR}/openshift.log
565 565
 
... ...
@@ -624,26 +423,26 @@ function create_valid_file() {
624 624
 function install_router() {
625 625
 	echo "[INFO] Installing the router"
626 626
 	oadm policy add-scc-to-user privileged -z router --config="${ADMIN_KUBECONFIG}"
627
-        # Create a TLS certificate for the router
628
-        if [[ -n "${CREATE_ROUTER_CERT:-}" ]]; then
629
-            echo "[INFO] Generating router TLS certificate"
630
-            oadm ca create-server-cert --signer-cert=${MASTER_CONFIG_DIR}/ca.crt \
631
-                 --signer-key=${MASTER_CONFIG_DIR}/ca.key \
632
-                 --signer-serial=${MASTER_CONFIG_DIR}/ca.serial.txt \
633
-                 --hostnames="*.${API_HOST}.xip.io" \
634
-                 --cert=${MASTER_CONFIG_DIR}/router.crt --key=${MASTER_CONFIG_DIR}/router.key
635
-            cat ${MASTER_CONFIG_DIR}/router.crt ${MASTER_CONFIG_DIR}/router.key \
636
-                ${MASTER_CONFIG_DIR}/ca.crt > ${MASTER_CONFIG_DIR}/router.pem
637
-            ROUTER_DEFAULT_CERT="--default-cert=${MASTER_CONFIG_DIR}/router.pem"
638
-        fi
639
-        openshift admin router --config="${ADMIN_KUBECONFIG}" --images="${USE_IMAGES}" --service-account=router ${ROUTER_DEFAULT_CERT-}
640
-
641
-        # Set the SYN eater to make router reloads more robust
642
-        if [[ -n "${DROP_SYN_DURING_RESTART:-}" ]]; then
643
-            # Rewrite the DC for the router to add the environment variable into the pod definition
644
-            echo "[INFO] Changing the router DC to drop SYN packets during a reload"
645
-            oc set env dc/router -c router DROP_SYN_DURING_RESTART=true
646
-        fi
627
+	# Create a TLS certificate for the router
628
+	if [[ -n "${CREATE_ROUTER_CERT:-}" ]]; then
629
+		echo "[INFO] Generating router TLS certificate"
630
+		oadm ca create-server-cert --signer-cert=${MASTER_CONFIG_DIR}/ca.crt \
631
+			--signer-key=${MASTER_CONFIG_DIR}/ca.key \
632
+			--signer-serial=${MASTER_CONFIG_DIR}/ca.serial.txt \
633
+			--hostnames="*.${API_HOST}.xip.io" \
634
+			--cert=${MASTER_CONFIG_DIR}/router.crt --key=${MASTER_CONFIG_DIR}/router.key
635
+		cat ${MASTER_CONFIG_DIR}/router.crt ${MASTER_CONFIG_DIR}/router.key \
636
+			${MASTER_CONFIG_DIR}/ca.crt > ${MASTER_CONFIG_DIR}/router.pem
637
+		ROUTER_DEFAULT_CERT="--default-cert=${MASTER_CONFIG_DIR}/router.pem"
638
+	fi
639
+	openshift admin router --config="${ADMIN_KUBECONFIG}" --images="${USE_IMAGES}" --service-account=router ${ROUTER_DEFAULT_CERT-}
640
+
641
+	# Set the SYN eater to make router reloads more robust
642
+	if [[ -n "${DROP_SYN_DURING_RESTART:-}" ]]; then
643
+		# Rewrite the DC for the router to add the environment variable into the pod definition
644
+		echo "[INFO] Changing the router DC to drop SYN packets during a reload"
645
+		oc set env dc/router -c router DROP_SYN_DURING_RESTART=true
646
+	fi
647 647
 }
648 648
 readonly -f create_gitconfig
649 649
 
... ...
@@ -700,20 +499,20 @@ readonly -f os::build:wait_for_end
700 700
 SELINUX_DISABLED=0
701 701
 
702 702
 function enable-selinux() {
703
-  if [ "${SELINUX_DISABLED}" = "1" ]; then
704
-    os::log::info "Re-enabling selinux enforcement"
705
-    sudo setenforce 1
706
-    SELINUX_DISABLED=0
707
-  fi
703
+	if [ "${SELINUX_DISABLED}" = "1" ]; then
704
+		os::log::info "Re-enabling selinux enforcement"
705
+		sudo setenforce 1
706
+		SELINUX_DISABLED=0
707
+	fi
708 708
 }
709 709
 readonly -f enable-selinux
710 710
 
711 711
 function disable-selinux() {
712
-  if selinuxenabled && [ "$(getenforce)" = "Enforcing" ]; then
713
-    os::log::info "Temporarily disabling selinux enforcement"
714
-    sudo setenforce 0
715
-    SELINUX_DISABLED=1
716
-  fi
712
+	if selinuxenabled && [ "$(getenforce)" = "Enforcing" ]; then
713
+		os::log::info "Temporarily disabling selinux enforcement"
714
+		sudo setenforce 0
715
+		SELINUX_DISABLED=1
716
+	fi
717 717
 }
718 718
 readonly -f disable-selinux
719 719
 
... ...
@@ -722,25 +521,25 @@ readonly -f disable-selinux
722 722
 ######
723 723
 
724 724
 function os::log::with-severity() {
725
-  local msg=$1
726
-  local severity=$2
725
+	local msg=$1
726
+	local severity=$2
727 727
 
728
-  echo "[$2] ${1}"
728
+	echo "[$2] ${1}"
729 729
 }
730 730
 readonly -f os::log::with-severity
731 731
 
732 732
 function os::log::info() {
733
-  os::log::with-severity "${1}" "INFO"
733
+	os::log::with-severity "${1}" "INFO"
734 734
 }
735 735
 readonly -f os::log::info
736 736
 
737 737
 function os::log::warn() {
738
-  os::log::with-severity "${1}" "WARNING" 1>&2
738
+	os::log::with-severity "${1}" "WARNING" 1>&2
739 739
 }
740 740
 readonly -f os::log::warn
741 741
 
742 742
 function os::log::error() {
743
-  os::log::with-severity "${1}" "ERROR" 1>&2
743
+	os::log::with-severity "${1}" "ERROR" 1>&2
744 744
 }
745 745
 readonly -f os::log::error
746 746
 
... ...
@@ -763,44 +562,44 @@ readonly -f find_files
763 763
 # Asks golang what it thinks the host platform is.  The go tool chain does some
764 764
 # slightly different things when the target platform matches the host platform.
765 765
 function os::util::host_platform() {
766
-  echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)"
766
+	echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)"
767 767
 }
768 768
 readonly -f os::util::host_platform
769 769
 
770 770
 function os::util::sed() {
771
-  if LANG=C sed --help 2>&1 | grep -qs "GNU sed"; then
772
-  	sed -i'' "$@"
773
-  else
774
-  	sed -i '' "$@"
775
-  fi
771
+	if LANG=C sed --help 2>&1 | grep -qs "GNU sed"; then
772
+		sed -i'' "$@"
773
+	else
774
+		sed -i '' "$@"
775
+	fi
776 776
 }
777 777
 readonly -f os::util::sed
778 778
 
779 779
 function os::util::base64decode() {
780
-  if [[ "$(go env GOHOSTOS)" == "darwin" ]]; then
781
-  	base64 -D $@
782
-  else
783
-  	base64 -d $@
784
-  fi
780
+	if [[ "$(go env GOHOSTOS)" == "darwin" ]]; then
781
+		base64 -D $@
782
+	else
783
+		base64 -d $@
784
+	fi
785 785
 }
786 786
 readonly -f os::util::base64decode
787 787
 
788 788
 function os::util::get_object_assert() {
789
-  local object=$1
790
-  local request=$2
791
-  local expected=$3
792
-
793
-  res=$(eval oc get $object -o go-template=\"$request\")
794
-
795
-  if [[ "$res" =~ ^$expected$ ]]; then
796
-      echo "Successful get $object $request: $res"
797
-      return 0
798
-  else
799
-      echo "FAIL!"
800
-      echo "Get $object $request"
801
-      echo "  Expected: $expected"
802
-      echo "  Got:      $res"
803
-      return 1
804
-  fi
789
+	local object=$1
790
+	local request=$2
791
+	local expected=$3
792
+
793
+	res=$(eval oc get $object -o go-template=\"$request\")
794
+
795
+	if [[ "$res" =~ ^$expected$ ]]; then
796
+		echo "Successful get $object $request: $res"
797
+		return 0
798
+	else
799
+		echo "FAIL!"
800
+		echo "Get $object $request"
801
+		echo "  Expected: $expected"
802
+		echo "  Got:      $res"
803
+		return 1
804
+	fi
805 805
 }
806 806
 readonly -f os::util::get_object_assert
807 807
new file mode 100644
... ...
@@ -0,0 +1,822 @@
0
+package build
1
+
2
+import (
3
+	"fmt"
4
+	"sync/atomic"
5
+	"time"
6
+
7
+	kapi "k8s.io/kubernetes/pkg/api"
8
+	kclient "k8s.io/kubernetes/pkg/client/unversioned"
9
+	"k8s.io/kubernetes/pkg/fields"
10
+	watchapi "k8s.io/kubernetes/pkg/watch"
11
+
12
+	buildapi "github.com/openshift/origin/pkg/build/api"
13
+	"github.com/openshift/origin/pkg/client"
14
+	imageapi "github.com/openshift/origin/pkg/image/api"
15
+	testutil "github.com/openshift/origin/test/util"
16
+)
17
+
18
+var (
19
+	//TODO: Make these externally configurable
20
+
21
+	// BuildControllerTestWait is the time that RunBuildControllerTest waits
22
+	// for any other changes to happen when testing whether only a single build got processed
23
+	BuildControllerTestWait = 10 * time.Second
24
+
25
+	// BuildPodControllerTestWait is the time that RunBuildPodControllerTest waits
26
+	// after a state transition to make sure other state transitions don't occur unexpectedly
27
+	BuildPodControllerTestWait = 10 * time.Second
28
+
29
+	// BuildControllersWatchTimeout is used by all tests to wait for watch events. In case where only
30
+	// a single watch event is expected, the test will fail after the timeout.
31
+	BuildControllersWatchTimeout = 60 * time.Second
32
+)
33
+
34
+type testingT interface {
35
+	Fail()
36
+	Error(args ...interface{})
37
+	Errorf(format string, args ...interface{})
38
+	FailNow()
39
+	Fatal(args ...interface{})
40
+	Fatalf(format string, args ...interface{})
41
+	Log(args ...interface{})
42
+	Logf(format string, args ...interface{})
43
+	Failed() bool
44
+	Parallel()
45
+	Skip(args ...interface{})
46
+	Skipf(format string, args ...interface{})
47
+	SkipNow()
48
+	Skipped() bool
49
+}
50
+
51
+func mockBuild() *buildapi.Build {
52
+	return &buildapi.Build{
53
+		ObjectMeta: kapi.ObjectMeta{
54
+			GenerateName: "mock-build",
55
+			Labels: map[string]string{
56
+				"label1":                     "value1",
57
+				"label2":                     "value2",
58
+				buildapi.BuildConfigLabel:    "mock-build-config",
59
+				buildapi.BuildRunPolicyLabel: string(buildapi.BuildRunPolicyParallel),
60
+			},
61
+		},
62
+		Spec: buildapi.BuildSpec{
63
+			CommonSpec: buildapi.CommonSpec{
64
+				Source: buildapi.BuildSource{
65
+					Git: &buildapi.GitBuildSource{
66
+						URI: "http://my.docker/build",
67
+					},
68
+					ContextDir: "context",
69
+				},
70
+				Strategy: buildapi.BuildStrategy{
71
+					DockerStrategy: &buildapi.DockerBuildStrategy{},
72
+				},
73
+				Output: buildapi.BuildOutput{
74
+					To: &kapi.ObjectReference{
75
+						Kind: "DockerImage",
76
+						Name: "namespace/builtimage",
77
+					},
78
+				},
79
+			},
80
+		},
81
+	}
82
+}
83
+
84
+func RunBuildControllerTest(t testingT, osClient *client.Client, kClient *kclient.Client) {
85
+	// Setup an error channel
86
+	errChan := make(chan error) // go routines will send a message on this channel if an error occurs. Once this happens the test is over
87
+
88
+	// Create a build
89
+	ns := testutil.Namespace()
90
+	b, err := osClient.Builds(ns).Create(mockBuild())
91
+	if err != nil {
92
+		t.Fatal(err)
93
+	}
94
+
95
+	// Start watching builds for New -> Pending transition
96
+	buildWatch, err := osClient.Builds(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", b.Name), ResourceVersion: b.ResourceVersion})
97
+	if err != nil {
98
+		t.Fatal(err)
99
+	}
100
+	defer buildWatch.Stop()
101
+	buildModifiedCount := int32(0)
102
+	go func() {
103
+		for e := range buildWatch.ResultChan() {
104
+			if e.Type != watchapi.Modified {
105
+				errChan <- fmt.Errorf("received an unexpected event of type: %s with object: %#v", e.Type, e.Object)
106
+			}
107
+			build, ok := e.Object.(*buildapi.Build)
108
+			if !ok {
109
+				errChan <- fmt.Errorf("received something other than build: %#v", e.Object)
110
+				break
111
+			}
112
+			// If unexpected status, throw error
113
+			if build.Status.Phase != buildapi.BuildPhasePending && build.Status.Phase != buildapi.BuildPhaseNew {
114
+				errChan <- fmt.Errorf("received unexpected build status: %s", build.Status.Phase)
115
+				break
116
+			}
117
+			atomic.AddInt32(&buildModifiedCount, 1)
118
+		}
119
+	}()
120
+
121
+	// Watch build pods as they are created
122
+	podWatch, err := kClient.Pods(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", buildapi.GetBuildPodName(b))})
123
+	if err != nil {
124
+		t.Fatal(err)
125
+	}
126
+	defer podWatch.Stop()
127
+	podAddedCount := int32(0)
128
+	go func() {
129
+		for e := range podWatch.ResultChan() {
130
+			// Look for creation events
131
+			if e.Type == watchapi.Added {
132
+				atomic.AddInt32(&podAddedCount, 1)
133
+			}
134
+		}
135
+	}()
136
+
137
+	select {
138
+	case err := <-errChan:
139
+		t.Errorf("Error: %v", err)
140
+	case <-time.After(BuildControllerTestWait):
141
+		if atomic.LoadInt32(&buildModifiedCount) < 1 {
142
+			t.Errorf("The build was modified an unexpected number of times. Got: %d, Expected: >= 1", buildModifiedCount)
143
+		}
144
+		if atomic.LoadInt32(&podAddedCount) != 1 {
145
+			t.Errorf("The build pod was created an unexpected number of times. Got: %d, Expected: 1", podAddedCount)
146
+		}
147
+	}
148
+}
149
+
150
+type buildControllerPodState struct {
151
+	PodPhase   kapi.PodPhase
152
+	BuildPhase buildapi.BuildPhase
153
+}
154
+
155
+type buildControllerPodTest struct {
156
+	Name   string
157
+	States []buildControllerPodState
158
+}
159
+
160
+func RunBuildPodControllerTest(t testingT, osClient *client.Client, kClient *kclient.Client) {
161
+	ns := testutil.Namespace()
162
+	waitTime := BuildPodControllerTestWait
163
+
164
+	tests := []buildControllerPodTest{
165
+		{
166
+			Name: "running state test",
167
+			States: []buildControllerPodState{
168
+				{
169
+					PodPhase:   kapi.PodRunning,
170
+					BuildPhase: buildapi.BuildPhaseRunning,
171
+				},
172
+			},
173
+		},
174
+		{
175
+			Name: "build succeeded",
176
+			States: []buildControllerPodState{
177
+				{
178
+					PodPhase:   kapi.PodRunning,
179
+					BuildPhase: buildapi.BuildPhaseRunning,
180
+				},
181
+				{
182
+					PodPhase:   kapi.PodSucceeded,
183
+					BuildPhase: buildapi.BuildPhaseComplete,
184
+				},
185
+			},
186
+		},
187
+		{
188
+			Name: "build failed",
189
+			States: []buildControllerPodState{
190
+				{
191
+					PodPhase:   kapi.PodRunning,
192
+					BuildPhase: buildapi.BuildPhaseRunning,
193
+				},
194
+				{
195
+					PodPhase:   kapi.PodFailed,
196
+					BuildPhase: buildapi.BuildPhaseFailed,
197
+				},
198
+			},
199
+		},
200
+	}
201
+	for _, test := range tests {
202
+		// Setup communications channels
203
+		podReadyChan := make(chan *kapi.Pod) // Will receive a value when a build pod is ready
204
+		errChan := make(chan error)          // Will receive a value when an error occurs
205
+		stateReached := int32(0)
206
+
207
+		// Create a build
208
+		b, err := osClient.Builds(ns).Create(mockBuild())
209
+		if err != nil {
210
+			t.Fatal(err)
211
+		}
212
+
213
+		// Watch build pod for transition to pending
214
+		podWatch, err := kClient.Pods(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", buildapi.GetBuildPodName(b))})
215
+		if err != nil {
216
+			t.Fatal(err)
217
+		}
218
+		go func() {
219
+			for e := range podWatch.ResultChan() {
220
+				pod, ok := e.Object.(*kapi.Pod)
221
+				if !ok {
222
+					t.Fatalf("%s: unexpected object received: %#v\n", test.Name, e.Object)
223
+				}
224
+				if pod.Status.Phase == kapi.PodPending {
225
+					podReadyChan <- pod
226
+					break
227
+				}
228
+			}
229
+		}()
230
+
231
+		var pod *kapi.Pod
232
+		select {
233
+		case pod = <-podReadyChan:
234
+			if pod.Status.Phase != kapi.PodPending {
235
+				t.Errorf("Got wrong pod phase: %s", pod.Status.Phase)
236
+				podWatch.Stop()
237
+				continue
238
+			}
239
+
240
+		case <-time.After(BuildControllersWatchTimeout):
241
+			t.Errorf("Timed out waiting for build pod to be ready")
242
+			podWatch.Stop()
243
+			continue
244
+		}
245
+		podWatch.Stop()
246
+
247
+		for _, state := range test.States {
248
+			if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
249
+				// Update pod state and verify that corresponding build state happens accordingly
250
+				pod, err := kClient.Pods(ns).Get(pod.Name)
251
+				if err != nil {
252
+					return err
253
+				}
254
+				if pod.Status.Phase == state.PodPhase {
255
+					return fmt.Errorf("another client altered the pod phase to %s: %#v", state.PodPhase, pod)
256
+				}
257
+				pod.Status.Phase = state.PodPhase
258
+				_, err = kClient.Pods(ns).UpdateStatus(pod)
259
+				return err
260
+			}); err != nil {
261
+				t.Fatal(err)
262
+			}
263
+
264
+			buildWatch, err := osClient.Builds(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", b.Name), ResourceVersion: b.ResourceVersion})
265
+			if err != nil {
266
+				t.Fatal(err)
267
+			}
268
+			defer buildWatch.Stop()
269
+			go func() {
270
+				done := false
271
+				for e := range buildWatch.ResultChan() {
272
+					var ok bool
273
+					b, ok = e.Object.(*buildapi.Build)
274
+					if !ok {
275
+						errChan <- fmt.Errorf("%s: unexpected object received: %#v", test.Name, e.Object)
276
+					}
277
+					if e.Type != watchapi.Modified {
278
+						errChan <- fmt.Errorf("%s: unexpected event received: %s, object: %#v", test.Name, e.Type, e.Object)
279
+					}
280
+					if done {
281
+						errChan <- fmt.Errorf("%s: unexpected build state: %#v", test.Name, e.Object)
282
+					} else if b.Status.Phase == state.BuildPhase {
283
+						done = true
284
+						atomic.StoreInt32(&stateReached, 1)
285
+					}
286
+				}
287
+			}()
288
+
289
+			select {
290
+			case err := <-errChan:
291
+				buildWatch.Stop()
292
+				t.Errorf("%s: Error: %v\n", test.Name, err)
293
+				break
294
+			case <-time.After(waitTime):
295
+				buildWatch.Stop()
296
+				if atomic.LoadInt32(&stateReached) != 1 {
297
+					t.Errorf("%s: Did not reach desired build state: %s", test.Name, state.BuildPhase)
298
+					break
299
+				}
300
+			}
301
+		}
302
+	}
303
+}
304
+
305
+func waitForWatch(t testingT, name string, w watchapi.Interface) *watchapi.Event {
306
+	select {
307
+	case e := <-w.ResultChan():
308
+		return &e
309
+	case <-time.After(BuildControllersWatchTimeout):
310
+		t.Fatalf("Timed out waiting for watch: %s", name)
311
+		return nil
312
+	}
313
+}
314
+
315
+func RunImageChangeTriggerTest(t testingT, clusterAdminClient *client.Client) {
316
+	tag := "latest"
317
+	streamName := "test-image-trigger-repo"
318
+
319
+	imageStream := mockImageStream2(tag)
320
+	imageStreamMapping := mockImageStreamMapping(imageStream.Name, "someimage", tag, "registry:8080/openshift/test-image-trigger:"+tag)
321
+	config := imageChangeBuildConfig("sti-imagestreamtag", stiStrategy("ImageStreamTag", streamName+":"+tag))
322
+	created, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(config)
323
+	if err != nil {
324
+		t.Fatalf("Couldn't create BuildConfig: %v", err)
325
+	}
326
+
327
+	watch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
328
+	if err != nil {
329
+		t.Fatalf("Couldn't subscribe to Builds %v", err)
330
+	}
331
+
332
+	watch2, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
333
+	if err != nil {
334
+		t.Fatalf("Couldn't subscribe to BuildConfigs %v", err)
335
+	}
336
+	defer watch2.Stop()
337
+
338
+	imageStream, err = clusterAdminClient.ImageStreams(testutil.Namespace()).Create(imageStream)
339
+	if err != nil {
340
+		t.Fatalf("Couldn't create ImageStream: %v", err)
341
+	}
342
+
343
+	err = clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(imageStreamMapping)
344
+	if err != nil {
345
+		t.Fatalf("Couldn't create Image: %v", err)
346
+	}
347
+
348
+	// wait for initial build event from the creation of the imagerepo with tag latest
349
+	event := waitForWatch(t, "initial build added", watch)
350
+	if e, a := watchapi.Added, event.Type; e != a {
351
+		t.Fatalf("expected watch event type %s, got %s", e, a)
352
+	}
353
+	newBuild := event.Object.(*buildapi.Build)
354
+	strategy := newBuild.Spec.Strategy
355
+	switch {
356
+	case strategy.SourceStrategy != nil:
357
+		if strategy.SourceStrategy.From.Name != "registry:8080/openshift/test-image-trigger:"+tag {
358
+			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
359
+			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
360
+			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:"+tag, strategy.SourceStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
361
+		}
362
+	case strategy.DockerStrategy != nil:
363
+		if strategy.DockerStrategy.From.Name != "registry:8080/openshift/test-image-trigger:"+tag {
364
+			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
365
+			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
366
+			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:"+tag, strategy.DockerStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
367
+		}
368
+	case strategy.CustomStrategy != nil:
369
+		if strategy.CustomStrategy.From.Name != "registry:8080/openshift/test-image-trigger:"+tag {
370
+			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
371
+			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
372
+			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:"+tag, strategy.CustomStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
373
+		}
374
+	}
375
+	// Wait for an update on the specific build that was added
376
+	watch3, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", newBuild.Name), ResourceVersion: newBuild.ResourceVersion})
377
+	defer watch3.Stop()
378
+	if err != nil {
379
+		t.Fatalf("Couldn't subscribe to Builds %v", err)
380
+	}
381
+	event = waitForWatch(t, "initial build update", watch3)
382
+	if e, a := watchapi.Modified, event.Type; e != a {
383
+		t.Fatalf("expected watch event type %s, got %s", e, a)
384
+	}
385
+	newBuild = event.Object.(*buildapi.Build)
386
+	// Make sure the resolution of the build's docker image pushspec didn't mutate the persisted API object
387
+	if newBuild.Spec.Output.To.Name != "test-image-trigger-repo:outputtag" {
388
+		t.Fatalf("unexpected build output: %#v %#v", newBuild.Spec.Output.To, newBuild.Spec.Output)
389
+	}
390
+	if newBuild.Labels["testlabel"] != "testvalue" {
391
+		t.Fatalf("Expected build with label %s=%s from build config got %s=%s", "testlabel", "testvalue", "testlabel", newBuild.Labels["testlabel"])
392
+	}
393
+
394
+	// wait for build config to be updated
395
+WaitLoop:
396
+	for {
397
+		select {
398
+		case e := <-watch2.ResultChan():
399
+			event = &e
400
+			continue
401
+		case <-time.After(BuildControllersWatchTimeout):
402
+			break WaitLoop
403
+		}
404
+	}
405
+	updatedConfig := event.Object.(*buildapi.BuildConfig)
406
+	if err != nil {
407
+		t.Fatalf("Couldn't get BuildConfig: %v", err)
408
+	}
409
+	// the first tag did not have an image id, so the last trigger field is the pull spec
410
+	if updatedConfig.Spec.Triggers[0].ImageChange.LastTriggeredImageID != "registry:8080/openshift/test-image-trigger:"+tag {
411
+		t.Fatalf("Expected imageID equal to pull spec, got %#v", updatedConfig.Spec.Triggers[0].ImageChange)
412
+	}
413
+
414
+	// clear out the build/buildconfig watches before triggering a new build
415
+WaitLoop2:
416
+	for {
417
+		select {
418
+		case <-watch.ResultChan():
419
+			continue
420
+		case <-watch2.ResultChan():
421
+			continue
422
+		case <-time.After(BuildControllersWatchTimeout):
423
+			break WaitLoop2
424
+		}
425
+	}
426
+
427
+	// trigger a build by posting a new image
428
+	if err := clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(&imageapi.ImageStreamMapping{
429
+		ObjectMeta: kapi.ObjectMeta{
430
+			Namespace: testutil.Namespace(),
431
+			Name:      imageStream.Name,
432
+		},
433
+		Tag: tag,
434
+		Image: imageapi.Image{
435
+			ObjectMeta: kapi.ObjectMeta{
436
+				Name: "ref-2-random",
437
+			},
438
+			DockerImageReference: "registry:8080/openshift/test-image-trigger:ref-2-random",
439
+		},
440
+	}); err != nil {
441
+		t.Fatalf("unexpected error: %v", err)
442
+	}
443
+	event = waitForWatch(t, "second build created", watch)
444
+	if e, a := watchapi.Added, event.Type; e != a {
445
+		t.Fatalf("expected watch event type %s, got %s", e, a)
446
+	}
447
+	newBuild = event.Object.(*buildapi.Build)
448
+	strategy = newBuild.Spec.Strategy
449
+	switch {
450
+	case strategy.SourceStrategy != nil:
451
+		if strategy.SourceStrategy.From.Name != "registry:8080/openshift/test-image-trigger:ref-2-random" {
452
+			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
453
+			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
454
+			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:ref-2-random", strategy.SourceStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
455
+		}
456
+	case strategy.DockerStrategy != nil:
457
+		if strategy.DockerStrategy.From.Name != "registry:8080/openshift/test-image-trigger:ref-2-random" {
458
+			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
459
+			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
460
+			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:ref-2-random", strategy.DockerStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
461
+		}
462
+	case strategy.CustomStrategy != nil:
463
+		if strategy.CustomStrategy.From.Name != "registry:8080/openshift/test-image-trigger:ref-2-random" {
464
+			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
465
+			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
466
+			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:ref-2-random", strategy.CustomStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
467
+		}
468
+	}
469
+
470
+	// Listen to events on the specific build
471
+	watch4, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", newBuild.Name), ResourceVersion: newBuild.ResourceVersion})
472
+	defer watch4.Stop()
473
+
474
+	event = waitForWatch(t, "update on second build", watch4)
475
+	if e, a := watchapi.Modified, event.Type; e != a {
476
+		t.Fatalf("expected watch event type %s, got %s", e, a)
477
+	}
478
+	newBuild = event.Object.(*buildapi.Build)
479
+	// Make sure the resolution of the build's docker image pushspec didn't mutate the persisted API object
480
+	if newBuild.Spec.Output.To.Name != "test-image-trigger-repo:outputtag" {
481
+		t.Fatalf("unexpected build output: %#v %#v", newBuild.Spec.Output.To, newBuild.Spec.Output)
482
+	}
483
+	if newBuild.Labels["testlabel"] != "testvalue" {
484
+		t.Fatalf("Expected build with label %s=%s from build config got %s=%s", "testlabel", "testvalue", "testlabel", newBuild.Labels["testlabel"])
485
+	}
486
+
487
+WaitLoop3:
488
+	for {
489
+		select {
490
+		case e := <-watch2.ResultChan():
491
+			event = &e
492
+			continue
493
+		case <-time.After(BuildControllersWatchTimeout):
494
+			break WaitLoop3
495
+		}
496
+	}
497
+	updatedConfig = event.Object.(*buildapi.BuildConfig)
498
+	if e, a := "registry:8080/openshift/test-image-trigger:ref-2-random", updatedConfig.Spec.Triggers[0].ImageChange.LastTriggeredImageID; e != a {
499
+		t.Errorf("unexpected trigger id: expected %v, got %v", e, a)
500
+	}
501
+}
502
+
503
+func RunBuildDeleteTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
504
+
505
+	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
506
+	if err != nil {
507
+		t.Fatalf("Couldn't subscribe to Builds %v", err)
508
+	}
509
+	defer buildWatch.Stop()
510
+
511
+	created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild())
512
+	if err != nil {
513
+		t.Fatalf("Couldn't create Build: %v", err)
514
+	}
515
+
516
+	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
517
+	if err != nil {
518
+		t.Fatalf("Couldn't subscribe to Pods %v", err)
519
+	}
520
+	defer podWatch.Stop()
521
+
522
+	// wait for the initial build event from the creation of the build
523
+	event := waitForWatch(t, "initial build added", buildWatch)
524
+	if e, a := watchapi.Added, event.Type; e != a {
525
+		t.Fatalf("expected watch event type %s, got %s", e, a)
526
+	}
527
+	newBuild := event.Object.(*buildapi.Build)
528
+
529
+	// initial pod creation for build
530
+	event = waitForWatch(t, "build pod created", podWatch)
531
+	if e, a := watchapi.Added, event.Type; e != a {
532
+		t.Fatalf("expected watch event type %s, got %s", e, a)
533
+	}
534
+
535
+	clusterAdminClient.Builds(testutil.Namespace()).Delete(newBuild.Name)
536
+
537
+	event = waitForWatchType(t, "pod deleted due to build deleted", podWatch, watchapi.Deleted)
538
+	if e, a := watchapi.Deleted, event.Type; e != a {
539
+		t.Fatalf("expected watch event type %s, got %s", e, a)
540
+	}
541
+	pod := event.Object.(*kapi.Pod)
542
+	if expected := buildapi.GetBuildPodName(newBuild); pod.Name != expected {
543
+		t.Fatalf("Expected pod %s to be deleted, but pod %s was deleted", expected, pod.Name)
544
+	}
545
+
546
+}
547
+
548
+// waitForWatchType tolerates receiving 3 events before failing while watching for a particular event
549
+// type.
550
+func waitForWatchType(t testingT, name string, w watchapi.Interface, expect watchapi.EventType) *watchapi.Event {
551
+	tries := 3
552
+	for i := 0; i < tries; i++ {
553
+		select {
554
+		case e := <-w.ResultChan():
555
+			if e.Type != expect {
556
+				continue
557
+			}
558
+			return &e
559
+		case <-time.After(BuildControllersWatchTimeout):
560
+			t.Fatalf("Timed out waiting for watch: %s", name)
561
+			return nil
562
+		}
563
+	}
564
+	t.Fatalf("Waited for a %v event with %d tries but never received one", expect, tries)
565
+	return nil
566
+}
567
+
568
+func RunBuildRunningPodDeleteTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
569
+
570
+	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
571
+	if err != nil {
572
+		t.Fatalf("Couldn't subscribe to Builds %v", err)
573
+	}
574
+	defer buildWatch.Stop()
575
+
576
+	created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild())
577
+	if err != nil {
578
+		t.Fatalf("Couldn't create Build: %v", err)
579
+	}
580
+
581
+	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
582
+	if err != nil {
583
+		t.Fatalf("Couldn't subscribe to Pods %v", err)
584
+	}
585
+	defer podWatch.Stop()
586
+
587
+	// wait for the initial build event from the creation of the build
588
+	event := waitForWatch(t, "initial build added", buildWatch)
589
+	if e, a := watchapi.Added, event.Type; e != a {
590
+		t.Fatalf("expected watch event type %s, got %s", e, a)
591
+	}
592
+	newBuild := event.Object.(*buildapi.Build)
593
+	buildName := newBuild.Name
594
+	podName := newBuild.Name + "-build"
595
+
596
+	// initial pod creation for build
597
+	for {
598
+		event = waitForWatch(t, "build pod created", podWatch)
599
+		newPod := event.Object.(*kapi.Pod)
600
+		if newPod.Name == podName {
601
+			break
602
+		}
603
+	}
604
+	if e, a := watchapi.Added, event.Type; e != a {
605
+		t.Fatalf("expected watch event type %s, got %s", e, a)
606
+	}
607
+
608
+	// throw away events from other builds, we only care about the new build
609
+	// we just triggered
610
+	for {
611
+		event = waitForWatch(t, "build updated to pending", buildWatch)
612
+		newBuild = event.Object.(*buildapi.Build)
613
+		if newBuild.Name == buildName {
614
+			break
615
+		}
616
+	}
617
+	if e, a := watchapi.Modified, event.Type; e != a {
618
+		t.Fatalf("expected watch event type %s, got %s", e, a)
619
+	}
620
+	if newBuild.Status.Phase != buildapi.BuildPhasePending {
621
+		t.Fatalf("expected build status to be marked pending, but was marked %s", newBuild.Status.Phase)
622
+	}
623
+
624
+	clusterAdminKubeClient.Pods(testutil.Namespace()).Delete(buildapi.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0))
625
+	event = waitForWatch(t, "build updated to error", buildWatch)
626
+	if e, a := watchapi.Modified, event.Type; e != a {
627
+		t.Fatalf("expected watch event type %s, got %s", e, a)
628
+	}
629
+	newBuild = event.Object.(*buildapi.Build)
630
+	if newBuild.Status.Phase != buildapi.BuildPhaseError {
631
+		t.Fatalf("expected build status to be marked error, but was marked %s", newBuild.Status.Phase)
632
+	}
633
+}
634
+
635
+func RunBuildCompletePodDeleteTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
636
+
637
+	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
638
+	if err != nil {
639
+		t.Fatalf("Couldn't subscribe to Builds %v", err)
640
+	}
641
+	defer buildWatch.Stop()
642
+
643
+	created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild())
644
+	if err != nil {
645
+		t.Fatalf("Couldn't create Build: %v", err)
646
+	}
647
+
648
+	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
649
+	if err != nil {
650
+		t.Fatalf("Couldn't subscribe to Pods %v", err)
651
+	}
652
+	defer podWatch.Stop()
653
+
654
+	// wait for the initial build event from the creation of the build
655
+	event := waitForWatch(t, "initial build added", buildWatch)
656
+	if e, a := watchapi.Added, event.Type; e != a {
657
+		t.Fatalf("expected watch event type %s, got %s", e, a)
658
+	}
659
+	newBuild := event.Object.(*buildapi.Build)
660
+
661
+	// initial pod creation for build
662
+	event = waitForWatch(t, "build pod created", podWatch)
663
+	if e, a := watchapi.Added, event.Type; e != a {
664
+		t.Fatalf("expected watch event type %s, got %s", e, a)
665
+	}
666
+
667
+	event = waitForWatch(t, "build updated to pending", buildWatch)
668
+	if e, a := watchapi.Modified, event.Type; e != a {
669
+		t.Fatalf("expected watch event type %s, got %s", e, a)
670
+	}
671
+
672
+	newBuild = event.Object.(*buildapi.Build)
673
+	if newBuild.Status.Phase != buildapi.BuildPhasePending {
674
+		t.Fatalf("expected build status to be marked pending, but was marked %s", newBuild.Status.Phase)
675
+	}
676
+
677
+	newBuild.Status.Phase = buildapi.BuildPhaseComplete
678
+	clusterAdminClient.Builds(testutil.Namespace()).Update(newBuild)
679
+	event = waitForWatch(t, "build updated to complete", buildWatch)
680
+	if e, a := watchapi.Modified, event.Type; e != a {
681
+		t.Fatalf("expected watch event type %s, got %s", e, a)
682
+	}
683
+	newBuild = event.Object.(*buildapi.Build)
684
+	if newBuild.Status.Phase != buildapi.BuildPhaseComplete {
685
+		t.Fatalf("expected build status to be marked complete, but was marked %s", newBuild.Status.Phase)
686
+	}
687
+
688
+	clusterAdminKubeClient.Pods(testutil.Namespace()).Delete(buildapi.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0))
689
+	time.Sleep(10 * time.Second)
690
+	newBuild, err = clusterAdminClient.Builds(testutil.Namespace()).Get(newBuild.Name)
691
+	if err != nil {
692
+		t.Fatalf("unexpected error %v", err)
693
+	}
694
+	if newBuild.Status.Phase != buildapi.BuildPhaseComplete {
695
+		t.Fatalf("build status was updated to %s after deleting pod, should have stayed as %s", newBuild.Status.Phase, buildapi.BuildPhaseComplete)
696
+	}
697
+}
698
+
699
+func RunBuildConfigChangeControllerTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
700
+	config := configChangeBuildConfig()
701
+	created, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(config)
702
+	if err != nil {
703
+		t.Fatalf("Couldn't create BuildConfig: %v", err)
704
+	}
705
+
706
+	watch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
707
+	if err != nil {
708
+		t.Fatalf("Couldn't subscribe to Builds %v", err)
709
+	}
710
+	defer watch.Stop()
711
+
712
+	watch2, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
713
+	if err != nil {
714
+		t.Fatalf("Couldn't subscribe to BuildConfigs %v", err)
715
+	}
716
+	defer watch2.Stop()
717
+
718
+	// wait for initial build event
719
+	event := waitForWatch(t, "config change initial build added", watch)
720
+	if e, a := watchapi.Added, event.Type; e != a {
721
+		t.Fatalf("expected watch event type %s, got %s", e, a)
722
+	}
723
+
724
+	event = waitForWatch(t, "config change config updated", watch2)
725
+	if e, a := watchapi.Modified, event.Type; e != a {
726
+		t.Fatalf("expected watch event type %s, got %s", e, a)
727
+	}
728
+	if bc := event.Object.(*buildapi.BuildConfig); bc.Status.LastVersion == 0 {
729
+		t.Fatalf("expected build config lastversion to be greater than zero after build")
730
+	}
731
+}
732
+
733
+func configChangeBuildConfig() *buildapi.BuildConfig {
734
+	bc := &buildapi.BuildConfig{}
735
+	bc.Name = "testcfgbc"
736
+	bc.Namespace = testutil.Namespace()
737
+	bc.Spec.Source.Git = &buildapi.GitBuildSource{}
738
+	bc.Spec.Source.Git.URI = "git://github.com/openshift/ruby-hello-world.git"
739
+	bc.Spec.Strategy.DockerStrategy = &buildapi.DockerBuildStrategy{}
740
+	configChangeTrigger := buildapi.BuildTriggerPolicy{Type: buildapi.ConfigChangeBuildTriggerType}
741
+	bc.Spec.Triggers = append(bc.Spec.Triggers, configChangeTrigger)
742
+	return bc
743
+}
744
+
745
+func mockImageStream2(tag string) *imageapi.ImageStream {
746
+	return &imageapi.ImageStream{
747
+		ObjectMeta: kapi.ObjectMeta{Name: "test-image-trigger-repo"},
748
+
749
+		Spec: imageapi.ImageStreamSpec{
750
+			DockerImageRepository: "registry:8080/openshift/test-image-trigger",
751
+			Tags: map[string]imageapi.TagReference{
752
+				tag: {
753
+					From: &kapi.ObjectReference{
754
+						Kind: "DockerImage",
755
+						Name: "registry:8080/openshift/test-image-trigger:" + tag,
756
+					},
757
+				},
758
+			},
759
+		},
760
+	}
761
+}
762
+
763
+func mockImageStreamMapping(stream, image, tag, reference string) *imageapi.ImageStreamMapping {
764
+	// create a mapping to an image that doesn't exist
765
+	return &imageapi.ImageStreamMapping{
766
+		ObjectMeta: kapi.ObjectMeta{Name: stream},
767
+		Tag:        tag,
768
+		Image: imageapi.Image{
769
+			ObjectMeta: kapi.ObjectMeta{
770
+				Name: image,
771
+			},
772
+			DockerImageReference: reference,
773
+		},
774
+	}
775
+}
776
+
777
+func imageChangeBuildConfig(name string, strategy buildapi.BuildStrategy) *buildapi.BuildConfig {
778
+	return &buildapi.BuildConfig{
779
+		ObjectMeta: kapi.ObjectMeta{
780
+			Name:      name,
781
+			Namespace: testutil.Namespace(),
782
+			Labels:    map[string]string{"testlabel": "testvalue"},
783
+		},
784
+		Spec: buildapi.BuildConfigSpec{
785
+
786
+			RunPolicy: buildapi.BuildRunPolicyParallel,
787
+			CommonSpec: buildapi.CommonSpec{
788
+				Source: buildapi.BuildSource{
789
+					Git: &buildapi.GitBuildSource{
790
+						URI: "git://github.com/openshift/ruby-hello-world.git",
791
+					},
792
+					ContextDir: "contextimage",
793
+				},
794
+				Strategy: strategy,
795
+				Output: buildapi.BuildOutput{
796
+					To: &kapi.ObjectReference{
797
+						Kind: "ImageStreamTag",
798
+						Name: "test-image-trigger-repo:outputtag",
799
+					},
800
+				},
801
+			},
802
+			Triggers: []buildapi.BuildTriggerPolicy{
803
+				{
804
+					Type:        buildapi.ImageChangeBuildTriggerType,
805
+					ImageChange: &buildapi.ImageChangeTrigger{},
806
+				},
807
+			},
808
+		},
809
+	}
810
+}
811
+
812
+func stiStrategy(kind, name string) buildapi.BuildStrategy {
813
+	return buildapi.BuildStrategy{
814
+		SourceStrategy: &buildapi.SourceBuildStrategy{
815
+			From: kapi.ObjectReference{
816
+				Kind: kind,
817
+				Name: name,
818
+			},
819
+		},
820
+	}
821
+}
0 822
new file mode 100644
... ...
@@ -0,0 +1,52 @@
0
+package builds
1
+
2
+import (
3
+	"os"
4
+
5
+	g "github.com/onsi/ginkgo"
6
+
7
+	"github.com/openshift/origin/test/common/build"
8
+	exutil "github.com/openshift/origin/test/extended/util"
9
+)
10
+
11
+var _ = g.Describe("[builds][Slow][Compatibility] builds controller", func() {
12
+	defer g.GinkgoRecover()
13
+	var (
14
+		oc = exutil.NewCLI("compat-build-controllers", exutil.KubeConfigPath())
15
+	)
16
+
17
+	g.JustBeforeEach(func() {
18
+		os.Setenv("OS_TEST_NAMESPACE", oc.Namespace())
19
+	})
20
+
21
+	g.Describe("RunBuildControllerTest", func() {
22
+		g.It("should succeed", func() {
23
+			build.RunBuildControllerTest(g.GinkgoT(), oc.AdminREST(), oc.AdminKubeREST())
24
+		})
25
+	})
26
+	g.Describe("RunBuildPodControllerTest", func() {
27
+		g.It("should succeed", func() {
28
+			build.RunBuildPodControllerTest(g.GinkgoT(), oc.AdminREST(), oc.AdminKubeREST())
29
+		})
30
+	})
31
+	g.Describe("RunImageChangeTriggerTest [SkipPrevControllers]", func() {
32
+		g.It("should succeed", func() {
33
+			build.RunImageChangeTriggerTest(g.GinkgoT(), oc.AdminREST())
34
+		})
35
+	})
36
+	g.Describe("RunBuildDeleteTest", func() {
37
+		g.It("should succeed", func() {
38
+			build.RunBuildDeleteTest(g.GinkgoT(), oc.AdminREST(), oc.AdminKubeREST())
39
+		})
40
+	})
41
+	g.Describe("RunBuildRunningPodDeleteTest", func() {
42
+		g.It("should succeed", func() {
43
+			build.RunBuildRunningPodDeleteTest(g.GinkgoT(), oc.AdminREST(), oc.AdminKubeREST())
44
+		})
45
+	})
46
+	g.Describe("RunBuildConfigChangeControllerTest", func() {
47
+		g.It("should succeed", func() {
48
+			build.RunBuildConfigChangeControllerTest(g.GinkgoT(), oc.AdminREST(), oc.AdminKubeREST())
49
+		})
50
+	})
51
+})
0 52
new file mode 100755
... ...
@@ -0,0 +1,22 @@
0
+#!/bin/bash
1
+#
2
+# Runs extended compatibility tests with a previous version
3
+source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh"
4
+source "${OS_ROOT}/test/extended/setup.sh"
5
+
6
+# Previous version to test against
7
+PREVIOUS_VERSION="v1.3.0"
8
+
9
+export API_SERVER_VERSION="${RUN_PREVIOUS_API:+${PREVIOUS_VERSION}}"
10
+export CONTROLLER_VERSION="${RUN_PREVIOUS_CONTROLLER:+${PREVIOUS_VERSION}}"
11
+
12
+# For now, compatibility tests will not require a node
13
+# so tests can run more quickly
14
+export SKIP_NODE=1
15
+
16
+os::test::extended::setup
17
+os::test::extended::focus "$@"
18
+
19
+
20
+echo "[INFO] Running compatibility tests"
21
+FOCUS="\[Compatibility\]" SKIP="${SKIP_TESTS:-}" TEST_REPORT_FILE_NAME=compatibility os::test::extended::run -- -ginkgo.v -test.timeout 2h
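The ${VAR:+value} expansions above export the previous version string only when the corresponding RUN_PREVIOUS_* flag is set; otherwise the exported variable stays empty and the current binaries are used. A minimal sketch of that expansion behavior (values are illustrative):

    PREVIOUS_VERSION="v1.3.0"
    RUN_PREVIOUS_API=1
    echo "api=${RUN_PREVIOUS_API:+${PREVIOUS_VERSION}}"               # prints "api=v1.3.0"
    unset RUN_PREVIOUS_CONTROLLER
    echo "controller=${RUN_PREVIOUS_CONTROLLER:+${PREVIOUS_VERSION}}" # prints "controller=" (empty)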
... ...
@@ -22,19 +22,20 @@ ss=$(join '|' "${serial_exclude[@]}")
22 22
 
23 23
 
24 24
 # print the tests we are skipping
25
-echo "[INFO] The following tests are excluded:"
26
-TEST_REPORT_DIR= TEST_OUTPUT_QUIET=true ${EXTENDEDTEST} "--ginkgo.skip=${ss}" --ginkgo.dryRun --ginkgo.noColor | grep skip | cut -c 20- | sort
27
-echo
25
+os::log::info "The following tests are excluded:"
26
+SKIP_ONLY=1 PRINT_TESTS=1 os::test::extended::test_list "--ginkgo.skip=${ss}" 
27
+os::log::info ""
28 28
 
29 29
 exitstatus=0
30 30
 
31 31
 # run parallel tests
32 32
 nodes="${PARALLEL_NODES:-5}"
33
-echo "[INFO] Running parallel tests N=${nodes}"
34
-TEST_REPORT_FILE_NAME=core_parallel ${GINKGO} -v "-focus=${pf}" "-skip=${ps}" -p -nodes "${nodes}" ${EXTENDEDTEST} -- -ginkgo.v -test.timeout 6h || exitstatus=$?
33
+os::log::info "Running parallel tests N=${nodes}"
34
+FOCUS="${pf}" SKIP="${ps}" TEST_REPORT_FILE_NAME=core_parallel os::test::extended::run -p -nodes "${nodes}" -- -ginkgo.v -test.timeout 6h || exitstatus=$?
35 35
 
36 36
 # run tests in serial
37
-echo "[INFO] Running serial tests"
38
-TEST_REPORT_FILE_NAME=core_serial ${GINKGO} -v "-focus=${sf}" "-skip=${ss}" ${EXTENDEDTEST} -- -ginkgo.v -test.timeout 2h || exitstatus=$?
37
+os::log::info ""
38
+os::log::info "Running serial tests"
39
+FOCUS="${sf}" SKIP="${ss}" TEST_REPORT_FILE_NAME=core_serial os::test::extended::run -- -ginkgo.v -test.timeout 2h || exitstatus=$?
39 40
 
40 41
 exit $exitstatus
41 42
new file mode 100755
... ...
@@ -0,0 +1,5 @@
0
+#!/bin/bash
1
+#
2
+# Runs compatibility tests with a previous controller and API server version
3
+RUN_PREVIOUS_CONTROLLER=1 RUN_PREVIOUS_API=1 SKIP_TESTS="\[SkipPrevAPIAndControllers\]" \
4
+	"$(dirname "${BASH_SOURCE}")/compatibility.sh"
0 5
new file mode 100755
... ...
@@ -0,0 +1,5 @@
0
+#!/bin/bash
1
+#
2
+# Runs compatibility tests with a previous controller version
3
+RUN_PREVIOUS_CONTROLLER=1 SKIP_TESTS="\[SkipPrevControllers\]" \
4
+	"$(dirname "${BASH_SOURCE}")/compatibility.sh"
... ...
@@ -4,226 +4,310 @@
4 4
 
5 5
 # If invoked with arguments, executes the test directly.
6 6
 function os::test::extended::focus {
7
-  if [[ $# -ne 0 ]]; then
8
-    echo "[INFO] Running custom: $*"
9
-    tests=$(TEST_REPORT_DIR= TEST_OUTPUT_QUIET=true ${EXTENDEDTEST} --ginkgo.dryRun --ginkgo.noColor "$@" | col -b | grep -v "35mskip0m" | grep "1mok0m" | wc -l)
10
-    if [[ "${tests}" -eq 0 ]]; then
11
-      echo "[ERROR] No tests would be run"
12
-      exit 1
13
-    fi
14
-    ${EXTENDEDTEST} "$@"
15
-    exit $?
16
-  fi
7
+	if [[ $# -ne 0 ]]; then
8
+		os::log::info "Running custom: $*"
9
+		os::test::extended::test_list "$@"
10
+		if [[ "${TEST_COUNT}" -eq 0 ]]; then
11
+			os::log::error "No tests would be run"
12
+			exit 1
13
+		fi
14
+		${EXTENDEDTEST} "$@"
15
+		exit $?
16
+	fi
17 17
 }
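A usage sketch for the focus entry point above: compatibility.sh forwards its command-line arguments here after setup, so a one-off focused run could look like the following (the focus regex is only an example):

    os::test::extended::focus --ginkgo.focus="builds controller"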
18 18
 
19 19
 # Launches an extended server for OpenShift
20 20
 # TODO: this should be doing less, because clusters should be stood up outside
21
-#   and then tests are executed.  Tests that depend on fine grained setup should
22
-#   be done in other contexts.
23
-function os::test::extended::setup {
24
-  # build binaries
25
-  if [[ -z $(os::build::find-binary ginkgo) ]]; then
26
-    hack/build-go.sh vendor/github.com/onsi/ginkgo/ginkgo
27
-  fi
28
-  if [[ -z $(os::build::find-binary extended.test) ]]; then
29
-    hack/build-go.sh test/extended/extended.test
30
-  fi
31
-  if [[ -z $(os::build::find-binary openshift) ]]; then
32
-    hack/build-go.sh
33
-  fi
34
-
35
-  os::util::environment::setup_time_vars
36
-
37
-  # ensure proper relative directories are set
38
-  export GINKGO="$(os::build::find-binary ginkgo)"
39
-  export EXTENDEDTEST="$(os::build::find-binary extended.test)"
40
-  export TMPDIR=${BASETMPDIR:-/tmp}
41
-  export EXTENDED_TEST_PATH="$(pwd)/test/extended"
42
-  export KUBE_REPO_ROOT="$(pwd)/vendor/k8s.io/kubernetes"
43
-
44
-  # output tests instead of running
45
-  if [[ -n "${SHOW_ALL:-}" ]]; then
46
-    TEST_OUTPUT_QUIET=true ${EXTENDEDTEST} --ginkgo.dryRun --ginkgo.noColor | grep ok | grep -v skip | cut -c 20- | sort
47
-    exit 0
48
-  fi
49
-
50
-  # allow setup to be skipped
51
-  if [[ -z "${TEST_ONLY+x}" ]]; then
52
-    ensure_iptables_or_die
53
-
54
-    function cleanup()
55
-    {
56
-      out=$?
57
-      cleanup_openshift
58
-      echo "[INFO] Exiting"
59
-      return $out
60
-    }
61
-
62
-    trap "exit" INT TERM
63
-    trap "cleanup" EXIT
64
-    echo "[INFO] Starting server"
65
-
66
-    os::util::environment::setup_all_server_vars "test-extended/core"
67
-    os::util::environment::use_sudo
68
-    os::util::environment::setup_images_vars
69
-    reset_tmp_dir
70
-
71
-    # If the current system has the XFS volume dir mount point we configure
72
-    # in the test images, assume to use it which will allow the local storage
73
-    # quota tests to pass.
74
-    if [ -d "/mnt/openshift-xfs-vol-dir" ]; then
75
-      export VOLUME_DIR="/mnt/openshift-xfs-vol-dir"
76
-    else
77
-      echo "[WARN] /mnt/openshift-xfs-vol-dir does not exist, local storage quota tests may fail."
78
-    fi
79
-
80
-    os::log::start_system_logger
81
-
82
-    # when selinux is enforcing, the volume dir selinux label needs to be
83
-    # svirt_sandbox_file_t
84
-    #
85
-    # TODO: fix the selinux policy to either allow openshift_var_lib_dir_t
86
-    # or to default the volume dir to svirt_sandbox_file_t.
87
-    if selinuxenabled; then
88
-          sudo chcon -t svirt_sandbox_file_t ${VOLUME_DIR}
89
-    fi
90
-    configure_os_server
91
-    #turn on audit logging for extended tests ... mimic what is done in util.sh configure_os_server, but don't
92
-    # put change there - only want this for extended tests
93
-    echo "[INFO] Turn on audit logging"
94
-    cp ${SERVER_CONFIG_DIR}/master/master-config.yaml ${SERVER_CONFIG_DIR}/master/master-config.orig2.yaml
95
-    openshift ex config patch ${SERVER_CONFIG_DIR}/master/master-config.orig2.yaml --patch="{\"auditConfig\": {\"enabled\": true}}"  > ${SERVER_CONFIG_DIR}/master/master-config.yaml
96
-
97
-    # Similar to above check, if the XFS volume dir mount point exists enable
98
-    # local storage quota in node-config.yaml so these tests can pass:
99
-    if [ -d "/mnt/openshift-xfs-vol-dir" ]; then
100
-	# The ec2 images usually have ~5Gi of space defined for the xfs vol for the registry; want to give /registry a good chunk of that
101
-	# to store the images created when the extended tests run
102
-      sed -i 's/perFSGroup: null/perFSGroup: 4480Mi/' $NODE_CONFIG_DIR/node-config.yaml
103
-    fi
104
-    echo "[INFO] Using VOLUME_DIR=${VOLUME_DIR}"
105
-
106
-    # This is a bit hacky, but set the pod gc threshold appropriately for the garbage_collector test.
107
-    os::util::sed 's/\(controllerArguments:\ \)null/\1\n    terminated-pod-gc-threshold: ["100"]/' \
108
-      ${MASTER_CONFIG_DIR}/master-config.yaml
109
-
110
-    start_os_server
111
-
112
-    export KUBECONFIG="${ADMIN_KUBECONFIG}"
113
-
114
-    install_registry
115
-    wait_for_registry
116
-    DROP_SYN_DURING_RESTART=1 CREATE_ROUTER_CERT=1 install_router
117
-
118
-    echo "[INFO] Creating image streams"
119
-    oc create -n openshift -f examples/image-streams/image-streams-centos7.json --config="${ADMIN_KUBECONFIG}"
120
-  else
121
-    # be sure to set VOLUME_DIR if you are running with TEST_ONLY
122
-    echo "[INFO] Not starting server, VOLUME_DIR=${VOLUME_DIR:-}"
123
-  fi
21
+#		and then tests are executed. Tests that depend on fine-grained setup should
22
+#		be done in other contexts.
23
+function os::test::extended::setup () {
24
+	# build binaries
25
+	if [[ -z "$(os::build::find-binary ginkgo)" ]]; then
26
+		hack/build-go.sh vendor/github.com/onsi/ginkgo/ginkgo
27
+	fi
28
+	if [[ -z "$(os::build::find-binary extended.test)" ]]; then
29
+		hack/build-go.sh test/extended/extended.test
30
+	fi
31
+	if [[ -z "$(os::build::find-binary openshift)" ]]; then
32
+		hack/build-go.sh
33
+	fi
34
+
35
+	os::util::environment::setup_time_vars
36
+
37
+	# ensure proper relative directories are set
38
+	GINKGO="$(os::build::find-binary ginkgo)"
39
+	EXTENDEDTEST="$(os::build::find-binary extended.test)"
40
+	export GINKGO
41
+	export EXTENDEDTEST
42
+	export EXTENDED_TEST_PATH="${OS_ROOT}/test/extended"
43
+	export KUBE_REPO_ROOT="${OS_ROOT}/vendor/k8s.io/kubernetes"
44
+
45
+	# allow setup to be skipped
46
+	if [[ -z "${TEST_ONLY+x}" ]]; then
47
+		ensure_iptables_or_die
48
+
49
+		function cleanup() {
50
+			out=$?
51
+			cleanup_openshift
52
+			os::log::info "Exiting"
53
+			return $out
54
+		}
55
+
56
+		trap "exit" INT TERM
57
+		trap "cleanup" EXIT
58
+		os::log::info "Starting server"
59
+
60
+		os::util::environment::setup_all_server_vars "test-extended/core"
61
+		os::util::environment::use_sudo
62
+		os::util::environment::setup_images_vars
63
+		reset_tmp_dir
64
+
65
+		local sudo=${USE_SUDO:+sudo}
66
+
67
+		# If the current system has the XFS volume dir mount point that we configure
68
+		# in the test images, use it so that the local storage
69
+		# quota tests can pass.
70
+		LOCAL_STORAGE_QUOTA=""
71
+		if [[ -d "/mnt/openshift-xfs-vol-dir" ]] && ${sudo} lvs | grep -q "xfs"; then
72
+			LOCAL_STORAGE_QUOTA="1"
73
+			export VOLUME_DIR="/mnt/openshift-xfs-vol-dir"
74
+		else
75
+			os::log::warn "/mnt/openshift-xfs-vol-dir does not exist, local storage quota tests may fail."
76
+		fi
77
+
78
+		os::log::start_system_logger
79
+
80
+
81
+		if [[ -n "${SHOW_ALL:-}" ]]; then
82
+			SKIP_NODE=1
83
+		fi
84
+
85
+		# when selinux is enforcing, the volume dir selinux label needs to be
86
+		# svirt_sandbox_file_t
87
+		#
88
+		# TODO: fix the selinux policy to either allow openshift_var_lib_dir_t
89
+		# or to default the volume dir to svirt_sandbox_file_t.
90
+		if selinuxenabled; then
91
+			${sudo} chcon -t svirt_sandbox_file_t ${VOLUME_DIR}
92
+		fi
93
+		CONFIG_VERSION=""
94
+		if [[ -n "${API_SERVER_VERSION:-}" ]]; then
95
+			CONFIG_VERSION="${API_SERVER_VERSION}"
96
+		elif [[ -n "${CONTROLLER_VERSION:-}" ]]; then
97
+			CONFIG_VERSION="${CONTROLLER_VERSION}"
98
+		fi
99
+		os::start::configure_server "${CONFIG_VERSION}"
100
+		# Turn on audit logging for extended tests. This mimics what is done in util.sh configure_os_server,
101
+		# but we don't put the change there because we only want audit logging for extended tests.
102
+		os::log::info "Turn on audit logging"
103
+		cp "${SERVER_CONFIG_DIR}/master/master-config.yaml" "${SERVER_CONFIG_DIR}/master/master-config.orig2.yaml"
104
+		openshift ex config patch "${SERVER_CONFIG_DIR}/master/master-config.orig2.yaml" --patch="{\"auditConfig\": {\"enabled\": true}}"  > "${SERVER_CONFIG_DIR}/master/master-config.yaml"
105
+
106
+		# If the XFS volume dir mount point exists, enable local storage quota in node-config.yaml so these tests can pass:
107
+		if [[ -n "${LOCAL_STORAGE_QUOTA}" ]]; then
108
+			# The ec2 images usually have ~5Gi of space defined for the xfs vol for the registry; want to give /registry a good chunk of that
109
+			# to store the images created when the extended tests run
110
+			cp "${NODE_CONFIG_DIR}/node-config.yaml" "${NODE_CONFIG_DIR}/node-config.orig2.yaml"
111
+			openshift ex config patch "${NODE_CONFIG_DIR}/node-config.orig2.yaml" --patch='{"volumeConfig":{"localQuota":{"perFSGroup":"4480Mi"}}}' > "${NODE_CONFIG_DIR}/node-config.yaml"
112
+		fi
113
+		os::log::info "Using VOLUME_DIR=${VOLUME_DIR}"
114
+
115
+		# This is a bit hacky, but set the pod gc threshold appropriately for the garbage_collector test.
116
+		cp "${SERVER_CONFIG_DIR}/master/master-config.yaml" "${SERVER_CONFIG_DIR}/master/master-config.orig3.yaml"
117
+		openshift ex config patch "${SERVER_CONFIG_DIR}/master/master-config.orig3.yaml" --patch='{"kubernetesMasterConfig":{"controllerArguments":{"terminated-pod-gc-threshold":["100"]}}}' > "${SERVER_CONFIG_DIR}/master/master-config.yaml"
118
+
119
+		os::start::server "${API_SERVER_VERSION:-}" "${CONTROLLER_VERSION:-}" "${SKIP_NODE:-}"
120
+
121
+		export KUBECONFIG="${ADMIN_KUBECONFIG}"
122
+
123
+		install_registry
124
+		if [[ -z "${SKIP_NODE:-}" ]]; then
125
+			wait_for_registry
126
+		fi
127
+		DROP_SYN_DURING_RESTART=1 CREATE_ROUTER_CERT=1 install_router
128
+
129
+		os::log::info "Creating image streams"
130
+		oc create -n openshift -f "${OS_ROOT}/examples/image-streams/image-streams-centos7.json" --config="${ADMIN_KUBECONFIG}"
131
+	else
132
+		# be sure to set VOLUME_DIR if you are running with TEST_ONLY
133
+		os::log::info "Not starting server, VOLUME_DIR=${VOLUME_DIR:-}"
134
+	fi
124 135
 }
125 136
 
137
+# Run extended tests or print out a list of tests that need to be run
138
+# Input:
139
+# - FOCUS - the extended test focus
140
+# - SKIP - the tests to skip
141
+# - SHOW_ALL - if set, then only print out tests to be run
142
+# - Arguments - arguments to pass to ginkgo
143
+function os::test::extended::run () {
144
+	local listArgs=()
145
+	local runArgs=()
146
+	if [[ -n "${FOCUS:-}" ]]; then
147
+		listArgs+=("--ginkgo.focus=${FOCUS}")
148
+		runArgs+=("-focus=${FOCUS}")
149
+	fi
150
+	if [[ -n "${SKIP:-}" ]]; then
151
+		listArgs+=("--ginkgo.skip=${SKIP}")
152
+		runArgs+=("-skip=${SKIP}")
153
+	fi
154
+
155
+	if [[ -n "${SHOW_ALL:-}" ]]; then
156
+		PRINT_TESTS=1
157
+		os::test::extended::test_list "${listArgs[@]}"
158
+		return
159
+	fi
160
+
161
+	os::test::extended::test_list "${listArgs[@]}"
162
+
163
+	if [[ "${TEST_COUNT}" -eq 0 ]]; then
164
+		os::log::warn "No tests were selected"
165
+		return
166
+	fi
167
+
168
+	"${GINKGO}" -v "${runArgs[@]}" "${EXTENDEDTEST}" "$@"
169
+}
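As a usage sketch, the suites in this change drive the runner through the FOCUS/SKIP environment variables rather than raw ginkgo flags; for example, the compatibility suite invokes it as:

    FOCUS="\[Compatibility\]" SKIP="${SKIP_TESTS:-}" TEST_REPORT_FILE_NAME=compatibility \
        os::test::extended::run -- -ginkgo.v -test.timeout 2h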
170
+
171
+# Create a list of extended tests to be run with the given arguments
172
+# Input:
173
+# - Arguments to pass to ginkgo
174
+# - SKIP_ONLY - If set, only selects tests to be skipped
175
+# - PRINT_TESTS - If set, print the list of tests
176
+# Output:
177
+# - TEST_COUNT - the number of tests selected by the arguments
178
+function os::test::extended::test_list () {
179
+	local full_test_list=()
180
+	local selected_tests=()
181
+
182
+	while IFS= read -r; do
183
+		full_test_list+=( "${REPLY}" )
184
+	done < <(TEST_OUTPUT_QUIET=true "${EXTENDEDTEST}" "$@" --ginkgo.dryRun --ginkgo.noColor )
185
+	if [[ -n "${REPLY}" ]]; then full_test_list+=( "${REPLY}" ); fi
186
+
187
+	for test in "${full_test_list[@]}"; do
188
+		if [[ -n "${SKIP_ONLY:-}" ]]; then
189
+			if grep -q "35mskip" <<< "${test}"; then
190
+				selected_tests+=( "${test}" )
191
+			fi
192
+		else
193
+			if grep -q "1mok" <<< "${test}"; then
194
+				selected_tests+=( "${test}" )
195
+			fi
196
+		fi
197
+	done
198
+	if [[ -n "${PRINT_TESTS:-}" ]]; then
199
+		if [[ ${#selected_tests[@]} -eq 0 ]]; then
200
+			os::log::warn "No tests were selected"
201
+		else
202
+			printf '%s\n' "${selected_tests[@]}" | sort
203
+		fi
204
+	fi
205
+	export TEST_COUNT=${#selected_tests[@]}
206
+}
207
+readonly -f os::test::extended::test_list
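A usage sketch mirroring the core suite above: list the tests a skip regex would exclude without running anything, then read the selection count from TEST_COUNT (ss here is assumed to be the serial skip regex assembled in core.sh):

    SKIP_ONLY=1 PRINT_TESTS=1 os::test::extended::test_list "--ginkgo.skip=${ss}"
    os::log::info "tests selected by the skip regex: ${TEST_COUNT}"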
208
+
126 209
 # Not run by any suite
127 210
 readonly EXCLUDED_TESTS=(
128
-  "\[Skipped\]"
129
-  "\[Disruptive\]"
130
-  "\[Slow\]"
131
-  "\[Flaky\]"
132
-
133
-  "\[Feature:Performance\]"
134
-
135
-  # Depends on external components, may not need yet
136
-  Monitoring              # Not installed, should be
137
-  "Cluster level logging" # Not installed yet
138
-  Kibana                  # Not installed
139
-  Ubernetes               # Can't set zone labels today
140
-  kube-ui                 # Not installed by default
141
-  "^Kubernetes Dashboard"  # Not installed by default (also probbaly slow image pull)
142
-
143
-  "\[Feature:Federation\]"   # Not enabled yet
144
-  "\[Feature:PodAffinity\]"  # Not enabled yet
145
-  Ingress                    # Not enabled yet
146
-  "Cinder"                   # requires an OpenStack cluster
147
-  "should support r/w"       # hostPath: This test expects that host's tmp dir is WRITABLE by a container.  That isn't something we need to gaurantee for openshift.
148
-  "should check that the kubernetes-dashboard instance is alive" # we don't create this
149
-  "\[Feature:ManualPerformance\]" # requires /resetMetrics which we don't expose
150
-
151
-  # See the CanSupport implementation in upstream to determine wether these work.
152
-  "Ceph RBD"      # Works if ceph-common Binary installed (but we can't gaurantee this on all clusters).
153
-  "GlusterFS" # May work if /sbin/mount.glusterfs to be installed for plugin to work (also possibly blocked by serial pulling)
154
-  "should support r/w" # hostPath: This test expects that host's tmp dir is WRITABLE by a container.  That isn't something we need to guarantee for openshift.
155
-
156
-  "should allow starting 95 pods per node" # needs cherry-pick of https://github.com/kubernetes/kubernetes/pull/23945
157
-
158
-  # Need fixing
159
-  "Horizontal pod autoscaling" # needs heapster
160
-  "should provide Internet connection for containers" # Needs recursive DNS
161
-  PersistentVolume           # https://github.com/openshift/origin/pull/6884 for recycler
162
-  "mount an API token into pods" # We add 6 secrets, not 1
163
-  "ServiceAccounts should ensure a single API token exists" # We create lots of secrets
164
-  "Networking should function for intra-pod" # Needs two nodes, add equiv test for 1 node, then use networking suite
165
-  "should test kube-proxy"     # needs 2 nodes
166
-  "authentication: OpenLDAP"   # needs separate setup and bucketing for openldap bootstrapping
167
-  "should support exec through an HTTP proxy" # doesn't work because it requires a) static binary b) linux c) kubectl, https://github.com/openshift/origin/issues/7097
168
-  "NFS"                      # no permissions https://github.com/openshift/origin/pull/6884
169
-  "\[Feature:Example\]"      # may need to pre-pull images
170
-  "ResourceQuota and capture the life of a secret" # https://github.com/openshift/origin/issue/9414
171
-  "NodeProblemDetector"        # requires a non-master node to run on
172
-  "unchanging, static URL paths for kubernetes api services" # the test needs to exclude URLs that are not part of conformance (/logs)
173
-
174
-  # Needs triage to determine why it is failing
175
-  "Addon update"          # TRIAGE
176
-  SSH                     # TRIAGE
177
-  "\[Feature:Upgrade\]"   # TRIAGE
178
-  "SELinux relabeling"    # started failing
179
-  "schedule jobs on pod slaves use of jenkins with kubernetes plugin by creating slave from existing builder and adding it to Jenkins master" # https://github.com/openshift/origin/issues/7619
180
-  "openshift mongodb replication creating from a template" # flaking on deployment
181
-  "Update Demo should do a rolling update of a replication controller" # this is flaky and needs triaging
182
-
183
-  # Test will never work
184
-  "should proxy to cadvisor" # we don't expose cAdvisor port directly for security reasons
185
-
186
-  # Need to relax security restrictions
187
-  "validates that InterPod Affinity and AntiAffinity is respected if matching" # this *may* now be safe
188
-
189
-  # Need multiple nodes
190
-  "validates that InterPodAntiAffinity is respected if matching 2"
191
-
192
-  # Inordinately slow tests
193
-  "should create and stop a working application"
194
-  "should always delete fast" # will be uncommented in etcd3
211
+	"\[Skipped\]"
212
+	"\[Disruptive\]"
213
+	"\[Slow\]"
214
+	"\[Flaky\]"
215
+	"\[Compatibility\]"
216
+
217
+	"\[Feature:Performance\]"
218
+
219
+	# Depends on external components, may not need yet
220
+	Monitoring              # Not installed, should be
221
+	"Cluster level logging" # Not installed yet
222
+	Kibana                  # Not installed
223
+	Ubernetes               # Can't set zone labels today
224
+	kube-ui                 # Not installed by default
225
+	"^Kubernetes Dashboard"  # Not installed by default (also probably slow image pull)
226
+
227
+	"\[Feature:Federation\]"   # Not enabled yet
228
+	"\[Feature:PodAffinity\]"  # Not enabled yet
229
+	Ingress                    # Not enabled yet
230
+	"Cinder"                   # requires an OpenStack cluster
231
+	"should support r/w"       # hostPath: This test expects that host's tmp dir is WRITABLE by a container. That isn't something we need to guarantee for openshift.
232
+	"should check that the kubernetes-dashboard instance is alive" # we don't create this
233
+	"\[Feature:ManualPerformance\]" # requires /resetMetrics which we don't expose
234
+
235
+	# See the CanSupport implementation in upstream to determine whether these work.
236
+	"Ceph RBD"      # Works if the ceph-common binary is installed (but we can't guarantee this on all clusters).
237
+	"GlusterFS" # May work if /sbin/mount.glusterfs to be installed for plugin to work (also possibly blocked by serial pulling)
238
+	"should support r/w" # hostPath: This test expects that host's tmp dir is WRITABLE by a container.  That isn't something we need to guarantee for openshift.
239
+
240
+	"should allow starting 95 pods per node" # needs cherry-pick of https://github.com/kubernetes/kubernetes/pull/23945
241
+
242
+	# Need fixing
243
+	"Horizontal pod autoscaling" # needs heapster
244
+	"should provide Internet connection for containers" # Needs recursive DNS
245
+	PersistentVolume           # https://github.com/openshift/origin/pull/6884 for recycler
246
+	"mount an API token into pods" # We add 6 secrets, not 1
247
+	"ServiceAccounts should ensure a single API token exists" # We create lots of secrets
248
+	"Networking should function for intra-pod" # Needs two nodes, add equiv test for 1 node, then use networking suite
249
+	"should test kube-proxy"     # needs 2 nodes
250
+	"authentication: OpenLDAP"   # needs separate setup and bucketing for openldap bootstrapping
251
+	"should support exec through an HTTP proxy" # doesn't work because it requires a) static binary b) linux c) kubectl, https://github.com/openshift/origin/issues/7097
252
+	"NFS"                      # no permissions https://github.com/openshift/origin/pull/6884
253
+	"\[Feature:Example\]"      # may need to pre-pull images
254
+	"ResourceQuota and capture the life of a secret" # https://github.com/openshift/origin/issue/9414
255
+	"NodeProblemDetector"        # requires a non-master node to run on
256
+	"unchanging, static URL paths for kubernetes api services" # the test needs to exclude URLs that are not part of conformance (/logs)
257
+
258
+	# Needs triage to determine why it is failing
259
+	"Addon update"          # TRIAGE
260
+	SSH                     # TRIAGE
261
+	"\[Feature:Upgrade\]"   # TRIAGE
262
+	"SELinux relabeling"    # started failing
263
+	"schedule jobs on pod slaves use of jenkins with kubernetes plugin by creating slave from existing builder and adding it to Jenkins master" # https://github.com/openshift/origin/issues/7619
264
+	"openshift mongodb replication creating from a template" # flaking on deployment
265
+	"Update Demo should do a rolling update of a replication controller" # this is flaky and needs triaging
266
+
267
+	# Test will never work
268
+	"should proxy to cadvisor" # we don't expose cAdvisor port directly for security reasons
269
+
270
+	# Need to relax security restrictions
271
+	"validates that InterPod Affinity and AntiAffinity is respected if matching" # this *may* now be safe
272
+
273
+	# Need multiple nodes
274
+	"validates that InterPodAntiAffinity is respected if matching 2"
275
+
276
+	# Inordinately slow tests
277
+	"should create and stop a working application"
278
+	"should always delete fast" # will be uncommented in etcd3
195 279
 )
196 280
 
197 281
 readonly SERIAL_TESTS=(
198
-  "\[Serial\]"
199
-  "\[Feature:ManualPerformance\]" # requires isolation
200
-  "Service endpoints latency" # requires low latency
201
-  "\[Feature:HighDensityPerformance\]" # requires no other namespaces
282
+	"\[Serial\]"
283
+	"\[Feature:ManualPerformance\]" # requires isolation
284
+	"Service endpoints latency" # requires low latency
285
+	"\[Feature:HighDensityPerformance\]" # requires no other namespaces
202 286
 )
203 287
 
204 288
 readonly CONFORMANCE_TESTS=(
205
-  "\[Conformance\]"
206
-
207
-  "Services.*NodePort"
208
-  "ResourceQuota should"
209
-  "\[networking\] basic openshift networking"
210
-  "\[networking\]\[router\]"
211
-  "Ensure supplemental groups propagate to docker"
212
-  "EmptyDir"
213
-  "PetSet"
214
-  "PrivilegedPod should test privileged pod"
215
-  "Pods should support remote command execution"
216
-  "Pods should support retrieving logs from the container"
217
-  "Kubectl client Simple pod should support"
218
-  "Job should run a job to completion when tasks succeed"
219
-  "\[images\]\[mongodb\] openshift mongodb replication"
220
-  "\[job\] openshift can execute jobs controller"
221
-  "\[volumes\] Test local storage quota FSGroup"
222
-  "test deployment should run a deployment to completion"
223
-  "Variable Expansion"
224
-  "init containers"
225
-  "Clean up pods on node kubelet"
226
-  "\[Feature\:SecurityContext\]"
227
-  "should create a LimitRange with defaults"
228
-  "Generated release_1_2 clientset"
289
+	"\[Conformance\]"
290
+
291
+	"Services.*NodePort"
292
+	"ResourceQuota should"
293
+	"\[networking\] basic openshift networking"
294
+	"\[networking\]\[router\]"
295
+	"Ensure supplemental groups propagate to docker"
296
+	"EmptyDir"
297
+	"PetSet"
298
+	"PrivilegedPod should test privileged pod"
299
+	"Pods should support remote command execution"
300
+	"Pods should support retrieving logs from the container"
301
+	"Kubectl client Simple pod should support"
302
+	"Job should run a job to completion when tasks succeed"
303
+	"\[images\]\[mongodb\] openshift mongodb replication"
304
+	"\[job\] openshift can execute jobs controller"
305
+	"\[volumes\] Test local storage quota FSGroup"
306
+	"test deployment should run a deployment to completion"
307
+	"Variable Expansion"
308
+	"init containers"
309
+	"Clean up pods on node kubelet"
310
+	"\[Feature\:SecurityContext\]"
311
+	"should create a LimitRange with defaults"
312
+	"Generated release_1_2 clientset"
229 313
 )
... ...
@@ -1,41 +1,19 @@
1 1
 package integration
2 2
 
3 3
 import (
4
-	"fmt"
5
-	"sync/atomic"
6 4
 	"testing"
7
-	"time"
8 5
 
9 6
 	kapi "k8s.io/kubernetes/pkg/api"
10 7
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
11
-	"k8s.io/kubernetes/pkg/fields"
12
-	watchapi "k8s.io/kubernetes/pkg/watch"
13 8
 
14
-	buildapi "github.com/openshift/origin/pkg/build/api"
15 9
 	"github.com/openshift/origin/pkg/client"
16 10
 	"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
17 11
 	"github.com/openshift/origin/pkg/cmd/server/origin"
18
-	imageapi "github.com/openshift/origin/pkg/image/api"
12
+	"github.com/openshift/origin/test/common/build"
19 13
 	testutil "github.com/openshift/origin/test/util"
20 14
 	testserver "github.com/openshift/origin/test/util/server"
21 15
 )
22 16
 
23
-var (
24
-	//TODO: Make these externally configurable
25
-
26
-	// ConcurrentBuildControllersTestWait is the time that TestConcurrentBuildControllers waits
27
-	// for any other changes to happen when testing whether only a single build got processed
28
-	ConcurrentBuildControllersTestWait = 10 * time.Second
29
-
30
-	// ConcurrentBuildPodControllersTestWait is the time that TestConcurrentBuildPodControllers waits
31
-	// after a state transition to make sure other state transitions don't occur unexpectedly
32
-	ConcurrentBuildPodControllersTestWait = 10 * time.Second
33
-
34
-	// BuildControllersWatchTimeout is used by all tests to wait for watch events. In case where only
35
-	// a single watch event is expected, the test will fail after the timeout.
36
-	BuildControllersWatchTimeout = 60 * time.Second
37
-)
38
-
39 17
 type controllerCount struct {
40 18
 	BuildControllers,
41 19
 	BuildPodControllers,
... ...
@@ -43,119 +21,13 @@ type controllerCount struct {
43 43
 	ConfigChangeControllers int
44 44
 }
45 45
 
46
-func mockBuild() *buildapi.Build {
47
-	return &buildapi.Build{
48
-		ObjectMeta: kapi.ObjectMeta{
49
-			GenerateName: "mock-build",
50
-			Labels: map[string]string{
51
-				"label1":                     "value1",
52
-				"label2":                     "value2",
53
-				buildapi.BuildConfigLabel:    "mock-build-config",
54
-				buildapi.BuildRunPolicyLabel: string(buildapi.BuildRunPolicyParallel),
55
-			},
56
-		},
57
-		Spec: buildapi.BuildSpec{
58
-			CommonSpec: buildapi.CommonSpec{
59
-				Source: buildapi.BuildSource{
60
-					Git: &buildapi.GitBuildSource{
61
-						URI: "http://my.docker/build",
62
-					},
63
-					ContextDir: "context",
64
-				},
65
-				Strategy: buildapi.BuildStrategy{
66
-					DockerStrategy: &buildapi.DockerBuildStrategy{},
67
-				},
68
-				Output: buildapi.BuildOutput{
69
-					To: &kapi.ObjectReference{
70
-						Kind: "DockerImage",
71
-						Name: "namespace/builtimage",
72
-					},
73
-				},
74
-			},
75
-		},
76
-	}
77
-}
78
-
79 46
 // TestConcurrentBuildControllers tests the transition of a build from new to pending. Ensures that only a single New -> Pending
80 47
 // transition happens and that only a single pod is created during a set period of time.
81 48
 func TestConcurrentBuildControllers(t *testing.T) {
82 49
 	defer testutil.DumpEtcdOnFailure(t)
83 50
 	// Start a master with multiple BuildControllers
84 51
 	osClient, kClient := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t)
85
-
86
-	// Setup an error channel
87
-	errChan := make(chan error) // go routines will send a message on this channel if an error occurs. Once this happens the test is over
88
-
89
-	// Create a build
90
-	ns := testutil.Namespace()
91
-	b, err := osClient.Builds(ns).Create(mockBuild())
92
-	if err != nil {
93
-		t.Fatal(err)
94
-	}
95
-
96
-	// Start watching builds for New -> Pending transition
97
-	buildWatch, err := osClient.Builds(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", b.Name), ResourceVersion: b.ResourceVersion})
98
-	if err != nil {
99
-		t.Fatal(err)
100
-	}
101
-	defer buildWatch.Stop()
102
-	buildModifiedCount := int32(0)
103
-	go func() {
104
-		for e := range buildWatch.ResultChan() {
105
-			if e.Type != watchapi.Modified {
106
-				errChan <- fmt.Errorf("received an unexpected event of type: %s with object: %#v", e.Type, e.Object)
107
-			}
108
-			build, ok := e.Object.(*buildapi.Build)
109
-			if !ok {
110
-				errChan <- fmt.Errorf("received something other than build: %#v", e.Object)
111
-				break
112
-			}
113
-			// If unexpected status, throw error
114
-			if build.Status.Phase != buildapi.BuildPhasePending && build.Status.Phase != buildapi.BuildPhaseNew {
115
-				errChan <- fmt.Errorf("received unexpected build status: %s", build.Status.Phase)
116
-				break
117
-			}
118
-			atomic.AddInt32(&buildModifiedCount, 1)
119
-		}
120
-	}()
121
-
122
-	// Watch build pods as they are created
123
-	podWatch, err := kClient.Pods(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", buildapi.GetBuildPodName(b))})
124
-	if err != nil {
125
-		t.Fatal(err)
126
-	}
127
-	defer podWatch.Stop()
128
-	podAddedCount := int32(0)
129
-	go func() {
130
-		for e := range podWatch.ResultChan() {
131
-			// Look for creation events
132
-			if e.Type == watchapi.Added {
133
-				atomic.AddInt32(&podAddedCount, 1)
134
-			}
135
-		}
136
-	}()
137
-
138
-	select {
139
-	case err := <-errChan:
140
-		t.Errorf("Error: %v", err)
141
-	case <-time.After(ConcurrentBuildControllersTestWait):
142
-		if atomic.LoadInt32(&buildModifiedCount) < 1 {
143
-			t.Errorf("The build was modified an unexpected number of times. Got: %d, Expected: >= 1", buildModifiedCount)
144
-		}
145
-		if atomic.LoadInt32(&podAddedCount) != 1 {
146
-			t.Errorf("The build pod was created an unexpected number of times. Got: %d, Expected: 1", podAddedCount)
147
-		}
148
-	}
149
-}
150
-
151
-type buildControllerPodState struct {
152
-	PodPhase   kapi.PodPhase
153
-	BuildPhase buildapi.BuildPhase
154
-}
155
-
156
-type buildControllerPodTest struct {
157
-	Name   string
158
-	States []buildControllerPodState
52
+	build.RunBuildControllerTest(t, osClient, kClient)
159 53
 }
160 54
 
161 55
 // TestConcurrentBuildPodControllers tests the lifecycle of a build pod when running multiple controllers.
... ...
@@ -163,187 +35,38 @@ func TestConcurrentBuildPodControllers(t *testing.T) {
163 163
 	defer testutil.DumpEtcdOnFailure(t)
164 164
 	// Start a master with multiple BuildPodControllers
165 165
 	osClient, kClient := setupBuildControllerTest(controllerCount{BuildPodControllers: 5}, t)
166
-
167
-	ns := testutil.Namespace()
168
-	waitTime := ConcurrentBuildPodControllersTestWait
169
-
170
-	tests := []buildControllerPodTest{
171
-		{
172
-			Name: "running state test",
173
-			States: []buildControllerPodState{
174
-				{
175
-					PodPhase:   kapi.PodRunning,
176
-					BuildPhase: buildapi.BuildPhaseRunning,
177
-				},
178
-			},
179
-		},
180
-		{
181
-			Name: "build succeeded",
182
-			States: []buildControllerPodState{
183
-				{
184
-					PodPhase:   kapi.PodRunning,
185
-					BuildPhase: buildapi.BuildPhaseRunning,
186
-				},
187
-				{
188
-					PodPhase:   kapi.PodSucceeded,
189
-					BuildPhase: buildapi.BuildPhaseComplete,
190
-				},
191
-			},
192
-		},
193
-		{
194
-			Name: "build failed",
195
-			States: []buildControllerPodState{
196
-				{
197
-					PodPhase:   kapi.PodRunning,
198
-					BuildPhase: buildapi.BuildPhaseRunning,
199
-				},
200
-				{
201
-					PodPhase:   kapi.PodFailed,
202
-					BuildPhase: buildapi.BuildPhaseFailed,
203
-				},
204
-			},
205
-		},
206
-	}
207
-	for _, test := range tests {
208
-		// Setup communications channels
209
-		podReadyChan := make(chan *kapi.Pod) // Will receive a value when a build pod is ready
210
-		errChan := make(chan error)          // Will receive a value when an error occurs
211
-		stateReached := int32(0)
212
-
213
-		// Create a build
214
-		b, err := osClient.Builds(ns).Create(mockBuild())
215
-		if err != nil {
216
-			t.Fatal(err)
217
-		}
218
-
219
-		// Watch build pod for transition to pending
220
-		podWatch, err := kClient.Pods(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", buildapi.GetBuildPodName(b))})
221
-		if err != nil {
222
-			t.Fatal(err)
223
-		}
224
-		go func() {
225
-			for e := range podWatch.ResultChan() {
226
-				pod, ok := e.Object.(*kapi.Pod)
227
-				if !ok {
228
-					t.Fatalf("%s: unexpected object received: %#v\n", test.Name, e.Object)
229
-				}
230
-				if pod.Status.Phase == kapi.PodPending {
231
-					podReadyChan <- pod
232
-					break
233
-				}
234
-			}
235
-		}()
236
-
237
-		var pod *kapi.Pod
238
-		select {
239
-		case pod = <-podReadyChan:
240
-			if pod.Status.Phase != kapi.PodPending {
241
-				t.Errorf("Got wrong pod phase: %s", pod.Status.Phase)
242
-				podWatch.Stop()
243
-				continue
244
-			}
245
-
246
-		case <-time.After(BuildControllersWatchTimeout):
247
-			t.Errorf("Timed out waiting for build pod to be ready")
248
-			podWatch.Stop()
249
-			continue
250
-		}
251
-		podWatch.Stop()
252
-
253
-		for _, state := range test.States {
254
-			if err := kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
255
-				// Update pod state and verify that corresponding build state happens accordingly
256
-				pod, err := kClient.Pods(ns).Get(pod.Name)
257
-				if err != nil {
258
-					return err
259
-				}
260
-				if pod.Status.Phase == state.PodPhase {
261
-					return fmt.Errorf("another client altered the pod phase to %s: %#v", state.PodPhase, pod)
262
-				}
263
-				pod.Status.Phase = state.PodPhase
264
-				_, err = kClient.Pods(ns).UpdateStatus(pod)
265
-				return err
266
-			}); err != nil {
267
-				t.Fatal(err)
268
-			}
269
-
270
-			buildWatch, err := osClient.Builds(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", b.Name), ResourceVersion: b.ResourceVersion})
271
-			if err != nil {
272
-				t.Fatal(err)
273
-			}
274
-			defer buildWatch.Stop()
275
-			go func() {
276
-				done := false
277
-				for e := range buildWatch.ResultChan() {
278
-					var ok bool
279
-					b, ok = e.Object.(*buildapi.Build)
280
-					if !ok {
281
-						errChan <- fmt.Errorf("%s: unexpected object received: %#v", test.Name, e.Object)
282
-					}
283
-					if e.Type != watchapi.Modified {
284
-						errChan <- fmt.Errorf("%s: unexpected event received: %s, object: %#v", test.Name, e.Type, e.Object)
285
-					}
286
-					if done {
287
-						errChan <- fmt.Errorf("%s: unexpected build state: %#v", test.Name, e.Object)
288
-					} else if b.Status.Phase == state.BuildPhase {
289
-						done = true
290
-						atomic.StoreInt32(&stateReached, 1)
291
-					}
292
-				}
293
-			}()
294
-
295
-			select {
296
-			case err := <-errChan:
297
-				buildWatch.Stop()
298
-				t.Errorf("%s: Error: %v\n", test.Name, err)
299
-				break
300
-			case <-time.After(waitTime):
301
-				buildWatch.Stop()
302
-				if atomic.LoadInt32(&stateReached) != 1 {
303
-					t.Errorf("%s: Did not reach desired build state: %s", test.Name, state.BuildPhase)
304
-					break
305
-				}
306
-			}
307
-		}
308
-	}
166
+	build.RunBuildPodControllerTest(t, osClient, kClient)
309 167
 }
310 168
 
311 169
 func TestConcurrentBuildImageChangeTriggerControllers(t *testing.T) {
312 170
 	defer testutil.DumpEtcdOnFailure(t)
313 171
 	// Start a master with multiple ImageChangeTrigger controllers
314 172
 	osClient, _ := setupBuildControllerTest(controllerCount{ImageChangeControllers: 5}, t)
315
-	tag := "latest"
316
-	streamName := "test-image-trigger-repo"
317
-
318
-	imageStream := mockImageStream2(tag)
319
-	imageStreamMapping := mockImageStreamMapping(imageStream.Name, "someimage", tag, "registry:8080/openshift/test-image-trigger:"+tag)
320
-	strategy := stiStrategy("ImageStreamTag", streamName+":"+tag)
321
-	config := imageChangeBuildConfig("sti-imagestreamtag", strategy)
322
-	runImageChangeTriggerTest(t, osClient, imageStream, imageStreamMapping, config, tag)
173
+	build.RunImageChangeTriggerTest(t, osClient)
323 174
 }
324 175
 
325 176
 func TestBuildDeleteController(t *testing.T) {
326 177
 	defer testutil.DumpEtcdOnFailure(t)
327 178
 	osClient, kClient := setupBuildControllerTest(controllerCount{}, t)
328
-	runBuildDeleteTest(t, osClient, kClient)
179
+	build.RunBuildDeleteTest(t, osClient, kClient)
329 180
 }
330 181
 
331 182
 func TestBuildRunningPodDeleteController(t *testing.T) {
332 183
 	defer testutil.DumpEtcdOnFailure(t)
333 184
 	osClient, kClient := setupBuildControllerTest(controllerCount{}, t)
334
-	runBuildRunningPodDeleteTest(t, osClient, kClient)
185
+	build.RunBuildRunningPodDeleteTest(t, osClient, kClient)
335 186
 }
336 187
 
337 188
 func TestBuildCompletePodDeleteController(t *testing.T) {
338 189
 	defer testutil.DumpEtcdOnFailure(t)
339 190
 	osClient, kClient := setupBuildControllerTest(controllerCount{}, t)
340
-	runBuildCompletePodDeleteTest(t, osClient, kClient)
191
+	build.RunBuildCompletePodDeleteTest(t, osClient, kClient)
341 192
 }
342 193
 
343 194
 func TestConcurrentBuildConfigControllers(t *testing.T) {
344 195
 	defer testutil.DumpEtcdOnFailure(t)
345 196
 	osClient, kClient := setupBuildControllerTest(controllerCount{ConfigChangeControllers: 5}, t)
346
-	runBuildConfigChangeControllerTest(t, osClient, kClient)
197
+	build.RunBuildConfigChangeControllerTest(t, osClient, kClient)
347 198
 }
348 199
 
349 200
 func setupBuildControllerTest(counts controllerCount, t *testing.T) (*client.Client, *kclient.Client) {
... ...
@@ -396,437 +119,3 @@ func setupBuildControllerTest(counts controllerCount, t *testing.T) (*client.Cli
396 396
 	}
397 397
 	return clusterAdminClient, clusterAdminKubeClient
398 398
 }
399
-
400
-func waitForWatch(t *testing.T, name string, w watchapi.Interface) *watchapi.Event {
401
-	select {
402
-	case e := <-w.ResultChan():
403
-		return &e
404
-	case <-time.After(BuildControllersWatchTimeout):
405
-		t.Fatalf("Timed out waiting for watch: %s", name)
406
-		return nil
407
-	}
408
-}
409
-
410
-func runImageChangeTriggerTest(t *testing.T, clusterAdminClient *client.Client, imageStream *imageapi.ImageStream, imageStreamMapping *imageapi.ImageStreamMapping, config *buildapi.BuildConfig, tag string) {
411
-	created, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(config)
412
-	if err != nil {
413
-		t.Fatalf("Couldn't create BuildConfig: %v", err)
414
-	}
415
-
416
-	watch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
417
-	if err != nil {
418
-		t.Fatalf("Couldn't subscribe to Builds %v", err)
419
-	}
420
-
421
-	watch2, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
422
-	if err != nil {
423
-		t.Fatalf("Couldn't subscribe to BuildConfigs %v", err)
424
-	}
425
-	defer watch2.Stop()
426
-
427
-	imageStream, err = clusterAdminClient.ImageStreams(testutil.Namespace()).Create(imageStream)
428
-	if err != nil {
429
-		t.Fatalf("Couldn't create ImageStream: %v", err)
430
-	}
431
-
432
-	err = clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(imageStreamMapping)
433
-	if err != nil {
434
-		t.Fatalf("Couldn't create Image: %v", err)
435
-	}
436
-
437
-	// wait for initial build event from the creation of the imagerepo with tag latest
438
-	event := waitForWatch(t, "initial build added", watch)
439
-	if e, a := watchapi.Added, event.Type; e != a {
440
-		t.Fatalf("expected watch event type %s, got %s", e, a)
441
-	}
442
-	newBuild := event.Object.(*buildapi.Build)
443
-	strategy := newBuild.Spec.Strategy
444
-	switch {
445
-	case strategy.SourceStrategy != nil:
446
-		if strategy.SourceStrategy.From.Name != "registry:8080/openshift/test-image-trigger:"+tag {
447
-			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
448
-			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
449
-			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:"+tag, strategy.SourceStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
450
-		}
451
-	case strategy.DockerStrategy != nil:
452
-		if strategy.DockerStrategy.From.Name != "registry:8080/openshift/test-image-trigger:"+tag {
453
-			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
454
-			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
455
-			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:"+tag, strategy.DockerStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
456
-		}
457
-	case strategy.CustomStrategy != nil:
458
-		if strategy.CustomStrategy.From.Name != "registry:8080/openshift/test-image-trigger:"+tag {
459
-			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
460
-			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
461
-			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:"+tag, strategy.CustomStrategy.From.Name, i, bc.Spec.Triggers[0].ImageChange)
462
-		}
463
-	}
464
-	// Wait for an update on the specific build that was added
465
-	watch3, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", newBuild.Name), ResourceVersion: newBuild.ResourceVersion})
466
-	if err != nil {
467
-		t.Fatalf("Couldn't subscribe to Builds %v", err)
468
-	}
469
-	defer watch3.Stop()
470
-	event = waitForWatch(t, "initial build update", watch3)
471
-	if e, a := watchapi.Modified, event.Type; e != a {
472
-		t.Fatalf("expected watch event type %s, got %s", e, a)
473
-	}
474
-	newBuild = event.Object.(*buildapi.Build)
475
-	// Make sure the resolution of the build's docker image pushspec didn't mutate the persisted API object
476
-	if newBuild.Spec.Output.To.Name != "test-image-trigger-repo:outputtag" {
477
-		t.Fatalf("unexpected build output: %#v %#v", newBuild.Spec.Output.To, newBuild.Spec.Output)
478
-	}
479
-	if newBuild.Labels["testlabel"] != "testvalue" {
480
-		t.Fatalf("Expected build with label %s=%s from build config got %s=%s", "testlabel", "testvalue", "testlabel", newBuild.Labels["testlabel"])
481
-	}
482
-
483
-	// wait for build config to be updated
484
-WaitLoop:
485
-	for {
486
-		select {
487
-		case e := <-watch2.ResultChan():
488
-			event = &e
489
-			continue
490
-		case <-time.After(BuildControllersWatchTimeout):
491
-			break WaitLoop
492
-		}
493
-	}
494
-	updatedConfig := event.Object.(*buildapi.BuildConfig)
495
-	if err != nil {
496
-		t.Fatalf("Couldn't get BuildConfig: %v", err)
497
-	}
498
-	// the first tag did not have an image id, so the last trigger field is the pull spec
499
-	if updatedConfig.Spec.Triggers[0].ImageChange.LastTriggeredImageID != "registry:8080/openshift/test-image-trigger:"+tag {
500
-		t.Fatalf("Expected imageID equal to pull spec, got %#v", updatedConfig.Spec.Triggers[0].ImageChange)
501
-	}
502
-
503
-	// clear out the build/buildconfig watches before triggering a new build
504
-WaitLoop2:
505
-	for {
506
-		select {
507
-		case <-watch.ResultChan():
508
-			continue
509
-		case <-watch2.ResultChan():
510
-			continue
511
-		case <-time.After(BuildControllersWatchTimeout):
512
-			break WaitLoop2
513
-		}
514
-	}
515
-
516
-	// trigger a build by posting a new image
517
-	if err := clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(&imageapi.ImageStreamMapping{
518
-		ObjectMeta: kapi.ObjectMeta{
519
-			Namespace: testutil.Namespace(),
520
-			Name:      imageStream.Name,
521
-		},
522
-		Tag: tag,
523
-		Image: imageapi.Image{
524
-			ObjectMeta: kapi.ObjectMeta{
525
-				Name: "ref-2-random",
526
-			},
527
-			DockerImageReference: "registry:8080/openshift/test-image-trigger:ref-2-random",
528
-		},
529
-	}); err != nil {
530
-		t.Fatalf("unexpected error: %v", err)
531
-	}
532
-	event = waitForWatch(t, "second build created", watch)
533
-	if e, a := watchapi.Added, event.Type; e != a {
534
-		t.Fatalf("expected watch event type %s, got %s", e, a)
535
-	}
536
-	newBuild = event.Object.(*buildapi.Build)
537
-	strategy = newBuild.Spec.Strategy
538
-	switch {
539
-	case strategy.SourceStrategy != nil:
540
-		if strategy.SourceStrategy.From.Name != "registry:8080/openshift/test-image-trigger:ref-2-random" {
541
-			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
542
-			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
543
-			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:ref-2-random", strategy.SourceStrategy.From.Name, i, bc.Spec.Triggers[3].ImageChange)
544
-		}
545
-	case strategy.DockerStrategy != nil:
546
-		if strategy.DockerStrategy.From.Name != "registry:8080/openshift/test-image-trigger:ref-2-random" {
547
-			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
548
-			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
549
-			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:ref-2-random", strategy.DockerStrategy.From.Name, i, bc.Spec.Triggers[3].ImageChange)
550
-		}
551
-	case strategy.CustomStrategy != nil:
552
-		if strategy.CustomStrategy.From.Name != "registry:8080/openshift/test-image-trigger:ref-2-random" {
553
-			i, _ := clusterAdminClient.ImageStreams(testutil.Namespace()).Get(imageStream.Name)
554
-			bc, _ := clusterAdminClient.BuildConfigs(testutil.Namespace()).Get(config.Name)
555
-			t.Fatalf("Expected build with base image %s, got %s\n, imagerepo is %v\ntrigger is %s\n", "registry:8080/openshift/test-image-trigger:ref-2-random", strategy.CustomStrategy.From.Name, i, bc.Spec.Triggers[3].ImageChange)
556
-		}
557
-	}
558
-
559
-	// Listen to events on a specific build
560
-	watch4, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", newBuild.Name), ResourceVersion: newBuild.ResourceVersion})
561
-	defer watch4.Stop()
562
-
563
-	event = waitForWatch(t, "update on second build", watch4)
564
-	if e, a := watchapi.Modified, event.Type; e != a {
565
-		t.Fatalf("expected watch event type %s, got %s", e, a)
566
-	}
567
-	newBuild = event.Object.(*buildapi.Build)
568
-	// Make sure the resolution of the build's docker image pushspec didn't mutate the persisted API object
569
-	if newBuild.Spec.Output.To.Name != "test-image-trigger-repo:outputtag" {
570
-		t.Fatalf("unexpected build output: %#v %#v", newBuild.Spec.Output.To, newBuild.Spec.Output)
571
-	}
572
-	if newBuild.Labels["testlabel"] != "testvalue" {
573
-		t.Fatalf("Expected build with label %s=%s from build config got %s=%s", "testlabel", "testvalue", "testlabel", newBuild.Labels["testlabel"])
574
-	}
575
-
576
-WaitLoop3:
577
-	for {
578
-		select {
579
-		case e := <-watch2.ResultChan():
580
-			event = &e
581
-			continue
582
-		case <-time.After(BuildControllersWatchTimeout):
583
-			break WaitLoop3
584
-		}
585
-	}
586
-	updatedConfig = event.Object.(*buildapi.BuildConfig)
587
-	if e, a := "registry:8080/openshift/test-image-trigger:ref-2-random", updatedConfig.Spec.Triggers[0].ImageChange.LastTriggeredImageID; e != a {
588
-		t.Errorf("unexpected trigger id: expected %v, got %v", e, a)
589
-	}
590
-}
591
-
592
-func runBuildDeleteTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
593
-
594
-	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
595
-	if err != nil {
596
-		t.Fatalf("Couldn't subscribe to Builds %v", err)
597
-	}
598
-	defer buildWatch.Stop()
599
-
600
-	created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild())
601
-	if err != nil {
602
-		t.Fatalf("Couldn't create Build: %v", err)
603
-	}
604
-
605
-	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
606
-	if err != nil {
607
-		t.Fatalf("Couldn't subscribe to Pods %v", err)
608
-	}
609
-	defer podWatch.Stop()
610
-
611
-	// wait for the initial build event for the build we just created
612
-	event := waitForWatch(t, "initial build added", buildWatch)
613
-	if e, a := watchapi.Added, event.Type; e != a {
614
-		t.Fatalf("expected watch event type %s, got %s", e, a)
615
-	}
616
-	newBuild := event.Object.(*buildapi.Build)
617
-
618
-	// initial pod creation for build
619
-	event = waitForWatch(t, "build pod created", podWatch)
620
-	if e, a := watchapi.Added, event.Type; e != a {
621
-		t.Fatalf("expected watch event type %s, got %s", e, a)
622
-	}
623
-
624
-	clusterAdminClient.Builds(testutil.Namespace()).Delete(newBuild.Name)
625
-
626
-	event = waitForWatchType(t, "pod deleted due to build deleted", podWatch, watchapi.Deleted)
627
-	if e, a := watchapi.Deleted, event.Type; e != a {
628
-		t.Fatalf("expected watch event type %s, got %s", e, a)
629
-	}
630
-	pod := event.Object.(*kapi.Pod)
631
-	if expected := buildapi.GetBuildPodName(newBuild); pod.Name != expected {
632
-		t.Fatalf("Expected pod %s to be deleted, but pod %s was deleted", expected, pod.Name)
633
-	}
634
-
635
-}
636
-
637
-// waitForWatchType tolerates receiving 3 events before failing while watching for a particular event
638
-// type.
639
-func waitForWatchType(t *testing.T, name string, w watchapi.Interface, expect watchapi.EventType) *watchapi.Event {
640
-	tries := 3
641
-	for i := 0; i < tries; i++ {
642
-		select {
643
-		case e := <-w.ResultChan():
644
-			if e.Type != expect {
645
-				continue
646
-			}
647
-			return &e
648
-		case <-time.After(BuildControllersWatchTimeout):
649
-			t.Fatalf("Timed out waiting for watch: %s", name)
650
-			return nil
651
-		}
652
-	}
653
-	t.Fatalf("Waited for a %v event with %d tries but never received one", expect, tries)
654
-	return nil
655
-}
656
-
657
-func runBuildRunningPodDeleteTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
658
-
659
-	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
660
-	if err != nil {
661
-		t.Fatalf("Couldn't subscribe to Builds %v", err)
662
-	}
663
-	defer buildWatch.Stop()
664
-
665
-	created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild())
666
-	if err != nil {
667
-		t.Fatalf("Couldn't create Build: %v", err)
668
-	}
669
-
670
-	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
671
-	if err != nil {
672
-		t.Fatalf("Couldn't subscribe to Pods %v", err)
673
-	}
674
-	defer podWatch.Stop()
675
-
676
-	// wait for the initial build event for the build we just created
677
-	event := waitForWatch(t, "initial build added", buildWatch)
678
-	if e, a := watchapi.Added, event.Type; e != a {
679
-		t.Fatalf("expected watch event type %s, got %s", e, a)
680
-	}
681
-	newBuild := event.Object.(*buildapi.Build)
682
-	buildName := newBuild.Name
683
-	podName := newBuild.Name + "-build"
684
-
685
-	// initial pod creation for build
686
-	for {
687
-		event = waitForWatch(t, "build pod created", podWatch)
688
-		newPod := event.Object.(*kapi.Pod)
689
-		if newPod.Name == podName {
690
-			break
691
-		}
692
-	}
693
-	if e, a := watchapi.Added, event.Type; e != a {
694
-		t.Fatalf("expected watch event type %s, got %s", e, a)
695
-	}
696
-
697
-	// throw away events from other builds; we only care about the new build
698
-	// we just triggered
699
-	for {
700
-		event = waitForWatch(t, "build updated to pending", buildWatch)
701
-		newBuild = event.Object.(*buildapi.Build)
702
-		if newBuild.Name == buildName {
703
-			break
704
-		}
705
-	}
706
-	if e, a := watchapi.Modified, event.Type; e != a {
707
-		t.Fatalf("expected watch event type %s, got %s", e, a)
708
-	}
709
-	if newBuild.Status.Phase != buildapi.BuildPhasePending {
710
-		t.Fatalf("expected build status to be marked pending, but was marked %s", newBuild.Status.Phase)
711
-	}
712
-
713
-	clusterAdminKubeClient.Pods(testutil.Namespace()).Delete(buildapi.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0))
714
-	event = waitForWatch(t, "build updated to error", buildWatch)
715
-	if e, a := watchapi.Modified, event.Type; e != a {
716
-		t.Fatalf("expected watch event type %s, got %s", e, a)
717
-	}
718
-	newBuild = event.Object.(*buildapi.Build)
719
-	if newBuild.Status.Phase != buildapi.BuildPhaseError {
720
-		t.Fatalf("expected build status to be marked error, but was marked %s", newBuild.Status.Phase)
721
-	}
722
-}
723
-
724
-func runBuildCompletePodDeleteTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
725
-
726
-	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
727
-	if err != nil {
728
-		t.Fatalf("Couldn't subscribe to Builds %v", err)
729
-	}
730
-	defer buildWatch.Stop()
731
-
732
-	created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild())
733
-	if err != nil {
734
-		t.Fatalf("Couldn't create Build: %v", err)
735
-	}
736
-
737
-	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
738
-	if err != nil {
739
-		t.Fatalf("Couldn't subscribe to Pods %v", err)
740
-	}
741
-	defer podWatch.Stop()
742
-
743
-	// wait for the initial build event for the build we just created
744
-	event := waitForWatch(t, "initial build added", buildWatch)
745
-	if e, a := watchapi.Added, event.Type; e != a {
746
-		t.Fatalf("expected watch event type %s, got %s", e, a)
747
-	}
748
-	newBuild := event.Object.(*buildapi.Build)
749
-
750
-	// initial pod creation for build
751
-	event = waitForWatch(t, "build pod created", podWatch)
752
-	if e, a := watchapi.Added, event.Type; e != a {
753
-		t.Fatalf("expected watch event type %s, got %s", e, a)
754
-	}
755
-
756
-	event = waitForWatch(t, "build updated to pending", buildWatch)
757
-	if e, a := watchapi.Modified, event.Type; e != a {
758
-		t.Fatalf("expected watch event type %s, got %s", e, a)
759
-	}
760
-
761
-	newBuild = event.Object.(*buildapi.Build)
762
-	if newBuild.Status.Phase != buildapi.BuildPhasePending {
763
-		t.Fatalf("expected build status to be marked pending, but was marked %s", newBuild.Status.Phase)
764
-	}
765
-
766
-	newBuild.Status.Phase = buildapi.BuildPhaseComplete
767
-	clusterAdminClient.Builds(testutil.Namespace()).Update(newBuild)
768
-	event = waitForWatch(t, "build updated to complete", buildWatch)
769
-	if e, a := watchapi.Modified, event.Type; e != a {
770
-		t.Fatalf("expected watch event type %s, got %s", e, a)
771
-	}
772
-	newBuild = event.Object.(*buildapi.Build)
773
-	if newBuild.Status.Phase != buildapi.BuildPhaseComplete {
774
-		t.Fatalf("expected build status to be marked complete, but was marked %s", newBuild.Status.Phase)
775
-	}
776
-
777
-	clusterAdminKubeClient.Pods(testutil.Namespace()).Delete(buildapi.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0))
778
-	time.Sleep(10 * time.Second)
779
-	newBuild, err = clusterAdminClient.Builds(testutil.Namespace()).Get(newBuild.Name)
780
-	if err != nil {
781
-		t.Fatalf("unexpected error %v", err)
782
-	}
783
-	if newBuild.Status.Phase != buildapi.BuildPhaseComplete {
784
-		t.Fatalf("build status was updated to %s after deleting pod, should have stayed as %s", newBuild.Status.Phase, buildapi.BuildPhaseComplete)
785
-	}
786
-}
787
-
788
-func runBuildConfigChangeControllerTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
789
-	config := configChangeBuildConfig()
790
-	created, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(config)
791
-	if err != nil {
792
-		t.Fatalf("Couldn't create BuildConfig: %v", err)
793
-	}
794
-
795
-	watch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
796
-	if err != nil {
797
-		t.Fatalf("Couldn't subscribe to Builds %v", err)
798
-	}
799
-	defer watch.Stop()
800
-
801
-	watch2, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
802
-	if err != nil {
803
-		t.Fatalf("Couldn't subscribe to BuildConfigs %v", err)
804
-	}
805
-	defer watch2.Stop()
806
-
807
-	// wait for initial build event
808
-	event := waitForWatch(t, "config change initial build added", watch)
809
-	if e, a := watchapi.Added, event.Type; e != a {
810
-		t.Fatalf("expected watch event type %s, got %s", e, a)
811
-	}
812
-
813
-	event = waitForWatch(t, "config change config updated", watch2)
814
-	if e, a := watchapi.Modified, event.Type; e != a {
815
-		t.Fatalf("expected watch event type %s, got %s", e, a)
816
-	}
817
-	if bc := event.Object.(*buildapi.BuildConfig); bc.Status.LastVersion == 0 {
818
-		t.Fatalf("expected build config lastversion to be greater than zero after build")
819
-	}
820
-}
821
-
822
-func configChangeBuildConfig() *buildapi.BuildConfig {
823
-	bc := &buildapi.BuildConfig{}
824
-	bc.Name = "testcfgbc"
825
-	bc.Namespace = testutil.Namespace()
826
-	bc.Spec.Source.Git = &buildapi.GitBuildSource{}
827
-	bc.Spec.Source.Git.URI = "git://github.com/openshift/ruby-hello-world.git"
828
-	bc.Spec.Strategy.DockerStrategy = &buildapi.DockerBuildStrategy{}
829
-	configChangeTrigger := buildapi.BuildTriggerPolicy{Type: buildapi.ConfigChangeBuildTriggerType}
830
-	bc.Spec.Triggers = append(bc.Spec.Triggers, configChangeTrigger)
831
-	return bc
832
-}