| ... | ... |
@@ -180,6 +180,9 @@ For example, start the OpenShift server, create a "test" project, and then run |
| 180 | 180 |
$ oc new-project test |
| 181 | 181 |
$ test/cmd/newapp.sh |
| 182 | 182 |
|
| 183 |
+In order to run the suite, generate a jUnit XML report, and see a summary of the test suite, use: |
|
| 184 |
+ |
|
| 185 |
+ $ JUNIT_REPORT='true' hack/test-cmd.sh |
|
| 183 | 186 |
|
| 184 | 187 |
### End-to-End (e2e) and Extended Tests |
| 185 | 188 |
|
| ... | ... |
@@ -5,6 +5,7 @@ |
| 5 | 5 |
# We assume ${OS_ROOT} is set
|
| 6 | 6 |
source "${OS_ROOT}/hack/text.sh"
|
| 7 | 7 |
source "${OS_ROOT}/hack/util.sh"
|
| 8 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 8 | 9 |
|
| 9 | 10 |
# expect_success runs the cmd and expects an exit code of 0 |
| 10 | 11 |
function os::cmd::expect_success() {
|
| ... | ... |
@@ -146,6 +147,18 @@ os_cmd_internal_tmperr="${os_cmd_internal_tmpdir}/tmp_stderr.log"
|
| 146 | 146 |
# command to be tested is suppressed unless either `VERBOSE=1` or the test fails. This function bypasses |
| 147 | 147 |
# any error exiting settings or traps set by upstream callers by masking the return code of the command |
| 148 | 148 |
# with the return code of setting the result variable on failure. |
| 149 |
+# |
|
| 150 |
+# Globals: |
|
| 151 |
+# - JUNIT_REPORT_OUTPUT |
|
| 152 |
+# - VERBOSE |
|
| 153 |
+# Arguments: |
|
| 154 |
+# - 1: the command to run |
|
| 155 |
+# - 2: command evaluation assertion to use |
|
| 156 |
+# - 3: text to test for |
|
| 157 |
+# - 4: text assertion to use |
|
| 158 |
+# Returns: |
|
| 159 |
+# - 0: if all assertions met |
|
| 160 |
+# - 1: if any assertions fail |
|
| 149 | 161 |
function os::cmd::internal::expect_exit_code_run_grep() {
|
| 150 | 162 |
local cmd=$1 |
| 151 | 163 |
# default expected cmd code to 0 for success |
| ... | ... |
@@ -155,10 +168,15 @@ function os::cmd::internal::expect_exit_code_run_grep() {
|
| 155 | 155 |
# default expected test code to 0 for success |
| 156 | 156 |
local test_eval_func=${4:-os::cmd::internal::success_func}
|
| 157 | 157 |
|
| 158 |
+ local -a junit_log |
|
| 159 |
+ |
|
| 158 | 160 |
os::cmd::internal::init_tempdir |
| 161 |
+ os::test::junit::declare_test_start |
|
| 159 | 162 |
|
| 160 | 163 |
local name=$(os::cmd::internal::describe_call "${cmd}" "${cmd_eval_func}" "${grep_args}" "${test_eval_func}")
|
| 161 | 164 |
echo "Running ${name}..."
|
| 165 |
+ # for ease of parsing, we want the entire declaration on one line, so we replace '\n' with ';' |
|
| 166 |
+ junit_log+=( "${name//$'\n'/;}" )
|
|
| 162 | 167 |
|
| 163 | 168 |
local start_time=$(os::cmd::internal::seconds_since_epoch) |
| 164 | 169 |
|
| ... | ... |
@@ -181,19 +199,31 @@ function os::cmd::internal::expect_exit_code_run_grep() {
|
| 181 | 181 |
os::text::clear_last_line |
| 182 | 182 |
done |
| 183 | 183 |
|
| 184 |
+ local return_code |
|
| 184 | 185 |
if (( cmd_succeeded && test_succeeded )); then |
| 185 | 186 |
os::text::print_green "SUCCESS after ${time_elapsed}s: ${name}"
|
| 187 |
+ junit_log+=( "SUCCESS after ${time_elapsed}s: ${name//$'\n'/;}" )
|
|
| 188 |
+ |
|
| 186 | 189 |
if [[ -n ${VERBOSE-} ]]; then
|
| 187 | 190 |
os::cmd::internal::print_results |
| 188 | 191 |
fi |
| 189 |
- return 0 |
|
| 192 |
+ return_code=0 |
|
| 190 | 193 |
else |
| 191 | 194 |
local cause=$(os::cmd::internal::assemble_causes "${cmd_succeeded}" "${test_succeeded}")
|
| 192 | 195 |
|
| 193 | 196 |
os::text::print_red_bold "FAILURE after ${time_elapsed}s: ${name}: ${cause}"
|
| 197 |
+ junit_log+=( "FAILURE after ${time_elapsed}s: ${name//$'\n'/;}: ${cause}" )
|
|
| 198 |
+ |
|
| 194 | 199 |
os::text::print_red "$(os::cmd::internal::print_results)" |
| 195 |
- return 1 |
|
| 200 |
+ return_code=1 |
|
| 196 | 201 |
fi |
| 202 |
+ |
|
| 203 |
+ junit_log+=( "$(os::cmd::internal::print_results)" ) |
|
| 204 |
+ # append inside of a subshell so that IFS doesn't get propagated out |
|
| 205 |
+ ( IFS=$'\n'; echo "${junit_log[*]}" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" )
|
|
| 206 |
+ os::test::junit::declare_test_end |
|
| 207 |
+ return "${return_code}"
|
|
| 208 |
+ |
|
| 197 | 209 |
} |
| 198 | 210 |
|
| 199 | 211 |
# os::cmd::internal::init_tempdir initializes the temporary directory |
| ... | ... |
@@ -367,14 +397,14 @@ function os::cmd::internal::compress_output() {
|
| 367 | 367 |
function os::cmd::internal::print_results() {
|
| 368 | 368 |
if [[ -s "${os_cmd_internal_tmpout}" ]]; then
|
| 369 | 369 |
echo "Standard output from the command:" |
| 370 |
- cat "${os_cmd_internal_tmpout}"
|
|
| 370 |
+ cat "${os_cmd_internal_tmpout}"; echo
|
|
| 371 | 371 |
else |
| 372 | 372 |
echo "There was no output from the command." |
| 373 | 373 |
fi |
| 374 | 374 |
|
| 375 | 375 |
if [[ -s "${os_cmd_internal_tmperr}" ]]; then
|
| 376 | 376 |
echo "Standard error from the command:" |
| 377 |
- cat "${os_cmd_internal_tmperr}"
|
|
| 377 |
+ cat "${os_cmd_internal_tmperr}"; echo
|
|
| 378 | 378 |
else |
| 379 | 379 |
echo "There was no error output from the command." |
| 380 | 380 |
fi |
| ... | ... |
@@ -404,19 +434,36 @@ function os::cmd::internal::assemble_causes() {
|
| 404 | 404 |
# either `VERBOSE=1` or the test fails. This function bypasses any error exiting settings or traps |
| 405 | 405 |
# set by upstream callers by masking the return code of the command with the return code of setting |
| 406 | 406 |
# the result variable on failure. |
| 407 |
+# |
|
| 408 |
+# Globals: |
|
| 409 |
+# - JUNIT_REPORT_OUTPUT |
|
| 410 |
+# - VERBOSE |
|
| 411 |
+# Arguments: |
|
| 412 |
+# - 1: the command to run |
|
| 413 |
+# - 2: command evaluation assertion to use |
|
| 414 |
+# - 3: timeout duration |
|
| 415 |
+# - 4: interval duration |
|
| 416 |
+# Returns: |
|
| 417 |
+# - 0: if all assertions met before timeout |
|
| 418 |
+# - 1: if timeout occurs |
|
| 407 | 419 |
function os::cmd::internal::run_until_exit_code() {
|
| 408 | 420 |
local cmd=$1 |
| 409 | 421 |
local cmd_eval_func=$2 |
| 410 | 422 |
local duration=$3 |
| 411 | 423 |
local interval=$4 |
| 412 | 424 |
|
| 425 |
+ local -a junit_log |
|
| 426 |
+ |
|
| 413 | 427 |
os::cmd::internal::init_tempdir |
| 428 |
+ os::test::junit::declare_test_start |
|
| 414 | 429 |
|
| 415 | 430 |
local description=$(os::cmd::internal::describe_call "${cmd}" "${cmd_eval_func}")
|
| 416 | 431 |
local duration_seconds=$(echo "scale=3; $(( duration )) / 1000" | bc | xargs printf '%5.3f') |
| 417 | 432 |
local description="${description}; re-trying every ${interval}s until completion or ${duration_seconds}s"
|
| 418 | 433 |
echo "Running ${description}..."
|
| 419 |
- |
|
| 434 |
+ # for ease of parsing, we want the entire declaration on one line, so we replace '\n' with ';' |
|
| 435 |
+ junit_log+=( "${description//$'\n'/;}" )
|
|
| 436 |
+ |
|
| 420 | 437 |
local start_time=$(os::cmd::internal::seconds_since_epoch) |
| 421 | 438 |
|
| 422 | 439 |
local deadline=$(( $(date +%s000) + $duration )) |
| ... | ... |
@@ -440,18 +487,27 @@ function os::cmd::internal::run_until_exit_code() {
|
| 440 | 440 |
os::text::clear_last_line |
| 441 | 441 |
done |
| 442 | 442 |
|
| 443 |
+ local return_code |
|
| 443 | 444 |
if (( cmd_succeeded )); then |
| 444 |
- |
|
| 445 | 445 |
os::text::print_green "SUCCESS after ${time_elapsed}s: ${description}"
|
| 446 |
+ junit_log+=( "SUCCESS after ${time_elapsed}s: ${description//$'\n'/;}" )
|
|
| 447 |
+ |
|
| 446 | 448 |
if [[ -n ${VERBOSE-} ]]; then
|
| 447 | 449 |
os::cmd::internal::print_try_until_results |
| 448 | 450 |
fi |
| 449 |
- return 0 |
|
| 451 |
+ return_code=0 |
|
| 450 | 452 |
else |
| 451 | 453 |
os::text::print_red_bold "FAILURE after ${time_elapsed}s: ${description}: the command timed out"
|
| 454 |
+ junit_log+=( "FAILURE after ${time_elapsed}s: ${description//$'\n'/;}: the command timed out" )
|
|
| 455 |
+ |
|
| 452 | 456 |
os::text::print_red "$(os::cmd::internal::print_try_until_results)" |
| 453 |
- return 1 |
|
| 457 |
+ return_code=1 |
|
| 454 | 458 |
fi |
| 459 |
+ |
|
| 460 |
+ junit_log+=( "$(os::cmd::internal::print_try_until_results)" ) |
|
| 461 |
+ ( IFS=$'\n'; echo "${junit_log[*]}" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" )
|
|
| 462 |
+ os::test::junit::declare_test_end |
|
| 463 |
+ return "${return_code}"
|
|
| 455 | 464 |
} |
| 456 | 465 |
|
| 457 | 466 |
# os::cmd::internal::run_until_text runs the provided command until the command output contains the |
| ... | ... |
@@ -459,19 +515,36 @@ function os::cmd::internal::run_until_exit_code() {
|
| 459 | 459 |
# either `VERBOSE=1` or the test fails. This function bypasses any error exiting settings or traps |
| 460 | 460 |
# set by upstream callers by masking the return code of the command with the return code of setting |
| 461 | 461 |
# the result variable on failure. |
| 462 |
+# |
|
| 463 |
+# Globals: |
|
| 464 |
+# - JUNIT_REPORT_OUTPUT |
|
| 465 |
+# - VERBOSE |
|
| 466 |
+# Arguments: |
|
| 467 |
+# - 1: the command to run |
|
| 468 |
+# - 2: text to test for |
|
| 469 |
+# - 3: timeout duration |
|
| 470 |
+# - 4: interval duration |
|
| 471 |
+# Returns: |
|
| 472 |
+# - 0: if all assertions met before timeout |
|
| 473 |
+# - 1: if timeout occurs |
|
| 462 | 474 |
function os::cmd::internal::run_until_text() {
|
| 463 | 475 |
local cmd=$1 |
| 464 | 476 |
local text=$2 |
| 465 | 477 |
local duration=$3 |
| 466 | 478 |
local interval=$4 |
| 467 | 479 |
|
| 480 |
+ local -a junit_log |
|
| 481 |
+ |
|
| 468 | 482 |
os::cmd::internal::init_tempdir |
| 483 |
+ os::test::junit::declare_test_start |
|
| 469 | 484 |
|
| 470 | 485 |
local description=$(os::cmd::internal::describe_call "${cmd}" "" "${text}" "os::cmd::internal::success_func")
|
| 471 | 486 |
local duration_seconds=$(echo "scale=3; $(( duration )) / 1000" | bc | xargs printf '%5.3f') |
| 472 | 487 |
local description="${description}; re-trying every ${interval}s until completion or ${duration_seconds}s"
|
| 473 | 488 |
echo "Running ${description}..."
|
| 474 |
- |
|
| 489 |
+ # for ease of parsing, we want the entire declaration on one line, so we replace '\n' with ';' |
|
| 490 |
+ junit_log+=( "${description//$'\n'/;}" )
|
|
| 491 |
+ |
|
| 475 | 492 |
local start_time=$(os::cmd::internal::seconds_since_epoch) |
| 476 | 493 |
|
| 477 | 494 |
local deadline=$(( $(date +%s000) + $duration )) |
| ... | ... |
@@ -497,16 +570,26 @@ function os::cmd::internal::run_until_text() {
|
| 497 | 497 |
os::text::clear_last_line |
| 498 | 498 |
done |
| 499 | 499 |
|
| 500 |
+ local return_code |
|
| 500 | 501 |
if (( test_succeeded )); then |
| 501 |
- |
|
| 502 | 502 |
os::text::print_green "SUCCESS after ${time_elapsed}s: ${description}"
|
| 503 |
+ junit_log+=( "SUCCESS after ${time_elapsed}s: ${description//$'\n'/;}" )
|
|
| 504 |
+ |
|
| 503 | 505 |
if [[ -n ${VERBOSE-} ]]; then
|
| 504 | 506 |
os::cmd::internal::print_try_until_results |
| 505 | 507 |
fi |
| 506 |
- return 0 |
|
| 508 |
+ return_code=0 |
|
| 507 | 509 |
else |
| 508 | 510 |
os::text::print_red_bold "FAILURE after ${time_elapsed}s: ${description}: the command timed out"
|
| 511 |
+ junit_log+=( "FAILURE after ${time_elapsed}s: ${description//$'\n'/;}: the command timed out" )
|
|
| 512 |
+ |
|
| 509 | 513 |
os::text::print_red "$(os::cmd::internal::print_try_until_results)" |
| 510 |
- return 1 |
|
| 514 |
+ os::test::junit::declare_test_end |
|
| 515 |
+ return_code=1 |
|
| 511 | 516 |
fi |
| 517 |
+ |
|
| 518 |
+ junit_log+=( "$(os::cmd::internal::print_try_until_results)" ) |
|
| 519 |
+ ( IFS=$'\n'; echo "${junit_log[*]}" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" )
|
|
| 520 |
+ os::test::junit::declare_test_end |
|
| 521 |
+ return "${return_code}"
|
|
| 512 | 522 |
} |
| 513 | 523 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,139 @@ |
| 0 |
+#!/bin/bash |
|
| 1 |
+# This utility file contains functions that format test output to be parsed into jUnit XML |
|
| 2 |
+ |
|
| 3 |
+# os::test::junit::declare_suite_start prints a message declaring the start of a test suite |
|
| 4 |
+# Any number of suites can be in flight at any time, so there is no failure condition for this |
|
| 5 |
+# script based on the number of suites in flight. |
|
| 6 |
+# |
|
| 7 |
+# Globals: |
|
| 8 |
+# - JUNIT_REPORT_OUTPUT |
|
| 9 |
+# - NUM_OS_JUNIT_SUITES_IN_FLIGHT |
|
| 10 |
+# Arguments: |
|
| 11 |
+# - 1: the suite name that is starting |
|
| 12 |
+# Returns: |
|
| 13 |
+# - increment NUM_OS_JUNIT_SUITES_IN_FLIGHT |
|
| 14 |
+function os::test::junit::declare_suite_start() {
|
|
| 15 |
+ local suite_name=$1 |
|
| 16 |
+ local num_suites=${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}
|
|
| 17 |
+ |
|
| 18 |
+ echo "=== BEGIN TEST SUITE github.com/openshift/origin/test/${suite_name} ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}"
|
|
| 19 |
+ NUM_OS_JUNIT_SUITES_IN_FLIGHT=$(( ${num_suites} + 1 ))
|
|
| 20 |
+ export NUM_OS_JUNIT_SUITES_IN_FLIGHT |
|
| 21 |
+} |
|
| 22 |
+ |
|
| 23 |
+# os::test::junit::declare_suite_end prints a message declaring the end of a test suite |
|
| 24 |
+# If there aren't any suites in flight, this function will fail. |
|
| 25 |
+# |
|
| 26 |
+# Globals: |
|
| 27 |
+# - JUNIT_REPORT_OUTPUT |
|
| 28 |
+# - NUM_OS_JUNIT_SUITES_IN_FLIGHT |
|
| 29 |
+# Arguments: |
|
| 30 |
+#  - None |
|
| 31 |
+# Returns: |
|
| 32 |
+# - export/decrement NUM_OS_JUNIT_SUITES_IN_FLIGHT |
|
| 33 |
+function os::test::junit::declare_suite_end() {
|
|
| 34 |
+ local num_suites=${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}
|
|
| 35 |
+ if [[ "${num_suites}" -lt "1" ]]; then
|
|
| 36 |
+ # we can't end a suite if none have been started yet |
|
| 37 |
+ echo "[ERROR] jUnit suite marker could not be placed, expected suites in flight, got ${num_suites}"
|
|
| 38 |
+ return 1 |
|
| 39 |
+ fi |
|
| 40 |
+ |
|
| 41 |
+ echo "=== END TEST SUITE ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}"
|
|
| 42 |
+ NUM_OS_JUNIT_SUITES_IN_FLIGHT=$(( ${num_suites} - 1 ))
|
|
| 43 |
+ export NUM_OS_JUNIT_SUITES_IN_FLIGHT |
|
| 44 |
+} |
|
| 45 |
+ |
|
| 46 |
+# os::test::junit::declare_test_start prints a message declaring the start of a test case |
|
| 47 |
+# If there is already a test marked as being in flight, this function will fail. |
|
| 48 |
+# |
|
| 49 |
+# Globals: |
|
| 50 |
+# - JUNIT_REPORT_OUTPUT |
|
| 51 |
+# - NUM_OS_JUNIT_TESTS_IN_FLIGHT |
|
| 52 |
+# Arguments: |
|
| 53 |
+# None |
|
| 54 |
+# Returns: |
|
| 55 |
+# - increment NUM_OS_JUNIT_TESTS_IN_FLIGHT |
|
| 56 |
+function os::test::junit::declare_test_start() {
|
|
| 57 |
+ local num_tests=${NUM_OS_JUNIT_TESTS_IN_FLIGHT:-0}
|
|
| 58 |
+ if [[ "${num_tests}" -ne "0" ]]; then
|
|
| 59 |
+ # someone's declaring the starting of a test when a test is already in flight |
|
| 60 |
+ echo "[ERROR] jUnit test marker could not be placed, expected no tests in flight, got ${num_tests}"
|
|
| 61 |
+ return 1 |
|
| 62 |
+ fi |
|
| 63 |
+ |
|
| 64 |
+ local num_suites=${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}
|
|
| 65 |
+ if [[ "${num_suites}" -lt "1" ]]; then
|
|
| 66 |
+		# we can't start a test if no suites are in flight |
|
| 67 |
+ echo "[ERROR] jUnit test marker could not be placed, expected suites in flight, got ${num_suites}"
|
|
| 68 |
+ return 1 |
|
| 69 |
+ fi |
|
| 70 |
+ |
|
| 71 |
+ echo "=== BEGIN TEST CASE ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}"
|
|
| 72 |
+ NUM_OS_JUNIT_TESTS_IN_FLIGHT=$(( ${num_tests} + 1 ))
|
|
| 73 |
+ export NUM_OS_JUNIT_TESTS_IN_FLIGHT |
|
| 74 |
+} |
|
| 75 |
+ |
|
| 76 |
+# os::test::junit::declare_test_end prints a message declaring the end of a test case |
|
| 77 |
+# If there is no test marked as being in flight, this function will fail. |
|
| 78 |
+# |
|
| 79 |
+# Globals: |
|
| 80 |
+# - JUNIT_REPORT_OUTPUT |
|
| 81 |
+# - NUM_OS_JUNIT_TESTS_IN_FLIGHT |
|
| 82 |
+# Arguments: |
|
| 83 |
+# None |
|
| 84 |
+# Returns: |
|
| 85 |
+# - decrement NUM_OS_JUNIT_TESTS_IN_FLIGHT |
|
| 86 |
+function os::test::junit::declare_test_end() {
|
|
| 87 |
+ local num_tests=${NUM_OS_JUNIT_TESTS_IN_FLIGHT:-0}
|
|
| 88 |
+ if [[ "${num_tests}" -ne "1" ]]; then
|
|
| 89 |
+ # someone's declaring the end of a test when a test is not in flight |
|
| 90 |
+ echo "[ERROR] jUnit test marker could not be placed, expected one test in flight, got ${num_tests}"
|
|
| 91 |
+ return 1 |
|
| 92 |
+ fi |
|
| 93 |
+ |
|
| 94 |
+ echo "=== END TEST CASE ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}"
|
|
| 95 |
+ NUM_OS_JUNIT_TESTS_IN_FLIGHT=$(( ${num_tests} - 1 ))
|
|
| 96 |
+ export NUM_OS_JUNIT_TESTS_IN_FLIGHT |
|
| 97 |
+} |
|
| 98 |
+ |
|
| 99 |
+# os::test::junit::check_test_counters checks that we do not have any test suites or test cases in flight |
|
| 100 |
+# This function should be called at the very end of any test script using jUnit markers to make sure no error in |
|
| 101 |
+# marking has occurred. |
|
| 102 |
+# |
|
| 103 |
+# Globals: |
|
| 104 |
+# - NUM_OS_JUNIT_SUITES_IN_FLIGHT |
|
| 105 |
+# - NUM_OS_JUNIT_TESTS_IN_FLIGHT |
|
| 106 |
+# Arguments: |
|
| 107 |
+# None |
|
| 108 |
+# Returns: |
|
| 109 |
+# None |
|
| 110 |
+function os::test::junit::check_test_counters() {
|
|
| 111 |
+ if [[ "${NUM_OS_JUNIT_SUITES_IN_FLIGHT-}" -ne "0" ]]; then
|
|
| 112 |
+ echo "[ERROR] Expected no test suites to be marked as in-flight at the end of testing, got ${NUM_OS_JUNIT_SUITES_IN_FLIGHT-}"
|
|
| 113 |
+ return 1 |
|
| 114 |
+ elif [[ "${NUM_OS_JUNIT_TESTS_IN_FLIGHT-}" -ne "0" ]]; then
|
|
| 115 |
+ echo "[ERROR] Expected no test cases to be marked as in-flight at the end of testing, got ${NUM_OS_JUNIT_TESTS_IN_FLIGHT-}"
|
|
| 116 |
+ return 1 |
|
| 117 |
+ fi |
|
| 118 |
+} |
|
| 119 |
+ |
|
| 120 |
+# os::test::junit::reconcile_output appends the necessary suite and test end statements to the jUnit output file |
|
| 121 |
+# in order to ensure that the file is in a consistent state to allow for parsing |
|
| 122 |
+# |
|
| 123 |
+# Globals: |
|
| 124 |
+# - NUM_OS_JUNIT_SUITES_IN_FLIGHT |
|
| 125 |
+# - NUM_OS_JUNIT_TESTS_IN_FLIGHT |
|
| 126 |
+# Arguments: |
|
| 127 |
+# None |
|
| 128 |
+# Returns: |
|
| 129 |
+# None |
|
| 130 |
+function os::test::junit::reconcile_output() {
|
|
| 131 |
+ if [[ "${NUM_OS_JUNIT_TESTS_IN_FLIGHT:-0}" = "1" ]]; then
|
|
| 132 |
+ os::test::junit::declare_test_end |
|
| 133 |
+ fi |
|
| 134 |
+ |
|
| 135 |
+ for (( i = 0; i < ${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}; i++ )); do
|
|
| 136 |
+ os::test::junit::declare_suite_end |
|
| 137 |
+ done |
|
| 138 |
+} |
|
| 0 | 139 |
\ No newline at end of file |
| ... | ... |
@@ -14,6 +14,7 @@ source "${OS_ROOT}/hack/util.sh"
|
| 14 | 14 |
source "${OS_ROOT}/hack/lib/log.sh"
|
| 15 | 15 |
source "${OS_ROOT}/hack/lib/util/environment.sh"
|
| 16 | 16 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 17 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 17 | 18 |
os::log::install_errexit |
| 18 | 19 |
os::util::environment::setup_time_vars |
| 19 | 20 |
|
| ... | ... |
@@ -78,6 +79,11 @@ export ETCD_PEER_PORT=${ETCD_PEER_PORT:-27001}
|
| 78 | 78 |
os::util::environment::setup_all_server_vars "test-cmd/" |
| 79 | 79 |
reset_tmp_dir |
| 80 | 80 |
|
| 81 |
+# Allow setting $JUNIT_REPORT to toggle output behavior |
|
| 82 |
+if [[ -n "${JUNIT_REPORT}" ]]; then
|
|
| 83 |
+ export JUNIT_REPORT_OUTPUT="${LOG_DIR}/raw_test_output.log"
|
|
| 84 |
+fi |
|
| 85 |
+ |
|
| 81 | 86 |
echo "Logging to ${LOG_DIR}..."
|
| 82 | 87 |
|
| 83 | 88 |
os::log::start_system_logger |
| ... | ... |
@@ -145,19 +151,6 @@ openshift start \ |
| 145 | 145 |
--etcd-dir="${ETCD_DATA_DIR}" \
|
| 146 | 146 |
--images="${USE_IMAGES}"
|
| 147 | 147 |
|
| 148 |
-# validate config that was generated |
|
| 149 |
-os::cmd::expect_success_and_text "openshift ex validate master-config ${MASTER_CONFIG_DIR}/master-config.yaml" 'SUCCESS'
|
|
| 150 |
-os::cmd::expect_success_and_text "openshift ex validate node-config ${NODE_CONFIG_DIR}/node-config.yaml" 'SUCCESS'
|
|
| 151 |
-# breaking the config fails the validation check |
|
| 152 |
-cp ${MASTER_CONFIG_DIR}/master-config.yaml ${BASETMPDIR}/master-config-broken.yaml
|
|
| 153 |
-echo "kubernetesMasterConfig: {}" >> ${BASETMPDIR}/master-config-broken.yaml
|
|
| 154 |
-os::cmd::expect_failure_and_text "openshift ex validate master-config ${BASETMPDIR}/master-config-broken.yaml" 'ERROR'
|
|
| 155 |
- |
|
| 156 |
-cp ${NODE_CONFIG_DIR}/node-config.yaml ${BASETMPDIR}/node-config-broken.yaml
|
|
| 157 |
-os::util::sed '5,10d' ${BASETMPDIR}/node-config-broken.yaml
|
|
| 158 |
-os::cmd::expect_failure_and_text "openshift ex validate node-config ${BASETMPDIR}/node-config-broken.yaml" 'ERROR'
|
|
| 159 |
-echo "validation: ok" |
|
| 160 |
- |
|
| 161 | 148 |
# Don't try this at home. We don't have flags for setting etcd ports in the config, but we want deconflicted ones. Use sed to replace defaults in a completely unsafe way |
| 162 | 149 |
os::util::sed "s/:4001$/:${ETCD_PORT}/g" ${SERVER_CONFIG_DIR}/master/master-config.yaml
|
| 163 | 150 |
os::util::sed "s/:7001$/:${ETCD_PEER_PORT}/g" ${SERVER_CONFIG_DIR}/master/master-config.yaml
|
| ... | ... |
@@ -196,6 +189,22 @@ atomic-enterprise start \ |
| 196 | 196 |
--etcd-dir="${ETCD_DATA_DIR}" \
|
| 197 | 197 |
--images="${USE_IMAGES}"
|
| 198 | 198 |
|
| 199 |
+os::test::junit::declare_suite_start "cmd/validation" |
|
| 200 |
+# validate config that was generated |
|
| 201 |
+os::cmd::expect_success_and_text "openshift ex validate master-config ${MASTER_CONFIG_DIR}/master-config.yaml" 'SUCCESS'
|
|
| 202 |
+os::cmd::expect_success_and_text "openshift ex validate node-config ${NODE_CONFIG_DIR}/node-config.yaml" 'SUCCESS'
|
|
| 203 |
+# breaking the config fails the validation check |
|
| 204 |
+cp ${MASTER_CONFIG_DIR}/master-config.yaml ${BASETMPDIR}/master-config-broken.yaml
|
|
| 205 |
+os::util::sed '7,12d' ${BASETMPDIR}/master-config-broken.yaml
|
|
| 206 |
+os::cmd::expect_failure_and_text "openshift ex validate master-config ${BASETMPDIR}/master-config-broken.yaml" 'ERROR'
|
|
| 207 |
+ |
|
| 208 |
+cp ${NODE_CONFIG_DIR}/node-config.yaml ${BASETMPDIR}/node-config-broken.yaml
|
|
| 209 |
+os::util::sed '5,10d' ${BASETMPDIR}/node-config-broken.yaml
|
|
| 210 |
+os::cmd::expect_failure_and_text "openshift ex validate node-config ${BASETMPDIR}/node-config-broken.yaml" 'ERROR'
|
|
| 211 |
+echo "validation: ok" |
|
| 212 |
+os::test::junit::declare_suite_end |
|
| 213 |
+ |
|
| 214 |
+os::test::junit::declare_suite_start "cmd/config" |
|
| 199 | 215 |
# ensure that DisabledFeatures aren't written to config files |
| 200 | 216 |
os::cmd::expect_success_and_text "cat ${MASTER_CONFIG_DIR}/master-config.yaml" 'disabledFeatures: null'
|
| 201 | 217 |
os::cmd::expect_success_and_text "cat ${BASETMPDIR}/atomic.local.config/master/master-config.yaml" 'disabledFeatures: null'
|
| ... | ... |
@@ -217,7 +226,6 @@ if [[ "${API_SCHEME}" == "https" ]]; then
|
| 217 | 217 |
os::cmd::expect_failure_and_text "oc get services" 'certificate signed by unknown authority' |
| 218 | 218 |
fi |
| 219 | 219 |
|
| 220 |
- |
|
| 221 | 220 |
# login and logout tests |
| 222 | 221 |
# bad token should error |
| 223 | 222 |
os::cmd::expect_failure_and_text "oc login ${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' --token=badvalue" 'The token provided is invalid or expired'
|
| ... | ... |
@@ -279,6 +287,7 @@ cp ${MASTER_CONFIG_DIR}/admin.kubeconfig ${HOME}/.kube/config
|
| 279 | 279 |
os::cmd::expect_success 'oc get services' |
| 280 | 280 |
mv ${HOME}/.kube/config ${HOME}/.kube/non-default-config
|
| 281 | 281 |
echo "config files: ok" |
| 282 |
+os::test::junit::declare_suite_end |
|
| 282 | 283 |
|
| 283 | 284 |
# from this point every command will use config from the KUBECONFIG env var |
| 284 | 285 |
export NODECONFIG="${NODE_CONFIG_DIR}/node-config.yaml"
|
| ... | ... |
@@ -303,6 +312,8 @@ for test in "${tests[@]}"; do
|
| 303 | 303 |
cp ${KUBECONFIG}{.bak,} # since nothing ever gets deleted from kubeconfig, reset it
|
| 304 | 304 |
done |
| 305 | 305 |
|
| 306 |
+# check that we didn't mangle jUnit output |
|
| 307 |
+os::test::junit::check_test_counters |
|
| 306 | 308 |
|
| 307 | 309 |
# Done |
| 308 | 310 |
echo |
| 309 | 311 |
new file mode 100755 |
| ... | ... |
@@ -0,0 +1,41 @@ |
| 0 |
+#!/bin/bash |
|
| 1 |
+ |
|
| 2 |
+# This script runs all of the test written for our Bash libraries. |
|
| 3 |
+ |
|
| 4 |
+set -o errexit |
|
| 5 |
+set -o nounset |
|
| 6 |
+set -o pipefail |
|
| 7 |
+ |
|
| 8 |
+function exit_trap() {
|
|
| 9 |
+ local return_code=$? |
|
| 10 |
+ |
|
| 11 |
+ end_time=$(date +%s) |
|
| 12 |
+ |
|
| 13 |
+ if [[ "${return_code}" -eq "0" ]]; then
|
|
| 14 |
+ verb="succeeded" |
|
| 15 |
+ else |
|
| 16 |
+ verb="failed" |
|
| 17 |
+ fi |
|
| 18 |
+ |
|
| 19 |
+ echo "$0 ${verb} after $((${end_time} - ${start_time})) seconds"
|
|
| 20 |
+ exit "${return_code}"
|
|
| 21 |
+} |
|
| 22 |
+ |
|
| 23 |
+trap exit_trap EXIT |
|
| 24 |
+ |
|
| 25 |
+start_time=$(date +%s) |
|
| 26 |
+OS_ROOT=$(dirname "${BASH_SOURCE}")/..
|
|
| 27 |
+source "${OS_ROOT}/hack/common.sh"
|
|
| 28 |
+source "${OS_ROOT}/hack/util.sh"
|
|
| 29 |
+source "${OS_ROOT}/hack/lib/util/environment.sh"
|
|
| 30 |
+os::log::install_errexit |
|
| 31 |
+os::util::environment::setup_tmpdir_vars "test-lib" |
|
| 32 |
+ |
|
| 33 |
+cd "${OS_ROOT}"
|
|
| 34 |
+ |
|
| 35 |
+library_tests="$( find 'hack/test-lib/' -type f -executable )" |
|
| 36 |
+for test in ${library_tests}; do
|
|
| 37 |
+ # run each library test found in a subshell so that we can isolate them |
|
| 38 |
+ ( ${test} )
|
|
| 39 |
+ echo "$(basename "${test//.sh}"): ok"
|
|
| 40 |
+done |
|
| 0 | 41 |
\ No newline at end of file |
| 1 | 42 |
new file mode 100755 |
| ... | ... |
@@ -0,0 +1,82 @@ |
| 0 |
+#!/bin/bash |
|
| 1 |
+# |
|
| 2 |
+# This script tests os::test::junit functionality. |
|
| 3 |
+ |
|
| 4 |
+set -o errexit |
|
| 5 |
+set -o nounset |
|
| 6 |
+set -o pipefail |
|
| 7 |
+ |
|
| 8 |
+function exit_trap() {
|
|
| 9 |
+ local return_code=$? |
|
| 10 |
+ |
|
| 11 |
+ end_time=$(date +%s) |
|
| 12 |
+ |
|
| 13 |
+ if [[ "${return_code}" -eq "0" ]]; then
|
|
| 14 |
+ verb="succeeded" |
|
| 15 |
+ else |
|
| 16 |
+ verb="failed" |
|
| 17 |
+ fi |
|
| 18 |
+ |
|
| 19 |
+ echo "$0 ${verb} after $((${end_time} - ${start_time})) seconds"
|
|
| 20 |
+ exit "${return_code}"
|
|
| 21 |
+} |
|
| 22 |
+ |
|
| 23 |
+trap exit_trap EXIT |
|
| 24 |
+ |
|
| 25 |
+start_time=$(date +%s) |
|
| 26 |
+OS_ROOT="$( dirname "${BASH_SOURCE}" )"/../../..
|
|
| 27 |
+source "${OS_ROOT}/hack/lib/log.sh"
|
|
| 28 |
+source "${OS_ROOT}/hack/cmd_util.sh"
|
|
| 29 |
+os::log::install_errexit |
|
| 30 |
+ |
|
| 31 |
+# envars used to track these interactions are not propagated out of the subshells used to run these commands |
|
| 32 |
+# therefore each os::cmd call is its own sandbox and complicated scenarios need to play out inside one call |
|
| 33 |
+# however, envars from this scope *are* propagated into each subshell, so they need to be cleared in each call |
|
| 34 |
+ |
|
| 35 |
+os::test::junit::declare_suite_start 'lib/test/junit' |
|
| 36 |
+ |
|
| 37 |
+# shouldn't be able to end a suite straight away |
|
| 38 |
+os::cmd::expect_failure_and_text 'unset NUM_OS_JUNIT_SUITES_IN_FLIGHT NUM_OS_JUNIT_TESTS_IN_FLIGHT JUNIT_REPORT_OUTPUT |
|
| 39 |
+os::test::junit::declare_suite_end' '\[ERROR\] jUnit suite marker could not be placed, expected suites in flight, got 0' |
|
| 40 |
+# should be able to start one straight away |
|
| 41 |
+os::cmd::expect_success 'unset NUM_OS_JUNIT_SUITES_IN_FLIGHT NUM_OS_JUNIT_TESTS_IN_FLIGHT JUNIT_REPORT_OUTPUT |
|
| 42 |
+os::test::junit::declare_suite_start whatever' |
|
| 43 |
+# should be able to start and end a suite |
|
| 44 |
+os::cmd::expect_success 'unset NUM_OS_JUNIT_SUITES_IN_FLIGHT NUM_OS_JUNIT_TESTS_IN_FLIGHT JUNIT_REPORT_OUTPUT |
|
| 45 |
+os::test::junit::declare_suite_start whatever |
|
| 46 |
+os::test::junit::declare_suite_end' |
|
| 47 |
+# should not be able to end more suites than are in flight |
|
| 48 |
+os::cmd::expect_failure_and_text 'unset NUM_OS_JUNIT_SUITES_IN_FLIGHT NUM_OS_JUNIT_TESTS_IN_FLIGHT JUNIT_REPORT_OUTPUT |
|
| 49 |
+os::test::junit::declare_suite_start whatever |
|
| 50 |
+os::test::junit::declare_suite_end |
|
| 51 |
+os::test::junit::declare_suite_end' '\[ERROR\] jUnit suite marker could not be placed, expected suites in flight, got 0' |
|
| 52 |
+# should not be able to end more suites than are in flight |
|
| 53 |
+os::cmd::expect_failure_and_text 'unset NUM_OS_JUNIT_SUITES_IN_FLIGHT NUM_OS_JUNIT_TESTS_IN_FLIGHT JUNIT_REPORT_OUTPUT |
|
| 54 |
+os::test::junit::declare_suite_start whatever |
|
| 55 |
+os::test::junit::declare_suite_start whateverelse |
|
| 56 |
+os::test::junit::declare_suite_end |
|
| 57 |
+os::test::junit::declare_suite_end |
|
| 58 |
+os::test::junit::declare_suite_end' '\[ERROR\] jUnit suite marker could not be placed, expected suites in flight, got 0' |
|
| 59 |
+# should be able to start a test |
|
| 60 |
+os::cmd::expect_success 'unset NUM_OS_JUNIT_SUITES_IN_FLIGHT NUM_OS_JUNIT_TESTS_IN_FLIGHT JUNIT_REPORT_OUTPUT |
|
| 61 |
+os::test::junit::declare_suite_start whatever |
|
| 62 |
+os::test::junit::declare_test_start' |
|
| 63 |
+# shouldn't be able to end a test that hasn't been started |
|
| 64 |
+os::cmd::expect_failure_and_text 'unset NUM_OS_JUNIT_SUITES_IN_FLIGHT NUM_OS_JUNIT_TESTS_IN_FLIGHT JUNIT_REPORT_OUTPUT |
|
| 65 |
+os::test::junit::declare_test_end' '\[ERROR\] jUnit test marker could not be placed, expected one test in flight, got 0' |
|
| 66 |
+# should be able to start and end a test case |
|
| 67 |
+os::cmd::expect_success 'unset NUM_OS_JUNIT_SUITES_IN_FLIGHT NUM_OS_JUNIT_TESTS_IN_FLIGHT JUNIT_REPORT_OUTPUT |
|
| 68 |
+os::test::junit::declare_suite_start whatever |
|
| 69 |
+os::test::junit::declare_test_start |
|
| 70 |
+os::test::junit::declare_test_end' |
|
| 71 |
+# shouldn't be able to end too many test cases |
|
| 72 |
+os::cmd::expect_failure_and_text 'unset NUM_OS_JUNIT_SUITES_IN_FLIGHT NUM_OS_JUNIT_TESTS_IN_FLIGHT JUNIT_REPORT_OUTPUT |
|
| 73 |
+os::test::junit::declare_suite_start whatever |
|
| 74 |
+os::test::junit::declare_test_start |
|
| 75 |
+os::test::junit::declare_test_end |
|
| 76 |
+os::test::junit::declare_test_end' '\[ERROR\] jUnit test marker could not be placed, expected one test in flight, got 0' |
|
| 77 |
+# shouldn't be able to start a test without a suite |
|
| 78 |
+os::cmd::expect_failure_and_text 'unset NUM_OS_JUNIT_SUITES_IN_FLIGHT NUM_OS_JUNIT_TESTS_IN_FLIGHT JUNIT_REPORT_OUTPUT |
|
| 79 |
+os::test::junit::declare_test_start' '\[ERROR\] jUnit test marker could not be placed, expected suites in flight, got 0' |
|
| 80 |
+ |
|
| 81 |
+os::test::junit::declare_suite_end |
|
| 0 | 82 |
\ No newline at end of file |
| ... | ... |
@@ -9,14 +9,20 @@ set -o pipefail |
| 9 | 9 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/..
|
| 10 | 10 |
source "${OS_ROOT}/hack/util.sh"
|
| 11 | 11 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 12 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 12 | 13 |
os::log::install_errexit |
| 13 | 14 |
|
| 14 | 15 |
BASETMPDIR="${TMPDIR:-/tmp}/openshift/test-tools"
|
| 15 |
-JUNIT_OUTPUT_FILE="${BASETMPDIR}/junit_output.txt"
|
|
| 16 |
+JUNIT_REPORT_OUTPUT="${BASETMPDIR}/junit_output.txt"
|
|
| 17 |
+mkdir -p "${BASETMPDIR}"
|
|
| 18 |
+touch "${JUNIT_REPORT_OUTPUT}"
|
|
| 16 | 19 |
|
| 17 | 20 |
# set verbosity so we can see that command output renders correctly |
| 18 | 21 |
VERBOSE=1 |
| 19 | 22 |
|
| 23 |
+os::test::junit::declare_suite_start "cmd/util" |
|
| 24 |
+ |
|
| 25 |
+os::test::junit::declare_suite_start "cmd/util/positive" |
|
| 20 | 26 |
# positive tests |
| 21 | 27 |
os::cmd::expect_success 'exit 0' |
| 22 | 28 |
|
| ... | ... |
@@ -37,7 +43,9 @@ os::cmd::expect_code_and_text 'echo "hello" && exit 213' '213' 'hello' |
| 37 | 37 |
os::cmd::expect_code_and_not_text 'echo "goodbye" && exit 213' '213' 'hello' |
| 38 | 38 |
|
| 39 | 39 |
echo "positive tests: ok" |
| 40 |
+os::test::junit::declare_suite_end |
|
| 40 | 41 |
|
| 42 |
+os::test::junit::declare_suite_start "cmd/util/negative" |
|
| 41 | 43 |
# negative tests |
| 42 | 44 |
|
| 43 | 45 |
if os::cmd::expect_success 'exit 1'; then |
| ... | ... |
@@ -125,7 +133,9 @@ if os::cmd::expect_code_and_not_text 'echo "hello" && exit 0' '1' 'hello'; then |
| 125 | 125 |
fi |
| 126 | 126 |
|
| 127 | 127 |
echo "negative tests: ok" |
| 128 |
+os::test::junit::declare_suite_end |
|
| 128 | 129 |
|
| 130 |
+os::test::junit::declare_suite_start "cmd/util/complex" |
|
| 129 | 131 |
# complex input tests |
| 130 | 132 |
|
| 131 | 133 |
# pipes |
| ... | ... |
@@ -170,8 +180,9 @@ EOF |
| 170 | 170 |
os::cmd::expect_success 'grep hello <<< hello' |
| 171 | 171 |
|
| 172 | 172 |
echo "complex tests: ok" |
| 173 |
+os::test::junit::declare_suite_end |
|
| 173 | 174 |
|
| 174 |
- |
|
| 175 |
+os::test::junit::declare_suite_start "cmd/util/output" |
|
| 175 | 176 |
# test for output correctness |
| 176 | 177 |
|
| 177 | 178 |
# expect_code |
| ... | ... |
@@ -333,7 +344,9 @@ echo "${output}" | grep -q '; the output content test failed'
|
| 333 | 333 |
echo "${output}" | grep -q 'hello'
|
| 334 | 334 |
|
| 335 | 335 |
echo "output tests: ok" |
| 336 |
+os::test::junit::declare_suite_end |
|
| 336 | 337 |
|
| 338 |
+os::test::junit::declare_suite_start "cmd/util/tryuntil" |
|
| 337 | 339 |
function current_time_millis_mod_1000() {
|
| 338 | 340 |
mod=$(expr $(date +%s000) % 1000) |
| 339 | 341 |
if [ $mod -eq 0 ]; then |
| ... | ... |
@@ -379,7 +392,9 @@ if os::cmd::try_until_failure 'exit 0' $(( 1 * second )); then |
| 379 | 379 |
fi |
| 380 | 380 |
|
| 381 | 381 |
echo "try_until: ok" |
| 382 |
+os::test::junit::declare_suite_end |
|
| 382 | 383 |
|
| 384 |
+os::test::junit::declare_suite_start "cmd/util/compression" |
|
| 383 | 385 |
TMPDIR="${TMPDIR:-"/tmp"}"
|
| 384 | 386 |
TEST_DIR=${TMPDIR}/openshift/origin/test/cmd
|
| 385 | 387 |
rm -rf ${TEST_DIR} || true
|
| ... | ... |
@@ -432,4 +447,9 @@ line 2 |
| 432 | 432 |
|
| 433 | 433 |
os::cmd::internal::compress_output ${TEST_DIR}//compress_test.txt > ${TEST_DIR}/actual-compressed.out
|
| 434 | 434 |
diff ${TEST_DIR}/expected-compressed.out ${TEST_DIR}/actual-compressed.out
|
| 435 |
-echo "compression: ok" |
|
| 436 | 435 |
\ No newline at end of file |
| 436 |
+echo "compression: ok" |
|
| 437 |
+os::test::junit::declare_suite_end |
|
| 438 |
+ |
|
| 439 |
+os::test::junit::declare_suite_end |
|
| 440 |
+ |
|
| 441 |
+os::test::junit::check_test_counters |
|
| 437 | 442 |
\ No newline at end of file |
| ... | ... |
@@ -704,7 +704,7 @@ os::log::errexit() {
|
| 704 | 704 |
} |
| 705 | 705 |
|
| 706 | 706 |
os::log::install_errexit() {
|
| 707 |
- # trap ERR to provide an error handler whenever a command exits nonzero this |
|
| 707 |
+ # trap ERR to provide an error handler whenever a command exits nonzero; this |
|
| 708 | 708 |
# is a more verbose version of set -o errexit |
| 709 | 709 |
trap 'os::log::errexit' ERR |
| 710 | 710 |
|
| ... | ... |
@@ -7,7 +7,9 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 12 | 14 |
# Cleanup cluster resources created by this test |
| 13 | 15 |
( |
| ... | ... |
@@ -30,8 +32,10 @@ os::log::install_errexit |
| 30 | 30 |
defaultimage="openshift/origin-\${component}:latest"
|
| 31 | 31 |
USE_IMAGES=${USE_IMAGES:-$defaultimage}
|
| 32 | 32 |
|
| 33 |
+os::test::junit::declare_suite_start "cmd/admin" |
|
| 33 | 34 |
# This test validates admin level commands including system policy |
| 34 | 35 |
|
| 36 |
+os::test::junit::declare_suite_start "cmd/admin/start" |
|
| 35 | 37 |
# Check failure modes of various system commands |
| 36 | 38 |
os::cmd::expect_failure_and_text 'openshift start network' 'kubeconfig must be set' |
| 37 | 39 |
os::cmd::expect_failure_and_text 'openshift start network --config=${NODECONFIG} --enable=kubelet' 'the following components are not recognized: kubelet'
|
| ... | ... |
@@ -41,7 +45,9 @@ os::cmd::expect_failure_and_text 'openshift start network --config=${NODECONFIG}
|
| 41 | 41 |
os::cmd::expect_failure_and_text 'openshift start node' 'kubeconfig must be set' |
| 42 | 42 |
os::cmd::expect_failure_and_text 'openshift start node --config=${NODECONFIG} --disable=other' 'the following components are not recognized: other'
|
| 43 | 43 |
os::cmd::expect_failure_and_text 'openshift start node --config=${NODECONFIG} --disable=kubelet,proxy,plugins' 'at least one node component must be enabled \(kubelet, plugins, proxy\)'
|
| 44 |
+os::test::junit::declare_suite_end |
|
| 44 | 45 |
|
| 46 |
+os::test::junit::declare_suite_start "cmd/admin/manage-node" |
|
| 45 | 47 |
# Test admin manage-node operations |
| 46 | 48 |
os::cmd::expect_success_and_text 'openshift admin manage-node --help' 'Manage nodes' |
| 47 | 49 |
|
| ... | ... |
@@ -65,7 +71,10 @@ status: |
| 65 | 65 |
|
| 66 | 66 |
os::cmd::expect_success_and_text 'oadm manage-node --selector= --schedulable=true' 'Ready' |
| 67 | 67 |
os::cmd::expect_success_and_not_text 'oadm manage-node --selector= --schedulable=true' 'Sched' |
| 68 |
+echo "manage-node: ok" |
|
| 69 |
+os::test::junit::declare_suite_end |
|
| 68 | 70 |
|
| 71 |
+os::test::junit::declare_suite_start "cmd/admin/certs" |
|
| 69 | 72 |
# check create-master-certs validation |
| 70 | 73 |
os::cmd::expect_failure_and_text 'oadm ca create-master-certs --hostnames=example.com --master=' 'master must be provided' |
| 71 | 74 |
os::cmd::expect_failure_and_text 'oadm ca create-master-certs --hostnames=example.com --master=example.com' 'master must be a valid URL' |
| ... | ... |
@@ -93,7 +102,10 @@ os::cmd::expect_success "oadm ca decrypt --key='${ARTIFACT_DIR}/secret.key' <
|
| 93 | 93 |
os::cmd::expect_success "diff '${ARTIFACT_DIR}/secret.data' '${ARTIFACT_DIR}/secret.file-in-file-out.decrypted'"
|
| 94 | 94 |
os::cmd::expect_success "diff '${ARTIFACT_DIR}/secret.data' '${ARTIFACT_DIR}/secret.file-in-pipe-out.decrypted'"
|
| 95 | 95 |
os::cmd::expect_success "diff '${ARTIFACT_DIR}/secret.data' '${ARTIFACT_DIR}/secret.pipe-in-pipe-out.decrypted'"
|
| 96 |
+echo "certs: ok" |
|
| 97 |
+os::test::junit::declare_suite_end |
|
| 96 | 98 |
|
| 99 |
+os::test::junit::declare_suite_start "cmd/admin/groups" |
|
| 97 | 100 |
os::cmd::expect_success 'oadm groups new group1 foo bar' |
| 98 | 101 |
os::cmd::expect_success_and_text 'oc get groups/group1 --no-headers' 'foo, bar' |
| 99 | 102 |
os::cmd::expect_success 'oadm groups add-users group1 baz' |
| ... | ... |
@@ -101,7 +113,9 @@ os::cmd::expect_success_and_text 'oc get groups/group1 --no-headers' 'baz' |
| 101 | 101 |
os::cmd::expect_success 'oadm groups remove-users group1 bar' |
| 102 | 102 |
os::cmd::expect_success_and_not_text 'oc get groups/group1 --no-headers' 'bar' |
| 103 | 103 |
echo "groups: ok" |
| 104 |
+os::test::junit::declare_suite_end |
|
| 104 | 105 |
|
| 106 |
+os::test::junit::declare_suite_start "cmd/admin/admin-scc" |
|
| 105 | 107 |
os::cmd::expect_success 'oadm policy who-can get pods' |
| 106 | 108 |
os::cmd::expect_success 'oadm policy who-can get pods -n default' |
| 107 | 109 |
os::cmd::expect_success 'oadm policy who-can get pods --all-namespaces' |
| ... | ... |
@@ -145,7 +159,9 @@ os::cmd::expect_success_and_not_text 'oc get scc/privileged -o yaml' "system:ser |
| 145 | 145 |
os::cmd::expect_success 'oadm policy remove-scc-from-group privileged fake-group' |
| 146 | 146 |
os::cmd::expect_success_and_not_text 'oc get scc/privileged -o yaml' 'fake-group' |
| 147 | 147 |
echo "admin-scc: ok" |
| 148 |
+os::test::junit::declare_suite_end |
|
| 148 | 149 |
|
| 150 |
+os::test::junit::declare_suite_start "cmd/admin/reconcile-cluster-roles" |
|
| 149 | 151 |
os::cmd::expect_success 'oc delete clusterrole/cluster-status --cascade=false' |
| 150 | 152 |
os::cmd::expect_failure 'oc get clusterrole/cluster-status' |
| 151 | 153 |
os::cmd::expect_success 'oadm policy reconcile-cluster-roles' |
| ... | ... |
@@ -172,7 +188,9 @@ os::cmd::expect_success_and_text 'oc get clusterroles/basic-user -o json' 'group |
| 172 | 172 |
os::cmd::expect_success 'oadm policy reconcile-cluster-roles --confirm' |
| 173 | 173 |
os::cmd::expect_success_and_not_text 'oc get clusterroles/basic-user -o yaml' 'groups' |
| 174 | 174 |
echo "admin-reconcile-cluster-roles: ok" |
| 175 |
+os::test::junit::declare_suite_end |
|
| 175 | 176 |
|
| 177 |
+os::test::junit::declare_suite_start "cmd/admin/reconcile-cluster-role-bindings" |
|
| 176 | 178 |
# Ensure a removed binding gets re-added |
| 177 | 179 |
os::cmd::expect_success 'oc delete clusterrolebinding/cluster-status-binding' |
| 178 | 180 |
os::cmd::expect_failure 'oc get clusterrolebinding/cluster-status-binding' |
| ... | ... |
@@ -203,7 +221,9 @@ os::cmd::expect_failure 'oc get clusterrolebinding/basic-users' |
| 203 | 203 |
os::cmd::expect_success 'oadm policy reconcile-cluster-role-bindings basic-user --confirm' |
| 204 | 204 |
os::cmd::expect_success 'oc get clusterrolebinding/basic-users' |
| 205 | 205 |
echo "admin-reconcile-cluster-role-bindings: ok" |
| 206 |
+os::test::junit::declare_suite_end |
|
| 206 | 207 |
|
| 208 |
+os::test::junit::declare_suite_start "cmd/admin/role-reapers" |
|
| 207 | 209 |
os::cmd::expect_success "oc create -f test/extended/fixtures/roles/policy-roles.yaml" |
| 208 | 210 |
os::cmd::expect_success "oc get rolebinding/basic-users" |
| 209 | 211 |
os::cmd::expect_success "oc delete role/basic-user" |
| ... | ... |
@@ -219,9 +239,9 @@ os::cmd::expect_failure "oc get rolebinding/edit" |
| 219 | 219 |
os::cmd::expect_success "oadm policy reconcile-cluster-roles --confirm" |
| 220 | 220 |
os::cmd::expect_success "oadm policy reconcile-cluster-role-bindings --confirm" |
| 221 | 221 |
echo "admin-role-reapers: ok" |
| 222 |
+os::test::junit::declare_suite_end |
|
| 222 | 223 |
|
| 223 |
-echo "admin-policy: ok" |
|
| 224 |
- |
|
| 224 |
+os::test::junit::declare_suite_start "cmd/admin/ui-project-commands" |
|
| 225 | 225 |
# Test the commands the UI projects page tells users to run |
| 226 | 226 |
# These should match what is described in projects.html |
| 227 | 227 |
os::cmd::expect_success 'oadm new-project ui-test-project --admin="createuser"' |
| ... | ... |
@@ -232,8 +252,9 @@ os::cmd::try_until_text 'oc get projects' 'ui\-test\-project' |
| 232 | 232 |
os::cmd::expect_success_and_text "oc describe policybinding ':default' -n ui-test-project" 'createuser' |
| 233 | 233 |
os::cmd::expect_success_and_text "oc describe policybinding ':default' -n ui-test-project" 'adduser' |
| 234 | 234 |
echo "ui-project-commands: ok" |
| 235 |
+os::test::junit::declare_suite_end |
|
| 235 | 236 |
|
| 236 |
- |
|
| 237 |
+os::test::junit::declare_suite_start "cmd/admin/new-project" |
|
| 237 | 238 |
# Test deleting and recreating a project |
| 238 | 239 |
os::cmd::expect_success 'oadm new-project recreated-project --admin="createuser1"' |
| 239 | 240 |
os::cmd::expect_success 'oc delete project recreated-project' |
| ... | ... |
@@ -241,7 +262,22 @@ os::cmd::try_until_failure 'oc get project recreated-project' |
| 241 | 241 |
os::cmd::expect_success 'oadm new-project recreated-project --admin="createuser2"' |
| 242 | 242 |
os::cmd::expect_success_and_text "oc describe policybinding ':default' -n recreated-project" 'createuser2' |
| 243 | 243 |
echo "new-project: ok" |
| 244 |
- |
|
| 244 |
+os::test::junit::declare_suite_end |
|
| 245 |
+ |
|
| 246 |
+os::test::junit::declare_suite_start "cmd/admin/router" |
|
| 247 |
+# Test running a router |
|
| 248 |
+os::cmd::expect_failure_and_text 'oadm router --dry-run' 'does not exist' |
|
| 249 |
+encoded_json='{"kind":"ServiceAccount","apiVersion":"v1","metadata":{"name":"router"}}'
|
|
| 250 |
+os::cmd::expect_success "echo '${encoded_json}' | oc create -f - -n default"
|
|
| 251 |
+os::cmd::expect_success "oadm policy add-scc-to-user privileged system:serviceaccount:default:router" |
|
| 252 |
+os::cmd::expect_success_and_text "oadm router -o yaml --credentials=${KUBECONFIG} --service-account=router -n default" 'image:.*-haproxy-router:'
|
|
| 253 |
+os::cmd::expect_success "oadm router --credentials=${KUBECONFIG} --images='${USE_IMAGES}' --service-account=router -n default"
|
|
| 254 |
+os::cmd::expect_success_and_text 'oadm router -n default' 'service exists' |
|
| 255 |
+os::cmd::expect_success_and_text 'oc get dc/router -o yaml -n default' 'readinessProbe' |
|
| 256 |
+echo "router: ok" |
|
| 257 |
+os::test::junit::declare_suite_end |
|
| 258 |
+ |
|
| 259 |
+os::test::junit::declare_suite_start "cmd/admin/registry" |
|
| 245 | 260 |
# Test running a registry |
| 246 | 261 |
os::cmd::expect_failure_and_text 'oadm registry --dry-run' 'does not exist' |
| 247 | 262 |
os::cmd::expect_success_and_text "oadm registry -o yaml --credentials=${KUBECONFIG}" 'image:.*-docker-registry'
|
| ... | ... |
@@ -250,14 +286,18 @@ os::cmd::expect_success_and_text 'oadm registry' 'service exists' |
| 250 | 250 |
os::cmd::expect_success_and_text 'oc describe svc/docker-registry' 'Session Affinity:\s*ClientIP' |
| 251 | 251 |
os::cmd::expect_success_and_text 'oc get dc/docker-registry -o yaml' 'readinessProbe' |
| 252 | 252 |
echo "registry: ok" |
| 253 |
+os::test::junit::declare_suite_end |
|
| 253 | 254 |
|
| 255 |
+os::test::junit::declare_suite_start "cmd/admin/apply" |
|
| 254 | 256 |
workingdir=$(mktemp -d) |
| 255 | 257 |
os::cmd::expect_success "oadm registry --credentials=${KUBECONFIG} -o yaml > ${workingdir}/oadm_registry.yaml"
|
| 256 | 258 |
os::util::sed "s/5000/6000/g" ${workingdir}/oadm_registry.yaml
|
| 257 | 259 |
os::cmd::expect_success "oc apply -f ${workingdir}/oadm_registry.yaml"
|
| 258 | 260 |
os::cmd::expect_success_and_text 'oc get dc/docker-registry -o yaml' '6000' |
| 259 | 261 |
echo "apply: ok" |
| 262 |
+os::test::junit::declare_suite_end |
|
| 260 | 263 |
|
| 264 |
+os::test::junit::declare_suite_start "cmd/admin/build-chain" |
|
| 261 | 265 |
# Test building a dependency tree |
| 262 | 266 |
os::cmd::expect_success 'oc process -f examples/sample-app/application-template-stibuild.json -l build=sti | oc create -f -' |
| 263 | 267 |
# Test both the type/name resource syntax and the fact that istag/origin-ruby-sample:latest is still |
| ... | ... |
@@ -266,7 +306,9 @@ os::cmd::expect_success_and_text 'oadm build-chain istag/origin-ruby-sample' 'is |
| 266 | 266 |
os::cmd::expect_success_and_text 'oadm build-chain ruby-22-centos7 -o dot' 'digraph' |
| 267 | 267 |
os::cmd::expect_success 'oc delete all -l build=sti' |
| 268 | 268 |
echo "ex build-chain: ok" |
| 269 |
+os::test::junit::declare_suite_end |
|
| 269 | 270 |
|
| 271 |
+os::test::junit::declare_suite_start "cmd/admin/complex-scenarios" |
|
| 270 | 272 |
os::cmd::expect_success 'oadm new-project example --admin="createuser"' |
| 271 | 273 |
os::cmd::expect_success 'oc project example' |
| 272 | 274 |
os::cmd::try_until_success 'oc get serviceaccount default' |
| ... | ... |
@@ -274,7 +316,9 @@ os::cmd::expect_success 'oc create -f test/fixtures/app-scenarios' |
| 274 | 274 |
os::cmd::expect_success 'oc status' |
| 275 | 275 |
os::cmd::expect_success 'oc status -o dot' |
| 276 | 276 |
echo "complex-scenarios: ok" |
| 277 |
+os::test::junit::declare_suite_end |
|
| 277 | 278 |
|
| 279 |
+os::test::junit::declare_suite_start "cmd/admin/reconcile-security-context-constraints" |
|
| 278 | 280 |
# Test reconciling SCCs |
| 279 | 281 |
os::cmd::expect_success 'oc delete scc/restricted' |
| 280 | 282 |
os::cmd::expect_failure 'oc get scc/restricted' |
| ... | ... |
@@ -307,8 +351,9 @@ os::cmd::expect_success_and_text 'oc get scc/restricted -o yaml' 'topic: my-foo- |
| 307 | 307 |
os::cmd::expect_success 'oadm policy reconcile-sccs --confirm --additive-only=false' |
| 308 | 308 |
os::cmd::expect_success_and_not_text 'oc get scc/restricted -o yaml' 'topic: my-foo-bar' |
| 309 | 309 |
echo "reconcile-scc: ok" |
| 310 |
+os::test::junit::declare_suite_end |
|
| 310 | 311 |
|
| 311 |
- |
|
| 312 |
+os::test::junit::declare_suite_start "cmd/admin/user-group-cascade" |
|
| 312 | 313 |
# Create test users/identities and groups |
| 313 | 314 |
os::cmd::expect_success 'oc login -u cascaded-user -p pw' |
| 314 | 315 |
os::cmd::expect_success 'oc login -u orphaned-user -p pw' |
| ... | ... |
@@ -352,7 +397,9 @@ os::cmd::expect_success_and_not_text "oc get clusterrolebindings/cluster-admins |
| 352 | 352 |
os::cmd::expect_success_and_not_text "oc get rolebindings/cluster-admin --output-version=v1 --template='{{.subjects}}' -n default" 'cascaded-group'
|
| 353 | 353 |
os::cmd::expect_success_and_not_text "oc get scc/restricted --output-version=v1 --template='{{.groups}}'" 'cascaded-group'
|
| 354 | 354 |
echo "user-group-cascade: ok" |
| 355 |
+os::test::junit::declare_suite_end |
|
| 355 | 356 |
|
| 357 |
+os::test::junit::declare_suite_start "cmd/admin/serviceaccounts" |
|
| 356 | 358 |
# create a new service account |
| 357 | 359 |
os::cmd::expect_success_and_text 'oc create serviceaccount my-sa-name' 'serviceaccount "my-sa-name" created' |
| 358 | 360 |
os::cmd::expect_success 'oc get sa my-sa-name' |
| ... | ... |
@@ -368,5 +415,7 @@ os::cmd::expect_success 'oc sa new-token my-sa-name --labels="mykey=myvalue,myot |
| 368 | 368 |
os::cmd::expect_success_and_text 'oc get secrets --selector="mykey=myvalue"' 'my-sa-name' |
| 369 | 369 |
os::cmd::expect_success_and_text 'oc get secrets --selector="myotherkey=myothervalue"' 'my-sa-name' |
| 370 | 370 |
os::cmd::expect_success_and_text 'oc get secrets --selector="mykey=myvalue,myotherkey=myothervalue"' 'my-sa-name' |
| 371 |
+echo "serviceaccounts: ok" |
|
| 372 |
+os::test::junit::declare_suite_end |
|
| 371 | 373 |
|
| 372 |
-echo "serviceacounts: ok" |
|
| 373 | 374 |
\ No newline at end of file |
| 375 |
+os::test::junit::declare_suite_end |
| ... | ... |
@@ -8,7 +8,9 @@ OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/common.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/util.sh"
|
| 10 | 10 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 11 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 11 | 12 |
os::log::install_errexit |
| 13 |
+trap os::test::junit::reconcile_output EXIT |
|
| 12 | 14 |
|
| 13 | 15 |
# Cleanup cluster resources created by this test |
| 14 | 16 |
( |
| ... | ... |
@@ -18,8 +20,10 @@ os::log::install_errexit |
| 18 | 18 |
exit 0 |
| 19 | 19 |
) &>/dev/null |
| 20 | 20 |
|
| 21 |
+os::test::junit::declare_suite_start "cmd/basicresources" |
|
| 21 | 22 |
# This test validates basic resource retrieval and command interaction |
| 22 | 23 |
|
| 24 |
+os::test::junit::declare_suite_start "cmd/basicresources/versionreporting" |
|
| 23 | 25 |
# Test to make sure that we're reporting the correct version information from endpoints and the correct |
| 24 | 26 |
# User-Agent information from our clients regardless of which resources they're trying to access |
| 25 | 27 |
os::build::get_version_vars |
| ... | ... |
@@ -58,10 +62,10 @@ os::cmd::expect_success_and_text 'oadm policy who-can get pods --loglevel=7 2>& |
| 58 | 58 |
os::cmd::expect_success_and_text 'openshift cli get pods --loglevel=7 2>&1 | grep -A4 "pods" | grep User-Agent' "openshift/${KUBE_GIT_VERSION_TO_MICRO} .* kubernetes/"
|
| 59 | 59 |
# example User-Agent: openshift/v1.1.3 (linux/amd64) openshift/b348c2f |
| 60 | 60 |
os::cmd::expect_success_and_text 'openshift cli get dc --loglevel=7 2>&1 | grep -A4 "deploymentconfig" | grep User-Agent' "openshift/${OS_GIT_VERSION_TO_MICRO} .* openshift/"
|
| 61 |
- |
|
| 62 | 61 |
echo "version reporting: ok" |
| 62 |
+os::test::junit::declare_suite_end |
|
| 63 | 63 |
|
| 64 |
- |
|
| 64 |
+os::test::junit::declare_suite_start "cmd/basicresources/explain" |
|
| 65 | 65 |
os::cmd::expect_success_and_text 'oc types' 'Deployment Configuration' |
| 66 | 66 |
os::cmd::expect_failure_and_text 'oc get' 'deploymentconfig' |
| 67 | 67 |
os::cmd::expect_success_and_text 'oc get all --loglevel=6' 'buildconfigs' |
| ... | ... |
@@ -70,7 +74,9 @@ os::cmd::expect_success_and_text 'oc explain pods.spec' 'SecurityContext holds p |
| 70 | 70 |
os::cmd::expect_success_and_text 'oc explain deploymentconfig' 'a desired deployment state' |
| 71 | 71 |
os::cmd::expect_success_and_text 'oc explain deploymentconfig.spec' 'ensures that this deployment config will have zero replicas' |
| 72 | 72 |
echo "explain: ok" |
| 73 |
+os::test::junit::declare_suite_end |
|
| 73 | 74 |
|
| 75 |
+os::test::junit::declare_suite_start "cmd/basicresources/resource-builder" |
|
| 74 | 76 |
# Test resource builder filtering of files with expected extensions inside directories, and individual files without expected extensions |
| 75 | 77 |
os::cmd::expect_success 'oc create -f test/fixtures/resource-builder/directory -f test/fixtures/resource-builder/json-no-extension -f test/fixtures/resource-builder/yml-no-extension' |
| 76 | 78 |
# Explicitly specified extensionless files |
| ... | ... |
@@ -80,13 +86,17 @@ os::cmd::expect_success 'oc get secret json-with-extension yml-with-extension' |
| 80 | 80 |
# Ensure extensionless files inside directories are not processed by resource-builder |
| 81 | 81 |
os::cmd::expect_failure_and_text 'oc get secret json-no-extension-in-directory' 'not found' |
| 82 | 82 |
echo "resource-builder: ok" |
| 83 |
+os::test::junit::declare_suite_end |
|
| 83 | 84 |
|
| 85 |
+os::test::junit::declare_suite_start "cmd/basicresources/pods" |
|
| 84 | 86 |
os::cmd::expect_success 'oc get pods --match-server-version' |
| 85 | 87 |
os::cmd::expect_success_and_text 'oc create -f examples/hello-openshift/hello-pod.json' 'pod "hello-openshift" created' |
| 86 | 88 |
os::cmd::expect_success 'oc describe pod hello-openshift' |
| 87 | 89 |
os::cmd::expect_success 'oc delete pods hello-openshift --grace-period=0' |
| 88 | 90 |
echo "pods: ok" |
| 91 |
+os::test::junit::declare_suite_end |
|
| 89 | 92 |
|
| 93 |
+os::test::junit::declare_suite_start "cmd/basicresources/label" |
|
| 90 | 94 |
os::cmd::expect_success_and_text 'oc create -f examples/hello-openshift/hello-pod.json -o name' 'pod/hello-openshift' |
| 91 | 95 |
os::cmd::try_until_success 'oc label pod/hello-openshift acustom=label' # can race against scheduling and status updates |
| 92 | 96 |
os::cmd::expect_success_and_text 'oc describe pod/hello-openshift' 'acustom=label' |
| ... | ... |
@@ -95,12 +105,16 @@ os::cmd::expect_success_and_text 'oc get -o yaml pod/hello-openshift' 'foo: bar' |
| 95 | 95 |
os::cmd::expect_success 'oc delete pods -l acustom=label --grace-period=0' |
| 96 | 96 |
os::cmd::expect_failure 'oc get pod/hello-openshift' |
| 97 | 97 |
echo "label: ok" |
| 98 |
+os::test::junit::declare_suite_end |
|
| 98 | 99 |
|
| 100 |
+os::test::junit::declare_suite_start "cmd/basicresources/services" |
|
| 99 | 101 |
os::cmd::expect_success 'oc get services' |
| 100 | 102 |
os::cmd::expect_success 'oc create -f test/integration/fixtures/test-service.json' |
| 101 | 103 |
os::cmd::expect_success 'oc delete services frontend' |
| 102 | 104 |
echo "services: ok" |
| 105 |
+os::test::junit::declare_suite_end |
|
| 103 | 106 |
|
| 107 |
+os::test::junit::declare_suite_start "cmd/basicresources/list-version-conversion" |
|
| 104 | 108 |
os::cmd::expect_success 'oc create -f test/fixtures/mixed-api-versions.yaml' |
| 105 | 109 |
os::cmd::expect_success 'oc get -f test/fixtures/mixed-api-versions.yaml -o yaml' |
| 106 | 110 |
os::cmd::expect_success 'oc label -f test/fixtures/mixed-api-versions.yaml mylabel=a' |
| ... | ... |
@@ -110,7 +124,9 @@ os::cmd::expect_success_and_text 'oc get -f test/fixtures/mixed-api-versions.yam |
| 110 | 110 |
os::cmd::expect_success_and_text 'oc get -f test/fixtures/mixed-api-versions.yaml --output-version=v1 --output=jsonpath="{..metadata.annotations.myannotation}"' '^b b b b b b$'
|
| 111 | 111 |
os::cmd::expect_success 'oc delete -f test/fixtures/mixed-api-versions.yaml' |
| 112 | 112 |
echo "list version conversion: ok" |
| 113 |
+os::test::junit::declare_suite_end |
|
| 113 | 114 |
|
| 115 |
+os::test::junit::declare_suite_start "cmd/basicresources/nodes" |
|
| 114 | 116 |
os::cmd::expect_success 'oc get nodes' |
| 115 | 117 |
( |
| 116 | 118 |
# subshell so we can unset kubeconfig |
| ... | ... |
@@ -119,7 +135,9 @@ os::cmd::expect_success 'oc get nodes' |
| 119 | 119 |
os::cmd::expect_success 'kubectl get nodes --kubeconfig="${cfg}"'
|
| 120 | 120 |
) |
| 121 | 121 |
echo "nodes: ok" |
| 122 |
+os::test::junit::declare_suite_end |
|
| 122 | 123 |
|
| 124 |
+os::test::junit::declare_suite_start "cmd/basicresources/routes" |
|
| 123 | 125 |
os::cmd::expect_success 'oc get routes' |
| 124 | 126 |
os::cmd::expect_success 'oc create -f test/integration/fixtures/test-route.json' |
| 125 | 127 |
os::cmd::expect_success 'oc delete routes testroute' |
| ... | ... |
@@ -133,7 +151,9 @@ os::cmd::expect_success 'oc delete routes test-route' |
| 133 | 133 |
os::cmd::expect_failure 'oc create route edge new-route' |
| 134 | 134 |
os::cmd::expect_success 'oc delete services frontend' |
| 135 | 135 |
echo "routes: ok" |
| 136 |
+os::test::junit::declare_suite_end |
|
| 136 | 137 |
|
| 138 |
+os::test::junit::declare_suite_start "cmd/basicresources/setprobe" |
|
| 137 | 139 |
# Validate the probe command |
| 138 | 140 |
arg="-f examples/hello-openshift/hello-pod.json" |
| 139 | 141 |
os::cmd::expect_failure_and_text "oc set probe" "error: one or more resources" |
| ... | ... |
@@ -182,7 +202,9 @@ os::cmd::expect_success_and_text "oc set probe dc/test-deployment-config --liven |
| 182 | 182 |
os::cmd::expect_success_and_not_text "oc get dc/test-deployment-config -o yaml" "livenessProbe" |
| 183 | 183 |
os::cmd::expect_success "oc delete dc/test-deployment-config" |
| 184 | 184 |
echo "set probe: ok" |
| 185 |
+os::test::junit::declare_suite_end |
|
| 185 | 186 |
|
| 187 |
+os::test::junit::declare_suite_start "cmd/basicresources/setenv" |
|
| 186 | 188 |
os::cmd::expect_success "oc create -f test/integration/fixtures/test-deployment-config.yaml" |
| 187 | 189 |
os::cmd::expect_success "oc create -f test/integration/fixtures/test-buildcli.json" |
| 188 | 190 |
os::cmd::expect_success_and_text "oc set env dc/test-deployment-config FOO=bar" "updated" |
| ... | ... |
@@ -193,7 +215,9 @@ os::cmd::expect_success_and_text "oc set env bc --all FOO-" "updated" |
| 193 | 193 |
os::cmd::expect_success "oc delete dc/test-deployment-config" |
| 194 | 194 |
os::cmd::expect_success "oc delete bc/ruby-sample-build-validtag" |
| 195 | 195 |
echo "set env: ok" |
| 196 |
+os::test::junit::declare_suite_end |
|
| 196 | 197 |
|
| 198 |
+os::test::junit::declare_suite_start "cmd/basicresources/expose" |
|
| 197 | 199 |
# Expose service as a route |
| 198 | 200 |
os::cmd::expect_success 'oc create -f test/integration/fixtures/test-service.json' |
| 199 | 201 |
os::cmd::expect_failure 'oc expose service frontend --create-external-load-balancer' |
| ... | ... |
@@ -214,9 +238,11 @@ os::cmd::expect_success 'oc create -f test/fixtures/multiport-service.yaml' |
| 214 | 214 |
os::cmd::expect_success 'oc expose svc/frontend --name route-with-set-port' |
| 215 | 215 |
os::cmd::expect_success_and_text "oc get route route-with-set-port --template='{{.spec.port.targetPort}}' --output-version=v1" "web"
|
| 216 | 216 |
echo "expose: ok" |
| 217 |
+os::test::junit::declare_suite_end |
|
| 217 | 218 |
|
| 218 | 219 |
os::cmd::expect_success 'oc delete all --all' |
| 219 | 220 |
|
| 221 |
+os::test::junit::declare_suite_start "cmd/basicresources/projectadmin" |
|
| 220 | 222 |
# switch to test user to be sure that default project admin policy works properly |
| 221 | 223 |
new="$(mktemp -d)/tempconfig" |
| 222 | 224 |
os::cmd::expect_success "oc config view --raw > $new" |
| ... | ... |
@@ -257,11 +283,12 @@ os::cmd::expect_success_and_text "oc get dc/ruby-hello-world --template='{{ .spe
|
| 257 | 257 |
os::cmd::expect_success 'oc delete all -l app=ruby-hello-world' |
| 258 | 258 |
os::cmd::expect_failure 'oc get dc/ruby-hello-world' |
| 259 | 259 |
echo "delete all: ok" |
| 260 |
+os::test::junit::declare_suite_end |
|
| 260 | 261 |
|
| 261 | 262 |
# service accounts should not be allowed to request new projects |
| 262 | 263 |
os::cmd::expect_failure_and_text "oc new-project --token="$( oc sa get-token builder )" will-fail" 'Error from server: You may not request a new project via this API' |
| 263 | 264 |
|
| 264 |
- |
|
| 265 |
+os::test::junit::declare_suite_start "cmd/basicresources/patch" |
|
| 265 | 266 |
# Validate patching works correctly |
| 266 | 267 |
oc login -u system:admin |
| 267 | 268 |
# Clean up group if needed to be re-entrant |
| ... | ... |
@@ -274,4 +301,7 @@ os::cmd::expect_success_and_text 'oc get group patch-group -o yaml' 'myuser' |
| 274 | 274 |
os::cmd::expect_success "oc patch group patch-group -p 'users: []' --loglevel=8" |
| 275 | 275 |
os::cmd::expect_success_and_text 'oc get group patch-group -o yaml' 'users: \[\]' |
| 276 | 276 |
echo "patch: ok" |
| 277 |
+os::test::junit::declare_suite_end |
|
| 278 |
+ |
|
| 279 |
+os::test::junit::declare_suite_end |
|
| 277 | 280 |
|
| ... | ... |
@@ -7,7 +7,9 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 12 | 14 |
# Cleanup cluster resources created by this test |
| 13 | 15 |
( |
| ... | ... |
@@ -20,6 +22,7 @@ os::log::install_errexit |
| 20 | 20 |
url=":${API_PORT:-8443}"
|
| 21 | 21 |
project="$(oc project -q)" |
| 22 | 22 |
|
| 23 |
+os::test::junit::declare_suite_start "cmd/builds" |
|
| 23 | 24 |
# This test validates builds and build related commands |
| 24 | 25 |
|
| 25 | 26 |
os::cmd::expect_success 'oc new-build centos/ruby-22-centos7 https://github.com/openshift/ruby-hello-world.git' |
| ... | ... |
@@ -75,6 +78,7 @@ os::cmd::expect_success_and_text 'oc get bc/centos -o=jsonpath="{.spec.output.to
|
| 75 | 75 |
# Ensure output is valid JSON |
| 76 | 76 |
os::cmd::expect_success 'oc new-build -D "FROM centos:7" -o json | python -m json.tool' |
| 77 | 77 |
|
| 78 |
+os::test::junit::declare_suite_start "cmd/builds/postcommithook" |
|
| 78 | 79 |
# Ensure post commit hook is executed |
| 79 | 80 |
os::cmd::expect_success 'oc new-build -D "FROM busybox:1"' |
| 80 | 81 |
os::cmd::try_until_text 'oc get istag busybox:1' 'busybox@sha256:' |
| ... | ... |
@@ -88,6 +92,7 @@ os::cmd::expect_success 'oc patch bc/busybox -p '\''{"spec":{"postCommit":{"args
|
| 88 | 88 |
os::cmd::expect_success_and_text 'oc get bc/busybox -o=jsonpath="{.spec.postCommit['\''script'\'','\''args'\'','\''command'\'']}"' ' \[echo default entrypoint\] \[\]'
|
| 89 | 89 |
# os::cmd::expect_success_and_text 'oc start-build --wait --follow busybox' 'default entrypoint' |
| 90 | 90 |
echo "postCommitHook: ok" |
| 91 |
+os::test::junit::declare_suite_end |
|
| 91 | 92 |
|
| 92 | 93 |
os::cmd::expect_success 'oc delete all --all' |
| 93 | 94 |
os::cmd::expect_success 'oc process -f examples/sample-app/application-template-dockerbuild.json -l build=docker | oc create -f -' |
| ... | ... |
@@ -98,12 +103,15 @@ os::cmd::expect_success 'oc get builds' |
| 98 | 98 |
# make sure the imagestream has the latest tag before trying to test it or start a build with it |
| 99 | 99 |
os::cmd::try_until_text 'oc get is ruby-22-centos7' 'latest' |
| 100 | 100 |
|
| 101 |
+os::test::junit::declare_suite_start "cmd/builds/patch-anon-fields" |
|
| 101 | 102 |
REAL_OUTPUT_TO=$(oc get bc/ruby-sample-build --template='{{ .spec.output.to.name }}')
|
| 102 | 103 |
os::cmd::expect_success "oc patch bc/ruby-sample-build -p '{\"spec\":{\"output\":{\"to\":{\"name\":\"different:tag1\"}}}}'"
|
| 103 | 104 |
os::cmd::expect_success_and_text "oc get bc/ruby-sample-build --template='{{ .spec.output.to.name }}'" 'different'
|
| 104 | 105 |
os::cmd::expect_success "oc patch bc/ruby-sample-build -p '{\"spec\":{\"output\":{\"to\":{\"name\":\"${REAL_OUTPUT_TO}\"}}}}'"
|
| 105 | 106 |
echo "patchAnonFields: ok" |
| 107 |
+os::test::junit::declare_suite_end |
|
| 106 | 108 |
|
| 109 |
+os::test::junit::declare_suite_start "cmd/builds/config" |
|
| 107 | 110 |
os::cmd::expect_success_and_text 'oc describe buildConfigs ruby-sample-build' "Webhook GitHub.+${url}/oapi/v1/namespaces/${project}/buildconfigs/ruby-sample-build/webhooks/secret101/github"
|
| 108 | 111 |
os::cmd::expect_success_and_text 'oc describe buildConfigs ruby-sample-build' "Webhook Generic.+${url}/oapi/v1/namespaces/${project}/buildconfigs/ruby-sample-build/webhooks/secret101/generic"
|
| 109 | 112 |
os::cmd::expect_success 'oc start-build --list-webhooks='all' ruby-sample-build' |
| ... | ... |
@@ -116,7 +124,9 @@ os::cmd::expect_success "oc start-build --from-webhook=${webhook}"
|
| 116 | 116 |
os::cmd::expect_success 'oc get builds' |
| 117 | 117 |
os::cmd::expect_success 'oc delete all -l build=docker' |
| 118 | 118 |
echo "buildConfig: ok" |
| 119 |
+os::test::junit::declare_suite_end |
|
| 119 | 120 |
|
| 121 |
+os::test::junit::declare_suite_start "cmd/builds/start-build" |
|
| 120 | 122 |
os::cmd::expect_success 'oc create -f test/integration/fixtures/test-buildcli.json' |
| 121 | 123 |
# a build for which there is not an upstream tag in the corresponding imagerepo, so |
| 122 | 124 |
# the build should use the image field as defined in the buildconfig |
| ... | ... |
@@ -125,7 +135,9 @@ os::cmd::expect_success_and_text "oc describe build ${started}" 'centos/ruby-22-
|
| 125 | 125 |
frombuild=$(oc start-build --from-build="${started}")
|
| 126 | 126 |
os::cmd::expect_success_and_text "oc describe build ${frombuild}" 'centos/ruby-22-centos7$'
|
| 127 | 127 |
echo "start-build: ok" |
| 128 |
+os::test::junit::declare_suite_end |
|
| 128 | 129 |
|
| 130 |
+os::test::junit::declare_suite_start "cmd/builds/cancel-build" |
|
| 129 | 131 |
os::cmd::expect_success_and_text "oc cancel-build ${started} --dump-logs --restart" "Restarted build ${started}."
|
| 130 | 132 |
os::cmd::expect_success 'oc delete all --all' |
| 131 | 133 |
os::cmd::expect_success 'oc process -f examples/sample-app/application-template-dockerbuild.json -l build=docker | oc create -f -' |
| ... | ... |
@@ -136,3 +148,6 @@ os::cmd::expect_success_and_text 'oc cancel-build build/ruby-sample-build-1' 'Bu |
| 136 | 136 |
os::cmd::try_until_text 'oc cancel-build build/ruby-sample-build-1' 'A cancellation event was already triggered for the build ruby-sample-build-1.' |
| 137 | 137 |
os::cmd::expect_success 'oc delete all --all' |
| 138 | 138 |
echo "cancel-build: ok" |
| 139 |
+os::test::junit::declare_suite_end |
|
| 140 |
+ |
|
| 141 |
+os::test::junit::declare_suite_end |
|
| 139 | 142 |
\ No newline at end of file |
| ... | ... |
@@ -7,7 +7,9 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 12 | 14 |
# Cleanup cluster resources created by this test |
| 13 | 15 |
( |
| ... | ... |
@@ -16,7 +18,7 @@ os::log::install_errexit |
| 16 | 16 |
exit 0 |
| 17 | 17 |
) &>/dev/null |
| 18 | 18 |
|
| 19 |
- |
|
| 19 |
+os::test::junit::declare_suite_start "cmd/debug" |
|
| 20 | 20 |
# This test validates the debug command |
| 21 | 21 |
os::cmd::expect_success 'oc create -f test/integration/fixtures/test-deployment-config.yaml' |
| 22 | 22 |
os::cmd::expect_success_and_text "oc debug dc/test-deployment-config -o yaml" '\- /bin/sh' |
| ... | ... |
@@ -36,3 +38,4 @@ os::cmd::expect_success_and_not_text "oc debug -f examples/hello-openshift/hello |
| 36 | 36 |
os::cmd::expect_success_and_not_text "oc debug -f examples/hello-openshift/hello-pod.json -o yaml -- /bin/env" 'tty' |
| 37 | 37 |
# TODO: write a test that emulates a TTY to verify the correct defaulting of what the pod is created |
| 38 | 38 |
echo "debug: ok" |
| 39 |
+os::test::junit::declare_suite_end |
|
| 39 | 40 |
\ No newline at end of file |
| ... | ... |
@@ -7,7 +7,9 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 12 | 14 |
# Cleanup cluster resources created by this test |
| 13 | 15 |
( |
| ... | ... |
@@ -17,6 +19,7 @@ os::log::install_errexit |
| 17 | 17 |
) &>/dev/null |
| 18 | 18 |
|
| 19 | 19 |
|
| 20 |
+os::test::junit::declare_suite_start "cmd/deployments" |
|
| 20 | 21 |
# This test validates deployments and the env command |
| 21 | 22 |
|
| 22 | 23 |
os::cmd::expect_success 'oc get deploymentConfigs' |
| ... | ... |
@@ -27,6 +30,7 @@ os::cmd::expect_success_and_text 'oc get dc -o name' 'deploymentconfig/test-depl |
| 27 | 27 |
os::cmd::try_until_success 'oc get rc/test-deployment-config-1' |
| 28 | 28 |
os::cmd::expect_success_and_text 'oc describe dc test-deployment-config' 'deploymentconfig=test-deployment-config' |
| 29 | 29 |
|
| 30 |
+os::test::junit::declare_suite_start "cmd/deployments/env" |
|
| 30 | 31 |
# Patch a nil list |
| 31 | 32 |
os::cmd::expect_success 'oc env dc/test-deployment-config TEST=value' |
| 32 | 33 |
os::cmd::expect_success_and_text 'oc env dc/test-deployment-config --list' 'TEST=value' |
| ... | ... |
@@ -66,10 +70,14 @@ os::cmd::expect_success_and_not_text 'oc env dc/test-deployment-config --list' ' |
| 66 | 66 |
os::cmd::expect_success_and_not_text 'oc env dc/test-deployment-config --list' 'C=c' |
| 67 | 67 |
os::cmd::expect_success_and_not_text 'oc env dc/test-deployment-config --list' 'G=g' |
| 68 | 68 |
echo "env: ok" |
| 69 |
+os::test::junit::declare_suite_end |
|
| 70 |
+ |
|
| 71 |
+os::test::junit::declare_suite_start "cmd/deployments/config" |
|
| 69 | 72 |
os::cmd::expect_success 'oc deploy test-deployment-config' |
| 70 | 73 |
os::cmd::expect_success 'oc deploy dc/test-deployment-config' |
| 71 | 74 |
os::cmd::expect_success 'oc delete deploymentConfigs test-deployment-config' |
| 72 | 75 |
echo "deploymentConfigs: ok" |
| 76 |
+os::test::junit::declare_suite_end |
|
| 73 | 77 |
|
| 74 | 78 |
os::cmd::expect_success 'oc delete all --all' |
| 75 | 79 |
# TODO: remove, flake caused by deployment controller updating the following dc |
| ... | ... |
@@ -79,6 +87,7 @@ os::cmd::expect_success 'oc delete all --all' |
| 79 | 79 |
os::cmd::expect_success 'oc process -f examples/sample-app/application-template-dockerbuild.json -l app=dockerbuild | oc create -f -' |
| 80 | 80 |
os::cmd::try_until_success 'oc get rc/database-1' |
| 81 | 81 |
|
| 82 |
+os::test::junit::declare_suite_start "cmd/deployments/rollback" |
|
| 82 | 83 |
os::cmd::expect_success 'oc rollback database --to-version=1 -o=yaml' |
| 83 | 84 |
os::cmd::expect_success 'oc rollback dc/database --to-version=1 -o=yaml' |
| 84 | 85 |
os::cmd::expect_success 'oc rollback dc/database --to-version=1 --dry-run' |
| ... | ... |
@@ -87,7 +96,9 @@ os::cmd::expect_success 'oc rollback rc/database-1 -o=yaml' |
| 87 | 87 |
# should fail because there's no previous deployment |
| 88 | 88 |
os::cmd::expect_failure 'oc rollback database -o yaml' |
| 89 | 89 |
echo "rollback: ok" |
| 90 |
+os::test::junit::declare_suite_end |
|
| 90 | 91 |
|
| 92 |
+os::test::junit::declare_suite_start "cmd/deployments/stop" |
|
| 91 | 93 |
os::cmd::expect_success 'oc get dc/database' |
| 92 | 94 |
os::cmd::expect_success 'oc expose dc/database --name=fromdc' |
| 93 | 95 |
# should be a service |
| ... | ... |
@@ -98,11 +109,15 @@ os::cmd::expect_success 'oc delete dc/database' |
| 98 | 98 |
os::cmd::expect_failure 'oc get dc/database' |
| 99 | 99 |
os::cmd::expect_failure 'oc get rc/database-1' |
| 100 | 100 |
echo "stop: ok" |
| 101 |
+os::test::junit::declare_suite_end |
|
| 101 | 102 |
|
| 103 |
+os::test::junit::declare_suite_start "cmd/deployments/autoscale" |
|
| 102 | 104 |
os::cmd::expect_success 'oc create -f test/integration/fixtures/test-deployment-config.yaml' |
| 103 | 105 |
os::cmd::expect_success 'oc autoscale dc/test-deployment-config --max 5' |
| 104 | 106 |
os::cmd::expect_success_and_text "oc get hpa/test-deployment-config --template='{{.spec.maxReplicas}}'" "5"
|
| 105 | 107 |
os::cmd::expect_success 'oc delete dc/test-deployment-config' |
| 106 | 108 |
os::cmd::expect_success 'oc delete hpa/test-deployment-config' |
| 107 | 109 |
echo "autoscale: ok" |
| 110 |
+os::test::junit::declare_suite_end |
|
| 108 | 111 |
|
| 112 |
+os::test::junit::declare_suite_end |
| ... | ... |
@@ -8,6 +8,7 @@ OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 | 10 |
os::log::install_errexit |
| 11 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 12 |
|
| 12 | 13 |
# This test validates the diagnostics command |
| 13 | 14 |
|
| ... | ... |
@@ -16,6 +17,7 @@ os::log::install_errexit |
| 16 | 16 |
# Without things feeding into systemd, AnalyzeLogs and UnitStatus are irrelevant. |
| 17 | 17 |
# The rest should be included in some fashion. |
| 18 | 18 |
|
| 19 |
+os::test::junit::declare_suite_start "cmd/diagnostics" |
|
| 19 | 20 |
os::cmd::expect_success 'oadm diagnostics ClusterRoleBindings ClusterRoles ConfigContexts ' |
| 20 | 21 |
# DiagnosticPod can't run without Docker, would just time out. Exercise flags instead. |
| 21 | 22 |
os::cmd::expect_success "oadm diagnostics DiagnosticPod --prevent-modification --images=foo" |
| ... | ... |
@@ -33,3 +35,4 @@ os::cmd::expect_failure_and_text 'oadm diagnostics AnalyzeLogs AlsoMissing' 'Not |
| 33 | 33 |
# openshift ex diagnostics is deprecated but not removed. Make sure it works until we consciously remove it. |
| 34 | 34 |
os::cmd::expect_success 'openshift ex diagnostics ClusterRoleBindings ClusterRoles ConfigContexts ' |
| 35 | 35 |
echo "diagnostics: ok" |
| 36 |
+os::test::junit::declare_suite_end |
|
| 36 | 37 |
\ No newline at end of file |
| ... | ... |
@@ -7,7 +7,9 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 12 | 14 |
# Cleanup cluster resources created by this test |
| 13 | 15 |
( |
| ... | ... |
@@ -17,6 +19,7 @@ os::log::install_errexit |
| 17 | 17 |
) &>/dev/null |
| 18 | 18 |
|
| 19 | 19 |
|
| 20 |
+os::test::junit::declare_suite_start "cmd/edit" |
|
| 20 | 21 |
# This test validates the edit command |
| 21 | 22 |
|
| 22 | 23 |
os::cmd::expect_success 'oc create -f examples/hello-openshift/hello-pod.json' |
| ... | ... |
@@ -26,3 +29,4 @@ os::cmd::expect_success_and_text 'OC_EDITOR=cat oc edit pod/hello-openshift' 'na |
| 26 | 26 |
os::cmd::expect_success_and_text 'OC_EDITOR=cat oc edit --windows-line-endings pod/hello-openshift | file -' 'CRLF' |
| 27 | 27 |
os::cmd::expect_success_and_not_text 'OC_EDITOR=cat oc edit --windows-line-endings=false pod/hello-openshift | file -' 'CRFL' |
| 28 | 28 |
echo "edit: ok" |
| 29 |
+os::test::junit::declare_suite_end |
| ... | ... |
@@ -7,7 +7,9 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 12 | 14 |
# Cleanup cluster resources created by this test |
| 13 | 15 |
( |
| ... | ... |
@@ -17,6 +19,7 @@ os::log::install_errexit |
| 17 | 17 |
) &>/dev/null |
| 18 | 18 |
|
| 19 | 19 |
|
| 20 |
+os::test::junit::declare_suite_start "cmd/export" |
|
| 20 | 21 |
# This test validates the export command |
| 21 | 22 |
|
| 22 | 23 |
os::cmd::expect_success 'oc new-app -f examples/sample-app/application-template-stibuild.json --name=sample' |
| ... | ... |
@@ -41,3 +44,5 @@ os::cmd::expect_failure_and_text 'oc export svc -l a=b' 'no resources found' |
| 41 | 41 |
os::cmd::expect_success 'oc export svc -l app=sample' |
| 42 | 42 |
os::cmd::expect_success_and_text 'oc export -f examples/sample-app/application-template-stibuild.json --raw --output-version=v1' 'apiVersion: v1' |
| 43 | 43 |
echo "export: ok" |
| 44 |
+os::test::junit::declare_suite_end |
|
| 45 |
+ |
| ... | ... |
@@ -7,8 +7,11 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 14 |
+os::test::junit::declare_suite_start "cmd/help" |
|
| 12 | 15 |
# This test validates the help commands and output text |
| 13 | 16 |
|
| 14 | 17 |
# verify some default commands |
| ... | ... |
@@ -127,4 +130,6 @@ os::cmd::expect_success_and_text 'openshift ex sync-groups --help' 'external pro |
| 127 | 127 |
os::cmd::expect_success_and_text 'openshift ex prune-groups --help' 'external provider' |
| 128 | 128 |
os::cmd::expect_success_and_text 'openshift admin groups sync --help' 'external provider' |
| 129 | 129 |
os::cmd::expect_success_and_text 'openshift admin groups prune --help' 'external provider' |
| 130 |
-os::cmd::expect_success_and_text 'openshift admin prune groups --help' 'external provider' |
|
| 131 | 130 |
\ No newline at end of file |
| 131 |
+os::cmd::expect_success_and_text 'openshift admin prune groups --help' 'external provider' |
|
| 132 |
+ |
|
| 133 |
+os::test::junit::declare_suite_end |
|
| 132 | 134 |
\ No newline at end of file |
| ... | ... |
@@ -7,7 +7,9 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 12 | 14 |
# Cleanup cluster resources created by this test |
| 13 | 15 |
( |
| ... | ... |
@@ -20,13 +22,17 @@ os::log::install_errexit |
| 20 | 20 |
defaultimage="openshift/origin-\${component}:latest"
|
| 21 | 21 |
USE_IMAGES=${USE_IMAGES:-$defaultimage}
|
| 22 | 22 |
|
| 23 |
+os::test::junit::declare_suite_start "cmd/images" |
|
| 23 | 24 |
# This test validates images and image streams along with the tag and import-image commands |
| 24 | 25 |
|
| 26 |
+os::test::junit::declare_suite_start "cmd/images/images" |
|
| 25 | 27 |
os::cmd::expect_success 'oc get images' |
| 26 | 28 |
os::cmd::expect_success 'oc create -f test/integration/fixtures/test-image.json' |
| 27 | 29 |
os::cmd::expect_success 'oc delete images test' |
| 28 | 30 |
echo "images: ok" |
| 31 |
+os::test::junit::declare_suite_end |
|
| 29 | 32 |
|
| 33 |
+os::test::junit::declare_suite_start "cmd/images/imagestreams" |
|
| 30 | 34 |
os::cmd::expect_success 'oc get imageStreams' |
| 31 | 35 |
os::cmd::expect_success 'oc create -f test/integration/fixtures/test-image-stream.json' |
| 32 | 36 |
# verify that creating a registry fills out .status.dockerImageRepository |
| ... | ... |
@@ -92,7 +98,9 @@ os::cmd::expect_success_and_text "oc describe ${imagename}" 'Environment:'
|
| 92 | 92 |
os::cmd::expect_success_and_text "oc describe ${imagename}" 'Image Created:'
|
| 93 | 93 |
os::cmd::expect_success_and_text "oc describe ${imagename}" 'Image Name:'
|
| 94 | 94 |
echo "imageStreams: ok" |
| 95 |
+os::test::junit::declare_suite_end |
|
| 95 | 96 |
|
| 97 |
+os::test::junit::declare_suite_start "cmd/images/import-image" |
|
| 96 | 98 |
# should follow the latest reference to 5.6 and update that, and leave latest unchanged |
| 97 | 99 |
os::cmd::expect_success_and_text "oc get is/mysql --template='{{(index .spec.tags 1).from.kind}}'" 'DockerImage'
|
| 98 | 100 |
os::cmd::expect_success_and_text "oc get is/mysql --template='{{(index .spec.tags 2).from.kind}}'" 'ImageStreamTag'
|
| ... | ... |
@@ -123,7 +131,9 @@ os::cmd::expect_failure_and_text 'oc import-image mysql --from=mysql --all' '\-\ |
| 123 | 123 |
os::cmd::expect_success_and_text 'oc import-image mysql --from=mysql --all --confirm' 'sha256:' |
| 124 | 124 |
name=$(oc get istag/mysql:latest --template='{{ .image.metadata.name }}')
|
| 125 | 125 |
echo "import-image: ok" |
| 126 |
+os::test::junit::declare_suite_end |
|
| 126 | 127 |
|
| 128 |
+os::test::junit::declare_suite_start "cmd/images/tag" |
|
| 127 | 129 |
# oc tag |
| 128 | 130 |
os::cmd::expect_success 'oc tag mysql:latest tagtest:tag1 --alias' |
| 129 | 131 |
os::cmd::expect_success_and_text "oc get is/tagtest --template='{{(index .spec.tags 0).from.kind}}'" 'ImageStreamTag'
|
| ... | ... |
@@ -185,7 +195,9 @@ os::cmd::expect_success 'oc create -f test/fixtures/test-stream.yaml' |
| 185 | 185 |
os::cmd::expect_success_and_text 'oc tag test-stream:latest -d' 'Deleted' |
| 186 | 186 |
os::cmd::expect_success 'oc delete is/test-stream' |
| 187 | 187 |
echo "tag: ok" |
| 188 |
+os::test::junit::declare_suite_end |
|
| 188 | 189 |
|
| 190 |
+os::test::junit::declare_suite_start "cmd/images/delete-istag" |
|
| 189 | 191 |
# test deleting a tag using oc delete |
| 190 | 192 |
os::cmd::expect_success_and_text "oc get is perl --template '{{(index .spec.tags 0).name}}'" '5.16'
|
| 191 | 193 |
os::cmd::expect_success_and_text "oc get is perl --template '{{(index .status.tags 0).tag}}'" '5.16'
|
| ... | ... |
@@ -195,3 +207,6 @@ os::cmd::expect_success_and_not_text 'oc get is/perl --template={{.status.tags}}
|
| 195 | 195 |
os::cmd::expect_success 'oc delete all --all' |
| 196 | 196 |
|
| 197 | 197 |
echo "delete istag: ok" |
| 198 |
+os::test::junit::declare_suite_end |
|
| 199 |
+ |
|
| 200 |
+os::test::junit::declare_suite_end |
|
| 198 | 201 |
\ No newline at end of file |
| ... | ... |
@@ -7,7 +7,9 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 12 | 14 |
# Cleanup cluster resources created by this test |
| 13 | 15 |
( |
| ... | ... |
@@ -17,6 +19,7 @@ os::log::install_errexit |
| 17 | 17 |
) &>/dev/null |
| 18 | 18 |
|
| 19 | 19 |
|
| 20 |
+os::test::junit::declare_suite_start "cmd/newapp" |
|
| 20 | 21 |
# This test validates the new-app command |
| 21 | 22 |
|
| 22 | 23 |
os::cmd::expect_success_and_text 'oc new-app library/php mysql -o yaml' '3306' |
| ... | ... |
@@ -241,3 +244,4 @@ os::cmd::expect_success 'oc new-app https://github.com/openshift/ruby-hello-worl |
| 241 | 241 |
os::cmd::expect_success_and_not_text 'oc new-app https://github.com/openshift/ruby-hello-world --output-version=v1 -o=jsonpath="{.items[?(@.kind==\"BuildConfig\")].spec.source}"' 'dockerfile|binary'
|
| 242 | 242 |
|
| 243 | 243 |
echo "new-app: ok" |
| 244 |
+os::test::junit::declare_suite_end |
|
| 244 | 245 |
\ No newline at end of file |
| ... | ... |
@@ -7,8 +7,11 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 14 |
+os::test::junit::declare_suite_start "cmd/policy" |
|
| 12 | 15 |
# This test validates user level policy |
| 13 | 16 |
|
| 14 | 17 |
os::cmd::expect_failure_and_text 'oc policy add-role-to-user' 'you must specify a role' |
| ... | ... |
@@ -70,3 +73,4 @@ os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/alternate_
|
| 70 | 70 |
os::cmd::expect_failure_and_text "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml" "attempt to grant extra privileges"
|
| 71 | 71 |
|
| 72 | 72 |
echo "policy: ok" |
| 73 |
+os::test::junit::declare_suite_end |
| ... | ... |
@@ -7,7 +7,9 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 12 | 14 |
# Cleanup cluster resources created by this test |
| 13 | 15 |
( |
| ... | ... |
@@ -20,6 +22,7 @@ os::log::install_errexit |
| 20 | 20 |
defaultimage="openshift/origin-\${component}:latest"
|
| 21 | 21 |
USE_IMAGES=${USE_IMAGES:-$defaultimage}
|
| 22 | 22 |
|
| 23 |
+os::test::junit::declare_suite_start "cmd/router" |
|
| 23 | 24 |
# Test running a router |
| 24 | 25 |
os::cmd::expect_failure_and_text 'oadm router --dry-run' 'does not exist' |
| 25 | 26 |
os::cmd::expect_failure_and_text 'oadm router --dry-run -o yaml' 'service account "router" is not allowed to access the host network on nodes' |
| ... | ... |
@@ -65,3 +68,4 @@ os::cmd::expect_success_and_text 'oc get dc/router -o yaml' 'readinessProbe' |
| 65 | 65 |
# only when using hostnetwork should we force the probes to use localhost |
| 66 | 66 |
os::cmd::expect_success_and_not_text "oadm router -o yaml --credentials=${KUBECONFIG} --host-network=false" 'host: localhost'
|
| 67 | 67 |
echo "router: ok" |
| 68 |
+os::test::junit::declare_suite_end |
|
| 68 | 69 |
\ No newline at end of file |
| ... | ... |
@@ -7,7 +7,9 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 12 | 14 |
# Cleanup cluster resources created by this test |
| 13 | 15 |
( |
| ... | ... |
@@ -17,6 +19,7 @@ os::log::install_errexit |
| 17 | 17 |
) &>/dev/null |
| 18 | 18 |
|
| 19 | 19 |
|
| 20 |
+os::test::junit::declare_suite_start "cmd/secrets" |
|
| 20 | 21 |
# This test validates secret interaction |
| 21 | 22 |
os::cmd::expect_failure_and_text 'oc secrets new foo --type=blah makefile=Makefile' 'error: unknown secret type "blah"' |
| 22 | 23 |
os::cmd::expect_success 'oc secrets new foo --type=blah makefile=Makefile --confirm' |
| ... | ... |
@@ -77,3 +80,4 @@ os::cmd::expect_success 'oc secret new-sshauth --help' |
| 77 | 77 |
os::cmd::expect_success 'oc secret add --help' |
| 78 | 78 |
|
| 79 | 79 |
echo "secrets: ok" |
| 80 |
+os::test::junit::declare_suite_end |
| ... | ... |
@@ -7,7 +7,9 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 12 | 14 |
# Cleanup cluster resources created by this test |
| 13 | 15 |
( |
| ... | ... |
@@ -20,8 +22,10 @@ os::log::install_errexit |
| 20 | 20 |
) &>/dev/null |
| 21 | 21 |
|
| 22 | 22 |
|
| 23 |
+os::test::junit::declare_suite_start "cmd/templates" |
|
| 23 | 24 |
# This test validates template commands |
| 24 | 25 |
|
| 26 |
+os::test::junit::declare_suite_start "cmd/templates/basic" |
|
| 25 | 27 |
os::cmd::expect_success 'oc get templates' |
| 26 | 28 |
os::cmd::expect_success 'oc create -f examples/sample-app/application-template-dockerbuild.json' |
| 27 | 29 |
os::cmd::expect_success 'oc get templates' |
| ... | ... |
@@ -33,11 +37,15 @@ os::cmd::expect_success 'oc delete templates ruby-helloworld-sample' |
| 33 | 33 |
os::cmd::expect_success 'oc get templates' |
| 34 | 34 |
# TODO: create directly from template |
| 35 | 35 |
echo "templates: ok" |
| 36 |
+os::test::junit::declare_suite_end |
|
| 36 | 37 |
|
| 38 |
+os::test::junit::declare_suite_start "cmd/templates/config" |
|
| 37 | 39 |
os::cmd::expect_success 'oc process -f test/templates/fixtures/guestbook.json -l app=guestbook | oc create -f -' |
| 38 | 40 |
os::cmd::expect_success_and_text 'oc status' 'frontend-service' |
| 39 | 41 |
echo "template+config: ok" |
| 42 |
+os::test::junit::declare_suite_end |
|
| 40 | 43 |
|
| 44 |
+os::test::junit::declare_suite_start "cmd/templates/parameters" |
|
| 41 | 45 |
# Joined parameter values are honored |
| 42 | 46 |
os::cmd::expect_success_and_text 'oc process -f test/templates/fixtures/guestbook.json -v ADMIN_USERNAME=myuser,ADMIN_PASSWORD=mypassword' '"myuser"' |
| 43 | 47 |
os::cmd::expect_success_and_text 'oc process -f test/templates/fixtures/guestbook.json -v ADMIN_USERNAME=myuser,ADMIN_PASSWORD=mypassword' '"mypassword"' |
| ... | ... |
@@ -53,7 +61,9 @@ os::cmd::expect_success_and_text 'oc process ruby-helloworld-sample MYSQL_USER=m |
| 53 | 53 |
os::cmd::expect_success_and_text 'oc process MYSQL_USER=myself MYSQL_PASSWORD=my,1%pass ruby-helloworld-sample' '"my,1%pass"' |
| 54 | 54 |
os::cmd::expect_success 'oc delete template ruby-helloworld-sample' |
| 55 | 55 |
echo "template+parameters: ok" |
| 56 |
+os::test::junit::declare_suite_end |
|
| 56 | 57 |
|
| 58 |
+os::test::junit::declare_suite_start "cmd/templates/data-precision" |
|
| 57 | 59 |
# Run as cluster-admin to allow choosing any supplemental groups we want |
| 58 | 60 |
# Ensure large integers survive unstructured JSON creation |
| 59 | 61 |
os::cmd::expect_success 'oc create -f test/fixtures/template-type-precision.json' |
| ... | ... |
@@ -75,8 +85,9 @@ os::cmd::expect_success_and_text 'oc get pod/template-type-precision -o json' 'p |
| 75 | 75 |
os::cmd::expect_success 'oc delete template/template-type-precision' |
| 76 | 76 |
os::cmd::expect_success 'oc delete pod/template-type-precision' |
| 77 | 77 |
echo "template data precision: ok" |
| 78 |
+os::test::junit::declare_suite_end |
|
| 78 | 79 |
|
| 79 |
- |
|
| 80 |
+os::test::junit::declare_suite_start "cmd/templates/different-namespaces" |
|
| 80 | 81 |
os::cmd::expect_success 'oc create -f examples/sample-app/application-template-dockerbuild.json -n openshift' |
| 81 | 82 |
os::cmd::expect_success 'oc policy add-role-to-user admin test-user' |
| 82 | 83 |
new="$(mktemp -d)/tempconfig" |
| ... | ... |
@@ -92,3 +103,6 @@ os::cmd::expect_success 'oc process templates/ruby-helloworld-sample' |
| 92 | 92 |
os::cmd::expect_success 'oc process openshift//ruby-helloworld-sample' |
| 93 | 93 |
os::cmd::expect_success 'oc process openshift/template/ruby-helloworld-sample' |
| 94 | 94 |
echo "processing templates in different namespace: ok" |
| 95 |
+os::test::junit::declare_suite_end |
|
| 96 |
+ |
|
| 97 |
+os::test::junit::declare_suite_end |
|
| 95 | 98 |
\ No newline at end of file |
| ... | ... |
@@ -8,6 +8,7 @@ OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 | 10 |
os::log::install_errexit |
| 11 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 12 |
|
| 12 | 13 |
# Cleanup cluster resources created by this test |
| 13 | 14 |
( |
| ... | ... |
@@ -20,12 +21,14 @@ os::log::install_errexit |
| 20 | 20 |
url=":${API_PORT:-8443}"
|
| 21 | 21 |
project="$(oc project -q)" |
| 22 | 22 |
|
| 23 |
-# This test validates builds and build related commands |
|
| 23 |
+os::test::junit::declare_suite_start "cmd/triggers" |
|
| 24 |
+# This test validates triggers |
|
| 24 | 25 |
|
| 25 | 26 |
os::cmd::expect_success 'oc new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-hello-world.git' |
| 26 | 27 |
os::cmd::expect_success 'oc get bc/ruby-hello-world' |
| 27 | 28 |
os::cmd::expect_success 'oc get dc/ruby-hello-world' |
| 28 | 29 |
|
| 30 |
+os::test::junit::declare_suite_start "cmd/triggers/buildconfigs" |
|
| 29 | 31 |
## Build configs |
| 30 | 32 |
|
| 31 | 33 |
# error conditions |
| ... | ... |
@@ -64,7 +67,9 @@ os::cmd::expect_success_and_not_text 'oc set triggers bc/ruby-hello-world' 'imag |
| 64 | 64 |
os::cmd::expect_success_and_text 'oc set triggers bc --all' 'buildconfigs/ruby-hello-world.*image.*ruby-22-centos7:latest.*false' |
| 65 | 65 |
os::cmd::expect_success_and_text 'oc set triggers bc --all --auto' 'updated' |
| 66 | 66 |
os::cmd::expect_success_and_text 'oc set triggers bc --all' 'buildconfigs/ruby-hello-world.*image.*ruby-22-centos7:latest.*true' |
| 67 |
+os::test::junit::declare_suite_end |
|
| 67 | 68 |
|
| 69 |
+os::test::junit::declare_suite_start "cmd/triggers/deploymentconfigs" |
|
| 68 | 70 |
## Deployment configs |
| 69 | 71 |
|
| 70 | 72 |
# error conditions |
| ... | ... |
@@ -86,3 +91,6 @@ os::cmd::expect_success_and_text 'oc set triggers dc/ruby-hello-world --auto' 'u |
| 86 | 86 |
os::cmd::expect_success_and_text 'oc set triggers dc/ruby-hello-world' 'config.*true' |
| 87 | 87 |
os::cmd::expect_success_and_text 'oc set triggers dc/ruby-hello-world --from-image=ruby-hello-world:latest -c ruby-hello-world' 'updated' |
| 88 | 88 |
os::cmd::expect_success_and_text 'oc set triggers dc/ruby-hello-world' 'image.*ruby-hello-world:latest \(ruby-hello-world\).*true' |
| 89 |
+os::test::junit::declare_suite_end |
|
| 90 |
+ |
|
| 91 |
+os::test::junit::declare_suite_end |
|
| 89 | 92 |
\ No newline at end of file |
| ... | ... |
@@ -7,7 +7,9 @@ set -o pipefail |
| 7 | 7 |
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
| 8 | 8 |
source "${OS_ROOT}/hack/util.sh"
|
| 9 | 9 |
source "${OS_ROOT}/hack/cmd_util.sh"
|
| 10 |
+source "${OS_ROOT}/hack/lib/test/junit.sh"
|
|
| 10 | 11 |
os::log::install_errexit |
| 12 |
+trap os::test::junit::reconcile_output EXIT |
|
| 11 | 13 |
|
| 12 | 14 |
# Cleanup cluster resources created by this test |
| 13 | 15 |
( |
| ... | ... |
@@ -17,6 +19,7 @@ os::log::install_errexit |
| 17 | 17 |
) &>/dev/null |
| 18 | 18 |
|
| 19 | 19 |
|
| 20 |
+os::test::junit::declare_suite_start "cmd/volumes" |
|
| 20 | 21 |
# This test validates the 'volume' command |
| 21 | 22 |
|
| 22 | 23 |
os::cmd::expect_success 'oc create -f test/integration/fixtures/test-deployment-config.yaml' |
| ... | ... |
@@ -50,3 +53,5 @@ os::cmd::expect_success 'oc set volumes dc/test-deployment-config --list' |
| 50 | 50 |
|
| 51 | 51 |
os::cmd::expect_success 'oc delete dc/test-deployment-config' |
| 52 | 52 |
echo "volumes: ok" |
| 53 |
+os::test::junit::declare_suite_end |
|
| 54 |
+ |