Scripts should use the logging utility function `os::log::info`
instead of `echo "[INFO]..."`.
Signed-off-by: Steve Kuznetsov <skuznets@redhat.com>
| ... | ... |
@@ -186,7 +186,7 @@ function os::log::system::internal::plot() {
|
| 186 | 186 |
printf '\n\n' |
| 187 | 187 |
} >> "${LOG_DIR}/gnuplot.log"
|
| 188 | 188 |
|
| 189 |
- echo "[INFO] Stacked plot for log subset \"${plotname}\" written to ${LOG_DIR}/${plotname}.pdf"
|
|
| 189 |
+ os::log::info "Stacked plot for log subset \"${plotname}\" written to ${LOG_DIR}/${plotname}.pdf"
|
|
| 190 | 190 |
} |
| 191 | 191 |
readonly -f os::log::system::internal::plot |
| 192 | 192 |
|
| ... | ... |
@@ -192,7 +192,7 @@ function os::start::internal::patch_master_config() {
|
| 192 | 192 |
export ADMIN_KUBECONFIG="${MASTER_CONFIG_DIR}/admin.kubeconfig"
|
| 193 | 193 |
CLUSTER_ADMIN_CONTEXT=$(oc config view --config="${ADMIN_KUBECONFIG}" --flatten -o template --template='{{index . "current-context"}}'); export CLUSTER_ADMIN_CONTEXT
|
| 194 | 194 |
${sudo} chmod -R a+rwX "${ADMIN_KUBECONFIG}"
|
| 195 |
- echo "[INFO] To debug: export KUBECONFIG=$ADMIN_KUBECONFIG" |
|
| 195 |
+ os::log::info "To debug: export KUBECONFIG=$ADMIN_KUBECONFIG" |
|
| 196 | 196 |
} |
| 197 | 197 |
readonly -f os::start::internal::patch_master_config |
| 198 | 198 |
|
| ... | ... |
@@ -230,7 +230,7 @@ function os::start::server() {
|
| 230 | 230 |
local controllers_version="${2:-}"
|
| 231 | 231 |
local skip_node="${3:-}"
|
| 232 | 232 |
|
| 233 |
- echo "[INFO] Scan of OpenShift related processes already up via ps -ef | grep openshift : " |
|
| 233 |
+ os::log::info "Scan of OpenShift related processes already up via ps -ef | grep openshift : " |
|
| 234 | 234 |
ps -ef | grep openshift |
| 235 | 235 |
|
| 236 | 236 |
mkdir -p "${LOG_DIR}"
|
| ... | ... |
@@ -278,10 +278,10 @@ function os::start::master() {
|
| 278 | 278 |
|
| 279 | 279 |
mkdir -p "${LOG_DIR}"
|
| 280 | 280 |
|
| 281 |
- echo "[INFO] Scan of OpenShift related processes already up via ps -ef | grep openshift : " |
|
| 281 |
+ os::log::info "Scan of OpenShift related processes already up via ps -ef | grep openshift : " |
|
| 282 | 282 |
ps -ef | grep openshift |
| 283 | 283 |
|
| 284 |
- echo "[INFO] Starting OpenShift server" |
|
| 284 |
+ os::log::info "Starting OpenShift server" |
|
| 285 | 285 |
local openshift_env=( "OPENSHIFT_PROFILE=web" "OPENSHIFT_ON_PANIC=crash" ) |
| 286 | 286 |
$(os::start::internal::openshift_executable) start master \ |
| 287 | 287 |
--config="${MASTER_CONFIG_DIR}/master-config.yaml" \
|
| ... | ... |
@@ -289,7 +289,7 @@ function os::start::master() {
|
| 289 | 289 |
&>"${LOG_DIR}/openshift.log" &
|
| 290 | 290 |
export OS_PID=$! |
| 291 | 291 |
|
| 292 |
- echo "[INFO] OpenShift server start at: " |
|
| 292 |
+ os::log::info "OpenShift server start at: " |
|
| 293 | 293 |
date |
| 294 | 294 |
|
| 295 | 295 |
os::test::junit::declare_suite_start "setup/start-master" |
| ... | ... |
@@ -297,7 +297,7 @@ function os::start::master() {
|
| 297 | 297 |
os::cmd::try_until_text "oc get --raw /healthz/ready --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'" 'ok' $(( 160 * second )) 0.25
|
| 298 | 298 |
os::test::junit::declare_suite_end |
| 299 | 299 |
|
| 300 |
- echo "[INFO] OpenShift server health checks done at: " |
|
| 300 |
+ os::log::info "OpenShift server health checks done at: " |
|
| 301 | 301 |
date |
| 302 | 302 |
} |
| 303 | 303 |
readonly -f os::start::master |
| ... | ... |
@@ -331,7 +331,7 @@ function os::start::all_in_one() {
|
| 331 | 331 |
use_latest_images="false" |
| 332 | 332 |
fi |
| 333 | 333 |
|
| 334 |
- echo "[INFO] Starting OpenShift server" |
|
| 334 |
+ os::log::info "Starting OpenShift server" |
|
| 335 | 335 |
local openshift_env=( "OPENSHIFT_PROFILE=web" "OPENSHIFT_ON_PANIC=crash" ) |
| 336 | 336 |
local openshift_executable |
| 337 | 337 |
openshift_executable="$(os::start::internal::openshift_executable)" |
| ... | ... |
@@ -344,7 +344,7 @@ function os::start::all_in_one() {
|
| 344 | 344 |
&>"${LOG_DIR}/openshift.log" &
|
| 345 | 345 |
export OS_PID=$! |
| 346 | 346 |
|
| 347 |
- echo "[INFO] OpenShift server start at: " |
|
| 347 |
+ os::log::info "OpenShift server start at: " |
|
| 348 | 348 |
date |
| 349 | 349 |
|
| 350 | 350 |
os::test::junit::declare_suite_start "setup/start-all_in_one" |
| ... | ... |
@@ -354,7 +354,7 @@ function os::start::all_in_one() {
|
| 354 | 354 |
os::cmd::try_until_success "oc get --raw /api/v1/nodes/${KUBELET_HOST} --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'" $(( 80 * second )) 0.25
|
| 355 | 355 |
os::test::junit::declare_suite_end |
| 356 | 356 |
|
| 357 |
- echo "[INFO] OpenShift server health checks done at: " |
|
| 357 |
+ os::log::info "OpenShift server health checks done at: " |
|
| 358 | 358 |
date |
| 359 | 359 |
} |
| 360 | 360 |
readonly -f os::start::all_in_one |
| ... | ... |
@@ -372,7 +372,7 @@ readonly -f os::start::all_in_one |
| 372 | 372 |
# Returns: |
| 373 | 373 |
# - export ETCD_PID |
| 374 | 374 |
function os::start::etcd() {
|
| 375 |
- echo "[INFO] Starting etcd" |
|
| 375 |
+ os::log::info "Starting etcd" |
|
| 376 | 376 |
local openshift_env=( "OPENSHIFT_ON_PANIC=crash" ) |
| 377 | 377 |
local openshift_executable |
| 378 | 378 |
openshift_executable="$(os::start::internal::openshift_executable)" |
| ... | ... |
@@ -380,14 +380,14 @@ function os::start::etcd() {
|
| 380 | 380 |
--config="${MASTER_CONFIG_DIR}/master-config.yaml" &>"${LOG_DIR}/etcd.log" &
|
| 381 | 381 |
export ETCD_PID=$! |
| 382 | 382 |
|
| 383 |
- echo "[INFO] etcd server start at: " |
|
| 383 |
+ os::log::info "etcd server start at: " |
|
| 384 | 384 |
date |
| 385 | 385 |
|
| 386 | 386 |
os::test::junit::declare_suite_start "setup/start-etcd" |
| 387 | 387 |
os::cmd::try_until_success "os::util::curl_etcd '/version'" $(( 10 * second )) |
| 388 | 388 |
os::test::junit::declare_suite_end |
| 389 | 389 |
|
| 390 |
- echo "[INFO] etcd server health checks done at: " |
|
| 390 |
+ os::log::info "etcd server health checks done at: " |
|
| 391 | 391 |
date |
| 392 | 392 |
} |
| 393 | 393 |
readonly -f os::start::etcd |
| ... | ... |
@@ -421,7 +421,7 @@ function os::start::api_server() {
|
| 421 | 421 |
|
| 422 | 422 |
export API_SERVER_PID=$! |
| 423 | 423 |
|
| 424 |
- echo "[INFO] OpenShift API server start at: " |
|
| 424 |
+ os::log::info "OpenShift API server start at: " |
|
| 425 | 425 |
date |
| 426 | 426 |
|
| 427 | 427 |
os::test::junit::declare_suite_start "setup/start-api_server" |
| ... | ... |
@@ -429,7 +429,7 @@ function os::start::api_server() {
|
| 429 | 429 |
os::cmd::try_until_text "oc get --raw /healthz/ready --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'" 'ok' $(( 160 * second )) 0.25
|
| 430 | 430 |
os::test::junit::declare_suite_end |
| 431 | 431 |
|
| 432 |
- echo "[INFO] OpenShift API server health checks done at: " |
|
| 432 |
+ os::log::info "OpenShift API server health checks done at: " |
|
| 433 | 433 |
date |
| 434 | 434 |
} |
| 435 | 435 |
readonly -f os::start::api_server |
| ... | ... |
@@ -456,7 +456,7 @@ function os::start::controllers() {
|
| 456 | 456 |
|
| 457 | 457 |
export CONTROLLERS_PID=$! |
| 458 | 458 |
|
| 459 |
- echo "[INFO] OpenShift controllers start at: " |
|
| 459 |
+ os::log::info "OpenShift controllers start at: " |
|
| 460 | 460 |
date |
| 461 | 461 |
} |
| 462 | 462 |
readonly -f os::start::controllers |
| ... | ... |
@@ -485,7 +485,7 @@ function os::start::internal::start_node() {
|
| 485 | 485 |
|
| 486 | 486 |
mkdir -p "${LOG_DIR}"
|
| 487 | 487 |
|
| 488 |
- echo "[INFO] Starting OpenShift node" |
|
| 488 |
+ os::log::info "Starting OpenShift node" |
|
| 489 | 489 |
local openshift_env=( "OPENSHIFT_ON_PANIC=crash" ) |
| 490 | 490 |
$(os::start::internal::openshift_executable) openshift start node \ |
| 491 | 491 |
--config="${NODE_CONFIG_DIR}/node-config.yaml" \
|
| ... | ... |
@@ -494,14 +494,14 @@ function os::start::internal::start_node() {
|
| 494 | 494 |
&>"${LOG_DIR}/node.log" &
|
| 495 | 495 |
export NODE_PID=$! |
| 496 | 496 |
|
| 497 |
- echo "[INFO] OpenShift node start at: " |
|
| 497 |
+ os::log::info "OpenShift node start at: " |
|
| 498 | 498 |
date |
| 499 | 499 |
|
| 500 | 500 |
os::test::junit::declare_suite_start "setup/start-node" |
| 501 | 501 |
os::cmd::try_until_text "oc get --raw ${KUBELET_SCHEME}://${KUBELET_HOST}:${KUBELET_PORT}/healthz --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'" 'ok' $(( 80 * second )) 0.25
|
| 502 | 502 |
os::test::junit::declare_suite_end |
| 503 | 503 |
|
| 504 |
- echo "[INFO] OpenShift node health checks done at: " |
|
| 504 |
+ os::log::info "OpenShift node health checks done at: " |
|
| 505 | 505 |
date |
| 506 | 506 |
} |
| 507 | 507 |
readonly -f os::start::internal::start_node |
| ... | ... |
@@ -589,9 +589,9 @@ readonly -f os::start::internal::determine_hostnames |
| 589 | 589 |
function os::start::internal::print_server_info() {
|
| 590 | 590 |
local openshift_executable |
| 591 | 591 |
openshift_executable="$(os::start::internal::openshift_executable)" |
| 592 |
- echo "[INFO] $(${openshift_executable} version)"
|
|
| 593 |
- echo "[INFO] Server logs will be at: ${LOG_DIR}"
|
|
| 594 |
- echo "[INFO] Config dir is: ${SERVER_CONFIG_DIR}"
|
|
| 595 |
- echo "[INFO] Using images: ${USE_IMAGES}"
|
|
| 596 |
- echo "[INFO] MasterIP is: ${MASTER_ADDR}"
|
|
| 592 |
+ os::log::info "$(${openshift_executable} version)"
|
|
| 593 |
+ os::log::info "Server logs will be at: ${LOG_DIR}"
|
|
| 594 |
+ os::log::info "Config dir is: ${SERVER_CONFIG_DIR}"
|
|
| 595 |
+ os::log::info "Using images: ${USE_IMAGES}"
|
|
| 596 |
+ os::log::info "MasterIP is: ${MASTER_ADDR}"
|
|
| 597 | 597 |
} |
| ... | ... |
@@ -32,7 +32,7 @@ function cleanup() |
| 32 | 32 |
echo ------------------------------------- |
| 33 | 33 |
echo |
| 34 | 34 |
elif go tool -n pprof >/dev/null 2>&1; then |
| 35 |
- echo "[INFO] \`pprof\` output logged to ${LOG_DIR}/pprof.out"
|
|
| 35 |
+ os::log::info "\`pprof\` output logged to ${LOG_DIR}/pprof.out"
|
|
| 36 | 36 |
go tool pprof -text "./_output/local/bin/$(os::util::host_platform)/openshift" cpu.pprof >"${LOG_DIR}/pprof.out" 2>&1
|
| 37 | 37 |
fi |
| 38 | 38 |
|
| ... | ... |
@@ -65,7 +65,7 @@ function cleanup() |
| 65 | 65 |
fi |
| 66 | 66 |
|
| 67 | 67 |
ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds" |
| 68 |
- echo "[INFO] Exiting with ${out}"
|
|
| 68 |
+ os::log::info "Exiting with ${out}"
|
|
| 69 | 69 |
exit $out |
| 70 | 70 |
} |
| 71 | 71 |
|
| ... | ... |
@@ -147,7 +147,7 @@ oc version |
| 147 | 147 |
export OPENSHIFT_PROFILE="${WEB_PROFILE-}"
|
| 148 | 148 |
|
| 149 | 149 |
# Specify the scheme and port for the listen address, but let the IP auto-discover. Set --public-master to localhost, for a stable link to the console. |
| 150 |
-echo "[INFO] Create certificates for the OpenShift server to ${MASTER_CONFIG_DIR}"
|
|
| 150 |
+os::log::info "Create certificates for the OpenShift server to ${MASTER_CONFIG_DIR}"
|
|
| 151 | 151 |
# find the same IP that openshift start will bind to. This allows access from pods that have to talk back to master |
| 152 | 152 |
SERVER_HOSTNAME_LIST="${PUBLIC_MASTER_HOST},$(openshift start --print-ip),localhost"
|
| 153 | 153 |
|
| ... | ... |
@@ -355,6 +355,6 @@ for test in "${tests[@]}"; do
|
| 355 | 355 |
cp ${KUBECONFIG}{.bak,} # since nothing ever gets deleted from kubeconfig, reset it
|
| 356 | 356 |
done |
| 357 | 357 |
|
| 358 |
-echo "[INFO] Metrics information logged to ${LOG_DIR}/metrics.log"
|
|
| 358 |
+os::log::info "Metrics information logged to ${LOG_DIR}/metrics.log"
|
|
| 359 | 359 |
oc get --raw /metrics > "${LOG_DIR}/metrics.log"
|
| 360 | 360 |
echo "test-cmd: ok" |
| ... | ... |
@@ -5,7 +5,7 @@ |
| 5 | 5 |
STARTTIME=$(date +%s) |
| 6 | 6 |
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
| 7 | 7 |
|
| 8 |
-echo "[INFO] Starting containerized end-to-end test" |
|
| 8 |
+os::log::info "Starting containerized end-to-end test" |
|
| 9 | 9 |
|
| 10 | 10 |
unset KUBECONFIG |
| 11 | 11 |
|
| ... | ... |
@@ -19,7 +19,7 @@ function cleanup() |
| 19 | 19 |
if [ $out -ne 0 ]; then |
| 20 | 20 |
echo "[FAIL] !!!!! Test Failed !!!!" |
| 21 | 21 |
else |
| 22 |
- echo "[INFO] Test Succeeded" |
|
| 22 |
+ os::log::info "Test Succeeded" |
|
| 23 | 23 |
fi |
| 24 | 24 |
echo |
| 25 | 25 |
|
| ... | ... |
@@ -33,13 +33,13 @@ function cleanup() |
| 33 | 33 |
os::cleanup::dump_etcd |
| 34 | 34 |
|
| 35 | 35 |
if [[ -z "${SKIP_TEARDOWN-}" ]]; then
|
| 36 |
- echo "[INFO] remove the openshift container" |
|
| 36 |
+ os::log::info "remove the openshift container" |
|
| 37 | 37 |
docker stop origin |
| 38 | 38 |
docker rm origin |
| 39 | 39 |
|
| 40 |
- echo "[INFO] Stopping k8s docker containers"; docker ps | awk 'index($NF,"k8s_")==1 { print $1 }' | xargs -l -r docker stop
|
|
| 40 |
+ os::log::info "Stopping k8s docker containers"; docker ps | awk 'index($NF,"k8s_")==1 { print $1 }' | xargs -l -r docker stop
|
|
| 41 | 41 |
if [[ -z "${SKIP_IMAGE_CLEANUP-}" ]]; then
|
| 42 |
- echo "[INFO] Removing k8s docker containers"; docker ps -a | awk 'index($NF,"k8s_")==1 { print $1 }' | xargs -l -r docker rm
|
|
| 42 |
+ os::log::info "Removing k8s docker containers"; docker ps -a | awk 'index($NF,"k8s_")==1 { print $1 }' | xargs -l -r docker rm
|
|
| 43 | 43 |
fi |
| 44 | 44 |
set -u |
| 45 | 45 |
fi |
| ... | ... |
@@ -50,7 +50,7 @@ function cleanup() |
| 50 | 50 |
truncate_large_logs |
| 51 | 51 |
set -e |
| 52 | 52 |
|
| 53 |
- echo "[INFO] Exiting" |
|
| 53 |
+ os::log::info "Exiting" |
|
| 54 | 54 |
ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds" |
| 55 | 55 |
exit $out |
| 56 | 56 |
} |
| ... | ... |
@@ -67,11 +67,11 @@ out=$( |
| 67 | 67 |
) |
| 68 | 68 |
|
| 69 | 69 |
# Setup |
| 70 |
-echo "[INFO] openshift version: `openshift version`" |
|
| 71 |
-echo "[INFO] oc version: `oc version`" |
|
| 72 |
-echo "[INFO] Using images: ${USE_IMAGES}"
|
|
| 70 |
+os::log::info "openshift version: `openshift version`" |
|
| 71 |
+os::log::info "oc version: `oc version`" |
|
| 72 |
+os::log::info "Using images: ${USE_IMAGES}"
|
|
| 73 | 73 |
|
| 74 |
-echo "[INFO] Starting OpenShift containerized server" |
|
| 74 |
+os::log::info "Starting OpenShift containerized server" |
|
| 75 | 75 |
oc cluster up --server-loglevel=4 --version="${TAG}" \
|
| 76 | 76 |
--host-data-dir="${VOLUME_DIR}/etcd" \
|
| 77 | 77 |
--host-volumes-dir="${VOLUME_DIR}"
|
| ... | ... |
@@ -85,7 +85,7 @@ export ADMIN_KUBECONFIG="${MASTER_CONFIG_DIR}/admin.kubeconfig"
|
| 85 | 85 |
export CLUSTER_ADMIN_CONTEXT=$(oc config view --config=${ADMIN_KUBECONFIG} --flatten -o template --template='{{index . "current-context"}}')
|
| 86 | 86 |
sudo chmod -R a+rwX "${ADMIN_KUBECONFIG}"
|
| 87 | 87 |
export KUBECONFIG="${ADMIN_KUBECONFIG}"
|
| 88 |
-echo "[INFO] To debug: export KUBECONFIG=$ADMIN_KUBECONFIG" |
|
| 88 |
+os::log::info "To debug: export KUBECONFIG=$ADMIN_KUBECONFIG" |
|
| 89 | 89 |
|
| 90 | 90 |
|
| 91 | 91 |
${OS_ROOT}/test/end-to-end/core.sh
|
| ... | ... |
@@ -18,7 +18,7 @@ fi |
| 18 | 18 |
|
| 19 | 19 |
ensure_iptables_or_die |
| 20 | 20 |
|
| 21 |
-echo "[INFO] Starting end-to-end test" |
|
| 21 |
+os::log::info "Starting end-to-end test" |
|
| 22 | 22 |
|
| 23 | 23 |
function cleanup() |
| 24 | 24 |
{
|
| ... | ... |
@@ -27,12 +27,12 @@ function cleanup() |
| 27 | 27 |
if [ $out -ne 0 ]; then |
| 28 | 28 |
echo "[FAIL] !!!!! Test Failed !!!!" |
| 29 | 29 |
else |
| 30 |
- echo "[INFO] Test Succeeded" |
|
| 30 |
+ os::log::info "Test Succeeded" |
|
| 31 | 31 |
fi |
| 32 | 32 |
echo |
| 33 | 33 |
|
| 34 | 34 |
cleanup_openshift |
| 35 |
- echo "[INFO] Exiting" |
|
| 35 |
+ os::log::info "Exiting" |
|
| 36 | 36 |
ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds" |
| 37 | 37 |
exit $out |
| 38 | 38 |
} |
| ... | ... |
@@ -215,7 +215,7 @@ if [[ -n "${junit_report}" ]]; then
|
| 215 | 215 |
test_error_file="${LOG_DIR}/test-go-err.log"
|
| 216 | 216 |
junit_report_file="${ARTIFACT_DIR}/report.xml"
|
| 217 | 217 |
|
| 218 |
- echo "[INFO] Running \`go test\`..." |
|
| 218 |
+ os::log::info "Running \`go test\`..." |
|
| 219 | 219 |
# we don't care if the `go test` fails in this pipe, as we want to generate the report and summarize the output anyway |
| 220 | 220 |
set +o pipefail |
| 221 | 221 |
|
| ... | ... |
@@ -258,8 +258,8 @@ if [[ -n "${junit_report}" ]]; then
|
| 258 | 258 |
fi |
| 259 | 259 |
fi |
| 260 | 260 |
|
| 261 |
- echo "[INFO] Full output from \`go test\` logged at ${test_output_file}"
|
|
| 262 |
- echo "[INFO] jUnit XML report placed at ${junit_report_file}"
|
|
| 261 |
+ os::log::info "Full output from \`go test\` logged at ${test_output_file}"
|
|
| 262 |
+ os::log::info "jUnit XML report placed at ${junit_report_file}"
|
|
| 263 | 263 |
exit "${test_return_code}"
|
| 264 | 264 |
|
| 265 | 265 |
elif [[ -n "${coverage_output_dir}" ]]; then
|
| ... | ... |
@@ -276,7 +276,7 @@ elif [[ -n "${coverage_output_dir}" ]]; then
|
| 276 | 276 |
find "${coverage_output_dir}" -name profile.out | xargs sed '/^mode: atomic$/d' >> "${coverage_output_dir}/profiles.out"
|
| 277 | 277 |
|
| 278 | 278 |
go tool cover "-html=${coverage_output_dir}/profiles.out" -o "${coverage_output_dir}/coverage.html"
|
| 279 |
- echo "[INFO] Coverage profile written to ${coverage_output_dir}/coverage.html"
|
|
| 279 |
+ os::log::info "Coverage profile written to ${coverage_output_dir}/coverage.html"
|
|
| 280 | 280 |
|
| 281 | 281 |
# clean up all of the individual coverage reports as they have been subsumed into the report at ${coverage_output_dir}/coverage.html
|
| 282 | 282 |
# we can clean up all of the coverage reports at once as they all exist in subdirectories of ${coverage_output_dir}/${OS_GO_PACKAGE}
|
| ... | ... |
@@ -62,11 +62,11 @@ for file in ${source_files}; do
|
| 62 | 62 |
echo "[ERROR] Generated Swagger documentation at \"${swagger_file}\" is out of date."
|
| 63 | 63 |
failed='true' |
| 64 | 64 |
else |
| 65 |
- echo "[INFO] Verified that generated Swagger documentation at \"${swagger_file}\" is up to date."
|
|
| 65 |
+ os::log::info "Verified that generated Swagger documentation at \"${swagger_file}\" is up to date."
|
|
| 66 | 66 |
fi |
| 67 | 67 |
else |
| 68 | 68 |
mv "${tmp_output_file}" "${swagger_file}"
|
| 69 |
- echo "[INFO] Generated Swagger documentation written for \"${file}\" to \"${swagger_file}\""
|
|
| 69 |
+ os::log::info "Generated Swagger documentation written for \"${file}\" to \"${swagger_file}\""
|
|
| 70 | 70 |
fi |
| 71 | 71 |
fi |
| 72 | 72 |
done |
| ... | ... |
@@ -52,7 +52,7 @@ function dump_container_logs() {
|
| 52 | 52 |
|
| 53 | 53 |
mkdir -p ${LOG_DIR}
|
| 54 | 54 |
|
| 55 |
- echo "[INFO] Dumping container logs to ${LOG_DIR}"
|
|
| 55 |
+ os::log::info "Dumping container logs to ${LOG_DIR}"
|
|
| 56 | 56 |
for container in $(docker ps -aq); do |
| 57 | 57 |
container_name=$(docker inspect -f "{{.Name}}" $container)
|
| 58 | 58 |
# strip off leading / |
| ... | ... |
@@ -114,17 +114,17 @@ function cleanup_openshift() {
|
| 114 | 114 |
os::cleanup::dump_etcd |
| 115 | 115 |
|
| 116 | 116 |
if [[ -z "${SKIP_TEARDOWN-}" ]]; then
|
| 117 |
- echo "[INFO] Tearing down test" |
|
| 117 |
+ os::log::info "Tearing down test" |
|
| 118 | 118 |
kill_all_processes |
| 119 | 119 |
|
| 120 | 120 |
if docker version >/dev/null 2>&1; then |
| 121 |
- echo "[INFO] Stopping k8s docker containers"; docker ps | awk 'index($NF,"k8s_")==1 { print $1 }' | xargs -l -r docker stop -t 1 >/dev/null
|
|
| 121 |
+ os::log::info "Stopping k8s docker containers"; docker ps | awk 'index($NF,"k8s_")==1 { print $1 }' | xargs -l -r docker stop -t 1 >/dev/null
|
|
| 122 | 122 |
if [[ -z "${SKIP_IMAGE_CLEANUP-}" ]]; then
|
| 123 |
- echo "[INFO] Removing k8s docker containers"; docker ps -a | awk 'index($NF,"k8s_")==1 { print $1 }' | xargs -l -r docker rm -v >/dev/null
|
|
| 123 |
+ os::log::info "Removing k8s docker containers"; docker ps -a | awk 'index($NF,"k8s_")==1 { print $1 }' | xargs -l -r docker rm -v >/dev/null
|
|
| 124 | 124 |
fi |
| 125 | 125 |
fi |
| 126 | 126 |
|
| 127 |
- echo "[INFO] Pruning etcd data directory..." |
|
| 127 |
+ os::log::info "Pruning etcd data directory..." |
|
| 128 | 128 |
local sudo="${USE_SUDO:+sudo}"
|
| 129 | 129 |
${sudo} rm -rf "${ETCD_DATA_DIR}"
|
| 130 | 130 |
|
| ... | ... |
@@ -139,18 +139,18 @@ function cleanup_openshift() {
|
| 139 | 139 |
delete_empty_logs |
| 140 | 140 |
truncate_large_logs |
| 141 | 141 |
|
| 142 |
- echo "[INFO] Cleanup complete" |
|
| 142 |
+ os::log::info "Cleanup complete" |
|
| 143 | 143 |
set -e |
| 144 | 144 |
} |
| 145 | 145 |
readonly -f cleanup_openshift |
| 146 | 146 |
|
| 147 | 147 |
# install the router for the extended tests |
| 148 | 148 |
function install_router() {
|
| 149 |
- echo "[INFO] Installing the router" |
|
| 149 |
+ os::log::info "Installing the router" |
|
| 150 | 150 |
oadm policy add-scc-to-user privileged -z router --config="${ADMIN_KUBECONFIG}"
|
| 151 | 151 |
# Create a TLS certificate for the router |
| 152 | 152 |
if [[ -n "${CREATE_ROUTER_CERT:-}" ]]; then
|
| 153 |
- echo "[INFO] Generating router TLS certificate" |
|
| 153 |
+ os::log::info "Generating router TLS certificate" |
|
| 154 | 154 |
oadm ca create-server-cert --signer-cert=${MASTER_CONFIG_DIR}/ca.crt \
|
| 155 | 155 |
--signer-key=${MASTER_CONFIG_DIR}/ca.key \
|
| 156 | 156 |
--signer-serial=${MASTER_CONFIG_DIR}/ca.serial.txt \
|
| ... | ... |
@@ -165,7 +165,7 @@ function install_router() {
|
| 165 | 165 |
# Set the SYN eater to make router reloads more robust |
| 166 | 166 |
if [[ -n "${DROP_SYN_DURING_RESTART:-}" ]]; then
|
| 167 | 167 |
# Rewrite the DC for the router to add the environment variable into the pod definition |
| 168 |
- echo "[INFO] Changing the router DC to drop SYN packets during a reload" |
|
| 168 |
+ os::log::info "Changing the router DC to drop SYN packets during a reload" |
|
| 169 | 169 |
oc set env dc/router -c router DROP_SYN_DURING_RESTART=true |
| 170 | 170 |
fi |
| 171 | 171 |
} |
| ... | ... |
@@ -174,7 +174,7 @@ readonly -f install_router |
| 174 | 174 |
# install registry for the extended tests |
| 175 | 175 |
function install_registry() {
|
| 176 | 176 |
# The --mount-host option is provided to reuse local storage. |
| 177 |
- echo "[INFO] Installing the registry" |
|
| 177 |
+ os::log::info "Installing the registry" |
|
| 178 | 178 |
# For testing purposes, ensure the quota objects are always up to date in the registry by |
| 179 | 179 |
# disabling project cache. |
| 180 | 180 |
openshift admin registry --config="${ADMIN_KUBECONFIG}" --images="${USE_IMAGES}" --enforce-quota -o json | \
|
| ... | ... |
@@ -4,7 +4,7 @@ |
| 4 | 4 |
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
| 5 | 5 |
|
| 6 | 6 |
# Open port scanning |
| 7 |
-echo "[INFO] Checking open ports ('sudo openshift start' should already be running)"
|
|
| 7 |
+os::log::info "Checking open ports ('sudo openshift start' should already be running)"
|
|
| 8 | 8 |
|
| 9 | 9 |
# 53 (DNS) |
| 10 | 10 |
# 4001,7001 (etcd) |
| ... | ... |
@@ -10,30 +10,30 @@ trap os::test::junit::reconcile_output EXIT |
| 10 | 10 |
export VERBOSE=true |
| 11 | 11 |
|
| 12 | 12 |
function wait_for_app() {
|
| 13 |
- echo "[INFO] Waiting for app in namespace $1" |
|
| 14 |
- echo "[INFO] Waiting for database pod to start" |
|
| 13 |
+ os::log::info "Waiting for app in namespace $1" |
|
| 14 |
+ os::log::info "Waiting for database pod to start" |
|
| 15 | 15 |
os::cmd::try_until_text "oc get -n $1 pods -l name=database" 'Running' |
| 16 | 16 |
os::cmd::expect_success "oc logs dc/database -n $1" |
| 17 | 17 |
|
| 18 |
- echo "[INFO] Waiting for database service to start" |
|
| 18 |
+ os::log::info "Waiting for database service to start" |
|
| 19 | 19 |
os::cmd::try_until_text "oc get -n $1 services" 'database' "$(( 2 * TIME_MIN ))" |
| 20 | 20 |
DB_IP=$(oc get -n "$1" --output-version=v1 --template="{{ .spec.clusterIP }}" service database)
|
| 21 | 21 |
|
| 22 |
- echo "[INFO] Waiting for frontend pod to start" |
|
| 22 |
+ os::log::info "Waiting for frontend pod to start" |
|
| 23 | 23 |
os::cmd::try_until_text "oc get -n $1 pods -l name=frontend" 'Running' "$(( 2 * TIME_MIN ))" |
| 24 | 24 |
os::cmd::expect_success "oc logs dc/frontend -n $1" |
| 25 | 25 |
|
| 26 |
- echo "[INFO] Waiting for frontend service to start" |
|
| 26 |
+ os::log::info "Waiting for frontend service to start" |
|
| 27 | 27 |
os::cmd::try_until_text "oc get -n $1 services" 'frontend' "$(( 2 * TIME_MIN ))" |
| 28 | 28 |
FRONTEND_IP=$(oc get -n "$1" --output-version=v1 --template="{{ .spec.clusterIP }}" service frontend)
|
| 29 | 29 |
|
| 30 |
- echo "[INFO] Waiting for database to start..." |
|
| 30 |
+ os::log::info "Waiting for database to start..." |
|
| 31 | 31 |
os::cmd::try_until_success "curl --max-time 2 --fail --silent 'http://${DB_IP}:5434'" "$((3*TIME_MIN))"
|
| 32 | 32 |
|
| 33 |
- echo "[INFO] Waiting for app to start..." |
|
| 33 |
+ os::log::info "Waiting for app to start..." |
|
| 34 | 34 |
os::cmd::try_until_success "curl --max-time 2 --fail --silent 'http://${FRONTEND_IP}:5432'" "$((2*TIME_MIN))"
|
| 35 | 35 |
|
| 36 |
- echo "[INFO] Testing app" |
|
| 36 |
+ os::log::info "Testing app" |
|
| 37 | 37 |
os::cmd::try_until_text "curl -s -X POST http://${FRONTEND_IP}:5432/keys/foo -d value=1337" "Key created"
|
| 38 | 38 |
os::cmd::try_until_text "curl -s http://${FRONTEND_IP}:5432/keys/foo" "1337"
|
| 39 | 39 |
} |
| ... | ... |
@@ -48,8 +48,8 @@ function remove_docker_images() {
|
| 48 | 48 |
|
| 49 | 49 |
os::test::junit::declare_suite_start "end-to-end/core" |
| 50 | 50 |
|
| 51 |
-echo "[INFO] openshift version: `openshift version`" |
|
| 52 |
-echo "[INFO] oc version: `oc version`" |
|
| 51 |
+os::log::info "openshift version: `openshift version`" |
|
| 52 |
+os::log::info "oc version: `oc version`" |
|
| 53 | 53 |
|
| 54 | 54 |
# service dns entry is visible via master service |
| 55 | 55 |
# find the IP of the master service by asking the API_HOST to verify DNS is running there |
| ... | ... |
@@ -74,17 +74,17 @@ os::cmd::expect_success "openshift admin new-project cache --description='This i |
| 74 | 74 |
echo "The console should be available at ${API_SCHEME}://${PUBLIC_MASTER_HOST}:${API_PORT}/console."
|
| 75 | 75 |
echo "Log in as 'e2e-user' to see the 'test' project." |
| 76 | 76 |
|
| 77 |
-echo "[INFO] Pre-pulling and pushing ruby-22-centos7" |
|
| 77 |
+os::log::info "Pre-pulling and pushing ruby-22-centos7" |
|
| 78 | 78 |
os::cmd::expect_success 'docker pull centos/ruby-22-centos7:latest' |
| 79 |
-echo "[INFO] Pulled ruby-22-centos7" |
|
| 79 |
+os::log::info "Pulled ruby-22-centos7" |
|
| 80 | 80 |
|
| 81 | 81 |
os::cmd::expect_success "openshift admin policy add-scc-to-user privileged -z ipfailover" |
| 82 | 82 |
os::cmd::expect_success "openshift admin ipfailover --images='${USE_IMAGES}' --virtual-ips='1.2.3.4' --service-account=ipfailover"
|
| 83 | 83 |
|
| 84 |
-echo "[INFO] Waiting for Docker registry pod to start" |
|
| 84 |
+os::log::info "Waiting for Docker registry pod to start" |
|
| 85 | 85 |
os::cmd::expect_success 'oc rollout status dc/docker-registry' |
| 86 | 86 |
|
| 87 |
-echo "[INFO] Waiting for IP failover to deploy" |
|
| 87 |
+os::log::info "Waiting for IP failover to deploy" |
|
| 88 | 88 |
os::cmd::expect_success 'oc rollout status dc/ipfailover' |
| 89 | 89 |
os::cmd::expect_success "oc delete all -l ipfailover=ipfailover" |
| 90 | 90 |
|
| ... | ... |
@@ -99,7 +99,7 @@ DOCKER_REGISTRY=$(oc get --output-version=v1 --template="{{ .spec.clusterIP }}:{
|
| 99 | 99 |
|
| 100 | 100 |
os::cmd::expect_success_and_text "dig @${API_HOST} docker-registry.default.svc.cluster.local. +short A | head -n 1" "${DOCKER_REGISTRY/:5000}"
|
| 101 | 101 |
|
| 102 |
-echo "[INFO] Verifying the docker-registry is up at ${DOCKER_REGISTRY}"
|
|
| 102 |
+os::log::info "Verifying the docker-registry is up at ${DOCKER_REGISTRY}"
|
|
| 103 | 103 |
os::cmd::try_until_success "curl --max-time 2 --fail --silent 'http://${DOCKER_REGISTRY}/'" "$((2*TIME_MIN))"
|
| 104 | 104 |
# ensure original healthz route works as well |
| 105 | 105 |
os::cmd::expect_success "curl -f http://${DOCKER_REGISTRY}/healthz"
|
| ... | ... |
@@ -108,7 +108,7 @@ os::cmd::expect_success "dig @${API_HOST} docker-registry.default.local. A"
|
| 108 | 108 |
|
| 109 | 109 |
# Client setup (log in as e2e-user and set 'test' as the default project) |
| 110 | 110 |
# This is required to be able to push to the registry! |
| 111 |
-echo "[INFO] Logging in as a regular user (e2e-user:pass) with project 'test'..." |
|
| 111 |
+os::log::info "Logging in as a regular user (e2e-user:pass) with project 'test'..." |
|
| 112 | 112 |
os::cmd::expect_success 'oc login -u e2e-user -p pass' |
| 113 | 113 |
os::cmd::expect_success_and_text 'oc whoami' 'e2e-user' |
| 114 | 114 |
|
| ... | ... |
@@ -127,31 +127,31 @@ os::cmd::expect_success 'oc status -n default' |
| 127 | 127 |
os::cmd::expect_success 'oc project cache' |
| 128 | 128 |
e2e_user_token="$(oc whoami -t)" |
| 129 | 129 |
|
| 130 |
-echo "[INFO] Docker login as e2e-user to ${DOCKER_REGISTRY}"
|
|
| 130 |
+os::log::info "Docker login as e2e-user to ${DOCKER_REGISTRY}"
|
|
| 131 | 131 |
os::cmd::expect_success "docker login -u e2e-user -p ${e2e_user_token} -e e2e-user@openshift.com ${DOCKER_REGISTRY}"
|
| 132 |
-echo "[INFO] Docker login successful" |
|
| 132 |
+os::log::info "Docker login successful" |
|
| 133 | 133 |
|
| 134 |
-echo "[INFO] Tagging and pushing ruby-22-centos7 to ${DOCKER_REGISTRY}/cache/ruby-22-centos7:latest"
|
|
| 134 |
+os::log::info "Tagging and pushing ruby-22-centos7 to ${DOCKER_REGISTRY}/cache/ruby-22-centos7:latest"
|
|
| 135 | 135 |
os::cmd::expect_success "docker tag centos/ruby-22-centos7:latest ${DOCKER_REGISTRY}/cache/ruby-22-centos7:latest"
|
| 136 | 136 |
os::cmd::expect_success "docker push ${DOCKER_REGISTRY}/cache/ruby-22-centos7:latest"
|
| 137 |
-echo "[INFO] Pushed ruby-22-centos7" |
|
| 137 |
+os::log::info "Pushed ruby-22-centos7" |
|
| 138 | 138 |
|
| 139 | 139 |
# get image's digest |
| 140 | 140 |
rubyimagedigest="$(oc get -o jsonpath='{.status.tags[?(@.tag=="latest")].items[0].image}' is/ruby-22-centos7)"
|
| 141 |
-echo "[INFO] Ruby image digest: $rubyimagedigest" |
|
| 141 |
+os::log::info "Ruby image digest: $rubyimagedigest" |
|
| 142 | 142 |
# get a random, non-empty blob |
| 143 | 143 |
rubyimageblob="$(oc get isimage -o go-template='{{range .image.dockerImageLayers}}{{if gt .size 1024.}}{{.name}},{{end}}{{end}}' "ruby-22-centos7@${rubyimagedigest}" | cut -d , -f 1)"
|
| 144 |
-echo "[INFO] Ruby's testing blob digest: $rubyimageblob" |
|
| 144 |
+os::log::info "Ruby's testing blob digest: $rubyimageblob" |
|
| 145 | 145 |
|
| 146 | 146 |
# verify remote images can be pulled directly from the local registry |
| 147 |
-echo "[INFO] Docker pullthrough" |
|
| 147 |
+os::log::info "Docker pullthrough" |
|
| 148 | 148 |
os::cmd::expect_success "oc import-image --confirm --from=mysql:latest mysql:pullthrough" |
| 149 | 149 |
os::cmd::expect_success "docker pull ${DOCKER_REGISTRY}/cache/mysql:pullthrough"
|
| 150 | 150 |
|
| 151 |
-echo "[INFO] Docker registry start with GCS" |
|
| 151 |
+os::log::info "Docker registry start with GCS" |
|
| 152 | 152 |
os::cmd::expect_failure_and_text "docker run -e REGISTRY_STORAGE=\"gcs: {}\" openshift/origin-docker-registry:${TAG}" "No bucket parameter provided"
|
| 153 | 153 |
|
| 154 |
-echo "[INFO] Docker pull from istag" |
|
| 154 |
+os::log::info "Docker pull from istag" |
|
| 155 | 155 |
os::cmd::expect_success "oc import-image --confirm --from=hello-world:latest -n test hello-world:pullthrough" |
| 156 | 156 |
os::cmd::expect_success "docker pull ${DOCKER_REGISTRY}/test/hello-world:pullthrough"
|
| 157 | 157 |
os::cmd::expect_success "docker tag ${DOCKER_REGISTRY}/test/hello-world:pullthrough ${DOCKER_REGISTRY}/cache/hello-world:latest"
|
| ... | ... |
@@ -159,48 +159,48 @@ os::cmd::expect_success "docker push ${DOCKER_REGISTRY}/cache/hello-world:latest
|
| 159 | 159 |
|
| 160 | 160 |
# verify we can pull from tagged image (using tag) |
| 161 | 161 |
remove_docker_images 'cache/hello-world' |
| 162 |
-echo "[INFO] Tagging hello-world:latest to the same image stream and pulling it" |
|
| 162 |
+os::log::info "Tagging hello-world:latest to the same image stream and pulling it" |
|
| 163 | 163 |
os::cmd::expect_success "oc tag hello-world:latest hello-world:new-tag" |
| 164 | 164 |
os::cmd::expect_success "docker pull ${DOCKER_REGISTRY}/cache/hello-world:new-tag"
|
| 165 |
-echo "[INFO] The same image stream pull successful" |
|
| 165 |
+os::log::info "The same image stream pull successful" |
|
| 166 | 166 |
|
| 167 | 167 |
remove_docker_images "${DOCKER_REGISTRY}/cache/hello-world" new-tag
|
| 168 |
-echo "[INFO] Tagging hello-world:latest to cross repository and pulling it" |
|
| 168 |
+os::log::info "Tagging hello-world:latest to cross repository and pulling it" |
|
| 169 | 169 |
os::cmd::expect_success "oc tag hello-world:latest cross:repo-pull" |
| 170 | 170 |
os::cmd::expect_success "docker pull ${DOCKER_REGISTRY}/cache/cross:repo-pull"
|
| 171 |
-echo "[INFO] Cross repository pull successful" |
|
| 171 |
+os::log::info "Cross repository pull successful" |
|
| 172 | 172 |
|
| 173 | 173 |
remove_docker_images "${DOCKER_REGISTRY}/cache/cross" "repo-pull"
|
| 174 |
-echo "[INFO] Tagging hello-world:latest to cross namespace and pulling it" |
|
| 174 |
+os::log::info "Tagging hello-world:latest to cross namespace and pulling it" |
|
| 175 | 175 |
os::cmd::expect_success "oc tag cache/hello-world:latest cross:namespace-pull -n custom" |
| 176 | 176 |
os::cmd::expect_success "docker pull ${DOCKER_REGISTRY}/custom/cross:namespace-pull"
|
| 177 |
-echo "[INFO] Cross namespace pull successful" |
|
| 177 |
+os::log::info "Cross namespace pull successful" |
|
| 178 | 178 |
|
| 179 | 179 |
# verify we can pull from tagged image (using image digest) |
| 180 | 180 |
remove_docker_images "${DOCKER_REGISTRY}/custom/cross" namespace-pull
|
| 181 | 181 |
imagedigest="$(oc get istag hello-world:latest --template='{{.image.metadata.name}}')"
|
| 182 |
-echo "[INFO] Tagging hello-world@${imagedigest} to the same image stream and pulling it"
|
|
| 182 |
+os::log::info "Tagging hello-world@${imagedigest} to the same image stream and pulling it"
|
|
| 183 | 183 |
os::cmd::expect_success "oc tag hello-world@${imagedigest} hello-world:new-id-tag"
|
| 184 | 184 |
os::cmd::expect_success "docker pull ${DOCKER_REGISTRY}/cache/hello-world:new-id-tag"
|
| 185 |
-echo "[INFO] The same image stream pull successful" |
|
| 185 |
+os::log::info "The same image stream pull successful" |
|
| 186 | 186 |
|
| 187 | 187 |
remove_docker_images "${DOCKER_REGISTRY}/cache/hello-world" new-id-tag
|
| 188 |
-echo "[INFO] Tagging hello-world@${imagedigest} to cross repository and pulling it"
|
|
| 188 |
+os::log::info "Tagging hello-world@${imagedigest} to cross repository and pulling it"
|
|
| 189 | 189 |
os::cmd::expect_success "oc tag hello-world@${imagedigest} cross:repo-pull-id"
|
| 190 | 190 |
os::cmd::expect_success "docker pull ${DOCKER_REGISTRY}/cache/cross:repo-pull-id"
|
| 191 |
-echo "[INFO] Cross repository pull successful" |
|
| 191 |
+os::log::info "Cross repository pull successful" |
|
| 192 | 192 |
|
| 193 | 193 |
remove_docker_images "${DOCKER_REGISTRY}/cache/cross" repo-pull-id
|
| 194 |
-echo "[INFO] Tagging hello-world@${imagedigest} to cross repository and pulling it by id"
|
|
| 194 |
+os::log::info "Tagging hello-world@${imagedigest} to cross repository and pulling it by id"
|
|
| 195 | 195 |
os::cmd::expect_success "oc tag hello-world@${imagedigest} cross:repo-pull-id"
|
| 196 | 196 |
os::cmd::expect_success "docker pull ${DOCKER_REGISTRY}/cache/cross@${imagedigest}"
|
| 197 |
-echo "[INFO] Cross repository pull successful" |
|
| 197 |
+os::log::info "Cross repository pull successful" |
|
| 198 | 198 |
|
| 199 | 199 |
remove_docker_images "${DOCKER_REGISTRY}/cache/cross"
|
| 200 |
-echo "[INFO] Tagging hello-world@${imagedigest} to cross namespace and pulling it"
|
|
| 200 |
+os::log::info "Tagging hello-world@${imagedigest} to cross namespace and pulling it"
|
|
| 201 | 201 |
os::cmd::expect_success "oc tag cache/hello-world@${imagedigest} cross:namespace-pull-id -n custom"
|
| 202 | 202 |
os::cmd::expect_success "docker pull ${DOCKER_REGISTRY}/custom/cross:namespace-pull-id"
|
| 203 |
-echo "[INFO] Cross namespace pull successful" |
|
| 203 |
+os::log::info "Cross namespace pull successful" |
|
| 204 | 204 |
|
| 205 | 205 |
os::cmd::expect_success 'oc login -u schema2-user -p pass' |
| 206 | 206 |
os::cmd::expect_success "oc new-project schema2" |
| ... | ... |
@@ -213,7 +213,7 @@ os::cmd::expect_failure_and_text "oc get --server='https://${KUBELET_HOST}:${KUB
|
| 213 | 213 |
os::test::junit::declare_suite_end |
| 214 | 214 |
|
| 215 | 215 |
|
| 216 |
-echo "[INFO] Fetch manifest V2 schema 2 image with old client using pullthrough" |
|
| 216 |
+os::log::info "Fetch manifest V2 schema 2 image with old client using pullthrough" |
|
| 217 | 217 |
os::cmd::expect_success "oc import-image --confirm --from=hello-world:latest hello-world:pullthrough" |
| 218 | 218 |
os::cmd::expect_success_and_text "oc get -o jsonpath='{.image.dockerImageManifestMediaType}' istag hello-world:pullthrough" 'application/vnd\.docker\.distribution\.manifest\.v2\+json'
|
| 219 | 219 |
hello_world_name="$(oc get -o 'jsonpath={.image.metadata.name}' istag hello-world:pullthrough)"
|
| ... | ... |
@@ -232,9 +232,9 @@ os::cmd::expect_success_and_text "jq -r '.schemaVersion' ${ARTIFACT_DIR}/hello-w
|
| 232 | 232 |
os::cmd::expect_success_and_not_text "echo '${hello_world_schema1_digest}'" "${hello_world_name}"
|
| 233 | 233 |
os::cmd::expect_success_and_text "echo '${hello_world_schema1_digest}'" ".+"
|
| 234 | 234 |
os::cmd::expect_success_and_text "curl -I -u 'schema2-user:${schema2_user_token}' '${DOCKER_REGISTRY}/v2/schema2/hello-world/manifests/${hello_world_schema1_digest}'" "404 Not Found"
|
| 235 |
-echo "[INFO] Manifest V2 schema 2 image fetched successfully with old client" |
|
| 235 |
+os::log::info "Manifest V2 schema 2 image fetched successfully with old client" |
|
| 236 | 236 |
|
| 237 |
-echo "[INFO] Back to 'default' project with 'admin' user..." |
|
| 237 |
+os::log::info "Back to 'default' project with 'admin' user..." |
|
| 238 | 238 |
os::cmd::expect_success "oc project ${CLUSTER_ADMIN_CONTEXT}"
|
| 239 | 239 |
os::cmd::expect_success_and_text 'oc whoami' 'system:admin' |
| 240 | 240 |
|
| ... | ... |
@@ -243,11 +243,11 @@ os::cmd::expect_success 'oc policy add-role-to-user system:image-pusher -n cache |
| 243 | 243 |
os::cmd::expect_success 'oc login -u pusher -p pass' |
| 244 | 244 |
pusher_token="$(oc whoami -t)" |
| 245 | 245 |
|
| 246 |
-echo "[INFO] Docker login as pusher to ${DOCKER_REGISTRY}"
|
|
| 246 |
+os::log::info "Docker login as pusher to ${DOCKER_REGISTRY}"
|
|
| 247 | 247 |
os::cmd::expect_success "docker login -u e2e-user -p ${pusher_token} -e pusher@openshift.com ${DOCKER_REGISTRY}"
|
| 248 |
-echo "[INFO] Docker login successful" |
|
| 248 |
+os::log::info "Docker login successful" |
|
| 249 | 249 |
|
| 250 |
-echo "[INFO] Anonymous registry access" |
|
| 250 |
+os::log::info "Anonymous registry access" |
|
| 251 | 251 |
# setup: log out of docker, log into openshift as e2e-user to run policy commands, tag image to use for push attempts |
| 252 | 252 |
os::cmd::expect_success 'oc login -u e2e-user' |
| 253 | 253 |
os::cmd::expect_success 'docker pull busybox' |
| ... | ... |
@@ -271,7 +271,7 @@ os::cmd::expect_success 'oc policy add-role-to-user system:image-pusher system:a |
| 271 | 271 |
os::cmd::try_until_text 'oc policy who-can update imagestreams/layers -n custom' 'system:anonymous' |
| 272 | 272 |
os::cmd::expect_success "docker push ${DOCKER_REGISTRY}/custom/cross:namespace-pull"
|
| 273 | 273 |
os::cmd::expect_success "docker push ${DOCKER_REGISTRY}/custom/cross:namespace-pull-id"
|
| 274 |
-echo "[INFO] Anonymous registry access successfull" |
|
| 274 |
+os::log::info "Anonymous registry access successful" |
|
| 275 | 275 |
|
| 276 | 276 |
# log back into docker as e2e-user again |
| 277 | 277 |
os::cmd::expect_success "docker login -u e2e-user -p ${e2e_user_token} -e e2e-user@openshift.com ${DOCKER_REGISTRY}"
|
| ... | ... |
@@ -279,14 +279,14 @@ os::cmd::expect_success "docker login -u e2e-user -p ${e2e_user_token} -e e2e-us
|
| 279 | 279 |
os::cmd::expect_success "oc new-project crossmount" |
| 280 | 280 |
os::cmd::expect_success "oc create imagestream repo" |
| 281 | 281 |
|
| 282 |
-echo "[INFO] Back to 'default' project with 'admin' user..." |
|
| 282 |
+os::log::info "Back to 'default' project with 'admin' user..." |
|
| 283 | 283 |
os::cmd::expect_success "oc project ${CLUSTER_ADMIN_CONTEXT}"
|
| 284 | 284 |
os::cmd::expect_success_and_text 'oc whoami' 'system:admin' |
| 285 | 285 |
os::cmd::expect_success "oc tag --source docker centos/ruby-22-centos7:latest -n custom ruby-22-centos7:latest" |
| 286 | 286 |
os::cmd::expect_success 'oc policy add-role-to-user registry-viewer pusher -n custom' |
| 287 | 287 |
os::cmd::expect_success 'oc policy add-role-to-user system:image-pusher pusher -n crossmount' |
| 288 | 288 |
|
| 289 |
-echo "[INFO] Docker cross-repo mount" |
|
| 289 |
+os::log::info "Docker cross-repo mount" |
|
| 290 | 290 |
os::cmd::expect_success_and_text "curl -I -X HEAD -u 'pusher:${pusher_token}' '${DOCKER_REGISTRY}/v2/cache/ruby-22-centos7/blobs/$rubyimageblob'" "200 OK"
|
| 291 | 291 |
os::cmd::try_until_text "oc get -n custom is/ruby-22-centos7 -o 'jsonpath={.status.tags[*].tag}'" "latest" $((20*TIME_SEC))
|
| 292 | 292 |
os::cmd::expect_success_and_text "curl -I -X HEAD -u 'pusher:${pusher_token}' '${DOCKER_REGISTRY}/v2/custom/ruby-22-centos7/blobs/$rubyimageblob'" "200 OK"
|
| ... | ... |
@@ -303,15 +303,15 @@ os::cmd::expect_success "oc policy remove-role-from-user system:image-pusher pus |
| 303 | 303 |
os::cmd::try_until_text "oc policy can-i get imagestreams/layers -n cache '--token=${pusher_token}'" "no"
|
| 304 | 304 |
# cross-repo mount failed because of access denied |
| 305 | 305 |
os::cmd::expect_success_and_text "curl -I -X POST -u 'pusher:${pusher_token}' '${DOCKER_REGISTRY}/v2/crossmount/repo/blobs/uploads/?mount=$rubyimageblob&from=cache/ruby-22-centos7'" "202 Accepted"
|
| 306 |
-echo "[INFO] Docker cross-repo mount successful" |
|
| 306 |
+os::log::info "Docker cross-repo mount successful" |
|
| 307 | 307 |
|
| 308 | 308 |
# The build requires a dockercfg secret in the builder service account in order |
| 309 | 309 |
# to be able to push to the registry. Make sure it exists first. |
| 310 |
-echo "[INFO] Waiting for dockercfg secrets to be generated in project 'test' before building" |
|
| 310 |
+os::log::info "Waiting for dockercfg secrets to be generated in project 'test' before building" |
|
| 311 | 311 |
os::cmd::try_until_text 'oc get -n test serviceaccount/builder -o yaml' 'dockercfg' |
| 312 | 312 |
|
| 313 | 313 |
# Process template and create |
| 314 |
-echo "[INFO] Submitting application template json for processing..." |
|
| 314 |
+os::log::info "Submitting application template json for processing..." |
|
| 315 | 315 |
STI_CONFIG_FILE="${ARTIFACT_DIR}/stiAppConfig.json"
|
| 316 | 316 |
DOCKER_CONFIG_FILE="${ARTIFACT_DIR}/dockerAppConfig.json"
|
| 317 | 317 |
CUSTOM_CONFIG_FILE="${ARTIFACT_DIR}/customAppConfig.json"
|
| ... | ... |
@@ -319,12 +319,12 @@ os::cmd::expect_success "oc process -n test -f examples/sample-app/application-t |
| 319 | 319 |
os::cmd::expect_success "oc process -n docker -f examples/sample-app/application-template-dockerbuild.json > '${DOCKER_CONFIG_FILE}'"
|
| 320 | 320 |
os::cmd::expect_success "oc process -n custom -f examples/sample-app/application-template-custombuild.json > '${CUSTOM_CONFIG_FILE}'"
|
| 321 | 321 |
|
| 322 |
-echo "[INFO] Back to 'test' context with 'e2e-user' user" |
|
| 322 |
+os::log::info "Back to 'test' context with 'e2e-user' user" |
|
| 323 | 323 |
os::cmd::expect_success 'oc login -u e2e-user' |
| 324 | 324 |
os::cmd::expect_success 'oc project test' |
| 325 | 325 |
os::cmd::expect_success 'oc whoami' |
| 326 | 326 |
|
| 327 |
-echo "[INFO] Running a CLI command in a container using the service account" |
|
| 327 |
+os::log::info "Running a CLI command in a container using the service account" |
|
| 328 | 328 |
os::cmd::expect_success 'oc policy add-role-to-user view -z default' |
| 329 | 329 |
oc run cli-with-token --attach --image="openshift/origin:${TAG}" --restart=Never -- cli status --loglevel=4 > "${LOG_DIR}/cli-with-token.log" 2>&1
|
| 330 | 330 |
# TODO Switch back to using cat once https://github.com/docker/docker/pull/26718 is in our Godeps |
| ... | ... |
@@ -345,7 +345,7 @@ oc run kubectl-with-token --attach --image="openshift/origin:${TAG}" --restart=N
|
| 345 | 345 |
os::cmd::expect_success_and_text "oc logs kubectl-with-token" 'Using in-cluster configuration' |
| 346 | 346 |
os::cmd::expect_success_and_text "oc logs kubectl-with-token" 'kubectl-with-token' |
| 347 | 347 |
|
| 348 |
-echo "[INFO] Testing deployment logs and failing pre and mid hooks ..." |
|
| 348 |
+os::log::info "Testing deployment logs and failing pre and mid hooks ..." |
|
| 349 | 349 |
# test hook selectors |
| 350 | 350 |
os::cmd::expect_success "oc create -f ${OS_ROOT}/test/testdata/complete-dc-hooks.yaml"
|
| 351 | 351 |
os::cmd::try_until_text 'oc get pods -l openshift.io/deployer-pod.type=hook-pre -o jsonpath={.items[*].status.phase}' '^Succeeded$'
|
| ... | ... |
@@ -376,14 +376,14 @@ os::cmd::expect_success "oc patch dc/failing-dc-mid -p '{\"status\":{\"latestVer
|
| 376 | 376 |
os::cmd::expect_success_and_text 'oc logs --version=1 dc/failing-dc-mid' 'test mid hook executed' |
| 377 | 377 |
os::cmd::expect_success_and_text 'oc logs --previous dc/failing-dc-mid' 'test mid hook executed' |
| 378 | 378 |
|
| 379 |
-echo "[INFO] Run pod diagnostics" |
|
| 379 |
+os::log::info "Run pod diagnostics" |
|
| 380 | 380 |
# Requires a node to run the origin-deployer pod; expects registry deployed, deployer image pulled |
| 381 | 381 |
# TODO: Find out why this would flake expecting PodCheckDns to run |
| 382 | 382 |
# https://github.com/openshift/origin/issues/9888 |
| 383 | 383 |
#os::cmd::expect_success_and_text 'oadm diagnostics DiagnosticPod --images='"'""${USE_IMAGES}""'" 'Running diagnostic: PodCheckDns'
|
| 384 | 384 |
os::cmd::expect_success_and_not_text "oadm diagnostics DiagnosticPod --images='${USE_IMAGES}'" ERROR
|
| 385 | 385 |
|
| 386 |
-echo "[INFO] Applying STI application config" |
|
| 386 |
+os::log::info "Applying STI application config" |
|
| 387 | 387 |
os::cmd::expect_success "oc create -f ${STI_CONFIG_FILE}"
|
| 388 | 388 |
|
| 389 | 389 |
# Wait for build which should have triggered automatically |
| ... | ... |
@@ -408,11 +408,11 @@ os::cmd::expect_success 'oc logs buildconfigs/ruby-sample-build --loglevel=6' |
| 408 | 408 |
os::cmd::expect_success 'oc logs buildconfig/ruby-sample-build --loglevel=6' |
| 409 | 409 |
echo "logs: ok" |
| 410 | 410 |
|
| 411 |
-echo "[INFO] Starting build from ${STI_CONFIG_FILE} with non-existing commit..."
|
|
| 411 |
+os::log::info "Starting build from ${STI_CONFIG_FILE} with non-existing commit..."
|
|
| 412 | 412 |
os::cmd::expect_failure 'oc start-build test --commit=fffffff --wait' |
| 413 | 413 |
|
| 414 | 414 |
# Remote command execution |
| 415 |
-echo "[INFO] Validating exec" |
|
| 415 |
+os::log::info "Validating exec" |
|
| 416 | 416 |
frontend_pod=$(oc get pod -l deploymentconfig=frontend --template='{{(index .items 0).metadata.name}}')
|
| 417 | 417 |
# when running as a restricted pod the registry will run with a pre-allocated |
| 418 | 418 |
# user in the neighborhood of 1000000+. Look for a substring of the pre-allocated uid range |
| ... | ... |
@@ -426,53 +426,53 @@ os::cmd::expect_success_and_text "oc logs dc/frontend" 'Connecting to production |
| 426 | 426 |
os::cmd::expect_success_and_text "oc deploy frontend" 'deployed' |
| 427 | 427 |
|
| 428 | 428 |
# Port forwarding |
| 429 |
-echo "[INFO] Validating port-forward" |
|
| 429 |
+os::log::info "Validating port-forward" |
|
| 430 | 430 |
os::cmd::expect_success "oc port-forward -p ${frontend_pod} 10080:8080 &> '${LOG_DIR}/port-forward.log' &"
|
| 431 | 431 |
os::cmd::try_until_success "curl --max-time 2 --fail --silent 'http://localhost:10080'" "$((10*TIME_SEC))" |
| 432 | 432 |
|
| 433 | 433 |
# Rsync |
| 434 |
-echo "[INFO] Validating rsync" |
|
| 434 |
+os::log::info "Validating rsync" |
|
| 435 | 435 |
os::cmd::expect_success "oc rsync examples/sample-app ${frontend_pod}:/tmp"
|
| 436 | 436 |
os::cmd::expect_success_and_text "oc rsh ${frontend_pod} ls /tmp/sample-app" 'application-template-stibuild'
|
| 437 | 437 |
|
| 438 |
-#echo "[INFO] Applying Docker application config" |
|
| 438 |
+#os::log::info "Applying Docker application config" |
|
| 439 | 439 |
#oc create -n docker -f "${DOCKER_CONFIG_FILE}"
|
| 440 |
-#echo "[INFO] Invoking generic web hook to trigger new docker build using curl" |
|
| 440 |
+#os::log::info "Invoking generic web hook to trigger new docker build using curl" |
|
| 441 | 441 |
#curl -k -X POST $API_SCHEME://$API_HOST:$API_PORT/oapi/v1/namespaces/docker/buildconfigs/ruby-sample-build/webhooks/secret101/generic && sleep 3 |
| 442 | 442 |
# BUILD_ID="$( oc get builds --namespace docker -o jsonpath='{.items[0].metadata.name}' )"
|
| 443 | 443 |
# os::cmd::try_until_text "oc get builds --namespace docker -o jsonpath='{.items[0].status.phase}'" "Complete" "$(( 10*TIME_MIN ))"
|
| 444 | 444 |
# os::cmd::expect_success "oc build-logs --namespace docker '${BUILD_ID}' > '${LOG_DIR}/docker-build.log'"
|
| 445 | 445 |
#wait_for_app "docker" |
| 446 | 446 |
|
| 447 |
-#echo "[INFO] Applying Custom application config" |
|
| 447 |
+#os::log::info "Applying Custom application config" |
|
| 448 | 448 |
#oc create -n custom -f "${CUSTOM_CONFIG_FILE}"
|
| 449 |
-#echo "[INFO] Invoking generic web hook to trigger new custom build using curl" |
|
| 449 |
+#os::log::info "Invoking generic web hook to trigger new custom build using curl" |
|
| 450 | 450 |
#curl -k -X POST $API_SCHEME://$API_HOST:$API_PORT/oapi/v1/namespaces/custom/buildconfigs/ruby-sample-build/webhooks/secret101/generic && sleep 3 |
| 451 | 451 |
# BUILD_ID="$( oc get builds --namespace custom -o jsonpath='{.items[0].metadata.name}' )"
|
| 452 | 452 |
# os::cmd::try_until_text "oc get builds --namespace custom -o jsonpath='{.items[0].status.phase}'" "Complete" "$(( 10*TIME_MIN ))"
|
| 453 | 453 |
# os::cmd::expect_success "oc build-logs --namespace custom '${BUILD_ID}' > '${LOG_DIR}/custom-build.log'"
|
| 454 | 454 |
#wait_for_app "custom" |
| 455 | 455 |
|
| 456 |
-echo "[INFO] Back to 'default' project with 'admin' user..." |
|
| 456 |
+os::log::info "Back to 'default' project with 'admin' user..." |
|
| 457 | 457 |
os::cmd::expect_success "oc project ${CLUSTER_ADMIN_CONTEXT}"
|
| 458 | 458 |
|
| 459 | 459 |
# ensure the router is started |
| 460 | 460 |
# TODO: simplify when #4702 is fixed upstream |
| 461 | 461 |
os::cmd::try_until_text "oc get endpoints router --output-version=v1 --template='{{ if .subsets }}{{ len .subsets }}{{ else }}0{{ end }}'" '[1-9]+' $((5*TIME_MIN))
|
| 462 |
-echo "[INFO] Waiting for router to start..." |
|
| 462 |
+os::log::info "Waiting for router to start..." |
|
| 463 | 463 |
router_pod=$(oc get pod -n default -l deploymentconfig=router --template='{{(index .items 0).metadata.name}}')
|
| 464 | 464 |
healthz_uri="http://$(oc get pod "${router_pod}" --template='{{.status.podIP}}'):1936/healthz"
|
| 465 | 465 |
os::cmd::try_until_success "curl --max-time 2 --fail --silent '${healthz_uri}'" "$((5*TIME_MIN))"
|
| 466 | 466 |
|
| 467 | 467 |
# Check for privileged exec limitations. |
| 468 |
-echo "[INFO] Validating privileged pod exec" |
|
| 468 |
+os::log::info "Validating privileged pod exec" |
|
| 469 | 469 |
os::cmd::expect_success 'oc policy add-role-to-user admin e2e-default-admin' |
| 470 | 470 |
# system:admin should be able to exec into it |
| 471 | 471 |
os::cmd::expect_success "oc project ${CLUSTER_ADMIN_CONTEXT}"
|
| 472 | 472 |
os::cmd::expect_success "oc exec -n default -tip ${router_pod} ls"
|
| 473 | 473 |
|
| 474 | 474 |
|
| 475 |
-echo "[INFO] Validating routed app response..." |
|
| 475 |
+os::log::info "Validating routed app response..." |
|
| 476 | 476 |
# 172.17.42.1 is no longer the default ip of the docker bridge as of |
| 477 | 477 |
# docker 1.9. Since the router is using hostNetwork=true, the router |
| 478 | 478 |
# will be reachable via the ip of its pod. |
| ... | ... |
@@ -488,7 +488,7 @@ os::cmd::expect_success "oc create route edge --service=frontend --cert=${MASTER
|
| 488 | 488 |
os::cmd::try_until_text "curl -s -k --resolve 'www.example.com:443:${CONTAINER_ACCESSIBLE_API_HOST}' https://www.example.com" "Hello from OpenShift" "$((10*TIME_SEC))"
|
| 489 | 489 |
|
| 490 | 490 |
# Pod node selection |
| 491 |
-echo "[INFO] Validating pod.spec.nodeSelector rejections" |
|
| 491 |
+os::log::info "Validating pod.spec.nodeSelector rejections" |
|
| 492 | 492 |
# Create a project that enforces an impossible to satisfy nodeSelector, and two pods, one of which has an explicit node name |
| 493 | 493 |
os::cmd::expect_success "openshift admin new-project node-selector --description='This is an example project to test node selection prevents deployment' --admin='e2e-user' --node-selector='impossible-label=true'" |
| 494 | 494 |
os::cmd::expect_success "oc process -n node-selector -v NODE_NAME='$(oc get node -o jsonpath='{.items[0].metadata.name}')' -f test/testdata/node-selector/pods.json | oc create -n node-selector -f -"
|
| ... | ... |
@@ -499,7 +499,7 @@ os::cmd::try_until_text 'oc get events -n node-selector' 'pod-with-node-name.+Ma |
| 499 | 499 |
|
| 500 | 500 |
|
| 501 | 501 |
# Image pruning |
| 502 |
-echo "[INFO] Validating image pruning" |
|
| 502 |
+os::log::info "Validating image pruning" |
|
| 503 | 503 |
# builder service account should have the power to create new image streams: prune in this case |
| 504 | 504 |
os::cmd::expect_success "docker login -u e2e-user -p $(oc sa get-token builder -n cache) -e builder@openshift.com ${DOCKER_REGISTRY}"
|
| 505 | 505 |
os::cmd::expect_success 'docker pull busybox' |
| ... | ... |
@@ -536,16 +536,16 @@ os::cmd::expect_success "oc exec -p ${registry_pod} du /registry > '${LOG_DIR}/p
|
| 536 | 536 |
|
| 537 | 537 |
# make sure there were changes to the registry's storage |
| 538 | 538 |
os::cmd::expect_code "diff ${LOG_DIR}/prune-images.before.txt ${LOG_DIR}/prune-images.after.txt" 1
|
| 539 |
-echo "[INFO] Validated image pruning" |
|
| 539 |
+os::log::info "Validated image pruning" |
|
| 540 | 540 |
|
| 541 | 541 |
# with registry's re-deployment we loose all the blobs stored in its storage until now |
| 542 |
-echo "[INFO] Configure registry to accept manifest V2 schema 2" |
|
| 542 |
+os::log::info "Configure registry to accept manifest V2 schema 2" |
|
| 543 | 543 |
os::cmd::expect_success "oc project '${CLUSTER_ADMIN_CONTEXT}'"
|
| 544 | 544 |
os::cmd::expect_success 'oc env -n default dc/docker-registry REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_ACCEPTSCHEMA2=true' |
| 545 | 545 |
os::cmd::expect_success 'oc rollout status dc/docker-registry' |
| 546 |
-echo "[INFO] Registry configured to accept manifest V2 schema 2" |
|
| 546 |
+os::log::info "Registry configured to accept manifest V2 schema 2" |
|
| 547 | 547 |
|
| 548 |
-echo "[INFO] Accept manifest V2 schema 2" |
|
| 548 |
+os::log::info "Accept manifest V2 schema 2" |
|
| 549 | 549 |
os::cmd::expect_success "oc login -u schema2-user -p pass" |
| 550 | 550 |
os::cmd::expect_success "oc project schema2" |
| 551 | 551 |
# tagging remote docker.io/busybox image |
| ... | ... |
@@ -554,9 +554,9 @@ os::cmd::expect_success "docker login -u e2e-user -p '${schema2_user_token}' -e
|
| 554 | 554 |
os::cmd::expect_success "docker push '${DOCKER_REGISTRY}/schema2/busybox'"
|
| 555 | 555 |
# image accepted as schema 2 |
| 556 | 556 |
os::cmd::expect_success_and_text "oc get -o jsonpath='{.image.dockerImageManifestMediaType}' istag busybox:latest" 'application/vnd\.docker\.distribution\.manifest\.v2\+json'
|
| 557 |
-echo "[INFO] Manifest V2 schema 2 successfully accepted" |
|
| 557 |
+os::log::info "Manifest V2 schema 2 successfully accepted" |
|
| 558 | 558 |
|
| 559 |
-echo "[INFO] Convert manifest V2 schema 2 to schema 1 for older client" |
|
| 559 |
+os::log::info "Convert manifest V2 schema 2 to schema 1 for older client" |
|
| 560 | 560 |
os::cmd::expect_success 'oc login -u schema2-user -p pass' |
| 561 | 561 |
os::cmd::expect_success "oc new-project schema2tagged" |
| 562 | 562 |
os::cmd::expect_success "oc tag --source=istag schema2/busybox:latest busybox:latest" |
| ... | ... |
@@ -572,14 +572,14 @@ os::cmd::expect_success_and_text "echo '${busybox_schema1_digest}'" ".+"
|
| 572 | 572 |
os::cmd::expect_success_and_text "curl -I -u 'schema2-user:${schema2_user_token}' '${DOCKER_REGISTRY}/v2/schema2tagged/busybox/manifests/${busybox_schema1_digest}'" "404 Not Found"
|
| 573 | 573 |
# ensure we can fetch it back as schema 2 |
| 574 | 574 |
os::cmd::expect_success_and_text "curl -I -u 'schema2-user:${schema2_user_token}' -H 'Accept: application/vnd.docker.distribution.manifest.v2+json' '${DOCKER_REGISTRY}/v2/schema2tagged/busybox/manifests/latest'" "Docker-Content-Digest:\s*${busybox_name}"
|
| 575 |
-echo "[INFO] Manifest V2 schema 2 successfully converted to schema 1" |
|
| 575 |
+os::log::info "Manifest V2 schema 2 successfully converted to schema 1" |
|
| 576 | 576 |
|
| 577 |
-echo "[INFO] Verify image size calculation" |
|
| 577 |
+os::log::info "Verify image size calculation" |
|
| 578 | 578 |
busybox_expected_size="$(oc get -o 'jsonpath={.dockerImageManifest}' image "${busybox_name}" --context="${CLUSTER_ADMIN_CONTEXT}" | jq -r '[.. | .size?] | add')"
|
| 579 | 579 |
busybox_calculated_size="$(oc get -o go-template='{{.dockerImageMetadata.Size}}' image "${busybox_name}" --context="${CLUSTER_ADMIN_CONTEXT}")"
|
| 580 | 580 |
os::cmd::expect_success_and_text "echo '${busybox_expected_size}:${busybox_calculated_size}'" '^[1-9][0-9]*:[1-9][0-9]*$'
|
| 581 | 581 |
os::cmd::expect_success_and_text "echo '${busybox_expected_size}'" "${busybox_calculated_size}"
|
| 582 |
-echo "[INFO] Image size matches" |
|
| 582 |
+os::log::info "Image size matches" |
|
| 583 | 583 |
|
| 584 | 584 |
os::test::junit::declare_suite_end |
| 585 | 585 |
unset VERBOSE |
| ... | ... |
@@ -11,7 +11,7 @@ function cleanup() |
| 11 | 11 |
{
|
| 12 | 12 |
out=$? |
| 13 | 13 |
kill $OS_PID |
| 14 |
- echo "[INFO] Exiting" |
|
| 14 |
+ os::log::info "Exiting" |
|
| 15 | 15 |
exit $out |
| 16 | 16 |
} |
| 17 | 17 |
|
| ... | ... |
@@ -19,15 +19,15 @@ trap "exit" INT TERM |
| 19 | 19 |
trap "cleanup" EXIT |
| 20 | 20 |
|
| 21 | 21 |
|
| 22 |
-echo "[INFO] Starting server as distinct processes" |
|
| 23 |
-echo "[INFO] `openshift version`" |
|
| 24 |
-echo "[INFO] Server logs will be at: ${LOG_DIR}/openshift.log"
|
|
| 25 |
-echo "[INFO] Test artifacts will be in: ${ARTIFACT_DIR}"
|
|
| 26 |
-echo "[INFO] Config dir is: ${SERVER_CONFIG_DIR}"
|
|
| 22 |
+os::log::info "Starting server as distinct processes" |
|
| 23 |
+os::log::info "`openshift version`" |
|
| 24 |
+os::log::info "Server logs will be at: ${LOG_DIR}/openshift.log"
|
|
| 25 |
+os::log::info "Test artifacts will be in: ${ARTIFACT_DIR}"
|
|
| 26 |
+os::log::info "Config dir is: ${SERVER_CONFIG_DIR}"
|
|
| 27 | 27 |
|
| 28 | 28 |
mkdir -p ${LOG_DIR}
|
| 29 | 29 |
|
| 30 |
-echo "[INFO] Scan of OpenShift related processes already up via ps -ef | grep openshift : " |
|
| 30 |
+os::log::info "Scan of OpenShift related processes already up via ps -ef | grep openshift : " |
|
| 31 | 31 |
ps -ef | grep openshift |
| 32 | 32 |
|
| 33 | 33 |
mkdir -p "${SERVER_CONFIG_DIR}"
|
| ... | ... |
@@ -14,7 +14,7 @@ function cleanup() |
| 14 | 14 |
out=$? |
| 15 | 15 |
pgrep -f "openshift" | xargs -r sudo kill |
| 16 | 16 |
cleanup_openshift |
| 17 |
- echo "[INFO] Exiting" |
|
| 17 |
+ os::log::info "Exiting" |
|
| 18 | 18 |
exit $out |
| 19 | 19 |
} |
| 20 | 20 |
|
| ... | ... |
@@ -22,30 +22,30 @@ trap "exit" INT TERM |
| 22 | 22 |
trap "cleanup" EXIT |
| 23 | 23 |
|
| 24 | 24 |
|
| 25 |
-echo "[INFO] Starting server as distinct processes" |
|
| 25 |
+os::log::info "Starting server as distinct processes" |
|
| 26 | 26 |
ensure_iptables_or_die |
| 27 | 27 |
os::start::configure_server |
| 28 | 28 |
|
| 29 |
-echo "[INFO] `openshift version`" |
|
| 30 |
-echo "[INFO] Server logs will be at: ${LOG_DIR}/openshift.log"
|
|
| 31 |
-echo "[INFO] Test artifacts will be in: ${ARTIFACT_DIR}"
|
|
| 32 |
-echo "[INFO] Volumes dir is: ${VOLUME_DIR}"
|
|
| 33 |
-echo "[INFO] Config dir is: ${SERVER_CONFIG_DIR}"
|
|
| 34 |
-echo "[INFO] Using images: ${USE_IMAGES}"
|
|
| 35 |
-echo "[INFO] MasterIP is: ${MASTER_ADDR}"
|
|
| 29 |
+os::log::info "`openshift version`" |
|
| 30 |
+os::log::info "Server logs will be at: ${LOG_DIR}/openshift.log"
|
|
| 31 |
+os::log::info "Test artifacts will be in: ${ARTIFACT_DIR}"
|
|
| 32 |
+os::log::info "Volumes dir is: ${VOLUME_DIR}"
|
|
| 33 |
+os::log::info "Config dir is: ${SERVER_CONFIG_DIR}"
|
|
| 34 |
+os::log::info "Using images: ${USE_IMAGES}"
|
|
| 35 |
+os::log::info "MasterIP is: ${MASTER_ADDR}"
|
|
| 36 | 36 |
|
| 37 | 37 |
mkdir -p ${LOG_DIR}
|
| 38 | 38 |
|
| 39 |
-echo "[INFO] Scan of OpenShift related processes already up via ps -ef | grep openshift : " |
|
| 39 |
+os::log::info "Scan of OpenShift related processes already up via ps -ef | grep openshift : " |
|
| 40 | 40 |
ps -ef | grep openshift |
| 41 | 41 |
|
| 42 |
-echo "[INFO] Starting etcdserver" |
|
| 42 |
+os::log::info "Starting etcdserver" |
|
| 43 | 43 |
sudo env "PATH=${PATH}" OPENSHIFT_ON_PANIC=crash openshift start etcd \
|
| 44 | 44 |
--config=${MASTER_CONFIG_DIR}/master-config.yaml \
|
| 45 | 45 |
--loglevel=4 \ |
| 46 | 46 |
&>"${LOG_DIR}/os-etcdserver.log" &
|
| 47 | 47 |
|
| 48 |
-echo "[INFO] Starting api server" |
|
| 48 |
+os::log::info "Starting api server" |
|
| 49 | 49 |
sudo env "PATH=${PATH}" OPENSHIFT_PROFILE=web OPENSHIFT_ON_PANIC=crash openshift start master api \
|
| 50 | 50 |
--config=${MASTER_CONFIG_DIR}/master-config.yaml \
|
| 51 | 51 |
--loglevel=4 \ |
| ... | ... |
@@ -53,11 +53,11 @@ sudo env "PATH=${PATH}" OPENSHIFT_PROFILE=web OPENSHIFT_ON_PANIC=crash openshift
|
| 53 | 53 |
|
| 54 | 54 |
os::cmd::try_until_text "oc get --raw /healthz --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'" 'ok' $(( 80 * second )) 0.25
|
| 55 | 55 |
os::cmd::try_until_text "oc get --raw /healthz/ready --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'" 'ok' $(( 80 * second )) 0.25
|
| 56 |
-echo "[INFO] OpenShift API server up at: " |
|
| 56 |
+os::log::info "OpenShift API server up at: " |
|
| 57 | 57 |
date |
| 58 | 58 |
|
| 59 | 59 |
# test alternate node level launches |
| 60 |
-echo "[INFO] Testing alternate node configurations" |
|
| 60 |
+os::log::info "Testing alternate node configurations" |
|
| 61 | 61 |
|
| 62 | 62 |
# proxy only |
| 63 | 63 |
sudo env "PATH=${PATH}" TEST_CALL=1 OPENSHIFT_ON_PANIC=crash openshift start network --enable=proxy \
|
| ... | ... |
@@ -111,25 +111,25 @@ os::cmd::expect_success_and_not_text 'cat ${LOG_DIR}/os-node-3.log' 'Starting no
|
| 111 | 111 |
os::cmd::expect_success_and_not_text 'cat ${LOG_DIR}/os-node-3.log' 'Started Kubernetes Proxy on'
|
| 112 | 112 |
|
| 113 | 113 |
|
| 114 |
-echo "[INFO] Starting controllers" |
|
| 114 |
+os::log::info "Starting controllers" |
|
| 115 | 115 |
sudo env "PATH=${PATH}" OPENSHIFT_ON_PANIC=crash openshift start master controllers \
|
| 116 | 116 |
--config=${MASTER_CONFIG_DIR}/master-config.yaml \
|
| 117 | 117 |
--loglevel=4 \ |
| 118 | 118 |
&>"${LOG_DIR}/os-controllers.log" &
|
| 119 | 119 |
|
| 120 |
-echo "[INFO] Starting node" |
|
| 120 |
+os::log::info "Starting node" |
|
| 121 | 121 |
sudo env "PATH=${PATH}" OPENSHIFT_ON_PANIC=crash openshift start node \
|
| 122 | 122 |
--config=${NODE_CONFIG_DIR}/node-config.yaml \
|
| 123 | 123 |
--loglevel=4 \ |
| 124 | 124 |
&>"${LOG_DIR}/os-node.log" &
|
| 125 | 125 |
export OS_PID=$! |
| 126 | 126 |
|
| 127 |
-echo "[INFO] OpenShift server start at: " |
|
| 127 |
+os::log::info "OpenShift server start at: " |
|
| 128 | 128 |
date |
| 129 | 129 |
|
| 130 | 130 |
os::cmd::try_until_text "oc get --raw ${KUBELET_SCHEME}://${KUBELET_HOST}:${KUBELET_PORT}/healthz --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'" 'ok' minute 0.5
|
| 131 | 131 |
os::cmd::try_until_success "oc get --raw /api/v1/nodes/${KUBELET_HOST} --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'" $(( 80 * second )) 0.25
|
| 132 |
-echo "[INFO] OpenShift node health checks done at: " |
|
| 132 |
+os::log::info "OpenShift node health checks done at: " |
|
| 133 | 133 |
date |
| 134 | 134 |
|
| 135 | 135 |
# set our default KUBECONFIG location |
| ... | ... |
@@ -14,14 +14,14 @@ function cleanup() |
| 14 | 14 |
out=$? |
| 15 | 15 |
docker rmi test/scratchimage |
| 16 | 16 |
cleanup_openshift |
| 17 |
- echo "[INFO] Exiting" |
|
| 17 |
+ os::log::info "Exiting" |
|
| 18 | 18 |
return "${out}"
|
| 19 | 19 |
} |
| 20 | 20 |
|
| 21 | 21 |
trap "exit" INT TERM |
| 22 | 22 |
trap "cleanup" EXIT |
| 23 | 23 |
|
| 24 |
-echo "[INFO] Starting server" |
|
| 24 |
+os::log::info "Starting server" |
|
| 25 | 25 |
|
| 26 | 26 |
os::util::environment::use_sudo |
| 27 | 27 |
os::util::environment::setup_all_server_vars "test-extended/cmd/" |
| ... | ... |
@@ -44,7 +44,7 @@ docker_registry="$( oc get service/docker-registry -n default -o jsonpath='{.spe
|
| 44 | 44 |
os::test::junit::declare_suite_start "extended/cmd" |
| 45 | 45 |
|
| 46 | 46 |
os::test::junit::declare_suite_start "extended/cmd/new-app" |
| 47 |
-echo "[INFO] Running newapp extended tests" |
|
| 47 |
+os::log::info "Running newapp extended tests" |
|
| 48 | 48 |
oc login "${MASTER_ADDR}" -u new-app -p password --certificate-authority="${MASTER_CONFIG_DIR}/ca.crt"
|
| 49 | 49 |
oc new-project new-app |
| 50 | 50 |
oc delete all --all |
| ... | ... |
@@ -73,11 +73,11 @@ VERBOSE=true os::cmd::expect_success "oc project new-app" |
| 73 | 73 |
os::cmd::expect_failure_and_text "oc new-app test/scratchimage2 -o yaml" "partial match" |
| 74 | 74 |
# success with exact match |
| 75 | 75 |
os::cmd::expect_success "oc new-app test/scratchimage" |
| 76 |
-echo "[INFO] newapp: ok" |
|
| 76 |
+os::log::info "newapp: ok" |
|
| 77 | 77 |
os::test::junit::declare_suite_end |
| 78 | 78 |
|
| 79 | 79 |
os::test::junit::declare_suite_start "extended/cmd/variable-expansion" |
| 80 |
-echo "[INFO] Running env variable expansion tests" |
|
| 80 |
+os::log::info "Running env variable expansion tests" |
|
| 81 | 81 |
VERBOSE=true os::cmd::expect_success "oc new-project envtest" |
| 82 | 82 |
os::cmd::expect_success "oc create -f test/extended/testdata/test-env-pod.json" |
| 83 | 83 |
os::cmd::try_until_text "oc get pods" "Running" |
| ... | ... |
@@ -86,11 +86,11 @@ os::cmd::expect_success_and_text "oc exec test-pod env" "podname_composed=test-p |
| 86 | 86 |
os::cmd::expect_success_and_text "oc exec test-pod env" "var1=value1" |
| 87 | 87 |
os::cmd::expect_success_and_text "oc exec test-pod env" "var2=value1" |
| 88 | 88 |
os::cmd::expect_success_and_text "oc exec test-pod ps ax" "sleep 120" |
| 89 |
-echo "[INFO] variable-expansion: ok" |
|
| 89 |
+os::log::info "variable-expansion: ok" |
|
| 90 | 90 |
os::test::junit::declare_suite_end |
| 91 | 91 |
|
| 92 | 92 |
os::test::junit::declare_suite_start "extended/cmd/image-pull-secrets" |
| 93 |
-echo "[INFO] Running image pull secrets tests" |
|
| 93 |
+os::log::info "Running image pull secrets tests" |
|
| 94 | 94 |
VERBOSE=true os::cmd::expect_success "oc login '${MASTER_ADDR}' -u pull-secrets-user -p password --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt'"
|
| 95 | 95 |
|
| 96 | 96 |
# create a new project and push a busybox image in there |
| ... | ... |
@@ -18,5 +18,5 @@ os::test::extended::setup |
| 18 | 18 |
os::test::extended::focus "$@" |
| 19 | 19 |
|
| 20 | 20 |
|
| 21 |
-echo "[INFO] Running compatibility tests" |
|
| 21 |
+os::log::info "Running compatibility tests" |
|
| 22 | 22 |
FOCUS="\[Compatibility\]" SKIP="${SKIP_TESTS:-}" TEST_REPORT_FILE_NAME=compatibility os::test::extended::run -- -ginkgo.v -test.timeout 2h
|
| ... | ... |
@@ -20,7 +20,7 @@ sf=$(join '|' "${serial_only[@]}")
|
| 20 | 20 |
ss=$(join '|' "${serial_exclude[@]}")
|
| 21 | 21 |
|
| 22 | 22 |
|
| 23 |
-echo "[INFO] Running the following tests:" |
|
| 23 |
+os::log::info "Running the following tests:" |
|
| 24 | 24 |
TEST_REPORT_DIR= TEST_OUTPUT_QUIET=true ${EXTENDEDTEST} "--ginkgo.focus=${pf}" "--ginkgo.skip=${ps}" --ginkgo.dryRun --ginkgo.noColor | grep ok | grep -v skip | cut -c 20- | sort
|
| 25 | 25 |
TEST_REPORT_DIR= TEST_OUTPUT_QUIET=true ${EXTENDEDTEST} "--ginkgo.focus=${sf}" "--ginkgo.skip=${ss}" --ginkgo.dryRun --ginkgo.noColor | grep ok | grep -v skip | cut -c 20- | sort
|
| 26 | 26 |
echo |
| ... | ... |
@@ -29,11 +29,11 @@ exitstatus=0 |
| 29 | 29 |
|
| 30 | 30 |
# run parallel tests |
| 31 | 31 |
nodes="${PARALLEL_NODES:-5}"
|
| 32 |
-echo "[INFO] Running parallel tests N=${nodes}"
|
|
| 32 |
+os::log::info "Running parallel tests N=${nodes}"
|
|
| 33 | 33 |
TEST_REPORT_FILE_NAME=conformance_parallel ${GINKGO} -v "-focus=${pf}" "-skip=${ps}" -p -nodes "${nodes}" ${EXTENDEDTEST} -- -ginkgo.v -test.timeout 6h || exitstatus=$?
|
| 34 | 34 |
|
| 35 | 35 |
# run tests in serial |
| 36 |
-echo "[INFO] Running serial tests" |
|
| 36 |
+os::log::info "Running serial tests" |
|
| 37 | 37 |
TEST_REPORT_FILE_NAME=conformance_serial ${GINKGO} -v "-focus=${sf}" "-skip=${ss}" ${EXTENDEDTEST} -- -ginkgo.v -test.timeout 2h || exitstatus=$?
|
| 38 | 38 |
|
| 39 | 39 |
exit $exitstatus |
| ... | ... |
@@ -12,14 +12,14 @@ function cleanup() |
| 12 | 12 |
{
|
| 13 | 13 |
out=$? |
| 14 | 14 |
cleanup_openshift |
| 15 |
- echo "[INFO] Exiting" |
|
| 15 |
+ os::log::info "Exiting" |
|
| 16 | 16 |
return $out |
| 17 | 17 |
} |
| 18 | 18 |
|
| 19 | 19 |
trap "exit" INT TERM |
| 20 | 20 |
trap "cleanup" EXIT |
| 21 | 21 |
|
| 22 |
-echo "[INFO] Starting server" |
|
| 22 |
+os::log::info "Starting server" |
|
| 23 | 23 |
|
| 24 | 24 |
ensure_iptables_or_die |
| 25 | 25 |
os::util::environment::use_sudo |
| ... | ... |
@@ -94,13 +94,13 @@ function compare_and_cleanup() {
|
| 94 | 94 |
|
| 95 | 95 |
oc login -u system:admin -n default |
| 96 | 96 |
|
| 97 |
-echo "[INFO] Running extended tests" |
|
| 97 |
+os::log::info "Running extended tests" |
|
| 98 | 98 |
|
| 99 | 99 |
schema=('rfc2307' 'ad' 'augmented-ad')
|
| 100 | 100 |
|
| 101 | 101 |
for (( i=0; i<${#schema[@]}; i++ )); do
|
| 102 | 102 |
current_schema=${schema[$i]}
|
| 103 |
- echo "[INFO] Testing schema: ${current_schema}"
|
|
| 103 |
+ os::log::info "Testing schema: ${current_schema}"
|
|
| 104 | 104 |
|
| 105 | 105 |
WORKINGDIR=${BASETMPDIR}/${current_schema}
|
| 106 | 106 |
mkdir ${WORKINGDIR}
|