
Remove prompts from commented examples

Following the upstream style changes from Kubernetes.

Rodolfo Carvalho authored on 2016/04/25 22:57:44
Showing 80 changed files
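Every hunk below applies the same mechanical change: the leading `$ ` shell prompt is stripped from example lines embedded in command help strings, matching the upstream Kubernetes convention. As a minimal sketch of the resulting pattern, assuming a spf13/cobra-based command as in the OpenShift CLI (the command name `mine` and the usage strings here are illustrative, not from this commit):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// mineExample follows the new convention: example commands carry no
// leading "$ " prompt; %[1]s is replaced with the full command name.
// (Hypothetical names, for illustration only.)
const mineExample = `  # Run my command's first action
  %[1]s first

  # Run my command's second action on latest stuff
  %[1]s second --latest`

func main() {
	cmd := &cobra.Command{
		Use:     "mine",
		Short:   "Run my command",
		Example: fmt.Sprintf(mineExample, "oc mine"),
	}
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}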
... ...
@@ -57,10 +57,10 @@ const (
 for my command.`
 
   mineExample = `  # Run my command's first action
-  $ %[1]s first
+  %[1]s first
 
   # Run my command's second action on latest stuff
-  $ %[1]s second --latest`
+  %[1]s second --latest`
 )
 
 // NewCmdMine implement the OpenShift cli mine command.
... ...
@@ -137,10 +137,10 @@ Example:
 [source,go,numbered,options="nowrap"]
 ----
   deployExample = `  # Display the latest deployment for the 'database' deployment config
-  $ %[1]s database
+  %[1]s database
 
   # Start a new deployment based on the 'database' deployment config
-  $ %[1]s database --latest`
+  %[1]s database --latest`
 ----
 ====
 
... ...
@@ -244,10 +244,10 @@ for the deploy command.`
 
   // 3.
   deployExample = `  # Display the latest deployment for the 'database' DeploymentConfig
-  $ %[1]s database
+  %[1]s database
 
   # Start a new deployment based on the 'database' DeploymentConfig
-  $ %[1]s database --latest`
+  %[1]s database --latest`
 )
 
 // 4.
... ...
@@ -12,13 +12,13 @@ Output the inputs and dependencies of your builds
 [options="nowrap"]
 ----
   # Build the dependency tree for the 'latest' tag in <image-stream>
-  $ oadm build-chain <image-stream>
+  oadm build-chain <image-stream>
 
   # Build the dependency tree for 'v2' tag in dot format and visualize it via the dot utility
-  $ oadm build-chain <image-stream>:v2 -o dot | dot -T svg -o deps.svg
+  oadm build-chain <image-stream>:v2 -o dot | dot -T svg -o deps.svg
 
   # Build the dependency tree across all namespaces for the specified image stream tag found in 'test' namespace
-  $ oadm build-chain <image-stream> -n test --all
+  oadm build-chain <image-stream> -n test --all
 ----
 ====
 
... ...
@@ -31,10 +31,10 @@ Decrypt data encrypted with "oadm ca encrypt"
 [options="nowrap"]
 ----
 	# Decrypt an encrypted file to a cleartext file:
-	$ oadm ca decrypt --key=secret.key --in=secret.encrypted --out=secret.decrypted
+	oadm ca decrypt --key=secret.key --in=secret.encrypted --out=secret.decrypted
 	
 	# Decrypt from stdin to stdout:
-	$ oadm ca decrypt --key=secret.key < secret2.encrypted > secret2.decrypted
+	oadm ca decrypt --key=secret.key < secret2.encrypted > secret2.decrypted
 
 ----
 ====
... ...
@@ -48,10 +48,10 @@ Encrypt data with AES-256-CBC encryption
 [options="nowrap"]
 ----
 	# Encrypt the content of secret.txt with a generated key:
-	$ oadm ca encrypt --genkey=secret.key --in=secret.txt --out=secret.encrypted
+	oadm ca encrypt --genkey=secret.key --in=secret.txt --out=secret.encrypted
 	
 	# Encrypt the content of secret2.txt with an existing key:
-	$ oadm ca encrypt --key=secret.key < secret2.txt > secret2.encrypted
+	oadm ca encrypt --key=secret.key < secret2.txt > secret2.encrypted
 
 ----
 ====
... ...
@@ -162,7 +162,7 @@ Add users to a group
 [options="nowrap"]
 ----
   # Add user1 and user2 to my-group
-  $ oadm groups add-users my-group user1 user2
+  oadm groups add-users my-group user1 user2
 ----
 ====
 
... ...
@@ -175,10 +175,10 @@ Create a new group
 [options="nowrap"]
 ----
   # Add a group with no users
-  $ oadm groups new my-group
+  oadm groups new my-group
 
   # Add a group with two users
-  $ oadm groups new my-group user1 user2
+  oadm groups new my-group user1 user2
 ----
 ====
 
... ...
@@ -191,16 +191,16 @@ Prune OpenShift groups referencing missing records on an external provider.
 [options="nowrap"]
 ----
   # Prune all orphaned groups
-  $ oadm groups prune --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oadm groups prune --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Prune all orphaned groups except the ones from the blacklist file
-  $ oadm groups prune --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oadm groups prune --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Prune all orphaned groups from a list of specific groups specified in a whitelist file
-  $ oadm groups prune --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oadm groups prune --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Prune all orphaned groups from a list of specific groups specified in a whitelist
-  $ oadm groups prune groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oadm groups prune groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
 ----
 ====
... ...
@@ -214,7 +214,7 @@ Remove users from a group
 [options="nowrap"]
 ----
   # Remove user1 and user2 from my-group
-  $ oadm groups remove-users my-group user1 user2
+  oadm groups remove-users my-group user1 user2
 ----
 ====
 
... ...
@@ -227,19 +227,19 @@ Sync OpenShift groups with records from an external provider.
 [options="nowrap"]
 ----
   # Sync all groups from an LDAP server
-  $ oadm groups sync --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oadm groups sync --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Sync all groups except the ones from the blacklist file from an LDAP server
-  $ oadm groups sync --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oadm groups sync --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Sync specific groups specified in a whitelist file with an LDAP server
-  $ oadm groups sync --whitelist=/path/to/whitelist.txt --sync-config=/path/to/sync-config.yaml --confirm
+  oadm groups sync --whitelist=/path/to/whitelist.txt --sync-config=/path/to/sync-config.yaml --confirm
 
   # Sync all OpenShift Groups that have been synced previously with an LDAP server
-  $ oadm groups sync --type=openshift --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oadm groups sync --type=openshift --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Sync specific OpenShift Groups if they have been synced previously with an LDAP server
-  $ oadm groups sync groups/group1 groups/group2 groups/group3 --sync-config=/path/to/sync-config.yaml --confirm
+  oadm groups sync groups/group1 groups/group2 groups/group3 --sync-config=/path/to/sync-config.yaml --confirm
 
 ----
 ====
... ...
@@ -253,21 +253,21 @@ Install an IP failover group to a set of nodes
 [options="nowrap"]
 ----
   # Check the default IP failover configuration ("ipfailover"):
-  $ oadm ipfailover
+  oadm ipfailover
 
   # See what the IP failover configuration would look like if it is created:
-  $ oadm ipfailover -o json
+  oadm ipfailover -o json
 
   # Create an IP failover configuration if it does not already exist:
-  $ oadm ipfailover ipf --virtual-ips="10.1.1.1-4" --create
+  oadm ipfailover ipf --virtual-ips="10.1.1.1-4" --create
 
   # Create an IP failover configuration on a selection of nodes labeled
   # "router=us-west-ha" (on 4 nodes with 7 virtual IPs monitoring a service
   # listening on port 80, such as the router process).
-  $ oadm ipfailover ipfailover --selector="router=us-west-ha" --virtual-ips="1.2.3.4,10.1.1.100-104,5.6.7.8" --watch-port=80 --replicas=4 --create
+  oadm ipfailover ipfailover --selector="router=us-west-ha" --virtual-ips="1.2.3.4,10.1.1.100-104,5.6.7.8" --watch-port=80 --replicas=4 --create
 
   # Use a different IP failover config image and see the configuration:
-  $ oadm ipfailover ipf-alt --selector="hagroup=us-west-ha" --virtual-ips="1.2.3.4" -o yaml --images=myrepo/myipfailover:mytag
+  oadm ipfailover ipf-alt --selector="hagroup=us-west-ha" --virtual-ips="1.2.3.4" -o yaml --images=myrepo/myipfailover:mytag
 ----
 ====
 
... ...
@@ -280,19 +280,19 @@ Manage nodes - list pods, evacuate, or mark ready
 [options="nowrap"]
 ----
 	# Block accepting any pods on given nodes
-	$ oadm manage-node <mynode> --schedulable=false
+	oadm manage-node <mynode> --schedulable=false
 
 	# Mark selected nodes as schedulable
-	$ oadm manage-node --selector="<env=dev>" --schedulable=true
+	oadm manage-node --selector="<env=dev>" --schedulable=true
 
 	# Migrate selected pods
-	$ oadm manage-node <mynode> --evacuate --pod-selector="<service=myapp>"
+	oadm manage-node <mynode> --evacuate --pod-selector="<service=myapp>"
 
 	# Show pods that will be migrated
-	$ oadm manage-node <mynode> --evacuate --dry-run --pod-selector="<service=myapp>"
+	oadm manage-node <mynode> --evacuate --dry-run --pod-selector="<service=myapp>"
 
 	# List all pods on given nodes
-	$ oadm manage-node <mynode1> <mynode2> --list-pods
+	oadm manage-node <mynode1> <mynode2> --list-pods
 ----
 ====
 
... ...
@@ -337,10 +337,10 @@ Add users or serviceaccounts to a role in the current project
 [options="nowrap"]
 ----
   # Add the 'view' role to user1 in the current project
-  $ oadm policy add-role-to-user view user1
+  oadm policy add-role-to-user view user1
 
   # Add the 'edit' role to serviceaccount1 in the current project
-  $ oadm policy add-role-to-user edit -z serviceaccount1
+  oadm policy add-role-to-user edit -z serviceaccount1
 ----
 ====
 
... ...
@@ -353,10 +353,10 @@ Add users or serviceaccount to a security context constraint
 [options="nowrap"]
 ----
   # Add the 'restricted' security context contraint to user1 and user2
-  $ oadm policy add-scc-to-user restricted user1 user2
+  oadm policy add-scc-to-user restricted user1 user2
 
   # Add the 'privileged' security context contraint to the service account serviceaccount1 in the current namespace
-  $ oadm policy add-scc-to-user privileged -z serviceaccount1
+  oadm policy add-scc-to-user privileged -z serviceaccount1
 ----
 ====
 
... ...
@@ -369,19 +369,19 @@ Update cluster role bindings to match the recommended bootstrap policy
 [options="nowrap"]
 ----
   # Display the names of cluster role bindings that would be modified
-  $ oadm policy reconcile-cluster-role-bindings -o name
+  oadm policy reconcile-cluster-role-bindings -o name
 
   # Display the cluster role bindings that would be modified, removing any extra subjects
-  $ oadm policy reconcile-cluster-role-bindings --additive-only=false
+  oadm policy reconcile-cluster-role-bindings --additive-only=false
 
   # Update cluster role bindings that don't match the current defaults
-  $ oadm policy reconcile-cluster-role-bindings --confirm
+  oadm policy reconcile-cluster-role-bindings --confirm
 
   # Update cluster role bindings that don't match the current defaults, avoid adding roles to the system:authenticated group
-  $ oadm policy reconcile-cluster-role-bindings --confirm --exclude-groups=system:authenticated
+  oadm policy reconcile-cluster-role-bindings --confirm --exclude-groups=system:authenticated
 
   # Update cluster role bindings that don't match the current defaults, removing any extra subjects from the binding
-  $ oadm policy reconcile-cluster-role-bindings --confirm --additive-only=false
+  oadm policy reconcile-cluster-role-bindings --confirm --additive-only=false
 ----
 ====
 
... ...
@@ -394,17 +394,17 @@ Update cluster roles to match the recommended bootstrap policy
 [options="nowrap"]
 ----
   # Display the names of cluster roles that would be modified
-  $ oadm policy reconcile-cluster-roles -o name
+  oadm policy reconcile-cluster-roles -o name
 
   # Add missing permissions to cluster roles that don't match the current defaults
-  $ oadm policy reconcile-cluster-roles --confirm
+  oadm policy reconcile-cluster-roles --confirm
 
-  # Add missing permissions and remove extra permissions from 
+  # Add missing permissions and remove extra permissions from
   # cluster roles that don't match the current defaults
-  $ oadm policy reconcile-cluster-roles --additive-only=false --confirm
+  oadm policy reconcile-cluster-roles --additive-only=false --confirm
 
   # Display the union of the default and modified cluster roles
-  $ oadm policy reconcile-cluster-roles --additive-only
+  oadm policy reconcile-cluster-roles --additive-only
 ----
 ====
 
... ...
@@ -417,14 +417,14 @@ Replace cluster SCCs to match the recommended bootstrap policy
 [options="nowrap"]
 ----
   # Display the cluster SCCs that would be modified
-  $ oadm policy reconcile-sccs
+  oadm policy reconcile-sccs
 
   # Update cluster SCCs that don't match the current defaults preserving additional grants
   # for users and group and keeping any priorities that are already set
-  $ oadm policy reconcile-sccs --confirm
+  oadm policy reconcile-sccs --confirm
 
   # Replace existing users, groups, and priorities that do not match defaults
-  $ oadm policy reconcile-sccs --additive-only=false --confirm
+  oadm policy reconcile-sccs --additive-only=false --confirm
 ----
 ====
 
... ...
@@ -438,10 +438,10 @@ Remove old completed and failed builds
 ----
   # Dry run deleting older completed and failed builds and also including
   # all builds whose associated BuildConfig no longer exists
-  $ oadm prune builds --orphans
+  oadm prune builds --orphans
 
   # To actually perform the prune operation, the confirm flag must be appended
-  $ oadm prune builds --orphans --confirm
+  oadm prune builds --orphans --confirm
 ----
 ====
 
... ...
@@ -454,10 +454,10 @@ Remove old completed and failed deployments
 [options="nowrap"]
 ----
   # Dry run deleting all but the last complete deployment for every deployment config
-  $ oadm prune deployments --keep-complete=1
+  oadm prune deployments --keep-complete=1
 
   # To actually perform the prune operation, the confirm flag must be appended
-  $ oadm prune deployments --keep-complete=1 --confirm
+  oadm prune deployments --keep-complete=1 --confirm
 ----
 ====
 
... ...
@@ -470,16 +470,16 @@ Prune OpenShift groups referencing missing records on an external provider.
 [options="nowrap"]
 ----
   # Prune all orphaned groups
-  $ oadm prune groups --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oadm prune groups --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Prune all orphaned groups except the ones from the blacklist file
-  $ oadm prune groups --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oadm prune groups --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Prune all orphaned groups from a list of specific groups specified in a whitelist file
-  $ oadm prune groups --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oadm prune groups --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Prune all orphaned groups from a list of specific groups specified in a whitelist
-  $ oadm prune groups groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oadm prune groups groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
 ----
 ====
... ...
@@ -494,10 +494,10 @@ Remove unreferenced images
 ----
   # See, what the prune command would delete if only images more than an hour old and obsoleted
   # by 3 newer revisions under the same tag were considered.
-  $ oadm prune images --keep-tag-revisions=3 --keep-younger-than=60m
+  oadm prune images --keep-tag-revisions=3 --keep-younger-than=60m
 
   # To actually perform the prune operation, the confirm flag must be appended
-  $ oadm prune images --keep-tag-revisions=3 --keep-younger-than=60m --confirm
+  oadm prune images --keep-tag-revisions=3 --keep-younger-than=60m --confirm
 ----
 ====
 
... ...
@@ -510,16 +510,16 @@ Install the integrated Docker registry
 [options="nowrap"]
 ----
   # Check if default Docker registry ("docker-registry") has been created
-  $ oadm registry --dry-run
+  oadm registry --dry-run
 
   # See what the registry will look like if created
-  $ oadm registry -o yaml
+  oadm registry -o yaml
 
   # Create a registry with two replicas if it does not exist
-  $ oadm registry --replicas=2
+  oadm registry --replicas=2
 
   # Use a different registry image
-  $ oadm registry --images=myrepo/docker-registry:mytag
+  oadm registry --images=myrepo/docker-registry:mytag
 ----
 ====
 
... ...
@@ -532,19 +532,19 @@ Install a router
 [options="nowrap"]
 ----
   # Check the default router ("router")
-  $ oadm router --dry-run
+  oadm router --dry-run
 
   # See what the router would look like if created
-  $ oadm router -o yaml
+  oadm router -o yaml
 
   # Create a router with two replicas if it does not exist
-  $ oadm router router-west --replicas=2
+  oadm router router-west --replicas=2
 
   # Use a different router image
-  $ oadm router region-west --images=myrepo/somerouter:mytag
+  oadm router region-west --images=myrepo/somerouter:mytag
 
   # Run the router with a hint to the underlying implementation to _not_ expose statistics.
-  $ oadm router router-west --stats-port=0
+  oadm router router-west --stats-port=0
 
 ----
 ====
... ...
@@ -12,13 +12,13 @@ Output the inputs and dependencies of your builds
 [options="nowrap"]
 ----
   # Build the dependency tree for the 'latest' tag in <image-stream>
-  $ oc adm build-chain <image-stream>
+  oc adm build-chain <image-stream>
 
   # Build the dependency tree for 'v2' tag in dot format and visualize it via the dot utility
-  $ oc adm build-chain <image-stream>:v2 -o dot | dot -T svg -o deps.svg
+  oc adm build-chain <image-stream>:v2 -o dot | dot -T svg -o deps.svg
 
   # Build the dependency tree across all namespaces for the specified image stream tag found in 'test' namespace
-  $ oc adm build-chain <image-stream> -n test --all
+  oc adm build-chain <image-stream> -n test --all
 ----
 ====
 
... ...
@@ -31,10 +31,10 @@ Decrypt data encrypted with "oc adm ca encrypt"
 [options="nowrap"]
 ----
 	# Decrypt an encrypted file to a cleartext file:
-	$ oc adm ca decrypt --key=secret.key --in=secret.encrypted --out=secret.decrypted
+	oc adm ca decrypt --key=secret.key --in=secret.encrypted --out=secret.decrypted
 	
 	# Decrypt from stdin to stdout:
-	$ oc adm ca decrypt --key=secret.key < secret2.encrypted > secret2.decrypted
+	oc adm ca decrypt --key=secret.key < secret2.encrypted > secret2.decrypted
 
 ----
 ====
... ...
@@ -48,10 +48,10 @@ Encrypt data with AES-256-CBC encryption
 [options="nowrap"]
 ----
 	# Encrypt the content of secret.txt with a generated key:
-	$ oc adm ca encrypt --genkey=secret.key --in=secret.txt --out=secret.encrypted
+	oc adm ca encrypt --genkey=secret.key --in=secret.txt --out=secret.encrypted
 	
 	# Encrypt the content of secret2.txt with an existing key:
-	$ oc adm ca encrypt --key=secret.key < secret2.txt > secret2.encrypted
+	oc adm ca encrypt --key=secret.key < secret2.txt > secret2.encrypted
 
 ----
 ====
... ...
@@ -162,7 +162,7 @@ Add users to a group
 [options="nowrap"]
 ----
   # Add user1 and user2 to my-group
-  $ oc adm groups add-users my-group user1 user2
+  oc adm groups add-users my-group user1 user2
 ----
 ====
 
... ...
@@ -175,10 +175,10 @@ Create a new group
 [options="nowrap"]
 ----
   # Add a group with no users
-  $ oc adm groups new my-group
+  oc adm groups new my-group
 
   # Add a group with two users
-  $ oc adm groups new my-group user1 user2
+  oc adm groups new my-group user1 user2
 ----
 ====
 
... ...
@@ -191,16 +191,16 @@ Prune OpenShift groups referencing missing records on an external provider.
 [options="nowrap"]
 ----
   # Prune all orphaned groups
-  $ oc adm groups prune --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oc adm groups prune --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Prune all orphaned groups except the ones from the blacklist file
-  $ oc adm groups prune --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oc adm groups prune --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Prune all orphaned groups from a list of specific groups specified in a whitelist file
-  $ oc adm groups prune --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oc adm groups prune --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Prune all orphaned groups from a list of specific groups specified in a whitelist
-  $ oc adm groups prune groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oc adm groups prune groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
 ----
 ====
... ...
@@ -214,7 +214,7 @@ Remove users from a group
 [options="nowrap"]
 ----
   # Remove user1 and user2 from my-group
-  $ oc adm groups remove-users my-group user1 user2
+  oc adm groups remove-users my-group user1 user2
 ----
 ====
 
... ...
@@ -227,19 +227,19 @@ Sync OpenShift groups with records from an external provider.
 [options="nowrap"]
 ----
   # Sync all groups from an LDAP server
-  $ oc adm groups sync --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oc adm groups sync --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Sync all groups except the ones from the blacklist file from an LDAP server
-  $ oc adm groups sync --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oc adm groups sync --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Sync specific groups specified in a whitelist file with an LDAP server
-  $ oc adm groups sync --whitelist=/path/to/whitelist.txt --sync-config=/path/to/sync-config.yaml --confirm
+  oc adm groups sync --whitelist=/path/to/whitelist.txt --sync-config=/path/to/sync-config.yaml --confirm
 
   # Sync all OpenShift Groups that have been synced previously with an LDAP server
-  $ oc adm groups sync --type=openshift --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oc adm groups sync --type=openshift --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Sync specific OpenShift Groups if they have been synced previously with an LDAP server
-  $ oc adm groups sync groups/group1 groups/group2 groups/group3 --sync-config=/path/to/sync-config.yaml --confirm
+  oc adm groups sync groups/group1 groups/group2 groups/group3 --sync-config=/path/to/sync-config.yaml --confirm
 
 ----
 ====
... ...
@@ -253,21 +253,21 @@ Install an IP failover group to a set of nodes
 [options="nowrap"]
 ----
   # Check the default IP failover configuration ("ipfailover"):
-  $ oc adm ipfailover
+  oc adm ipfailover
 
   # See what the IP failover configuration would look like if it is created:
-  $ oc adm ipfailover -o json
+  oc adm ipfailover -o json
 
   # Create an IP failover configuration if it does not already exist:
-  $ oc adm ipfailover ipf --virtual-ips="10.1.1.1-4" --create
+  oc adm ipfailover ipf --virtual-ips="10.1.1.1-4" --create
 
   # Create an IP failover configuration on a selection of nodes labeled
   # "router=us-west-ha" (on 4 nodes with 7 virtual IPs monitoring a service
   # listening on port 80, such as the router process).
-  $ oc adm ipfailover ipfailover --selector="router=us-west-ha" --virtual-ips="1.2.3.4,10.1.1.100-104,5.6.7.8" --watch-port=80 --replicas=4 --create
+  oc adm ipfailover ipfailover --selector="router=us-west-ha" --virtual-ips="1.2.3.4,10.1.1.100-104,5.6.7.8" --watch-port=80 --replicas=4 --create
 
   # Use a different IP failover config image and see the configuration:
-  $ oc adm ipfailover ipf-alt --selector="hagroup=us-west-ha" --virtual-ips="1.2.3.4" -o yaml --images=myrepo/myipfailover:mytag
+  oc adm ipfailover ipf-alt --selector="hagroup=us-west-ha" --virtual-ips="1.2.3.4" -o yaml --images=myrepo/myipfailover:mytag
 ----
 ====
 
... ...
@@ -280,19 +280,19 @@ Manage nodes - list pods, evacuate, or mark ready
 [options="nowrap"]
 ----
 	# Block accepting any pods on given nodes
-	$ oc adm manage-node <mynode> --schedulable=false
+	oc adm manage-node <mynode> --schedulable=false
 
 	# Mark selected nodes as schedulable
-	$ oc adm manage-node --selector="<env=dev>" --schedulable=true
+	oc adm manage-node --selector="<env=dev>" --schedulable=true
 
 	# Migrate selected pods
-	$ oc adm manage-node <mynode> --evacuate --pod-selector="<service=myapp>"
+	oc adm manage-node <mynode> --evacuate --pod-selector="<service=myapp>"
 
 	# Show pods that will be migrated
-	$ oc adm manage-node <mynode> --evacuate --dry-run --pod-selector="<service=myapp>"
+	oc adm manage-node <mynode> --evacuate --dry-run --pod-selector="<service=myapp>"
 
 	# List all pods on given nodes
-	$ oc adm manage-node <mynode1> <mynode2> --list-pods
+	oc adm manage-node <mynode1> <mynode2> --list-pods
 ----
 ====
 
... ...
@@ -337,10 +337,10 @@ Add users or serviceaccounts to a role in the current project
 [options="nowrap"]
 ----
   # Add the 'view' role to user1 in the current project
-  $ oc adm policy add-role-to-user view user1
+  oc adm policy add-role-to-user view user1
 
   # Add the 'edit' role to serviceaccount1 in the current project
-  $ oc adm policy add-role-to-user edit -z serviceaccount1
+  oc adm policy add-role-to-user edit -z serviceaccount1
 ----
 ====
 
... ...
@@ -353,10 +353,10 @@ Add users or serviceaccount to a security context constraint
 [options="nowrap"]
 ----
   # Add the 'restricted' security context contraint to user1 and user2
-  $ oc adm policy add-scc-to-user restricted user1 user2
+  oc adm policy add-scc-to-user restricted user1 user2
 
   # Add the 'privileged' security context contraint to the service account serviceaccount1 in the current namespace
-  $ oc adm policy add-scc-to-user privileged -z serviceaccount1
+  oc adm policy add-scc-to-user privileged -z serviceaccount1
 ----
 ====
 
... ...
@@ -369,19 +369,19 @@ Update cluster role bindings to match the recommended bootstrap policy
 [options="nowrap"]
 ----
   # Display the names of cluster role bindings that would be modified
-  $ oc adm policy reconcile-cluster-role-bindings -o name
+  oc adm policy reconcile-cluster-role-bindings -o name
 
   # Display the cluster role bindings that would be modified, removing any extra subjects
-  $ oc adm policy reconcile-cluster-role-bindings --additive-only=false
+  oc adm policy reconcile-cluster-role-bindings --additive-only=false
 
   # Update cluster role bindings that don't match the current defaults
-  $ oc adm policy reconcile-cluster-role-bindings --confirm
+  oc adm policy reconcile-cluster-role-bindings --confirm
 
   # Update cluster role bindings that don't match the current defaults, avoid adding roles to the system:authenticated group
-  $ oc adm policy reconcile-cluster-role-bindings --confirm --exclude-groups=system:authenticated
+  oc adm policy reconcile-cluster-role-bindings --confirm --exclude-groups=system:authenticated
 
   # Update cluster role bindings that don't match the current defaults, removing any extra subjects from the binding
-  $ oc adm policy reconcile-cluster-role-bindings --confirm --additive-only=false
+  oc adm policy reconcile-cluster-role-bindings --confirm --additive-only=false
 ----
 ====
 
... ...
@@ -394,17 +394,17 @@ Update cluster roles to match the recommended bootstrap policy
 [options="nowrap"]
 ----
   # Display the names of cluster roles that would be modified
-  $ oc adm policy reconcile-cluster-roles -o name
+  oc adm policy reconcile-cluster-roles -o name
 
   # Add missing permissions to cluster roles that don't match the current defaults
-  $ oc adm policy reconcile-cluster-roles --confirm
+  oc adm policy reconcile-cluster-roles --confirm
 
-  # Add missing permissions and remove extra permissions from 
+  # Add missing permissions and remove extra permissions from
   # cluster roles that don't match the current defaults
-  $ oc adm policy reconcile-cluster-roles --additive-only=false --confirm
+  oc adm policy reconcile-cluster-roles --additive-only=false --confirm
 
   # Display the union of the default and modified cluster roles
-  $ oc adm policy reconcile-cluster-roles --additive-only
+  oc adm policy reconcile-cluster-roles --additive-only
 ----
 ====
 
... ...
@@ -417,14 +417,14 @@ Replace cluster SCCs to match the recommended bootstrap policy
 [options="nowrap"]
 ----
   # Display the cluster SCCs that would be modified
-  $ oc adm policy reconcile-sccs
+  oc adm policy reconcile-sccs
 
   # Update cluster SCCs that don't match the current defaults preserving additional grants
   # for users and group and keeping any priorities that are already set
-  $ oc adm policy reconcile-sccs --confirm
+  oc adm policy reconcile-sccs --confirm
 
   # Replace existing users, groups, and priorities that do not match defaults
-  $ oc adm policy reconcile-sccs --additive-only=false --confirm
+  oc adm policy reconcile-sccs --additive-only=false --confirm
 ----
 ====
 
... ...
@@ -438,10 +438,10 @@ Remove old completed and failed builds
 ----
   # Dry run deleting older completed and failed builds and also including
   # all builds whose associated BuildConfig no longer exists
-  $ oc adm prune builds --orphans
+  oc adm prune builds --orphans
 
   # To actually perform the prune operation, the confirm flag must be appended
-  $ oc adm prune builds --orphans --confirm
+  oc adm prune builds --orphans --confirm
 ----
 ====
 
... ...
@@ -454,10 +454,10 @@ Remove old completed and failed deployments
 [options="nowrap"]
 ----
   # Dry run deleting all but the last complete deployment for every deployment config
-  $ oc adm prune deployments --keep-complete=1
+  oc adm prune deployments --keep-complete=1
 
   # To actually perform the prune operation, the confirm flag must be appended
-  $ oc adm prune deployments --keep-complete=1 --confirm
+  oc adm prune deployments --keep-complete=1 --confirm
 ----
 ====
 
... ...
@@ -470,16 +470,16 @@ Prune OpenShift groups referencing missing records on an external provider.
 [options="nowrap"]
 ----
   # Prune all orphaned groups
-  $ oc adm prune groups --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oc adm prune groups --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Prune all orphaned groups except the ones from the blacklist file
-  $ oc adm prune groups --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oc adm prune groups --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Prune all orphaned groups from a list of specific groups specified in a whitelist file
-  $ oc adm prune groups --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oc adm prune groups --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
   # Prune all orphaned groups from a list of specific groups specified in a whitelist
-  $ oc adm prune groups groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm
+  oc adm prune groups groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm
 
 ----
 ====
... ...
@@ -494,10 +494,10 @@ Remove unreferenced images
 ----
   # See, what the prune command would delete if only images more than an hour old and obsoleted
   # by 3 newer revisions under the same tag were considered.
-  $ oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m
+  oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m
 
   # To actually perform the prune operation, the confirm flag must be appended
-  $ oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m --confirm
+  oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m --confirm
 ----
 ====
 
... ...
@@ -510,16 +510,16 @@ Install the integrated Docker registry
 [options="nowrap"]
 ----
   # Check if default Docker registry ("docker-registry") has been created
-  $ oc adm registry --dry-run
+  oc adm registry --dry-run
 
   # See what the registry will look like if created
-  $ oc adm registry -o yaml
+  oc adm registry -o yaml
 
   # Create a registry with two replicas if it does not exist
-  $ oc adm registry --replicas=2
+  oc adm registry --replicas=2
 
   # Use a different registry image
-  $ oc adm registry --images=myrepo/docker-registry:mytag
+  oc adm registry --images=myrepo/docker-registry:mytag
 ----
 ====
 
... ...
@@ -532,19 +532,19 @@ Install a router
 [options="nowrap"]
 ----
   # Check the default router ("router")
-  $ oc adm router --dry-run
+  oc adm router --dry-run
 
   # See what the router would look like if created
-  $ oc adm router -o yaml
+  oc adm router -o yaml
 
   # Create a router with two replicas if it does not exist
-  $ oc adm router router-west --replicas=2
+  oc adm router router-west --replicas=2
 
   # Use a different router image
-  $ oc adm router region-west --images=myrepo/somerouter:mytag
+  oc adm router region-west --images=myrepo/somerouter:mytag
 
   # Run the router with a hint to the underlying implementation to _not_ expose statistics.
-  $ oc adm router router-west --stats-port=0
+  oc adm router router-west --stats-port=0
 
 ----
 ====
... ...
@@ -559,21 +559,21 @@ Update the annotations on a resource
 ----
   # Update pod 'foo' with the annotation 'description' and the value 'my frontend'.
   # If the same annotation is set multiple times, only the last value will be applied
-  $ oc annotate pods foo description='my frontend'
+  oc annotate pods foo description='my frontend'
 
   # Update pod 'foo' with the annotation 'description' and the value
   # 'my frontend running nginx', overwriting any existing value.
-  $ oc annotate --overwrite pods foo description='my frontend running nginx'
+  oc annotate --overwrite pods foo description='my frontend running nginx'
 
   # Update all pods in the namespace
-  $ oc annotate pods --all description='my frontend running nginx'
+  oc annotate pods --all description='my frontend running nginx'
 
   # Update pod 'foo' only if the resource is unchanged from version 1.
-  $ oc annotate pods foo description='my frontend running nginx' --resource-version=1
+  oc annotate pods foo description='my frontend running nginx' --resource-version=1
 
   # Update pod 'foo' by removing an annotation named 'description' if it exists.
   # Does not require the --overwrite flag.
-  $ oc annotate pods foo description-
+  oc annotate pods foo description-
 ----
 ====
 
... ...
@@ -586,10 +586,10 @@ Apply a configuration to a resource by filename or stdin
 [options="nowrap"]
 ----
 # Apply the configuration in pod.json to a pod.
-$ oc apply -f ./pod.json
+oc apply -f ./pod.json
 
 # Apply the JSON passed into stdin to a pod.
-$ cat pod.json | oc apply -f -
+cat pod.json | oc apply -f -
 ----
 ====
 
... ...
@@ -602,14 +602,14 @@ Attach to a running container.
 [options="nowrap"]
 ----
   # Get output from running pod 123456-7890, using the first container by default
-  $ oc attach 123456-7890
+  oc attach 123456-7890
 
   # Get output from ruby-container from pod 123456-7890
-  $ oc attach 123456-7890 -c ruby-container
+  oc attach 123456-7890 -c ruby-container
 
   # Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-780
   # and sends stdout/stderr from 'bash' back to the client
-  $ oc attach 123456-7890 -c ruby-container -i -t
+  oc attach 123456-7890 -c ruby-container -i -t
 ----
 ====
 
... ...
@@ -622,10 +622,10 @@ Autoscale a deployment config or replication controller
 [options="nowrap"]
 ----
   # Auto scale a deployment config "foo", with the number of pods between 2 to 10, target CPU utilization at a default value that server applies:
-  $ oc autoscale dc/foo --min=2 --max=10
+  oc autoscale dc/foo --min=2 --max=10
 
   # Auto scale a replication controller "foo", with the number of pods between 1 to 5, target CPU utilization at 80%
-  $ oc autoscale rc/foo --max=5 --cpu-percent=80
+  oc autoscale rc/foo --max=5 --cpu-percent=80
 ----
 ====
 
... ...
@@ -638,19 +638,19 @@ Cancel running, pending, or new builds
 [options="nowrap"]
 ----
   # Cancel the build with the given name
-  $ oc cancel-build ruby-build-2
+  oc cancel-build ruby-build-2
 
   # Cancel the named build and print the build logs
-  $ oc cancel-build ruby-build-2 --dump-logs
+  oc cancel-build ruby-build-2 --dump-logs
 
   # Cancel the named build and create a new one with the same parameters
-  $ oc cancel-build ruby-build-2 --restart
+  oc cancel-build ruby-build-2 --restart
 
   # Cancel multiple builds
-  $ oc cancel-build ruby-build-1 ruby-build-2 ruby-build-3
+  oc cancel-build ruby-build-1 ruby-build-2 ruby-build-3
 
   # Cancel all builds created from 'ruby-build' build configuration that are in 'new' state
-  $ oc cancel-build bc/ruby-build --state=new
+  oc cancel-build bc/ruby-build --state=new
 ----
 ====
 
... ...
@@ -760,14 +760,14 @@ Convert config files between different API versions
 [options="nowrap"]
 ----
 # Convert 'pod.yaml' to latest version and print to stdout.
-$ oc convert -f pod.yaml
+oc convert -f pod.yaml
 
 # Convert the live state of the resource specified by 'pod.yaml' to the latest version
 # and print to stdout in json format.
-$ oc convert -f pod.yaml --local -o json
+oc convert -f pod.yaml --local -o json
 
 # Convert all files under current directory to latest version and create them all.
-$ oc convert -f . | kubectl create -f -
+oc convert -f . | kubectl create -f -
 
 ----
 ====
... ...
@@ -781,10 +781,10 @@ Create a resource by filename or stdin
 [options="nowrap"]
 ----
   # Create a pod using the data in pod.json.
-  $ oc create -f pod.json
+  oc create -f pod.json
 
   # Create a pod based on the JSON passed into stdin.
-  $ cat pod.json | oc create -f -
+  cat pod.json | oc create -f -
 ----
 ====
 
... ...
@@ -816,7 +816,7 @@ Create deployment config with default options that uses a given image.
 [options="nowrap"]
 ----
   # Create an nginx deployment config named my-nginx
-  $ oc create deploymentconfig my-nginx --image=nginx
+  oc create deploymentconfig my-nginx --image=nginx
 ----
 ====
 
... ...
@@ -829,7 +829,7 @@ Manually create an identity (only needed if automatic creation is disabled).
 [options="nowrap"]
 ----
   # Create an identity with identity provider "acme_ldap" and the identity provider username "adamjones"
-  $ oc create identity acme_ldap:adamjones
+  oc create identity acme_ldap:adamjones
 ----
 ====
 
... ...
@@ -855,7 +855,7 @@ Create a policy binding that references the policy in the targetted namespace.
 [options="nowrap"]
 ----
   # Create a policy binding in namespace "foo" that references the policy in namespace "bar"
-  $ oc create policybinding bar -n foo
+  oc create policybinding bar -n foo
 ----
 ====
 
... ...
@@ -868,11 +868,11 @@ Create a route that uses edge TLS termination
 [options="nowrap"]
 ----
   # Create an edge route named "my-route" that exposes frontend service.
-  $ oc create route edge my-route --service=frontend
+  oc create route edge my-route --service=frontend
 
   # Create an edge route that exposes the frontend service and specify a path.
   # If the route name is omitted, the service name will be re-used.
-  $ oc create route edge --service=frontend --path /assets
+  oc create route edge --service=frontend --path /assets
 ----
 ====
 
... ...
@@ -885,11 +885,11 @@ Create a route that uses passthrough TLS termination
 [options="nowrap"]
 ----
   # Create a passthrough route named "my-route" that exposes the frontend service.
-  $ oc create route passthrough my-route --service=frontend
+  oc create route passthrough my-route --service=frontend
 
   # Create a passthrough route that exposes the frontend service and specify
   # a hostname. If the route name is omitted, the service name will be re-used.
-  $ oc create route passthrough --service=frontend --hostname=www.example.com
+  oc create route passthrough --service=frontend --hostname=www.example.com
 ----
 ====
 
... ...
@@ -902,11 +902,11 @@ Create a route that uses reencrypt TLS termination
 [options="nowrap"]
 ----
   # Create a route named "my-route" that exposes the frontend service.
-  $ oc create route reencrypt my-route --service=frontend --dest-ca-cert cert.cert
+  oc create route reencrypt my-route --service=frontend --dest-ca-cert cert.cert
 
   # Create a reencrypt route that exposes the frontend service and re-use
   # the service name as the route name.
-  $ oc create route reencrypt --service=frontend --dest-ca-cert cert.cert
+  oc create route reencrypt --service=frontend --dest-ca-cert cert.cert
 ----
 ====
 
... ...
@@ -964,7 +964,7 @@ Manually create a user (only needed if automatic creation is disabled).
 [options="nowrap"]
 ----
   # Create a user with the username "ajones" and the display name "Adam Jones"
-  $ oc create user ajones --full-name="Adam Jones"
+  oc create user ajones --full-name="Adam Jones"
 ----
 ====
 
... ...
@@ -977,7 +977,7 @@ Manually map an identity to a user.
 [options="nowrap"]
 ----
   # Map the identity "acme_ldap:adamjones" to the user "ajones"
-  $ oc create useridentitymapping acme_ldap:adamjones ajones
+  oc create useridentitymapping acme_ldap:adamjones ajones
 ----
 ====
 
... ...
@@ -991,13 +991,13 @@ Launch a new instance of a pod for debugging
 ----
 
   # Debug a currently running deployment
-  $ oc debug dc/test
+  oc debug dc/test
 
   # Debug a specific failing container by running the env command in the 'second' container
-  $ oc debug dc/test -c second -- /bin/env
+  oc debug dc/test -c second -- /bin/env
 
   # See the pod that would be created to debug
-  $ oc debug dc/test -o yaml
+  oc debug dc/test -o yaml
 ----
 ====
 
... ...
@@ -1010,19 +1010,19 @@ Delete one or more resources
 [options="nowrap"]
 ----
   # Delete a pod using the type and ID specified in pod.json.
-  $ oc delete -f pod.json
+  oc delete -f pod.json
 
   # Delete a pod based on the type and ID in the JSON passed into stdin.
-  $ cat pod.json | oc delete -f -
+  cat pod.json | oc delete -f -
 
   # Delete pods and services with label name=myLabel.
-  $ oc delete pods,services -l name=myLabel
+  oc delete pods,services -l name=myLabel
 
   # Delete a pod with ID 1234-56-7890-234234-456456.
-  $ oc delete pod 1234-56-7890-234234-456456
+  oc delete pod 1234-56-7890-234234-456456
 
   # Delete all pods
-  $ oc delete pods --all
+  oc delete pods --all
 ----
 ====
 
... ...
@@ -1035,17 +1035,17 @@ View, start, cancel, or retry a deployment
 [options="nowrap"]
 ----
   # Display the latest deployment for the 'database' deployment config
-  $ oc deploy database
+  oc deploy database
 
   # Start a new deployment based on the 'database'
-  $ oc deploy database --latest
+  oc deploy database --latest
 
   # Retry the latest failed deployment based on 'frontend'
   # The deployer pod and any hook pods are deleted for the latest failed deployment
-  $ oc deploy frontend --retry
+  oc deploy frontend --retry
 
   # Cancel the in-progress deployment based on 'frontend'
-  $ oc deploy frontend --cancel
+  oc deploy frontend --cancel
 ----
 ====
 
... ...
@@ -1058,10 +1058,10 @@ Show details of a specific resource or group of resources
 [options="nowrap"]
 ----
   # Provide details about the ruby-22-centos7 image repository
-  $ oc describe imageRepository ruby-22-centos7
+  oc describe imageRepository ruby-22-centos7
 
   # Provide details about the ruby-sample-build build configuration
-  $ oc describe bc ruby-sample-build
+  oc describe bc ruby-sample-build
 ----
 ====
 
... ...
@@ -1074,16 +1074,16 @@ Edit a resource on the server
 [options="nowrap"]
 ----
   # Edit the service named 'docker-registry':
-  $ oc edit svc/docker-registry
+  oc edit svc/docker-registry
 
   # Edit the DeploymentConfig named 'my-deployment':
-  $ oc edit dc/my-deployment
+  oc edit dc/my-deployment
 
   # Use an alternative editor
-  $ OC_EDITOR="nano" oc edit dc/my-deployment
+  OC_EDITOR="nano" oc edit dc/my-deployment
 
   # Edit the service 'docker-registry' in JSON using the v1beta3 API format:
-  $ oc edit svc/docker-registry --output-version=v1beta3 -o json
+  oc edit svc/docker-registry --output-version=v1beta3 -o json
 ----
 ====
 
... ...
@@ -1096,29 +1096,29 @@ DEPRECATED: set env
 [options="nowrap"]
 ----
   # Update deployment 'registry' with a new environment variable
-  $ oc env dc/registry STORAGE_DIR=/local
+  oc env dc/registry STORAGE_DIR=/local
 
   # List the environment variables defined on a build config 'sample-build'
-  $ oc env bc/sample-build --list
+  oc env bc/sample-build --list
 
   # List the environment variables defined on all pods
-  $ oc env pods --all --list
+  oc env pods --all --list
 
   # Output modified build config in YAML, and does not alter the object on the server
-  $ oc env bc/sample-build STORAGE_DIR=/data -o yaml
+  oc env bc/sample-build STORAGE_DIR=/data -o yaml
 
   # Update all containers in all replication controllers in the project to have ENV=prod
-  $ oc env rc --all ENV=prod
+  oc env rc --all ENV=prod
 
   # Remove the environment variable ENV from container 'c1' in all deployment configs
-  $ oc env dc --all --containers="c1" ENV-
+  oc env dc --all --containers="c1" ENV-
 
   # Remove the environment variable ENV from a deployment config definition on disk and
   # update the deployment config on the server
-  $ oc env -f dc.json ENV-
+  oc env -f dc.json ENV-
 
   # Set some of the local shell environment into a deployment config on the server
-  $ env | grep RAILS_ | oc env -e - dc/registry
+  env | grep RAILS_ | oc env -e - dc/registry
 ----
 ====
 
... ...
@@ -1131,7 +1131,7 @@ Perform a direct Docker build
 [options="nowrap"]
 ----
   # Build the current directory into a single layer and tag
-  $ oc dockerbuild . myimage:latest
+  oc dockerbuild . myimage:latest
 ----
 ====
 
... ...
@@ -1144,10 +1144,10 @@ Execute a command in a container.
 [options="nowrap"]
 ----
   # Get output from running 'date' in ruby-container from pod 123456-7890
-  $ oc exec -p 123456-7890 -c ruby-container date
+  oc exec -p 123456-7890 -c ruby-container date
 
   # Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-780 and sends stdout/stderr from 'bash' back to the client
-  $ oc exec -p 123456-7890 -c ruby-container -i -t -- bash -il
+  oc exec -p 123456-7890 -c ruby-container -i -t -- bash -il
 ----
 ====
 
... ...
@@ -1160,10 +1160,10 @@ Documentation of resources.
 [options="nowrap"]
 ----
 # Get the documentation of the resource and its fields
-$ oc explain pods
+oc explain pods
 
 # Get the documentation of a specific field of a resource
-$ oc explain pods.spec.containers
+oc explain pods.spec.containers
 ----
 ====
 
... ...
@@ -1198,19 +1198,19 @@ Expose a replicated application as a service or route
 [options="nowrap"]
 ----
   # Create a route based on service nginx. The new route will re-use nginx's labels
-  $ oc expose service nginx
+  oc expose service nginx
 
   # Create a route and specify your own label and route name
-  $ oc expose service nginx -l name=myroute --name=fromdowntown
+  oc expose service nginx -l name=myroute --name=fromdowntown
 
   # Create a route and specify a hostname
-  $ oc expose service nginx --hostname=www.example.com
+  oc expose service nginx --hostname=www.example.com
 
   # Expose a deployment configuration as a service and use the specified port
-  $ oc expose dc ruby-hello-world --port=8080
+  oc expose dc ruby-hello-world --port=8080
 
   # Expose a service as a route in the specified path
-  $ oc expose service nginx --path=/nginx
+  oc expose service nginx --path=/nginx
 ----
 ====
 
... ...
@@ -1223,19 +1223,19 @@ Display one or many resources
 [options="nowrap"]
 ----
   # List all pods in ps output format.
-  $ oc get pods
+  oc get pods
 
   # List a single replication controller with specified ID in ps output format.
-  $ oc get rc redis
+  oc get rc redis
 
   # List all pods and show more details about them.
-  $ oc get -o wide pods
+  oc get -o wide pods
 
   # List a single pod in JSON output format.
-  $ oc get -o json pod redis-pod
+  oc get -o json pod redis-pod
 
   # Return only the status value of the specified pod.
-  $ oc get -o template pod redis-pod --template={{.currentState.status}}
+  oc get -o template pod redis-pod --template={{.currentState.status}}
 ----
 ====
 
... ...
@@ -1248,10 +1248,10 @@ Import a docker-compose.yml project into OpenShift
 [options="nowrap"]
 ----
   # Import a docker-compose.yml file into OpenShift
-  $ oc import docker-compose -f ./docker-compose.yml
+  oc import docker-compose -f ./docker-compose.yml
 
   # Turn a docker-compose.yml file into a template
-  $ oc import docker-compose -f ./docker-compose.yml -o yaml --as-template
+  oc import docker-compose -f ./docker-compose.yml -o yaml --as-template
 
 ----
 ====
... ...
@@ -1264,7 +1264,7 @@ Imports images from a Docker registry
 
 [options="nowrap"]
 ----
-  $ oc import-image mystream
+  oc import-image mystream
 ----
 ====
 
... ...
@@ -1277,20 +1277,20 @@ Update the labels on a resource
 [options="nowrap"]
 ----
   # Update pod 'foo' with the label 'unhealthy' and the value 'true'.
-  $ oc label pods foo unhealthy=true
+  oc label pods foo unhealthy=true
 
   # Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value.
-  $ oc label --overwrite pods foo status=unhealthy
+  oc label --overwrite pods foo status=unhealthy
 
   # Update all pods in the namespace
-  $ oc label pods --all status=unhealthy
+  oc label pods --all status=unhealthy
 
   # Update pod 'foo' only if the resource is unchanged from version 1.
-  $ oc label pods foo status=unhealthy --resource-version=1
+  oc label pods foo status=unhealthy --resource-version=1
 
   # Update pod 'foo' by removing a label named 'bar' if it exists.
   # Does not require the --overwrite flag.
-  $ oc label pods foo bar-
+  oc label pods foo bar-
 ----
 ====
 
... ...
@@ -1303,13 +1303,13 @@ Log in to a server
1303 1303
 [options="nowrap"]
1304 1304
 ----
1305 1305
   # Log in interactively
1306
-  $ oc login
1306
+  oc login
1307 1307
 
1308 1308
   # Log in to the given server with the given certificate authority file
1309
-  $ oc login localhost:8443 --certificate-authority=/path/to/cert.crt
1309
+  oc login localhost:8443 --certificate-authority=/path/to/cert.crt
1310 1310
 
1311 1311
   # Log in to the given server with the given credentials (will not prompt interactively)
1312
-  $ oc login localhost:8443 --username=myuser --password=mypass
1312
+  oc login localhost:8443 --username=myuser --password=mypass
1313 1313
 ----
1314 1314
 ====
1315 1315
 
... ...
@@ -1323,7 +1323,7 @@ End the current server session
1323 1323
 ----
1324 1324
 
1325 1325
   # Logout
1326
-  $ oc logout
1326
+  oc logout
1327 1327
 ----
1328 1328
 ====
1329 1329
 
... ...
@@ -1336,21 +1336,21 @@ Print the logs for a resource.
1336 1336
 [options="nowrap"]
1337 1337
 ----
1338 1338
   # Start streaming the logs of the most recent build of the openldap build config.
1339
-  $ oc logs -f bc/openldap
1339
+  oc logs -f bc/openldap
1340 1340
 
1341 1341
   # Start streaming the logs of the latest deployment of the mysql deployment config.
1342
-  $ oc logs -f dc/mysql
1342
+  oc logs -f dc/mysql
1343 1343
 
1344 1344
   # Get the logs of the first deployment for the mysql deployment config. Note that logs
1345 1345
   # from older deployments may not exist either because the deployment was successful
1346 1346
   # or due to deployment pruning or manual deletion of the deployment.
1347
-  $ oc logs --version=1 dc/mysql
1347
+  oc logs --version=1 dc/mysql
1348 1348
 
1349 1349
   # Return a snapshot of ruby-container logs from pod backend.
1350
-  $ oc logs backend -c ruby-container
1350
+  oc logs backend -c ruby-container
1351 1351
 
1352 1352
   # Start streaming of ruby-container logs from pod backend.
1353
-  $ oc logs -f pod/backend -c ruby-container
1353
+  oc logs -f pod/backend -c ruby-container
1354 1354
 ----
1355 1355
 ====
1356 1356
 
... ...
@@ -1364,45 +1364,45 @@ Create a new application
1364 1364
 ----
1365 1365
 
1366 1366
   # List all local templates and image streams that can be used to create an app
1367
-  $ oc new-app --list
1367
+  oc new-app --list
1368 1368
 
1369 1369
   # Search all templates, image streams, and Docker images for the ones that match "ruby"
1370
-  $ oc new-app --search ruby
1370
+  oc new-app --search ruby
1371 1371
 
1372 1372
   # Create an application based on the source code in the current git repository (with a public remote)
1373 1373
   # and a Docker image
1374
-  $ oc new-app . --docker-image=repo/langimage
1374
+  oc new-app . --docker-image=repo/langimage
1375 1375
 
1376 1376
   # Create a Ruby application based on the provided [image]~[source code] combination
1377
-  $ oc new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git
1377
+  oc new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git
1378 1378
 
1379 1379
   # Use the public Docker Hub MySQL image to create an app. Generated artifacts will be labeled with db=mysql
1380
-  $ oc new-app mysql MYSQL_USER=user MYSQL_PASSWORD=pass MYSQL_DATABASE=testdb -l db=mysql
1380
+  oc new-app mysql MYSQL_USER=user MYSQL_PASSWORD=pass MYSQL_DATABASE=testdb -l db=mysql
1381 1381
 
1382 1382
   # Use a MySQL image in a private registry to create an app and override application artifacts' names
1383
-  $ oc new-app --docker-image=myregistry.com/mycompany/mysql --name=private
1383
+  oc new-app --docker-image=myregistry.com/mycompany/mysql --name=private
1384 1384
 
1385 1385
   # Create an application from a remote repository using its beta4 branch
1386
-  $ oc new-app https://github.com/openshift/ruby-hello-world#beta4
1386
+  oc new-app https://github.com/openshift/ruby-hello-world#beta4
1387 1387
 
1388 1388
   # Create an application based on a stored template, explicitly setting a parameter value
1389
-  $ oc new-app --template=ruby-helloworld-sample --param=MYSQL_USER=admin
1389
+  oc new-app --template=ruby-helloworld-sample --param=MYSQL_USER=admin
1390 1390
 
1391 1391
   # Create an application from a remote repository and specify a context directory
1392
-  $ oc new-app https://github.com/youruser/yourgitrepo --context-dir=src/build
1392
+  oc new-app https://github.com/youruser/yourgitrepo --context-dir=src/build
1393 1393
 
1394 1394
   # Create an application based on a template file, explicitly setting a parameter value
1395
-  $ oc new-app --file=./example/myapp/template.json --param=MYSQL_USER=admin
1395
+  oc new-app --file=./example/myapp/template.json --param=MYSQL_USER=admin
1396 1396
 
1397 1397
   # Search for "mysql" in all image repositories and stored templates
1398
-  $ oc new-app --search mysql
1398
+  oc new-app --search mysql
1399 1399
 
1400 1400
   # Search for "ruby", but only in stored templates (--template, --image and --docker-image
1401 1401
   # can be used to filter search results)
1402
-  $ oc new-app --search --template=ruby
1402
+  oc new-app --search --template=ruby
1403 1403
 
1404 1404
   # Search for "ruby" in stored templates and print the output as an YAML
1405
-  $ oc new-app --search --template=ruby --output=yaml
1405
+  oc new-app --search --template=ruby --output=yaml
1406 1406
 ----
1407 1407
 ====
1408 1408
 
... ...
@@ -1417,25 +1417,25 @@ Create a new build configuration
1417 1417
 
1418 1418
   # Create a build config based on the source code in the current git repository (with a public
1419 1419
   # remote) and a Docker image
1420
-  $ oc new-build . --docker-image=repo/langimage
1420
+  oc new-build . --docker-image=repo/langimage
1421 1421
 
1422 1422
   # Create a NodeJS build config based on the provided [image]~[source code] combination
1423
-  $ oc new-build openshift/nodejs-010-centos7~https://github.com/openshift/nodejs-ex.git
1423
+  oc new-build openshift/nodejs-010-centos7~https://github.com/openshift/nodejs-ex.git
1424 1424
 
1425 1425
   # Create a build config from a remote repository using its beta2 branch
1426
-  $ oc new-build https://github.com/openshift/ruby-hello-world#beta2
1426
+  oc new-build https://github.com/openshift/ruby-hello-world#beta2
1427 1427
 
1428 1428
   # Create a build config using a Dockerfile specified as an argument
1429
-  $ oc new-build -D $'FROM centos:7\nRUN yum install -y httpd'
1429
+  oc new-build -D $'FROM centos:7\nRUN yum install -y httpd'
1430 1430
 
1431 1431
   # Create a build config from a remote repository and add custom environment variables
1432
-  $ oc new-build https://github.com/openshift/ruby-hello-world RACK_ENV=development
1432
+  oc new-build https://github.com/openshift/ruby-hello-world RACK_ENV=development
1433 1433
 
1434 1434
   # Create a build config from a remote repository and inject the npmrc into a build
1435
-  $ oc new-build https://github.com/openshift/ruby-hello-world --build-secret npmrc:.npmrc
1435
+  oc new-build https://github.com/openshift/ruby-hello-world --build-secret npmrc:.npmrc
1436 1436
 
1437 1437
   # Create a build config that gets its input from a remote repository and another Docker image
1438
-  $ oc new-build https://github.com/openshift/ruby-hello-world --source-image=openshift/jenkins-1-centos7 --source-image-path=/var/lib/jenkins:tmp
1438
+  oc new-build https://github.com/openshift/ruby-hello-world --source-image=openshift/jenkins-1-centos7 --source-image-path=/var/lib/jenkins:tmp
1439 1439
 ----
1440 1440
 ====
1441 1441
 
... ...
@@ -1448,10 +1448,10 @@ Request a new project
1448 1448
 [options="nowrap"]
1449 1449
 ----
1450 1450
   # Create a new project with minimal information
1451
-  $ oc new-project web-team-dev
1451
+  oc new-project web-team-dev
1452 1452
 
1453 1453
   # Create a new project with a display name and description
1454
-  $ oc new-project web-team-dev --display-name="Web Team Development" --description="Development project for the web team."
1454
+  oc new-project web-team-dev --display-name="Web Team Development" --description="Development project for the web team."
1455 1455
 ----
1456 1456
 ====
1457 1457
 
... ...
@@ -1464,7 +1464,7 @@ Update field(s) of a resource using strategic merge patch.
1464 1464
 [options="nowrap"]
1465 1465
 ----
1466 1466
   # Partially update a node using strategic merge patch
1467
-  $ oc patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}'
1467
+  oc patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}'
1468 1468
 ----
1469 1469
 ====
1470 1470
 
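The strategic-merge syntax above works against any resource type, not just nodes. A sketch with a hypothetical deployment config named 'frontend':

[options="nowrap"]
----
  # Set the replica count on the 'frontend' deployment config
  oc patch dc/frontend -p '{"spec":{"replicas":3}}'
----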
... ...
@@ -1477,10 +1477,10 @@ Add users or serviceaccounts to a role in the current project
1477 1477
 [options="nowrap"]
1478 1478
 ----
1479 1479
   # Add the 'view' role to user1 in the current project
1480
-  $ oc policy add-role-to-user view user1
1480
+  oc policy add-role-to-user view user1
1481 1481
 
1482 1482
   # Add the 'edit' role to serviceaccount1 in the current project
1483
-  $ oc policy add-role-to-user edit -z serviceaccount1
1483
+  oc policy add-role-to-user edit -z serviceaccount1
1484 1484
 ----
1485 1485
 ====
1486 1486
 
... ...
@@ -1493,16 +1493,16 @@ Forward one or more local ports to a pod.
1493 1493
 [options="nowrap"]
1494 1494
 ----
1495 1495
   # Listens on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod
1496
-  $ oc port-forward -p mypod 5000 6000
1496
+  oc port-forward -p mypod 5000 6000
1497 1497
 
1498 1498
   # Listens on port 8888 locally, forwarding to 5000 in the pod
1499
-  $ oc port-forward -p mypod 8888:5000
1499
+  oc port-forward -p mypod 8888:5000
1500 1500
 
1501 1501
   # Listens on a random port locally, forwarding to 5000 in the pod
1502
-  $ oc port-forward -p mypod :5000
1502
+  oc port-forward -p mypod :5000
1503 1503
 
1504 1504
   # Listens on a random port locally, forwarding to 5000 in the pod
1505
-  $ oc port-forward -p mypod 0:5000
1505
+  oc port-forward -p mypod 0:5000
1506 1506
 ----
1507 1507
 ====
1508 1508
 
... ...
@@ -1515,25 +1515,25 @@ Process a template into list of resources
1515 1515
 [options="nowrap"]
1516 1516
 ----
1517 1517
   # Convert template.json file into resource list and pass to create
1518
-  $ oc process -f template.json | oc create -f -
1518
+  oc process -f template.json | oc create -f -
1519 1519
 
1520 1520
   # Process template while passing a user-defined label
1521
-  $ oc process -f template.json -l name=mytemplate
1521
+  oc process -f template.json -l name=mytemplate
1522 1522
 
1523 1523
   # Convert stored template into resource list
1524
-  $ oc process foo
1524
+  oc process foo
1525 1525
 
1526 1526
   # Convert stored template into resource list by setting/overriding parameter values
1527
-  $ oc process foo PARM1=VALUE1 PARM2=VALUE2
1527
+  oc process foo PARM1=VALUE1 PARM2=VALUE2
1528 1528
 
1529 1529
   # Convert template stored in different namespace into a resource list
1530
-  $ oc process openshift//foo
1530
+  oc process openshift//foo
1531 1531
 
1532 1532
   # Convert template.json into resource list
1533
-  $ cat template.json | oc process -f -
1533
+  cat template.json | oc process -f -
1534 1534
 
1535 1535
   # Combine multiple templates into single resource list
1536
-  $ cat template.json second_template.json | oc process -f -
1536
+  cat template.json second_template.json | oc process -f -
1537 1537
 ----
1538 1538
 ====
1539 1539
 
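The patterns above compose. A sketch combining two of them, reusing the 'ruby-helloworld-sample' template and MYSQL_USER parameter from the new-app examples earlier in this document:

[options="nowrap"]
----
  # Process a stored template with an overridden parameter and create the result
  oc process ruby-helloworld-sample MYSQL_USER=admin | oc create -f -
----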
... ...
@@ -1546,10 +1546,10 @@ Switch to another project
1546 1546
 [options="nowrap"]
1547 1547
 ----
1548 1548
   # Switch to 'myapp' project
1549
-  $ oc project myapp
1549
+  oc project myapp
1550 1550
 
1551 1551
   # Display the project currently in use
1552
-  $ oc project
1552
+  oc project
1553 1553
 ----
1554 1554
 ====
1555 1555
 
... ...
@@ -1562,11 +1562,11 @@ Run a proxy to the Kubernetes API server
1562 1562
 [options="nowrap"]
1563 1563
 ----
1564 1564
   # Run a proxy to kubernetes apiserver on port 8011, serving static content from ./local/www/
1565
-  $ oc proxy --port=8011 --www=./local/www/
1565
+  oc proxy --port=8011 --www=./local/www/
1566 1566
 
1567 1567
   # Run a proxy to kubernetes apiserver, changing the api prefix to k8s-api
1568 1568
   # This makes e.g. the pods api available at localhost:8011/k8s-api/v1beta3/pods/
1569
-  $ oc proxy --api-prefix=k8s-api
1569
+  oc proxy --api-prefix=k8s-api
1570 1570
 ----
1571 1571
 ====
1572 1572
 
... ...
@@ -1579,13 +1579,13 @@ Replace a resource by filename or stdin.
1579 1579
 [options="nowrap"]
1580 1580
 ----
1581 1581
   # Replace a pod using the data in pod.json.
1582
-  $ oc replace -f pod.json
1582
+  oc replace -f pod.json
1583 1583
 
1584 1584
   # Replace a pod based on the JSON passed into stdin.
1585
-  $ cat pod.json | oc replace -f -
1585
+  cat pod.json | oc replace -f -
1586 1586
 
1587 1587
   # Force replace, delete and then re-create the resource
1588
-  $ oc replace --force -f pod.json
1588
+  oc replace --force -f pod.json
1589 1589
 ----
1590 1590
 ====
1591 1591
 
... ...
@@ -1598,16 +1598,16 @@ Revert part of an application back to a previous deployment
1598 1598
 [options="nowrap"]
1599 1599
 ----
1600 1600
   # Perform a rollback to the last successfully completed deployment for a deploymentconfig
1601
-  $ oc rollback frontend
1601
+  oc rollback frontend
1602 1602
 
1603 1603
   # See what a rollback to version 3 will look like, but don't perform the rollback
1604
-  $ oc rollback frontend --to-version=3 --dry-run
1604
+  oc rollback frontend --to-version=3 --dry-run
1605 1605
 
1606 1606
   # Perform a rollback to a specific deployment
1607
-  $ oc rollback frontend-2
1607
+  oc rollback frontend-2
1608 1608
 
1609 1609
   # Perform the rollback manually by piping the JSON of the new config back to oc
1610
-  $ oc rollback frontend -o json | oc replace dc/frontend -f -
1610
+  oc rollback frontend -o json | oc replace dc/frontend -f -
1611 1611
 ----
1612 1612
 ====
1613 1613
 
... ...
@@ -1621,13 +1621,13 @@ Start a shell session in a pod
1621 1621
 ----
1622 1622
 
1623 1623
   # Open a shell session on the first container in pod 'foo'
1624
-  $ oc rsh foo
1624
+  oc rsh foo
1625 1625
 
1626 1626
   # Run the command 'cat /etc/resolv.conf' inside pod 'foo'
1627
-  $ oc rsh foo cat /etc/resolv.conf
1627
+  oc rsh foo cat /etc/resolv.conf
1628 1628
 
1629 1629
   # See the configuration of your internal registry
1630
-  $ oc rsh dc/docker-registry cat config.yml
1630
+  oc rsh dc/docker-registry cat config.yml
1631 1631
 
1632 1632
   # Open a shell session on the container named 'index' inside a pod of your job
1633 1633
   # oc rsh -c index job/scheduled
... ...
@@ -1644,10 +1644,10 @@ Copy files between local filesystem and a pod
1644 1644
 ----
1645 1645
 
1646 1646
   # Synchronize a local directory with a pod directory
1647
-  $ oc rsync ./local/dir/ POD:/remote/dir
1647
+  oc rsync ./local/dir/ POD:/remote/dir
1648 1648
 
1649 1649
   # Synchronize a pod directory with a local directory
1650
-  $ oc rsync POD:/remote/dir/ ./local/dir
1650
+  oc rsync POD:/remote/dir/ ./local/dir
1651 1651
 ----
1652 1652
 ====
1653 1653
 
... ...
@@ -1660,20 +1660,20 @@ Run a particular image on the cluster.
1660 1660
 [options="nowrap"]
1661 1661
 ----
1662 1662
   # Starts a single instance of nginx.
1663
-  $ oc run nginx --image=nginx
1663
+  oc run nginx --image=nginx
1664 1664
 
1665 1665
   # Starts a replicated instance of nginx.
1666
-  $ oc run nginx --image=nginx --replicas=5
1666
+  oc run nginx --image=nginx --replicas=5
1667 1667
 
1668 1668
   # Dry run. Print the corresponding API objects without creating them.
1669
-  $ oc run nginx --image=nginx --dry-run
1669
+  oc run nginx --image=nginx --dry-run
1670 1670
 
1671 1671
   # Start a single instance of nginx, but overload the spec of the replication
1672 1672
   # controller with a partial set of values parsed from JSON.
1673
-  $ oc run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }'
1673
+  oc run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }'
1674 1674
 
1675 1675
   # Start a single instance of nginx and keep it in the foreground, don't restart it if it exits.
1676
-  $ oc run -i --tty nginx --image=nginx --restart=Never
1676
+  oc run -i --tty nginx --image=nginx --restart=Never
1677 1677
 ----
1678 1678
 ====
1679 1679
 
... ...
@@ -1686,14 +1686,14 @@ Change the number of pods in a deployment
1686 1686
 [options="nowrap"]
1687 1687
 ----
1688 1688
   # Scale replication controller named 'foo' to 3.
1689
-  $ oc scale --replicas=3 replicationcontrollers foo
1689
+  oc scale --replicas=3 replicationcontrollers foo
1690 1690
 
1691 1691
   # If the replication controller named foo's current size is 2, scale foo to 3.
1692
-  $ oc scale --current-replicas=2 --replicas=3 replicationcontrollers foo
1692
+  oc scale --current-replicas=2 --replicas=3 replicationcontrollers foo
1693 1693
 
1694 1694
   # Scale the latest deployment of 'bar'. In case of no deployment, bar's template
1695 1695
   # will be scaled instead.
1696
-  $ oc scale --replicas=10 dc bar
1696
+  oc scale --replicas=10 dc bar
1697 1697
 ----
1698 1698
 ====
1699 1699
 
... ...
@@ -1706,13 +1706,13 @@ Add secrets to a ServiceAccount
1706 1706
 [options="nowrap"]
1707 1707
 ----
1708 1708
   // To use your secret inside a pod or as a push, pull, or source secret for a build, you must add a 'mount' secret to your service account like this:
1709
-  $ oc secrets add serviceaccount/sa-name secrets/secret-name secrets/another-secret-name
1709
+  oc secrets add serviceaccount/sa-name secrets/secret-name secrets/another-secret-name
1710 1710
 
1711 1711
   // To use your secret as an image pull secret, you must add a 'pull' secret to your service account like this:
1712
-  $ oc secrets add serviceaccount/sa-name secrets/secret-name --for=pull
1712
+  oc secrets add serviceaccount/sa-name secrets/secret-name --for=pull
1713 1713
 
1714 1714
   // To use your secret for image pulls or inside a pod:
1715
-  $ oc secrets add serviceaccount/sa-name secrets/secret-name --for=pull,mount
1715
+  oc secrets add serviceaccount/sa-name secrets/secret-name --for=pull,mount
1716 1716
 ----
1717 1717
 ====
1718 1718
 
... ...
@@ -1725,19 +1725,19 @@ Create a new secret based on a key file or on files within a directory
1725 1725
 [options="nowrap"]
1726 1726
 ----
1727 1727
   # Create a new secret named my-secret with a key named ssh-privatekey
1728
-  $ oc secrets new my-secret ~/.ssh/ssh-privatekey
1728
+  oc secrets new my-secret ~/.ssh/ssh-privatekey
1729 1729
 
1730 1730
   # Create a new secret named my-secret with keys named ssh-privatekey and ssh-publickey instead of the names of the keys on disk
1731
-  $ oc secrets new my-secret ssh-privatekey=~/.ssh/id_rsa ssh-publickey=~/.ssh/id_rsa.pub
1731
+  oc secrets new my-secret ssh-privatekey=~/.ssh/id_rsa ssh-publickey=~/.ssh/id_rsa.pub
1732 1732
 
1733 1733
   # Create a new secret named my-secret with keys for each file in the folder "bar"
1734
-  $ oc secrets new my-secret path/to/bar
1734
+  oc secrets new my-secret path/to/bar
1735 1735
 
1736 1736
   # Create a new .dockercfg secret named my-secret
1737
-  $ oc secrets new my-secret path/to/.dockercfg
1737
+  oc secrets new my-secret path/to/.dockercfg
1738 1738
 
1739 1739
   # Create a new .docker/config.json secret named my-secret
1740
-  $ oc secrets new my-secret .dockerconfigjson=path/to/.docker/config.json
1740
+  oc secrets new my-secret .dockerconfigjson=path/to/.docker/config.json
1741 1741
 ----
1742 1742
 ====
1743 1743
 
... ...
@@ -1750,13 +1750,13 @@ Create a new secret for basic authentication
1750 1750
 [options="nowrap"]
1751 1751
 ----
1752 1752
   // If your basic authentication method requires only a username and password or token, add it by using:
1753
-  $ oc secrets new-basicauth SECRET --username=USERNAME --password=PASSWORD
1753
+  oc secrets new-basicauth SECRET --username=USERNAME --password=PASSWORD
1754 1754
 
1755 1755
   // If your basic authentication method also requires a CA certificate, add it by using:
1756
-  $ oc secrets new-basicauth SECRET --username=USERNAME --password=PASSWORD --ca-cert=FILENAME
1756
+  oc secrets new-basicauth SECRET --username=USERNAME --password=PASSWORD --ca-cert=FILENAME
1757 1757
 
1758 1758
   // If you already have a .gitconfig file needed for authentication, you can create a gitconfig secret by using:
1759
-  $ oc secrets new SECRET path/to/.gitconfig
1759
+  oc secrets new SECRET path/to/.gitconfig
1760 1760
 ----
1761 1761
 ====
1762 1762
 
... ...
@@ -1769,16 +1769,16 @@ Create a new dockercfg secret
1769 1769
 [options="nowrap"]
1770 1770
 ----
1771 1771
   # Create a new .dockercfg secret:
1772
-  $ oc secrets new-dockercfg SECRET --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
1772
+  oc secrets new-dockercfg SECRET --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
1773 1773
 
1774 1774
   # Create a new .dockercfg secret from an existing file:
1775
-  $ oc secrets new SECRET path/to/.dockercfg
1775
+  oc secrets new SECRET path/to/.dockercfg
1776 1776
 
1777 1777
   # Create a new .docker/config.json secret from an existing file:
1778
-  $ oc secrets new SECRET .dockerconfigjson=path/to/.docker/config.json
1778
+  oc secrets new SECRET .dockerconfigjson=path/to/.docker/config.json
1779 1779
 
1780 1780
   # To add new secret to 'imagePullSecrets' for the node, or 'secrets' for builds, use:
1781
-  $ oc edit SERVICE_ACCOUNT
1781
+  oc edit SERVICE_ACCOUNT
1782 1782
 ----
1783 1783
 ====
1784 1784
 
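A dockercfg secret is usually wired to a service account afterwards so pods can pull with it. A sketch reusing the `oc secrets add` form shown earlier (the 'default' service account and SECRET name are placeholders):

[options="nowrap"]
----
  # Allow the 'default' service account to use SECRET for image pulls
  oc secrets add serviceaccount/default secrets/SECRET --for=pull
----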
... ...
@@ -1791,13 +1791,13 @@ Create a new secret for SSH authentication
1791 1791
 [options="nowrap"]
1792 1792
 ----
1793 1793
   // If your SSH authentication method requires only a private SSH key, add it by using:
1794
-  $ oc secrets new-sshauth SECRET --ssh-privatekey=FILENAME
1794
+  oc secrets new-sshauth SECRET --ssh-privatekey=FILENAME
1795 1795
 
1796 1796
   // If your SSH authentication method also requires a CA certificate, add it by using:
1797
-  $ oc secrets new-sshauth SECRET --ssh-privatekey=FILENAME --ca-cert=FILENAME
1797
+  oc secrets new-sshauth SECRET --ssh-privatekey=FILENAME --ca-cert=FILENAME
1798 1798
 
1799 1799
   // If you already have a .gitconfig file needed for authentication, you can create a gitconfig secret by using:
1800
-  $ oc secrets new SECRET path/to/.gitconfig
1800
+  oc secrets new SECRET path/to/.gitconfig
1801 1801
 ----
1802 1802
 ====
1803 1803
 
... ...
@@ -1810,7 +1810,7 @@ Get a token assigned to a service account.
1810 1810
 [options="nowrap"]
1811 1811
 ----
1812 1812
   # Get the service account token from service account 'default'
1813
-  $ oc serviceaccounts get-token 'default'
1813
+  oc serviceaccounts get-token 'default'
1814 1814
 
1815 1815
 ----
1816 1816
 ====
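One common use of the printed token is authenticating as that service account. A sketch assuming the `--token` flag of `oc login` (an illustration, not part of the upstream help text):

[options="nowrap"]
----
  # Log in as the 'default' service account using its token
  oc login --token="$(oc serviceaccounts get-token default)"
----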
... ...
@@ -1824,7 +1824,7 @@ Generate a new token for a service account.
1824 1824
 [options="nowrap"]
1825 1825
 ----
1826 1826
   # Generate a new token for service account 'default'
1827
-  $ oc serviceaccounts new-token 'default'
1827
+  oc serviceaccounts new-token 'default'
1828 1828
 
1829 1829
   # Generate a new token for service account 'default' and apply 
1830 1830
   # labels 'foo' and 'bar' to the new token for identification
... ...
@@ -1842,29 +1842,29 @@ Update environment variables on a pod template
1842 1842
 [options="nowrap"]
1843 1843
 ----
1844 1844
   # Update deployment 'registry' with a new environment variable
1845
-  $ oc set env dc/registry STORAGE_DIR=/local
1845
+  oc set env dc/registry STORAGE_DIR=/local
1846 1846
 
1847 1847
   # List the environment variables defined on a build config 'sample-build'
1848
-  $ oc set env bc/sample-build --list
1848
+  oc set env bc/sample-build --list
1849 1849
 
1850 1850
   # List the environment variables defined on all pods
1851
-  $ oc set env pods --all --list
1851
+  oc set env pods --all --list
1852 1852
 
1853 1853
   # Output the modified build config in YAML without altering the object on the server
1854
-  $ oc set env bc/sample-build STORAGE_DIR=/data -o yaml
1854
+  oc set env bc/sample-build STORAGE_DIR=/data -o yaml
1855 1855
 
1856 1856
   # Update all containers in all replication controllers in the project to have ENV=prod
1857
-  $ oc set env rc --all ENV=prod
1857
+  oc set env rc --all ENV=prod
1858 1858
 
1859 1859
   # Remove the environment variable ENV from container 'c1' in all deployment configs
1860
-  $ oc set env dc --all --containers="c1" ENV-
1860
+  oc set env dc --all --containers="c1" ENV-
1861 1861
 
1862 1862
   # Remove the environment variable ENV from a deployment config definition on disk and
1863 1863
   # update the deployment config on the server
1864
-  $ oc set env -f dc.json ENV-
1864
+  oc set env -f dc.json ENV-
1865 1865
 
1866 1866
   # Set some of the local shell environment into a deployment config on the server
1867
-  $ env | grep RAILS_ | oc set env -e - dc/registry
1867
+  env | grep RAILS_ | oc set env -e - dc/registry
1868 1868
 ----
1869 1869
 ====
1870 1870
 
... ...
@@ -1877,22 +1877,22 @@ Update a probe on a pod template
1877 1877
 [options="nowrap"]
1878 1878
 ----
1879 1879
   # Clear both readiness and liveness probes off all containers
1880
-  $ oc set probe dc/registry --remove --readiness --liveness
1880
+  oc set probe dc/registry --remove --readiness --liveness
1881 1881
 
1882 1882
   # Set an exec action as a liveness probe to run 'echo ok'
1883
-  $ oc set probe dc/registry --liveness -- echo ok
1883
+  oc set probe dc/registry --liveness -- echo ok
1884 1884
 
1885 1885
   # Set a readiness probe to try to open a TCP socket on 3306
1886
-  $ oc set probe rc/mysql --readiness --open-tcp=3306
1886
+  oc set probe rc/mysql --readiness --open-tcp=3306
1887 1887
 
1888 1888
   # Set an HTTP readiness probe for port 8080 and path /healthz over HTTP on the pod IP
1889
-  $ oc set probe dc/webapp --readiness --get-url=http://:8080/healthz
1889
+  oc set probe dc/webapp --readiness --get-url=http://:8080/healthz
1890 1890
 
1891 1891
   # Set an HTTP readiness probe over HTTPS on 127.0.0.1 for a hostNetwork pod
1892
-  $ oc set probe dc/router --readiness --get-url=https://127.0.0.1:1936/stats
1892
+  oc set probe dc/router --readiness --get-url=https://127.0.0.1:1936/stats
1893 1893
 
1894 1894
   # Set only the initial-delay-seconds field on all deployments
1895
-  $ oc set probe dc --all --readiness --initial-delay-seconds=30
1895
+  oc set probe dc --all --readiness --initial-delay-seconds=30
1896 1896
 ----
1897 1897
 ====
1898 1898
 
... ...
@@ -1905,26 +1905,26 @@ Update the triggers on a build or deployment config
1905 1905
 [options="nowrap"]
1906 1906
 ----
1907 1907
   # Print the triggers on the registry
1908
-  $ oc set triggers dc/registry
1908
+  oc set triggers dc/registry
1909 1909
 
1910 1910
   # Set all triggers to manual
1911
-  $ oc set triggers dc/registry --manual
1911
+  oc set triggers dc/registry --manual
1912 1912
 
1913 1913
   # Enable all automatic triggers
1914
-  $ oc set triggers dc/registry --auto
1914
+  oc set triggers dc/registry --auto
1915 1915
 
1916 1916
   # Reset the GitHub webhook on a build to a new, generated secret
1917
-  $ oc set triggers bc/webapp --from-github
1918
-  $ oc set triggers bc/webapp --from-webhook
1917
+  oc set triggers bc/webapp --from-github
1918
+  oc set triggers bc/webapp --from-webhook
1919 1919
 
1920 1920
   # Remove all triggers
1921
-  $ oc set triggers bc/webapp --remove-all
1921
+  oc set triggers bc/webapp --remove-all
1922 1922
 
1923 1923
   # Stop triggering on config change
1924
-  $ oc set triggers dc/registry --from-config --remove
1924
+  oc set triggers dc/registry --from-config --remove
1925 1925
 
1926 1926
   # Add an image trigger to a build config
1927
-  $ oc set triggers bc/webapp --from-image=namespace1/image:latest
1927
+  oc set triggers bc/webapp --from-image=namespace1/image:latest
1928 1928
 ----
1929 1929
 ====
1930 1930
 
... ...
@@ -1937,31 +1937,31 @@ Update volumes on a pod template
1937 1937
 [options="nowrap"]
1938 1938
 ----
1939 1939
   # List volumes defined on all deployment configs in the current project
1940
-  $ oc set volume dc --all
1940
+  oc set volume dc --all
1941 1941
 
1942 1942
   # Add a new empty dir volume to deployment config (dc) 'registry' mounted under
1943 1943
   # /var/lib/registry
1944
-  $ oc set volume dc/registry --add --mount-path=/var/lib/registry
1944
+  oc set volume dc/registry --add --mount-path=/var/lib/registry
1945 1945
 
1946 1946
   # Use an existing persistent volume claim (pvc) to overwrite an existing volume 'v1'
1947
-  $ oc set volume dc/registry --add --name=v1 -t pvc --claim-name=pvc1 --overwrite
1947
+  oc set volume dc/registry --add --name=v1 -t pvc --claim-name=pvc1 --overwrite
1948 1948
 
1949 1949
   # Remove volume 'v1' from deployment config 'registry'
1950
-  $ oc set volume dc/registry --remove --name=v1
1950
+  oc set volume dc/registry --remove --name=v1
1951 1951
 
1952 1952
   # Create a new persistent volume claim that overwrites an existing volume 'v1'
1953
-  $ oc set volume dc/registry --add --name=v1 -t pvc --claim-size=1G --overwrite
1953
+  oc set volume dc/registry --add --name=v1 -t pvc --claim-size=1G --overwrite
1954 1954
 
1955 1955
   # Change the mount point for volume 'v1' to /data
1956
-  $ oc set volume dc/registry --add --name=v1 -m /data --overwrite
1956
+  oc set volume dc/registry --add --name=v1 -m /data --overwrite
1957 1957
 
1958 1958
   # Modify the deployment config by removing volume mount "v1" from container "c1"
1959 1959
   # (and by removing the volume "v1" if no other containers have volume mounts that reference it)
1960
-  $ oc set volume dc/registry --remove --name=v1 --containers=c1
1960
+  oc set volume dc/registry --remove --name=v1 --containers=c1
1961 1961
 
1962 1962
   # Add new volume based on a more complex volume source (Git repo, AWS EBS, GCE PD,
1963 1963
   # Ceph, Gluster, NFS, ISCSI, ...)
1964
-  $ oc set volume dc/registry --add -m /repo --source=<json-string>
1964
+  oc set volume dc/registry --add -m /repo --source=<json-string>
1965 1965
 ----
1966 1966
 ====
1967 1967
 
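The `--source` flag takes a JSON snippet following the Kubernetes v1 volume source schema. A sketch of one such source; the NFS server and export path are placeholders:

[options="nowrap"]
----
  # Add a volume backed by an NFS share, mounted at /repo
  oc set volume dc/registry --add -m /repo \
    --source='{"nfs": {"server": "nfs.example.com", "path": "/exports/repo"}}'
----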
... ...
@@ -1974,24 +1974,24 @@ Start a new build
1974 1974
 [options="nowrap"]
1975 1975
 ----
1976 1976
   # Starts a build from build config "hello-world"
1977
-  $ oc start-build hello-world
1977
+  oc start-build hello-world
1978 1978
 
1979 1979
   # Starts a build from a previous build "hello-world-1"
1980
-  $ oc start-build --from-build=hello-world-1
1980
+  oc start-build --from-build=hello-world-1
1981 1981
 
1982 1982
   # Use the contents of a directory as build input
1983
-  $ oc start-build hello-world --from-dir=src/
1983
+  oc start-build hello-world --from-dir=src/
1984 1984
 
1985 1985
   # Send the contents of a Git repository to the server from tag 'v2'
1986
-  $ oc start-build hello-world --from-repo=../hello-world --commit=v2
1986
+  oc start-build hello-world --from-repo=../hello-world --commit=v2
1987 1987
 
1988 1988
   # Start a new build for build config "hello-world" and watch the logs until the build
1989 1989
   # completes or fails.
1990
-  $ oc start-build hello-world --follow
1990
+  oc start-build hello-world --follow
1991 1991
 
1992 1992
   # Start a new build for build config "hello-world" and wait until the build completes. It
1993 1993
   # exits with a non-zero return code if the build fails.
1994
-  $ oc start-build hello-world --wait
1994
+  oc start-build hello-world --wait
1995 1995
 ----
1996 1996
 ====
1997 1997
 
... ...
@@ -2004,13 +2004,13 @@ Show an overview of the current project
2004 2004
 [options="nowrap"]
2005 2005
 ----
2006 2006
   # See an overview of the current project.
2007
-  $ oc status
2007
+  oc status
2008 2008
 
2009 2009
   # Export the overview of the current project in an svg file.
2010
-  $ oc status -o dot | dot -T svg -o project.svg
2010
+  oc status -o dot | dot -T svg -o project.svg
2011 2011
 
2012 2012
   # See an overview of the current project including details for any identified issues.
2013
-  $ oc status -v
2013
+  oc status -v
2014 2014
 ----
2015 2015
 ====
2016 2016
 
... ...
@@ -2023,16 +2023,16 @@ Tag existing images into image streams
2023 2023
 [options="nowrap"]
2024 2024
 ----
2025 2025
   # Tag the current image for the image stream 'openshift/ruby' and tag '2.0' into the image stream 'yourproject/ruby' with tag 'tip'.
2026
-  $ oc tag openshift/ruby:2.0 yourproject/ruby:tip
2026
+  oc tag openshift/ruby:2.0 yourproject/ruby:tip
2027 2027
 
2028 2028
   # Tag a specific image.
2029
-  $ oc tag openshift/ruby@sha256:6b646fa6bf5e5e4c7fa41056c27910e679c03ebe7f93e361e6515a9da7e258cc yourproject/ruby:tip
2029
+  oc tag openshift/ruby@sha256:6b646fa6bf5e5e4c7fa41056c27910e679c03ebe7f93e361e6515a9da7e258cc yourproject/ruby:tip
2030 2030
 
2031 2031
   # Tag an external Docker image.
2032
-  $ oc tag --source=docker openshift/origin:latest yourproject/ruby:tip
2032
+  oc tag --source=docker openshift/origin:latest yourproject/ruby:tip
2033 2033
 
2034 2034
   # Remove the specified spec tag from an image stream.
2035
-  $ oc tag openshift/origin:latest -d
2035
+  oc tag openshift/origin:latest -d
2036 2036
 ----
2037 2037
 ====
2038 2038
 
... ...
@@ -2045,16 +2045,16 @@ An introduction to concepts and types
2045 2045
 [options="nowrap"]
2046 2046
 ----
2047 2047
   # View all projects you have access to
2048
-  $ oc get projects
2048
+  oc get projects
2049 2049
 
2050 2050
   # See a list of all services in the current project
2051
-  $ oc get svc
2051
+  oc get svc
2052 2052
 
2053 2053
   # Describe a deployment configuration in detail
2054
-  $ oc describe dc mydeploymentconfig
2054
+  oc describe dc mydeploymentconfig
2055 2055
 
2056 2056
   # Show the images tagged into an image stream
2057
-  $ oc describe is ruby-centos7
2057
+  oc describe is ruby-centos7
2058 2058
 ----
2059 2059
 ====
2060 2060
 
... ...
@@ -2067,31 +2067,31 @@ DEPRECATED: set volume
2067 2067
 [options="nowrap"]
2068 2068
 ----
2069 2069
   # List volumes defined on all deployment configs in the current project
2070
-  $ oc volume dc --all
2070
+  oc volume dc --all
2071 2071
 
2072 2072
   # Add a new empty dir volume to deployment config (dc) 'registry' mounted under
2073 2073
   # /var/lib/registry
2074
-  $ oc volume dc/registry --add --mount-path=/var/lib/registry
2074
+  oc volume dc/registry --add --mount-path=/var/lib/registry
2075 2075
 
2076 2076
   # Use an existing persistent volume claim (pvc) to overwrite an existing volume 'v1'
2077
-  $ oc volume dc/registry --add --name=v1 -t pvc --claim-name=pvc1 --overwrite
2077
+  oc volume dc/registry --add --name=v1 -t pvc --claim-name=pvc1 --overwrite
2078 2078
 
2079 2079
   # Remove volume 'v1' from deployment config 'registry'
2080
-  $ oc volume dc/registry --remove --name=v1
2080
+  oc volume dc/registry --remove --name=v1
2081 2081
 
2082 2082
   # Create a new persistent volume claim that overwrites an existing volume 'v1'
2083
-  $ oc volume dc/registry --add --name=v1 -t pvc --claim-size=1G --overwrite
2083
+  oc volume dc/registry --add --name=v1 -t pvc --claim-size=1G --overwrite
2084 2084
 
2085 2085
   # Change the mount point for volume 'v1' to /data
2086
-  $ oc volume dc/registry --add --name=v1 -m /data --overwrite
2086
+  oc volume dc/registry --add --name=v1 -m /data --overwrite
2087 2087
 
2088 2088
   # Modify the deployment config by removing volume mount "v1" from container "c1"
2089 2089
   # (and by removing the volume "v1" if no other containers have volume mounts that reference it)
2090
-  $ oc volume dc/registry --remove --name=v1 --containers=c1
2090
+  oc volume dc/registry --remove --name=v1 --containers=c1
2091 2091
 
2092 2092
   # Add new volume based on a more complex volume source (Git repo, AWS EBS, GCE PD,
2093 2093
   # Ceph, Gluster, NFS, ISCSI, ...)
2094
-  $ oc volume dc/registry --add -m /repo --source=<json-string>
2094
+  oc volume dc/registry --add -m /repo --source=<json-string>
2095 2095
 ----
2096 2096
 ====
2097 2097
 
... ...
@@ -63,14 +63,14 @@ This utility helps troubleshoot and diagnose known problems. It runs
63 63
 diagnostics using a client and/or the state of a running master /
64 64
 node host.
65 65
 
66
-    $ %[1]s
66
+    %[1]s
67 67
 
68 68
 If run without flags, it will check for standard config files for
69 69
 client, master, and node, and if found, use them for diagnostics.
70 70
 You may also specify config files explicitly with flags, in which case
71 71
 you will receive an error if they are not found. For example:
72 72
 
73
-    $ %[1]s --master-config=/etc/origin/master/master-config.yaml
73
+    %[1]s --master-config=/etc/origin/master/master-config.yaml
74 74
 
75 75
 * If master/node config files are not found and the --host flag is not
76 76
   present, host diagnostics are skipped.
... ...
@@ -22,7 +22,7 @@ Add users to a group.
22 22
 This command will append unique users to the list of members for a group.`
23 23
 
24 24
 	addExample = `  # Add user1 and user2 to my-group
25
-  $ %[1]s my-group user1 user2`
25
+  %[1]s my-group user1 user2`
26 26
 )
27 27
 
28 28
 const (
... ...
@@ -33,7 +33,7 @@ Remove users from a group.
33 33
 This command will remove users from the list of members for a group.`
34 34
 
35 35
 	removeExample = `  # Remove user1 and user2 from my-group
36
-  $ %[1]s my-group user1 user2`
36
+  %[1]s my-group user1 user2`
37 37
 )
38 38
 
39 39
 type GroupModificationOptions struct {
... ...
@@ -23,10 +23,10 @@ Create a new group.
23 23
 This command will create a new group with an optional list of users.`
24 24
 
25 25
 	newExample = `  # Add a group with no users
26
-  $ %[1]s my-group
26
+  %[1]s my-group
27 27
 
28 28
   # Add a group with two users
29
-  $ %[1]s my-group user1 user2`
29
+  %[1]s my-group user1 user2`
30 30
 )
31 31
 
32 32
 type NewGroupOptions struct {
... ...
@@ -36,16 +36,16 @@ for which the external record does not exist, to run the pruning process and com
36 36
 flag.
37 37
 `
38 38
 	pruneExamples = `  # Prune all orphaned groups
39
-  $ %[1]s --sync-config=/path/to/ldap-sync-config.yaml --confirm
39
+  %[1]s --sync-config=/path/to/ldap-sync-config.yaml --confirm
40 40
 
41 41
   # Prune all orphaned groups except the ones from the blacklist file
42
-  $ %[1]s --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
42
+  %[1]s --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
43 43
 
44 44
   # Prune all orphaned groups from a list of specific groups specified in a whitelist file
45
-  $ %[1]s --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
45
+  %[1]s --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
46 46
 
47 47
   # Prune all orphaned groups from a list of specific groups specified in a whitelist
48
-  $ %[1]s groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm
48
+  %[1]s groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm
49 49
 `
50 50
 )
51 51
 
... ...
@@ -46,19 +46,19 @@ without changing OpenShift records. Passing '--confirm' will sync all groups fro
46 46
 LDAP query templates.
47 47
 `
48 48
 	syncExamples = `  # Sync all groups from an LDAP server
49
-  $ %[1]s --sync-config=/path/to/ldap-sync-config.yaml --confirm
49
+  %[1]s --sync-config=/path/to/ldap-sync-config.yaml --confirm
50 50
 
51 51
   # Sync all groups except the ones from the blacklist file from an LDAP server
52
-  $ %[1]s --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
52
+  %[1]s --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm
53 53
 
54 54
   # Sync specific groups specified in a whitelist file with an LDAP server
55
-  $ %[1]s --whitelist=/path/to/whitelist.txt --sync-config=/path/to/sync-config.yaml --confirm
55
+  %[1]s --whitelist=/path/to/whitelist.txt --sync-config=/path/to/sync-config.yaml --confirm
56 56
 
57 57
   # Sync all OpenShift Groups that have been synced previously with an LDAP server
58
-  $ %[1]s --type=openshift --sync-config=/path/to/ldap-sync-config.yaml --confirm
58
+  %[1]s --type=openshift --sync-config=/path/to/ldap-sync-config.yaml --confirm
59 59
 
60 60
   # Sync specific OpenShift Groups if they have been synced previously with an LDAP server
61
-  $ %[1]s groups/group1 groups/group2 groups/group3 --sync-config=/path/to/sync-config.yaml --confirm
61
+  %[1]s groups/group1 groups/group2 groups/group3 --sync-config=/path/to/sync-config.yaml --confirm
62 62
 `
63 63
 )
64 64
 
... ...
@@ -28,19 +28,19 @@ evacuate: Migrate all/selected pod on the provided nodes.
28 28
 list-pods: List all/selected pods on given/selected nodes. It can list the output in json/yaml format.`
29 29
 
30 30
 	manageNodeExample = `	# Block accepting any pods on given nodes
31
-	$ %[1]s <mynode> --schedulable=false
31
+	%[1]s <mynode> --schedulable=false
32 32
 
33 33
 	# Mark selected nodes as schedulable
34
-	$ %[1]s --selector="<env=dev>" --schedulable=true
34
+	%[1]s --selector="<env=dev>" --schedulable=true
35 35
 
36 36
 	# Migrate selected pods
37
-	$ %[1]s <mynode> --evacuate --pod-selector="<service=myapp>"
37
+	%[1]s <mynode> --evacuate --pod-selector="<service=myapp>"
38 38
 
39 39
 	# Show pods that will be migrated
40
-	$ %[1]s <mynode> --evacuate --dry-run --pod-selector="<service=myapp>"
40
+	%[1]s <mynode> --evacuate --dry-run --pod-selector="<service=myapp>"
41 41
 
42 42
 	# List all pods on given nodes
43
-	$ %[1]s <mynode1> <mynode2> --list-pods`
43
+	%[1]s <mynode1> <mynode2> --list-pods`
44 44
 )
45 45
 
46 46
 var schedulable, evacuate, listpods bool
... ...
@@ -29,10 +29,10 @@ const (
29 29
 
30 30
 const (
31 31
 	addRoleToUserExample = `  # Add the 'view' role to user1 in the current project
32
-  $ %[1]s view user1
32
+  %[1]s view user1
33 33
 
34 34
   # Add the 'edit' role to serviceaccount1 in the current project
35
-  $ %[1]s edit -z serviceaccount1`
35
+  %[1]s edit -z serviceaccount1`
36 36
 )
37 37
 
38 38
 type RoleModificationOptions struct {
... ...
@@ -25,10 +25,10 @@ const (
25 25
 
26 26
 const (
27 27
 	addSCCToUserExample = `  # Add the 'restricted' security context constraint to user1 and user2
28
-  $ %[1]s restricted user1 user2
28
+  %[1]s restricted user1 user2
29 29
 
30 30
   # Add the 'privileged' security context constraint to the service account serviceaccount1 in the current namespace
31
-  $ %[1]s privileged -z serviceaccount1`
31
+  %[1]s privileged -z serviceaccount1`
32 32
 )
33 33
 
34 34
 type SCCModificationOptions struct {
... ...
@@ -50,19 +50,19 @@ This command will not remove any additional cluster role bindings.
50 50
 You can see which recommended cluster role bindings have changed by choosing an output type.`
51 51
 
52 52
 	reconcileBindingsExample = `  # Display the names of cluster role bindings that would be modified
53
-  $ %[1]s -o name
53
+  %[1]s -o name
54 54
 
55 55
   # Display the cluster role bindings that would be modified, removing any extra subjects
56
-  $ %[1]s --additive-only=false
56
+  %[1]s --additive-only=false
57 57
 
58 58
   # Update cluster role bindings that don't match the current defaults
59
-  $ %[1]s --confirm
59
+  %[1]s --confirm
60 60
 
61 61
   # Update cluster role bindings that don't match the current defaults, avoid adding roles to the system:authenticated group
62
-  $ %[1]s --confirm --exclude-groups=system:authenticated
62
+  %[1]s --confirm --exclude-groups=system:authenticated
63 63
 
64 64
   # Update cluster role bindings that don't match the current defaults, removing any extra subjects from the binding
65
-  $ %[1]s --confirm --additive-only=false`
65
+  %[1]s --confirm --additive-only=false`
66 66
 )
67 67
 
68 68
 // NewCmdReconcileClusterRoleBindings implements the OpenShift cli reconcile-cluster-role-bindings command
... ...
@@ -55,17 +55,17 @@ Cluster roles with the annotation %s set to "true" are skipped.
55 55
 You can see which cluster roles have recommended changes by choosing an output type.`
56 56
 
57 57
 	reconcileExample = `  # Display the names of cluster roles that would be modified
58
-  $ %[1]s -o name
58
+  %[1]s -o name
59 59
 
60 60
   # Add missing permissions to cluster roles that don't match the current defaults
61
-  $ %[1]s --confirm
61
+  %[1]s --confirm
62 62
 
63
-  # Add missing permissions and remove extra permissions from 
63
+  # Add missing permissions and remove extra permissions from
64 64
   # cluster roles that don't match the current defaults
65
-  $ %[1]s --additive-only=false --confirm
65
+  %[1]s --additive-only=false --confirm
66 66
 
67 67
   # Display the union of the default and modified cluster roles
68
-  $ %[1]s --additive-only`
68
+  %[1]s --additive-only`
69 69
 )
70 70
 
71 71
 // NewCmdReconcileClusterRoles implements the OpenShift cli reconcile-cluster-roles command
... ...
@@ -54,14 +54,14 @@ definition).
54 54
 You can see which cluster SCCs have recommended changes by choosing an output type.`
55 55
 
56 56
 	reconcileSCCExample = `  # Display the cluster SCCs that would be modified
57
-  $ %[1]s
57
+  %[1]s
58 58
 
59 59
   # Update cluster SCCs that don't match the current defaults, preserving additional grants
60 60
   # for users and groups and keeping any priorities that are already set
61
-  $ %[1]s --confirm
61
+  %[1]s --confirm
62 62
 
63 63
   # Replace existing users, groups, and priorities that do not match defaults
64
-  $ %[1]s --additive-only=false --confirm`
64
+  %[1]s --additive-only=false --confirm`
65 65
 )
66 66
 
67 67
 // NewDefaultReconcileSCCOptions provides a ReconcileSCCOptions with default settings.
... ...
@@ -28,10 +28,10 @@ By default, the prune operation performs a dry run making no changes to internal
28 28
 
29 29
 	buildsExample = `  # Dry run deleting older completed and failed builds and also including
30 30
   # all builds whose associated BuildConfig no longer exists
31
-  $ %[1]s %[2]s --orphans
31
+  %[1]s %[2]s --orphans
32 32
 
33 33
   # To actually perform the prune operation, the confirm flag must be appended
34
-  $ %[1]s %[2]s --orphans --confirm`
34
+  %[1]s %[2]s --orphans --confirm`
35 35
 )
36 36
 
37 37
 type pruneBuildsConfig struct {
... ...
@@ -29,10 +29,10 @@ A --confirm flag is needed for changes to be effective.
29 29
 `
30 30
 
31 31
 	deploymentsExample = `  # Dry run deleting all but the last complete deployment for every deployment config
32
-  $ %[1]s %[2]s --keep-complete=1
32
+  %[1]s %[2]s --keep-complete=1
33 33
 
34 34
   # To actually perform the prune operation, the confirm flag must be appended
35
-  $ %[1]s %[2]s --keep-complete=1 --confirm`
35
+  %[1]s %[2]s --keep-complete=1 --confirm`
36 36
 )
37 37
 
38 38
 type pruneDeploymentConfig struct {
... ...
@@ -41,10 +41,10 @@ images.`
41 41
 
42 42
 	imagesExample = `  # See what the prune command would delete if only images more than an hour old and obsoleted
43 43
   # by 3 newer revisions under the same tag were considered.
44
-  $ %[1]s %[2]s --keep-tag-revisions=3 --keep-younger-than=60m
44
+  %[1]s %[2]s --keep-tag-revisions=3 --keep-younger-than=60m
45 45
 
46 46
   # To actually perform the prune operation, the confirm flag must be appended
47
-  $ %[1]s %[2]s --keep-tag-revisions=3 --keep-younger-than=60m --confirm`
47
+  %[1]s %[2]s --keep-tag-revisions=3 --keep-younger-than=60m --confirm`
48 48
 )
49 49
 
50 50
 // PruneImagesOptions holds all the required options for prune images
... ...
@@ -54,16 +54,16 @@ NOTE: This command is intended to simplify the tasks of setting up a Docker regi
54 54
   your registry persist data.`
55 55
 
56 56
 	registryExample = `  # Check if default Docker registry ("docker-registry") has been created
57
-  $ %[1]s %[2]s --dry-run
57
+  %[1]s %[2]s --dry-run
58 58
 
59 59
   # See what the registry will look like if created
60
-  $ %[1]s %[2]s -o yaml
60
+  %[1]s %[2]s -o yaml
61 61
 
62 62
   # Create a registry with two replicas if it does not exist
63
-  $ %[1]s %[2]s --replicas=2
63
+  %[1]s %[2]s --replicas=2
64 64
 
65 65
   # Use a different registry image
66
-  $ %[1]s %[2]s --images=myrepo/docker-registry:mytag`
66
+  %[1]s %[2]s --images=myrepo/docker-registry:mytag`
67 67
 )
68 68
 
69 69
 type RegistryConfig struct {
... ...
@@ -51,19 +51,19 @@ running your router in production, you should pass --replicas=2 or higher to ens
51 51
 you have failover protection.`
52 52
 
53 53
 	routerExample = `  # Check the default router ("router")
54
-  $ %[1]s %[2]s --dry-run
54
+  %[1]s %[2]s --dry-run
55 55
 
56 56
   # See what the router would look like if created
57
-  $ %[1]s %[2]s -o yaml
57
+  %[1]s %[2]s -o yaml
58 58
 
59 59
   # Create a router with two replicas if it does not exist
60
-  $ %[1]s %[2]s router-west --replicas=2
60
+  %[1]s %[2]s router-west --replicas=2
61 61
 
62 62
   # Use a different router image
63
-  $ %[1]s %[2]s region-west --images=myrepo/somerouter:mytag
63
+  %[1]s %[2]s region-west --images=myrepo/somerouter:mytag
64 64
 
65 65
   # Run the router with a hint to the underlying implementation to _not_ expose statistics.
66
-  $ %[1]s %[2]s router-west --stats-port=0
66
+  %[1]s %[2]s router-west --stats-port=0
67 67
   `
68 68
 
69 69
 	secretsVolumeName = "secret-volume"
... ...
@@ -26,7 +26,7 @@ This command validates that a configuration file intended to be used for a maste
26 26
 `
27 27
 
28 28
 	validateMasterConfigExample = ` // Validate master server configuration file
29
-  $ %s openshift.local.config/master/master-config.yaml`
29
+  %s openshift.local.config/master/master-config.yaml`
30 30
 
31 31
 	validateMasterConfigDeprecationMessage = `This command is deprecated and will be removed. Use 'oadm diagnostics MasterConfigCheck --master-config=path/to/config.yaml' instead.`
32 32
 )
... ...
@@ -25,7 +25,7 @@ This command validates that a configuration file intended to be used for a node
25 25
 `
26 26
 
27 27
 	valiateNodeConfigExample = ` // Validate node configuration file
28
-  $ %s openshift.local.config/master/node-config.yaml`
28
+  %s openshift.local.config/master/node-config.yaml`
29 29
 
30 30
 	validateNodeConfigDeprecationMessage = `This command is deprecated and will be removed. Use 'oadm diagnostics NodeConfigCheck --node-config=path/to/config.yaml' instead.`
31 31
 )
... ...
@@ -41,9 +41,9 @@ cluster under the 'adm' subcommand.
41 41
 const cliExplain = `
42 42
 To create a new application, log in to your server and then run new-app:
43 43
 
44
-  $ %[1]s login https://mycluster.mycompany.com
45
-  $ %[1]s new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git
46
-  $ %[1]s logs -f bc/ruby-ex
44
+  %[1]s login https://mycluster.mycompany.com
45
+  %[1]s new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git
46
+  %[1]s logs -f bc/ruby-ex
47 47
 
48 48
 This will create an application based on the Docker image 'centos/ruby-22-centos7' that builds
49 49
 the source code from GitHub. A build will start automatically, push the resulting image to the
... ...
@@ -52,16 +52,16 @@ registry, and a deployment will roll that change out in your project.
52 52
 Once your application is deployed, use the status, describe, and get commands to see more about
53 53
 the created components:
54 54
 
55
-  $ %[1]s status
56
-  $ %[1]s describe deploymentconfig ruby-ex
57
-  $ %[1]s get pods
55
+  %[1]s status
56
+  %[1]s describe deploymentconfig ruby-ex
57
+  %[1]s get pods
58 58
 
59 59
 To make this application visible outside the cluster, use the expose command on the service
60 60
 we just created to create a 'route' (which will connect your application over the HTTP port
61 61
 to a public domain name).
62 62
 
63
-  $ %[1]s expose svc/ruby-ex
64
-  $ %[1]s status
63
+  %[1]s expose svc/ruby-ex
64
+  %[1]s status
65 65
 
66 66
 You should now see the URL the application can be reached at.
67 67
 
... ...
@@ -26,7 +26,7 @@ will no longer be available. If the build has not yet completed, the build logs
26 26
 build completes or fails.`
27 27
 
28 28
 	buildLogsExample = `  # Stream logs from container
29
-  $ %[1]s build-logs 566bed879d2d`
29
+  %[1]s build-logs 566bed879d2d`
30 30
 )
31 31
 
32 32
 // NewCmdBuildLogs implements the OpenShift cli build-logs command
... ...
@@ -26,23 +26,23 @@ const (
26 26
 	cancelBuildLong = `
27 27
 Cancel running, pending, or new builds
28 28
 
29
-This command requests a graceful shutdown of the build. There may be a delay between requesting 
29
+This command requests a graceful shutdown of the build. There may be a delay between requesting
30 30
 the build and the time the build is terminated.`
31 31
 
32 32
 	cancelBuildExample = `  # Cancel the build with the given name
33
-  $ %[1]s cancel-build ruby-build-2
33
+  %[1]s cancel-build ruby-build-2
34 34
 
35 35
   # Cancel the named build and print the build logs
36
-  $ %[1]s cancel-build ruby-build-2 --dump-logs
36
+  %[1]s cancel-build ruby-build-2 --dump-logs
37 37
 
38 38
   # Cancel the named build and create a new one with the same parameters
39
-  $ %[1]s cancel-build ruby-build-2 --restart
39
+  %[1]s cancel-build ruby-build-2 --restart
40 40
 
41 41
   # Cancel multiple builds
42
-  $ %[1]s cancel-build ruby-build-1 ruby-build-2 ruby-build-3
42
+  %[1]s cancel-build ruby-build-1 ruby-build-2 ruby-build-3
43 43
 
44 44
   # Cancel all builds created from 'ruby-build' build configuration that are in 'new' state
45
-  $ %[1]s cancel-build bc/ruby-build --state=new`
45
+  %[1]s cancel-build bc/ruby-build --state=new`
46 46
 )
47 47
 
48 48
 type CancelBuildOptions struct {
... ...
@@ -25,7 +25,7 @@ Create a deployment config that uses a given image.
25 25
 Deployment configs define the template for a pod and manage deploying new images or configuration changes.
26 26
 
27 27
 	deploymentConfigExample = `  # Create an nginx deployment config named my-nginx
28
-  $ %[1]s my-nginx --image=nginx`
28
+  %[1]s my-nginx --image=nginx`
29 29
 )
30 30
 
31 31
 type CreateDeploymentConfigOptions struct {
... ...
@@ -30,7 +30,7 @@ Corresponding user and useridentitymapping objects must also be created
30 30
 to allow logging in with the created identity.`
31 31
 
32 32
 	identityExample = `  # Create an identity with identity provider "acme_ldap" and the identity provider username "adamjones"
33
-  $ %[1]s acme_ldap:adamjones`
33
+  %[1]s acme_ldap:adamjones`
34 34
 )
35 35
 
36 36
 type CreateIdentityOptions struct {
... ...
@@ -22,7 +22,7 @@ const (
22 22
 Create a policy binding that references the policy in the targeted namespace.
23 23
 
24 24
 	policyBindingExample = `  # Create a policy binding in namespace "foo" that references the policy in namespace "bar"
25
-  $ %[1]s bar -n foo`
25
+  %[1]s bar -n foo`
26 26
 )
27 27
 
28 28
 type CreatePolicyBindingOptions struct {
... ...
@@ -29,7 +29,7 @@ Corresponding identity and useridentitymapping objects must also be created
29 29
 to allow logging in as the created user.`
30 30
 
31 31
 	userExample = `  # Create a user with the username "ajones" and the display name "Adam Jones"
32
-  $ %[1]s ajones --full-name="Adam Jones"`
32
+  %[1]s ajones --full-name="Adam Jones"`
33 33
 )
34 34
 
35 35
 type CreateUserOptions struct {
... ...
@@ -26,7 +26,7 @@ be manually established between an identity and a user, this command can be used
26 26
 to create a useridentitymapping object.`
27 27
 
28 28
 	userIdentityMappingExample = `  # Map the identity "acme_ldap:adamjones" to the user "ajones"
29
-  $ %[1]s acme_ldap:adamjones ajones`
29
+  %[1]s acme_ldap:adamjones ajones`
30 30
 )
31 31
 
32 32
 type CreateUserIdentityMappingOptions struct {
... ...
@@ -52,11 +52,11 @@ Specify the service (either just its name or using type/name syntax) that the
52 52
 generated route should expose via the --service flag.`
53 53
 
54 54
 	edgeRouteExample = `  # Create an edge route named "my-route" that exposes frontend service.
55
-  $ %[1]s create route edge my-route --service=frontend
55
+  %[1]s create route edge my-route --service=frontend
56 56
 
57 57
   # Create an edge route that exposes the frontend service and specify a path.
58 58
   # If the route name is omitted, the service name will be re-used.
59
-  $ %[1]s create route edge --service=frontend --path /assets`
59
+  %[1]s create route edge --service=frontend --path /assets`
60 60
 )
61 61
 
62 62
 // NewCmdCreateEdgeRoute is a macro command to create an edge route.
... ...
@@ -160,11 +160,11 @@ Specify the service (either just its name or using type/name syntax) that the
160 160
 generated route should expose via the --service flag.`
161 161
 
162 162
 	passthroughRouteExample = `  # Create a passthrough route named "my-route" that exposes the frontend service.
163
-  $ %[1]s create route passthrough my-route --service=frontend
163
+  %[1]s create route passthrough my-route --service=frontend
164 164
 
165 165
   # Create a passthrough route that exposes the frontend service and specify
166 166
   # a hostname. If the route name is omitted, the service name will be re-used.
167
-  $ %[1]s create route passthrough --service=frontend --hostname=www.example.com`
167
+  %[1]s create route passthrough --service=frontend --hostname=www.example.com`
168 168
 )
169 169
 
170 170
 // NewCmdCreatePassthroughRoute is a macro command to create a passthrough route.
... ...
@@ -246,11 +246,11 @@ generated route should expose via the --service flag. A destination CA certifica
246 246
 is needed for reencrypt routes, specify one with the --dest-ca-cert flag.`
247 247
 
248 248
 	reencryptRouteExample = `  # Create a route named "my-route" that exposes the frontend service.
249
-  $ %[1]s create route reencrypt my-route --service=frontend --dest-ca-cert cert.cert
249
+  %[1]s create route reencrypt my-route --service=frontend --dest-ca-cert cert.cert
250 250
 
251 251
   # Create a reencrypt route that exposes the frontend service and re-use
252 252
   # the service name as the route name.
253
-  $ %[1]s create route reencrypt --service=frontend --dest-ca-cert cert.cert`
253
+  %[1]s create route reencrypt --service=frontend --dest-ca-cert cert.cert`
254 254
 )
255 255
 
256 256
 // NewCmdCreateReencryptRoute is a macro command to create a reencrypt route.
... ...
@@ -76,13 +76,13 @@ the shell.`
76 76
 
77 77
 	debugExample = `
78 78
   # Debug a currently running deployment
79
-  $ %[1]s dc/test
79
+  %[1]s dc/test
80 80
 
81 81
   # Debug a specific failing container by running the env command in the 'second' container
82
-  $ %[1]s dc/test -c second -- /bin/env
82
+  %[1]s dc/test -c second -- /bin/env
83 83
 
84 84
   # See the pod that would be created to debug
85
-  $ %[1]s dc/test -o yaml`
85
+  %[1]s dc/test -o yaml`
86 86
 
87 87
 	debugPodLabelName = "debug.openshift.io/name"
88 88
 
... ...
@@ -74,17 +74,17 @@ complete before the cancellation is effective. In such a case an appropriate eve
74 74
 If no options are given, shows information about the latest deployment.`
75 75
 
76 76
 	deployExample = `  # Display the latest deployment for the 'database' deployment config
77
-  $ %[1]s deploy database
77
+  %[1]s deploy database
78 78
 
79 79
   # Start a new deployment based on the 'database' deployment config
80
-  $ %[1]s deploy database --latest
80
+  %[1]s deploy database --latest
81 81
 
82 82
   # Retry the latest failed deployment based on 'frontend'
83 83
   # The deployer pod and any hook pods are deleted for the latest failed deployment
84
-  $ %[1]s deploy frontend --retry
84
+  %[1]s deploy frontend --retry
85 85
 
86 86
   # Cancel the in-progress deployment based on 'frontend'
87
-  $ %[1]s deploy frontend --cancel`
87
+  %[1]s deploy frontend --cancel`
88 88
 )
89 89
 
90 90
 // NewCmdDeploy creates a new `deploy` command.
... ...
@@ -28,7 +28,7 @@ Builds the provided directory with a Dockerfile into a single layered image.
28 28
 Requires that you have a working connection to a Docker engine.`
29 29
 
30 30
 	dockerbuildExample = `  # Build the current directory into a single layer and tag
31
-  $ %[1]s dockerbuild . myimage:latest`
31
+  %[1]s dockerbuild . myimage:latest`
32 32
 )
33 33
 
34 34
 type DockerbuildOptions struct {
... ...
@@ -23,19 +23,19 @@ as a new service on a specified port. If no labels are specified, the new object
23 23
 labels from the object it exposes.`
24 24
 
25 25
 	exposeExample = `  # Create a route based on service nginx. The new route will re-use nginx's labels
26
-  $ %[1]s expose service nginx
26
+  %[1]s expose service nginx
27 27
 
28 28
   # Create a route and specify your own label and route name
29
-  $ %[1]s expose service nginx -l name=myroute --name=fromdowntown
29
+  %[1]s expose service nginx -l name=myroute --name=fromdowntown
30 30
 
31 31
   # Create a route and specify a hostname
32
-  $ %[1]s expose service nginx --hostname=www.example.com
32
+  %[1]s expose service nginx --hostname=www.example.com
33 33
 
34 34
   # Expose a deployment configuration as a service and use the specified port
35
-  $ %[1]s expose dc ruby-hello-world --port=8080
35
+  %[1]s expose dc ruby-hello-world --port=8080
36 36
 
37 37
   # Expose a service as a route in the specified path
38
-  $ %[1]s expose service nginx --path=/nginx`
38
+  %[1]s expose service nginx --path=/nginx`
39 39
 )
40 40
 
41 41
 // NewCmdExpose is a wrapper for the Kubernetes cli expose command
... ...
@@ -37,10 +37,10 @@ The command will create objects unless you pass the -o yaml or --as-template fla
37 37
 configuration file for later use.`
38 38
 
39 39
 	dockerComposeExample = `  # Import a docker-compose.yml file into OpenShift
40
-  $ %[1]s docker-compose -f ./docker-compose.yml
40
+  %[1]s docker-compose -f ./docker-compose.yml
41 41
 
42 42
 	# Turn a docker-compose.yml file into a template
43
-  $ %[1]s docker-compose -f ./docker-compose.yml -o yaml --as-template
43
+  %[1]s docker-compose -f ./docker-compose.yml -o yaml --as-template
44 44
 `
45 45
 
46 46
 	DockerComposeV1GeneratorName = "docker-compose/v1"
... ...
@@ -27,7 +27,7 @@ Import tag and image information from an external Docker image repository
27 27
 Only image streams that have a value set for spec.dockerImageRepository and/or
28 28
 spec.Tags may have tag and image information imported.`
29 29
 
30
-	importImageExample = `  $ %[1]s import-image mystream`
30
+	importImageExample = `  %[1]s import-image mystream`
31 31
 )
32 32
 
33 33
 // NewCmdImportImage implements the OpenShift cli import-image command.
... ...
@@ -33,13 +33,13 @@ the server details -- can be provided through flags. If not provided, the comman
33 33
 prompt for user input as needed.`
34 34
 
35 35
 	loginExample = `  # Log in interactively
36
-  $ %[1]s login
36
+  %[1]s login
37 37
 
38 38
   # Log in to the given server with the given certificate authority file
39
-  $ %[1]s login localhost:8443 --certificate-authority=/path/to/cert.crt
39
+  %[1]s login localhost:8443 --certificate-authority=/path/to/cert.crt
40 40
 
41 41
   # Log in to the given server with the given credentials (will not prompt interactively)
42
-  $ %[1]s login localhost:8443 --username=myuser --password=mypass`
42
+  %[1]s login localhost:8443 --username=myuser --password=mypass`
43 43
 )
44 44
 
45 45
 // NewCmdLogin implements the OpenShift cli login command
... ...
@@ -313,7 +313,7 @@ func (o *LoginOptions) gatherProjectInfo() error {
313 313
 	case 0:
314 314
 		fmt.Fprintf(o.Out, `You don't have any projects. You can try to create a new project, by running
315 315
 
316
-    $ oc new-project <projectname>
316
+    oc new-project <projectname>
317 317
 
318 318
 `)
319 319
 		o.Project = ""
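
Editor's note: this hunk shows the sweep also covers runtime output, not only Example fields; a user copying the hint would otherwise paste "$ oc new-project ..." and get "$: command not found" back from the shell. A regression guard could look like the sketch below; the test and its examples table are hypothetical, not something this commit adds:

package cli

import (
	"strings"
	"testing"
)

// TestExamplesHaveNoShellPrompt is a hypothetical guard: it fails if
// any example line still begins with a "$ " shell prompt.
func TestExamplesHaveNoShellPrompt(t *testing.T) {
	// Stand-ins for the real constants spread across the packages above.
	examples := map[string]string{
		"build-logs": "  # Stream logs from container\n  %[1]s build-logs 566bed879d2d",
	}
	for name, text := range examples {
		for _, line := range strings.Split(text, "\n") {
			if strings.HasPrefix(strings.TrimSpace(line), "$ ") {
				t.Errorf("%s: example line still has a shell prompt: %q", name, line)
			}
		}
	}
}
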
... ...
@@ -41,7 +41,7 @@ After logging out, if you want to log back into the server use '%[1]s'.`
41 41
 
42 42
 	logoutExample = `
43 43
   # Logout
44
-  $ %[1]s`
44
+  %[1]s`
45 45
 )
46 46
 
47 47
 // NewCmdLogout implements the OpenShift cli logout command
... ...
@@ -36,21 +36,21 @@ If your pod is failing to start, you may need to use the --previous option to se
36 36
 logs of the last attempt.`
37 37
 
38 38
 	logsExample = `  # Start streaming the logs of the most recent build of the openldap build config.
39
-  $ %[1]s -f bc/openldap
39
+  %[1]s -f bc/openldap
40 40
 
41 41
   # Start streaming the logs of the latest deployment of the mysql deployment config.
42
-  $ %[1]s -f dc/mysql
42
+  %[1]s -f dc/mysql
43 43
 
44 44
   # Get the logs of the first deployment for the mysql deployment config. Note that logs
45 45
   # from older deployments may not exist either because the deployment was successful
46 46
   # or because of deployment pruning or manual deletion of the deployment.
47
-  $ %[1]s --version=1 dc/mysql
47
+  %[1]s --version=1 dc/mysql
48 48
 
49 49
   # Return a snapshot of ruby-container logs from pod backend.
50
-  $ %[1]s backend -c ruby-container
50
+  %[1]s backend -c ruby-container
51 51
 
52 52
   # Start streaming of ruby-container logs from pod backend.
53
-  $ %[1]s -f pod/backend -c ruby-container`
53
+  %[1]s -f pod/backend -c ruby-container`
54 54
 )
55 55
 
56 56
 // OpenShiftLogsOptions holds all the necessary options for running oc logs.
... ...
@@ -63,57 +63,57 @@ You can use '%[1]s status' to check the progress.`
63 63
 
64 64
 	newAppExample = `
65 65
   # List all local templates and image streams that can be used to create an app
66
-  $ %[1]s new-app --list
66
+  %[1]s new-app --list
67 67
 
68 68
   # Search all templates, image streams, and Docker images for the ones that match "ruby"
69
-  $ %[1]s new-app --search ruby
69
+  %[1]s new-app --search ruby
70 70
 
71 71
   # Create an application based on the source code in the current git repository (with a public remote)
72 72
   # and a Docker image
73
-  $ %[1]s new-app . --docker-image=repo/langimage
73
+  %[1]s new-app . --docker-image=repo/langimage
74 74
 
75 75
   # Create a Ruby application based on the provided [image]~[source code] combination
76
-  $ %[1]s new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git
76
+  %[1]s new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git
77 77
 
78 78
   # Use the public Docker Hub MySQL image to create an app. Generated artifacts will be labeled with db=mysql
79
-  $ %[1]s new-app mysql MYSQL_USER=user MYSQL_PASSWORD=pass MYSQL_DATABASE=testdb -l db=mysql
79
+  %[1]s new-app mysql MYSQL_USER=user MYSQL_PASSWORD=pass MYSQL_DATABASE=testdb -l db=mysql
80 80
 
81 81
   # Use a MySQL image in a private registry to create an app and override application artifacts' names
82
-  $ %[1]s new-app --docker-image=myregistry.com/mycompany/mysql --name=private
82
+  %[1]s new-app --docker-image=myregistry.com/mycompany/mysql --name=private
83 83
 
84 84
   # Create an application from a remote repository using its beta4 branch
85
-  $ %[1]s new-app https://github.com/openshift/ruby-hello-world#beta4
85
+  %[1]s new-app https://github.com/openshift/ruby-hello-world#beta4
86 86
 
87 87
   # Create an application based on a stored template, explicitly setting a parameter value
88
-  $ %[1]s new-app --template=ruby-helloworld-sample --param=MYSQL_USER=admin
88
+  %[1]s new-app --template=ruby-helloworld-sample --param=MYSQL_USER=admin
89 89
 
90 90
   # Create an application from a remote repository and specify a context directory
91
-  $ %[1]s new-app https://github.com/youruser/yourgitrepo --context-dir=src/build
91
+  %[1]s new-app https://github.com/youruser/yourgitrepo --context-dir=src/build
92 92
 
93 93
   # Create an application based on a template file, explicitly setting a parameter value
94
-  $ %[1]s new-app --file=./example/myapp/template.json --param=MYSQL_USER=admin
94
+  %[1]s new-app --file=./example/myapp/template.json --param=MYSQL_USER=admin
95 95
 
96 96
   # Search for "mysql" in all image repositories and stored templates
97
-  $ %[1]s new-app --search mysql
97
+  %[1]s new-app --search mysql
98 98
 
99 99
   # Search for "ruby", but only in stored templates (--template, --image and --docker-image
100 100
   # can be used to filter search results)
101
-  $ %[1]s new-app --search --template=ruby
101
+  %[1]s new-app --search --template=ruby
102 102
 
103 103
   # Search for "ruby" in stored templates and print the output as an YAML
104
-  $ %[1]s new-app --search --template=ruby --output=yaml`
104
+  %[1]s new-app --search --template=ruby --output=yaml`
105 105
 
106 106
 	newAppNoInput = `You must specify one or more images, image streams, templates, or source code locations to create an application.
107 107
 
108 108
 To list all local templates and image streams, use:
109 109
 
110
-  $ %[1]s new-app -L
110
+  %[1]s new-app -L
111 111
 
112 112
 To search templates, image streams, and Docker images that match the arguments provided, use:
113 113
 
114
-  $ %[1]s new-app -S php
115
-  $ %[1]s new-app -S --template=ruby
116
-  $ %[1]s new-app -S --image=mysql
114
+  %[1]s new-app -S php
115
+  %[1]s new-app -S --template=ruby
116
+  %[1]s new-app -S --image=mysql
117 117
 `
118 118
 )
119 119
 
... ...
@@ -40,38 +40,38 @@ You can use '%[1]s status' to check the progress.`
40 40
 	newBuildExample = `
41 41
   # Create a build config based on the source code in the current git repository (with a public
42 42
   # remote) and a Docker image
43
-  $ %[1]s new-build . --docker-image=repo/langimage
43
+  %[1]s new-build . --docker-image=repo/langimage
44 44
 
45 45
   # Create a NodeJS build config based on the provided [image]~[source code] combination
46
-  $ %[1]s new-build openshift/nodejs-010-centos7~https://github.com/openshift/nodejs-ex.git
46
+  %[1]s new-build openshift/nodejs-010-centos7~https://github.com/openshift/nodejs-ex.git
47 47
 
48 48
   # Create a build config from a remote repository using its beta2 branch
49
-  $ %[1]s new-build https://github.com/openshift/ruby-hello-world#beta2
49
+  %[1]s new-build https://github.com/openshift/ruby-hello-world#beta2
50 50
 
51 51
   # Create a build config using a Dockerfile specified as an argument
52
-  $ %[1]s new-build -D $'FROM centos:7\nRUN yum install -y httpd'
52
+  %[1]s new-build -D $'FROM centos:7\nRUN yum install -y httpd'
53 53
 
54 54
   # Create a build config from a remote repository and add custom environment variables
55
-  $ %[1]s new-build https://github.com/openshift/ruby-hello-world RACK_ENV=development
55
+  %[1]s new-build https://github.com/openshift/ruby-hello-world RACK_ENV=development
56 56
 
57 57
   # Create a build config from a remote repository and inject the npmrc into a build
58
-  $ %[1]s new-build https://github.com/openshift/ruby-hello-world --build-secret npmrc:.npmrc
58
+  %[1]s new-build https://github.com/openshift/ruby-hello-world --build-secret npmrc:.npmrc
59 59
 
60 60
   # Create a build config that gets its input from a remote repository and another Docker image
61
-  $ %[1]s new-build https://github.com/openshift/ruby-hello-world --source-image=openshift/jenkins-1-centos7 --source-image-path=/var/lib/jenkins:tmp`
61
+  %[1]s new-build https://github.com/openshift/ruby-hello-world --source-image=openshift/jenkins-1-centos7 --source-image-path=/var/lib/jenkins:tmp`
62 62
 
63 63
 	newBuildNoInput = `You must specify one or more images, image streams, or source code locations to create a build.
64 64
 
65 65
 To build from an existing image stream tag or Docker image, provide the name of the image and
66 66
 the source code location:
67 67
 
68
-  $ %[1]s new-build openshift/nodejs-010-centos7~https://github.com/openshift/nodejs-ex.git
68
+  %[1]s new-build openshift/nodejs-010-centos7~https://github.com/openshift/nodejs-ex.git
69 69
 
70 70
 If you only specify the source repository location (local or remote), the command will look at
71 71
 the repo to determine the type, and then look for a matching image on your server or on the
72 72
 default Docker registry.
73 73
 
74
-  $ %[1]s new-build https://github.com/openshift/nodejs-ex.git
74
+  %[1]s new-build https://github.com/openshift/nodejs-ex.git
75 75
 
76 76
 will look for an image called "nodejs" in your current project, the 'openshift' project, or
77 77
 on the Docker Hub.
... ...
@@ -36,25 +36,25 @@ The output of the process command is always a list of one or more resources. You
36 36
 output to the create command over STDIN (using the '-f -' option) or redirect it to a file.`
37 37
 
38 38
 	processExample = `  # Convert template.json file into resource list and pass to create
39
-  $ %[1]s process -f template.json | %[1]s create -f -
39
+  %[1]s process -f template.json | %[1]s create -f -
40 40
 
41 41
   # Process template while passing a user-defined label
42
-  $ %[1]s process -f template.json -l name=mytemplate
42
+  %[1]s process -f template.json -l name=mytemplate
43 43
 
44 44
   # Convert stored template into resource list
45
-  $ %[1]s process foo
45
+  %[1]s process foo
46 46
 
47 47
   # Convert stored template into resource list by setting/overriding parameter values
48
-  $ %[1]s process foo PARM1=VALUE1 PARM2=VALUE2
48
+  %[1]s process foo PARM1=VALUE1 PARM2=VALUE2
49 49
 
50 50
   # Convert template stored in different namespace into a resource list
51
-  $ %[1]s process openshift//foo
51
+  %[1]s process openshift//foo
52 52
 
53 53
   # Convert template.json into resource list
54
-  $ cat template.json | %[1]s process -f -
54
+  cat template.json | %[1]s process -f -
55 55
 
56 56
   # Combine multiple templates into single resource list
57
-  $ cat template.json second_template.json | %[1]s process -f -`
57
+  cat template.json second_template.json | %[1]s process -f -`
58 58
 )
59 59
 
60 60
 // NewCmdProcess implements the OpenShift cli process command
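
Editor's note: a change this mechanical invites scripting. One throwaway way to reproduce the sweep (a sketch, not necessarily how this commit was produced) is to strip a leading "$ " from each line while keeping its indentation; the pattern's mandatory trailing space is what leaves shell syntax such as the $'FROM centos:7...' argument in the new-build example untouched:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// prompt matches an indented "$ " shell prompt at the start of a line;
// requiring the trailing space keeps $VARS and $'...' strings intact.
var prompt = regexp.MustCompile(`^(\s*)\$ `)

func main() {
	// Usage (hypothetical): go run stripprompt.go < deploy.go > deploy.go.new
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		fmt.Println(prompt.ReplaceAllString(sc.Text(), "$1"))
	}
	if err := sc.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

Each rewritten file still needs review; the script only normalizes the obvious prompt lines.
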
... ...
@@ -51,10 +51,10 @@ For advanced configuration, or to manage the contents of your config file, use t
51 51
 command.`
52 52
 
53 53
 	projectExample = `  # Switch to 'myapp' project
54
-  $ %[1]s myapp
54
+  %[1]s myapp
55 55
 
56 56
   # Display the project currently in use
57
-  $ %[1]s`
57
+  %[1]s`
58 58
 )
59 59
 
60 60
 // NewCmdProject implements the OpenShift cli rollback command
... ...
@@ -39,10 +39,10 @@ as the project admin.
39 39
 After your project is created it will become the default project in your config.`
40 40
 
41 41
 	requestProjectExample = `  # Create a new project with minimal information
42
-  $ %[1]s web-team-dev
42
+  %[1]s web-team-dev
43 43
 
44 44
   # Create a new project with a display name and description
45
-  $ %[1]s web-team-dev --display-name="Web Team Development" --description="Development project for the web team."`
45
+  %[1]s web-team-dev --display-name="Web Team Development" --description="Development project for the web team."`
46 46
 )
47 47
 
48 48
 func NewCmdRequestProject(baseName, name, ocLoginName, ocProjectName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
... ...
@@ -126,7 +126,7 @@ func (o *NewProjectOptions) Run() error {
126 126
 	fmt.Fprintf(o.Out, `
127 127
 You can add applications to this project with the 'new-app' command. For example, try:
128 128
 
129
-    $ %[1]s new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git
129
+    %[1]s new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git
130 130
 
131 131
 to build a new example application in Ruby.
132 132
 `, o.Name)
... ...
@@ -45,16 +45,16 @@ executing the rollback. This is useful if you're not quite sure what the outcome
45 45
 will be.`
46 46
 
47 47
 	rollbackExample = `  # Perform a rollback to the last successfully completed deployment for a deploymentconfig
48
-  $ %[1]s rollback frontend
48
+  %[1]s rollback frontend
49 49
 
50 50
   # See what a rollback to version 3 will look like, but don't perform the rollback
51
-  $ %[1]s rollback frontend --to-version=3 --dry-run
51
+  %[1]s rollback frontend --to-version=3 --dry-run
52 52
 
53 53
   # Perform a rollback to a specific deployment
54
-  $ %[1]s rollback frontend-2
54
+  %[1]s rollback frontend-2
55 55
 
56 56
   # Perform the rollback manually by piping the JSON of the new config back to %[1]s
57
-  $ %[1]s rollback frontend -o json | %[1]s replace dc/frontend -f -`
57
+  %[1]s rollback frontend -o json | %[1]s replace dc/frontend -f -`
58 58
 )
59 59
 
60 60
 // NewCmdRollback creates a CLI rollback command.
... ...
@@ -33,13 +33,13 @@ directly.`
33 33
 
34 34
 	rshExample = `
35 35
   # Open a shell session on the first container in pod 'foo'
36
-  $ %[1]s foo
36
+  %[1]s foo
37 37
 
38 38
   # Run the command 'cat /etc/resolv.conf' inside pod 'foo'
39
-  $ %[1]s foo cat /etc/resolv.conf
39
+  %[1]s foo cat /etc/resolv.conf
40 40
 
41 41
   # See the configuration of your internal registry
42
-  $ %[1]s dc/docker-registry cat config.yml
42
+  %[1]s dc/docker-registry cat config.yml
43 43
 
44 44
   # Open a shell session on the container named 'index' inside a pod of your job
45 45
   # %[1]s -c index job/scheduled`
... ...
@@ -34,10 +34,10 @@ for the copy.`
34 34
 
35 35
 	rsyncExample = `
36 36
   # Synchronize a local directory with a pod directory
37
-  $ %[1]s ./local/dir/ POD:/remote/dir
37
+  %[1]s ./local/dir/ POD:/remote/dir
38 38
 
39 39
   # Synchronize a pod directory with a local directory
40
-  $ %[1]s POD:/remote/dir/ ./local/dir`
40
+  %[1]s POD:/remote/dir/ ./local/dir`
41 41
 
42 42
 	noRsyncUnixWarning    = "WARNING: rsync command not found in path. Please use your package manager to install it.\n"
43 43
 	noRsyncWindowsWarning = "WARNING: rsync command not found in path. Download cwRsync for Windows and add it to your PATH.\n"
... ...
@@ -34,29 +34,29 @@ If "--env -" is passed, environment variables can be read from STDIN using the s
34 34
 syntax.`
35 35
 
36 36
 	envExample = `  # Update deployment 'registry' with a new environment variable
37
-  $ %[1]s env dc/registry STORAGE_DIR=/local
37
+  %[1]s env dc/registry STORAGE_DIR=/local
38 38
 
39 39
   # List the environment variables defined on a build config 'sample-build'
40
-  $ %[1]s env bc/sample-build --list
40
+  %[1]s env bc/sample-build --list
41 41
 
42 42
   # List the environment variables defined on all pods
43
-  $ %[1]s env pods --all --list
43
+  %[1]s env pods --all --list
44 44
 
45 45
   # Output the modified build config in YAML without altering the object on the server
46
-  $ %[1]s env bc/sample-build STORAGE_DIR=/data -o yaml
46
+  %[1]s env bc/sample-build STORAGE_DIR=/data -o yaml
47 47
 
48 48
   # Update all containers in all replication controllers in the project to have ENV=prod
49
-  $ %[1]s env rc --all ENV=prod
49
+  %[1]s env rc --all ENV=prod
50 50
 
51 51
   # Remove the environment variable ENV from container 'c1' in all deployment configs
52
-  $ %[1]s env dc --all --containers="c1" ENV-
52
+  %[1]s env dc --all --containers="c1" ENV-
53 53
 
54 54
   # Remove the environment variable ENV from a deployment config definition on disk and
55 55
   # update the deployment config on the server
56
-  $ %[1]s env -f dc.json ENV-
56
+  %[1]s env -f dc.json ENV-
57 57
 
58 58
   # Set some of the local shell environment into a deployment config on the server
59
-  $ env | grep RAILS_ | %[1]s env -e - dc/registry`
59
+  env | grep RAILS_ | %[1]s env -e - dc/registry`
60 60
 )
61 61
 
62 62
 // NewCmdEnv implements the OpenShift cli env command
... ...
@@ -44,22 +44,22 @@ initial-delay-seconds values, otherwise as your application evolves you may sudd
44 44
 to fail.`
45 45
 
46 46
 	probeExample = `  # Clear both readiness and liveness probes off all containers
47
-  $ %[1]s probe dc/registry --remove --readiness --liveness
47
+  %[1]s probe dc/registry --remove --readiness --liveness
48 48
 
49 49
   # Set an exec action as a liveness probe to run 'echo ok'
50
-  $ %[1]s probe dc/registry --liveness -- echo ok
50
+  %[1]s probe dc/registry --liveness -- echo ok
51 51
 
52 52
   # Set a readiness probe to try to open a TCP socket on 3306
53
-  $ %[1]s probe rc/mysql --readiness --open-tcp=3306
53
+  %[1]s probe rc/mysql --readiness --open-tcp=3306
54 54
 
55 55
   # Set an HTTP readiness probe for port 8080 and path /healthz over HTTP on the pod IP
56
-  $ %[1]s probe dc/webapp --readiness --get-url=http://:8080/healthz
56
+  %[1]s probe dc/webapp --readiness --get-url=http://:8080/healthz
57 57
 
58 58
   # Set an HTTP readiness probe over HTTPS on 127.0.0.1 for a hostNetwork pod
59
-  $ %[1]s probe dc/router --readiness --get-url=https://127.0.0.1:1936/stats
59
+  %[1]s probe dc/router --readiness --get-url=https://127.0.0.1:1936/stats
60 60
 
61 61
   # Set only the initial-delay-seconds field on all deployments
62
-  $ %[1]s probe dc --all --readiness --initial-delay-seconds=30`
62
+  %[1]s probe dc --all --readiness --initial-delay-seconds=30`
63 63
 )
64 64
 
65 65
 type ProbeOptions struct {
... ...
@@ -43,26 +43,26 @@ Build configs support triggering off of image changes, config changes, and webho
43 43
 and generic). The config change trigger for a build config will only trigger the first build.`
44 44
 
45 45
 	triggersExample = `  # Print the triggers on the registry
46
-  $ %[1]s triggers dc/registry
46
+  %[1]s triggers dc/registry
47 47
 
48 48
   # Set all triggers to manual
49
-  $ %[1]s triggers dc/registry --manual
49
+  %[1]s triggers dc/registry --manual
50 50
 
51 51
   # Enable all automatic triggers
52
-  $ %[1]s triggers dc/registry --auto
52
+  %[1]s triggers dc/registry --auto
53 53
 
54 54
   # Reset the GitHub webhook on a build to a new, generated secret
55
-  $ %[1]s triggers bc/webapp --from-github
56
-  $ %[1]s triggers bc/webapp --from-webhook
55
+  %[1]s triggers bc/webapp --from-github
56
+  %[1]s triggers bc/webapp --from-webhook
57 57
 
58 58
   # Remove all triggers
59
-  $ %[1]s triggers bc/webapp --remove-all
59
+  %[1]s triggers bc/webapp --remove-all
60 60
 
61 61
   # Stop triggering on config change
62
-  $ %[1]s triggers dc/registry --from-config --remove
62
+  %[1]s triggers dc/registry --from-config --remove
63 63
 
64 64
   # Add an image trigger to a build config
65
-  $ %[1]s triggers bc/webapp --from-image=namespace1/image:latest`
65
+  %[1]s triggers bc/webapp --from-image=namespace1/image:latest`
66 66
 )
67 67
 
68 68
 type TriggersOptions struct {
... ...
@@ -58,31 +58,31 @@ Volume types include:
58 58
 For descriptions on other volume types, see https://docs.openshift.com`
59 59
 
60 60
 	volumeExample = `  # List volumes defined on all deployment configs in the current project
61
-  $ %[1]s volume dc --all
61
+  %[1]s volume dc --all
62 62
 
63 63
   # Add a new empty dir volume to deployment config (dc) 'registry' mounted under
64 64
   # /var/lib/registry
65
-  $ %[1]s volume dc/registry --add --mount-path=/var/lib/registry
65
+  %[1]s volume dc/registry --add --mount-path=/var/lib/registry
66 66
 
67 67
   # Use an existing persistent volume claim (pvc) to overwrite an existing volume 'v1'
68
-  $ %[1]s volume dc/registry --add --name=v1 -t pvc --claim-name=pvc1 --overwrite
68
+  %[1]s volume dc/registry --add --name=v1 -t pvc --claim-name=pvc1 --overwrite
69 69
 
70 70
   # Remove volume 'v1' from deployment config 'registry'
71
-  $ %[1]s volume dc/registry --remove --name=v1
71
+  %[1]s volume dc/registry --remove --name=v1
72 72
 
73 73
   # Create a new persistent volume claim that overwrites an existing volume 'v1'
74
-  $ %[1]s volume dc/registry --add --name=v1 -t pvc --claim-size=1G --overwrite
74
+  %[1]s volume dc/registry --add --name=v1 -t pvc --claim-size=1G --overwrite
75 75
 
76 76
   # Change the mount point for volume 'v1' to /data
77
-  $ %[1]s volume dc/registry --add --name=v1 -m /data --overwrite
77
+  %[1]s volume dc/registry --add --name=v1 -m /data --overwrite
78 78
 
79 79
   # Modify the deployment config by removing volume mount "v1" from container "c1"
80 80
   # (and by removing the volume "v1" if no other containers have volume mounts that reference it)
81
-  $ %[1]s volume dc/registry --remove --name=v1 --containers=c1
81
+  %[1]s volume dc/registry --remove --name=v1 --containers=c1
82 82
 
83 83
   # Add a new volume based on a more complex volume source (Git repo, AWS EBS, GCE PD,
84 84
   # Ceph, Gluster, NFS, ISCSI, ...)
85
-  $ %[1]s volume dc/registry --add -m /repo --source=<json-string>`
85
+  %[1]s volume dc/registry --add -m /repo --source=<json-string>`
86 86
 
87 87
 	volumePrefix = "volume-"
88 88
 )
... ...
@@ -51,24 +51,24 @@ base image changes will use the source specified on the build config.
51 51
 `
52 52
 
53 53
 	startBuildExample = `  # Start a build from build config "hello-world"
54
-  $ %[1]s start-build hello-world
54
+  %[1]s start-build hello-world
55 55
 
56 56
   # Start a build from a previous build "hello-world-1"
57
-  $ %[1]s start-build --from-build=hello-world-1
57
+  %[1]s start-build --from-build=hello-world-1
58 58
 
59 59
   # Use the contents of a directory as build input
60
-  $ %[1]s start-build hello-world --from-dir=src/
60
+  %[1]s start-build hello-world --from-dir=src/
61 61
 
62 62
   # Send the contents of a Git repository to the server from tag 'v2'
63
-  $ %[1]s start-build hello-world --from-repo=../hello-world --commit=v2
63
+  %[1]s start-build hello-world --from-repo=../hello-world --commit=v2
64 64
 
65 65
   # Start a new build for build config "hello-world" and watch the logs until the build
66 66
   # completes or fails.
67
-  $ %[1]s start-build hello-world --follow
67
+  %[1]s start-build hello-world --follow
68 68
 
69 69
   # Start a new build for build config "hello-world" and wait until the build completes. It
70 70
   # exits with a non-zero return code if the build fails.
71
-  $ %[1]s start-build hello-world --wait`
71
+  %[1]s start-build hello-world --wait`
72 72
 )
73 73
 
74 74
 // NewCmdStartBuild implements the OpenShift cli start-build command
... ...
@@ -31,13 +31,13 @@ You can specify an output format of "-o dot" to have this command output the gen
31 31
 graph in DOT format that is suitable for use by the "dot" command.`
32 32
 
33 33
 	statusExample = `  # See an overview of the current project.
34
-  $ %[1]s
34
+  %[1]s
35 35
 
36 36
   # Export the overview of the current project in an svg file.
37
-  $ %[1]s -o dot | dot -T svg -o project.svg
37
+  %[1]s -o dot | dot -T svg -o project.svg
38 38
 
39 39
   # See an overview of the current project including details for any identified issues.
40
-  $ %[1]s -v`
40
+  %[1]s -v`
41 41
 )
42 42
 
43 43
 // StatusOptions contains all the necessary options for the OpenShift cli status command.
... ...
@@ -55,16 +55,16 @@ Docker images.
55 55
 `
56 56
 
57 57
 	tagExample = `  # Tag the current image for the image stream 'openshift/ruby' and tag '2.0' into the image stream 'yourproject/ruby' with tag 'tip'.
58
-  $ %[1]s tag openshift/ruby:2.0 yourproject/ruby:tip
58
+  %[1]s tag openshift/ruby:2.0 yourproject/ruby:tip
59 59
 
60 60
   # Tag a specific image.
61
-  $ %[1]s tag openshift/ruby@sha256:6b646fa6bf5e5e4c7fa41056c27910e679c03ebe7f93e361e6515a9da7e258cc yourproject/ruby:tip
61
+  %[1]s tag openshift/ruby@sha256:6b646fa6bf5e5e4c7fa41056c27910e679c03ebe7f93e361e6515a9da7e258cc yourproject/ruby:tip
62 62
 
63 63
   # Tag an external Docker image.
64
-  $ %[1]s tag --source=docker openshift/origin:latest yourproject/ruby:tip
64
+  %[1]s tag --source=docker openshift/origin:latest yourproject/ruby:tip
65 65
 
66 66
   # Remove the specified spec tag from an image stream.
67
-  $ %[1]s tag openshift/origin:latest -d`
67
+  %[1]s tag openshift/origin:latest -d`
68 68
 )
69 69
 
70 70
 // NewCmdTag implements the OpenShift cli tag command.
... ...
@@ -208,16 +208,16 @@ var (
208 208
   `)
209 209
 
210 210
 	typesExample = `  # View all projects you have access to
211
-  $ %[1]s get projects
211
+  %[1]s get projects
212 212
 
213 213
   # See a list of all services in the current project
214
-  $ %[1]s get svc
214
+  %[1]s get svc
215 215
 
216 216
   # Describe a deployment configuration in detail
217
-  $ %[1]s describe dc mydeploymentconfig
217
+  %[1]s describe dc mydeploymentconfig
218 218
 
219 219
   # Show the images tagged into an image stream
220
-  $ %[1]s describe is ruby-centos7`
220
+  %[1]s describe is ruby-centos7`
221 221
 )
222 222
 
223 223
 func NewCmdTypes(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
... ...
@@ -36,19 +36,19 @@ Some resources may omit advanced details that you can see with '-o wide'.
36 36
 If you want an even more detailed view, use '%[1]s describe'.`
37 37
 
38 38
 	getExample = `  # List all pods in ps output format.
39
-  $ %[1]s get pods
39
+  %[1]s get pods
40 40
 
41 41
   # List a single replication controller with specified ID in ps output format.
42
-  $ %[1]s get rc redis
42
+  %[1]s get rc redis
43 43
 
44 44
   # List all pods and show more details about them.
45
-  $ %[1]s get -o wide pods
45
+  %[1]s get -o wide pods
46 46
 
47 47
   # List a single pod in JSON output format.
48
-  $ %[1]s get -o json pod redis-pod
48
+  %[1]s get -o json pod redis-pod
49 49
 
50 50
   # Return only the status value of the specified pod.
51
-  $ %[1]s get -o template pod redis-pod --template={{.currentState.status}}`
51
+  %[1]s get -o template pod redis-pod --template={{.currentState.status}}`
52 52
 )
53 53
 
54 54
 // NewCmdGet is a wrapper for the Kubernetes cli get command
... ...
@@ -66,13 +66,13 @@ const (
66 66
 JSON and YAML formats are accepted.`
67 67
 
68 68
 	replaceExample = `  # Replace a pod using the data in pod.json.
69
-  $ %[1]s replace -f pod.json
69
+  %[1]s replace -f pod.json
70 70
 
71 71
   # Replace a pod based on the JSON passed into stdin.
72
-  $ cat pod.json | %[1]s replace -f -
72
+  cat pod.json | %[1]s replace -f -
73 73
 
74 74
   # Force replace, delete and then re-create the resource
75
-  $ %[1]s replace --force -f pod.json`
75
+  %[1]s replace --force -f pod.json`
76 76
 )
77 77
 
78 78
 // NewCmdReplace is a wrapper for the Kubernetes cli replace command
... ...
@@ -89,7 +89,7 @@ const (
89 89
 JSON and YAML formats are accepted.`
90 90
 
91 91
 	patchExample = `  # Partially update a node using strategic merge patch
92
-  $ %[1]s patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}'`
92
+  %[1]s patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}'`
93 93
 )
94 94
 
95 95
 // NewCmdPatch is a wrapper for the Kubernetes cli patch command
... ...
@@ -113,19 +113,19 @@ submits an update to a resource right when you submit a delete, their update
113 113
 will be lost along with the rest of the resource.`
114 114
 
115 115
 	deleteExample = `  # Delete a pod using the type and ID specified in pod.json.
116
-  $ %[1]s delete -f pod.json
116
+  %[1]s delete -f pod.json
117 117
 
118 118
   # Delete a pod based on the type and ID in the JSON passed into stdin.
119
-  $ cat pod.json | %[1]s delete -f -
119
+  cat pod.json | %[1]s delete -f -
120 120
 
121 121
   # Delete pods and services with label name=myLabel.
122
-  $ %[1]s delete pods,services -l name=myLabel
122
+  %[1]s delete pods,services -l name=myLabel
123 123
 
124 124
   # Delete a pod with ID 1234-56-7890-234234-456456.
125
-  $ %[1]s delete pod 1234-56-7890-234234-456456
125
+  %[1]s delete pod 1234-56-7890-234234-456456
126 126
 
127 127
   # Delete all pods
128
-  $ %[1]s delete pods --all`
128
+  %[1]s delete pods --all`
129 129
 )
130 130
 
131 131
 // NewCmdDelete is a wrapper for the Kubernetes cli delete command
... ...
@@ -144,10 +144,10 @@ const (
144 144
 JSON and YAML formats are accepted.`
145 145
 
146 146
 	createExample = `  # Create a pod using the data in pod.json.
147
-  $ %[1]s create -f pod.json
147
+  %[1]s create -f pod.json
148 148
 
149 149
   # Create a pod based on the JSON passed into stdin.
150
-  $ cat pod.json | %[1]s create -f -`
150
+  cat pod.json | %[1]s create -f -`
151 151
 )
152 152
 
153 153
 // NewCmdCreate is a wrapper for the Kubernetes cli create command
... ...
@@ -174,10 +174,10 @@ const (
174 174
 	execLong = `Execute a command in a container`
175 175
 
176 176
 	execExample = `  # Get output from running 'date' in ruby-container from pod 123456-7890
177
-  $ %[1]s exec -p 123456-7890 -c ruby-container date
177
+  %[1]s exec -p 123456-7890 -c ruby-container date
178 178
 
179 179
   # Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-780 and sends stdout/stderr from 'bash' back to the client
180
-  $ %[1]s exec -p 123456-7890 -c ruby-container -i -t -- bash -il`
180
+  %[1]s exec -p 123456-7890 -c ruby-container -i -t -- bash -il`
181 181
 )
182 182
 
183 183
 // NewCmdExec is a wrapper for the Kubernetes cli exec command
... ...
@@ -193,16 +193,16 @@ const (
193 193
 	portForwardLong = `Forward 1 or more local ports to a pod`
194 194
 
195 195
 	portForwardExample = `  # Listens on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod
196
-  $ %[1]s port-forward -p mypod 5000 6000
196
+  %[1]s port-forward -p mypod 5000 6000
197 197
 
198 198
   # Listens on port 8888 locally, forwarding to 5000 in the pod
199
-  $ %[1]s port-forward -p mypod 8888:5000
199
+  %[1]s port-forward -p mypod 8888:5000
200 200
 
201 201
   # Listens on a random port locally, forwarding to 5000 in the pod
202
-  $ %[1]s port-forward -p mypod :5000
202
+  %[1]s port-forward -p mypod :5000
203 203
 
204 204
   # Listens on a random port locally, forwarding to 5000 in the pod
205
-  $ %[1]s port-forward -p mypod 0:5000`
205
+  %[1]s port-forward -p mypod 0:5000`
206 206
 )
207 207
 
208 208
 // NewCmdPortForward is a wrapper for the Kubernetes cli port-forward command
... ...
@@ -220,10 +220,10 @@ This command joins many API calls together to form a detailed description of a
220 220
 given resource.`
221 221
 
222 222
 	describeExample = `  # Provide details about the ruby-22-centos7 image repository
223
-  $ %[1]s describe imageRepository ruby-22-centos7
223
+  %[1]s describe imageRepository ruby-22-centos7
224 224
 
225 225
   # Provide details about the ruby-sample-build build configuration
226
-  $ %[1]s describe bc ruby-sample-build`
226
+  %[1]s describe bc ruby-sample-build`
227 227
 )
228 228
 
229 229
 // NewCmdDescribe is a wrapper for the Kubernetes cli describe command
... ...
@@ -239,11 +239,11 @@ const (
239 239
 	proxyLong = `Run a proxy to the Kubernetes API server`
240 240
 
241 241
 	proxyExample = `  # Run a proxy to kubernetes apiserver on port 8011, serving static content from ./local/www/
242
-  $ %[1]s proxy --port=8011 --www=./local/www/
242
+  %[1]s proxy --port=8011 --www=./local/www/
243 243
 
244 244
   # Run a proxy to kubernetes apiserver, changing the api prefix to k8s-api
245 245
   # This makes e.g. the pods api available at localhost:8011/k8s-api/v1beta3/pods/
246
-  $ %[1]s proxy --api-prefix=k8s-api`
246
+  %[1]s proxy --api-prefix=k8s-api`
247 247
 )
248 248
 
249 249
 // NewCmdProxy is a wrapper for the Kubernetes cli proxy command
... ...
@@ -266,14 +266,14 @@ Note that scaling a deployment configuration with no deployments will update the
266 266
 desired replicas in the configuration template.`
267 267
 
268 268
 	scaleExample = `  # Scale replication controller named 'foo' to 3.
269
-  $ %[1]s scale --replicas=3 replicationcontrollers foo
269
+  %[1]s scale --replicas=3 replicationcontrollers foo
270 270
 
271 271
   # If the replication controller named foo's current size is 2, scale foo to 3.
272
-  $ %[1]s scale --current-replicas=2 --replicas=3 replicationcontrollers foo
272
+  %[1]s scale --current-replicas=2 --replicas=3 replicationcontrollers foo
273 273
 
274 274
   # Scale the latest deployment of 'bar'. In case of no deployment, bar's template
275 275
   # will be scaled instead.
276
-  $ %[1]s scale --replicas=10 dc bar`
276
+  %[1]s scale --replicas=10 dc bar`
277 277
 )
278 278
 
279 279
 // NewCmdScale is a wrapper for the Kubernetes cli scale command
... ...
@@ -294,10 +294,10 @@ this deployment config or replication controller as a reference. An autoscaler c
294 294
 increase or decrease number of pods deployed within the system as needed.`
295 295
 
296 296
 	autoScaleExample = `  # Auto scale a deployment config "foo", with the number of pods between 2 and 10, target CPU utilization at a default value that the server applies:
297
-  $ %[1]s autoscale dc/foo --min=2 --max=10
297
+  %[1]s autoscale dc/foo --min=2 --max=10
298 298
 
299 299
   # Auto scale a replication controller "foo", with the number of pods between 1 and 5, target CPU utilization at 80%%
300
-  $ %[1]s autoscale rc/foo --max=5 --cpu-percent=80`
300
+  %[1]s autoscale rc/foo --max=5 --cpu-percent=80`
301 301
 )
302 302
 
303 303
 // NewCmdAutoscale is a wrapper for the Kubernetes cli autoscale command
... ...
@@ -317,29 +317,29 @@ foreground for an interactive container execution.  You may pass 'run/v1' to
317 317
 --generator to create a replication controller instead of a deployment config.`
318 318
 
319 319
 	runExample = `  # Starts a single instance of nginx.
320
-  $ %[1]s run nginx --image=nginx
320
+  %[1]s run nginx --image=nginx
321 321
 
322 322
   # Starts a replicated instance of nginx.
323
-  $ %[1]s run nginx --image=nginx --replicas=5
323
+  %[1]s run nginx --image=nginx --replicas=5
324 324
 
325 325
   # Dry run. Print the corresponding API objects without creating them.
326
-  $ %[1]s run nginx --image=nginx --dry-run
326
+  %[1]s run nginx --image=nginx --dry-run
327 327
 
328 328
   # Start a single instance of nginx, but overload the spec of the replication
329 329
   # controller with a partial set of values parsed from JSON.
330
-  $ %[1]s run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }'
330
+  %[1]s run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }'
331 331
 
332 332
   # Start a single instance of nginx and keep it in the foreground, don't restart it if it exits.
333
-  $ %[1]s run -i --tty nginx --image=nginx --restart=Never`
333
+  %[1]s run -i --tty nginx --image=nginx --restart=Never`
334 334
 
335 335
 	// TODO: uncomment these when arguments are delivered upstream
336 336
 
337 337
 	// Start the nginx container using the default command, but use custom
338 338
 	// arguments (arg1 .. argN) for that command.
339
-	//$ %[1]s run nginx --image=nginx -- <arg1> <arg2> ... <argN>
339
+	//%[1]s run nginx --image=nginx -- <arg1> <arg2> ... <argN>
340 340
 
341 341
 	// Start the nginx container using a different command and custom arguments
342
-	//$ %[1]s run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>`
342
+	//%[1]s run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>`
343 343
 )
344 344
 
345 345
 // NewCmdRun is a wrapper for the Kubernetes cli run command
... ...
@@ -363,14 +363,14 @@ Attach the current shell to a remote container, returning output or setting up a
363 363
 terminal session. Can be used to debug containers and invoke interactive commands.`
364 364
 
365 365
 	attachExample = `  # Get output from running pod 123456-7890, using the first container by default
366
-  $ %[1]s attach 123456-7890
366
+  %[1]s attach 123456-7890
367 367
 
368 368
   # Get output from ruby-container from pod 123456-7890
369
-  $ %[1]s attach 123456-7890 -c ruby-container
369
+  %[1]s attach 123456-7890 -c ruby-container
370 370
 
371 371
   # Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-780
372 372
   # and sends stdout/stderr from 'bash' back to the client
373
-  $ %[1]s attach 123456-7890 -c ruby-container -i -t`
373
+  %[1]s attach 123456-7890 -c ruby-container -i -t`
374 374
 )
375 375
 
376 376
 // NewCmdAttach is a wrapper for the Kubernetes cli attach command
... ...
@@ -396,21 +396,21 @@ Run '%[1]s types' for a list of valid resources.`
396 396
 
397 397
 	annotateExample = `  # Update pod 'foo' with the annotation 'description' and the value 'my frontend'.
398 398
   # If the same annotation is set multiple times, only the last value will be applied
399
-  $ %[1]s annotate pods foo description='my frontend'
399
+  %[1]s annotate pods foo description='my frontend'
400 400
 
401 401
   # Update pod 'foo' with the annotation 'description' and the value
402 402
   # 'my frontend running nginx', overwriting any existing value.
403
-  $ %[1]s annotate --overwrite pods foo description='my frontend running nginx'
403
+  %[1]s annotate --overwrite pods foo description='my frontend running nginx'
404 404
 
405 405
   # Update all pods in the namespace
406
-  $ %[1]s annotate pods --all description='my frontend running nginx'
406
+  %[1]s annotate pods --all description='my frontend running nginx'
407 407
 
408 408
   # Update pod 'foo' only if the resource is unchanged from version 1.
409
-  $ %[1]s annotate pods foo description='my frontend running nginx' --resource-version=1
409
+  %[1]s annotate pods foo description='my frontend running nginx' --resource-version=1
410 410
 
411 411
   # Update pod 'foo' by removing an annotation named 'description' if it exists.
412 412
   # Does not require the --overwrite flag.
413
-  $ %[1]s annotate pods foo description-`
413
+  %[1]s annotate pods foo description-`
414 414
 )
415 415
 
416 416
 // NewCmdAnnotate is a wrapper for the Kubernetes cli annotate command
... ...
@@ -431,20 +431,20 @@ specified, then updates will use this resource version, otherwise the existing
431 431
 resource-version will be used.`
432 432
 
433 433
 	labelExample = `  # Update pod 'foo' with the label 'unhealthy' and the value 'true'.
434
-  $ %[1]s label pods foo unhealthy=true
434
+  %[1]s label pods foo unhealthy=true
435 435
 
436 436
   # Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value.
437
-  $ %[1]s label --overwrite pods foo status=unhealthy
437
+  %[1]s label --overwrite pods foo status=unhealthy
438 438
 
439 439
   # Update all pods in the namespace
440
-  $ %[1]s label pods --all status=unhealthy
440
+  %[1]s label pods --all status=unhealthy
441 441
 
442 442
   # Update pod 'foo' only if the resource is unchanged from version 1.
443
-  $ %[1]s label pods foo status=unhealthy --resource-version=1
443
+  %[1]s label pods foo status=unhealthy --resource-version=1
444 444
 
445 445
   # Update pod 'foo' by removing a label named 'bar' if it exists.
446 446
   # Does not require the --overwrite flag.
447
-  $ %[1]s label pods foo bar-`
447
+  %[1]s label pods foo bar-`
448 448
 )
449 449
 
450 450
 // NewCmdLabel is a wrapper for the Kubernetes cli label command
... ...
@@ -461,10 +461,10 @@ const (
461 461
 JSON and YAML formats are accepted.`
462 462
 
463 463
 	applyExample = `# Apply the configuration in pod.json to a pod.
464
-$ %[1]s apply -f ./pod.json
464
+%[1]s apply -f ./pod.json
465 465
 
466 466
 # Apply the JSON passed into stdin to a pod.
467
-$ cat pod.json | %[1]s apply -f -`
467
+cat pod.json | %[1]s apply -f -`
468 468
 )
469 469
 
470 470
 func NewCmdApply(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
... ...
@@ -483,10 +483,10 @@ limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc),
483 483
 resourcequotas (quota), namespaces (ns) or endpoints (ep).`
484 484
 
485 485
 	explainExample = `# Get the documentation of the resource and its fields
486
-$ %[1]s explain pods
486
+%[1]s explain pods
487 487
 
488 488
 # Get the documentation of a specific field of a resource
489
-$ %[1]s explain pods.spec.containers`
489
+%[1]s explain pods.spec.containers`
490 490
 )
491 491
 
492 492
 func NewCmdExplain(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
... ...
@@ -508,14 +508,14 @@ The default output will be printed to stdout in YAML format. One can use -o opti
508 508
 to change the output destination.
509 509
 `
510 510
 	convertExample = `# Convert 'pod.yaml' to latest version and print to stdout.
511
-$ %[1]s convert -f pod.yaml
511
+%[1]s convert -f pod.yaml
512 512
 
513 513
 # Convert the live state of the resource specified by 'pod.yaml' to the latest version
514 514
 # and print to stdout in json format.
515
-$ %[1]s convert -f pod.yaml --local -o json
515
+%[1]s convert -f pod.yaml --local -o json
516 516
 
517 517
 # Convert all files under current directory to latest version and create them all.
518
-$ %[1]s convert -f . | kubectl create -f -
518
+%[1]s convert -f . | kubectl create -f -
519 519
 `
520 520
 )
521 521
 
... ...
@@ -549,16 +549,16 @@ to apply your changes to the newer version of the resource, or update your tempo
549 549
 saved copy to include the latest resource version.`
550 550
 
551 551
 	editExample = `  # Edit the service named 'docker-registry':
552
-  $ %[1]s edit svc/docker-registry
552
+  %[1]s edit svc/docker-registry
553 553
 
554 554
   # Edit the DeploymentConfig named 'my-deployment':
555
-  $ %[1]s edit dc/my-deployment
555
+  %[1]s edit dc/my-deployment
556 556
 
557 557
   # Use an alternative editor
558
-  $ OC_EDITOR="nano" %[1]s edit dc/my-deployment
558
+  OC_EDITOR="nano" %[1]s edit dc/my-deployment
559 559
 
560 560
   # Edit the service 'docker-registry' in JSON using the v1beta3 API format:
561
-  $ %[1]s edit svc/docker-registry --output-version=v1beta3 -o json`
561
+  %[1]s edit svc/docker-registry --output-version=v1beta3 -o json`
562 562
 )
563 563
 
564 564
 func NewCmdEdit(fullName string, f *clientcmd.Factory, out, errout io.Writer) *cobra.Command {
... ...
@@ -35,7 +35,7 @@ itself were making the actions.
35 35
 	getServiceAccountTokenUsage = `%s SA-NAME`
36 36
 
37 37
 	getServiceAccountTokenExamples = `  # Get the service account token from service account 'default'
38
-  $ %[1]s 'default'
38
+  %[1]s 'default'
39 39
 `
40 40
 )
41 41
 
... ...
@@ -42,7 +42,7 @@ be applied to any created token so that tokens created with this command can be
42 42
 	newServiceAccountTokenUsage = `%s SA-NAME`
43 43
 
44 44
 	newServiceAccountTokenExamples = `  # Generate a new token for service account 'default'
45
-  $ %[1]s 'default'
45
+  %[1]s 'default'
46 46
 
47 47
   # Generate a new token for service account 'default' and apply 
48 48
   # labels 'foo' and 'bar' to the new token for identification
... ...
@@ -28,13 +28,13 @@ Add secrets to a ServiceAccount
28 28
 After you have created a secret, you probably want to make use of that secret inside of a pod, for a build, or as an image pull secret.  In order to do that, you must add your secret to a service account.`
29 29
 
30 30
 	addSecretExample = `  // To use your secret inside of a pod or as a push, pull, or source secret for a build, you must add a 'mount' secret to your service account like this:
31
-  $ %[1]s serviceaccount/sa-name secrets/secret-name secrets/another-secret-name
31
+  %[1]s serviceaccount/sa-name secrets/secret-name secrets/another-secret-name
32 32
 
33 33
   // To use your secret as an image pull secret, you must add a 'pull' secret to your service account like this:
34
-  $ %[1]s serviceaccount/sa-name secrets/secret-name --for=pull
34
+  %[1]s serviceaccount/sa-name secrets/secret-name --for=pull
35 35
 
36 36
   // To use your secret for image pulls or inside a pod:
37
-  $ %[1]s serviceaccount/sa-name secrets/secret-name --for=pull,mount`
37
+  %[1]s serviceaccount/sa-name secrets/secret-name --for=pull,mount`
38 38
 )
39 39
 
40 40
 type AddSecretOptions struct {
... ...
@@ -30,13 +30,13 @@ In order for the nodes to clone source code on your behalf, they have to have th
30 30
 this information by creating a 'basicauth' secret and attaching it to your service account.`
31 31
 
32 32
 	createBasicAuthSecretExample = `  // If your basic authentication method requires only a username and password or token, add it by using:
33
-  $ %[1]s SECRET --username=USERNAME --password=PASSWORD
33
+  %[1]s SECRET --username=USERNAME --password=PASSWORD
34 34
 
35 35
   // If your basic authentication method also requires a CA certificate, add it by using:
36
-  $ %[1]s SECRET --username=USERNAME --password=PASSWORD --ca-cert=FILENAME
36
+  %[1]s SECRET --username=USERNAME --password=PASSWORD --ca-cert=FILENAME
37 37
 
38 38
   // If you already have a .gitconfig file needed for authentication, you can create a gitconfig secret by using:
39
-  $ %[2]s SECRET path/to/.gitconfig`
39
+  %[2]s SECRET path/to/.gitconfig`
40 40
 )
41 41
 
42 42
 // CreateBasicAuthSecretOptions holds the credential needed to authenticate against SCM servers.
... ...
@@ -34,16 +34,16 @@ nodes to pull images on your behalf, they have to have the credentials.  You can
34 34
 by creating a dockercfg secret and attaching it to your service account.`
35 35
 
36 36
 	createDockercfgExample = `  # Create a new .dockercfg secret:
37
-  $ %[1]s SECRET --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
37
+  %[1]s SECRET --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
38 38
 
39 39
   # Create a new .dockercfg secret from an existing file:
40
-  $ %[2]s SECRET path/to/.dockercfg
40
+  %[2]s SECRET path/to/.dockercfg
41 41
 
42 42
   # Create a new .docker/config.json secret from an existing file:
43
-  $ %[2]s SECRET .dockerconfigjson=path/to/.docker/config.json
43
+  %[2]s SECRET .dockerconfigjson=path/to/.docker/config.json
44 44
 
45 45
   # To add new secret to 'imagePullSecrets' for the node, or 'secrets' for builds, use:
46
-  $ %[3]s SERVICE_ACCOUNT`
46
+  %[3]s SERVICE_ACCOUNT`
47 47
 )
48 48
 
49 49
 type CreateDockerConfigOptions struct {
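
Editor's note: several of these secrets helpers interpolate more than one command name, hence the indexed verbs. %[1]s, %[2]s, and %[3]s each select an explicit Sprintf argument and can repeat or reorder freely, unlike a bare %s. A compact illustration; the three full names passed in are plausible stand-ins, not copied from the constructor:

package main

import "fmt"

const createDockercfgExample = `  # Create a new .dockercfg secret:
  %[1]s SECRET --docker-server=SERVER --docker-username=USER --docker-password=PASSWORD --docker-email=EMAIL

  # Create a new .dockercfg secret from an existing file:
  %[2]s SECRET path/to/.dockercfg

  # To add the new secret to 'imagePullSecrets' for the node, use:
  %[3]s SERVICE_ACCOUNT`

func main() {
	// Each %[n]s picks the n-th argument, independent of position.
	fmt.Printf(createDockercfgExample+"\n",
		"oc secrets new-dockercfg", // %[1]s
		"oc secrets new",           // %[2]s
		"oc secrets add")           // %[3]s
}
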
... ...
@@ -30,19 +30,19 @@ using with all valid keys in that directory.
30 30
 `
31 31
 
32 32
 	newExample = `  # Create a new secret named my-secret with a key named ssh-privatekey
33
-  $ %[1]s my-secret ~/.ssh/ssh-privatekey
33
+  %[1]s my-secret ~/.ssh/ssh-privatekey
34 34
 
35 35
   # Create a new secret named my-secret with keys named ssh-privatekey and ssh-publickey instead of the names of the keys on disk
36
-  $ %[1]s my-secret ssh-privatekey=~/.ssh/id_rsa ssh-publickey=~/.ssh/id_rsa.pub
36
+  %[1]s my-secret ssh-privatekey=~/.ssh/id_rsa ssh-publickey=~/.ssh/id_rsa.pub
37 37
 
38 38
   # Create a new secret named my-secret with keys for each file in the folder "bar"
39
-  $ %[1]s my-secret path/to/bar
39
+  %[1]s my-secret path/to/bar
40 40
 
41 41
   # Create a new .dockercfg secret named my-secret
42
-  $ %[1]s my-secret path/to/.dockercfg
42
+  %[1]s my-secret path/to/.dockercfg
43 43
 
44 44
   # Create a new .docker/config.json secret named my-secret
45
-  $ %[1]s my-secret .dockerconfigjson=path/to/.docker/config.json`
45
+  %[1]s my-secret .dockerconfigjson=path/to/.docker/config.json`
46 46
 )
47 47
 
48 48
 type CreateSecretOptions struct {
... ...
@@ -27,13 +27,13 @@ In order for the nodes to clone source code on your behalf, they have to have th
27 27
 provide this information by creating a 'sshauth' secret and attaching it to your service account.`
28 28
 
29 29
 	createSSHAuthSecretExample = `  // If your SSH authentication method requires only a private SSH key, add it by using:
30
-  $ %[1]s SECRET --ssh-privatekey=FILENAME
30
+  %[1]s SECRET --ssh-privatekey=FILENAME
31 31
 
32 32
   // If your SSH authentication method also requires a CA certificate, add it by using:
33
-  $ %[1]s SECRET --ssh-privatekey=FILENAME --ca-cert=FILENAME
33
+  %[1]s SECRET --ssh-privatekey=FILENAME --ca-cert=FILENAME
34 34
 
35 35
   // If you already have a .gitconfig file needed for authentication, you can create a gitconfig secret by using:
36
-  $ %[2]s SECRET path/to/.gitconfig`
36
+  %[2]s SECRET path/to/.gitconfig`
37 37
 )
38 38
 
39 39
 // CreateSSHAuthSecretOptions holds the credential needed to authenticate against SCM servers.
... ...
@@ -29,13 +29,13 @@ Tag and namespace are optional and if they are not specified, 'latest' and the
29 29
 default namespace will be used respectively.`
30 30
 
31 31
 	buildChainExample = `  # Build the dependency tree for the 'latest' tag in <image-stream>
32
-  $ %[1]s <image-stream>
32
+  %[1]s <image-stream>
33 33
 
34 34
   # Build the dependency tree for 'v2' tag in dot format and visualize it via the dot utility
35
-  $ %[1]s <image-stream>:v2 -o dot | dot -T svg -o deps.svg
35
+  %[1]s <image-stream>:v2 -o dot | dot -T svg -o deps.svg
36 36
 
37 37
   # Build the dependency tree across all namespaces for the specified image stream tag found in 'test' namespace
38
-  $ %[1]s <image-stream> -n test --all`
38
+  %[1]s <image-stream> -n test --all`
39 39
 )
40 40
 
41 41
 // BuildChainRecommendedCommandName is the recommended command name
... ...
@@ -32,21 +32,21 @@ to ensure you have failover protection, and that you provide a --replicas=<n>
32 32
 value that matches the number of nodes for the given labeled selector.`
33 33
 
34 34
 	ipFailover_example = `  # Check the default IP failover configuration ("ipfailover"):
35
-  $ %[1]s %[2]s
35
+  %[1]s %[2]s
36 36
 
37 37
   # See what the IP failover configuration would look like if it is created:
38
-  $ %[1]s %[2]s -o json
38
+  %[1]s %[2]s -o json
39 39
 
40 40
   # Create an IP failover configuration if it does not already exist:
41
-  $ %[1]s %[2]s ipf --virtual-ips="10.1.1.1-4" --create
41
+  %[1]s %[2]s ipf --virtual-ips="10.1.1.1-4" --create
42 42
 
43 43
   # Create an IP failover configuration on a selection of nodes labeled
44 44
   # "router=us-west-ha" (on 4 nodes with 7 virtual IPs monitoring a service
45 45
   # listening on port 80, such as the router process).
46
-  $ %[1]s %[2]s ipfailover --selector="router=us-west-ha" --virtual-ips="1.2.3.4,10.1.1.100-104,5.6.7.8" --watch-port=80 --replicas=4 --create
46
+  %[1]s %[2]s ipfailover --selector="router=us-west-ha" --virtual-ips="1.2.3.4,10.1.1.100-104,5.6.7.8" --watch-port=80 --replicas=4 --create
47 47
 
48 48
   # Use a different IP failover config image and see the configuration:
49
-  $ %[1]s %[2]s ipf-alt --selector="hagroup=us-west-ha" --virtual-ips="1.2.3.4" -o yaml --images=myrepo/myipfailover:mytag`
49
+  %[1]s %[2]s ipf-alt --selector="hagroup=us-west-ha" --virtual-ips="1.2.3.4" -o yaml --images=myrepo/myipfailover:mytag`
50 50
 )
51 51
 
52 52
 func NewCmdIPFailoverConfig(f *clientcmd.Factory, parentName, name string, out, errout io.Writer) *cobra.Command {
... ...
@@ -36,7 +36,7 @@ const (
36 36
 The %[3]s helps you build, deploy, and manage your applications on top of
37 37
 Docker containers. To start an all-in-one server with the default configuration, run:
38 38
 
39
-  $ %[1]s start &`
39
+  %[1]s start &`
40 40
 )
41 41
 
42 42
 // CommandFor returns the appropriate command for this base name,
... ...
@@ -34,8 +34,8 @@ Create an RSA key pair and generate PEM-encoded public/private key files
34 34
 
35 35
 Example: Creating service account signing and authenticating key files:
36 36
 
37
-    $ CONFIG=openshift.local.config/master
38
-    $ %[1]s --public-key=$CONFIG/serviceaccounts.public.key --private-key=$CONFIG/serviceaccounts.private.key
37
+    CONFIG=openshift.local.config/master
38
+    %[1]s --public-key=$CONFIG/serviceaccounts.public.key --private-key=$CONFIG/serviceaccounts.private.key
39 39
 `
40 40
 
41 41
 func NewCommandCreateKeyPair(commandName string, fullName string, out io.Writer) *cobra.Command {
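
Under the hood this amounts to generating an RSA key and PEM-encoding both halves. A minimal sketch, assuming PKCS#1 for the private key and PKIX for the public key (the real command's encodings and key size may differ):

[source,go,options="nowrap"]
----
package certs

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"os"
)

// writeKeyPair generates a 2048-bit RSA key and writes PEM files for
// the private and public halves.
func writeKeyPair(publicPath, privatePath string) error {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return err
	}
	privPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	pubDER, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		return err
	}
	pubPEM := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pubDER})
	if err := os.WriteFile(privatePath, privPEM, 0600); err != nil {
		return err
	}
	return os.WriteFile(publicPath, pubPEM, 0644)
}
----
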
... ...
@@ -50,8 +50,8 @@ would recreate ALL certs including the CA cert, invalidating any existing
50 50
 infrastructure or client configuration. Instead, delete/rename the existing
51 51
 server cert and run the command to fill it in:
52 52
 
53
-    $ mv openshift.local.config/master/master.server.crt{,.old}
54
-    $ %[1]s --cert-dir=... \
53
+    mv openshift.local.config/master/master.server.crt{,.old}
54
+    %[1]s --cert-dir=... \
55 55
             --master=https://internal.master.fqdn:8443 \
56 56
             --public-master=https://external.master.fqdn:8443 \
57 57
             --hostnames=external.master.fqdn,internal.master.fqdn,localhost,127.0.0.1,172.17.42.1,kubernetes.default.local
... ...
@@ -36,12 +36,12 @@ components such as the router, authentication server, etc.
36 36
 
37 37
 Example: Creating a secure router certificate.
38 38
 
39
-    $ CA=openshift.local.config/master
40
-	$ %[1]s --signer-cert=$CA/ca.crt \
39
+    CA=openshift.local.config/master
40
+	%[1]s --signer-cert=$CA/ca.crt \
41 41
 	          --signer-key=$CA/ca.key --signer-serial=$CA/ca.serial.txt \
42 42
 	          --hostnames='*.cloudapps.example.com' \
43 43
 	          --cert=cloudapps.crt --key=cloudapps.key
44
-    $ cat cloudapps.crt cloudapps.key $CA/ca.crt > cloudapps.router.pem
44
+    cat cloudapps.crt cloudapps.key $CA/ca.crt > cloudapps.router.pem
45 45
 `
46 46
 
47 47
 func NewCommandCreateServerCert(commandName string, fullName string, out io.Writer) *cobra.Command {
... ...
@@ -37,10 +37,10 @@ type DecryptOptions struct {
37 37
 }
38 38
 
39 39
 const decryptExample = `	# Decrypt an encrypted file to a cleartext file:
40
-	$ %[1]s --key=secret.key --in=secret.encrypted --out=secret.decrypted
40
+	%[1]s --key=secret.key --in=secret.encrypted --out=secret.decrypted
41 41
 	
42 42
 	# Decrypt from stdin to stdout:
43
-	$ %[1]s --key=secret.key < secret2.encrypted > secret2.decrypted
43
+	%[1]s --key=secret.key < secret2.encrypted > secret2.decrypted
44 44
 `
45 45
 
46 46
 func NewCommandDecrypt(commandName string, fullName, encryptFullName string, out io.Writer) *cobra.Command {
... ...
@@ -46,10 +46,10 @@ type EncryptOptions struct {
46 46
 }
47 47
 
48 48
 const encryptExample = `	# Encrypt the content of secret.txt with a generated key:
49
-	$ %[1]s --genkey=secret.key --in=secret.txt --out=secret.encrypted
49
+	%[1]s --genkey=secret.key --in=secret.txt --out=secret.encrypted
50 50
 	
51 51
 	# Encrypt the content of secret2.txt with an existing key:
52
-	$ %[1]s --key=secret.key < secret2.txt > secret2.encrypted
52
+	%[1]s --key=secret.key < secret2.txt > secret2.encrypted
53 53
 `
54 54
 
55 55
 func NewCommandEncrypt(commandName string, fullName string, out io.Writer, errout io.Writer) *cobra.Command {
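
The command name describes its mechanics: AES in CBC mode with a 32-byte key, i.e. AES-256. A hedged sketch of the encryption path, assuming PKCS#7 padding and a random IV prepended to the ciphertext (the real command also handles key generation, stdin/stdout framing, and decryption, and its on-disk format may differ):

[source,go,options="nowrap"]
----
package encrypt

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"io"
)

// encryptCBC pads the plaintext to the AES block size, prepends a
// random IV, and encrypts with AES-256-CBC (key must be 32 bytes).
func encryptCBC(key, plaintext []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	pad := aes.BlockSize - len(plaintext)%aes.BlockSize
	plaintext = append(plaintext, bytes.Repeat([]byte{byte(pad)}, pad)...)
	out := make([]byte, aes.BlockSize+len(plaintext))
	iv := out[:aes.BlockSize]
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		return nil, err
	}
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(out[aes.BlockSize:], plaintext)
	return out, nil
}
----
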
... ...
@@ -43,7 +43,7 @@ Start an all-in-one server
43 43
 This command helps you launch an all-in-one server, which allows you to run all of the
44 44
 components of an enterprise Kubernetes system on a server with Docker. Running:
45 45
 
46
-  $ %[1]s start
46
+  %[1]s start
47 47
 
48 48
 will start listening on all interfaces, launch an etcd server to store persistent
49 49
 data, and launch the Kubernetes system components. The server will run in the foreground until
... ...
@@ -20,7 +20,7 @@ const apiLong = `Start the master API
20 20
 
21 21
 This command starts the master API.  Running
22 22
 
23
-  $ %[1]s start master %[2]s
23
+  %[1]s start master %[2]s
24 24
 
25 25
 will start the server listening for incoming API requests. The server
26 26
 will run in the foreground until you terminate the process.`
... ...
@@ -19,7 +19,7 @@ const controllersLong = `Start the master controllers
19 19
 
20 20
 This command starts the controllers for the master.  Running
21 21
 
22
-  $ %[1]s start master %[2]s
22
+  %[1]s start master %[2]s
23 23
 
24 24
 will start the controllers that manage the master state, including the scheduler. The controllers
25 25
 will run in the foreground until you terminate the process.`
... ...
@@ -32,7 +32,7 @@ const etcdLong = `Start an etcd server for testing.
32 32
 This command starts an etcd server based on the config for testing.  It is not 
33 33
 intended for production use.  Running
34 34
 
35
-  $ %[1]s start %[2]s
35
+  %[1]s start %[2]s
36 36
 
37 37
 will start the server listening for incoming requests. The server
38 38
 will run in the foreground until you terminate the process.`
... ...
@@ -58,7 +58,7 @@ const masterLong = `Start a master server
58 58
 
59 59
 This command helps you launch a master server.  Running
60 60
 
61
-  $ %[1]s start master
61
+  %[1]s start master
62 62
 
63 63
 will start a master listening on all interfaces, launch an etcd server to store
64 64
 persistent data, and launch the Kubernetes system components. The server will run in the
... ...
@@ -37,7 +37,7 @@ Start a node
37 37
 
38 38
 This command helps you launch a node.  Running
39 39
 
40
-  $ %[1]s start node --config=<node-config>
40
+  %[1]s start node --config=<node-config>
41 41
 
42 42
 will start a node with the given configuration file. The node will run in the
43 43
 foreground until you terminate the process.`
... ...
@@ -75,7 +75,7 @@ Start node network components
75 75
 
76 76
 This command helps you launch node networking.  Running
77 77
 
78
-  $ %[1]s start network --config=<node-config>
78
+  %[1]s start network --config=<node-config>
79 79
 
80 80
 will start the network proxy and SDN plugins with the given configuration file. The proxy will
81 81
 run in the foreground until you terminate the process.`
... ...
@@ -44,15 +44,15 @@ function. Comment lines beginning with '---' or 'TODO' are ignored.
44 44
 `
45 45
 
46 46
 	genSwaggerDocUsage = `Usage:
47
-  $ %s [--input=GO-FILE] [--output=GENERATED-FILE] [--verify]
47
+  %s [--input=GO-FILE] [--output=GENERATED-FILE] [--verify]
48 48
 `
49 49
 
50 50
 	genSwaggerDocExamples = `Examples:
51 51
   # Generate 'SwaggerDoc' methods to file 'swagger_doc_generated.go' for objects in file 'types.go'
52
-  $ %[1]s --input=types.go --output=swagger_doc_generated.go
52
+  %[1]s --input=types.go --output=swagger_doc_generated.go
53 53
 
54 54
   # Verify that types in 'types.go' are sufficiently documented
55
-  $ %[1]s --input=types.go --verify=true
55
+  %[1]s --input=types.go --verify=true
56 56
 `
57 57
 )
58 58
 
... ...
@@ -53,7 +53,7 @@ const (
53 53
 %[1]s consumes test output through Stdin and creates jUnit XML files. Currently, only the output of 'go test'
54 54
 and the output of 'oscmd' functions with $JUNIT_REPORT_OUTPUT set are supported. jUnit XML can be built with
55 55
 nested or flat test suites. Sub-trees of test suites can be selected when using the nested test-suites represen-
56
-tation to only build XML for some subset of the test output. This parser is greedy, so all output not directly 
56
+tation to only build XML for some subset of the test output. This parser is greedy, so all output not directly
57 57
 related to a test suite is considered test case output.
58 58
 `
59 59
 
... ...
@@ -64,28 +64,28 @@ related to a test suite is considered test case output.
64 64
 
65 65
 	junitReportExamples = `Examples:
66 66
   # Consume 'go test' output to create a jUnit XML file
67
-  $ go test -v -cover ./... | %[1]s > report.xml
67
+  go test -v -cover ./... | %[1]s > report.xml
68 68
 
69 69
   # Consume 'go test' output to create a jUnit XML file, while also printing package output as it is generated
70
-  $ go test -v -cover ./... | %[1]s --stream > report.xml
70
+  go test -v -cover ./... | %[1]s --stream > report.xml
71 71
 
72 72
   # Consume 'go test' output from a file to create a jUnit XML file
73
-  $ %[1]s -f testoutput.txt > report.xml
73
+  %[1]s -f testoutput.txt > report.xml
74 74
 
75 75
   # Consume 'go test' output to create a specific jUnit XML file
76
-  $ %[1]s --output report.xml
76
+  %[1]s --output report.xml
77 77
 
78 78
   # Consume 'go test' output to create a jUnit XML file with nested test suites
79
-  $ go test -v -cover ./... | junitreport --suites=nested > report.xml
79
+  go test -v -cover ./... | junitreport --suites=nested > report.xml
80 80
 
81 81
   # Consume 'go test' output to create a jUnit XML file with nested test suites rooted at 'github.com/maintainer'
82
-  $ go test -v -cover ./... | junitreport --suites=nested --roots=github.com/maintainer > report.xml
82
+  go test -v -cover ./... | junitreport --suites=nested --roots=github.com/maintainer > report.xml
83 83
 
84 84
   # Describe failures and skipped tests in an existing jUnit XML file
85
-  $ cat report.xml | %[1]s summarize
85
+  cat report.xml | %[1]s summarize
86 86
 
87 87
   # Consume 'os::cmd' output to create a jUnit XML file
88
-  $ JUNIT_REPORT='true' hack/test-cmd.sh | junitreport --type=os::cmd > report.xml
88
+  JUNIT_REPORT='true' hack/test-cmd.sh | junitreport --type=os::cmd > report.xml
89 89
 `
90 90
 )
91 91
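
To make the flat-versus-nested distinction concrete, here is a hedged sketch of the two XML shapes using encoding/xml; struct names and attributes are simplified, and junitreport's real schema carries more fields (time, properties, failure details, and so on):

[source,go,options="nowrap"]
----
package main

import (
	"encoding/xml"
	"os"
)

type testCase struct {
	Name    string `xml:"name,attr"`
	Failure string `xml:"failure,omitempty"`
}

type testSuite struct {
	XMLName xml.Name    `xml:"testsuite"`
	Name    string      `xml:"name,attr"`
	Tests   int         `xml:"tests,attr"`
	Cases   []testCase  `xml:"testcase"`
	Suites  []testSuite `xml:"testsuite,omitempty"` // populated only with --suites=nested
}

func main() {
	// With --suites=nested --roots=github.com/maintainer, package suites
	// hang off a root suite instead of sitting side by side.
	root := testSuite{
		Name:  "github.com/maintainer",
		Tests: 1,
		Suites: []testSuite{{
			Name:  "github.com/maintainer/pkg",
			Tests: 1,
			Cases: []testCase{{Name: "TestExample"}},
		}},
	}
	enc := xml.NewEncoder(os.Stdout)
	enc.Indent("", "  ")
	if err := enc.Encode(root); err != nil {
		panic(err)
	}
}
----
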