:toc: macro
:toc-title:

toc::[]


== oadm build-chain
Output the inputs and dependencies of your builds

====

[options="nowrap"]
----
  # Build the dependency tree for the 'latest' tag in <image-stream>
  $ oadm build-chain <image-stream>

  # Build the dependency tree for 'v2' tag in dot format and visualize it via the dot utility
  $ oadm build-chain <image-stream>:v2 -o dot | dot -T svg -o deps.svg

  # Build the dependency tree across all namespaces for the specified image stream tag found in the 'test' namespace
  $ oadm build-chain <image-stream> -n test --all
----
====
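
The `-o dot` output can be rendered into any format supported by the Graphviz `dot` utility; for example, a PNG instead of an SVG (this assumes Graphviz is installed locally):

[options="nowrap"]
----
  # Render the dependency tree for the 'latest' tag as a PNG image
  $ oadm build-chain <image-stream> -o dot | dot -T png -o deps.png
----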


== oadm config
Change configuration files for the client

====

[options="nowrap"]
----
  # Change the config context to use
  $ oadm config use-context my-context

  # Set the value of a config preference
  $ oadm config set preferences.some true
----
====


== oadm config set-cluster
Sets a cluster entry in kubeconfig

====

[options="nowrap"]
----
  # Set only the server field on the e2e cluster entry without touching other values.
  $ oadm config set-cluster e2e --server=https://1.2.3.4
  
  # Embed certificate authority data for the e2e cluster entry
  $ oadm config set-cluster e2e --certificate-authority=~/.kube/e2e/kubernetes.ca.crt --embed-certs=true

  # Disable cert checking for the e2e cluster entry
  $ oadm config set-cluster e2e --insecure-skip-tls-verify=true
----
====
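
For reference, a `set-cluster` invocation like the examples above leaves an entry of roughly the following shape in the `clusters` section of your kubeconfig file (a sketch; field order and neighboring entries will vary):

[options="nowrap"]
----
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: https://1.2.3.4
  name: e2e
----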


== oadm config set-context
Sets a context entry in kubeconfig

====

[options="nowrap"]
----
  # Set the user field on the gce context entry without touching other values
  $ oadm config set-context gce --user=cluster-admin
----
====
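
A context can also be created from scratch by combining the `--cluster`, `--user`, and `--namespace` flags; the context and entry names below are illustrative:

[options="nowrap"]
----
  # Create a context tying the e2e cluster to the cluster-admin user in the default namespace
  $ oadm config set-context e2e-admin --cluster=e2e --user=cluster-admin --namespace=default

  # Switch to the new context
  $ oadm config use-context e2e-admin
----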


== oadm config set-credentials
Sets a user entry in kubeconfig

====

[options="nowrap"]
----
  # Set only the "client-key" field on the "cluster-admin"
  # entry, without touching other values:
  $ oadm config set-credentials cluster-admin --client-key=~/.kube/admin.key
  
  # Set basic auth for the "cluster-admin" entry
  $ oadm config set-credentials cluster-admin --username=admin --password=uXFGweU9l35qcif
  
  # Embed client certificate data in the "cluster-admin" entry
  $ oadm config set-credentials cluster-admin --client-certificate=~/.kube/admin.crt --embed-certs=true
----
====
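
Bearer-token authentication is supported as well via the `--token` flag; the token value below is a placeholder:

[options="nowrap"]
----
  # Set token auth for the "cluster-admin" entry
  $ oadm config set-credentials cluster-admin --token=<bearer-token>
----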


== oadm config view
Displays merged kubeconfig settings or a specified kubeconfig file.

====

[options="nowrap"]
----
  # Show merged kubeconfig settings
  $ oadm config view
  
  # Get the password for the e2e user
  $ oadm config view -o template --template='{{range .users}}{{ if eq .name "e2e" }}{{ index .user.password }}{{end}}{{end}}'
----
====
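
Because `oadm config` mirrors `kubectl config`, `view` should also accept `--minify` and `--flatten` to produce a self-contained kubeconfig for the current context; verify against your client version:

[options="nowrap"]
----
  # Show only the settings for the current context, with certificate data inlined
  $ oadm config view --minify --flatten
----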


== oadm groups add-users
Add users to a group

====

[options="nowrap"]
----
  # Add user1 and user2 to my-group
  $ oadm groups add-users my-group user1 user2
----
====


== oadm groups new
Create a new group

====

[options="nowrap"]
----
  # Add a group with no users
  $ oadm groups new my-group

  # Add a group with two users
  $ oadm groups new my-group user1 user2
----
====


== oadm groups prune
Prune OpenShift groups referencing missing records on an external provider.

====

[options="nowrap"]
----
  # Prune all orphaned groups 
  $ oadm groups prune --sync-config=/path/to/ldap-sync-config.yaml --confirm

  # Prune all orphaned groups except the ones from the blacklist file
  $ oadm groups prune --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm

  # Prune orphaned groups limited to those listed in a whitelist file
  $ oadm groups prune --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm

  # Prune orphaned groups limited to the specific groups named as arguments
  $ oadm groups prune groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm
----
====
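
The blacklist and whitelist files are plain text. To the best of our understanding they list one group per line, identified by the same group UIDs the sync configuration resolves (a hypothetical LDAP example):

[options="nowrap"]
----
cn=developers,ou=groups,dc=example,dc=com
cn=admins,ou=groups,dc=example,dc=com
----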


== oadm groups remove-users
Remove users from a group

====

[options="nowrap"]
----
  # Remove user1 and user2 from my-group
  $ oadm groups remove-users my-group user1 user2
----
====


== oadm groups sync
Sync OpenShift groups with records from an external provider.

====

[options="nowrap"]
----
  # Sync all groups from an LDAP server
  $ oadm groups sync --sync-config=/path/to/ldap-sync-config.yaml --confirm

  # Sync all groups except the ones from the blacklist file from an LDAP server
  $ oadm groups sync --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm

  # Sync specific groups specified in a whitelist file with an LDAP server
  $ oadm groups sync --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm

  # Sync all OpenShift Groups that have been synced previously with an LDAP server
  $ oadm groups sync --type=openshift --sync-config=/path/to/ldap-sync-config.yaml --confirm

  # Sync specific OpenShift Groups if they have been synced previously with an LDAP server
  $ oadm groups sync groups/group1 groups/group2 groups/group3 --sync-config=/path/to/ldap-sync-config.yaml --confirm
----
====
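
For orientation, a minimal `ldap-sync-config.yaml` using the RFC 2307 schema might look roughly like the sketch below; exact fields depend on your directory layout and OpenShift version, so treat it as an illustration rather than a reference:

[options="nowrap"]
----
kind: LDAPSyncConfig
apiVersion: v1
url: ldap://ldap.example.com:389
insecure: true
rfc2307:
  groupsQuery:
    baseDN: "ou=groups,dc=example,dc=com"
    scope: sub
    derefAliases: never
    filter: (objectClass=groupOfNames)
  groupUIDAttribute: dn
  groupNameAttributes: [ cn ]
  groupMembershipAttributes: [ member ]
  usersQuery:
    baseDN: "ou=users,dc=example,dc=com"
    scope: sub
    derefAliases: never
  userUIDAttribute: dn
  userNameAttributes: [ uid ]
----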


== oadm ipfailover
Install an IP failover group to a set of nodes

====

[options="nowrap"]
----
  # Check the default IP failover configuration ("ipfailover"):
  $ oadm ipfailover

  # See what the IP failover configuration would look like if it is created:
  $ oadm ipfailover -o json

  # Create an IP failover configuration if it does not already exist:
  $ oadm ipfailover ipf --virtual-ips="10.1.1.1-4" --create

  # Create an IP failover configuration on a selection of nodes labeled
  # "router=us-west-ha" (on 4 nodes with 7 virtual IPs monitoring a service
  # listening on port 80, such as the router process).
  $ oadm ipfailover ipfailover --selector="router=us-west-ha" --virtual-ips="1.2.3.4,10.1.1.100-104,5.6.7.8" --watch-port=80 --replicas=4 --create

  # Use a different IP failover config image and see the configuration:
  $ oadm ipfailover ipf-alt --selector="hagroup=us-west-ha" --virtual-ips="1.2.3.4" -o yaml --images=myrepo/myipfailover:mytag
----
====
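
If the VRRP traffic needs to be bound to a particular network interface, the command offers an `--interface` flag; confirm it with `oadm ipfailover --help` on your version:

[options="nowrap"]
----
  # Bind the IP failover configuration to a specific network interface
  $ oadm ipfailover ipf --virtual-ips="10.1.1.1-4" --interface=eth1 --create
----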


== oadm manage-node
Manage nodes - list pods, evacuate, or mark ready

====

[options="nowrap"]
----
  # Block accepting any pods on given nodes
  $ oadm manage-node <mynode> --schedulable=false

  # Mark selected nodes as schedulable
  $ oadm manage-node --selector="<env=dev>" --schedulable=true

  # Migrate selected pods
  $ oadm manage-node <mynode> --evacuate --pod-selector="<service=myapp>"

  # Show pods that would be migrated
  $ oadm manage-node <mynode> --evacuate --dry-run --pod-selector="<service=myapp>"

  # List all pods on given nodes
  $ oadm manage-node <mynode1> <mynode2> --list-pods
----
====
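
The `--selector` and `--list-pods` flags shown above can be combined to inspect pods across every node matching a label in one call:

[options="nowrap"]
----
  # List all pods on nodes matching a label selector
  $ oadm manage-node --selector="<env=dev>" --list-pods
----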


== oadm pod-network join-projects
Join project network

====

[options="nowrap"]
----
  # Allow project p2 to use project p1 network
  $ oadm pod-network join-projects --to=<p1> <p2>

  # Allow all projects with label name=top-secret to use project p1 network
  $ oadm pod-network join-projects --to=<p1> --selector='name=top-secret'
----
====


== oadm pod-network make-projects-global
Make project network global

====

[options="nowrap"]
----
  # Allow project p1 to access all pods in the cluster and vice versa
  $ oadm pod-network make-projects-global <p1>

  # Allow all projects with label name=share to access all pods in the cluster and vice versa
  $ oadm pod-network make-projects-global --selector='name=share'
----
====


== oadm policy reconcile-cluster-role-bindings
Replace cluster role bindings to match the recommended bootstrap policy

====

[options="nowrap"]
----
  # Display the cluster role bindings that would be modified
  $ oadm policy reconcile-cluster-role-bindings

  # Display the cluster role bindings that would be modified, removing any extra subjects
  $ oadm policy reconcile-cluster-role-bindings --additive-only=false

  # Update cluster role bindings that don't match the current defaults
  $ oadm policy reconcile-cluster-role-bindings --confirm

  # Update cluster role bindings that don't match the current defaults, avoid adding roles to the system:authenticated group
  $ oadm policy reconcile-cluster-role-bindings --confirm --exclude-groups=system:authenticated

  # Update cluster role bindings that don't match the current defaults, removing any extra subjects from the binding
  $ oadm policy reconcile-cluster-role-bindings --confirm --additive-only=false
----
====
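
Alongside `--exclude-groups` there is normally a matching `--exclude-users` flag (check `--help` on your version); if present, individual users can be protected the same way:

[options="nowrap"]
----
  # Update cluster role bindings without touching bindings for a specific user
  $ oadm policy reconcile-cluster-role-bindings --confirm --exclude-users=system:admin
----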


== oadm policy reconcile-cluster-roles
Replace cluster roles to match the recommended bootstrap policy

====

[options="nowrap"]
----
  # Display the cluster roles that would be modified
  $ oadm policy reconcile-cluster-roles

  # Replace cluster roles that don't match the current defaults
  $ oadm policy reconcile-cluster-roles --confirm

  # Display the union of the default and modified cluster roles
  $ oadm policy reconcile-cluster-roles --additive-only
----
====


== oadm policy reconcile-sccs
Replace cluster SCCs to match the recommended bootstrap policy

====

[options="nowrap"]
----
  # Display the cluster SCCs that would be modified
  $ oadm policy reconcile-sccs

  # Update cluster SCCs that don't match the current defaults, preserving additional grants
  # for users and groups and keeping any priorities that are already set
  $ oadm policy reconcile-sccs --confirm

  # Replace existing users, groups, and priorities that do not match defaults
  $ oadm policy reconcile-sccs --additive-only=false --confirm
----
====


== oadm prune groups
Prune OpenShift groups referencing missing records on an external provider.

====

[options="nowrap"]
----
  # Prune all orphaned groups
  $ oadm prune groups --sync-config=/path/to/ldap-sync-config.yaml --confirm

  # Prune all orphaned groups except the ones from the blacklist file
  $ oadm prune groups --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm

  # Prune orphaned groups limited to those listed in a whitelist file
  $ oadm prune groups --whitelist=/path/to/whitelist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm

  # Prune orphaned groups limited to the specific groups named as arguments
  $ oadm prune groups groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm
----
====


== oadm registry
Install the integrated Docker registry

====

[options="nowrap"]
----
  # Check if the default Docker registry ("docker-registry") has been created
  $ oadm registry --dry-run

  # See what the registry will look like if created
  $ oadm registry -o json --credentials=/path/to/registry-user.kubeconfig

  # Create a registry with two replicas if it does not exist
  $ oadm registry --replicas=2 --credentials=/path/to/registry-user.kubeconfig

  # Use a different registry image and see the registry configuration
  $ oadm registry -o yaml --images=myrepo/docker-registry:mytag --credentials=/path/to/registry-user.kubeconfig
----
====
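
Like the router, the registry can typically be pinned to dedicated infrastructure nodes with a `--selector`, assuming the flag is available in your version:

[options="nowrap"]
----
  # Create a registry on nodes labeled region=infra
  $ oadm registry --selector="region=infra" --credentials=/path/to/registry-user.kubeconfig
----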


== oadm router
Install a router

====

[options="nowrap"]
----
  # Check the default router ("router")
  $ oadm router --dry-run

  # See what the router would look like if created
  $ oadm router -o json --credentials=/path/to/openshift-router.kubeconfig --service-account=myserviceaccount

  # Create a router if it does not exist
  $ oadm router router-west --credentials=/path/to/openshift-router.kubeconfig --service-account=myserviceaccount --replicas=2

  # Use a different router image and see the router configuration
  $ oadm router region-west -o yaml --credentials=/path/to/openshift-router.kubeconfig --service-account=myserviceaccount --images=myrepo/somerouter:mytag

  # Run the router with a hint to the underlying implementation to _not_ expose statistics.
  $ oadm router router-west --credentials=/path/to/openshift-router.kubeconfig --service-account=myserviceaccount --stats-port=0
----
====
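
Router placement can likewise be restricted to labeled nodes via `--selector`; check `oadm router --help` to confirm the flag on your version:

[options="nowrap"]
----
  # Create a router on nodes labeled region=infra
  $ oadm router router-west --selector="region=infra" --credentials=/path/to/openshift-router.kubeconfig --service-account=myserviceaccount
----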