Browse code

Fix typos

To find potential typos:

1. Install tool:
go get -u github.com/client9/misspell/cmd/misspell

2. Fix some typos:
git ls-files -- \
`# list files in these directories:` \
docs/proposals examples hack images pkg test | \
`# ignore assets from Web Console.` \
grep -v '^pkg/assets/' | \
`# ignore "strat", used as short for "strategy" and` \
`# ignore "acustom", used as a label name "acustom=label" and` \
`# ignore "thru", used in a skip list of kube tests:` \
xargs misspell -i strat,acustom,thru -w

3. Review changes:
git add -p
git checkout -- . # ignore what was not added

4. Update generated files:
./hack/update-generated-docs.sh
./hack/update-generated-swagger-spec.sh

Rodolfo Carvalho authored on 2016/10/19 01:37:59
Showing 66 changed files
... ...
@@ -87,7 +87,7 @@ message ClusterResourceQuotaStatus {
87 87
   optional k8s.io.kubernetes.pkg.api.v1.ResourceQuotaStatus total = 1;
88 88
 
89 89
   // Namespaces slices the usage by project.  This division allows for quick resolution of
90
-  // deletion reconcilation inside of a single project without requiring a recalculation
90
+  // deletion reconciliation inside of a single project without requiring a recalculation
91 91
   // across all projects.  This can be used to pull the deltas for a given project.
92 92
   repeated ResourceQuotaStatusByNamespace namespaces = 2;
93 93
 }
... ...
@@ -21317,7 +21317,7 @@
21317 21317
       "items": {
21318 21318
        "$ref": "v1.ResourceQuotaStatusByNamespace"
21319 21319
       },
21320
-      "description": "Namespaces slices the usage by project.  This division allows for quick resolution of deletion reconcilation inside of a single project without requiring a recalculation across all projects.  This can be used to pull the deltas for a given project."
21320
+      "description": "Namespaces slices the usage by project.  This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects.  This can be used to pull the deltas for a given project."
21321 21321
      }
21322 21322
     }
21323 21323
    },
... ...
@@ -45419,7 +45419,7 @@
45419 45419
     ],
45420 45420
     "properties": {
45421 45421
      "namespaces": {
45422
-      "description": "Namespaces slices the usage by project.  This division allows for quick resolution of deletion reconcilation inside of a single project without requiring a recalculation across all projects.  This can be used to pull the deltas for a given project.",
45422
+      "description": "Namespaces slices the usage by project.  This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects.  This can be used to pull the deltas for a given project.",
45423 45423
       "type": "array",
45424 45424
       "items": {
45425 45425
        "$ref": "#/definitions/v1.ResourceQuotaStatusByNamespace"
... ...
@@ -1250,7 +1250,7 @@ Create a namespace with the specified name
1250 1250
 
1251 1251
 
1252 1252
 == oc create policybinding
1253
-Create a policy binding that references the policy in the targetted namespace.
1253
+Create a policy binding that references the policy in the targeted namespace.
1254 1254
 
1255 1255
 ====
1256 1256
 
... ...
@@ -3,7 +3,7 @@
3 3
 
4 4
 .SH NAME
5 5
 .PP
6
-oc create policybinding \- Create a policy binding that references the policy in the targetted namespace.
6
+oc create policybinding \- Create a policy binding that references the policy in the targeted namespace.
7 7
 
8 8
 
9 9
 .SH SYNOPSIS
... ...
@@ -13,7 +13,7 @@ oc create policybinding \- Create a policy binding that references the policy in
13 13
 
14 14
 .SH DESCRIPTION
15 15
 .PP
16
-Create a policy binding that references the policy in the targetted namespace.
16
+Create a policy binding that references the policy in the targeted namespace.
17 17
 
18 18
 
19 19
 .SH OPTIONS
... ...
@@ -35,7 +35,7 @@ DEPRECATED: This command has been moved to "oc set env"
35 35
 
36 36
 .PP
37 37
 \fB\-\-from\fP=""
38
-    The name of a resource from which to inject enviroment variables
38
+    The name of a resource from which to inject environment variables
39 39
 
40 40
 .PP
41 41
 \fB\-\-list\fP=false
... ...
@@ -110,7 +110,7 @@ Experimental: This command is under active development and may change without no
110 110
 
111 111
 .PP
112 112
 \fB\-d\fP, \fB\-\-delete\fP=""
113
-    A command to run when resources are deleted. Specify mulitple times to add arguments.
113
+    A command to run when resources are deleted. Specify multiple times to add arguments.
114 114
 
115 115
 .PP
116 116
 \fB\-\-exit\-after\fP=0
... ...
@@ -41,7 +41,7 @@ If "\-\-env \-" is passed, environment variables can be read from STDIN using th
41 41
 
42 42
 .PP
43 43
 \fB\-\-from\fP=""
44
-    The name of a resource from which to inject enviroment variables
44
+    The name of a resource from which to inject environment variables
45 45
 
46 46
 .PP
47 47
 \fB\-\-list\fP=false
... ...
@@ -3,7 +3,7 @@
3 3
 
4 4
 .SH NAME
5 5
 .PP
6
-openshift cli create policybinding \- Create a policy binding that references the policy in the targetted namespace.
6
+openshift cli create policybinding \- Create a policy binding that references the policy in the targeted namespace.
7 7
 
8 8
 
9 9
 .SH SYNOPSIS
... ...
@@ -13,7 +13,7 @@ openshift cli create policybinding \- Create a policy binding that references th
13 13
 
14 14
 .SH DESCRIPTION
15 15
 .PP
16
-Create a policy binding that references the policy in the targetted namespace.
16
+Create a policy binding that references the policy in the targeted namespace.
17 17
 
18 18
 
19 19
 .SH OPTIONS
... ...
@@ -35,7 +35,7 @@ DEPRECATED: This command has been moved to "openshift cli set env"
35 35
 
36 36
 .PP
37 37
 \fB\-\-from\fP=""
38
-    The name of a resource from which to inject enviroment variables
38
+    The name of a resource from which to inject environment variables
39 39
 
40 40
 .PP
41 41
 \fB\-\-list\fP=false
... ...
@@ -110,7 +110,7 @@ Experimental: This command is under active development and may change without no
110 110
 
111 111
 .PP
112 112
 \fB\-d\fP, \fB\-\-delete\fP=""
113
-    A command to run when resources are deleted. Specify mulitple times to add arguments.
113
+    A command to run when resources are deleted. Specify multiple times to add arguments.
114 114
 
115 115
 .PP
116 116
 \fB\-\-exit\-after\fP=0
... ...
@@ -41,7 +41,7 @@ If "\-\-env \-" is passed, environment variables can be read from STDIN using th
41 41
 
42 42
 .PP
43 43
 \fB\-\-from\fP=""
44
-    The name of a resource from which to inject enviroment variables
44
+    The name of a resource from which to inject environment variables
45 45
 
46 46
 .PP
47 47
 \fB\-\-list\fP=false
... ...
@@ -47,7 +47,7 @@ pods and service accounts within a project
47 47
 
48 48
 ## Requirements
49 49
 
50
-1.  Provide a set of restrictions that controlls how a security context is created as a new, cluster-scoped, object
50
+1.  Provide a set of restrictions that controls how a security context is created as a new, cluster-scoped, object
51 51
 called SecurityContextConstraints.
52 52
 1.  User information in `user.Info` must be available to admission controllers. (Completed in
53 53
 https://github.com/kubernetes/kubernetes/pull/8203)
... ...
@@ -37,7 +37,7 @@ type ClusterResourceQuotaStatus struct {
37 37
 	Overall kapi.ResourceQuotaStatus
38 38
 
39 39
 	// ByNamespace slices the usage by namespace.  This division allows for quick resolution of 
40
-	// deletion reconcilation inside of a single namespace without requiring a recalculation 
40
+	// deletion reconciliation inside of a single namespace without requiring a recalculation 
41 41
 	// across all namespaces.  This map can be used to pull the deltas for a given namespace.
42 42
 	ByNamespace map[string]kapi.ResourceQuotaStatus
43 43
 }
... ...
@@ -6,7 +6,7 @@
6 6
 # $1 - The URL to check
7 7
 # $2 - Optional prefix to use when echoing a successful result
8 8
 # $3 - Optional time to sleep between attempts (Default: 0.2s)
9
-# $4 - Optional number of attemps to make (Default: 10)
9
+# $4 - Optional number of attempts to make (Default: 10)
10 10
 # attribution: openshift/origin hack/util.sh
11 11
 function wait_for_url {
12 12
 	url=$1
... ...
@@ -55,7 +55,7 @@ items:
55 55
 
56 56
           # The URL that builds must use to access the Git repositories
57 57
           # stored in this app.
58
-          # TOOD: support HTTPS
58
+          # TODO: support HTTPS
59 59
           - name: PUBLIC_URL
60 60
             value: http://git.$(POD_NAMESPACE).svc.cluster.local:8080
61 61
           # If INTERNAL_URL is specified, then it's used to point
... ...
@@ -55,7 +55,7 @@ items:
55 55
 
56 56
           # The URL that builds must use to access the Git repositories
57 57
           # stored in this app.
58
-          # TOOD: support HTTPS
58
+          # TODO: support HTTPS
59 59
           - name: PUBLIC_URL
60 60
             value: http://git.$(POD_NAMESPACE).svc.cluster.local:8080
61 61
           # If INTERNAL_URL is specified, then it's used to point
... ...
@@ -16,7 +16,7 @@ Once you have all 3 nodes in Running, you can run the "test.sh" script in this d
16 16
 ## Caveats
17 17
 
18 18
 Starting up all galera nodes at once leads to an issue where all the mysqls
19
-belive they're in the primary component because they don't see the others in
19
+believe they're in the primary component because they don't see the others in
20 20
 the DNS. For the bootstrapping to work: mysql-0 needs to see itself, mysql-1
21 21
 needs to see itself and mysql-0, and so on, because the first node that sees
22 22
 a peer list of 1 will assume it's the leader.
... ...
@@ -14,7 +14,7 @@
14 14
 # See the License for the specific language governing permissions and
15 15
 # limitations under the License.
16 16
 
17
-# This script writes out a mysql galera config using a list of newline seperated
17
+# This script writes out a mysql galera config using a list of newline separated
18 18
 # peer DNS names it accepts through stdin.
19 19
 
20 20
 # /etc/mysql is assumed to be a shared volume so we can modify my.cnf as required
... ...
@@ -4,7 +4,7 @@
4 4
 
5 5
 ##Purpose
6 6
 
7
-This example gives a basic template for attaching a persistent storage volume to a pod. It provides an end to end setup that begins with the _cluster-admin_ making the persistent volume availble and a _basic-user_ requesting storage from a **privileged** pod.
7
+This example gives a basic template for attaching a persistent storage volume to a pod. It provides an end to end setup that begins with the _cluster-admin_ making the persistent volume available and a _basic-user_ requesting storage from a **privileged** pod.
8 8
 
9 9
 _If the pod is not run as privileged, skip the **Edit Privileged scc** section_
10 10
 
... ...
@@ -109,7 +109,7 @@ Examine the output for the gluster volume.
109 109
 
110 110
 **That's it!**
111 111
 
112
-##Relevent Origin Docs
112
+##Relevant Origin Docs
113 113
 
114 114
 For more info on:
115 115
 
... ...
@@ -30,7 +30,7 @@ else
30 30
     exit 1
31 31
   fi
32 32
 
33
-  # Extract the release achives to a staging area.
33
+  # Extract the release archives to a staging area.
34 34
   os::build::detect_local_release_tars "linux-64bit"
35 35
 
36 36
   echo "Building images from release tars for commit ${OS_RELEASE_COMMIT}:"
... ...
@@ -103,7 +103,7 @@ readonly -f os::test::junit::declare_test_end
103 103
 
104 104
 # os::test::junit::check_test_counters checks that we do not have any test suites or test cases in flight
105 105
 # This function should be called at the very end of any test script using jUnit markers to make sure no error in
106
-# marking has occured.
106
+# marking has occurred.
107 107
 #
108 108
 # Globals:
109 109
 #  - NUM_OS_JUNIT_SUITES_IN_FLIGHT
... ...
@@ -3,7 +3,7 @@ import sys;
3 3
 import string;
4 4
 
5 5
 if len(sys.argv)!=2:
6
-	print("Useage: python hack/list-swagger-objects.py <swagger-spec-location>")
6
+	print("Usage: python hack/list-swagger-objects.py <swagger-spec-location>")
7 7
 	sys.exit(1)
8 8
 
9 9
 swagger_spec_location=sys.argv[1]
... ...
@@ -7,7 +7,7 @@ set -o nounset
7 7
 set -o pipefail
8 8
 
9 9
 
10
-# Values that can be overriden
10
+# Values that can be overridden
11 11
 RPM_TEST_PRODUCT=${RPM_TEST_PRODUCT:-"origin"}           # origin or atomic-enterprise
12 12
 RPM_TEST_OUTPUT_DIR=${RPM_TEST_OUTPUT_DIR:-"/tmp/tito/"} # Output for all build artifacts
13 13
 RPM_TEST_SKIP_LINT=${RPM_TEST_SKIP_LINT:-""}             # Set to anything to disable rpmlint test
... ...
@@ -85,6 +85,6 @@ if [[ -n "${FAILURE:-}" ]]; then
85 85
 	echo "FAILURE: go vet failed!"
86 86
 	exit 1
87 87
 else
88
-	echo "SUCCESS: go vet succeded!"
88
+	echo "SUCCESS: go vet succeeded!"
89 89
 	exit 0
90 90
 fi
... ...
@@ -46,7 +46,7 @@ HA_VRRP_ID_OFFSET=${OPENSHIFT_HA_VRRP_ID_OFFSET:-"0"}
46 46
 
47 47
 
48 48
 #  ========================================================================
49
-#  Default settings - not currently exposed or overriden on OpenShift.
49
+#  Default settings - not currently exposed or overridden on OpenShift.
50 50
 #  ========================================================================
51 51
 
52 52
 #  If your environment doesn't support multicast, you can send VRRP adverts
... ...
@@ -135,7 +135,7 @@ func (h *Helper) TestIP(ip string) error {
135 135
 		Entrypoint("socat").
136 136
 		Command("TCP-LISTEN:8443,crlf,reuseaddr,fork", "SYSTEM:\"echo 'hello world'\"").Start()
137 137
 	if err != nil {
138
-		return errors.NewError("cannnot start simple server on Docker host").WithCause(err)
138
+		return errors.NewError("cannot start simple server on Docker host").WithCause(err)
139 139
 	}
140 140
 	defer func() {
141 141
 		errors.LogError(h.dockerHelper.StopAndRemoveContainer(id))
... ...
@@ -150,7 +150,7 @@ func (h *Helper) TestForwardedIP(ip string) error {
150 150
 		Entrypoint("socat").
151 151
 		Command("TCP-LISTEN:8443,crlf,reuseaddr,fork", "SYSTEM:\"echo 'hello world'\"").Start()
152 152
 	if err != nil {
153
-		return errors.NewError("cannnot start simple server on Docker host").WithCause(err)
153
+		return errors.NewError("cannot start simple server on Docker host").WithCause(err)
154 154
 	}
155 155
 	defer func() {
156 156
 		errors.LogError(h.dockerHelper.StopAndRemoveContainer(id))
... ...
@@ -208,7 +208,7 @@ func TestScriptProxyConfig(t *testing.T) {
208 208
 	}
209 209
 	resultedProxyConf, err := scriptProxyConfig(newBuild)
210 210
 	if err != nil {
211
-		t.Fatalf("An error occured while parsing the proxy config: %v", err)
211
+		t.Fatalf("An error occurred while parsing the proxy config: %v", err)
212 212
 	}
213 213
 	if resultedProxyConf.HTTPProxy.Path != "/insecure" {
214 214
 		t.Errorf("Expected HTTP Proxy path to be /insecure, got: %v", resultedProxyConf.HTTPProxy.Path)
... ...
@@ -814,7 +814,7 @@ func getServiceAccount(buildConfig *buildapi.BuildConfig, defaultServiceAccount
814 814
 	return serviceAccount
815 815
 }
816 816
 
817
-//setBuildSource update build source by bianry status
817
+//setBuildSource update build source by binary status
818 818
 func setBuildSource(binary *buildapi.BinaryBuildSource, build *buildapi.Build) {
819 819
 	if binary != nil {
820 820
 		build.Spec.Source.Git = nil
... ...
@@ -64,7 +64,7 @@ func NewCommandValidateNodeConfig(name, fullName string, out io.Writer) *cobra.C
64 64
 				os.Exit(1)
65 65
 			}
66 66
 
67
-			fmt.Fprintf(options.Out, "SUCCESS: Validation succeded for file: %s\n", options.NodeConfigFile)
67
+			fmt.Fprintf(options.Out, "SUCCESS: Validation succeeded for file: %s\n", options.NodeConfigFile)
68 68
 		},
69 69
 	}
70 70
 
... ...
@@ -19,7 +19,7 @@ import (
19 19
 const PolicyBindingRecommendedName = "policybinding"
20 20
 
21 21
 var (
22
-	policyBindingLong = templates.LongDesc(`Create a policy binding that references the policy in the targetted namespace.`)
22
+	policyBindingLong = templates.LongDesc(`Create a policy binding that references the policy in the targeted namespace.`)
23 23
 
24 24
 	policyBindingExample = templates.Examples(`
25 25
 		# Create a policy binding in namespace "foo" that references the policy in namespace "bar"
... ...
@@ -46,7 +46,7 @@ func NewCmdCreatePolicyBinding(name, fullName string, f *clientcmd.Factory, out
46 46
 
47 47
 	cmd := &cobra.Command{
48 48
 		Use:     name + " TARGET_POLICY_NAMESPACE",
49
-		Short:   "Create a policy binding that references the policy in the targetted namespace.",
49
+		Short:   "Create a policy binding that references the policy in the targeted namespace.",
50 50
 		Long:    policyBindingLong,
51 51
 		Example: fmt.Sprintf(policyBindingExample, fullName),
52 52
 		Run: func(cmd *cobra.Command, args []string) {
... ...
@@ -522,7 +522,7 @@ func (o *IdleOptions) RunIdle(f *clientcmd.Factory) error {
522 522
 		if len(byService) == 0 || len(byScalable) == 0 {
523 523
 			return fmt.Errorf("no valid scalable resources found to idle: %v", err)
524 524
 		}
525
-		fmt.Fprintf(o.errOut, "warning: continuing on for valid scalable resources, but an error occured while finding scalable resources to idle: %v", err)
525
+		fmt.Fprintf(o.errOut, "warning: continuing on for valid scalable resources, but an error occurred while finding scalable resources to idle: %v", err)
526 526
 	}
527 527
 
528 528
 	oclient, _, kclient, err := f.Clients()
... ...
@@ -192,7 +192,7 @@ func NewCmdObserve(fullName string, f *clientcmd.Factory, out, errOut io.Writer)
192 192
 	cmd.Flags().BoolVar(&options.allNamespaces, "all-namespaces", false, "If present, list the requested object(s) across all projects. Project in current context is ignored.")
193 193
 
194 194
 	// to perform deletion synchronization
195
-	cmd.Flags().VarP(&options.deleteCommand, "delete", "d", "A command to run when resources are deleted. Specify mulitple times to add arguments.")
195
+	cmd.Flags().VarP(&options.deleteCommand, "delete", "d", "A command to run when resources are deleted. Specify multiple times to add arguments.")
196 196
 	cmd.Flags().Var(&options.nameSyncCommand, "names", "A command that will list all of the currently known names, optional. Specify multiple times to add arguments. Use to get notifications when objects are deleted.")
197 197
 
198 198
 	// add additional arguments / info to the server
... ...
@@ -843,7 +843,7 @@ func (p *GoTemplateColumnPrinter) Print(obj interface{}) ([]string, []byte, erro
843 843
 			return nil, nil, fmt.Errorf("error executing template '%v': '%v'\n----data----\n%+v\n", p.rawTemplates[i], err, obj)
844 844
 		}
845 845
 		// if the template resolves to the special <no value> result, return it as an empty string
846
-		// most arguments will prefer empty vs an arbitary constant, and we are making gotemplates consistent with
846
+		// most arguments will prefer empty vs an arbitrary constant, and we are making gotemplates consistent with
847 847
 		// jsonpath
848 848
 		if p.buf.String() == "<no value>" {
849 849
 			if p.strict {
... ...
@@ -89,7 +89,7 @@ func NewCmdEnv(fullName string, f *clientcmd.Factory, in io.Reader, out, errout
89 89
 		},
90 90
 	}
91 91
 	cmd.Flags().StringP("containers", "c", "*", "The names of containers in the selected pod templates to change - may use wildcards")
92
-	cmd.Flags().StringP("from", "", "", "The name of a resource from which to inject enviroment variables")
92
+	cmd.Flags().StringP("from", "", "", "The name of a resource from which to inject environment variables")
93 93
 	cmd.Flags().StringP("prefix", "", "", "Prefix to append to variable names")
94 94
 	cmd.Flags().StringArrayVarP(&env, "env", "e", env, "Specify a key-value pair for an environment variable to set into each container.")
95 95
 	cmd.Flags().Bool("list", false, "Display the environment and any changes in the standard format")
... ...
@@ -268,10 +268,10 @@ func (n namespaceNames) NamespaceNames() (sets.String, error) {
268 268
 	return names, nil
269 269
 }
270 270
 
271
-func envVarAsStrings(name, defaultValue, seperator string) []string {
271
+func envVarAsStrings(name, defaultValue, separator string) []string {
272 272
 	strlist := []string{}
273 273
 	if env := cmdutil.Env(name, defaultValue); env != "" {
274
-		values := strings.Split(env, seperator)
274
+		values := strings.Split(env, separator)
275 275
 		for i := range values {
276 276
 			if val := strings.TrimSpace(values[i]); val != "" {
277 277
 				strlist = append(strlist, val)
... ...
@@ -725,7 +725,7 @@ func (o *imageResolutionOptions) Bind(f *pflag.FlagSet) {
725 725
 }
726 726
 
727 727
 // useDiscoveryRESTMapper checks the server version to see if its recent enough to have
728
-// enough discovery information avaiable to reliably build a RESTMapper.  If not, use the
728
+// enough discovery information available to reliably build a RESTMapper.  If not, use the
729 729
 // hardcoded mapper in this client (legacy behavior)
730 730
 func useDiscoveryRESTMapper(serverVersion string) bool {
731 731
 	serverSemVer, err := semver.Parse(serverVersion[1:])
... ...
@@ -176,7 +176,7 @@ func OkPodTemplateMissingImage(missing ...string) *kapi.PodTemplateSpec {
176 176
 	template := OkPodTemplate()
177 177
 	for i, c := range template.Spec.Containers {
178 178
 		if set.Has(c.Name) {
179
-			// rememeber that slices use copies, so have to ref array entry explicitly
179
+			// remember that slices use copies, so have to ref array entry explicitly
180 180
 			template.Spec.Containers[i].Image = ""
181 181
 		}
182 182
 	}
... ...
@@ -30,7 +30,7 @@ const (
30 30
 
31 31
 // newQuotaEnforcingConfig creates caches for quota objects. The objects are stored with given eviction
32 32
 // timeout. Caches will only be initialized if the given ttl is positive. Options are gathered from
33
-// configuration file and will be overriden by enforceQuota and projectCacheTTL environment variable values.
33
+// configuration file and will be overridden by enforceQuota and projectCacheTTL environment variable values.
34 34
 func newQuotaEnforcingConfig(ctx context.Context, enforceQuota, projectCacheTTL string, options map[string]interface{}) *quotaEnforcingConfig {
35 35
 	enforce, err := getBoolOption(EnforceQuotaEnvVar, "enforcequota", false, options)
36 36
 	if err != nil {
... ...
@@ -107,7 +107,7 @@ type TemplateFileSearcher struct {
107 107
 	Namespace    string
108 108
 }
109 109
 
110
-// Search attemps to read template files and transform it into template objects
110
+// Search attempts to read template files and transform it into template objects
111 111
 func (r *TemplateFileSearcher) Search(precise bool, terms ...string) (ComponentMatches, []error) {
112 112
 	matches := ComponentMatches{}
113 113
 	var errs []error
... ...
@@ -16,7 +16,7 @@ import (
16 16
 type FileSearcher struct {
17 17
 }
18 18
 
19
-// Search attemps to read template files and transform it into template objects
19
+// Search attempts to read template files and transform it into template objects
20 20
 func (r *FileSearcher) Search(precise bool, terms ...string) (app.ComponentMatches, []error) {
21 21
 	matches := app.ComponentMatches{}
22 22
 	var errs []error
... ...
@@ -53,7 +53,7 @@ type ImageExecutionPolicyRule struct {
53 53
 type ImageCondition struct {
54 54
 	// Name is the name of this policy rule for reference. It must be unique across all rules.
55 55
 	Name string
56
-	// IgnoreNamespaceOverride prevents this condition from being overriden when the
56
+	// IgnoreNamespaceOverride prevents this condition from being overridden when the
57 57
 	// `alpha.image.policy.openshift.io/ignore-rules` is set on a namespace and contains this rule name.
58 58
 	IgnoreNamespaceOverride bool
59 59
 
... ...
@@ -18,7 +18,7 @@ func (GroupResource) SwaggerDoc() map[string]string {
18 18
 var map_ImageCondition = map[string]string{
19 19
 	"":     "ImageCondition defines the conditions for matching a particular image source. The conditions below are all required (logical AND). If Reject is specified, the condition is false if all conditions match, and true otherwise.",
20 20
 	"name": "Name is the name of this policy rule for reference. It must be unique across all rules.",
21
-	"ignoreNamespaceOverride": "IgnoreNamespaceOverride prevents this condition from being overriden when the `alpha.image.policy.openshift.io/ignore-rules` is set on a namespace and contains this rule name.",
21
+	"ignoreNamespaceOverride": "IgnoreNamespaceOverride prevents this condition from being overridden when the `alpha.image.policy.openshift.io/ignore-rules` is set on a namespace and contains this rule name.",
22 22
 	"onResources":             "OnResources determines which resources this applies to. Defaults to 'pods' for ImageExecutionPolicyRules.",
23 23
 	"invertMatch":             "InvertMatch means the value of the condition is logically inverted (true -> false, false -> true).",
24 24
 	"matchIntegratedRegistry": "MatchIntegratedRegistry will only match image sources that originate from the configured integrated registry.",
... ...
@@ -58,7 +58,7 @@ type GroupResource struct {
58 58
 type ImageCondition struct {
59 59
 	// Name is the name of this policy rule for reference. It must be unique across all rules.
60 60
 	Name string `json:"name"`
61
-	// IgnoreNamespaceOverride prevents this condition from being overriden when the
61
+	// IgnoreNamespaceOverride prevents this condition from being overridden when the
62 62
 	// `alpha.image.policy.openshift.io/ignore-rules` is set on a namespace and contains this rule name.
63 63
 	IgnoreNamespaceOverride bool `json:"ignoreNamespaceOverride"`
64 64
 
... ...
@@ -42,7 +42,7 @@ func (f *FakeImageStreamLimitVerifier) VerifyLimits(ns string, is *imageapi.Imag
42 42
 }
43 43
 
44 44
 // GetFakeImageStreamListHandler creates a test handler that lists given image streams matching requested
45
-// namespace. Addionally, a shared image stream will be listed if the requested namespace is "shared".
45
+// namespace. Additionally, a shared image stream will be listed if the requested namespace is "shared".
46 46
 func GetFakeImageStreamListHandler(t *testing.T, iss ...imageapi.ImageStream) ktestclient.ReactionFunc {
47 47
 	sharedISs := []imageapi.ImageStream{*GetSharedImageStream("shared", "is")}
48 48
 	allISs := append(sharedISs, iss...)
... ...
@@ -212,7 +212,7 @@ func (tcp *tcpUnidlerSocket) acceptConns(ch chan<- net.Conn, svcInfo *userspace.
212 212
 }
213 213
 
214 214
 // awaitAwakening collects new connections and signals once that pods are needed to fulfill them.  The function
215
-// will return when the listening socket is closed, which indicates that endpoints have succesfully appeared
215
+// will return when the listening socket is closed, which indicates that endpoints have successfully appeared
216 216
 // (and thus the hybrid proxy has switched this service over to using the normal proxy).  Connections will
217 217
 // be gradually timed out and dropped off the list of connections on a per-connection basis.  The list of current
218 218
 // connections is returned, in addition to whether or not we should retry this method.
... ...
@@ -87,7 +87,7 @@ func (tcp *tcpProxySocket) ListenPort() int {
87 87
 }
88 88
 
89 89
 // TryConnectEndpoints attempts to connect to the next available endpoint for the given service, cycling
90
-// through until it is able to successully connect, or it has tried with all timeouts in EndpointsDialTimeout.
90
+// through until it is able to successfully connect, or it has tried with all timeouts in EndpointsDialTimeout.
91 91
 func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protocol string, loadBalancer LoadBalancer) (net.Conn, error) {
92 92
 	sessionAffinityReset := false
93 93
 	for _, dialTimeout := range EndpointDialTimeout {
... ...
@@ -51,7 +51,7 @@ type ClusterResourceQuotaStatus struct {
51 51
 	Total kapi.ResourceQuotaStatus
52 52
 
53 53
 	// Namespaces slices the usage by project.  This division allows for quick resolution of
54
-	// deletion reconcilation inside of a single project without requiring a recalculation
54
+	// deletion reconciliation inside of a single project without requiring a recalculation
55 55
 	// across all projects.  This map can be used to pull the deltas for a given project.
56 56
 	Namespaces ResourceQuotasStatusByNamespace
57 57
 }
... ...
@@ -87,7 +87,7 @@ message ClusterResourceQuotaStatus {
87 87
   optional k8s.io.kubernetes.pkg.api.v1.ResourceQuotaStatus total = 1;
88 88
 
89 89
   // Namespaces slices the usage by project.  This division allows for quick resolution of
90
-  // deletion reconcilation inside of a single project without requiring a recalculation
90
+  // deletion reconciliation inside of a single project without requiring a recalculation
91 91
   // across all projects.  This can be used to pull the deltas for a given project.
92 92
   repeated ResourceQuotaStatusByNamespace namespaces = 2;
93 93
 }
... ...
@@ -70,7 +70,7 @@ func (ClusterResourceQuotaSpec) SwaggerDoc() map[string]string {
70 70
 var map_ClusterResourceQuotaStatus = map[string]string{
71 71
 	"":           "ClusterResourceQuotaStatus defines the actual enforced quota and its current usage",
72 72
 	"total":      "Total defines the actual enforced quota and its current usage across all projects",
73
-	"namespaces": "Namespaces slices the usage by project.  This division allows for quick resolution of deletion reconcilation inside of a single project without requiring a recalculation across all projects.  This can be used to pull the deltas for a given project.",
73
+	"namespaces": "Namespaces slices the usage by project.  This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects.  This can be used to pull the deltas for a given project.",
74 74
 }
75 75
 
76 76
 func (ClusterResourceQuotaStatus) SwaggerDoc() map[string]string {
... ...
@@ -48,7 +48,7 @@ type ClusterResourceQuotaStatus struct {
48 48
 	Total kapi.ResourceQuotaStatus `json:"total" protobuf:"bytes,1,opt,name=total"`
49 49
 
50 50
 	// Namespaces slices the usage by project.  This division allows for quick resolution of
51
-	// deletion reconcilation inside of a single project without requiring a recalculation
51
+	// deletion reconciliation inside of a single project without requiring a recalculation
52 52
 	// across all projects.  This can be used to pull the deltas for a given project.
53 53
 	Namespaces ResourceQuotasStatusByNamespace `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces,casttype=ResourceQuotasStatusByNamespace"`
54 54
 }
... ...
@@ -31,7 +31,7 @@ import (
31 31
 // 4. The ns Delete is compressed out and never delivered to the controller, so the improper match is never cleared.
32 32
 //
33 33
 // This sounds pretty bad, however, we fail in the "safe" direction and the consequences are detectable.
34
-// When going from quota to namespace, you can get back a namespace that doesn't exist.  There are no resource in a non-existance
34
+// When going from quota to namespace, you can get back a namespace that doesn't exist.  There are no resource in a non-existence
35 35
 // namespace, so you know to clear all referenced resources.  In addition, this add/delete has to happen so fast
36 36
 // that it would be nearly impossible for any resources to be created.  If you do create resources, then we must be observing
37 37
 // their deletes.  When quota is replenished, we'll see that we need to clear any charges.
... ...
@@ -34,7 +34,7 @@ type SelectionFields struct {
34 34
 
35 35
 // clusterQuotaMapper gives thread safe access to the actual mappings that are being stored.
36 36
 // Many method use a shareable read lock to check status followed by a non-shareable
37
-// write lock which double checks the condition before proceding.  Since locks aren't escalatable
37
+// write lock which double checks the condition before proceeding.  Since locks aren't escalatable
38 38
 // you have to perform the recheck because someone could have beaten you in.
39 39
 type clusterQuotaMapper struct {
40 40
 	lock sync.RWMutex
... ...
@@ -113,7 +113,7 @@ func (c *ClusterQuotaReconcilationController) Run(workers int, stopCh <-chan str
113 113
 	case <-stopCh:
114 114
 		return
115 115
 	}
116
-	glog.V(4).Infof("Starting the cluster quota reconcilation controller workers")
116
+	glog.V(4).Infof("Starting the cluster quota reconciliation controller workers")
117 117
 
118 118
 	// the controllers that replenish other resources to respond rapidly to state changes
119 119
 	for _, replenishmentController := range c.replenishmentControllers {
... ...
@@ -137,7 +137,7 @@ func (c *ClusterQuotaReconcilationController) waitForSyncedStores(ready chan<- s
137 137
 	defer utilruntime.HandleCrash()
138 138
 
139 139
 	for !c.clusterQuotaSynced() {
140
-		glog.V(4).Infof("Waiting for the caches to sync before starting the cluster quota reconcilation controller workers")
140
+		glog.V(4).Infof("Waiting for the caches to sync before starting the cluster quota reconciliation controller workers")
141 141
 		select {
142 142
 		case <-time.After(100 * time.Millisecond):
143 143
 		case <-stopCh:
... ...
@@ -50,7 +50,7 @@ func imageStreamImportConstraintsFunc(required []kapi.ResourceName, object runti
50 50
 	return nil
51 51
 }
52 52
 
53
-// makeImageStreamImportAdmissionUsageFunc retuns a function for computing a usage of an image stream import.
53
+// makeImageStreamImportAdmissionUsageFunc returns a function for computing a usage of an image stream import.
54 54
 func makeImageStreamImportAdmissionUsageFunc(isNamespacer osclient.ImageStreamsNamespacer) generic.UsageFunc {
55 55
 	return func(object runtime.Object) kapi.ResourceList {
56 56
 		isi, ok := object.(*imageapi.ImageStreamImport)
... ...
@@ -414,7 +414,7 @@ func checkAdmitError(t *testing.T, err error, expectedError error, prefix string
414 414
 	case expectedError == nil && err != nil:
415 415
 		t.Errorf("%s: expected no error, got: %q", prefix, err.Error())
416 416
 	case expectedError != nil && err == nil:
417
-		t.Errorf("%s: expected error %q, no error recieved", prefix, expectedError.Error())
417
+		t.Errorf("%s: expected error %q, no error received", prefix, expectedError.Error())
418 418
 	}
419 419
 }
420 420
 
... ...
@@ -237,7 +237,7 @@ func (c *UnidlingController) awaitRequest() bool {
237 237
 		return true
238 238
 	}
239 239
 
240
-	// Otherwise, if we have an error, we were at least partially unsucessful in unidling, so
240
+	// Otherwise, if we have an error, we were at least partially unsuccessful in unidling, so
241 241
 	// we requeue the event to process later
242 242
 
243 243
 	// don't try to process failing requests forever
... ...
@@ -197,7 +197,7 @@ os::cmd::expect_success 'oc new-project test-cmd-images-2'
197 197
 os::cmd::expect_success "oc tag $project/mysql:5.5 newrepo:latest"
198 198
 os::cmd::expect_success_and_text "oc get is/newrepo --template='{{(index .spec.tags 0).from.kind}}'" 'ImageStreamImage'
199 199
 os::cmd::expect_success_and_text 'oc get istag/newrepo:latest -o jsonpath={.image.dockerImageReference}' 'openshift/mysql-55-centos7@sha256:'
200
-# tag accross projects without specifying the source's project
200
+# tag across projects without specifying the source's project
201 201
 os::cmd::expect_success_and_text "oc tag newrepo:latest '${project}/mysql:tag1'" "mysql:tag1 set to"
202 202
 os::cmd::expect_success_and_text "oc get is/newrepo --template='{{(index .spec.tags 0).name}}'" "latest"
203 203
 # tagging an image with a DockerImageReference that points to the internal registry across namespaces updates the reference
... ...
@@ -271,7 +271,7 @@ os::cmd::expect_success 'oc policy add-role-to-user system:image-pusher system:a
271 271
 os::cmd::try_until_text 'oc policy who-can update imagestreams/layers -n custom' 'system:anonymous'
272 272
 os::cmd::expect_success "docker push ${DOCKER_REGISTRY}/custom/cross:namespace-pull"
273 273
 os::cmd::expect_success "docker push ${DOCKER_REGISTRY}/custom/cross:namespace-pull-id"
274
-os::log::info "Anonymous registry access successfull"
274
+os::log::info "Anonymous registry access successful"
275 275
 
276 276
 # log back into docker as e2e-user again
277 277
 os::cmd::expect_success "docker login -u e2e-user -p ${e2e_user_token} -e e2e-user@openshift.com ${DOCKER_REGISTRY}"
... ...
@@ -222,12 +222,12 @@ readonly EXCLUDED_TESTS=(
222 222
 	"\[Feature:PodAffinity\]"  # Not enabled yet
223 223
 	Ingress                    # Not enabled yet
224 224
 	"Cinder"                   # requires an OpenStack cluster
225
-	"should support r/w"       # hostPath: This test expects that host's tmp dir is WRITABLE by a container.  That isn't something we need to gaurantee for openshift.
225
+	"should support r/w"       # hostPath: This test expects that host's tmp dir is WRITABLE by a container.  That isn't something we need to guarantee for openshift.
226 226
 	"should check that the kubernetes-dashboard instance is alive" # we don't create this
227 227
 	"\[Feature:ManualPerformance\]" # requires /resetMetrics which we don't expose
228 228
 
229 229
 	# See the CanSupport implementation in upstream to determine whether these work.
230
-	"Ceph RBD"      # Works if ceph-common Binary installed (but we can't gaurantee this on all clusters).
230
+	"Ceph RBD"      # Works if ceph-common Binary installed (but we can't guarantee this on all clusters).
231 231
 	"GlusterFS" # May work if /sbin/mount.glusterfs to be installed for plugin to work (also possibly blocked by serial pulling)
232 232
 	"should support r/w" # hostPath: This test expects that host's tmp dir is WRITABLE by a container.  That isn't something we need to guarantee for openshift.
233 233
 
... ...
@@ -57,7 +57,7 @@ objects:
57 57
 
58 58
           # The URL that builds must use to access the Git repositories
59 59
           # stored in this app.
60
-          # TOOD: support HTTPS
60
+          # TODO: support HTTPS
61 61
           - name: PUBLIC_URL
62 62
             value: http://gitserver-tokenauth.$(POD_NAMESPACE).svc.cluster.local:8080
63 63
           # The directory to store Git repositories in. If not backed
... ...
@@ -57,7 +57,7 @@ objects:
57 57
 
58 58
           # The URL that builds must use to access the Git repositories
59 59
           # stored in this app.
60
-          # TOOD: support HTTPS
60
+          # TODO: support HTTPS
61 61
           - name: PUBLIC_URL
62 62
             value: http://gitserver.$(POD_NAMESPACE).svc.cluster.local:8080
63 63
           # The directory to store Git repositories in. If not backed
... ...
@@ -42,5 +42,5 @@ func TestBasicFunctionalityWithAudit(t *testing.T) {
42 42
 		t.Errorf("Unexpected error watching pods: %v", err)
43 43
 	}
44 44
 
45
-	// TOOD: test oc debug, exec, rsh, port-forward
45
+	// TODO: test oc debug, exec, rsh, port-forward
46 46
 }
... ...
@@ -222,7 +222,7 @@ func testPodNodeConstraintsDeploymentConfig(nodeName string, nodeSelector map[st
222 222
 	return dc
223 223
 }
224 224
 
225
-// testPodNodeConstraintsObjectCreationWithPodTemplate attemps to create different object types that contain pod templates
225
+// testPodNodeConstraintsObjectCreationWithPodTemplate attempts to create different object types that contain pod templates
226 226
 // using the passed in nodeName and nodeSelector. It will use the expectError flag to determine if an error should be returned or not
227 227
 func testPodNodeConstraintsObjectCreationWithPodTemplate(t *testing.T, name string, kclientset kclientset.Interface, client client.Interface, nodeName string, nodeSelector map[string]string, expectError bool) {
228 228
 
... ...
@@ -83,7 +83,7 @@ function wait_for_url_timed {
83 83
 #
84 84
 # $1 - The file to check for existence
85 85
 # $2 - Optional time to sleep between attempts (Default: 0.2s)
86
-# $3 - Optional number of attemps to make (Default: 10)
86
+# $3 - Optional number of attempts to make (Default: 10)
87 87
 function wait_for_file {
88 88
   file=$1
89 89
   wait=${2:-0.2}
... ...
@@ -104,7 +104,7 @@ function wait_for_file {
104 104
 # $1 - The URL to check
105 105
 # $2 - Optional prefix to use when echoing a successful result
106 106
 # $3 - Optional time to sleep between attempts (Default: 0.2s)
107
-# $4 - Optional number of attemps to make (Default: 10)
107
+# $4 - Optional number of attempts to make (Default: 10)
108 108
 function wait_for_url {
109 109
   url=$1
110 110
   prefix=${2:-}
... ...
@@ -137,7 +137,7 @@ function wait_for_url {
137 137
 # form).
138 138
 #
139 139
 # $1 - Optional time to sleep between attempts (Default: 0.2s)
140
-# $2 - Optional number of attemps to make (Default: 10)
140
+# $2 - Optional number of attempts to make (Default: 10)
141 141
 function set_curl_args {
142 142
   wait=${1:-0.2}
143 143
   times=${2:-10}
... ...
@@ -39,7 +39,7 @@ objects:
39 39
   kind: Job
40 40
   metadata:
41 41
     annotations:
42
-      description: v1beta1 Job - used to test v1beta1 negotation of group k8s objects
42
+      description: v1beta1 Job - used to test v1beta1 negotiation of group k8s objects
43 43
     name: v1beta1-job
44 44
   spec:
45 45
     selector:
... ...
@@ -58,7 +58,7 @@ objects:
58 58
   kind: Job
59 59
   metadata:
60 60
     annotations:
61
-      description: v1beta1 Job - used to test v1beta1 negotation of group k8s objects
61
+      description: v1beta1 Job - used to test v1beta1 negotiation of group k8s objects
62 62
     name: v1beta2-job
63 63
   spec:
64 64
     selector:
... ...
@@ -25,7 +25,7 @@ items:
25 25
   kind: Job
26 26
   metadata:
27 27
     annotations:
28
-      description: v1beta1 Job - used to test v1beta1 negotation of group k8s objects
28
+      description: v1beta1 Job - used to test v1beta1 negotiation of group k8s objects
29 29
     name: v1beta1-job
30 30
   spec:
31 31
     selector:
... ...
@@ -45,7 +45,7 @@ items:
45 45
   kind: Job
46 46
   metadata:
47 47
     annotations:
48
-      description: v1 Job - used to test v1 negotation of group k8s objects
48
+      description: v1 Job - used to test v1 negotiation of group k8s objects
49 49
     name: v1-job
50 50
   spec:
51 51
     template: