Browse code

Fix typos found across repository

Signed-off-by: Justas Brazauskas <brazauskasjustas@gmail.com>

Justas Brazauskas authored on 2015/12/14 01:00:39
Showing 117 changed files
... ...
@@ -145,7 +145,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
145 145
 	// then make sure we send both files over to the daemon
146 146
 	// because Dockerfile is, obviously, needed no matter what, and
147 147
 	// .dockerignore is needed to know if either one needs to be
148
-	// removed.  The deamon will remove them for us, if needed, after it
148
+	// removed. The daemon will remove them for us, if needed, after it
149 149
 	// parses the Dockerfile. Ignore errors here, as they will have been
150 150
 	// caught by ValidateContextDirectory above.
151 151
 	var includes = []string{"."}
... ...
@@ -231,7 +231,7 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpP
231 231
 	// Ignore any error and assume that the parent directory of the destination
232 232
 	// path exists, in which case the copy may still succeed. If there is any
233 233
 	// type of conflict (e.g., non-directory overwriting an existing directory
234
-	// or vice versia) the extraction will fail. If the destination simply did
234
+	// or vice versa) the extraction will fail. If the destination simply did
235 235
 	// not exist, but the parent directory does, the extraction will still
236 236
 	// succeed.
237 237
 	if err == nil {
... ...
@@ -266,7 +266,7 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpP
266 266
 		// With the stat info about the local source as well as the
267 267
 		// destination, we have enough information to know whether we need to
268 268
 		// alter the archive that we upload so that when the server extracts
269
-		// it to the specified directory in the container we get the disired
269
+		// it to the specified directory in the container we get the desired
270 270
 		// copy behavior.
271 271
 
272 272
 		// See comments in the implementation of `archive.PrepareArchiveCopy`
... ...
@@ -130,7 +130,7 @@ func (cli *DockerCli) CmdNetworkDisconnect(args ...string) error {
130 130
 	return cli.client.NetworkDisconnect(cmd.Arg(0), cmd.Arg(1))
131 131
 }
132 132
 
133
-// CmdNetworkLs lists all the netorks managed by docker daemon
133
+// CmdNetworkLs lists all the networks managed by docker daemon
134 134
 //
135 135
 // Usage: docker network ls [OPTIONS]
136 136
 func (cli *DockerCli) CmdNetworkLs(args ...string) error {
... ...
@@ -198,8 +198,8 @@ func (cli *DockerCli) CmdNetworkInspect(args ...string) error {
198 198
 
199 199
 // Consolidates the ipam configuration as a group from different related configurations
200 200
 // user can configure network with multiple non-overlapping subnets and hence it is
201
-// possible to corelate the various related parameters and consolidate them.
202
-// consoidateIpam consolidates subnets, ip-ranges, gateways and auxilary addresses into
201
+// possible to correlate the various related parameters and consolidate them.
202
 +// consolidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into
203 203
 // structured ipam data.
204 204
 func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) {
205 205
 	if len(subnets) < len(ranges) || len(subnets) < len(gateways) {
... ...
@@ -100,7 +100,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
100 100
 			return err
101 101
 		}
102 102
 
103
-		// 4. Wait for attachement to break.
103
+		// 4. Wait for attachment to break.
104 104
 		if c.Config.Tty && cli.isTerminalOut {
105 105
 			if err := cli.monitorTtySize(containerID, false); err != nil {
106 106
 				fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
... ...
@@ -20,7 +20,7 @@ const (
20 20
 	// Version of Current REST API
21 21
 	Version version.Version = "1.22"
22 22
 
23
-	// MinVersion represents Minimun REST API version supported
23
+	// MinVersion represents Minimum REST API version supported
24 24
 	MinVersion version.Version = "1.12"
25 25
 
26 26
 	// DefaultDockerfileName is the Default filename with Docker commands, read by docker build
... ...
@@ -139,7 +139,7 @@ func versionMiddleware(handler httputils.APIFunc) httputils.APIFunc {
139 139
 
140 140
 // handleWithGlobalMiddlewares wraps the handler function for a request with
141 141
 // the server's global middlewares. The order of the middlewares is backwards,
142
-// meaning that the first in the list will be evaludated last.
142
+// meaning that the first in the list will be evaluated last.
143 143
 //
144 144
 // Example: handleWithGlobalMiddlewares(s.getContainersName)
145 145
 //
... ...
@@ -478,7 +478,7 @@ func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R
478 478
 func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
479 479
 	var (
480 480
 		repoAndTags []reference.Named
481
-		// This map is used for deduplicating the "-t" paramter.
481
+		// This map is used for deduplicating the "-t" parameter.
482 482
 		uniqNames = make(map[string]struct{})
483 483
 	)
484 484
 	for _, repo := range names {
... ...
@@ -35,7 +35,7 @@ func (l localRoute) Path() string {
35 35
 	return l.path
36 36
 }
37 37
 
38
-// NewRoute initialies a new local route for the reouter
38
 +// NewRoute initializes a new local route for the router
39 39
 func NewRoute(method, path string, handler httputils.APIFunc) dkrouter.Route {
40 40
 	return localRoute{method, path, handler}
41 41
 }
... ...
@@ -63,7 +63,7 @@ type BlkioStatEntry struct {
63 63
 // BlkioStats stores All IO service stats for data read and write
64 64
 // TODO Windows: This can be factored out
65 65
 type BlkioStats struct {
66
-	// number of bytes tranferred to and from the block device
66
+	// number of bytes transferred to and from the block device
67 67
 	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
68 68
 	IoServicedRecursive     []BlkioStatEntry `json:"io_serviced_recursive"`
69 69
 	IoQueuedRecursive       []BlkioStatEntry `json:"io_queue_recursive"`
... ...
@@ -105,7 +105,7 @@ func (fl *Flag) IsTrue() bool {
105 105
 // compile time error so it doesn't matter too much when we stop our
106 106
 // processing as long as we do stop it, so this allows the code
107 107
 // around AddXXX() to be just:
108
-//     defFlag := AddString("desription", "")
108
+//     defFlag := AddString("description", "")
109 109
 // w/o needing to add an if-statement around each one.
110 110
 func (bf *BFlags) Parse() error {
111 111
 	// If there was an error while defining the possible flags
... ...
@@ -640,7 +640,7 @@ func arg(b *Builder, args []string, attributes map[string]bool, original string)
640 640
 
641 641
 	// If there is a default value associated with this arg then add it to the
642 642
 	// b.buildArgs if one is not already passed to the builder. The args passed
643
-	// to builder override the defaut value of 'arg'.
643
+	// to builder override the default value of 'arg'.
644 644
 	if _, ok := b.BuildArgs[name]; !ok && hasDefault {
645 645
 		b.BuildArgs[name] = value
646 646
 	}
... ...
@@ -4,7 +4,7 @@
4 4
 // parser package for more information) that are yielded from the parser itself.
5 5
 // Calling NewBuilder with the BuildOpts struct can be used to customize the
6 6
 // experience for execution purposes only. Parsing is controlled in the parser
7
-// package, and this division of resposibility should be respected.
7
+// package, and this division of responsibility should be respected.
8 8
 //
9 9
 // Please see the jump table targets for the actual invocations, most of which
10 10
 // will call out to the functions in internals.go to deal with their tasks.
... ...
@@ -70,7 +70,7 @@ func TestTestData(t *testing.T) {
70 70
 		}
71 71
 
72 72
 		if runtime.GOOS == "windows" {
73
-			// CRLF --> CR to match Unix behaviour
73
 +			// CRLF --> LF to match Unix behavior
74 74
 			content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1)
75 75
 		}
76 76
 
... ...
@@ -71,7 +71,7 @@ type ConfigFile struct {
71 71
 	filename    string                // Note: not serialized - for internal use only
72 72
 }
73 73
 
74
-// NewConfigFile initilizes an empty configuration file for the given filename 'fn'
74
+// NewConfigFile initializes an empty configuration file for the given filename 'fn'
75 75
 func NewConfigFile(fn string) *ConfigFile {
76 76
 	return &ConfigFile{
77 77
 		AuthConfigs: make(map[string]AuthConfig),
... ...
@@ -518,7 +518,7 @@ func (container *Container) AddMountPointWithVolume(destination string, vol volu
518 518
 	}
519 519
 }
520 520
 
521
-// IsDestinationMounted checkes whether a path is mounted on the container or not.
521
+// IsDestinationMounted checks whether a path is mounted on the container or not.
522 522
 func (container *Container) IsDestinationMounted(destination string) bool {
523 523
 	return container.MountPoints[destination] != nil
524 524
 }
... ...
@@ -41,7 +41,7 @@ func (container *Container) IpcMounts() []execdriver.Mount {
41 41
 	return nil
42 42
 }
43 43
 
44
-// UnmountVolumes explicitely unmounts volumes from the container.
44
+// UnmountVolumes explicitly unmounts volumes from the container.
45 45
 func (container *Container) UnmountVolumes(forceSyscall bool) error {
46 46
 	return nil
47 47
 }
... ...
@@ -121,7 +121,7 @@ func (m *containerMonitor) ExitOnNext() {
121 121
 }
122 122
 
123 123
 // Close closes the container's resources such as networking allocations and
124
-// unmounts the contatiner's root filesystem
124
+// unmounts the container's root filesystem
125 125
 func (m *containerMonitor) Close() error {
126 126
 	// Cleanup networking and mounts
127 127
 	m.supervisor.Cleanup(m.container)
... ...
@@ -56,13 +56,13 @@ while [ $# -gt 0 ]; do
56 56
 	layersFs=$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum')
57 57
 
58 58
 	IFS=$'\n'
59
-	# bash v4 on Windows CI requires CRLF seperator
59
+	# bash v4 on Windows CI requires CRLF separator
60 60
 	if [ "$(go env GOHOSTOS)" = 'windows' ]; then
61 61
 		major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. -f1)
62 62
 		if [ "$major" -ge 4 ]; then
63 63
 			IFS=$'\r\n'
64 64
 		fi
65
-	fi	
65
+	fi
66 66
 	layers=( ${layersFs} )
67 67
 	unset IFS
68 68
 
... ...
@@ -20,7 +20,7 @@ import (
20 20
 var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory")
21 21
 
22 22
 // ContainerCopy performs a deprecated operation of archiving the resource at
23
-// the specified path in the conatiner identified by the given name.
23
+// the specified path in the container identified by the given name.
24 24
 func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
25 25
 	container, err := daemon.GetContainer(name)
26 26
 	if err != nil {
... ...
@@ -49,9 +49,9 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *contain
49 49
 
50 50
 		// FIXME Windows: This code block is present in the Linux version and
51 51
 		// allows the contents to be copied to the container FS prior to it
52
-		// being started. However, the function utilises the FollowSymLinkInScope
52
+		// being started. However, the function utilizes the FollowSymLinkInScope
53 53
 		// path which does not cope with Windows volume-style file paths. There
54
-		// is a seperate effort to resolve this (@swernli), so this processing
54
+		// is a separate effort to resolve this (@swernli), so this processing
55 55
 		// is deferred for now. A case where this would be useful is when
56 56
 		// a dockerfile includes a VOLUME statement, but something is created
57 57
 		// in that directory during the dockerfile processing. What this means
... ...
@@ -13,7 +13,7 @@ import (
13 13
 func setupDumpStackTrap() {
14 14
 	// Windows does not support signals like *nix systems. So instead of
15 15
 	// trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be
16
-	// signalled.
16
+	// signaled.
17 17
 	go func() {
18 18
 		sa := syscall.SecurityAttributes{
19 19
 			Length: 0,
... ...
@@ -284,7 +284,7 @@ func (d *Driver) setupMounts(container *configs.Config, c *execdriver.Command) e
284 284
 		userMounts[m.Destination] = struct{}{}
285 285
 	}
286 286
 
287
-	// Filter out mounts that are overriden by user supplied mounts
287
+	// Filter out mounts that are overridden by user supplied mounts
288 288
 	var defaultMounts []*configs.Mount
289 289
 	_, mountDev := userMounts["/dev"]
290 290
 	for _, m := range container.Mounts {
... ...
@@ -3,7 +3,7 @@
3 3
 package btrfs
4 4
 
5 5
 // TODO(vbatts) remove this work-around once supported linux distros are on
6
-// btrfs utililties of >= 3.16.1
6
+// btrfs utilities of >= 3.16.1
7 7
 
8 8
 func btrfsBuildVersion() string {
9 9
 	return "-"
... ...
@@ -766,7 +766,7 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
766 766
 		if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil {
767 767
 			if devicemapper.DeviceIDExists(err) {
768 768
 				// Device ID already exists. This should not
769
-				// happen. Now we have a mechianism to find
769
+				// happen. Now we have a mechanism to find
770 770
 				// a free device ID. So something is not right.
771 771
 				// Give a warning and continue.
772 772
 				logrus.Errorf("Device ID %d exists in pool but it is supposed to be unused", deviceID)
... ...
@@ -818,7 +818,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf
818 818
 		if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil {
819 819
 			if devicemapper.DeviceIDExists(err) {
820 820
 				// Device ID already exists. This should not
821
-				// happen. Now we have a mechianism to find
821
+				// happen. Now we have a mechanism to find
822 822
 				// a free device ID. So something is not right.
823 823
 				// Give a warning and continue.
824 824
 				logrus.Errorf("Device ID %d exists in pool but it is supposed to be unused", deviceID)
... ...
@@ -1749,7 +1749,7 @@ func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error {
1749 1749
 
1750 1750
 	info.Deleted = true
1751 1751
 
1752
-	// save device metadata to refelect deleted state.
1752
+	// save device metadata to reflect deleted state.
1753 1753
 	if err := devices.saveMetadata(info); err != nil {
1754 1754
 		info.Deleted = false
1755 1755
 		return err
... ...
@@ -1759,7 +1759,7 @@ func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error {
1759 1759
 	return nil
1760 1760
 }
1761 1761
 
1762
-// Should be caled with devices.Lock() held.
1762
+// Should be called with devices.Lock() held.
1763 1763
 func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error {
1764 1764
 	if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil {
1765 1765
 		logrus.Debugf("Error opening transaction hash = %s deviceId = %d", "", info.DeviceID)
... ...
@@ -1805,7 +1805,7 @@ func (devices *DeviceSet) issueDiscard(info *devInfo) error {
1805 1805
 	// This is a workaround for the kernel not discarding block so
1806 1806
 	// on the thin pool when we remove a thinp device, so we do it
1807 1807
 	// manually.
1808
-	// Even if device is deferred deleted, activate it and isue
1808
+	// Even if device is deferred deleted, activate it and issue
1809 1809
 	// discards.
1810 1810
 	if err := devices.activateDeviceIfNeeded(info, true); err != nil {
1811 1811
 		return err
... ...
@@ -2131,7 +2131,7 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
2131 2131
 	defer devices.Unlock()
2132 2132
 
2133 2133
 	// If there are running containers when daemon crashes, during daemon
2134
-	// restarting, it will kill running contaienrs and will finally call
2134
+	// restarting, it will kill running containers and will finally call
2135 2135
 	// Put() without calling Get(). So info.MountCount may become negative.
2136 2136
 	// if info.mountCount goes negative, we do the unmount and assign
2137 2137
 	// it to 0.
... ...
@@ -13,7 +13,7 @@ package devmapper
13 13
 // 	 * version number of the interface that they were
14 14
 // 	 * compiled with.
15 15
 // 	 *
16
-// 	 * All recognised ioctl commands (ie. those that don't
16
+// 	 * All recognized ioctl commands (ie. those that don't
17 17
 // 	 * return -ENOTTY) fill out this field, even if the
18 18
 // 	 * command failed.
19 19
 // 	 */
... ...
@@ -177,7 +177,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
177 177
 
178 178
 	idFile := path.Join(mp, "id")
179 179
 	if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
180
-		// Create an "id" file with the container/image id in it to help reconscruct this in case
180
+		// Create an "id" file with the container/image id in it to help reconstruct this in case
181 181
 		// of later problems
182 182
 		if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
183 183
 			d.DeviceSet.UnmountDevice(id, mp)
... ...
@@ -17,14 +17,14 @@ import (
17 17
 type FsMagic uint32
18 18
 
19 19
 const (
20
-	// FsMagicUnsupported is a predifined contant value other than a valid filesystem id.
20
+	// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
21 21
 	FsMagicUnsupported = FsMagic(0x00000000)
22 22
 )
23 23
 
24 24
 var (
25 25
 	// DefaultDriver if a storage driver is not specified.
26 26
 	DefaultDriver string
27
-	// All registred drivers
27
+	// All registered drivers
28 28
 	drivers map[string]InitFunc
29 29
 
30 30
 	// ErrNotSupported returned when driver is not supported.
... ...
@@ -120,7 +120,7 @@ func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.I
120 120
 	return nil, ErrNotSupported
121 121
 }
122 122
 
123
-// getBuiltinDriver initalizes and returns the registered driver, but does not try to load from plugins
123
+// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
124 124
 func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
125 125
 	if initFunc, exists := drivers[name]; exists {
126 126
 		return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
... ...
@@ -30,7 +30,7 @@ var (
30 30
 	ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff")
31 31
 )
32 32
 
33
-// ApplyDiffProtoDriver wraps the ProtoDriver by extending the inteface with ApplyDiff method.
33
+// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method.
34 34
 type ApplyDiffProtoDriver interface {
35 35
 	graphdriver.ProtoDriver
36 36
 	// ApplyDiff writes the diff to the archive for the given id and parent id.
... ...
@@ -31,7 +31,7 @@ func init() {
31 31
 	graphdriver.Register("zfs", Init)
32 32
 }
33 33
 
34
-// Logger returns a zfs logger implmentation.
34
+// Logger returns a zfs logger implementation.
35 35
 type Logger struct{}
36 36
 
37 37
 // Log wraps log message from ZFS driver with a prefix '[zfs]'.
... ...
@@ -105,7 +105,7 @@ func (daemon *Daemon) Kill(container *container.Container) error {
105 105
 	return nil
106 106
 }
107 107
 
108
-// killPossibleDeadProcess is a wrapper aroung killSig() suppressing "no such process" error.
108
 +// killPossiblyDeadProcess is a wrapper around killWithSignal() suppressing "no such process" error.
109 109
 func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error {
110 110
 	err := daemon.killWithSignal(container, sig)
111 111
 	if err == syscall.ESRCH {
... ...
@@ -266,7 +266,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it
266 266
 		return excludeContainer
267 267
 	}
268 268
 
269
-	// Stop interation when the container arrives to the filter container
269
+	// Stop iteration when the container arrives to the filter container
270 270
 	if ctx.sinceFilter != nil {
271 271
 		if container.ID == ctx.sinceFilter.ID {
272 272
 			return stopIteration
... ...
@@ -54,7 +54,7 @@ func New(ctx logger.Context) (logger.Logger, error) {
54 54
 	}
55 55
 	extra := ctx.ExtraAttributes(nil)
56 56
 	logrus.Debugf("logging driver fluentd configured for container:%s, host:%s, port:%d, tag:%s, extra:%v.", ctx.ContainerID, host, port, tag, extra)
57
-	// logger tries to recoonect 2**32 - 1 times
57
+	// logger tries to reconnect 2**32 - 1 times
58 58
 	// failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds]
59 59
 	log, err := fluent.New(fluent.Config{FluentPort: port, FluentHost: host, RetryWait: 1000, MaxRetry: math.MaxInt32})
60 60
 	if err != nil {
... ...
@@ -146,7 +146,7 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int
146 146
 				// io.ErrUnexpectedEOF is returned from json.Decoder when there is
147 147
 				// remaining data in the parser's buffer while an io.EOF occurs.
148 148
 				// If the json logger writes a partial json log entry to the disk
149
-				// while at the same time the decoder tries to decode it, the race codition happens.
149
+				// while at the same time the decoder tries to decode it, the race condition happens.
150 150
 				if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
151 151
 					reader := io.MultiReader(dec.Buffered(), f)
152 152
 					dec = json.NewDecoder(reader)
... ...
@@ -32,7 +32,7 @@ func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateF
32 32
 	}, nil
33 33
 }
34 34
 
35
-//WriteLog write log messge to File
35
+//WriteLog write log message to File
36 36
 func (w *RotateFileWriter) Write(message []byte) (int, error) {
37 37
 	w.mu.Lock()
38 38
 	defer w.mu.Unlock()
... ...
@@ -106,7 +106,7 @@ func backup(fromPath, toPath string) error {
106 106
 	return os.Rename(fromPath, toPath)
107 107
 }
108 108
 
109
-// LogPath returns the location the given wirter logs to.
109
+// LogPath returns the location the given writer logs to.
110 110
 func (w *RotateFileWriter) LogPath() string {
111 111
 	return w.f.Name()
112 112
 }
... ...
@@ -91,7 +91,7 @@ func New(ctx logger.Context) (logger.Logger, error) {
91 91
 	tlsConfig := &tls.Config{}
92 92
 
93 93
 	// Splunk is using autogenerated certificates by default,
94
-	// allow users to trust them with skiping verification
94
+	// allow users to trust them with skipping verification
95 95
 	if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok {
96 96
 		insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
97 97
 		if err != nil {
... ...
@@ -18,7 +18,7 @@ const (
18 18
 )
19 19
 
20 20
 // NetworkControllerEnabled checks if the networking stack is enabled.
21
-// This feature depends on OS primitives and it's dissabled in systems like Windows.
21
+// This feature depends on OS primitives and it's disabled in systems like Windows.
22 22
 func (daemon *Daemon) NetworkControllerEnabled() bool {
23 23
 	return daemon.netController != nil
24 24
 }
... ...
@@ -70,7 +70,7 @@ func (m mounts) parts(i int) int {
70 70
 // 1. Select the previously configured mount points for the containers, if any.
71 71
 // 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
72 72
 // 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
73
-// 4. Cleanup old volumes that are about to be reasigned.
73
+// 4. Cleanup old volumes that are about to be reassigned.
74 74
 func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *runconfig.HostConfig) error {
75 75
 	binds := map[string]bool{}
76 76
 	mountPoints := map[string]*volume.MountPoint{}
... ...
@@ -148,7 +148,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
148 148
 
149 149
 	container.Lock()
150 150
 
151
-	// 4. Cleanup old volumes that are about to be reasigned.
151
+	// 4. Cleanup old volumes that are about to be reassigned.
152 152
 	for _, m := range mountPoints {
153 153
 		if m.BackwardsCompatible() {
154 154
 			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
... ...
@@ -41,7 +41,7 @@ type ImagePushConfig struct {
41 41
 	// MetadataStore is the storage backend for distribution-specific
42 42
 	// metadata.
43 43
 	MetadataStore metadata.Store
44
-	// LayerStore manges layers.
44
+	// LayerStore manages layers.
45 45
 	LayerStore layer.Store
46 46
 	// ImageStore manages images.
47 47
 	ImageStore image.Store
... ...
@@ -61,7 +61,7 @@ type transfer struct {
61 61
 
62 62
 	// running remains open as long as the transfer is in progress.
63 63
 	running chan struct{}
64
-	// hasWatchers stays open until all watchers release the trasnfer.
64
+	// hasWatchers stays open until all watchers release the transfer.
65 65
 	hasWatchers chan struct{}
66 66
 
67 67
 	// broadcastDone is true if the master progress channel has closed.
... ...
@@ -240,9 +240,9 @@ func (t *transfer) Cancel() {
240 240
 
241 241
 // DoFunc is a function called by the transfer manager to actually perform
242 242
 // a transfer. It should be non-blocking. It should wait until the start channel
243
-// is closed before transfering any data. If the function closes inactive, that
243
+// is closed before transferring any data. If the function closes inactive, that
244 244
 // signals to the transfer manager that the job is no longer actively moving
245
-// data - for example, it may be waiting for a dependent tranfer to finish.
245
+// data - for example, it may be waiting for a dependent transfer to finish.
246 246
 // This prevents it from taking up a slot.
247 247
 type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer
248 248
 
... ...
@@ -1,2 +1,2 @@
1
-# avoid commiting the awsconfig file used for releases
1
+# avoid committing the awsconfig file used for releases
2 2
 awsconfig
... ...
@@ -55,7 +55,7 @@ The `boot2docker` command reads its configuration from the `$BOOT2DOCKER_PROFILE
55 55
   The configuration shows you where `boot2docker` is looking for the `profile` file. It also outputs the settings that are in use.
56 56
 
57 57
 
58
-2. Initialise a default file to customize using `boot2docker config > ~/.boot2docker/profile` command.
58
+2. Initialize a default file to customize using `boot2docker config > ~/.boot2docker/profile` command.
59 59
 
60 60
 3. Add the following lines to `$HOME/.boot2docker/profile`:
61 61
 
... ...
@@ -12,7 +12,7 @@ weight = 7
12 12
 # Control and configure Docker with systemd
13 13
 
14 14
 Many Linux distributions use systemd to start the Docker daemon. This document
15
-shows a few examples of how to customise Docker's settings.
15
+shows a few examples of how to customize Docker's settings.
16 16
 
17 17
 ## Starting the Docker daemon
18 18
 
... ...
@@ -10,7 +10,7 @@ parent = "smn_linux"
10 10
 
11 11
 # openSUSE and SUSE Linux Enterprise
12 12
 
13
-This page provides instructions for installing and configuring the lastest
13
+This page provides instructions for installing and configuring the latest
14 14
 Docker Engine software on openSUSE and SUSE systems.
15 15
 
16 16
 >**Note:** You can also find bleeding edge Docker versions inside of the repositories maintained by the [Virtualization:containers project](https://build.opensuse.org/project/show/Virtualization:containers) on the [Open Build Service](https://build.opensuse.org/). This project delivers also other packages that are related with the Docker ecosystem (for example, Docker Compose).
... ...
@@ -99,11 +99,11 @@ This section lists each version from latest to oldest.  Each listing includes a
99 99
 * `GET /info` Now returns `Architecture` and `OSType` fields, providing information
100 100
   about the host architecture and operating system type that the daemon runs on.
101 101
 * `GET /networks/(name)` now returns a `Name` field for each container attached to the network.
102
-* `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it 
102
+* `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it
103 103
   consistent with other date/time values returned by the API.
104 104
 * `AuthConfig` now supports a `registrytoken` for token based authentication
105 105
 * `POST /containers/create` now has a 4M minimum value limit for `HostConfig.KernelMemory`
106
-* Pushes initated with `POST /images/(name)/push` and pulls initiated with `POST /images/create`
106
+* Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create`
107 107
   will be cancelled if the HTTP connection making the API request is closed before
108 108
   the push or pull completes.
109 109
 
... ...
@@ -27,11 +27,11 @@ and Docker images will report:
27 27
 
28 28
     delete, import, pull, push, tag, untag
29 29
 
30
-The `--since` and `--until` parameters can be Unix timestamps, date formated
30
+The `--since` and `--until` parameters can be Unix timestamps, date formatted
31 31
 timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
32 32
 relative to the client machine’s time. If you do not provide the --since option,
33 33
 the command returns only new and/or live events.  Supported formats for date
34
-formated time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
34
+formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
35 35
 `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
36 36
 timezone on the client will be used if you do not provide either a `Z` or a
37 37
 `+-00:00` timezone offset at the end of the timestamp.  When providing Unix
... ...
@@ -101,7 +101,7 @@ ID 260 gen 11 top level 5 path btrfs/subvolumes/3c9a9d7cc6a235eb2de58ca9ef3551c6
101 101
 ID 261 gen 12 top level 5 path btrfs/subvolumes/0a17decee4139b0de68478f149cc16346f5e711c5ae3bb969895f22dd6723751
102 102
 ```
103 103
 
104
-Under the `/var/lib/docker/btrfs/subvolumes` directoy, each of these subvolumes and snapshots are visible as a normal Unix directory:
104
+Under the `/var/lib/docker/btrfs/subvolumes` directory, each of these subvolumes and snapshots are visible as a normal Unix directory:
105 105
 
106 106
 ```bash
107 107
 $ ls -l /var/lib/docker/btrfs/subvolumes/
... ...
@@ -81,7 +81,7 @@ var (
81 81
 		HTTPStatusCode: http.StatusInternalServerError,
82 82
 	})
83 83
 
84
-	// ErrorCodeEmptyID is generated when an ID is the emptry string.
84
+	// ErrorCodeEmptyID is generated when an ID is the empty string.
85 85
 	ErrorCodeEmptyID = errcode.Register(errGroup, errcode.ErrorDescriptor{
86 86
 		Value:          "EMPTYID",
87 87
 		Message:        "Invalid empty id",
... ...
@@ -204,7 +204,7 @@ Respond with a string error if an error occurred.
204 204
 {}
205 205
 ```
206 206
 
207
-Perform neccessary tasks to release resources help by the plugin, for example
207
 +Perform necessary tasks to release resources held by the plugin, for example
208 208
 unmounting all the layered file systems.
209 209
 
210 210
 **Response**:
... ...
@@ -12,13 +12,13 @@ In this experimental phase, the Docker daemon creates a single daemon-wide mappi
12 12
 for all containers running on the same engine instance. The mappings will
13 13
 utilize the existing subordinate user and group ID feature available on all modern
14 14
 Linux distributions.
15
-The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and 
15
+The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and
16 16
 [`/etc/subgid`](http://man7.org/linux/man-pages/man5/subgid.5.html) files will be
17
-read for the user, and optional group, specified to the `--userns-remap` 
18
-parameter.  If you do not wish to specify your own user and/or group, you can 
17
+read for the user, and optional group, specified to the `--userns-remap`
18
+parameter.  If you do not wish to specify your own user and/or group, you can
19 19
 provide `default` as the value to this flag, and a user will be created on your behalf
20 20
 and provided subordinate uid and gid ranges. This default user will be named
21
-`dockremap`, and entries will be created for it in `/etc/passwd` and 
21
+`dockremap`, and entries will be created for it in `/etc/passwd` and
22 22
 `/etc/group` using your distro's standard user and group creation tools.
23 23
 
24 24
 > **Note**: The single mapping per-daemon restriction exists for this experimental
... ...
@@ -43,7 +43,7 @@ values in the following formats:
43 43
 If numeric IDs are provided, translation back to valid user or group names
44 44
 will occur so that the subordinate uid and gid information can be read, given
45 45
 these resources are name-based, not id-based.  If the numeric ID information
46
-provided does not exist as entries in `/etc/passwd` or `/etc/group`, dameon
46
+provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon
47 47
 startup will fail with an error message.
48 48
 
49 49
 *An example: starting with default Docker user management:*
... ...
@@ -67,7 +67,7 @@ create the following range, based on an existing user already having the first
67 67
 
68 68
 > **Note:** On a fresh Fedora install, we found that we had to `touch` the
69 69
 > `/etc/subuid` and `/etc/subgid` files to have ranges assigned when users
70
-> were created.  Once these files existed, range assigment on user creation
70
+> were created.  Once these files existed, range assignment on user creation
71 71
 > worked properly.
72 72
 
73 73
 If you have a preferred/self-managed user with subordinate ID mappings already
... ...
@@ -84,7 +84,7 @@ current experimental user namespace support.
84 84
 
85 85
 The simplest case exists where only one contiguous range is defined for the
86 86
 provided user or group. In this case, Docker will use that entire contiguous
87
-range for the mapping of host uids and gids to the container process.  This 
87
+range for the mapping of host uids and gids to the container process.  This
88 88
 means that the first ID in the range will be the remapped root user, and the
89 89
 IDs above that initial ID will map host ID 1 through the end of the range.
90 90
 
... ...
@@ -12,7 +12,7 @@ set -e
12 12
 #   will be used as Docker binary version and package version.
13 13
 # - The hash of the git commit will also be included in the Docker binary,
14 14
 #   with the suffix -dirty if the repository isn't clean.
15
-# - The script is intented to be run inside the docker container specified
15
+# - The script is intended to be run inside the docker container specified
16 16
 #   in the Dockerfile at the root of the source. In other words:
17 17
 #   DO NOT CALL THIS SCRIPT DIRECTLY.
18 18
 # - The right way to call this script is to invoke "make" from
... ...
@@ -225,7 +225,7 @@ release_build() {
225 225
 			;;
226 226
 		arm)
227 227
 			s3Arch=armel
228
-			# someday, we might potentially support mutliple GOARM values, in which case we might get armhf here too
228
+			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
229 229
 			;;
230 230
 		*)
231 231
 			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
... ...
@@ -268,7 +268,7 @@ func testGetSet(t *testing.T, store StoreBackend) {
268 268
 	if err != nil {
269 269
 		t.Fatal(err)
270 270
 	}
271
-	// skipping use of digest pkg because its used by the imlementation
271
+	// skipping use of digest pkg because it's used by the implementation
272 272
 	h := sha256.New()
273 273
 	_, err = h.Write(randomInput)
274 274
 	if err != nil {
... ...
@@ -96,7 +96,7 @@ type History struct {
96 96
 	Author string `json:"author,omitempty"`
97 97
 	// CreatedBy keeps the Dockerfile command used while building image.
98 98
 	CreatedBy string `json:"created_by,omitempty"`
99
-	// Comment is custom mesage set by the user when creating the image.
99
+	// Comment is custom message set by the user when creating the image.
100 100
 	Comment string `json:"comment,omitempty"`
101 101
 	// EmptyLayer is set to true if this history item did not generate a
102 102
 	// layer. Otherwise, the history item is associated with the next
... ...
@@ -6,7 +6,7 @@ import "github.com/docker/docker/layer"
6 6
 
7 7
 // RootFS describes images root filesystem
8 8
 // This is currently a placeholder that only supports layers. In the future
9
-// this can be made into a interface that supports different implementaions.
9
+// this can be made into an interface that supports different implementations.
10 10
 type RootFS struct {
11 11
 	Type    string         `json:"type"`
12 12
 	DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`
... ...
@@ -12,7 +12,7 @@ import (
12 12
 
13 13
 // RootFS describes images root filesystem
14 14
 // This is currently a placeholder that only supports layers. In the future
15
-// this can be made into a interface that supports different implementaions.
15
+// this can be made into an interface that supports different implementations.
16 16
 type RootFS struct {
17 17
 	Type      string         `json:"type"`
18 18
 	DiffIDs   []layer.DiffID `json:"diff_ids,omitempty"`
... ...
@@ -137,7 +137,7 @@ func (s *DockerSuite) TestPostContainersAttach(c *check.C) {
137 137
 	// Since the container only emits stdout, attaching to stderr should return nothing.
138 138
 	expectTimeout(conn, br, "stdout")
139 139
 
140
-	// Test the simlar functions of the stderr stream.
140
+	// Test the similar functions of the stderr stream.
141 141
 	cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "cat >&2")
142 142
 	cid = strings.TrimSpace(cid)
143 143
 	conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain")
... ...
@@ -35,7 +35,7 @@ func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) {
35 35
 	errChan := make(chan error)
36 36
 	go func() {
37 37
 		defer close(errChan)
38
-		// Container is wating for us to signal it to stop
38
+		// Container is waiting for us to signal it to stop
39 39
 		dockerCmd(c, "stop", id)
40 40
 		// And wait for the attach command to end
41 41
 		errChan <- attachCmd.Wait()
... ...
@@ -4612,7 +4612,7 @@ func (s *DockerSuite) TestBuildInvalidTag(c *check.C) {
4612 4612
 	testRequires(c, DaemonIsLinux)
4613 4613
 	name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200)
4614 4614
 	_, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true)
4615
-	// if the error doesnt check for illegal tag name, or the image is built
4615
+	// if the error doesn't check for illegal tag name, or the image is built
4616 4616
 	// then this should fail
4617 4617
 	if !strings.Contains(out, "invalid reference format") || strings.Contains(out, "Sending build context to Docker daemon") {
4618 4618
 		c.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out)
... ...
@@ -4817,7 +4817,7 @@ func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) {
4817 4817
 	// This testcase is supposed to generate an error because the
4818 4818
 	// JSON array we're passing in on the CMD uses single quotes instead
4819 4819
 	// of double quotes (per the JSON spec). This means we interpret it
4820
-	// as a "string" insead of "JSON array" and pass it on to "sh -c" and
4820
+	// as a "string" instead of "JSON array" and pass it on to "sh -c" and
4821 4821
 	// it should barf on it.
4822 4822
 	name := "testbuildsinglequotefails"
4823 4823
 
... ...
@@ -87,7 +87,7 @@ func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) {
87 87
 }
88 88
 
89 89
 // Test for error when DST ends in a trailing path separator but exists as a
90
-// file. Also test that we cannot overwirite an existing directory with a
90
+// file. Also test that we cannot overwrite an existing directory with a
91 91
 // non-directory and cannot overwrite an existing
92 92
 func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) {
93 93
 	testRequires(c, DaemonIsLinux)
... ...
@@ -171,7 +171,7 @@ func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) {
171 171
 	dockerCmd(c, "tag", "-f", "busybox", "foobox")
172 172
 
173 173
 	out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=true")
174
-	// Exect one dangling image
174
+	// Expect one dangling image
175 175
 	c.Assert(strings.Count(out, imageID), checker.Equals, 1)
176 176
 }
177 177
 
... ...
@@ -170,7 +170,7 @@ func (s *DockerNetworkSuite) SetUpSuite(c *check.C) {
170 170
 			return
171 171
 		}
172 172
 		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
173
-		// make sure libnetwork is now asking to release the expected address fro mthe expected poolid
173
+		// make sure libnetwork is now asking to release the expected address from the expected poolid
174 174
 		if addressRequest.PoolID != poolID {
175 175
 			fmt.Fprintf(w, `{"Error":"unknown pool id"}`)
176 176
 		} else if addressReleaseReq.Address != gw {
... ...
@@ -429,7 +429,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpamMultipleNetworks(c *check.C) {
429 429
 	assertNwIsAvailable(c, "test5")
430 430
 
431 431
 	// test network with multiple subnets
432
-	// bridge network doesnt support multiple subnets. hence, use a dummy driver that supports
432
+	// bridge network doesn't support multiple subnets. hence, use a dummy driver that supports
433 433
 
434 434
 	dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", "test6")
435 435
 	assertNwIsAvailable(c, "test6")
... ...
@@ -491,7 +491,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIpamInvalidCombinations(c *check.C
491 491
 	_, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--gateway=192.168.0.1", "--gateway=192.168.0.2", "test")
492 492
 	c.Assert(err, check.NotNil)
493 493
 
494
-	// Multiple overlaping subnets in the same network must fail
494
+	// Multiple overlapping subnets in the same network must fail
495 495
 	_, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--subnet=192.168.1.0/16", "test")
496 496
 	c.Assert(err, check.NotNil)
497 497
 
... ...
@@ -223,7 +223,7 @@ func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) {
223 223
 	// Cannot find expected port binding (expBnd2) in docker ps output
224 224
 	c.Assert(out, checker.Contains, expBnd2)
225 225
 
226
-	// Remove container now otherwise it will interfeer with next test
226
+	// Remove container now otherwise it will interfere with next test
227 227
 	stopRemoveContainer(id, c)
228 228
 
229 229
 	// Run the container with explicit port bindings and no exposed ports
... ...
@@ -236,7 +236,7 @@ func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) {
236 236
 	c.Assert(out, checker.Contains, expBnd1)
237 237
 	// Cannot find expected port binding (expBnd2) in docker ps output
238 238
 	c.Assert(out, checker.Contains, expBnd2)
239
-	// Remove container now otherwise it will interfeer with next test
239
+	// Remove container now otherwise it will interfere with next test
240 240
 	stopRemoveContainer(id, c)
241 241
 
242 242
 	// Run the container with one unpublished exposed port and one explicit port binding
... ...
@@ -754,7 +754,7 @@ func (s *DockerSuite) TestRunContainerNetwork(c *check.C) {
754 754
 
755 755
 func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) {
756 756
 	// TODO Windows: This is Linux specific as --link is not supported and
757
-	// this will be deprecated in favour of container networking model.
757
+	// this will be deprecated in favor of container networking model.
758 758
 	testRequires(c, DaemonIsLinux, NotUserNamespace)
759 759
 	dockerCmd(c, "run", "--name", "linked", "busybox", "true")
760 760
 
... ...
@@ -109,14 +109,14 @@ func (s *DockerSuite) TestVolumeCliLsFilterDangling(c *check.C) {
109 109
 
110 110
 	out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=false")
111 111
 
112
-	// Same as above, but expicitly disabling dangling
112
+	// Same as above, but explicitly disabling dangling
113 113
 	c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output"))
114 114
 	c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output"))
115 115
 	c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output"))
116 116
 
117 117
 	out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=true")
118 118
 
119
-	// Filter "dangling" volumes; ony "dangling" (unused) volumes should be in the output
119
+	// Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output
120 120
 	c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output"))
121 121
 	c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected"))
122 122
 	c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected"))
... ...
@@ -30,7 +30,7 @@ var (
30 30
 
31 31
 	// daemonPlatform is held globally so that tests can make intelligent
32 32
 	// decisions on how to configure themselves according to the platform
33
-	// of the daemon. This is initialised in docker_utils by sending
33
+	// of the daemon. This is initialized in docker_utils by sending
34 34
 	// a version call to the daemon and examining the response header.
35 35
 	daemonPlatform string
36 36
 
... ...
@@ -1416,7 +1416,7 @@ func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (
1416 1416
 			return nil, fmt.Errorf("cannot start fake storage: %v", err)
1417 1417
 		}
1418 1418
 	} else {
1419
-		// always start a local http server on CLI test machin
1419
+		// always start a local http server on CLI test machine
1420 1420
 		httpServer := httptest.NewServer(http.FileServer(http.Dir(root)))
1421 1421
 		server = &localGitServer{httpServer}
1422 1422
 	}
... ...
@@ -1430,7 +1430,7 @@ func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (
1430 1430
 // Write `content` to the file at path `dst`, creating it if necessary,
1431 1431
 // as well as any missing directories.
1432 1432
 // The file is truncated if it already exists.
1433
-// Fail the test when error occures.
1433
+// Fail the test when an error occurs.
1434 1434
 func writeFile(dst, content string, c *check.C) {
1435 1435
 	// Create subdirectories if necessary
1436 1436
 	c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil)
... ...
@@ -1443,7 +1443,7 @@ func writeFile(dst, content string, c *check.C) {
1443 1443
 }
1444 1444
 
1445 1445
 // Return the contents of file at path `src`.
1446
-// Fail the test when error occures.
1446
+// Fail the test when an error occurs.
1447 1447
 func readFile(src string, c *check.C) (content string) {
1448 1448
 	data, err := ioutil.ReadFile(src)
1449 1449
 	c.Assert(err, check.IsNil)
... ...
@@ -1,6 +1,6 @@
1 1
 // Package layer is package for managing read only
2 2
 // and read-write mounts on the union file system
3
-// driver. Read-only mounts are refenced using a
3
+// driver. Read-only mounts are referenced using a
4 4
 // content hash and are protected from mutation in
5 5
 // the exposed interface. The tar format is used
6 6
 // to create read only layers and export both
... ...
@@ -189,7 +189,7 @@ type MetadataStore interface {
189 189
 	GetInitID(string) (string, error)
190 190
 	GetMountParent(string) (ChainID, error)
191 191
 
192
-	// List returns the full list of referened
192
+	// List returns the full list of referenced
193 193
 	// read-only and read-write layers
194 194
 	List() ([]ChainID, []string, error)
195 195
 
... ...
@@ -418,7 +418,7 @@ func (ls *layerStore) saveMount(mount *mountedLayer) error {
418 418
 func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit) (string, error) {
419 419
 	// Use "<graph-id>-init" to maintain compatibility with graph drivers
420 420
 	// which are expecting this layer with this special name. If all
421
-	// graph drivers can be updated to not rely on knowin about this layer
421
+	// graph drivers can be updated to not rely on knowing about this layer
422 422
 	// then the initID should be randomly generated.
423 423
 	initID := fmt.Sprintf("%s-init", graphID)
424 424
 
... ...
@@ -37,11 +37,11 @@ and Docker images will report:
37 37
 **--until**=""
38 38
    Stream events until this timestamp
39 39
 
40
-The `--since` and `--until` parameters can be Unix timestamps, date formated
40
+The `--since` and `--until` parameters can be Unix timestamps, date formatted
41 41
 timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
42 42
 relative to the client machine’s time. If you do not provide the --since option,
43 43
 the command returns only new and/or live events.  Supported formats for date
44
-formated time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
44
+formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
45 45
 `2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
46 46
 timezone on the client will be used if you do not provide either a `Z` or a
47 47
 `+-00:00` timezone offset at the end of the timestamp.  When providing Unix
... ...
@@ -49,7 +49,7 @@ timestamps enter seconds[.nanoseconds], where seconds is the number of seconds
49 49
 that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
50 50
 seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
51 51
 fraction of a second no more than nine digits long.
52
-   
52
+
53 53
 # EXAMPLES
54 54
 
55 55
 ## Listening for Docker events
... ...
@@ -48,7 +48,7 @@ the running containers.
48 48
       .Ports - Exposed ports.
49 49
       .Status - Container status.
50 50
       .Size - Container disk size.
51
-      .Labels - All labels asigned to the container.
51
+      .Labels - All labels assigned to the container.
52 52
       .Label - Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}`
53 53
 
54 54
 **--help**
... ...
@@ -106,7 +106,7 @@ func TestParseEnvFileBadlyFormattedFile(t *testing.T) {
106 106
 	}
107 107
 }
108 108
 
109
-// Test ParseEnvFile for a file with a line exeeding bufio.MaxScanTokenSize
109
+// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize
110 110
 func TestParseEnvFileLineTooLongFile(t *testing.T) {
111 111
 	content := strings.Repeat("a", bufio.MaxScanTokenSize+42)
112 112
 	content = fmt.Sprint("foo=", content)
... ...
@@ -22,7 +22,7 @@ func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
22 22
 }
23 23
 
24 24
 // Set sets an IPv4 or IPv6 address from a given string. If the given
25
-// string is not parsable as an IP address it returns an error.
25
+// string is not parseable as an IP address it returns an error.
26 26
 func (o *IPOpt) Set(val string) error {
27 27
 	ip := net.ParseIP(val)
28 28
 	if ip == nil {
... ...
@@ -31,7 +31,7 @@ type (
31 31
 	Archive io.ReadCloser
32 32
 	// Reader is a type of io.Reader.
33 33
 	Reader io.Reader
34
-	// Compression is the state represtents if compressed or not.
34
+	// Compression is the state that represents if compressed or not.
35 35
 	Compression int
36 36
 	// TarChownOptions wraps the chown options UID and GID.
37 37
 	TarChownOptions struct {
... ...
@@ -19,7 +19,7 @@ func fixVolumePathPrefix(srcPath string) string {
19 19
 }
20 20
 
21 21
 // getWalkRoot calculates the root path when performing a TarWithOptions.
22
-// We use a seperate function as this is platform specific. On Linux, we
22
+// We use a separate function as this is platform specific. On Linux, we
23 23
 // can't use filepath.Join(srcPath,include) because this will clean away
24 24
 // a trailing "." or "/" which may be important.
25 25
 func getWalkRoot(srcPath string, include string) string {
... ...
@@ -19,7 +19,7 @@ func fixVolumePathPrefix(srcPath string) string {
19 19
 }
20 20
 
21 21
 // getWalkRoot calculates the root path when performing a TarWithOptions.
22
-// We use a seperate function as this is platform specific.
22
+// We use a separate function as this is platform specific.
23 23
 func getWalkRoot(srcPath string, include string) string {
24 24
 	return filepath.Join(srcPath, include)
25 25
 }
... ...
@@ -150,7 +150,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
150 150
 
151 151
 		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
152 152
 		// This block is here to ensure the change is recorded even if the
153
-		// modify time, mode and size of the parent directoriy in the rw and ro layers are all equal.
153
+		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
154 154
 		// Check https://github.com/docker/docker/pull/13590 for details.
155 155
 		if f.IsDir() {
156 156
 			changedDirs[path] = struct{}{}
... ...
@@ -9,7 +9,7 @@ package archive
9 9
 const WhiteoutPrefix = ".wh."
10 10
 
11 11
 // WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
12
-// for remoing an actaul file. Normally these files are excluded from exported
12
+// for removing an actual file. Normally these files are excluded from exported
13 13
 // archives.
14 14
 const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
15 15
 
... ...
@@ -87,7 +87,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
87 87
 func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
88 88
 
89 89
 	// Try to retrieve the next available loopback device via syscall.
90
-	// If it fails, we discard error and start loopking for a
90
+	// If it fails, we discard error and start looping for a
91 91
 	// loopback from index 0.
92 92
 	startIndex, err := getNextFreeLoopbackIndex()
93 93
 	if err != nil {
... ...
@@ -290,7 +290,7 @@ func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVP
290 290
 }
291 291
 
292 292
 // WatchTree will fail the first time, and return the mockKVchan afterwards.
293
-// This is the behaviour we need for testing.. If we need 'moar', should update this.
293
+// This is the behavior we need for testing.. If we need 'moar', should update this.
294 294
 func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
295 295
 	if s.watchTreeCallCount == 0 {
296 296
 		s.watchTreeCallCount = 1
... ...
@@ -1,6 +1,6 @@
1 1
 // Package filenotify provides a mechanism for watching file(s) for changes.
2 2
 // Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support.
3
-// These are wrapped up in a common interface so that either can be used interchangably in your code.
3
+// These are wrapped up in a common interface so that either can be used interchangeably in your code.
4 4
 package filenotify
5 5
 
6 6
 import "gopkg.in/fsnotify.v1"
... ...
@@ -24,7 +24,7 @@ const watchWaitTime = 200 * time.Millisecond
24 24
 
25 25
 // filePoller is used to poll files for changes, especially in cases where fsnotify
26 26
 // can't be run (e.g. when inotify handles are exhausted)
27
-// filePoller satifies the FileWatcher interface
27
+// filePoller satisfies the FileWatcher interface
28 28
 type filePoller struct {
29 29
 	// watches is the list of files currently being polled, close the associated channel to stop the watch
30 30
 	watches map[string]chan struct{}
... ...
@@ -78,7 +78,7 @@ func Matches(file string, patterns []string) (bool, error) {
78 78
 
79 79
 // OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
80 80
 // It will assume that the inputs have been preprocessed and therefore the function
81
-// doen't need to do as much error checking and clean-up. This was done to avoid
81
+// doesn't need to do as much error checking and clean-up. This was done to avoid
82 82
 // repeating these steps on each file being checked during the archive process.
83 83
 // The more generic fileutils.Matches() can't make these assumptions.
84 84
 func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
... ...
@@ -295,7 +295,7 @@ func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, s
295 295
 	}
296 296
 }
297 297
 
298
-// ParseCgroupPaths arses 'procCgroupData', which is output of '/proc/<pid>/cgroup', and returns
298
+// ParseCgroupPaths parses 'procCgroupData', which is output of '/proc/<pid>/cgroup', and returns
299 299
 // a map which cgroup name as key and path as value.
300 300
 func ParseCgroupPaths(procCgroupData string) map[string]string {
301 301
 	cgroupPaths := map[string]string{}
... ...
@@ -337,7 +337,7 @@ func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) {
337 337
 	}
338 338
 }
339 339
 
340
-// RunAtDifferentDate runs the specifed function with the given time.
340
+// RunAtDifferentDate runs the specified function with the given time.
341 341
 // It changes the date of the system, which can led to weird behaviors.
342 342
 func RunAtDifferentDate(date time.Time, block func()) {
343 343
 	// Layout for date. MMDDhhmmYYYY
... ...
@@ -309,7 +309,7 @@ func TestCompareDirectoryEntries(t *testing.T) {
309 309
 	}
310 310
 }
311 311
 
312
-// FIXME make an "unhappy path" test for ListTar without "panicing" :-)
312
+// FIXME make an "unhappy path" test for ListTar without "panicking" :-)
313 313
 func TestListTar(t *testing.T) {
314 314
 	tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar")
315 315
 	if err != nil {
... ...
@@ -20,7 +20,7 @@ func NopWriteCloser(w io.Writer) io.WriteCloser {
20 20
 	return &nopWriteCloser{w}
21 21
 }
22 22
 
23
-// NopFlusher represents a type which flush opetatin is nop.
23
+// NopFlusher represents a type whose flush operation is a nop.
24 24
 type NopFlusher struct{}
25 25
 
26 26
 // Flush is a nop operation.
... ...
@@ -19,8 +19,8 @@ type JSONLog struct {
19 19
 
20 20
 // Format returns the log formatted according to format
21 21
 // If format is nil, returns the log message
22
-// If format is json, returns the log marshalled in json format
23
-// By defalut, returns the log with the log time formatted according to format.
22
+// If format is json, returns the log marshaled in json format
23
+// By default, returns the log with the log time formatted according to format.
24 24
 func (jl *JSONLog) Format(format string) (string, error) {
25 25
 	if format == "" {
26 26
 		return jl.Log, nil
... ...
@@ -60,7 +60,7 @@ func (p *JSONProgress) String() string {
60 60
 		percentage = 50
61 61
 	}
62 62
 	if width > 110 {
63
-		// this number can't be negetive gh#7136
63
+		// this number can't be negative gh#7136
64 64
 		numSpaces := 0
65 65
 		if 50-percentage > 0 {
66 66
 			numSpaces = 50 - percentage
... ...
@@ -106,7 +106,7 @@ type JSONMessage struct {
106 106
 
107 107
 // Display displays the JSONMessage to `out`. `isTerminal` describes if `out`
108 108
 // is a terminal. If this is the case, it will erase the entire current line
109
-// when dislaying the progressbar.
109
+// when displaying the progressbar.
110 110
 func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
111 111
 	if jm.Error != nil {
112 112
 		if jm.Error.Code == 401 {
... ...
@@ -41,7 +41,7 @@ func (l *lockCtr) inc() {
41 41
 	atomic.AddInt32(&l.waiters, 1)
42 42
 }
43 43
 
44
-// dec decrements the number of waiters wating on the lock
44
+// dec decrements the number of waiters waiting on the lock
45 45
 func (l *lockCtr) dec() {
46 46
 	atomic.AddInt32(&l.waiters, -1)
47 47
 }
... ...
@@ -1228,7 +1228,7 @@ func (v mergeVal) IsBoolFlag() bool {
1228 1228
 
1229 1229
 // Merge is an helper function that merges n FlagSets into a single dest FlagSet
1230 1230
 // In case of name collision between the flagsets it will apply
1231
-// the destination FlagSet's errorHandling behaviour.
1231
+// the destination FlagSet's errorHandling behavior.
1232 1232
 func Merge(dest *FlagSet, flagsets ...*FlagSet) error {
1233 1233
 	for _, fset := range flagsets {
1234 1234
 		for k, f := range fset.formal {
... ...
@@ -23,7 +23,7 @@ const (
23 23
 	SYNCHRONOUS = syscall.MS_SYNCHRONOUS
24 24
 
25 25
 	// DIRSYNC will force all directory updates within the file system to be done
26
-	// synchronously. This affects the following system calls: creat, link,
26
+	// synchronously. This affects the following system calls: creat, link,
27 27
 	// unlink, symlink, mkdir, rmdir, mknod and rename.
28 28
 	DIRSYNC = syscall.MS_DIRSYNC
29 29
 
... ...
@@ -168,7 +168,7 @@ func TestSubtreeShared(t *testing.T) {
168 168
 		}
169 169
 	}()
170 170
 
171
-	// NOW, check that the file from the outside directory is avaible in the source directory
171
+	// NOW, check that the file from the outside directory is available in the source directory
172 172
 	if _, err := os.Stat(sourceCheckPath); err != nil {
173 173
 		t.Fatal(err)
174 174
 	}
... ...
@@ -128,7 +128,7 @@ func (filters Args) Len() int {
128 128
 	return len(filters.fields)
129 129
 }
130 130
 
131
-// MatchKVList returns true if the values for the specified field maches the ones
131
+// MatchKVList returns true if the values for the specified field match the ones
132 132
 // from the sources.
133 133
 // e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
134 134
 //      field is 'label' and sources are {'label1': '1', 'label2': '2'}
... ...
@@ -10,7 +10,7 @@ import (
10 10
 	"time"
11 11
 )
12 12
 
13
-// Rand is a global *rand.Rand instance, which initilized with NewSource() source.
13
+// Rand is a global *rand.Rand instance, which is initialized with NewSource() source.
14 14
 var Rand = rand.New(NewSource())
15 15
 
16 16
 // Reader is a global, shared instance of a pseudorandom bytes generator.
... ...
@@ -41,7 +41,7 @@ func naiveSelf() string {
41 41
 	if absName, err := filepath.Abs(name); err == nil {
42 42
 		return absName
43 43
 	}
44
-	// if we coudn't get absolute name, return original
44
+	// if we couldn't get absolute name, return original
45 45
 	// (NOTE: Go only errors on Abs() if os.Getwd fails)
46 46
 	return name
47 47
 }
... ...
@@ -54,7 +54,7 @@ func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []b
54 54
 	return []byte(str + streamNewline)
55 55
 }
56 56
 
57
-// FormatError formats the specifed error.
57
+// FormatError formats the specified error.
58 58
 func (sf *StreamFormatter) FormatError(err error) []byte {
59 59
 	if sf.json {
60 60
 		jsonError, ok := err.(*jsonmessage.JSONError)
... ...
@@ -48,7 +48,7 @@ func generateID(crypto bool) string {
48 48
 		}
49 49
 		id := hex.EncodeToString(b)
50 50
 		// if we try to parse the truncated for as an int and we don't have
51
-		// an error then the value is all numberic and causes issues when
51
+		// an error then the value is all numeric and causes issues when
52 52
 		// used as a hostname. ref #3869
53 53
 		if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
54 54
 			continue
... ...
@@ -5,7 +5,7 @@ import (
5 5
 	"strings"
6 6
 )
7 7
 
8
-// StrSlice representes a string or an array of strings.
8
+// StrSlice represents a string or an array of strings.
9 9
 // We need to override the json decoder to accept both options.
10 10
 type StrSlice struct {
11 11
 	parts []string
... ...
@@ -91,7 +91,7 @@ func walkSymlinks(path string) (string, error) {
91 91
 			return "", errors.New("EvalSymlinks: too many links in " + originalPath)
92 92
 		}
93 93
 
94
-		// A path beginnging with `\\?\` represents the root, so automatically
94
+		// A path beginning with `\\?\` represents the root, so automatically
95 95
 		// skip that part and begin processing the next segment.
96 96
 		if strings.HasPrefix(path, longpath.Prefix) {
97 97
 			b.WriteString(longpath.Prefix)
... ...
@@ -36,7 +36,7 @@ type cgroupMemInfo struct {
36 36
 	// Whether soft limit is supported or not
37 37
 	MemoryReservation bool
38 38
 
39
-	// Whether OOM killer disalbe is supported or not
39
+	// Whether OOM killer disable is supported or not
40 40
 	OomKillDisable bool
41 41
 
42 42
 	// Whether memory swappiness is supported or not
... ...
@@ -5,7 +5,7 @@ import (
5 5
 	"unsafe"
6 6
 )
7 7
 
8
-// LUtimesNano is used to change access and modification time of the speficied path.
8
+// LUtimesNano is used to change access and modification time of the specified path.
9 9
 // It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
10 10
 func LUtimesNano(path string, ts []syscall.Timespec) error {
11 11
 	// These are not currently available in syscall
... ...
@@ -4,7 +4,7 @@ import "sort"
4 4
 
5 5
 // FileInfoSumInterface provides an interface for accessing file checksum
6 6
 // information within a tar file. This info is accessed through interface
7
-// so the actual name and sum cannot be medled with.
7
+// so the actual name and sum cannot be meddled with.
8 8
 type FileInfoSumInterface interface {
9 9
 	// File name
10 10
 	Name() string
... ...
@@ -146,7 +146,7 @@ var (
146 146
 	}
147 147
 )
148 148
 
149
-// DefaultTHash is default TarSum hashing algoritm - "sha256".
149
+// DefaultTHash is default TarSum hashing algorithm - "sha256".
150 150
 var DefaultTHash = NewTHash("sha256", sha256.New)
151 151
 
152 152
 type simpleTHash struct {
... ...
@@ -261,7 +261,7 @@ func (ts *tarSum) Read(buf []byte) (int, error) {
261 261
 		return 0, err
262 262
 	}
263 263
 
264
-	// Filling the tar writter
264
+	// Filling the tar writer
265 265
 	if _, err = ts.tarW.Write(buf2[:n]); err != nil {
266 266
 		return 0, err
267 267
 	}
... ...
@@ -51,7 +51,7 @@ func GetFdInfo(in interface{}) (uintptr, bool) {
51 51
 func GetWinsize(fd uintptr) (*Winsize, error) {
52 52
 	ws := &Winsize{}
53 53
 	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
54
-	// Skipp errno = 0
54
+	// Skip errno = 0
55 55
 	if err == 0 {
56 56
 		return ws, nil
57 57
 	}
... ...
@@ -61,7 +61,7 @@ func GetWinsize(fd uintptr) (*Winsize, error) {
61 61
 // SetWinsize tries to set the specified window size for the specified file descriptor.
62 62
 func SetWinsize(fd uintptr, ws *Winsize) error {
63 63
 	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
64
-	// Skipp errno = 0
64
+	// Skip errno = 0
65 65
 	if err == 0 {
66 66
 		return nil
67 67
 	}
... ...
@@ -44,7 +44,7 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
44 44
 		// we want the number of colons in the T portion of the timestamp
45 45
 		tcolons := strings.Count(value, ":")
46 46
 		// if parseInLocation is off and we have a +/- zone offset (not Z) then
47
-		// there will be an extra colon in the input for the tz offset subract that
47
+		// there will be an extra colon in the input for the tz offset subtract that
48 48
 		// colon from the tcolons count
49 49
 		if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
50 50
 			tcolons--
... ...
@@ -29,8 +29,8 @@ func (vi *VersionInfo) isValid() bool {
29 29
 //
30 30
 // Each VersionInfo will be converted to a string in the format of
31 31
 // "product/version", where the "product" is get from the name field, while
32
-// version is get from the version field. Several pieces of verson information
33
-// will be concatinated and separated by space.
32
+// version is get from the version field. Several pieces of version information
33
+// will be concatenated and separated by space.
34 34
 //
35 35
 // Example:
36 36
 // AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"})
... ...
@@ -38,7 +38,7 @@ func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (stri
38 38
 
39 39
 	loginAgainstOfficialIndex := serverAddress == IndexServer
40 40
 
41
-	// to avoid sending the server address to the server it should be removed before being marshalled
41
+	// to avoid sending the server address to the server it should be removed before being marshaled
42 42
 	authCopy := *authConfig
43 43
 	authCopy.ServerAddress = ""
44 44
 
... ...
@@ -125,7 +125,7 @@ type Endpoint struct {
125 125
 	URLBuilder     *v2.URLBuilder
126 126
 }
127 127
 
128
-// Get the formated URL for the root of this registry Endpoint
128
+// Get the formatted URL for the root of this registry Endpoint
129 129
 func (e *Endpoint) String() string {
130 130
 	return fmt.Sprintf("%s/v%d/", e.URL, e.Version)
131 131
 }
... ...
@@ -100,8 +100,8 @@ func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
100 100
 	// Authorization should not be set on 302 redirect for untrusted locations.
101 101
 	// This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests.
102 102
 	// As the authorization logic is currently implemented in RoundTrip,
103
-	// a 302 redirect is detected by looking at the Referer header as go http package adds said header.
104
-	// This is safe as Docker doesn't set Referer in other scenarios.
103
+	// a 302 redirect is detected by looking at the Referer header (sic, the HTTP spec's spelling) as the Go http package adds said header.
104
+	// This is safe as Docker doesn't set Referer in other scenarios.
105 105
 	if orig.Header.Get("Referer") != "" && !trustedLocation(orig) {
106 106
 		return tr.RoundTripper.RoundTrip(orig)
107 107
 	}
... ...
@@ -26,7 +26,7 @@ type SearchResults struct {
26 26
 	Query string `json:"query"`
27 27
 	// NumResults indicates the number of results the query returned
28 28
 	NumResults int `json:"num_results"`
29
-	// Results is a slice containing the acutal results for the search
29
+	// Results is a slice containing the actual results for the search
30 30
 	Results []SearchResult `json:"results"`
31 31
 }
32 32
 
... ...
@@ -59,7 +59,7 @@ func DecodeContainerConfig(src io.Reader) (*Config, *HostConfig, error) {
59 59
 	// Perform platform-specific processing of Volumes and Binds.
60 60
 	if w.Config != nil && hc != nil {
61 61
 
62
-		// Initialise the volumes map if currently nil
62
+		// Initialize the volumes map if currently nil
63 63
 		if w.Config.Volumes == nil {
64 64
 			w.Config.Volumes = make(map[string]struct{})
65 65
 		}
... ...
@@ -46,7 +46,7 @@ func (w *ContainerConfigWrapper) getHostConfig() *HostConfig {
46 46
 	}
47 47
 
48 48
 	// Make sure NetworkMode has an acceptable value. We do this to ensure
49
-	// backwards compatible API behaviour.
49
+	// backwards compatible API behavior.
50 50
 	hc = SetDefaultNetModeIfBlank(hc)
51 51
 
52 52
 	return hc
... ...
@@ -229,7 +229,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
229 229
 		}
230 230
 	}
231 231
 
232
-	// Can't evalute options passed into --tmpfs until we actually mount
232
+	// Can't evaluate options passed into --tmpfs until we actually mount
233 233
 	tmpfs := make(map[string]string)
234 234
 	for _, t := range flTmpfs.GetAll() {
235 235
 		if arr := strings.SplitN(t, ":", 2); len(arr) > 1 {
... ...
@@ -343,7 +343,7 @@ func setupPlatformVolume(u []string, w []string) ([]string, string) {
343 343
 	return a, s
344 344
 }
345 345
 
346
-// Simple parse with MacAddress validatation
346
+// Simple parse with MacAddress validation
347 347
 func TestParseWithMacAddress(t *testing.T) {
348 348
 	invalidMacAddress := "--mac-address=invalidMacAddress"
349 349
 	validMacAddress := "--mac-address=92:d0:c6:0a:29:33"
... ...
@@ -18,7 +18,7 @@ import (
18 18
 )
19 19
 
20 20
 // VolumeDataPathName is the name of the directory where the volume data is stored.
21
-// It uses a very distintive name to avoid collisions migrating data between
21
+// It uses a very distinctive name to avoid collisions migrating data between
22 22
 // Docker versions.
23 23
 const (
24 24
 	VolumeDataPathName = "_data"
... ...
@@ -28,7 +28,7 @@ const (
28 28
 var (
29 29
 	// ErrNotFound is the typed error returned when the requested volume name can't be found
30 30
 	ErrNotFound = errors.New("volume not found")
31
-	// volumeNameRegex ensures the name asigned for the volume is valid.
31
+	// volumeNameRegex ensures the name assigned for the volume is valid.
32 32
 	// This name is used to create the bind directory, so we need to avoid characters that
33 33
 	// would make the path to escape the root directory.
34 34
 	volumeNameRegex = utils.RestrictedNamePattern
... ...
@@ -22,7 +22,7 @@ type OpErr struct {
22 22
 	Name string
23 23
 }
24 24
 
25
-// Error satifies the built-in error interface type.
25
+// Error satisfies the built-in error interface type.
26 26
 func (e *OpErr) Error() string {
27 27
 	if e == nil {
28 28
 		return "<nil>"
... ...
@@ -5,7 +5,7 @@ import "strings"
5 5
 // normaliseVolumeName is a platform specific function to normalise the name
6 6
 // of a volume. On Windows, as NTFS is case insensitive, under
7 7
 // c:\ProgramData\Docker\Volumes\, the folders John and john would be synonymous.
8
-// Hence we can't allow the volume "John" and "john" to be created as seperate
8
+// Hence we can't allow the volume "John" and "john" to be created as separate
9 9
 // volumes.
10 10
 func normaliseVolumeName(name string) string {
11 11
 	return strings.ToLower(name)
... ...
@@ -45,7 +45,7 @@ const (
45 45
 	// RXReservedNames are reserved names not possible on Windows
46 46
 	RXReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])`
47 47
 
48
-	// RXSource is the combined possiblities for a source
48
+	// RXSource is the combined possibilities for a source
49 49
 	RXSource = `((?P<source>((` + RXHostDir + `)|(` + RXName + `))):)?`
50 50
 
51 51
 	// Source. Can be either a host directory, a name, or omitted: