Browse code

Merge pull request #29609 from dnephin/add-compose-file-package

Replace the vendored aanand/compose-file with a local copy

Tõnis Tiigi authored on 2016/12/30 08:12:59
Showing 46 changed files
... ...
@@ -239,7 +239,7 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
239 239
 # Please edit hack/dockerfile/install-binaries.sh to update them.
240 240
 COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
241 241
 COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
242
-RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
242
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata
243 243
 
244 244
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
245 245
 ENTRYPOINT ["hack/dind"]
... ...
@@ -11,13 +11,13 @@ import (
11 11
 	"github.com/spf13/cobra"
12 12
 	"golang.org/x/net/context"
13 13
 
14
-	"github.com/aanand/compose-file/loader"
15
-	composetypes "github.com/aanand/compose-file/types"
16 14
 	"github.com/docker/docker/api/types"
17 15
 	"github.com/docker/docker/api/types/swarm"
18 16
 	"github.com/docker/docker/cli"
19 17
 	"github.com/docker/docker/cli/command"
20 18
 	"github.com/docker/docker/cli/compose/convert"
19
+	"github.com/docker/docker/cli/compose/loader"
20
+	composetypes "github.com/docker/docker/cli/compose/types"
21 21
 	dockerclient "github.com/docker/docker/client"
22 22
 )
23 23
 
... ...
@@ -1,9 +1,9 @@
1 1
 package convert
2 2
 
3 3
 import (
4
-	composetypes "github.com/aanand/compose-file/types"
5 4
 	"github.com/docker/docker/api/types"
6 5
 	networktypes "github.com/docker/docker/api/types/network"
6
+	composetypes "github.com/docker/docker/cli/compose/types"
7 7
 )
8 8
 
9 9
 const (
... ...
@@ -3,9 +3,9 @@ package convert
3 3
 import (
4 4
 	"testing"
5 5
 
6
-	composetypes "github.com/aanand/compose-file/types"
7 6
 	"github.com/docker/docker/api/types"
8 7
 	"github.com/docker/docker/api/types/network"
8
+	composetypes "github.com/docker/docker/cli/compose/types"
9 9
 	"github.com/docker/docker/pkg/testutil/assert"
10 10
 )
11 11
 
... ...
@@ -4,9 +4,9 @@ import (
4 4
 	"fmt"
5 5
 	"time"
6 6
 
7
-	composetypes "github.com/aanand/compose-file/types"
8 7
 	"github.com/docker/docker/api/types/container"
9 8
 	"github.com/docker/docker/api/types/swarm"
9
+	composetypes "github.com/docker/docker/cli/compose/types"
10 10
 	"github.com/docker/docker/opts"
11 11
 	runconfigopts "github.com/docker/docker/runconfig/opts"
12 12
 	"github.com/docker/go-connections/nat"
... ...
@@ -6,9 +6,9 @@ import (
6 6
 	"testing"
7 7
 	"time"
8 8
 
9
-	composetypes "github.com/aanand/compose-file/types"
10 9
 	"github.com/docker/docker/api/types/container"
11 10
 	"github.com/docker/docker/api/types/swarm"
11
+	composetypes "github.com/docker/docker/cli/compose/types"
12 12
 	"github.com/docker/docker/pkg/testutil/assert"
13 13
 )
14 14
 
... ...
@@ -4,8 +4,8 @@ import (
4 4
 	"fmt"
5 5
 	"strings"
6 6
 
7
-	composetypes "github.com/aanand/compose-file/types"
8 7
 	"github.com/docker/docker/api/types/mount"
8
+	composetypes "github.com/docker/docker/cli/compose/types"
9 9
 )
10 10
 
11 11
 type volumes map[string]composetypes.VolumeConfig
... ...
@@ -3,8 +3,8 @@ package convert
3 3
 import (
4 4
 	"testing"
5 5
 
6
-	composetypes "github.com/aanand/compose-file/types"
7 6
 	"github.com/docker/docker/api/types/mount"
7
+	composetypes "github.com/docker/docker/cli/compose/types"
8 8
 	"github.com/docker/docker/pkg/testutil/assert"
9 9
 )
10 10
 
11 11
new file mode 100644
... ...
@@ -0,0 +1,90 @@
0
+package interpolation
1
+
2
+import (
3
+	"fmt"
4
+
5
+	"github.com/docker/docker/cli/compose/template"
6
+	"github.com/docker/docker/cli/compose/types"
7
+)
8
+
9
+// Interpolate replaces variables in a string with the values from a mapping
10
+func Interpolate(config types.Dict, section string, mapping template.Mapping) (types.Dict, error) {
11
+	out := types.Dict{}
12
+
13
+	for name, item := range config {
14
+		if item == nil {
15
+			out[name] = nil
16
+			continue
17
+		}
18
+		interpolatedItem, err := interpolateSectionItem(name, item.(types.Dict), section, mapping)
19
+		if err != nil {
20
+			return nil, err
21
+		}
22
+		out[name] = interpolatedItem
23
+	}
24
+
25
+	return out, nil
26
+}
27
+
28
+func interpolateSectionItem(
29
+	name string,
30
+	item types.Dict,
31
+	section string,
32
+	mapping template.Mapping,
33
+) (types.Dict, error) {
34
+
35
+	out := types.Dict{}
36
+
37
+	for key, value := range item {
38
+		interpolatedValue, err := recursiveInterpolate(value, mapping)
39
+		if err != nil {
40
+			return nil, fmt.Errorf(
41
+				"Invalid interpolation format for %#v option in %s %#v: %#v",
42
+				key, section, name, err.Template,
43
+			)
44
+		}
45
+		out[key] = interpolatedValue
46
+	}
47
+
48
+	return out, nil
49
+
50
+}
51
+
52
+func recursiveInterpolate(
53
+	value interface{},
54
+	mapping template.Mapping,
55
+) (interface{}, *template.InvalidTemplateError) {
56
+
57
+	switch value := value.(type) {
58
+
59
+	case string:
60
+		return template.Substitute(value, mapping)
61
+
62
+	case types.Dict:
63
+		out := types.Dict{}
64
+		for key, elem := range value {
65
+			interpolatedElem, err := recursiveInterpolate(elem, mapping)
66
+			if err != nil {
67
+				return nil, err
68
+			}
69
+			out[key] = interpolatedElem
70
+		}
71
+		return out, nil
72
+
73
+	case []interface{}:
74
+		out := make([]interface{}, len(value))
75
+		for i, elem := range value {
76
+			interpolatedElem, err := recursiveInterpolate(elem, mapping)
77
+			if err != nil {
78
+				return nil, err
79
+			}
80
+			out[i] = interpolatedElem
81
+		}
82
+		return out, nil
83
+
84
+	default:
85
+		return value, nil
86
+
87
+	}
88
+
89
+}
0 90
new file mode 100644
... ...
@@ -0,0 +1,59 @@
0
+package interpolation
1
+
2
+import (
3
+	"testing"
4
+
5
+	"github.com/stretchr/testify/assert"
6
+
7
+	"github.com/docker/docker/cli/compose/types"
8
+)
9
+
10
+var defaults = map[string]string{
11
+	"USER": "jenny",
12
+	"FOO":  "bar",
13
+}
14
+
15
+func defaultMapping(name string) (string, bool) {
16
+	val, ok := defaults[name]
17
+	return val, ok
18
+}
19
+
20
+func TestInterpolate(t *testing.T) {
21
+	services := types.Dict{
22
+		"servicea": types.Dict{
23
+			"image":   "example:${USER}",
24
+			"volumes": []interface{}{"$FOO:/target"},
25
+			"logging": types.Dict{
26
+				"driver": "${FOO}",
27
+				"options": types.Dict{
28
+					"user": "$USER",
29
+				},
30
+			},
31
+		},
32
+	}
33
+	expected := types.Dict{
34
+		"servicea": types.Dict{
35
+			"image":   "example:jenny",
36
+			"volumes": []interface{}{"bar:/target"},
37
+			"logging": types.Dict{
38
+				"driver": "bar",
39
+				"options": types.Dict{
40
+					"user": "jenny",
41
+				},
42
+			},
43
+		},
44
+	}
45
+	result, err := Interpolate(services, "service", defaultMapping)
46
+	assert.NoError(t, err)
47
+	assert.Equal(t, expected, result)
48
+}
49
+
50
+func TestInvalidInterpolation(t *testing.T) {
51
+	services := types.Dict{
52
+		"servicea": types.Dict{
53
+			"image": "${",
54
+		},
55
+	}
56
+	_, err := Interpolate(services, "service", defaultMapping)
57
+	assert.EqualError(t, err, `Invalid interpolation format for "image" option in service "servicea": "${"`)
58
+}
0 59
new file mode 100644
... ...
@@ -0,0 +1,8 @@
0
+# passed through
1
+FOO=1
2
+
3
+# overridden in example2.env
4
+BAR=1
5
+
6
+# overridden in full-example.yml
7
+BAZ=1
0 8
new file mode 100644
... ...
@@ -0,0 +1 @@
0
+BAR=2
0 1
new file mode 100644
... ...
@@ -0,0 +1,287 @@
0
+version: "3"
1
+
2
+services:
3
+  foo:
4
+    cap_add:
5
+      - ALL
6
+
7
+    cap_drop:
8
+      - NET_ADMIN
9
+      - SYS_ADMIN
10
+
11
+    cgroup_parent: m-executor-abcd
12
+
13
+    # String or list
14
+    command: bundle exec thin -p 3000
15
+    # command: ["bundle", "exec", "thin", "-p", "3000"]
16
+
17
+    container_name: my-web-container
18
+
19
+    depends_on:
20
+      - db
21
+      - redis
22
+
23
+    deploy:
24
+      mode: replicated
25
+      replicas: 6
26
+      labels: [FOO=BAR]
27
+      update_config:
28
+        parallelism: 3
29
+        delay: 10s
30
+        failure_action: continue
31
+        monitor: 60s
32
+        max_failure_ratio: 0.3
33
+      resources:
34
+        limits:
35
+          cpus: '0.001'
36
+          memory: 50M
37
+        reservations:
38
+          cpus: '0.0001'
39
+          memory: 20M
40
+      restart_policy:
41
+        condition: on_failure
42
+        delay: 5s
43
+        max_attempts: 3
44
+        window: 120s
45
+      placement:
46
+        constraints: [node=foo]
47
+
48
+    devices:
49
+      - "/dev/ttyUSB0:/dev/ttyUSB0"
50
+
51
+    # String or list
52
+    # dns: 8.8.8.8
53
+    dns:
54
+      - 8.8.8.8
55
+      - 9.9.9.9
56
+
57
+    # String or list
58
+    # dns_search: example.com
59
+    dns_search:
60
+      - dc1.example.com
61
+      - dc2.example.com
62
+
63
+    domainname: foo.com
64
+
65
+    # String or list
66
+    # entrypoint: /code/entrypoint.sh -p 3000
67
+    entrypoint: ["/code/entrypoint.sh", "-p", "3000"]
68
+
69
+    # String or list
70
+    # env_file: .env
71
+    env_file:
72
+      - ./example1.env
73
+      - ./example2.env
74
+
75
+    # Mapping or list
76
+    # Mapping values can be strings, numbers or null
77
+    # Booleans are not allowed - must be quoted
78
+    environment:
79
+      RACK_ENV: development
80
+      SHOW: 'true'
81
+      SESSION_SECRET:
82
+      BAZ: 3
83
+    # environment:
84
+    #   - RACK_ENV=development
85
+    #   - SHOW=true
86
+    #   - SESSION_SECRET
87
+
88
+    # Items can be strings or numbers
89
+    expose:
90
+     - "3000"
91
+     - 8000
92
+
93
+    external_links:
94
+      - redis_1
95
+      - project_db_1:mysql
96
+      - project_db_1:postgresql
97
+
98
+    # Mapping or list
99
+    # Mapping values must be strings
100
+    # extra_hosts:
101
+    #   somehost: "162.242.195.82"
102
+    #   otherhost: "50.31.209.229"
103
+    extra_hosts:
104
+      - "somehost:162.242.195.82"
105
+      - "otherhost:50.31.209.229"
106
+
107
+    hostname: foo
108
+
109
+    healthcheck:
110
+      test: echo "hello world"
111
+      interval: 10s
112
+      timeout: 1s
113
+      retries: 5
114
+
115
+    # Any valid image reference - repo, tag, id, sha
116
+    image: redis
117
+    # image: ubuntu:14.04
118
+    # image: tutum/influxdb
119
+    # image: example-registry.com:4000/postgresql
120
+    # image: a4bc65fd
121
+    # image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d
122
+
123
+    ipc: host
124
+
125
+    # Mapping or list
126
+    # Mapping values can be strings, numbers or null
127
+    labels:
128
+      com.example.description: "Accounting webapp"
129
+      com.example.number: 42
130
+      com.example.empty-label:
131
+    # labels:
132
+    #   - "com.example.description=Accounting webapp"
133
+    #   - "com.example.number=42"
134
+    #   - "com.example.empty-label"
135
+
136
+    links:
137
+     - db
138
+     - db:database
139
+     - redis
140
+
141
+    logging:
142
+      driver: syslog
143
+      options:
144
+        syslog-address: "tcp://192.168.0.42:123"
145
+
146
+    mac_address: 02:42:ac:11:65:43
147
+
148
+    # network_mode: "bridge"
149
+    # network_mode: "host"
150
+    # network_mode: "none"
151
+    # Use the network mode of an arbitrary container from another service
152
+    # network_mode: "service:db"
153
+    # Use the network mode of another container, specified by name or id
154
+    # network_mode: "container:some-container"
155
+    network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b"
156
+
157
+    networks:
158
+      some-network:
159
+        aliases:
160
+         - alias1
161
+         - alias3
162
+      other-network:
163
+        ipv4_address: 172.16.238.10
164
+        ipv6_address: 2001:3984:3989::10
165
+      other-other-network:
166
+
167
+    pid: "host"
168
+
169
+    ports:
170
+      - 3000
171
+      - "3000-3005"
172
+      - "8000:8000"
173
+      - "9090-9091:8080-8081"
174
+      - "49100:22"
175
+      - "127.0.0.1:8001:8001"
176
+      - "127.0.0.1:5000-5010:5000-5010"
177
+
178
+    privileged: true
179
+
180
+    read_only: true
181
+
182
+    restart: always
183
+
184
+    security_opt:
185
+      - label=level:s0:c100,c200
186
+      - label=type:svirt_apache_t
187
+
188
+    stdin_open: true
189
+
190
+    stop_grace_period: 20s
191
+
192
+    stop_signal: SIGUSR1
193
+
194
+    # String or list
195
+    # tmpfs: /run
196
+    tmpfs:
197
+      - /run
198
+      - /tmp
199
+
200
+    tty: true
201
+
202
+    ulimits:
203
+      # Single number or mapping with soft + hard limits
204
+      nproc: 65535
205
+      nofile:
206
+        soft: 20000
207
+        hard: 40000
208
+
209
+    user: someone
210
+
211
+    volumes:
212
+      # Just specify a path and let the Engine create a volume
213
+      - /var/lib/mysql
214
+      # Specify an absolute path mapping
215
+      - /opt/data:/var/lib/mysql
216
+      # Path on the host, relative to the Compose file
217
+      - .:/code
218
+      - ./static:/var/www/html
219
+      # User-relative path
220
+      - ~/configs:/etc/configs/:ro
221
+      # Named volume
222
+      - datavolume:/var/lib/mysql
223
+
224
+    working_dir: /code
225
+
226
+networks:
227
+  # Entries can be null, which specifies simply that a network
228
+  # called "{project name}_some-network" should be created and
229
+  # use the default driver
230
+  some-network:
231
+
232
+  other-network:
233
+    driver: overlay
234
+
235
+    driver_opts:
236
+      # Values can be strings or numbers
237
+      foo: "bar"
238
+      baz: 1
239
+
240
+    ipam:
241
+      driver: overlay
242
+      # driver_opts:
243
+      #   # Values can be strings or numbers
244
+      #   com.docker.network.enable_ipv6: "true"
245
+      #   com.docker.network.numeric_value: 1
246
+      config:
247
+      - subnet: 172.16.238.0/24
248
+        # gateway: 172.16.238.1
249
+      - subnet: 2001:3984:3989::/64
250
+        # gateway: 2001:3984:3989::1
251
+
252
+  external-network:
253
+    # Specifies that a pre-existing network called "external-network"
254
+    # can be referred to within this file as "external-network"
255
+    external: true
256
+
257
+  other-external-network:
258
+    # Specifies that a pre-existing network called "my-cool-network"
259
+    # can be referred to within this file as "other-external-network"
260
+    external:
261
+      name: my-cool-network
262
+
263
+volumes:
264
+  # Entries can be null, which specifies simply that a volume
265
+  # called "{project name}_some-volume" should be created and
266
+  # use the default driver
267
+  some-volume:
268
+
269
+  other-volume:
270
+    driver: flocker
271
+
272
+    driver_opts:
273
+      # Values can be strings or numbers
274
+      foo: "bar"
275
+      baz: 1
276
+
277
+  external-volume:
278
+    # Specifies that a pre-existing volume called "external-volume"
279
+    # can be referred to within this file as "external-volume"
280
+    external: true
281
+
282
+  other-external-volume:
283
+    # Specifies that a pre-existing volume called "my-cool-volume"
284
+    # can be referred to within this file as "other-external-volume"
285
+    external:
286
+      name: my-cool-volume
0 287
new file mode 100644
... ...
@@ -0,0 +1,611 @@
0
+package loader
1
+
2
+import (
3
+	"fmt"
4
+	"os"
5
+	"path"
6
+	"reflect"
7
+	"regexp"
8
+	"sort"
9
+	"strings"
10
+
11
+	"github.com/docker/docker/cli/compose/interpolation"
12
+	"github.com/docker/docker/cli/compose/schema"
13
+	"github.com/docker/docker/cli/compose/types"
14
+	"github.com/docker/docker/runconfig/opts"
15
+	units "github.com/docker/go-units"
16
+	shellwords "github.com/mattn/go-shellwords"
17
+	"github.com/mitchellh/mapstructure"
18
+	yaml "gopkg.in/yaml.v2"
19
+)
20
+
21
+var (
22
+	fieldNameRegexp = regexp.MustCompile("[A-Z][a-z0-9]+")
23
+)
24
+
25
+// ParseYAML reads the bytes from a file, parses the bytes into a mapping
26
+// structure, and returns it.
27
+func ParseYAML(source []byte) (types.Dict, error) {
28
+	var cfg interface{}
29
+	if err := yaml.Unmarshal(source, &cfg); err != nil {
30
+		return nil, err
31
+	}
32
+	cfgMap, ok := cfg.(map[interface{}]interface{})
33
+	if !ok {
34
+		return nil, fmt.Errorf("Top-level object must be a mapping")
35
+	}
36
+	converted, err := convertToStringKeysRecursive(cfgMap, "")
37
+	if err != nil {
38
+		return nil, err
39
+	}
40
+	return converted.(types.Dict), nil
41
+}
42
+
43
+// Load reads a ConfigDetails and returns a fully loaded configuration
44
+func Load(configDetails types.ConfigDetails) (*types.Config, error) {
45
+	if len(configDetails.ConfigFiles) < 1 {
46
+		return nil, fmt.Errorf("No files specified")
47
+	}
48
+	if len(configDetails.ConfigFiles) > 1 {
49
+		return nil, fmt.Errorf("Multiple files are not yet supported")
50
+	}
51
+
52
+	configDict := getConfigDict(configDetails)
53
+
54
+	if services, ok := configDict["services"]; ok {
55
+		if servicesDict, ok := services.(types.Dict); ok {
56
+			forbidden := getProperties(servicesDict, types.ForbiddenProperties)
57
+
58
+			if len(forbidden) > 0 {
59
+				return nil, &ForbiddenPropertiesError{Properties: forbidden}
60
+			}
61
+		}
62
+	}
63
+
64
+	if err := schema.Validate(configDict); err != nil {
65
+		return nil, err
66
+	}
67
+
68
+	cfg := types.Config{}
69
+	version := configDict["version"].(string)
70
+	if version != "3" && version != "3.0" {
71
+		return nil, fmt.Errorf(`Unsupported Compose file version: %#v. The only version supported is "3" (or "3.0")`, version)
72
+	}
73
+
74
+	if services, ok := configDict["services"]; ok {
75
+		servicesConfig, err := interpolation.Interpolate(services.(types.Dict), "service", os.LookupEnv)
76
+		if err != nil {
77
+			return nil, err
78
+		}
79
+
80
+		servicesList, err := loadServices(servicesConfig, configDetails.WorkingDir)
81
+		if err != nil {
82
+			return nil, err
83
+		}
84
+
85
+		cfg.Services = servicesList
86
+	}
87
+
88
+	if networks, ok := configDict["networks"]; ok {
89
+		networksConfig, err := interpolation.Interpolate(networks.(types.Dict), "network", os.LookupEnv)
90
+		if err != nil {
91
+			return nil, err
92
+		}
93
+
94
+		networksMapping, err := loadNetworks(networksConfig)
95
+		if err != nil {
96
+			return nil, err
97
+		}
98
+
99
+		cfg.Networks = networksMapping
100
+	}
101
+
102
+	if volumes, ok := configDict["volumes"]; ok {
103
+		volumesConfig, err := interpolation.Interpolate(volumes.(types.Dict), "volume", os.LookupEnv)
104
+		if err != nil {
105
+			return nil, err
106
+		}
107
+
108
+		volumesMapping, err := loadVolumes(volumesConfig)
109
+		if err != nil {
110
+			return nil, err
111
+		}
112
+
113
+		cfg.Volumes = volumesMapping
114
+	}
115
+
116
+	return &cfg, nil
117
+}
118
+
119
+// GetUnsupportedProperties returns the list of any unsupported properties that are
120
+// used in the Compose files.
121
+func GetUnsupportedProperties(configDetails types.ConfigDetails) []string {
122
+	unsupported := map[string]bool{}
123
+
124
+	for _, service := range getServices(getConfigDict(configDetails)) {
125
+		serviceDict := service.(types.Dict)
126
+		for _, property := range types.UnsupportedProperties {
127
+			if _, isSet := serviceDict[property]; isSet {
128
+				unsupported[property] = true
129
+			}
130
+		}
131
+	}
132
+
133
+	return sortedKeys(unsupported)
134
+}
135
+
136
+func sortedKeys(set map[string]bool) []string {
137
+	var keys []string
138
+	for key := range set {
139
+		keys = append(keys, key)
140
+	}
141
+	sort.Strings(keys)
142
+	return keys
143
+}
144
+
145
+// GetDeprecatedProperties returns the list of any deprecated properties that
146
+// are used in the compose files.
147
+func GetDeprecatedProperties(configDetails types.ConfigDetails) map[string]string {
148
+	return getProperties(getServices(getConfigDict(configDetails)), types.DeprecatedProperties)
149
+}
150
+
151
+func getProperties(services types.Dict, propertyMap map[string]string) map[string]string {
152
+	output := map[string]string{}
153
+
154
+	for _, service := range services {
155
+		if serviceDict, ok := service.(types.Dict); ok {
156
+			for property, description := range propertyMap {
157
+				if _, isSet := serviceDict[property]; isSet {
158
+					output[property] = description
159
+				}
160
+			}
161
+		}
162
+	}
163
+
164
+	return output
165
+}
166
+
167
+// ForbiddenPropertiesError is returned when there are properties in the Compose
168
+// file that are forbidden.
169
+type ForbiddenPropertiesError struct {
170
+	Properties map[string]string
171
+}
172
+
173
+func (e *ForbiddenPropertiesError) Error() string {
174
+	return "Configuration contains forbidden properties"
175
+}
176
+
177
+// TODO: resolve multiple files into a single config
178
+func getConfigDict(configDetails types.ConfigDetails) types.Dict {
179
+	return configDetails.ConfigFiles[0].Config
180
+}
181
+
182
+func getServices(configDict types.Dict) types.Dict {
183
+	if services, ok := configDict["services"]; ok {
184
+		if servicesDict, ok := services.(types.Dict); ok {
185
+			return servicesDict
186
+		}
187
+	}
188
+
189
+	return types.Dict{}
190
+}
191
+
192
+func transform(source map[string]interface{}, target interface{}) error {
193
+	data := mapstructure.Metadata{}
194
+	config := &mapstructure.DecoderConfig{
195
+		DecodeHook: mapstructure.ComposeDecodeHookFunc(
196
+			transformHook,
197
+			mapstructure.StringToTimeDurationHookFunc()),
198
+		Result:   target,
199
+		Metadata: &data,
200
+	}
201
+	decoder, err := mapstructure.NewDecoder(config)
202
+	if err != nil {
203
+		return err
204
+	}
205
+	err = decoder.Decode(source)
206
+	// TODO: log unused keys
207
+	return err
208
+}
209
+
210
+func transformHook(
211
+	source reflect.Type,
212
+	target reflect.Type,
213
+	data interface{},
214
+) (interface{}, error) {
215
+	switch target {
216
+	case reflect.TypeOf(types.External{}):
217
+		return transformExternal(source, target, data)
218
+	case reflect.TypeOf(make(map[string]string, 0)):
219
+		return transformMapStringString(source, target, data)
220
+	case reflect.TypeOf(types.UlimitsConfig{}):
221
+		return transformUlimits(source, target, data)
222
+	case reflect.TypeOf(types.UnitBytes(0)):
223
+		return loadSize(data)
224
+	}
225
+	switch target.Kind() {
226
+	case reflect.Struct:
227
+		return transformStruct(source, target, data)
228
+	}
229
+	return data, nil
230
+}
231
+
232
+// keys needs to be converted to strings for jsonschema
233
+// TODO: don't use types.Dict
234
+func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) {
235
+	if mapping, ok := value.(map[interface{}]interface{}); ok {
236
+		dict := make(types.Dict)
237
+		for key, entry := range mapping {
238
+			str, ok := key.(string)
239
+			if !ok {
240
+				var location string
241
+				if keyPrefix == "" {
242
+					location = "at top level"
243
+				} else {
244
+					location = fmt.Sprintf("in %s", keyPrefix)
245
+				}
246
+				return nil, fmt.Errorf("Non-string key %s: %#v", location, key)
247
+			}
248
+			var newKeyPrefix string
249
+			if keyPrefix == "" {
250
+				newKeyPrefix = str
251
+			} else {
252
+				newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str)
253
+			}
254
+			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
255
+			if err != nil {
256
+				return nil, err
257
+			}
258
+			dict[str] = convertedEntry
259
+		}
260
+		return dict, nil
261
+	}
262
+	if list, ok := value.([]interface{}); ok {
263
+		var convertedList []interface{}
264
+		for index, entry := range list {
265
+			newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index)
266
+			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
267
+			if err != nil {
268
+				return nil, err
269
+			}
270
+			convertedList = append(convertedList, convertedEntry)
271
+		}
272
+		return convertedList, nil
273
+	}
274
+	return value, nil
275
+}
276
+
277
+func loadServices(servicesDict types.Dict, workingDir string) ([]types.ServiceConfig, error) {
278
+	var services []types.ServiceConfig
279
+
280
+	for name, serviceDef := range servicesDict {
281
+		serviceConfig, err := loadService(name, serviceDef.(types.Dict), workingDir)
282
+		if err != nil {
283
+			return nil, err
284
+		}
285
+		services = append(services, *serviceConfig)
286
+	}
287
+
288
+	return services, nil
289
+}
290
+
291
+func loadService(name string, serviceDict types.Dict, workingDir string) (*types.ServiceConfig, error) {
292
+	serviceConfig := &types.ServiceConfig{}
293
+	if err := transform(serviceDict, serviceConfig); err != nil {
294
+		return nil, err
295
+	}
296
+	serviceConfig.Name = name
297
+
298
+	if err := resolveEnvironment(serviceConfig, serviceDict, workingDir); err != nil {
299
+		return nil, err
300
+	}
301
+
302
+	if err := resolveVolumePaths(serviceConfig.Volumes, workingDir); err != nil {
303
+		return nil, err
304
+	}
305
+
306
+	return serviceConfig, nil
307
+}
308
+
309
+func resolveEnvironment(serviceConfig *types.ServiceConfig, serviceDict types.Dict, workingDir string) error {
310
+	environment := make(map[string]string)
311
+
312
+	if envFileVal, ok := serviceDict["env_file"]; ok {
313
+		envFiles := loadStringOrListOfStrings(envFileVal)
314
+
315
+		var envVars []string
316
+
317
+		for _, file := range envFiles {
318
+			filePath := path.Join(workingDir, file)
319
+			fileVars, err := opts.ParseEnvFile(filePath)
320
+			if err != nil {
321
+				return err
322
+			}
323
+			envVars = append(envVars, fileVars...)
324
+		}
325
+
326
+		for k, v := range opts.ConvertKVStringsToMap(envVars) {
327
+			environment[k] = v
328
+		}
329
+	}
330
+
331
+	for k, v := range serviceConfig.Environment {
332
+		environment[k] = v
333
+	}
334
+
335
+	serviceConfig.Environment = environment
336
+
337
+	return nil
338
+}
339
+
340
+func resolveVolumePaths(volumes []string, workingDir string) error {
341
+	for i, mapping := range volumes {
342
+		parts := strings.SplitN(mapping, ":", 2)
343
+		if len(parts) == 1 {
344
+			continue
345
+		}
346
+
347
+		if strings.HasPrefix(parts[0], ".") {
348
+			parts[0] = path.Join(workingDir, parts[0])
349
+		}
350
+		parts[0] = expandUser(parts[0])
351
+
352
+		volumes[i] = strings.Join(parts, ":")
353
+	}
354
+
355
+	return nil
356
+}
357
+
358
+// TODO: make this more robust
359
+func expandUser(path string) string {
360
+	if strings.HasPrefix(path, "~") {
361
+		return strings.Replace(path, "~", os.Getenv("HOME"), 1)
362
+	}
363
+	return path
364
+}
365
+
366
+func transformUlimits(
367
+	source reflect.Type,
368
+	target reflect.Type,
369
+	data interface{},
370
+) (interface{}, error) {
371
+	switch value := data.(type) {
372
+	case int:
373
+		return types.UlimitsConfig{Single: value}, nil
374
+	case types.Dict:
375
+		ulimit := types.UlimitsConfig{}
376
+		ulimit.Soft = value["soft"].(int)
377
+		ulimit.Hard = value["hard"].(int)
378
+		return ulimit, nil
379
+	default:
380
+		return data, fmt.Errorf("invalid type %T for ulimits", value)
381
+	}
382
+}
383
+
384
+func loadNetworks(source types.Dict) (map[string]types.NetworkConfig, error) {
385
+	networks := make(map[string]types.NetworkConfig)
386
+	err := transform(source, &networks)
387
+	if err != nil {
388
+		return networks, err
389
+	}
390
+	for name, network := range networks {
391
+		if network.External.External && network.External.Name == "" {
392
+			network.External.Name = name
393
+			networks[name] = network
394
+		}
395
+	}
396
+	return networks, nil
397
+}
398
+
399
+func loadVolumes(source types.Dict) (map[string]types.VolumeConfig, error) {
400
+	volumes := make(map[string]types.VolumeConfig)
401
+	err := transform(source, &volumes)
402
+	if err != nil {
403
+		return volumes, err
404
+	}
405
+	for name, volume := range volumes {
406
+		if volume.External.External && volume.External.Name == "" {
407
+			volume.External.Name = name
408
+			volumes[name] = volume
409
+		}
410
+	}
411
+	return volumes, nil
412
+}
413
+
414
+func transformStruct(
415
+	source reflect.Type,
416
+	target reflect.Type,
417
+	data interface{},
418
+) (interface{}, error) {
419
+	structValue, ok := data.(map[string]interface{})
420
+	if !ok {
421
+		// FIXME: this is necessary because of convertToStringKeysRecursive
422
+		structValue, ok = data.(types.Dict)
423
+		if !ok {
424
+			panic(fmt.Sprintf(
425
+				"transformStruct called with non-map type: %T, %s", data, data))
426
+		}
427
+	}
428
+
429
+	var err error
430
+	for i := 0; i < target.NumField(); i++ {
431
+		field := target.Field(i)
432
+		fieldTag := field.Tag.Get("compose")
433
+
434
+		yamlName := toYAMLName(field.Name)
435
+		value, ok := structValue[yamlName]
436
+		if !ok {
437
+			continue
438
+		}
439
+
440
+		structValue[yamlName], err = convertField(
441
+			fieldTag, reflect.TypeOf(value), field.Type, value)
442
+		if err != nil {
443
+			return nil, fmt.Errorf("field %s: %s", yamlName, err.Error())
444
+		}
445
+	}
446
+	return structValue, nil
447
+}
448
+
449
+func transformMapStringString(
450
+	source reflect.Type,
451
+	target reflect.Type,
452
+	data interface{},
453
+) (interface{}, error) {
454
+	switch value := data.(type) {
455
+	case map[string]interface{}:
456
+		return toMapStringString(value), nil
457
+	case types.Dict:
458
+		return toMapStringString(value), nil
459
+	case map[string]string:
460
+		return value, nil
461
+	default:
462
+		return data, fmt.Errorf("invalid type %T for map[string]string", value)
463
+	}
464
+}
465
+
466
+func convertField(
467
+	fieldTag string,
468
+	source reflect.Type,
469
+	target reflect.Type,
470
+	data interface{},
471
+) (interface{}, error) {
472
+	switch fieldTag {
473
+	case "":
474
+		return data, nil
475
+	case "healthcheck":
476
+		return loadHealthcheck(data)
477
+	case "list_or_dict_equals":
478
+		return loadMappingOrList(data, "="), nil
479
+	case "list_or_dict_colon":
480
+		return loadMappingOrList(data, ":"), nil
481
+	case "list_or_struct_map":
482
+		return loadListOrStructMap(data, target)
483
+	case "string_or_list":
484
+		return loadStringOrListOfStrings(data), nil
485
+	case "list_of_strings_or_numbers":
486
+		return loadListOfStringsOrNumbers(data), nil
487
+	case "shell_command":
488
+		return loadShellCommand(data)
489
+	case "size":
490
+		return loadSize(data)
491
+	case "-":
492
+		return nil, nil
493
+	}
494
+	return data, nil
495
+}
496
+
497
+func transformExternal(
498
+	source reflect.Type,
499
+	target reflect.Type,
500
+	data interface{},
501
+) (interface{}, error) {
502
+	switch value := data.(type) {
503
+	case bool:
504
+		return map[string]interface{}{"external": value}, nil
505
+	case types.Dict:
506
+		return map[string]interface{}{"external": true, "name": value["name"]}, nil
507
+	case map[string]interface{}:
508
+		return map[string]interface{}{"external": true, "name": value["name"]}, nil
509
+	default:
510
+		return data, fmt.Errorf("invalid type %T for external", value)
511
+	}
512
+}
513
+
514
+func toYAMLName(name string) string {
515
+	nameParts := fieldNameRegexp.FindAllString(name, -1)
516
+	for i, p := range nameParts {
517
+		nameParts[i] = strings.ToLower(p)
518
+	}
519
+	return strings.Join(nameParts, "_")
520
+}
521
+
522
+func loadListOrStructMap(value interface{}, target reflect.Type) (interface{}, error) {
523
+	if list, ok := value.([]interface{}); ok {
524
+		mapValue := map[interface{}]interface{}{}
525
+		for _, name := range list {
526
+			mapValue[name] = nil
527
+		}
528
+		return mapValue, nil
529
+	}
530
+
531
+	return value, nil
532
+}
533
+
534
+func loadListOfStringsOrNumbers(value interface{}) []string {
535
+	list := value.([]interface{})
536
+	result := make([]string, len(list))
537
+	for i, item := range list {
538
+		result[i] = fmt.Sprint(item)
539
+	}
540
+	return result
541
+}
542
+
543
+func loadStringOrListOfStrings(value interface{}) []string {
544
+	if list, ok := value.([]interface{}); ok {
545
+		result := make([]string, len(list))
546
+		for i, item := range list {
547
+			result[i] = fmt.Sprint(item)
548
+		}
549
+		return result
550
+	}
551
+	return []string{value.(string)}
552
+}
553
+
554
+func loadMappingOrList(mappingOrList interface{}, sep string) map[string]string {
555
+	if mapping, ok := mappingOrList.(types.Dict); ok {
556
+		return toMapStringString(mapping)
557
+	}
558
+	if list, ok := mappingOrList.([]interface{}); ok {
559
+		result := make(map[string]string)
560
+		for _, value := range list {
561
+			parts := strings.SplitN(value.(string), sep, 2)
562
+			if len(parts) == 1 {
563
+				result[parts[0]] = ""
564
+			} else {
565
+				result[parts[0]] = parts[1]
566
+			}
567
+		}
568
+		return result
569
+	}
570
+	panic(fmt.Errorf("expected a map or a slice, got: %#v", mappingOrList))
571
+}
572
+
573
+func loadShellCommand(value interface{}) (interface{}, error) {
574
+	if str, ok := value.(string); ok {
575
+		return shellwords.Parse(str)
576
+	}
577
+	return value, nil
578
+}
579
+
580
+func loadHealthcheck(value interface{}) (interface{}, error) {
581
+	if str, ok := value.(string); ok {
582
+		return append([]string{"CMD-SHELL"}, str), nil
583
+	}
584
+	return value, nil
585
+}
586
+
587
+func loadSize(value interface{}) (int64, error) {
588
+	switch value := value.(type) {
589
+	case int:
590
+		return int64(value), nil
591
+	case string:
592
+		return units.RAMInBytes(value)
593
+	}
594
+	panic(fmt.Errorf("invalid type for size %T", value))
595
+}
596
+
597
+func toMapStringString(value map[string]interface{}) map[string]string {
598
+	output := make(map[string]string)
599
+	for key, value := range value {
600
+		output[key] = toString(value)
601
+	}
602
+	return output
603
+}
604
+
605
+func toString(value interface{}) string {
606
+	if value == nil {
607
+		return ""
608
+	}
609
+	return fmt.Sprint(value)
610
+}
0 611
new file mode 100644
... ...
@@ -0,0 +1,782 @@
0
+package loader
1
+
2
+import (
3
+	"fmt"
4
+	"io/ioutil"
5
+	"os"
6
+	"sort"
7
+	"testing"
8
+	"time"
9
+
10
+	"github.com/docker/docker/cli/compose/types"
11
+	"github.com/stretchr/testify/assert"
12
+)
13
+
14
+func buildConfigDetails(source types.Dict) types.ConfigDetails {
15
+	workingDir, err := os.Getwd()
16
+	if err != nil {
17
+		panic(err)
18
+	}
19
+
20
+	return types.ConfigDetails{
21
+		WorkingDir: workingDir,
22
+		ConfigFiles: []types.ConfigFile{
23
+			{Filename: "filename.yml", Config: source},
24
+		},
25
+		Environment: nil,
26
+	}
27
+}
28
+
29
// sampleYAML is a small but representative v3 compose file used by the
// parse/load round-trip tests; it must stay in sync with sampleDict and
// sampleConfig below.
var sampleYAML = `
version: "3"
services:
  foo:
    image: busybox
    networks:
      with_me:
  bar:
    image: busybox
    environment:
      - FOO=1
    networks:
      - with_ipam
volumes:
  hello:
    driver: default
    driver_opts:
      beep: boop
networks:
  default:
    driver: bridge
    driver_opts:
      beep: boop
  with_ipam:
    ipam:
      driver: default
      config:
        - subnet: 172.28.0.0/16
`
58
+
59
// sampleDict is the raw parse result expected from sampleYAML (what
// ParseYAML should return before typed loading).
var sampleDict = types.Dict{
	"version": "3",
	"services": types.Dict{
		"foo": types.Dict{
			"image":    "busybox",
			"networks": types.Dict{"with_me": nil},
		},
		"bar": types.Dict{
			"image":       "busybox",
			"environment": []interface{}{"FOO=1"},
			"networks":    []interface{}{"with_ipam"},
		},
	},
	"volumes": types.Dict{
		"hello": types.Dict{
			"driver": "default",
			"driver_opts": types.Dict{
				"beep": "boop",
			},
		},
	},
	"networks": types.Dict{
		"default": types.Dict{
			"driver": "bridge",
			"driver_opts": types.Dict{
				"beep": "boop",
			},
		},
		"with_ipam": types.Dict{
			"ipam": types.Dict{
				"driver": "default",
				"config": []interface{}{
					types.Dict{
						"subnet": "172.28.0.0/16",
					},
				},
			},
		},
	},
}
99
+
100
// sampleConfig is the fully typed Config expected after Load processes
// sampleDict.
var sampleConfig = types.Config{
	Services: []types.ServiceConfig{
		{
			Name:        "foo",
			Image:       "busybox",
			Environment: map[string]string{},
			Networks: map[string]*types.ServiceNetworkConfig{
				"with_me": nil,
			},
		},
		{
			Name:        "bar",
			Image:       "busybox",
			Environment: map[string]string{"FOO": "1"},
			Networks: map[string]*types.ServiceNetworkConfig{
				"with_ipam": nil,
			},
		},
	},
	Networks: map[string]types.NetworkConfig{
		"default": {
			Driver: "bridge",
			DriverOpts: map[string]string{
				"beep": "boop",
			},
		},
		"with_ipam": {
			Ipam: types.IPAMConfig{
				Driver: "default",
				Config: []*types.IPAMPool{
					{
						Subnet: "172.28.0.0/16",
					},
				},
			},
		},
	},
	Volumes: map[string]types.VolumeConfig{
		"hello": {
			Driver: "default",
			DriverOpts: map[string]string{
				"beep": "boop",
			},
		},
	},
}
146
+
147
+func TestParseYAML(t *testing.T) {
148
+	dict, err := ParseYAML([]byte(sampleYAML))
149
+	if !assert.NoError(t, err) {
150
+		return
151
+	}
152
+	assert.Equal(t, sampleDict, dict)
153
+}
154
+
155
+func TestLoad(t *testing.T) {
156
+	actual, err := Load(buildConfigDetails(sampleDict))
157
+	if !assert.NoError(t, err) {
158
+		return
159
+	}
160
+	assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services))
161
+	assert.Equal(t, sampleConfig.Networks, actual.Networks)
162
+	assert.Equal(t, sampleConfig.Volumes, actual.Volumes)
163
+}
164
+
165
+func TestParseAndLoad(t *testing.T) {
166
+	actual, err := loadYAML(sampleYAML)
167
+	if !assert.NoError(t, err) {
168
+		return
169
+	}
170
+	assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services))
171
+	assert.Equal(t, sampleConfig.Networks, actual.Networks)
172
+	assert.Equal(t, sampleConfig.Volumes, actual.Volumes)
173
+}
174
+
175
+func TestInvalidTopLevelObjectType(t *testing.T) {
176
+	_, err := loadYAML("1")
177
+	assert.Error(t, err)
178
+	assert.Contains(t, err.Error(), "Top-level object must be a mapping")
179
+
180
+	_, err = loadYAML("\"hello\"")
181
+	assert.Error(t, err)
182
+	assert.Contains(t, err.Error(), "Top-level object must be a mapping")
183
+
184
+	_, err = loadYAML("[\"hello\"]")
185
+	assert.Error(t, err)
186
+	assert.Contains(t, err.Error(), "Top-level object must be a mapping")
187
+}
188
+
189
+func TestNonStringKeys(t *testing.T) {
190
+	_, err := loadYAML(`
191
+version: "3"
192
+123:
193
+  foo:
194
+    image: busybox
195
+`)
196
+	assert.Error(t, err)
197
+	assert.Contains(t, err.Error(), "Non-string key at top level: 123")
198
+
199
+	_, err = loadYAML(`
200
+version: "3"
201
+services:
202
+  foo:
203
+    image: busybox
204
+  123:
205
+    image: busybox
206
+`)
207
+	assert.Error(t, err)
208
+	assert.Contains(t, err.Error(), "Non-string key in services: 123")
209
+
210
+	_, err = loadYAML(`
211
+version: "3"
212
+services:
213
+  foo:
214
+    image: busybox
215
+networks:
216
+  default:
217
+    ipam:
218
+      config:
219
+        - 123: oh dear
220
+`)
221
+	assert.Error(t, err)
222
+	assert.Contains(t, err.Error(), "Non-string key in networks.default.ipam.config[0]: 123")
223
+
224
+	_, err = loadYAML(`
225
+version: "3"
226
+services:
227
+  dict-env:
228
+    image: busybox
229
+    environment:
230
+      1: FOO
231
+`)
232
+	assert.Error(t, err)
233
+	assert.Contains(t, err.Error(), "Non-string key in services.dict-env.environment: 1")
234
+}
235
+
236
+func TestSupportedVersion(t *testing.T) {
237
+	_, err := loadYAML(`
238
+version: "3"
239
+services:
240
+  foo:
241
+    image: busybox
242
+`)
243
+	assert.NoError(t, err)
244
+
245
+	_, err = loadYAML(`
246
+version: "3.0"
247
+services:
248
+  foo:
249
+    image: busybox
250
+`)
251
+	assert.NoError(t, err)
252
+}
253
+
254
+func TestUnsupportedVersion(t *testing.T) {
255
+	_, err := loadYAML(`
256
+version: "2"
257
+services:
258
+  foo:
259
+    image: busybox
260
+`)
261
+	assert.Error(t, err)
262
+	assert.Contains(t, err.Error(), "version")
263
+
264
+	_, err = loadYAML(`
265
+version: "2.0"
266
+services:
267
+  foo:
268
+    image: busybox
269
+`)
270
+	assert.Error(t, err)
271
+	assert.Contains(t, err.Error(), "version")
272
+}
273
+
274
+func TestInvalidVersion(t *testing.T) {
275
+	_, err := loadYAML(`
276
+version: 3
277
+services:
278
+  foo:
279
+    image: busybox
280
+`)
281
+	assert.Error(t, err)
282
+	assert.Contains(t, err.Error(), "version must be a string")
283
+}
284
+
285
+func TestV1Unsupported(t *testing.T) {
286
+	_, err := loadYAML(`
287
+foo:
288
+  image: busybox
289
+`)
290
+	assert.Error(t, err)
291
+}
292
+
293
+func TestNonMappingObject(t *testing.T) {
294
+	_, err := loadYAML(`
295
+version: "3"
296
+services:
297
+  - foo:
298
+      image: busybox
299
+`)
300
+	assert.Error(t, err)
301
+	assert.Contains(t, err.Error(), "services must be a mapping")
302
+
303
+	_, err = loadYAML(`
304
+version: "3"
305
+services:
306
+  foo: busybox
307
+`)
308
+	assert.Error(t, err)
309
+	assert.Contains(t, err.Error(), "services.foo must be a mapping")
310
+
311
+	_, err = loadYAML(`
312
+version: "3"
313
+networks:
314
+  - default:
315
+      driver: bridge
316
+`)
317
+	assert.Error(t, err)
318
+	assert.Contains(t, err.Error(), "networks must be a mapping")
319
+
320
+	_, err = loadYAML(`
321
+version: "3"
322
+networks:
323
+  default: bridge
324
+`)
325
+	assert.Error(t, err)
326
+	assert.Contains(t, err.Error(), "networks.default must be a mapping")
327
+
328
+	_, err = loadYAML(`
329
+version: "3"
330
+volumes:
331
+  - data:
332
+      driver: local
333
+`)
334
+	assert.Error(t, err)
335
+	assert.Contains(t, err.Error(), "volumes must be a mapping")
336
+
337
+	_, err = loadYAML(`
338
+version: "3"
339
+volumes:
340
+  data: local
341
+`)
342
+	assert.Error(t, err)
343
+	assert.Contains(t, err.Error(), "volumes.data must be a mapping")
344
+}
345
+
346
+func TestNonStringImage(t *testing.T) {
347
+	_, err := loadYAML(`
348
+version: "3"
349
+services:
350
+  foo:
351
+    image: ["busybox", "latest"]
352
+`)
353
+	assert.Error(t, err)
354
+	assert.Contains(t, err.Error(), "services.foo.image must be a string")
355
+}
356
+
357
+func TestValidEnvironment(t *testing.T) {
358
+	config, err := loadYAML(`
359
+version: "3"
360
+services:
361
+  dict-env:
362
+    image: busybox
363
+    environment:
364
+      FOO: "1"
365
+      BAR: 2
366
+      BAZ: 2.5
367
+      QUUX:
368
+  list-env:
369
+    image: busybox
370
+    environment:
371
+      - FOO=1
372
+      - BAR=2
373
+      - BAZ=2.5
374
+      - QUUX=
375
+`)
376
+	assert.NoError(t, err)
377
+
378
+	expected := map[string]string{
379
+		"FOO":  "1",
380
+		"BAR":  "2",
381
+		"BAZ":  "2.5",
382
+		"QUUX": "",
383
+	}
384
+
385
+	assert.Equal(t, 2, len(config.Services))
386
+
387
+	for _, service := range config.Services {
388
+		assert.Equal(t, expected, service.Environment)
389
+	}
390
+}
391
+
392
+func TestInvalidEnvironmentValue(t *testing.T) {
393
+	_, err := loadYAML(`
394
+version: "3"
395
+services:
396
+  dict-env:
397
+    image: busybox
398
+    environment:
399
+      FOO: ["1"]
400
+`)
401
+	assert.Error(t, err)
402
+	assert.Contains(t, err.Error(), "services.dict-env.environment.FOO must be a string, number or null")
403
+}
404
+
405
+func TestInvalidEnvironmentObject(t *testing.T) {
406
+	_, err := loadYAML(`
407
+version: "3"
408
+services:
409
+  dict-env:
410
+    image: busybox
411
+    environment: "FOO=1"
412
+`)
413
+	assert.Error(t, err)
414
+	assert.Contains(t, err.Error(), "services.dict-env.environment must be a mapping")
415
+}
416
+
417
+func TestEnvironmentInterpolation(t *testing.T) {
418
+	config, err := loadYAML(`
419
+version: "3"
420
+services:
421
+  test:
422
+    image: busybox
423
+    labels:
424
+      - home1=$HOME
425
+      - home2=${HOME}
426
+      - nonexistent=$NONEXISTENT
427
+      - default=${NONEXISTENT-default}
428
+networks:
429
+  test:
430
+    driver: $HOME
431
+volumes:
432
+  test:
433
+    driver: $HOME
434
+`)
435
+
436
+	assert.NoError(t, err)
437
+
438
+	home := os.Getenv("HOME")
439
+
440
+	expectedLabels := map[string]string{
441
+		"home1":       home,
442
+		"home2":       home,
443
+		"nonexistent": "",
444
+		"default":     "default",
445
+	}
446
+
447
+	assert.Equal(t, expectedLabels, config.Services[0].Labels)
448
+	assert.Equal(t, home, config.Networks["test"].Driver)
449
+	assert.Equal(t, home, config.Volumes["test"].Driver)
450
+}
451
+
452
+func TestUnsupportedProperties(t *testing.T) {
453
+	dict, err := ParseYAML([]byte(`
454
+version: "3"
455
+services:
456
+  web:
457
+    image: web
458
+    build: ./web
459
+    links:
460
+      - bar
461
+  db:
462
+    image: db
463
+    build: ./db
464
+`))
465
+	assert.NoError(t, err)
466
+
467
+	configDetails := buildConfigDetails(dict)
468
+
469
+	_, err = Load(configDetails)
470
+	assert.NoError(t, err)
471
+
472
+	unsupported := GetUnsupportedProperties(configDetails)
473
+	assert.Equal(t, []string{"build", "links"}, unsupported)
474
+}
475
+
476
+func TestDeprecatedProperties(t *testing.T) {
477
+	dict, err := ParseYAML([]byte(`
478
+version: "3"
479
+services:
480
+  web:
481
+    image: web
482
+    container_name: web
483
+  db:
484
+    image: db
485
+    container_name: db
486
+    expose: ["5434"]
487
+`))
488
+	assert.NoError(t, err)
489
+
490
+	configDetails := buildConfigDetails(dict)
491
+
492
+	_, err = Load(configDetails)
493
+	assert.NoError(t, err)
494
+
495
+	deprecated := GetDeprecatedProperties(configDetails)
496
+	assert.Equal(t, 2, len(deprecated))
497
+	assert.Contains(t, deprecated, "container_name")
498
+	assert.Contains(t, deprecated, "expose")
499
+}
500
+
501
+func TestForbiddenProperties(t *testing.T) {
502
+	_, err := loadYAML(`
503
+version: "3"
504
+services:
505
+  foo:
506
+    image: busybox
507
+    volumes:
508
+      - /data
509
+    volume_driver: some-driver
510
+  bar:
511
+    extends:
512
+      service: foo
513
+`)
514
+
515
+	assert.Error(t, err)
516
+	assert.IsType(t, &ForbiddenPropertiesError{}, err)
517
+	fmt.Println(err)
518
+	forbidden := err.(*ForbiddenPropertiesError).Properties
519
+
520
+	assert.Equal(t, 2, len(forbidden))
521
+	assert.Contains(t, forbidden, "volume_driver")
522
+	assert.Contains(t, forbidden, "extends")
523
+}
524
+
525
+func durationPtr(value time.Duration) *time.Duration {
526
+	return &value
527
+}
528
+
529
+func int64Ptr(value int64) *int64 {
530
+	return &value
531
+}
532
+
533
+func uint64Ptr(value uint64) *uint64 {
534
+	return &value
535
+}
536
+
537
// TestFullExample loads full-example.yml (a fixture exercising every
// supported v3 property) and compares the resulting services, networks,
// and volumes against fully spelled-out expected values. The expected
// value literals below are the test's contract — keep them in sync with
// the fixture file.
func TestFullExample(t *testing.T) {
	bytes, err := ioutil.ReadFile("full-example.yml")
	assert.NoError(t, err)

	config, err := loadYAML(string(bytes))
	if !assert.NoError(t, err) {
		return
	}

	// The fixture references the working directory and $HOME in volume
	// paths, so the expectations are built from the live environment.
	workingDir, err := os.Getwd()
	assert.NoError(t, err)

	homeDir := os.Getenv("HOME")
	stopGracePeriod := time.Duration(20 * time.Second)

	expectedServiceConfig := types.ServiceConfig{
		Name: "foo",

		CapAdd:        []string{"ALL"},
		CapDrop:       []string{"NET_ADMIN", "SYS_ADMIN"},
		CgroupParent:  "m-executor-abcd",
		Command:       []string{"bundle", "exec", "thin", "-p", "3000"},
		ContainerName: "my-web-container",
		DependsOn:     []string{"db", "redis"},
		Deploy: types.DeployConfig{
			Mode:     "replicated",
			Replicas: uint64Ptr(6),
			Labels:   map[string]string{"FOO": "BAR"},
			UpdateConfig: &types.UpdateConfig{
				Parallelism:     uint64Ptr(3),
				Delay:           time.Duration(10 * time.Second),
				FailureAction:   "continue",
				Monitor:         time.Duration(60 * time.Second),
				MaxFailureRatio: 0.3,
			},
			Resources: types.Resources{
				Limits: &types.Resource{
					NanoCPUs:    "0.001",
					MemoryBytes: 50 * 1024 * 1024,
				},
				Reservations: &types.Resource{
					NanoCPUs:    "0.0001",
					MemoryBytes: 20 * 1024 * 1024,
				},
			},
			RestartPolicy: &types.RestartPolicy{
				Condition:   "on_failure",
				Delay:       durationPtr(5 * time.Second),
				MaxAttempts: uint64Ptr(3),
				Window:      durationPtr(2 * time.Minute),
			},
			Placement: types.Placement{
				Constraints: []string{"node=foo"},
			},
		},
		Devices:    []string{"/dev/ttyUSB0:/dev/ttyUSB0"},
		DNS:        []string{"8.8.8.8", "9.9.9.9"},
		DNSSearch:  []string{"dc1.example.com", "dc2.example.com"},
		DomainName: "foo.com",
		Entrypoint: []string{"/code/entrypoint.sh", "-p", "3000"},
		Environment: map[string]string{
			"RACK_ENV":       "development",
			"SHOW":           "true",
			"SESSION_SECRET": "",
			"FOO":            "1",
			"BAR":            "2",
			"BAZ":            "3",
		},
		Expose: []string{"3000", "8000"},
		ExternalLinks: []string{
			"redis_1",
			"project_db_1:mysql",
			"project_db_1:postgresql",
		},
		ExtraHosts: map[string]string{
			"otherhost": "50.31.209.229",
			"somehost":  "162.242.195.82",
		},
		HealthCheck: &types.HealthCheckConfig{
			Test: []string{
				"CMD-SHELL",
				"echo \"hello world\"",
			},
			Interval: "10s",
			Timeout:  "1s",
			Retries:  uint64Ptr(5),
		},
		Hostname: "foo",
		Image:    "redis",
		Ipc:      "host",
		Labels: map[string]string{
			"com.example.description": "Accounting webapp",
			"com.example.number":      "42",
			"com.example.empty-label": "",
		},
		Links: []string{
			"db",
			"db:database",
			"redis",
		},
		Logging: &types.LoggingConfig{
			Driver: "syslog",
			Options: map[string]string{
				"syslog-address": "tcp://192.168.0.42:123",
			},
		},
		MacAddress:  "02:42:ac:11:65:43",
		NetworkMode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b",
		Networks: map[string]*types.ServiceNetworkConfig{
			"some-network": {
				Aliases:     []string{"alias1", "alias3"},
				Ipv4Address: "",
				Ipv6Address: "",
			},
			"other-network": {
				Ipv4Address: "172.16.238.10",
				Ipv6Address: "2001:3984:3989::10",
			},
			"other-other-network": nil,
		},
		Pid: "host",
		Ports: []string{
			"3000",
			"3000-3005",
			"8000:8000",
			"9090-9091:8080-8081",
			"49100:22",
			"127.0.0.1:8001:8001",
			"127.0.0.1:5000-5010:5000-5010",
		},
		Privileged: true,
		ReadOnly:   true,
		Restart:    "always",
		SecurityOpt: []string{
			"label=level:s0:c100,c200",
			"label=type:svirt_apache_t",
		},
		StdinOpen:       true,
		StopSignal:      "SIGUSR1",
		StopGracePeriod: &stopGracePeriod,
		Tmpfs:           []string{"/run", "/tmp"},
		Tty:             true,
		Ulimits: map[string]*types.UlimitsConfig{
			"nproc": {
				Single: 65535,
			},
			"nofile": {
				Soft: 20000,
				Hard: 40000,
			},
		},
		User: "someone",
		Volumes: []string{
			"/var/lib/mysql",
			"/opt/data:/var/lib/mysql",
			fmt.Sprintf("%s:/code", workingDir),
			fmt.Sprintf("%s/static:/var/www/html", workingDir),
			fmt.Sprintf("%s/configs:/etc/configs/:ro", homeDir),
			"datavolume:/var/lib/mysql",
		},
		WorkingDir: "/code",
	}

	assert.Equal(t, []types.ServiceConfig{expectedServiceConfig}, config.Services)

	// Expected top-level networks, including external-network aliasing.
	expectedNetworkConfig := map[string]types.NetworkConfig{
		"some-network": {},

		"other-network": {
			Driver: "overlay",
			DriverOpts: map[string]string{
				"foo": "bar",
				"baz": "1",
			},
			Ipam: types.IPAMConfig{
				Driver: "overlay",
				Config: []*types.IPAMPool{
					{Subnet: "172.16.238.0/24"},
					{Subnet: "2001:3984:3989::/64"},
				},
			},
		},

		"external-network": {
			External: types.External{
				Name:     "external-network",
				External: true,
			},
		},

		"other-external-network": {
			External: types.External{
				Name:     "my-cool-network",
				External: true,
			},
		},
	}

	assert.Equal(t, expectedNetworkConfig, config.Networks)

	// Expected top-level volumes, including external-volume aliasing.
	expectedVolumeConfig := map[string]types.VolumeConfig{
		"some-volume": {},
		"other-volume": {
			Driver: "flocker",
			DriverOpts: map[string]string{
				"foo": "bar",
				"baz": "1",
			},
		},
		"external-volume": {
			External: types.External{
				Name:     "external-volume",
				External: true,
			},
		},
		"other-external-volume": {
			External: types.External{
				Name:     "my-cool-volume",
				External: true,
			},
		},
	}

	assert.Equal(t, expectedVolumeConfig, config.Volumes)
}
762
+
763
+func loadYAML(yaml string) (*types.Config, error) {
764
+	dict, err := ParseYAML([]byte(yaml))
765
+	if err != nil {
766
+		return nil, err
767
+	}
768
+
769
+	return Load(buildConfigDetails(dict))
770
+}
771
+
772
+func serviceSort(services []types.ServiceConfig) []types.ServiceConfig {
773
+	sort.Sort(servicesByName(services))
774
+	return services
775
+}
776
+
777
+type servicesByName []types.ServiceConfig
778
+
779
+func (sbn servicesByName) Len() int           { return len(sbn) }
780
+func (sbn servicesByName) Swap(i, j int)      { sbn[i], sbn[j] = sbn[j], sbn[i] }
781
+func (sbn servicesByName) Less(i, j int) bool { return sbn[i].Name < sbn[j].Name }
0 782
new file mode 100644
... ...
@@ -0,0 +1,237 @@
0
+// Code generated by go-bindata.
1
+// sources:
2
+// data/config_schema_v3.0.json
3
+// DO NOT EDIT!
4
+
5
+package schema
6
+
7
+import (
8
+	"bytes"
9
+	"compress/gzip"
10
+	"fmt"
11
+	"io"
12
+	"io/ioutil"
13
+	"os"
14
+	"path/filepath"
15
+	"strings"
16
+	"time"
17
+)
18
+
19
+func bindataRead(data []byte, name string) ([]byte, error) {
20
+	gz, err := gzip.NewReader(bytes.NewBuffer(data))
21
+	if err != nil {
22
+		return nil, fmt.Errorf("Read %q: %v", name, err)
23
+	}
24
+
25
+	var buf bytes.Buffer
26
+	_, err = io.Copy(&buf, gz)
27
+	clErr := gz.Close()
28
+
29
+	if err != nil {
30
+		return nil, fmt.Errorf("Read %q: %v", name, err)
31
+	}
32
+	if clErr != nil {
33
+		return nil, err
34
+	}
35
+
36
+	return buf.Bytes(), nil
37
+}
38
+
39
// asset pairs an embedded file's decompressed contents with synthetic
// file metadata.
type asset struct {
	bytes []byte
	info  os.FileInfo
}

// bindataFileInfo implements os.FileInfo for embedded assets.
type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}
50
+
51
+func (fi bindataFileInfo) Name() string {
52
+	return fi.name
53
+}
54
+func (fi bindataFileInfo) Size() int64 {
55
+	return fi.size
56
+}
57
+func (fi bindataFileInfo) Mode() os.FileMode {
58
+	return fi.mode
59
+}
60
+func (fi bindataFileInfo) ModTime() time.Time {
61
+	return fi.modTime
62
+}
63
+func (fi bindataFileInfo) IsDir() bool {
64
+	return false
65
+}
66
+func (fi bindataFileInfo) Sys() interface{} {
67
+	return nil
68
+}
69
+
70
+var _dataConfig_schema_v30Json = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5a\x4d\x8f\xdb\x36\x13\xbe\xfb\x57\x08\x4a\x6e\xf1\xee\x06\x78\x83\x17\x68\x6e\x3d\xf6\xd4\x9e\xbb\x50\x04\x5a\x1a\xdb\xcc\x52\x24\x33\xa4\x9c\x75\x02\xff\xf7\x82\xfa\xb2\x48\x93\xa2\x6c\x2b\x4d\x0e\xbd\x2c\xd6\xe2\xcc\x70\xbe\xf8\xcc\x70\xa4\xef\xab\x24\x49\xdf\xaa\x62\x0f\x15\x49\x3f\x26\xe9\x5e\x6b\xf9\xf1\xe9\xe9\xb3\x12\xfc\xa1\x7d\xfa\x28\x70\xf7\x54\x22\xd9\xea\x87\xf7\x1f\x9e\xda\x67\x6f\xd2\xb5\xe1\xa3\xa5\x61\x29\x04\xdf\xd2\x5d\xde\xae\xe4\x87\xff\x3d\xbe\x7f\x34\xec\x2d\x89\x3e\x4a\x30\x44\x62\xf3\x19\x0a\xdd\x3e\x43\xf8\x52\x53\x04\xc3\xfc\x9c\x1e\x00\x15\x15\x3c\xcd\xd6\x2b\xb3\x26\x51\x48\x40\x4d\x41\xa5\x1f\x13\xa3\x5c\x92\x0c\x24\xfd\x83\x91\x58\xa5\x91\xf2\x5d\xda\x3c\x3e\x35\x12\x92\x24\x55\x80\x07\x5a\x8c\x24\x0c\xaa\xbe\x79\x3a\xcb\x7f\x1a\xc8\xd6\xae\xd4\x91\xb2\xcd\x73\x49\xb4\x06\xe4\x7f\x5d\xea\xd6\x2c\x7f\x7a\x26\x0f\xdf\x7e\x7f\xf8\xfb\xfd\xc3\x6f\x8f\xf9\x43\xf6\xee\xad\xb5\x6c\xfc\x8b\xb0\x6d\xb7\x2f\x61\x4b\x39\xd5\x54\xf0\x61\xff\x74\xa0\x3c\x75\xff\x9d\x86\x8d\x49\x59\x36\xc4\x84\x59\x7b\x6f\x09\x53\x60\xdb\xcc\x41\x7f\x15\xf8\x12\xb3\x79\x20\xfb\x49\x36\x77\xfb\x7b\x6c\xb6\xcd\x39\x08\x56\x57\xd1\x08\xf6\x54\x3f\xc9\x98\x76\xfb\xfb\xe2\xb7\xea\x8d\x9e\xa4\x6d\x29\x46\x7b\x37\x0a\x5a\xd9\xee\x73\x95\x2f\xdb\xc2\xbe\x1a\x9c\x15\xf0\x52\x09\x92\x89\xa3\x79\x16\xf0\x47\x4b\x50\x01\xd7\xe9\xe0\x82\x24\x49\x37\x35\x65\xa5\xeb\x51\xc1\xe1\x4f\x23\xe2\x79\xf4\x30\x49\xbe\xbb\x07\x7b\x24\xa7\x59\xb7\x7e\x85\x03\x3e\xac\x07\x6c\x19\xd6\x0b\xc1\x35\xbc\xea\xc6\xa8\xe9\xad\x5b\x17\x88\xe2\x05\x70\x4b\x19\xcc\xe5\x20\xb8\x53\x13\x2e\x63\x54\xe9\x5c\x60\x5e\xd2\x42\xa7\x27\x87\xfd\x42\x5e\x3c\x9f\x06\xd6\xd1\xaf\x6c\xe5\x11\x98\x16\x44\xe6\xa4\x2c\x2d\x3b\x08\x22\x39\xa6\xeb\x24\xa5\x1a\x2a\xe5\x37\x31\x49\x6b\x4e\xbf\xd4\xf0\x47\x47\xa2\xb1\x06\x57\x6e\x89\x42\x2e\x2f\x78\x87\xa2\x96\xb9\x24\x68\x12\x6c\xda\xfd\x69\x21\xaa\x8a\xf0\xa5\xb2\xee\x1a\x3b\x66\x78\x
5e\x70\x4d\x28\x07\xcc\x39\xa9\x62\x89\x64\x4e\x1d\xf0\x52\xe5\x6d\xfd\x9b\x4c\xa3\x6d\xde\xf2\x2b\x47\xc0\x50\x0c\x17\x8d\x47\xc9\xa7\x12\xbb\x15\x63\x52\xdb\xe8\x96\x3a\x8c\xb9\x02\x82\xc5\xfe\x46\x7e\x51\x11\xca\xe7\xf8\x0e\xb8\xc6\xa3\x14\xb4\xcd\x97\x5f\x2e\x11\x80\x1f\xf2\x01\x4b\xae\x76\x03\xf0\x03\x45\xc1\xab\xfe\x34\xcc\x01\x98\x01\xe4\x0d\xff\xab\x14\x0a\x5c\xc7\x38\x06\x8e\x97\x06\x53\x2d\x9f\xf4\x1c\xcf\xbd\xe1\xeb\x24\xe5\x75\xb5\x01\x34\x2d\x9d\x45\xb9\x15\x58\x11\xa3\x6c\xbf\xf7\x68\xd9\xf2\xb4\x27\xf3\xc6\x0e\x1c\xdb\x60\xca\x3a\x61\x39\xa3\xfc\x65\xf9\x14\x87\x57\x8d\x24\xdf\x0b\xa5\xe7\x63\xf8\x88\x7d\x0f\x84\xe9\x7d\xb1\x87\xe2\x65\x82\x7d\x4c\x65\x71\x0b\xa5\xe7\x24\x39\xad\xc8\x2e\x4e\x24\x8b\x18\x09\x23\x1b\x60\x37\xd9\xb9\xa8\xf3\x47\x62\xc5\x6e\x67\x48\x43\x19\x77\xd1\xb9\x74\xcb\xb1\x9a\x5f\x22\x3d\x00\xce\x2d\xe0\x42\x9e\x1b\x2e\x77\x31\xde\x80\x24\xf1\xee\xd3\x22\xfd\xf4\xd8\x36\x9f\x13\xa7\xaa\xf9\x8f\xb1\x34\x73\xdb\x85\xc4\xa9\xfb\xbe\x27\x8e\x85\xf3\x1a\x0a\x2b\x2a\x15\x29\x4c\xdf\x80\xa0\x02\x71\x3d\x93\x76\xcd\x7e\x5e\x89\x32\x94\xa0\x17\xc4\xae\x6f\x82\x48\x7d\x75\x21\x4c\x6e\xea\x1f\x67\x85\x2e\x7a\x81\x88\x58\x13\x52\x6f\xae\x9a\x67\x75\xe3\x29\xd6\xd0\x11\x46\x89\x82\xf8\x61\x0f\x3a\xd2\x92\x46\xe5\xe1\xc3\xcc\x9c\xf0\xf1\xfe\x7f\x92\x37\xc0\x1a\x94\x39\xbf\x47\x8e\x88\x3a\xab\xd2\x1c\x37\x9f\x22\x59\xe4\xb4\xfd\xe0\x16\x5e\xd2\x32\x8c\x15\x0d\x42\x8c\x0f\x98\x14\xa8\x2f\x4e\xd7\xbf\x53\xee\xdb\xad\xef\xae\xf6\x12\xe9\x81\x32\xd8\x81\x7d\x6b\xd9\x08\xc1\x80\x70\x0b\x7a\x10\x48\x99\x0b\xce\x8e\x33\x28\x95\x26\x18\xbd\x50\x28\x28\x6a\xa4\xfa\x98\x0b\xa9\x17\xef\x33\xd4\xbe\xca\x15\xfd\x06\x76\x34\xcf\x78\xdf\x09\xca\x2c\x1e\x5d\x52\x9e\x0b\x09\x3c\x6a\xa2\xd2\x42\xe6\x8a\xee\x38\x61\x51\x33\x0d\xe9\x0e\x49\x01\xb9\x04\xa4\xa2\xf4\x31\xac\xc7\xb1\x2d\x6b\x24\x26\x9f\x2d\x31\xba\x92\xdb\x1b\x6f\x07\x5a\xc7\x63\x56\x33\x5a\xd1\x70\x32\x7b\x50\x72\x06\x90\xb7\x20\xee\xc7\xee\x09\xdc\x3e\x6b\x4a\xb9\x86\x1d\xa0\x0f\xee\x26\x5a\x87\xe9\x
ce\x61\x46\xcb\xb0\x27\x68\x47\x69\x42\x8f\x86\x41\x89\xad\xf6\x33\xf8\x1a\x0a\xaf\x5e\xd6\x04\xb7\x91\xb7\xee\x14\xc9\xbc\xf4\x57\x61\xb2\xab\x46\x16\x84\xc5\x93\x17\x16\x6b\x15\xed\xee\xc6\xf3\xc5\x45\x4f\xb2\x69\x61\x4c\x66\x97\xd4\xaf\xc2\xca\x51\xf7\x8a\x09\xaf\x73\x9b\xe8\x05\xf8\x66\x7d\x63\x52\x77\xde\xf7\x3c\x24\x5c\x5f\x25\xce\x53\xd2\xc0\xe0\xcf\xe4\x07\x1e\x2c\xf0\xf0\xf9\x54\xd3\x0a\x44\xad\x23\x54\x08\x1a\xa9\xe3\xf9\x0e\xe9\x2c\x61\xa0\x7e\xcd\x4b\x7b\x49\x15\xd9\x38\xf3\xbf\x01\xa3\x6e\x0a\x6f\x72\x1e\xae\xf6\x97\xf9\xa9\xe0\x8e\x28\x17\x88\xed\x44\x6f\x3e\x0a\x99\x64\xb4\x20\x2a\x86\x32\x77\x5c\x21\x6b\x59\x12\x0d\x79\xfb\x2a\xe9\x2a\x5c\x9f\x00\x74\x49\x90\x30\x06\x8c\xaa\x6a\x0e\x40\xa6\x25\x30\x72\xbc\xa9\xe0\x35\xec\x5b\x42\x59\x8d\x90\x93\x42\x77\x6f\xab\x22\x99\x99\x56\x82\x53\x2d\xbc\x48\x31\x6f\xcb\x8a\xbc\xe6\xfd\xb6\x0d\x89\xf7\x58\x05\x1b\xaf\xb9\xb7\xbf\x51\x26\x28\x51\x63\x71\xe1\xec\x9b\x43\x74\x2e\xe4\x81\x8c\xe9\x77\xbc\x30\x1d\x41\x19\x50\x1a\x2e\xe7\x51\xfe\x68\xdd\xe8\x3a\xc1\x5c\x0a\x46\x8b\xe3\x52\x16\x16\x82\xb7\x4e\x9e\x93\x10\x77\x66\xa0\x49\x07\xd3\xe7\x54\x52\x47\x0f\x6b\xc3\xf0\x95\xf2\x52\x7c\xbd\x62\xc3\xe5\x52\x49\x32\x52\x80\x83\x77\xf7\x3a\x5a\x69\x24\x94\xeb\xab\xcb\xfa\xbd\x66\xdd\x51\xd5\x87\xfc\x8c\xa0\xfe\x40\x17\x7f\xd7\x19\x40\xfa\x42\xd6\xd1\x89\x4d\x05\x95\x40\x6f\x02\x2e\xf0\x6e\x3a\x66\x62\x4f\xb6\x40\x55\x9b\x35\xe2\xeb\xa8\xcc\x8d\x6e\xf1\xab\x44\x7c\x8c\x97\xc5\x01\x89\x4a\x52\x2d\x75\x3a\x66\x0f\x3d\x53\x6f\x0d\x4e\xa6\x87\x05\x49\x78\x60\x10\xd3\x3a\xae\x7b\x47\xa1\xea\x0d\x07\xff\x3d\xfd\xf2\x0a\xe1\x7b\x13\x3b\xff\x0e\x72\x0a\xdf\x38\xee\x03\xbd\xfe\x7d\x45\x20\xaa\xcf\x43\x27\xb9\x1e\x7c\x95\xcd\x0e\x71\xf0\x65\xc1\x72\xfa\x5f\xd9\xe0\xdd\x81\x19\xdd\xb7\x15\x11\xc8\xe8\xa8\xfe\x43\x8c\x5f\x26\xbf\x26\x8a\xe2\x8d\xb7\x83\x2b\x92\xc6\x19\x2b\x8d\x92\xe7\xf2\xea\x38\x15\xe7\xd9\x43\xf1\x8e\x23\xb3\xd5\x70\xc9\x3c\xdf\xad\xd9\x10\x3a\x35\x71\xe8\x49\x02\x43\x52\x67\xd3\xce\x79\xd3\x96\x2f\x98\xb6\x8f\x
ef\x26\x0a\xc5\xd4\xcb\xab\x1f\x84\xb0\x0b\x4c\x73\xfc\x31\x75\xba\xcb\xde\xbb\x97\x1f\x5f\x05\x90\x6a\xc4\x7f\xf1\x29\x96\xb1\x93\x1f\x2f\x46\x1b\xdf\xed\x31\x5b\xfb\x19\x55\x66\xf9\xc7\x21\x69\x5f\x05\x8f\x70\x22\x1b\x37\xdc\xa1\x30\x7a\x3f\xd0\x72\x87\x7c\xfd\x87\x52\xd9\xf4\x61\x5f\xf5\x7f\x4f\xab\xd3\xea\x9f\x00\x00\x00\xff\xff\xd1\xeb\xc9\xb9\x5c\x2a\x00\x00")
71
+
72
+func dataConfig_schema_v30JsonBytes() ([]byte, error) {
73
+	return bindataRead(
74
+		_dataConfig_schema_v30Json,
75
+		"data/config_schema_v3.0.json",
76
+	)
77
+}
78
+
79
+func dataConfig_schema_v30Json() (*asset, error) {
80
+	bytes, err := dataConfig_schema_v30JsonBytes()
81
+	if err != nil {
82
+		return nil, err
83
+	}
84
+
85
+	info := bindataFileInfo{name: "data/config_schema_v3.0.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
86
+	a := &asset{bytes: bytes, info: info}
87
+	return a, nil
88
+}
89
+
90
+// Asset loads and returns the asset for the given name.
91
+// It returns an error if the asset could not be found or
92
+// could not be loaded.
93
+func Asset(name string) ([]byte, error) {
94
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
95
+	if f, ok := _bindata[cannonicalName]; ok {
96
+		a, err := f()
97
+		if err != nil {
98
+			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
99
+		}
100
+		return a.bytes, nil
101
+	}
102
+	return nil, fmt.Errorf("Asset %s not found", name)
103
+}
104
+
105
+// MustAsset is like Asset but panics when Asset would return an error.
106
+// It simplifies safe initialization of global variables.
107
+func MustAsset(name string) []byte {
108
+	a, err := Asset(name)
109
+	if err != nil {
110
+		panic("asset: Asset(" + name + "): " + err.Error())
111
+	}
112
+
113
+	return a
114
+}
115
+
116
+// AssetInfo loads and returns the asset info for the given name.
117
+// It returns an error if the asset could not be found or
118
+// could not be loaded.
119
+func AssetInfo(name string) (os.FileInfo, error) {
120
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
121
+	if f, ok := _bindata[cannonicalName]; ok {
122
+		a, err := f()
123
+		if err != nil {
124
+			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
125
+		}
126
+		return a.info, nil
127
+	}
128
+	return nil, fmt.Errorf("AssetInfo %s not found", name)
129
+}
130
+
131
// AssetNames returns the names of the assets.
// Iteration order of the backing map is unspecified, so the result order
// varies between calls.
func AssetNames() []string {
	names := make([]string, 0, len(_bindata))
	for name := range _bindata {
		names = append(names, name)
	}
	return names
}
139
+
140
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
	"data/config_schema_v3.0.json": dataConfig_schema_v30Json,
}
144
+
145
+// AssetDir returns the file names below a certain
146
+// directory embedded in the file by go-bindata.
147
+// For example if you run go-bindata on data/... and data contains the
148
+// following hierarchy:
149
+//     data/
150
+//       foo.txt
151
+//       img/
152
+//         a.png
153
+//         b.png
154
+// then AssetDir("data") would return []string{"foo.txt", "img"}
155
+// AssetDir("data/img") would return []string{"a.png", "b.png"}
156
+// AssetDir("foo.txt") and AssetDir("notexist") would return an error
157
+// AssetDir("") will return []string{"data"}.
158
+func AssetDir(name string) ([]string, error) {
159
+	node := _bintree
160
+	if len(name) != 0 {
161
+		cannonicalName := strings.Replace(name, "\\", "/", -1)
162
+		pathList := strings.Split(cannonicalName, "/")
163
+		for _, p := range pathList {
164
+			node = node.Children[p]
165
+			if node == nil {
166
+				return nil, fmt.Errorf("Asset %s not found", name)
167
+			}
168
+		}
169
+	}
170
+	if node.Func != nil {
171
+		return nil, fmt.Errorf("Asset %s not found", name)
172
+	}
173
+	rv := make([]string, 0, len(node.Children))
174
+	for childName := range node.Children {
175
+		rv = append(rv, childName)
176
+	}
177
+	return rv, nil
178
+}
179
+
180
// bintree is a node in the asset directory tree: a non-nil Func marks a
// file, a populated Children map marks a directory.
type bintree struct {
	Func     func() (*asset, error)
	Children map[string]*bintree
}

// _bintree is the root of the embedded asset hierarchy used by AssetDir.
var _bintree = &bintree{nil, map[string]*bintree{
	"data": &bintree{nil, map[string]*bintree{
		"config_schema_v3.0.json": &bintree{dataConfig_schema_v30Json, map[string]*bintree{}},
	}},
}}
189
+
190
+// RestoreAsset restores an asset under the given directory
191
+func RestoreAsset(dir, name string) error {
192
+	data, err := Asset(name)
193
+	if err != nil {
194
+		return err
195
+	}
196
+	info, err := AssetInfo(name)
197
+	if err != nil {
198
+		return err
199
+	}
200
+	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
201
+	if err != nil {
202
+		return err
203
+	}
204
+	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
205
+	if err != nil {
206
+		return err
207
+	}
208
+	err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
209
+	if err != nil {
210
+		return err
211
+	}
212
+	return nil
213
+}
214
+
215
+// RestoreAssets restores an asset under the given directory recursively
216
+func RestoreAssets(dir, name string) error {
217
+	children, err := AssetDir(name)
218
+	// File
219
+	if err != nil {
220
+		return RestoreAsset(dir, name)
221
+	}
222
+	// Dir
223
+	for _, child := range children {
224
+		err = RestoreAssets(dir, filepath.Join(name, child))
225
+		if err != nil {
226
+			return err
227
+		}
228
+	}
229
+	return nil
230
+}
231
+
232
+func _filePath(dir, name string) string {
233
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
234
+	return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
235
+}
236
+
0 237
new file mode 100644
... ...
@@ -0,0 +1,379 @@
0
+{
1
+  "$schema": "http://json-schema.org/draft-04/schema#",
2
+  "id": "config_schema_v3.0.json",
3
+  "type": "object",
4
+  "required": ["version"],
5
+
6
+  "properties": {
7
+    "version": {
8
+      "type": "string"
9
+    },
10
+
11
+    "services": {
12
+      "id": "#/properties/services",
13
+      "type": "object",
14
+      "patternProperties": {
15
+        "^[a-zA-Z0-9._-]+$": {
16
+          "$ref": "#/definitions/service"
17
+        }
18
+      },
19
+      "additionalProperties": false
20
+    },
21
+
22
+    "networks": {
23
+      "id": "#/properties/networks",
24
+      "type": "object",
25
+      "patternProperties": {
26
+        "^[a-zA-Z0-9._-]+$": {
27
+          "$ref": "#/definitions/network"
28
+        }
29
+      }
30
+    },
31
+
32
+    "volumes": {
33
+      "id": "#/properties/volumes",
34
+      "type": "object",
35
+      "patternProperties": {
36
+        "^[a-zA-Z0-9._-]+$": {
37
+          "$ref": "#/definitions/volume"
38
+        }
39
+      },
40
+      "additionalProperties": false
41
+    }
42
+  },
43
+
44
+  "additionalProperties": false,
45
+
46
+  "definitions": {
47
+
48
+    "service": {
49
+      "id": "#/definitions/service",
50
+      "type": "object",
51
+
52
+      "properties": {
53
+        "deploy": {"$ref": "#/definitions/deployment"},
54
+        "build": {
55
+          "oneOf": [
56
+            {"type": "string"},
57
+            {
58
+              "type": "object",
59
+              "properties": {
60
+                "context": {"type": "string"},
61
+                "dockerfile": {"type": "string"},
62
+                "args": {"$ref": "#/definitions/list_or_dict"}
63
+              },
64
+              "additionalProperties": false
65
+            }
66
+          ]
67
+        },
68
+        "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
69
+        "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
70
+        "cgroup_parent": {"type": "string"},
71
+        "command": {
72
+          "oneOf": [
73
+            {"type": "string"},
74
+            {"type": "array", "items": {"type": "string"}}
75
+          ]
76
+        },
77
+        "container_name": {"type": "string"},
78
+        "depends_on": {"$ref": "#/definitions/list_of_strings"},
79
+        "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
80
+        "dns": {"$ref": "#/definitions/string_or_list"},
81
+        "dns_search": {"$ref": "#/definitions/string_or_list"},
82
+        "domainname": {"type": "string"},
83
+        "entrypoint": {
84
+          "oneOf": [
85
+            {"type": "string"},
86
+            {"type": "array", "items": {"type": "string"}}
87
+          ]
88
+        },
89
+        "env_file": {"$ref": "#/definitions/string_or_list"},
90
+        "environment": {"$ref": "#/definitions/list_or_dict"},
91
+
92
+        "expose": {
93
+          "type": "array",
94
+          "items": {
95
+            "type": ["string", "number"],
96
+            "format": "expose"
97
+          },
98
+          "uniqueItems": true
99
+        },
100
+
101
+        "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
102
+        "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
103
+        "healthcheck": {"$ref": "#/definitions/healthcheck"},
104
+        "hostname": {"type": "string"},
105
+        "image": {"type": "string"},
106
+        "ipc": {"type": "string"},
107
+        "labels": {"$ref": "#/definitions/list_or_dict"},
108
+        "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
109
+
110
+        "logging": {
111
+            "type": "object",
112
+
113
+            "properties": {
114
+                "driver": {"type": "string"},
115
+                "options": {
116
+                  "type": "object",
117
+                  "patternProperties": {
118
+                    "^.+$": {"type": ["string", "number", "null"]}
119
+                  }
120
+                }
121
+            },
122
+            "additionalProperties": false
123
+        },
124
+
125
+        "mac_address": {"type": "string"},
126
+        "network_mode": {"type": "string"},
127
+
128
+        "networks": {
129
+          "oneOf": [
130
+            {"$ref": "#/definitions/list_of_strings"},
131
+            {
132
+              "type": "object",
133
+              "patternProperties": {
134
+                "^[a-zA-Z0-9._-]+$": {
135
+                  "oneOf": [
136
+                    {
137
+                      "type": "object",
138
+                      "properties": {
139
+                        "aliases": {"$ref": "#/definitions/list_of_strings"},
140
+                        "ipv4_address": {"type": "string"},
141
+                        "ipv6_address": {"type": "string"}
142
+                      },
143
+                      "additionalProperties": false
144
+                    },
145
+                    {"type": "null"}
146
+                  ]
147
+                }
148
+              },
149
+              "additionalProperties": false
150
+            }
151
+          ]
152
+        },
153
+        "pid": {"type": ["string", "null"]},
154
+
155
+        "ports": {
156
+          "type": "array",
157
+          "items": {
158
+            "type": ["string", "number"],
159
+            "format": "ports"
160
+          },
161
+          "uniqueItems": true
162
+        },
163
+
164
+        "privileged": {"type": "boolean"},
165
+        "read_only": {"type": "boolean"},
166
+        "restart": {"type": "string"},
167
+        "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
168
+        "shm_size": {"type": ["number", "string"]},
169
+        "stdin_open": {"type": "boolean"},
170
+        "stop_signal": {"type": "string"},
171
+        "stop_grace_period": {"type": "string", "format": "duration"},
172
+        "tmpfs": {"$ref": "#/definitions/string_or_list"},
173
+        "tty": {"type": "boolean"},
174
+        "ulimits": {
175
+          "type": "object",
176
+          "patternProperties": {
177
+            "^[a-z]+$": {
178
+              "oneOf": [
179
+                {"type": "integer"},
180
+                {
181
+                  "type":"object",
182
+                  "properties": {
183
+                    "hard": {"type": "integer"},
184
+                    "soft": {"type": "integer"}
185
+                  },
186
+                  "required": ["soft", "hard"],
187
+                  "additionalProperties": false
188
+                }
189
+              ]
190
+            }
191
+          }
192
+        },
193
+        "user": {"type": "string"},
194
+        "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
195
+        "working_dir": {"type": "string"}
196
+      },
197
+      "additionalProperties": false
198
+    },
199
+
200
+    "healthcheck": {
201
+      "id": "#/definitions/healthcheck",
202
+      "type": ["object", "null"],
203
+      "properties": {
204
+        "interval": {"type":"string"},
205
+        "timeout": {"type":"string"},
206
+        "retries": {"type": "number"},
207
+        "test": {
208
+          "oneOf": [
209
+            {"type": "string"},
210
+            {"type": "array", "items": {"type": "string"}}
211
+          ]
212
+        },
213
+        "disable": {"type": "boolean"}
214
+      },
215
+      "additionalProperties": false
216
+    },
217
+    "deployment": {
218
+      "id": "#/definitions/deployment",
219
+      "type": ["object", "null"],
220
+      "properties": {
221
+        "mode": {"type": "string"},
222
+        "replicas": {"type": "integer"},
223
+        "labels": {"$ref": "#/definitions/list_or_dict"},
224
+        "update_config": {
225
+          "type": "object",
226
+          "properties": {
227
+            "parallelism": {"type": "integer"},
228
+            "delay": {"type": "string", "format": "duration"},
229
+            "failure_action": {"type": "string"},
230
+            "monitor": {"type": "string", "format": "duration"},
231
+            "max_failure_ratio": {"type": "number"}
232
+          },
233
+          "additionalProperties": false
234
+        },
235
+        "resources": {
236
+          "type": "object",
237
+          "properties": {
238
+            "limits": {"$ref": "#/definitions/resource"},
239
+            "reservations": {"$ref": "#/definitions/resource"}
240
+          }
241
+        },
242
+        "restart_policy": {
243
+          "type": "object",
244
+          "properties": {
245
+            "condition": {"type": "string"},
246
+            "delay": {"type": "string", "format": "duration"},
247
+            "max_attempts": {"type": "integer"},
248
+            "window": {"type": "string", "format": "duration"}
249
+          },
250
+          "additionalProperties": false
251
+        },
252
+        "placement": {
253
+          "type": "object",
254
+          "properties": {
255
+            "constraints": {"type": "array", "items": {"type": "string"}}
256
+          },
257
+          "additionalProperties": false
258
+        }
259
+      },
260
+      "additionalProperties": false
261
+    },
262
+
263
+    "resource": {
264
+      "id": "#/definitions/resource",
265
+      "type": "object",
266
+      "properties": {
267
+        "cpus": {"type": "string"},
268
+        "memory": {"type": "string"}
269
+      },
270
+      "additionalProperties": false
271
+    },
272
+
273
+    "network": {
274
+      "id": "#/definitions/network",
275
+      "type": ["object", "null"],
276
+      "properties": {
277
+        "driver": {"type": "string"},
278
+        "driver_opts": {
279
+          "type": "object",
280
+          "patternProperties": {
281
+            "^.+$": {"type": ["string", "number"]}
282
+          }
283
+        },
284
+        "ipam": {
285
+          "type": "object",
286
+          "properties": {
287
+            "driver": {"type": "string"},
288
+            "config": {
289
+              "type": "array",
290
+              "items": {
291
+                "type": "object",
292
+                "properties": {
293
+                  "subnet": {"type": "string"}
294
+                },
295
+                "additionalProperties": false
296
+              }
297
+            }
298
+          },
299
+          "additionalProperties": false
300
+        },
301
+        "external": {
302
+          "type": ["boolean", "object"],
303
+          "properties": {
304
+            "name": {"type": "string"}
305
+          },
306
+          "additionalProperties": false
307
+        },
308
+        "labels": {"$ref": "#/definitions/list_or_dict"}
309
+      },
310
+      "additionalProperties": false
311
+    },
312
+
313
+    "volume": {
314
+      "id": "#/definitions/volume",
315
+      "type": ["object", "null"],
316
+      "properties": {
317
+        "driver": {"type": "string"},
318
+        "driver_opts": {
319
+          "type": "object",
320
+          "patternProperties": {
321
+            "^.+$": {"type": ["string", "number"]}
322
+          }
323
+        },
324
+        "external": {
325
+          "type": ["boolean", "object"],
326
+          "properties": {
327
+            "name": {"type": "string"}
328
+          }
329
+        }
330
+      },
331
+      "labels": {"$ref": "#/definitions/list_or_dict"},
332
+      "additionalProperties": false
333
+    },
334
+
335
+    "string_or_list": {
336
+      "oneOf": [
337
+        {"type": "string"},
338
+        {"$ref": "#/definitions/list_of_strings"}
339
+      ]
340
+    },
341
+
342
+    "list_of_strings": {
343
+      "type": "array",
344
+      "items": {"type": "string"},
345
+      "uniqueItems": true
346
+    },
347
+
348
+    "list_or_dict": {
349
+      "oneOf": [
350
+        {
351
+          "type": "object",
352
+          "patternProperties": {
353
+            ".+": {
354
+              "type": ["string", "number", "null"]
355
+            }
356
+          },
357
+          "additionalProperties": false
358
+        },
359
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
360
+      ]
361
+    },
362
+
363
+    "constraints": {
364
+      "service": {
365
+        "id": "#/definitions/constraints/service",
366
+        "anyOf": [
367
+          {"required": ["build"]},
368
+          {"required": ["image"]}
369
+        ],
370
+        "properties": {
371
+          "build": {
372
+            "required": ["context"]
373
+          }
374
+        }
375
+      }
376
+    }
377
+  }
378
+}
0 379
new file mode 100644
... ...
@@ -0,0 +1,113 @@
0
+package schema
1
+
2
+//go:generate go-bindata -pkg schema -nometadata data
3
+
4
+import (
5
+	"fmt"
6
+	"strings"
7
+	"time"
8
+
9
+	"github.com/xeipuuv/gojsonschema"
10
+)
11
+
12
+type portsFormatChecker struct{}
13
+
14
+func (checker portsFormatChecker) IsFormat(input string) bool {
15
+	// TODO: implement this
16
+	return true
17
+}
18
+
19
+type durationFormatChecker struct{}
20
+
21
+func (checker durationFormatChecker) IsFormat(input string) bool {
22
+	_, err := time.ParseDuration(input)
23
+	return err == nil
24
+}
25
+
26
+func init() {
27
+	gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{})
28
+	gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{})
29
+	gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{})
30
+}
31
+
32
+// Validate uses the jsonschema to validate the configuration
33
+func Validate(config map[string]interface{}) error {
34
+	schemaData, err := Asset("data/config_schema_v3.0.json")
35
+	if err != nil {
36
+		return err
37
+	}
38
+
39
+	schemaLoader := gojsonschema.NewStringLoader(string(schemaData))
40
+	dataLoader := gojsonschema.NewGoLoader(config)
41
+
42
+	result, err := gojsonschema.Validate(schemaLoader, dataLoader)
43
+	if err != nil {
44
+		return err
45
+	}
46
+
47
+	if !result.Valid() {
48
+		return toError(result)
49
+	}
50
+
51
+	return nil
52
+}
53
+
54
+func toError(result *gojsonschema.Result) error {
55
+	err := getMostSpecificError(result.Errors())
56
+	description := getDescription(err)
57
+	return fmt.Errorf("%s %s", err.Field(), description)
58
+}
59
+
60
+func getDescription(err gojsonschema.ResultError) string {
61
+	if err.Type() == "invalid_type" {
62
+		if expectedType, ok := err.Details()["expected"].(string); ok {
63
+			return fmt.Sprintf("must be a %s", humanReadableType(expectedType))
64
+		}
65
+	}
66
+
67
+	return err.Description()
68
+}
69
+
70
+func humanReadableType(definition string) string {
71
+	if definition[0:1] == "[" {
72
+		allTypes := strings.Split(definition[1:len(definition)-1], ",")
73
+		for i, t := range allTypes {
74
+			allTypes[i] = humanReadableType(t)
75
+		}
76
+		return fmt.Sprintf(
77
+			"%s or %s",
78
+			strings.Join(allTypes[0:len(allTypes)-1], ", "),
79
+			allTypes[len(allTypes)-1],
80
+		)
81
+	}
82
+	if definition == "object" {
83
+		return "mapping"
84
+	}
85
+	if definition == "array" {
86
+		return "list"
87
+	}
88
+	return definition
89
+}
90
+
91
+func getMostSpecificError(errors []gojsonschema.ResultError) gojsonschema.ResultError {
92
+	var mostSpecificError gojsonschema.ResultError
93
+
94
+	for _, err := range errors {
95
+		if mostSpecificError == nil {
96
+			mostSpecificError = err
97
+		} else if specificity(err) > specificity(mostSpecificError) {
98
+			mostSpecificError = err
99
+		} else if specificity(err) == specificity(mostSpecificError) {
100
+			// Invalid type errors win in a tie-breaker for most specific field name
101
+			if err.Type() == "invalid_type" && mostSpecificError.Type() != "invalid_type" {
102
+				mostSpecificError = err
103
+			}
104
+		}
105
+	}
106
+
107
+	return mostSpecificError
108
+}
109
+
110
+func specificity(err gojsonschema.ResultError) int {
111
+	return len(strings.Split(err.Field(), "."))
112
+}
0 113
new file mode 100644
... ...
@@ -0,0 +1,35 @@
0
+package schema
1
+
2
+import (
3
+	"testing"
4
+
5
+	"github.com/stretchr/testify/assert"
6
+)
7
+
8
+type dict map[string]interface{}
9
+
10
+func TestValid(t *testing.T) {
11
+	config := dict{
12
+		"version": "2.1",
13
+		"services": dict{
14
+			"foo": dict{
15
+				"image": "busybox",
16
+			},
17
+		},
18
+	}
19
+
20
+	assert.NoError(t, Validate(config))
21
+}
22
+
23
+func TestUndefinedTopLevelOption(t *testing.T) {
24
+	config := dict{
25
+		"version": "2.1",
26
+		"helicopters": dict{
27
+			"foo": dict{
28
+				"image": "busybox",
29
+			},
30
+		},
31
+	}
32
+
33
+	assert.Error(t, Validate(config))
34
+}
0 35
new file mode 100644
... ...
@@ -0,0 +1,100 @@
0
+package template
1
+
2
+import (
3
+	"fmt"
4
+	"regexp"
5
+	"strings"
6
+)
7
+
8
+var delimiter = "\\$"
9
+var substitution = "[_a-z][_a-z0-9]*(?::?-[^}]+)?"
10
+
11
+var patternString = fmt.Sprintf(
12
+	"%s(?i:(?P<escaped>%s)|(?P<named>%s)|{(?P<braced>%s)}|(?P<invalid>))",
13
+	delimiter, delimiter, substitution, substitution,
14
+)
15
+
16
+var pattern = regexp.MustCompile(patternString)
17
+
18
+// InvalidTemplateError is returned when a variable template is not in a valid
19
+// format
20
+type InvalidTemplateError struct {
21
+	Template string
22
+}
23
+
24
+func (e InvalidTemplateError) Error() string {
25
+	return fmt.Sprintf("Invalid template: %#v", e.Template)
26
+}
27
+
28
+// Mapping is a user-supplied function which maps from variable names to values.
29
+// Returns the value as a string and a bool indicating whether
30
+// the value is present, to distinguish between an empty string
31
+// and the absence of a value.
32
+type Mapping func(string) (string, bool)
33
+
34
+// Substitute variables in the string with their values
35
+func Substitute(template string, mapping Mapping) (result string, err *InvalidTemplateError) {
36
+	result = pattern.ReplaceAllStringFunc(template, func(substring string) string {
37
+		matches := pattern.FindStringSubmatch(substring)
38
+		groups := make(map[string]string)
39
+		for i, name := range pattern.SubexpNames() {
40
+			if i != 0 {
41
+				groups[name] = matches[i]
42
+			}
43
+		}
44
+
45
+		substitution := groups["named"]
46
+		if substitution == "" {
47
+			substitution = groups["braced"]
48
+		}
49
+		if substitution != "" {
50
+			// Soft default (fall back if unset or empty)
51
+			if strings.Contains(substitution, ":-") {
52
+				name, defaultValue := partition(substitution, ":-")
53
+				value, ok := mapping(name)
54
+				if !ok || value == "" {
55
+					return defaultValue
56
+				}
57
+				return value
58
+			}
59
+
60
+			// Hard default (fall back if-and-only-if empty)
61
+			if strings.Contains(substitution, "-") {
62
+				name, defaultValue := partition(substitution, "-")
63
+				value, ok := mapping(name)
64
+				if !ok {
65
+					return defaultValue
66
+				}
67
+				return value
68
+			}
69
+
70
+			// No default (fall back to empty string)
71
+			value, ok := mapping(substitution)
72
+			if !ok {
73
+				return ""
74
+			}
75
+			return value
76
+		}
77
+
78
+		if escaped := groups["escaped"]; escaped != "" {
79
+			return escaped
80
+		}
81
+
82
+		err = &InvalidTemplateError{Template: template}
83
+		return ""
84
+	})
85
+
86
+	return result, err
87
+}
88
+
89
+// Split the string at the first occurrence of sep, and return the part before the separator,
90
+// and the part after the separator.
91
+//
92
+// If the separator is not found, return the string itself, followed by an empty string.
93
+func partition(s, sep string) (string, string) {
94
+	if strings.Contains(s, sep) {
95
+		parts := strings.SplitN(s, sep, 2)
96
+		return parts[0], parts[1]
97
+	}
98
+	return s, ""
99
+}
0 100
new file mode 100644
... ...
@@ -0,0 +1,83 @@
0
+package template
1
+
2
+import (
3
+	"testing"
4
+
5
+	"github.com/stretchr/testify/assert"
6
+)
7
+
8
+var defaults = map[string]string{
9
+	"FOO": "first",
10
+	"BAR": "",
11
+}
12
+
13
+func defaultMapping(name string) (string, bool) {
14
+	val, ok := defaults[name]
15
+	return val, ok
16
+}
17
+
18
+func TestEscaped(t *testing.T) {
19
+	result, err := Substitute("$${foo}", defaultMapping)
20
+	assert.NoError(t, err)
21
+	assert.Equal(t, "${foo}", result)
22
+}
23
+
24
+func TestInvalid(t *testing.T) {
25
+	invalidTemplates := []string{
26
+		"${",
27
+		"$}",
28
+		"${}",
29
+		"${ }",
30
+		"${ foo}",
31
+		"${foo }",
32
+		"${foo!}",
33
+	}
34
+
35
+	for _, template := range invalidTemplates {
36
+		_, err := Substitute(template, defaultMapping)
37
+		assert.Error(t, err)
38
+		assert.IsType(t, &InvalidTemplateError{}, err)
39
+	}
40
+}
41
+
42
+func TestNoValueNoDefault(t *testing.T) {
43
+	for _, template := range []string{"This ${missing} var", "This ${BAR} var"} {
44
+		result, err := Substitute(template, defaultMapping)
45
+		assert.NoError(t, err)
46
+		assert.Equal(t, "This  var", result)
47
+	}
48
+}
49
+
50
+func TestValueNoDefault(t *testing.T) {
51
+	for _, template := range []string{"This $FOO var", "This ${FOO} var"} {
52
+		result, err := Substitute(template, defaultMapping)
53
+		assert.NoError(t, err)
54
+		assert.Equal(t, "This first var", result)
55
+	}
56
+}
57
+
58
+func TestNoValueWithDefault(t *testing.T) {
59
+	for _, template := range []string{"ok ${missing:-def}", "ok ${missing-def}"} {
60
+		result, err := Substitute(template, defaultMapping)
61
+		assert.NoError(t, err)
62
+		assert.Equal(t, "ok def", result)
63
+	}
64
+}
65
+
66
+func TestEmptyValueWithSoftDefault(t *testing.T) {
67
+	result, err := Substitute("ok ${BAR:-def}", defaultMapping)
68
+	assert.NoError(t, err)
69
+	assert.Equal(t, "ok def", result)
70
+}
71
+
72
+func TestEmptyValueWithHardDefault(t *testing.T) {
73
+	result, err := Substitute("ok ${BAR-def}", defaultMapping)
74
+	assert.NoError(t, err)
75
+	assert.Equal(t, "ok ", result)
76
+}
77
+
78
+func TestNonAlphanumericDefault(t *testing.T) {
79
+	result, err := Substitute("ok ${BAR:-/non:-alphanumeric}", defaultMapping)
80
+	assert.NoError(t, err)
81
+	assert.Equal(t, "ok /non:-alphanumeric", result)
82
+}
0 83
new file mode 100644
... ...
@@ -0,0 +1,232 @@
0
+package types
1
+
2
+import (
3
+	"time"
4
+)
5
+
6
+// UnsupportedProperties not yet supported by this implementation of the compose file
7
+var UnsupportedProperties = []string{
8
+	"build",
9
+	"cap_add",
10
+	"cap_drop",
11
+	"cgroup_parent",
12
+	"devices",
13
+	"dns",
14
+	"dns_search",
15
+	"domainname",
16
+	"external_links",
17
+	"ipc",
18
+	"links",
19
+	"mac_address",
20
+	"network_mode",
21
+	"privileged",
22
+	"read_only",
23
+	"restart",
24
+	"security_opt",
25
+	"shm_size",
26
+	"stop_signal",
27
+	"tmpfs",
28
+}
29
+
30
+// DeprecatedProperties that were removed from the v3 format, but their
31
+// use should not impact the behaviour of the application.
32
+var DeprecatedProperties = map[string]string{
33
+	"container_name": "Setting the container name is not supported.",
34
+	"expose":         "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.",
35
+}
36
+
37
+// ForbiddenProperties that are not supported in this implementation of the
38
+// compose file.
39
+var ForbiddenProperties = map[string]string{
40
+	"extends":       "Support for `extends` is not implemented yet. Use `docker-compose config` to generate a configuration with all `extends` options resolved, and deploy from that.",
41
+	"volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.",
42
+	"volumes_from":  "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.",
43
+	"cpu_quota":     "Set resource limits using deploy.resources",
44
+	"cpu_shares":    "Set resource limits using deploy.resources",
45
+	"cpuset":        "Set resource limits using deploy.resources",
46
+	"mem_limit":     "Set resource limits using deploy.resources",
47
+	"memswap_limit": "Set resource limits using deploy.resources",
48
+}
49
+
50
+// Dict is a mapping of strings to interface{}
51
+type Dict map[string]interface{}
52
+
53
+// ConfigFile is a filename and the contents of the file as a Dict
54
+type ConfigFile struct {
55
+	Filename string
56
+	Config   Dict
57
+}
58
+
59
+// ConfigDetails are the details about a group of ConfigFiles
60
+type ConfigDetails struct {
61
+	WorkingDir  string
62
+	ConfigFiles []ConfigFile
63
+	Environment map[string]string
64
+}
65
+
66
+// Config is a full compose file configuration
67
+type Config struct {
68
+	Services []ServiceConfig
69
+	Networks map[string]NetworkConfig
70
+	Volumes  map[string]VolumeConfig
71
+}
72
+
73
+// ServiceConfig is the configuration of one service
74
+type ServiceConfig struct {
75
+	Name string
76
+
77
+	CapAdd          []string `mapstructure:"cap_add"`
78
+	CapDrop         []string `mapstructure:"cap_drop"`
79
+	CgroupParent    string   `mapstructure:"cgroup_parent"`
80
+	Command         []string `compose:"shell_command"`
81
+	ContainerName   string   `mapstructure:"container_name"`
82
+	DependsOn       []string `mapstructure:"depends_on"`
83
+	Deploy          DeployConfig
84
+	Devices         []string
85
+	DNS             []string          `compose:"string_or_list"`
86
+	DNSSearch       []string          `mapstructure:"dns_search" compose:"string_or_list"`
87
+	DomainName      string            `mapstructure:"domainname"`
88
+	Entrypoint      []string          `compose:"shell_command"`
89
+	Environment     map[string]string `compose:"list_or_dict_equals"`
90
+	Expose          []string          `compose:"list_of_strings_or_numbers"`
91
+	ExternalLinks   []string          `mapstructure:"external_links"`
92
+	ExtraHosts      map[string]string `mapstructure:"extra_hosts" compose:"list_or_dict_colon"`
93
+	Hostname        string
94
+	HealthCheck     *HealthCheckConfig
95
+	Image           string
96
+	Ipc             string
97
+	Labels          map[string]string `compose:"list_or_dict_equals"`
98
+	Links           []string
99
+	Logging         *LoggingConfig
100
+	MacAddress      string                           `mapstructure:"mac_address"`
101
+	NetworkMode     string                           `mapstructure:"network_mode"`
102
+	Networks        map[string]*ServiceNetworkConfig `compose:"list_or_struct_map"`
103
+	Pid             string
104
+	Ports           []string `compose:"list_of_strings_or_numbers"`
105
+	Privileged      bool
106
+	ReadOnly        bool `mapstructure:"read_only"`
107
+	Restart         string
108
+	SecurityOpt     []string       `mapstructure:"security_opt"`
109
+	StdinOpen       bool           `mapstructure:"stdin_open"`
110
+	StopGracePeriod *time.Duration `mapstructure:"stop_grace_period"`
111
+	StopSignal      string         `mapstructure:"stop_signal"`
112
+	Tmpfs           []string       `compose:"string_or_list"`
113
+	Tty             bool           `mapstructure:"tty"`
114
+	Ulimits         map[string]*UlimitsConfig
115
+	User            string
116
+	Volumes         []string
117
+	WorkingDir      string `mapstructure:"working_dir"`
118
+}
119
+
120
+// LoggingConfig the logging configuration for a service
121
+type LoggingConfig struct {
122
+	Driver  string
123
+	Options map[string]string
124
+}
125
+
126
+// DeployConfig the deployment configuration for a service
127
+type DeployConfig struct {
128
+	Mode          string
129
+	Replicas      *uint64
130
+	Labels        map[string]string `compose:"list_or_dict_equals"`
131
+	UpdateConfig  *UpdateConfig     `mapstructure:"update_config"`
132
+	Resources     Resources
133
+	RestartPolicy *RestartPolicy `mapstructure:"restart_policy"`
134
+	Placement     Placement
135
+}
136
+
137
+// HealthCheckConfig the healthcheck configuration for a service
138
+type HealthCheckConfig struct {
139
+	Test     []string `compose:"healthcheck"`
140
+	Timeout  string
141
+	Interval string
142
+	Retries  *uint64
143
+	Disable  bool
144
+}
145
+
146
+// UpdateConfig the service update configuration
147
+type UpdateConfig struct {
148
+	Parallelism     *uint64
149
+	Delay           time.Duration
150
+	FailureAction   string `mapstructure:"failure_action"`
151
+	Monitor         time.Duration
152
+	MaxFailureRatio float32 `mapstructure:"max_failure_ratio"`
153
+}
154
+
155
+// Resources the resource limits and reservations
156
+type Resources struct {
157
+	Limits       *Resource
158
+	Reservations *Resource
159
+}
160
+
161
+// Resource is a resource to be limited or reserved
162
+type Resource struct {
163
+	// TODO: types to convert from units and ratios
164
+	NanoCPUs    string    `mapstructure:"cpus"`
165
+	MemoryBytes UnitBytes `mapstructure:"memory"`
166
+}
167
+
168
+// UnitBytes is the bytes type
169
+type UnitBytes int64
170
+
171
+// RestartPolicy the service restart policy
172
+type RestartPolicy struct {
173
+	Condition   string
174
+	Delay       *time.Duration
175
+	MaxAttempts *uint64 `mapstructure:"max_attempts"`
176
+	Window      *time.Duration
177
+}
178
+
179
+// Placement constraints for the service
180
+type Placement struct {
181
+	Constraints []string
182
+}
183
+
184
+// ServiceNetworkConfig is the network configuration for a service
185
+type ServiceNetworkConfig struct {
186
+	Aliases     []string
187
+	Ipv4Address string `mapstructure:"ipv4_address"`
188
+	Ipv6Address string `mapstructure:"ipv6_address"`
189
+}
190
+
191
+// UlimitsConfig the ulimit configuration
192
+type UlimitsConfig struct {
193
+	Single int
194
+	Soft   int
195
+	Hard   int
196
+}
197
+
198
+// NetworkConfig for a network
199
+type NetworkConfig struct {
200
+	Driver     string
201
+	DriverOpts map[string]string `mapstructure:"driver_opts"`
202
+	Ipam       IPAMConfig
203
+	External   External
204
+	Labels     map[string]string `compose:"list_or_dict_equals"`
205
+}
206
+
207
+// IPAMConfig for a network
208
+type IPAMConfig struct {
209
+	Driver string
210
+	Config []*IPAMPool
211
+}
212
+
213
+// IPAMPool for a network
214
+type IPAMPool struct {
215
+	Subnet string
216
+}
217
+
218
+// VolumeConfig for a volume
219
+type VolumeConfig struct {
220
+	Driver     string
221
+	DriverOpts map[string]string `mapstructure:"driver_opts"`
222
+	External   External
223
+	Labels     map[string]string `compose:"list_or_dict_equals"`
224
+}
225
+
226
+// External identifies a Volume or Network as a reference to a resource that is
227
+// not managed, and should already exist.
228
+type External struct {
229
+	Name     string
230
+	External bool
231
+}
... ...
@@ -6,3 +6,4 @@ CONTAINERD_COMMIT=03e5862ec0d8d3b3f750e19fca3ee367e13c090e
6 6
 TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
7 7
 LIBNETWORK_COMMIT=0f534354b813003a754606689722fe253101bc4e
8 8
 VNDR_COMMIT=f56bd4504b4fad07a357913687fb652ee54bb3b0
9
+BINDATA_COMMIT=a0ff2567cfb70903282db057e799fd826784d41d
... ...
@@ -46,6 +46,14 @@ install_proxy() {
46 46
 	go build -ldflags="$PROXY_LDFLAGS" -o /usr/local/bin/docker-proxy github.com/docker/libnetwork/cmd/proxy
47 47
 }
48 48
 
49
+install_bindata() {
50
+    echo "Install go-bindata version $BINDATA_COMMIT"
51
+    git clone https://github.com/jteeuwen/go-bindata "$GOPATH/src/github.com/jteeuwen/go-bindata"
52
+    cd $GOPATH/src/github.com/jteeuwen/go-bindata
53
+    git checkout -q "$BINDATA_COMMIT"
54
+	go build -o /usr/local/bin/go-bindata github.com/jteeuwen/go-bindata/go-bindata
55
+}
56
+
49 57
 for prog in "$@"
50 58
 do
51 59
 	case $prog in
... ...
@@ -99,6 +107,10 @@ do
99 99
 			go build -v -o /usr/local/bin/vndr .
100 100
 			;;
101 101
 
102
+        bindata)
103
+            install_bindata
104
+            ;;
105
+
102 106
 		*)
103 107
 			echo echo "Usage: $0 [tomlv|runc|containerd|tini|proxy]"
104 108
 			exit 1
... ...
@@ -261,7 +261,7 @@ Function Validate-GoFormat($headCommit, $upstreamCommit) {
261 261
 
262 262
     # Get a list of all go source-code files which have changed.  Ignore exit code on next call - always process regardless
263 263
     $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'*.go`'"
264
-    $files = $files | Select-String -NotMatch "^vendor/"
264
+    $files = $files | Select-String -NotMatch "^vendor/" | Select-String -NotMatch "^cli/compose/schema/bindata.go"
265 265
     $badFiles=@(); $files | %{
266 266
         # Deliberately ignore error on next line - treat as failed
267 267
         $content=Invoke-Expression "git show $headCommit`:$_"
268 268
new file mode 100755
... ...
@@ -0,0 +1,28 @@
0
+#!/bin/bash
1
+
2
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
3
+source "${SCRIPTDIR}/.validate"
4
+
5
+IFS=$'\n'
6
+files=( $(validate_diff --diff-filter=ACMR --name-only -- 'cli/compose/schema/data' || true) )
7
+unset IFS
8
+
9
+if [ ${#files[@]} -gt 0 ]; then
10
+	go generate github.com/docker/docker/cli/compose/schema 2> /dev/null
11
+	# Let see if the working directory is clean
12
+	diffs="$(git status --porcelain -- api/types/ 2>/dev/null)"
13
+	if [ "$diffs" ]; then
14
+		{
15
+			echo 'The result of `go generate github.com/docker/docker/cli/compose/schema` differs'
16
+			echo
17
+			echo "$diffs"
18
+			echo
19
+			echo 'Please run `go generate github.com/docker/docker/cli/compose/schema`'
20
+		} >&2
21
+		false
22
+	else
23
+		echo 'Congratulations! cli/compose/schema/bindata.go is up-to-date.'
24
+	fi
25
+else
26
+    echo 'No cli/compose/schema/data changes in diff.'
27
+fi
... ...
@@ -4,7 +4,9 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
4 4
 source "${SCRIPTDIR}/.validate"
5 5
 
6 6
 IFS=$'\n'
7
-files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) )
7
+files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' |
8
+    grep -v '^vendor/' |
9
+    grep -v '^cli/compose/schema/bindata.go' || true) )
8 10
 unset IFS
9 11
 
10 12
 badFiles=()
... ...
@@ -4,7 +4,7 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
4 4
 source "${SCRIPTDIR}/.validate"
5 5
 
6 6
 IFS=$'\n'
7
-files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' || true) )
7
+files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '^cli/compose/schema/bindata.go' || true) )
8 8
 unset IFS
9 9
 
10 10
 errors=()
... ...
@@ -8,7 +8,6 @@ files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/types/' 'api/swag
8 8
 unset IFS
9 9
 
10 10
 if [ ${#files[@]} -gt 0 ]; then
11
-	# We run vndr to and see if we have a diff afterwards
12 11
 	${SCRIPTDIR}/../generate-swagger-api.sh 2> /dev/null
13 12
 	# Let see if the working directory is clean
14 13
 	diffs="$(git status --porcelain -- api/types/ 2>/dev/null)"
... ...
@@ -134,7 +134,6 @@ github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff
134 134
 github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72
135 135
 
136 136
 # composefile
137
-github.com/aanand/compose-file a3e58764f50597b6217fec07e9bff7225c4a1719
138 137
 github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
139 138
 github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a
140 139
 github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
141 140
deleted file mode 100644
... ...
@@ -1,191 +0,0 @@
1
-
2
-                                 Apache License
3
-                           Version 2.0, January 2004
4
-                        https://www.apache.org/licenses/
5
-
6
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
-
8
-   1. Definitions.
9
-
10
-      "License" shall mean the terms and conditions for use, reproduction,
11
-      and distribution as defined by Sections 1 through 9 of this document.
12
-
13
-      "Licensor" shall mean the copyright owner or entity authorized by
14
-      the copyright owner that is granting the License.
15
-
16
-      "Legal Entity" shall mean the union of the acting entity and all
17
-      other entities that control, are controlled by, or are under common
18
-      control with that entity. For the purposes of this definition,
19
-      "control" means (i) the power, direct or indirect, to cause the
20
-      direction or management of such entity, whether by contract or
21
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
-      outstanding shares, or (iii) beneficial ownership of such entity.
23
-
24
-      "You" (or "Your") shall mean an individual or Legal Entity
25
-      exercising permissions granted by this License.
26
-
27
-      "Source" form shall mean the preferred form for making modifications,
28
-      including but not limited to software source code, documentation
29
-      source, and configuration files.
30
-
31
-      "Object" form shall mean any form resulting from mechanical
32
-      transformation or translation of a Source form, including but
33
-      not limited to compiled object code, generated documentation,
34
-      and conversions to other media types.
35
-
36
-      "Work" shall mean the work of authorship, whether in Source or
37
-      Object form, made available under the License, as indicated by a
38
-      copyright notice that is included in or attached to the work
39
-      (an example is provided in the Appendix below).
40
-
41
-      "Derivative Works" shall mean any work, whether in Source or Object
42
-      form, that is based on (or derived from) the Work and for which the
43
-      editorial revisions, annotations, elaborations, or other modifications
44
-      represent, as a whole, an original work of authorship. For the purposes
45
-      of this License, Derivative Works shall not include works that remain
46
-      separable from, or merely link (or bind by name) to the interfaces of,
47
-      the Work and Derivative Works thereof.
48
-
49
-      "Contribution" shall mean any work of authorship, including
50
-      the original version of the Work and any modifications or additions
51
-      to that Work or Derivative Works thereof, that is intentionally
52
-      submitted to Licensor for inclusion in the Work by the copyright owner
53
-      or by an individual or Legal Entity authorized to submit on behalf of
54
-      the copyright owner. For the purposes of this definition, "submitted"
55
-      means any form of electronic, verbal, or written communication sent
56
-      to the Licensor or its representatives, including but not limited to
57
-      communication on electronic mailing lists, source code control systems,
58
-      and issue tracking systems that are managed by, or on behalf of, the
59
-      Licensor for the purpose of discussing and improving the Work, but
60
-      excluding communication that is conspicuously marked or otherwise
61
-      designated in writing by the copyright owner as "Not a Contribution."
62
-
63
-      "Contributor" shall mean Licensor and any individual or Legal Entity
64
-      on behalf of whom a Contribution has been received by Licensor and
65
-      subsequently incorporated within the Work.
66
-
67
-   2. Grant of Copyright License. Subject to the terms and conditions of
68
-      this License, each Contributor hereby grants to You a perpetual,
69
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
-      copyright license to reproduce, prepare Derivative Works of,
71
-      publicly display, publicly perform, sublicense, and distribute the
72
-      Work and such Derivative Works in Source or Object form.
73
-
74
-   3. Grant of Patent License. Subject to the terms and conditions of
75
-      this License, each Contributor hereby grants to You a perpetual,
76
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
-      (except as stated in this section) patent license to make, have made,
78
-      use, offer to sell, sell, import, and otherwise transfer the Work,
79
-      where such license applies only to those patent claims licensable
80
-      by such Contributor that are necessarily infringed by their
81
-      Contribution(s) alone or by combination of their Contribution(s)
82
-      with the Work to which such Contribution(s) was submitted. If You
83
-      institute patent litigation against any entity (including a
84
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
85
-      or a Contribution incorporated within the Work constitutes direct
86
-      or contributory patent infringement, then any patent licenses
87
-      granted to You under this License for that Work shall terminate
88
-      as of the date such litigation is filed.
89
-
90
-   4. Redistribution. You may reproduce and distribute copies of the
91
-      Work or Derivative Works thereof in any medium, with or without
92
-      modifications, and in Source or Object form, provided that You
93
-      meet the following conditions:
94
-
95
-      (a) You must give any other recipients of the Work or
96
-          Derivative Works a copy of this License; and
97
-
98
-      (b) You must cause any modified files to carry prominent notices
99
-          stating that You changed the files; and
100
-
101
-      (c) You must retain, in the Source form of any Derivative Works
102
-          that You distribute, all copyright, patent, trademark, and
103
-          attribution notices from the Source form of the Work,
104
-          excluding those notices that do not pertain to any part of
105
-          the Derivative Works; and
106
-
107
-      (d) If the Work includes a "NOTICE" text file as part of its
108
-          distribution, then any Derivative Works that You distribute must
109
-          include a readable copy of the attribution notices contained
110
-          within such NOTICE file, excluding those notices that do not
111
-          pertain to any part of the Derivative Works, in at least one
112
-          of the following places: within a NOTICE text file distributed
113
-          as part of the Derivative Works; within the Source form or
114
-          documentation, if provided along with the Derivative Works; or,
115
-          within a display generated by the Derivative Works, if and
116
-          wherever such third-party notices normally appear. The contents
117
-          of the NOTICE file are for informational purposes only and
118
-          do not modify the License. You may add Your own attribution
119
-          notices within Derivative Works that You distribute, alongside
120
-          or as an addendum to the NOTICE text from the Work, provided
121
-          that such additional attribution notices cannot be construed
122
-          as modifying the License.
123
-
124
-      You may add Your own copyright statement to Your modifications and
125
-      may provide additional or different license terms and conditions
126
-      for use, reproduction, or distribution of Your modifications, or
127
-      for any such Derivative Works as a whole, provided Your use,
128
-      reproduction, and distribution of the Work otherwise complies with
129
-      the conditions stated in this License.
130
-
131
-   5. Submission of Contributions. Unless You explicitly state otherwise,
132
-      any Contribution intentionally submitted for inclusion in the Work
133
-      by You to the Licensor shall be under the terms and conditions of
134
-      this License, without any additional terms or conditions.
135
-      Notwithstanding the above, nothing herein shall supersede or modify
136
-      the terms of any separate license agreement you may have executed
137
-      with Licensor regarding such Contributions.
138
-
139
-   6. Trademarks. This License does not grant permission to use the trade
140
-      names, trademarks, service marks, or product names of the Licensor,
141
-      except as required for reasonable and customary use in describing the
142
-      origin of the Work and reproducing the content of the NOTICE file.
143
-
144
-   7. Disclaimer of Warranty. Unless required by applicable law or
145
-      agreed to in writing, Licensor provides the Work (and each
146
-      Contributor provides its Contributions) on an "AS IS" BASIS,
147
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
-      implied, including, without limitation, any warranties or conditions
149
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
-      PARTICULAR PURPOSE. You are solely responsible for determining the
151
-      appropriateness of using or redistributing the Work and assume any
152
-      risks associated with Your exercise of permissions under this License.
153
-
154
-   8. Limitation of Liability. In no event and under no legal theory,
155
-      whether in tort (including negligence), contract, or otherwise,
156
-      unless required by applicable law (such as deliberate and grossly
157
-      negligent acts) or agreed to in writing, shall any Contributor be
158
-      liable to You for damages, including any direct, indirect, special,
159
-      incidental, or consequential damages of any character arising as a
160
-      result of this License or out of the use or inability to use the
161
-      Work (including but not limited to damages for loss of goodwill,
162
-      work stoppage, computer failure or malfunction, or any and all
163
-      other commercial damages or losses), even if such Contributor
164
-      has been advised of the possibility of such damages.
165
-
166
-   9. Accepting Warranty or Additional Liability. While redistributing
167
-      the Work or Derivative Works thereof, You may choose to offer,
168
-      and charge a fee for, acceptance of support, warranty, indemnity,
169
-      or other liability obligations and/or rights consistent with this
170
-      License. However, in accepting such obligations, You may act only
171
-      on Your own behalf and on Your sole responsibility, not on behalf
172
-      of any other Contributor, and only if You agree to indemnify,
173
-      defend, and hold each Contributor harmless for any liability
174
-      incurred by, or claims asserted against, such Contributor by reason
175
-      of your accepting any such warranty or additional liability.
176
-
177
-   END OF TERMS AND CONDITIONS
178
-
179
-   Copyright 2016 Docker, Inc.
180
-
181
-   Licensed under the Apache License, Version 2.0 (the "License");
182
-   you may not use this file except in compliance with the License.
183
-   You may obtain a copy of the License at
184
-
185
-       https://www.apache.org/licenses/LICENSE-2.0
186
-
187
-   Unless required by applicable law or agreed to in writing, software
188
-   distributed under the License is distributed on an "AS IS" BASIS,
189
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
190
-   See the License for the specific language governing permissions and
191
-   limitations under the License.
192 1
deleted file mode 100644
... ...
@@ -1,89 +0,0 @@
1
-package interpolation
2
-
3
-import (
4
-	"fmt"
5
-
6
-	"github.com/aanand/compose-file/template"
7
-	"github.com/aanand/compose-file/types"
8
-)
9
-
10
-func Interpolate(config types.Dict, section string, mapping template.Mapping) (types.Dict, error) {
11
-	out := types.Dict{}
12
-
13
-	for name, item := range config {
14
-		if item == nil {
15
-			out[name] = nil
16
-			continue
17
-		}
18
-		interpolatedItem, err := interpolateSectionItem(name, item.(types.Dict), section, mapping)
19
-		if err != nil {
20
-			return nil, err
21
-		}
22
-		out[name] = interpolatedItem
23
-	}
24
-
25
-	return out, nil
26
-}
27
-
28
-func interpolateSectionItem(
29
-	name string,
30
-	item types.Dict,
31
-	section string,
32
-	mapping template.Mapping,
33
-) (types.Dict, error) {
34
-
35
-	out := types.Dict{}
36
-
37
-	for key, value := range item {
38
-		interpolatedValue, err := recursiveInterpolate(value, mapping)
39
-		if err != nil {
40
-			return nil, fmt.Errorf(
41
-				"Invalid interpolation format for %#v option in %s %#v: %#v",
42
-				key, section, name, err.Template,
43
-			)
44
-		}
45
-		out[key] = interpolatedValue
46
-	}
47
-
48
-	return out, nil
49
-
50
-}
51
-
52
-func recursiveInterpolate(
53
-	value interface{},
54
-	mapping template.Mapping,
55
-) (interface{}, *template.InvalidTemplateError) {
56
-
57
-	switch value := value.(type) {
58
-
59
-	case string:
60
-		return template.Substitute(value, mapping)
61
-
62
-	case types.Dict:
63
-		out := types.Dict{}
64
-		for key, elem := range value {
65
-			interpolatedElem, err := recursiveInterpolate(elem, mapping)
66
-			if err != nil {
67
-				return nil, err
68
-			}
69
-			out[key] = interpolatedElem
70
-		}
71
-		return out, nil
72
-
73
-	case []interface{}:
74
-		out := make([]interface{}, len(value))
75
-		for i, elem := range value {
76
-			interpolatedElem, err := recursiveInterpolate(elem, mapping)
77
-			if err != nil {
78
-				return nil, err
79
-			}
80
-			out[i] = interpolatedElem
81
-		}
82
-		return out, nil
83
-
84
-	default:
85
-		return value, nil
86
-
87
-	}
88
-
89
-}
90 1
deleted file mode 100644
... ...
@@ -1,605 +0,0 @@
1
-package loader
2
-
3
-import (
4
-	"fmt"
5
-	"os"
6
-	"path"
7
-	"reflect"
8
-	"regexp"
9
-	"sort"
10
-	"strings"
11
-
12
-	"github.com/aanand/compose-file/interpolation"
13
-	"github.com/aanand/compose-file/schema"
14
-	"github.com/aanand/compose-file/types"
15
-	"github.com/docker/docker/runconfig/opts"
16
-	units "github.com/docker/go-units"
17
-	shellwords "github.com/mattn/go-shellwords"
18
-	"github.com/mitchellh/mapstructure"
19
-	yaml "gopkg.in/yaml.v2"
20
-)
21
-
22
-var (
23
-	fieldNameRegexp = regexp.MustCompile("[A-Z][a-z0-9]+")
24
-)
25
-
26
-// ParseYAML reads the bytes from a file, parses the bytes into a mapping
27
-// structure, and returns it.
28
-func ParseYAML(source []byte) (types.Dict, error) {
29
-	var cfg interface{}
30
-	if err := yaml.Unmarshal(source, &cfg); err != nil {
31
-		return nil, err
32
-	}
33
-	cfgMap, ok := cfg.(map[interface{}]interface{})
34
-	if !ok {
35
-		return nil, fmt.Errorf("Top-level object must be a mapping")
36
-	}
37
-	converted, err := convertToStringKeysRecursive(cfgMap, "")
38
-	if err != nil {
39
-		return nil, err
40
-	}
41
-	return converted.(types.Dict), nil
42
-}
43
-
44
-// Load reads a ConfigDetails and returns a fully loaded configuration
45
-func Load(configDetails types.ConfigDetails) (*types.Config, error) {
46
-	if len(configDetails.ConfigFiles) < 1 {
47
-		return nil, fmt.Errorf("No files specified")
48
-	}
49
-	if len(configDetails.ConfigFiles) > 1 {
50
-		return nil, fmt.Errorf("Multiple files are not yet supported")
51
-	}
52
-
53
-	configDict := getConfigDict(configDetails)
54
-
55
-	if services, ok := configDict["services"]; ok {
56
-		if servicesDict, ok := services.(types.Dict); ok {
57
-			forbidden := getProperties(servicesDict, types.ForbiddenProperties)
58
-
59
-			if len(forbidden) > 0 {
60
-				return nil, &ForbiddenPropertiesError{Properties: forbidden}
61
-			}
62
-		}
63
-	}
64
-
65
-	if err := schema.Validate(configDict); err != nil {
66
-		return nil, err
67
-	}
68
-
69
-	cfg := types.Config{}
70
-	version := configDict["version"].(string)
71
-	if version != "3" && version != "3.0" {
72
-		return nil, fmt.Errorf(`Unsupported Compose file version: %#v. The only version supported is "3" (or "3.0")`, version)
73
-	}
74
-
75
-	if services, ok := configDict["services"]; ok {
76
-		servicesConfig, err := interpolation.Interpolate(services.(types.Dict), "service", os.LookupEnv)
77
-		if err != nil {
78
-			return nil, err
79
-		}
80
-
81
-		servicesList, err := loadServices(servicesConfig, configDetails.WorkingDir)
82
-		if err != nil {
83
-			return nil, err
84
-		}
85
-
86
-		cfg.Services = servicesList
87
-	}
88
-
89
-	if networks, ok := configDict["networks"]; ok {
90
-		networksConfig, err := interpolation.Interpolate(networks.(types.Dict), "network", os.LookupEnv)
91
-		if err != nil {
92
-			return nil, err
93
-		}
94
-
95
-		networksMapping, err := loadNetworks(networksConfig)
96
-		if err != nil {
97
-			return nil, err
98
-		}
99
-
100
-		cfg.Networks = networksMapping
101
-	}
102
-
103
-	if volumes, ok := configDict["volumes"]; ok {
104
-		volumesConfig, err := interpolation.Interpolate(volumes.(types.Dict), "volume", os.LookupEnv)
105
-		if err != nil {
106
-			return nil, err
107
-		}
108
-
109
-		volumesMapping, err := loadVolumes(volumesConfig)
110
-		if err != nil {
111
-			return nil, err
112
-		}
113
-
114
-		cfg.Volumes = volumesMapping
115
-	}
116
-
117
-	return &cfg, nil
118
-}
119
-
120
-func GetUnsupportedProperties(configDetails types.ConfigDetails) []string {
121
-	unsupported := map[string]bool{}
122
-
123
-	for _, service := range getServices(getConfigDict(configDetails)) {
124
-		serviceDict := service.(types.Dict)
125
-		for _, property := range types.UnsupportedProperties {
126
-			if _, isSet := serviceDict[property]; isSet {
127
-				unsupported[property] = true
128
-			}
129
-		}
130
-	}
131
-
132
-	return sortedKeys(unsupported)
133
-}
134
-
135
-func sortedKeys(set map[string]bool) []string {
136
-	var keys []string
137
-	for key := range set {
138
-		keys = append(keys, key)
139
-	}
140
-	sort.Strings(keys)
141
-	return keys
142
-}
143
-
144
-func GetDeprecatedProperties(configDetails types.ConfigDetails) map[string]string {
145
-	return getProperties(getServices(getConfigDict(configDetails)), types.DeprecatedProperties)
146
-}
147
-
148
-func getProperties(services types.Dict, propertyMap map[string]string) map[string]string {
149
-	output := map[string]string{}
150
-
151
-	for _, service := range services {
152
-		if serviceDict, ok := service.(types.Dict); ok {
153
-			for property, description := range propertyMap {
154
-				if _, isSet := serviceDict[property]; isSet {
155
-					output[property] = description
156
-				}
157
-			}
158
-		}
159
-	}
160
-
161
-	return output
162
-}
163
-
164
-type ForbiddenPropertiesError struct {
165
-	Properties map[string]string
166
-}
167
-
168
-func (e *ForbiddenPropertiesError) Error() string {
169
-	return "Configuration contains forbidden properties"
170
-}
171
-
172
-// TODO: resolve multiple files into a single config
173
-func getConfigDict(configDetails types.ConfigDetails) types.Dict {
174
-	return configDetails.ConfigFiles[0].Config
175
-}
176
-
177
-func getServices(configDict types.Dict) types.Dict {
178
-	if services, ok := configDict["services"]; ok {
179
-		if servicesDict, ok := services.(types.Dict); ok {
180
-			return servicesDict
181
-		}
182
-	}
183
-
184
-	return types.Dict{}
185
-}
186
-
187
-func transform(source map[string]interface{}, target interface{}) error {
188
-	data := mapstructure.Metadata{}
189
-	config := &mapstructure.DecoderConfig{
190
-		DecodeHook: mapstructure.ComposeDecodeHookFunc(
191
-			transformHook,
192
-			mapstructure.StringToTimeDurationHookFunc()),
193
-		Result:   target,
194
-		Metadata: &data,
195
-	}
196
-	decoder, err := mapstructure.NewDecoder(config)
197
-	if err != nil {
198
-		return err
199
-	}
200
-	err = decoder.Decode(source)
201
-	// TODO: log unused keys
202
-	return err
203
-}
204
-
205
-func transformHook(
206
-	source reflect.Type,
207
-	target reflect.Type,
208
-	data interface{},
209
-) (interface{}, error) {
210
-	switch target {
211
-	case reflect.TypeOf(types.External{}):
212
-		return transformExternal(source, target, data)
213
-	case reflect.TypeOf(make(map[string]string, 0)):
214
-		return transformMapStringString(source, target, data)
215
-	case reflect.TypeOf(types.UlimitsConfig{}):
216
-		return transformUlimits(source, target, data)
217
-	case reflect.TypeOf(types.UnitBytes(0)):
218
-		return loadSize(data)
219
-	}
220
-	switch target.Kind() {
221
-	case reflect.Struct:
222
-		return transformStruct(source, target, data)
223
-	}
224
-	return data, nil
225
-}
226
-
227
-// keys needs to be converted to strings for jsonschema
228
-// TODO: don't use types.Dict
229
-func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) {
230
-	if mapping, ok := value.(map[interface{}]interface{}); ok {
231
-		dict := make(types.Dict)
232
-		for key, entry := range mapping {
233
-			str, ok := key.(string)
234
-			if !ok {
235
-				var location string
236
-				if keyPrefix == "" {
237
-					location = "at top level"
238
-				} else {
239
-					location = fmt.Sprintf("in %s", keyPrefix)
240
-				}
241
-				return nil, fmt.Errorf("Non-string key %s: %#v", location, key)
242
-			}
243
-			var newKeyPrefix string
244
-			if keyPrefix == "" {
245
-				newKeyPrefix = str
246
-			} else {
247
-				newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str)
248
-			}
249
-			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
250
-			if err != nil {
251
-				return nil, err
252
-			}
253
-			dict[str] = convertedEntry
254
-		}
255
-		return dict, nil
256
-	}
257
-	if list, ok := value.([]interface{}); ok {
258
-		var convertedList []interface{}
259
-		for index, entry := range list {
260
-			newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index)
261
-			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
262
-			if err != nil {
263
-				return nil, err
264
-			}
265
-			convertedList = append(convertedList, convertedEntry)
266
-		}
267
-		return convertedList, nil
268
-	}
269
-	return value, nil
270
-}
271
-
272
-func loadServices(servicesDict types.Dict, workingDir string) ([]types.ServiceConfig, error) {
273
-	var services []types.ServiceConfig
274
-
275
-	for name, serviceDef := range servicesDict {
276
-		serviceConfig, err := loadService(name, serviceDef.(types.Dict), workingDir)
277
-		if err != nil {
278
-			return nil, err
279
-		}
280
-		services = append(services, *serviceConfig)
281
-	}
282
-
283
-	return services, nil
284
-}
285
-
286
-func loadService(name string, serviceDict types.Dict, workingDir string) (*types.ServiceConfig, error) {
287
-	serviceConfig := &types.ServiceConfig{}
288
-	if err := transform(serviceDict, serviceConfig); err != nil {
289
-		return nil, err
290
-	}
291
-	serviceConfig.Name = name
292
-
293
-	if err := resolveEnvironment(serviceConfig, serviceDict, workingDir); err != nil {
294
-		return nil, err
295
-	}
296
-
297
-	if err := resolveVolumePaths(serviceConfig.Volumes, workingDir); err != nil {
298
-		return nil, err
299
-	}
300
-
301
-	return serviceConfig, nil
302
-}
303
-
304
-func resolveEnvironment(serviceConfig *types.ServiceConfig, serviceDict types.Dict, workingDir string) error {
305
-	environment := make(map[string]string)
306
-
307
-	if envFileVal, ok := serviceDict["env_file"]; ok {
308
-		envFiles := loadStringOrListOfStrings(envFileVal)
309
-
310
-		var envVars []string
311
-
312
-		for _, file := range envFiles {
313
-			filePath := path.Join(workingDir, file)
314
-			fileVars, err := opts.ParseEnvFile(filePath)
315
-			if err != nil {
316
-				return err
317
-			}
318
-			envVars = append(envVars, fileVars...)
319
-		}
320
-
321
-		for k, v := range opts.ConvertKVStringsToMap(envVars) {
322
-			environment[k] = v
323
-		}
324
-	}
325
-
326
-	for k, v := range serviceConfig.Environment {
327
-		environment[k] = v
328
-	}
329
-
330
-	serviceConfig.Environment = environment
331
-
332
-	return nil
333
-}
334
-
335
-func resolveVolumePaths(volumes []string, workingDir string) error {
336
-	for i, mapping := range volumes {
337
-		parts := strings.SplitN(mapping, ":", 2)
338
-		if len(parts) == 1 {
339
-			continue
340
-		}
341
-
342
-		if strings.HasPrefix(parts[0], ".") {
343
-			parts[0] = path.Join(workingDir, parts[0])
344
-		}
345
-		parts[0] = expandUser(parts[0])
346
-
347
-		volumes[i] = strings.Join(parts, ":")
348
-	}
349
-
350
-	return nil
351
-}
352
-
353
-// TODO: make this more robust
354
-func expandUser(path string) string {
355
-	if strings.HasPrefix(path, "~") {
356
-		return strings.Replace(path, "~", os.Getenv("HOME"), 1)
357
-	}
358
-	return path
359
-}
360
-
361
-func transformUlimits(
362
-	source reflect.Type,
363
-	target reflect.Type,
364
-	data interface{},
365
-) (interface{}, error) {
366
-	switch value := data.(type) {
367
-	case int:
368
-		return types.UlimitsConfig{Single: value}, nil
369
-	case types.Dict:
370
-		ulimit := types.UlimitsConfig{}
371
-		ulimit.Soft = value["soft"].(int)
372
-		ulimit.Hard = value["hard"].(int)
373
-		return ulimit, nil
374
-	default:
375
-		return data, fmt.Errorf("invalid type %T for ulimits", value)
376
-	}
377
-}
378
-
379
-func loadNetworks(source types.Dict) (map[string]types.NetworkConfig, error) {
380
-	networks := make(map[string]types.NetworkConfig)
381
-	err := transform(source, &networks)
382
-	if err != nil {
383
-		return networks, err
384
-	}
385
-	for name, network := range networks {
386
-		if network.External.External && network.External.Name == "" {
387
-			network.External.Name = name
388
-			networks[name] = network
389
-		}
390
-	}
391
-	return networks, nil
392
-}
393
-
394
-func loadVolumes(source types.Dict) (map[string]types.VolumeConfig, error) {
395
-	volumes := make(map[string]types.VolumeConfig)
396
-	err := transform(source, &volumes)
397
-	if err != nil {
398
-		return volumes, err
399
-	}
400
-	for name, volume := range volumes {
401
-		if volume.External.External && volume.External.Name == "" {
402
-			volume.External.Name = name
403
-			volumes[name] = volume
404
-		}
405
-	}
406
-	return volumes, nil
407
-}
408
-
409
-func transformStruct(
410
-	source reflect.Type,
411
-	target reflect.Type,
412
-	data interface{},
413
-) (interface{}, error) {
414
-	structValue, ok := data.(map[string]interface{})
415
-	if !ok {
416
-		// FIXME: this is necessary because of convertToStringKeysRecursive
417
-		structValue, ok = data.(types.Dict)
418
-		if !ok {
419
-			panic(fmt.Sprintf(
420
-				"transformStruct called with non-map type: %T, %s", data, data))
421
-		}
422
-	}
423
-
424
-	var err error
425
-	for i := 0; i < target.NumField(); i++ {
426
-		field := target.Field(i)
427
-		fieldTag := field.Tag.Get("compose")
428
-
429
-		yamlName := toYAMLName(field.Name)
430
-		value, ok := structValue[yamlName]
431
-		if !ok {
432
-			continue
433
-		}
434
-
435
-		structValue[yamlName], err = convertField(
436
-			fieldTag, reflect.TypeOf(value), field.Type, value)
437
-		if err != nil {
438
-			return nil, fmt.Errorf("field %s: %s", yamlName, err.Error())
439
-		}
440
-	}
441
-	return structValue, nil
442
-}
443
-
444
-func transformMapStringString(
445
-	source reflect.Type,
446
-	target reflect.Type,
447
-	data interface{},
448
-) (interface{}, error) {
449
-	switch value := data.(type) {
450
-	case map[string]interface{}:
451
-		return toMapStringString(value), nil
452
-	case types.Dict:
453
-		return toMapStringString(value), nil
454
-	case map[string]string:
455
-		return value, nil
456
-	default:
457
-		return data, fmt.Errorf("invalid type %T for map[string]string", value)
458
-	}
459
-}
460
-
461
-func convertField(
462
-	fieldTag string,
463
-	source reflect.Type,
464
-	target reflect.Type,
465
-	data interface{},
466
-) (interface{}, error) {
467
-	switch fieldTag {
468
-	case "":
469
-		return data, nil
470
-	case "healthcheck":
471
-		return loadHealthcheck(data)
472
-	case "list_or_dict_equals":
473
-		return loadMappingOrList(data, "="), nil
474
-	case "list_or_dict_colon":
475
-		return loadMappingOrList(data, ":"), nil
476
-	case "list_or_struct_map":
477
-		return loadListOrStructMap(data, target)
478
-	case "string_or_list":
479
-		return loadStringOrListOfStrings(data), nil
480
-	case "list_of_strings_or_numbers":
481
-		return loadListOfStringsOrNumbers(data), nil
482
-	case "shell_command":
483
-		return loadShellCommand(data)
484
-	case "size":
485
-		return loadSize(data)
486
-	case "-":
487
-		return nil, nil
488
-	}
489
-	return data, nil
490
-}
491
-
492
-func transformExternal(
493
-	source reflect.Type,
494
-	target reflect.Type,
495
-	data interface{},
496
-) (interface{}, error) {
497
-	switch value := data.(type) {
498
-	case bool:
499
-		return map[string]interface{}{"external": value}, nil
500
-	case types.Dict:
501
-		return map[string]interface{}{"external": true, "name": value["name"]}, nil
502
-	case map[string]interface{}:
503
-		return map[string]interface{}{"external": true, "name": value["name"]}, nil
504
-	default:
505
-		return data, fmt.Errorf("invalid type %T for external", value)
506
-	}
507
-}
508
-
509
-func toYAMLName(name string) string {
510
-	nameParts := fieldNameRegexp.FindAllString(name, -1)
511
-	for i, p := range nameParts {
512
-		nameParts[i] = strings.ToLower(p)
513
-	}
514
-	return strings.Join(nameParts, "_")
515
-}
516
-
517
-func loadListOrStructMap(value interface{}, target reflect.Type) (interface{}, error) {
518
-	if list, ok := value.([]interface{}); ok {
519
-		mapValue := map[interface{}]interface{}{}
520
-		for _, name := range list {
521
-			mapValue[name] = nil
522
-		}
523
-		return mapValue, nil
524
-	}
525
-
526
-	return value, nil
527
-}
528
-
529
-func loadListOfStringsOrNumbers(value interface{}) []string {
530
-	list := value.([]interface{})
531
-	result := make([]string, len(list))
532
-	for i, item := range list {
533
-		result[i] = fmt.Sprint(item)
534
-	}
535
-	return result
536
-}
537
-
538
-func loadStringOrListOfStrings(value interface{}) []string {
539
-	if list, ok := value.([]interface{}); ok {
540
-		result := make([]string, len(list))
541
-		for i, item := range list {
542
-			result[i] = fmt.Sprint(item)
543
-		}
544
-		return result
545
-	}
546
-	return []string{value.(string)}
547
-}
548
-
549
-func loadMappingOrList(mappingOrList interface{}, sep string) map[string]string {
550
-	if mapping, ok := mappingOrList.(types.Dict); ok {
551
-		return toMapStringString(mapping)
552
-	}
553
-	if list, ok := mappingOrList.([]interface{}); ok {
554
-		result := make(map[string]string)
555
-		for _, value := range list {
556
-			parts := strings.SplitN(value.(string), sep, 2)
557
-			if len(parts) == 1 {
558
-				result[parts[0]] = ""
559
-			} else {
560
-				result[parts[0]] = parts[1]
561
-			}
562
-		}
563
-		return result
564
-	}
565
-	panic(fmt.Errorf("expected a map or a slice, got: %#v", mappingOrList))
566
-}
567
-
568
-func loadShellCommand(value interface{}) (interface{}, error) {
569
-	if str, ok := value.(string); ok {
570
-		return shellwords.Parse(str)
571
-	}
572
-	return value, nil
573
-}
574
-
575
-func loadHealthcheck(value interface{}) (interface{}, error) {
576
-	if str, ok := value.(string); ok {
577
-		return append([]string{"CMD-SHELL"}, str), nil
578
-	}
579
-	return value, nil
580
-}
581
-
582
-func loadSize(value interface{}) (int64, error) {
583
-	switch value := value.(type) {
584
-	case int:
585
-		return int64(value), nil
586
-	case string:
587
-		return units.RAMInBytes(value)
588
-	}
589
-	panic(fmt.Errorf("invalid type for size %T", value))
590
-}
591
-
592
-func toMapStringString(value map[string]interface{}) map[string]string {
593
-	output := make(map[string]string)
594
-	for key, value := range value {
595
-		output[key] = toString(value)
596
-	}
597
-	return output
598
-}
599
-
600
-func toString(value interface{}) string {
601
-	if value == nil {
602
-		return ""
603
-	}
604
-	return fmt.Sprint(value)
605
-}
606 1
deleted file mode 100644
... ...
@@ -1,237 +0,0 @@
1
-// Code generated by go-bindata.
2
-// sources:
3
-// data/config_schema_v3.0.json
4
-// DO NOT EDIT!
5
-
6
-package schema
7
-
8
-import (
9
-	"bytes"
10
-	"compress/gzip"
11
-	"fmt"
12
-	"io"
13
-	"io/ioutil"
14
-	"os"
15
-	"path/filepath"
16
-	"strings"
17
-	"time"
18
-)
19
-
20
-func bindataRead(data []byte, name string) ([]byte, error) {
21
-	gz, err := gzip.NewReader(bytes.NewBuffer(data))
22
-	if err != nil {
23
-		return nil, fmt.Errorf("Read %q: %v", name, err)
24
-	}
25
-
26
-	var buf bytes.Buffer
27
-	_, err = io.Copy(&buf, gz)
28
-	clErr := gz.Close()
29
-
30
-	if err != nil {
31
-		return nil, fmt.Errorf("Read %q: %v", name, err)
32
-	}
33
-	if clErr != nil {
34
-		return nil, err
35
-	}
36
-
37
-	return buf.Bytes(), nil
38
-}
39
-
40
-type asset struct {
41
-	bytes []byte
42
-	info  os.FileInfo
43
-}
44
-
45
-type bindataFileInfo struct {
46
-	name    string
47
-	size    int64
48
-	mode    os.FileMode
49
-	modTime time.Time
50
-}
51
-
52
-func (fi bindataFileInfo) Name() string {
53
-	return fi.name
54
-}
55
-func (fi bindataFileInfo) Size() int64 {
56
-	return fi.size
57
-}
58
-func (fi bindataFileInfo) Mode() os.FileMode {
59
-	return fi.mode
60
-}
61
-func (fi bindataFileInfo) ModTime() time.Time {
62
-	return fi.modTime
63
-}
64
-func (fi bindataFileInfo) IsDir() bool {
65
-	return false
66
-}
67
-func (fi bindataFileInfo) Sys() interface{} {
68
-	return nil
69
-}
70
-
71
-var _dataConfig_schema_v30Json = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5a\x4d\x93\xdb\x28\x13\xbe\xfb\x57\x4c\x29\xb9\xc5\x33\x93\xaa\x37\xf5\x56\x6d\x6e\x7b\xdc\xd3\xee\x79\x5d\x8a\x0a\x4b\xd8\x26\x23\x09\x02\xc8\x89\x93\xf2\x7f\x5f\x10\x92\x0c\x88\x2f\xdb\x4a\x66\x0f\x3b\x87\xa9\x19\xe8\x6e\xfa\xe3\xa1\x69\x1a\xfd\x58\x3d\x3c\x64\x6f\x59\x79\x80\x0d\xc8\x3e\x3e\x64\x07\xce\xc9\xc7\xe7\xe7\xcf\x0c\xb7\x8f\x6a\xf4\x09\xd3\xfd\x73\x45\xc1\x8e\x3f\xbe\xff\xf0\xac\xc6\xde\x64\x6b\xc9\x87\x2a\xc9\x52\xe2\x76\x87\xf6\x85\x9a\x29\x8e\xff\x7b\x7a\xff\x24\xd9\x15\x09\x3f\x11\x28\x89\xf0\xf6\x33\x2c\xb9\x1a\xa3\xf0\x4b\x87\x28\x94\xcc\x9b\xec\x08\x29\x43\x82\x3a\x5f\xaf\xe4\x1c\xa1\x98\x40\xca\x11\x64\x62\x56\x2a\x27\xc6\x46\x92\x71\x40\x13\xcb\x38\x45\xed\x3e\xeb\x87\xcf\xbd\x04\x31\xc9\x20\x3d\xa2\x52\x93\x30\xa9\xfa\xe6\xf9\x22\xff\x79\x22\x5b\xdb\x52\x35\x65\xfb\x71\x02\x38\x87\xb4\xfd\x6b\xae\x5b\x3f\xfd\x69\x03\x1e\xbf\xff\xfe\xf8\xf7\xfb\xc7\xdf\x9e\x8a\xc7\xfc\xdd\x5b\x63\x5a\xfa\x97\xc2\x9d\x5a\xbe\x82\x3b\xd4\x22\x2e\xac\x99\xd6\xcf\x26\xca\xf3\xf0\xd7\x79\x5a\x18\x54\x55\x4f\x0c\x6a\x63\xed\x1d\xa8\x19\x34\x6d\x6e\x21\xff\x8a\xe9\x4b\xcc\xe6\x89\xec\x95\x6c\x1e\xd6\x77\xd8\x6c\x9a\x73\xc4\x75\xd7\x44\x23\x38\x52\xbd\x92\x31\x6a\xf9\xfb\xe2\xb7\x1a\x8d\x0e\xd2\x2a\x0a\x6d\xed\x5e\x41\x03\xed\x2e\x57\xb9\xd0\xe6\xf7\xd5\xe4\x2c\x8f\x97\x2a\x48\x6a\x7c\x92\x63\x1e\x7f\x28\x82\x06\xb6\x3c\x9b\x5c\x20\xf8\xb6\x1d\xaa\x2b\xdb\xa3\xb8\x85\x7f\x4a\x11\x1b\x6d\xf0\x41\x48\xb6\x36\xb6\x26\xa7\x9f\x37\xfe\xf3\x07\x7c\x9a\xf7\xd8\x32\xcd\x8b\xdc\xc5\xe1\x37\xde\x1b\x15\x5e\x5a\xb9\x00\x97\x2f\x90\xee\x50\x0d\x53\x39\x00\xdd\xb3\x80\xcb\x6a\xc4\x78\x81\x69\x51\x21\xa1\xfd\xd9\x62\x9f\xc9\x8b\xe3\x69\x62\xd5\xfe\xcb\x57\x0e\x81\x59\x09\x48\x21\xc4\x19\x76\x00\x4a\xc1\x29\x5b\x0b\x00\x71\xd8\x30\xb7\x89\x0f\x59\xd7\xa2\x2f\x1d\xfc\x63\x20\xe1\xb4\x83\xb6\xdc\x4a\x28\xb7\xbc\xe0\x3d\xc5\x1d\x29\x08\xa0\x12\x60\x61\xf7\x8b\xb8\x36\x0d\x68\x97\x42\xdd\x35\x76\x24\x
78\x5e\x60\x0e\xa0\x16\xd2\xa2\x05\x4d\x0c\x48\x72\xd7\xc1\xb6\x62\x85\x3a\xff\x82\x30\xda\x15\x8a\x9f\x59\x02\xa6\xc3\x70\xd1\x78\x54\x6d\x08\xd8\x4a\x8c\x84\xb6\xd4\x2d\xb3\x18\x0b\x06\x01\x2d\x0f\x37\xf2\xe3\x46\xb8\x2f\xc5\x77\x02\x28\xf4\x44\x30\x52\x78\xf9\xd7\x01\x01\xb6\xc7\x62\xca\x25\x57\xbb\x41\x70\x23\x8a\xdb\x66\xdc\x0d\x29\x09\x66\x4a\xf2\x92\xff\x1b\xc1\x0c\xda\x8e\xb1\x0c\xd4\xa7\x26\x53\x0d\x9f\x8c\x1c\x9b\xd1\x70\xe1\x94\xb6\x6b\xb6\x90\xca\x92\xce\xa0\xdc\x61\xda\x00\xa9\xec\xb8\xb6\x36\x6d\x78\xda\x81\x3c\xdd\x81\xba\x0d\xf2\x58\x07\xb5\xf0\x4e\xfb\xb2\x3c\xc4\x85\x78\x0a\x8a\x03\x66\x3c\x3d\x87\x6b\xec\x07\x08\x6a\x7e\x10\x65\x71\xf9\x12\x60\xd7\xa9\x0c\x6e\xb1\x6c\x0a\xc8\x51\x03\xf6\x71\x22\x52\xc6\x48\x6a\xb0\x85\xf5\x4d\x76\x2e\xea\x7c\x4d\x2c\xde\xef\x25\xa9\x0f\x71\xb3\xca\x65\x98\x8e\x9d\xf9\x15\x45\xe2\x46\x91\x7a\x80\x63\x72\x29\xb8\xec\xc9\x78\x01\xa2\x14\x0a\x56\x9f\x06\xe9\xa7\x27\x55\x7c\x06\x76\x55\xff\x57\x5d\x67\xb9\x5d\x2e\xc8\x9f\xf9\x98\x39\x62\x59\x98\x56\x50\x18\x51\x69\x40\x29\xeb\x06\x0a\x99\x27\xae\x17\xd2\xa1\xd8\x2f\x1a\x5c\xf9\x00\x3a\x23\xb6\x7d\xe3\xcd\xd4\x57\x1f\x84\x3d\xdb\xd5\xf5\x63\x52\xe8\xa2\x17\x88\x88\x35\x3e\xf5\x52\xd5\xbc\xa8\x1b\x87\x58\x4f\x07\x6a\x04\x18\x8c\x6f\x76\xaf\x23\x0d\x69\x88\x1c\x3f\x24\x62\xc2\xc5\xfb\xff\x20\xaf\x87\xd5\x2b\x33\xbd\x46\x8e\x88\xba\xa8\xd2\x6f\x37\x97\x22\x79\x64\xb7\xfd\xe4\x12\x9e\xa0\xca\x9f\x2b\xfa\x0c\xa1\x6f\x30\x82\x29\x9f\xed\xae\x5f\x73\xdc\xab\xa5\xef\x3e\xed\x89\x48\xdc\xa2\x5c\xda\x43\xf3\xd6\xb2\xc5\xb8\x86\xa0\x35\x52\x0f\x85\xa0\x12\x25\x73\x7d\x4a\xa0\x64\x1c\xd0\xe8\x85\x82\xc1\xb2\xa3\x88\x9f\x0a\x71\x1e\x2c\x5e\x67\xb0\x43\x53\x30\xf4\x1d\x9a\xd1\xbc\xe4\xfb\x41\x50\x6e\xf0\xf0\x0a\xb5\x42\x1b\xd8\x46\x4d\x64\x1c\x13\x21\x7f\x2f\x30\x17\x35\x53\x92\xee\x29\x28\x61\x21\xb0\x89\x70\xe5\x62\x58\xeb\xb1\xad\x3a\x0a\x24\x9e\x0d\x31\xbc\x21\xbb\x1b\x6f\x07\x9c\xc7\x63\xd6\xd5\xa8\x41\x7e\x30\x3b\xb2\x64\x42\x22\x57\x49\xdc\x9d\xbb\x03\x79\xfb\xa2\xa9\xb8\x66\x
08\x6c\x52\x57\xba\x0b\x94\x0e\xe1\xca\x21\xa1\x64\x38\x00\x6a\x46\x29\xa0\x47\xcf\xc0\xf0\x8e\xbb\x19\x5c\x05\x85\x53\x2f\xa3\x83\xdb\xcb\x5b\x0f\x8a\xe4\x4e\xfa\xab\x72\xb2\xad\x46\xee\x4d\x8b\x67\x67\x5a\xec\x58\xb4\xba\xd3\xfb\x8b\x8b\xee\x64\x59\xc2\x48\x64\x57\xc8\xad\xc2\xca\x52\xf7\x8a\x0e\xaf\x75\x9b\x18\x05\xb8\x7a\x7d\x3a\xa9\xdd\xef\xdb\x4c\x80\x1b\x4f\x89\x4b\x97\xd4\xd3\xf8\x93\xf8\xa0\x47\x23\x79\xb8\x7c\xca\x51\x03\x71\xc7\x23\x54\x14\x8a\x31\xcb\xf3\x43\xa6\x33\x84\x89\xb4\x9c\x5a\x0a\xfe\xd2\x4b\x7b\x85\x18\xd8\x5a\xfd\xbf\x29\x47\xdd\x14\x5e\x25\xf6\xd2\x3b\x8d\x04\x57\xa3\x5c\x20\xb6\x81\xda\x5c\x0b\x19\xa9\x51\x09\x58\x2c\xcb\xdc\x71\x85\xec\x48\x05\x38\x2c\xd4\x53\xd2\x55\x79\x3d\x90\xd0\x09\xa0\xa0\xae\xa1\x58\xb4\x49\x49\x90\x22\x06\x35\x38\xdd\x74\xe0\xf5\xec\x3b\x80\xea\x8e\xc2\x02\x94\x7c\x78\xad\x8a\x20\x53\x38\x5f\x38\x06\x3b\x33\x45\xda\x92\x0d\xf8\x56\x8c\xcb\xf6\x24\xce\x6d\xe5\x2d\xbc\x52\x6f\x7f\x1a\x12\x18\xee\x68\x39\x73\xf6\xcd\x21\xba\x1c\xe4\x1e\xc4\x8c\x2b\xce\x4c\x17\x13\x32\x29\x4d\x97\xf3\x28\x7f\xf4\xdc\x18\x2a\xc1\x82\x60\x81\xf6\xd3\x52\x16\x0a\x48\x2b\x27\xa7\x00\xe2\x4e\x04\x4a\x38\xc8\x3a\xa7\x21\x3c\xba\x59\x7b\x86\xaf\xa8\xad\xf0\xd7\x2b\x16\x5c\x0e\x4a\xa4\x16\x45\xa6\x95\xef\xee\x75\xb4\xd0\x1d\x08\x53\xaf\x3e\xd6\xef\x35\xeb\x8e\x53\x7d\xc2\x67\x24\xeb\x4f\x74\xf1\xb7\x4e\x4f\xa6\x2f\x49\x17\xed\xd8\x34\xb0\xc1\xd4\x09\xc0\x80\x8d\x89\x4f\xd3\x31\x0b\x47\xb2\x05\x0e\xb5\xa4\x0e\xdf\x40\x25\x2f\x74\x8b\xdf\x24\xe2\x5d\xbc\x3c\x9e\x8f\x10\x01\xcd\x52\x9b\x23\xb9\xe7\x99\x39\x8f\x60\x63\xed\x79\xaf\x40\xa9\xeb\xec\x17\xc4\xb4\x8e\xeb\x3e\x50\xb0\x6e\x2b\x10\x12\x82\xe6\xe5\xc7\xf9\x10\x9b\x7e\x05\x39\xfb\x2f\x1c\xf7\xe5\xbc\xf1\xb9\xc2\x13\xd5\xcd\x54\x48\xae\x27\x5f\xe5\xc9\x21\xf6\xbe\x15\x2c\xa7\xff\x95\xf5\xdd\x1d\x69\x71\xf8\xb4\x22\x92\x32\x06\xaa\xff\x32\xc6\x20\xe5\xf5\xf1\x15\x38\x13\x6f\xbc\x1c\x5c\x01\x1a\xab\xab\xa4\x81\x67\x7e\x73\x0c\xc5\x39\xb9\x27\x3e\x70\xe4\xa6\x1a\x36\xd9\xc7\xf9\x67\x6b\x66\x0a\x
0d\x35\x1c\x46\x12\x4f\x8f\xd4\x5a\x74\x70\x5e\xd8\xf2\x05\x61\xfb\xf4\x2e\x70\x50\x84\xde\xae\x7e\x52\x86\x5d\xa0\x99\xe3\x8e\xa9\x55\x5c\x8e\xde\x9d\x7f\x7b\xe5\xc9\x54\x1a\xff\xec\x4b\x2c\x69\x67\x7b\x9a\x75\x36\x7e\x98\x5d\x36\xf5\x15\x55\x6e\xf8\xc7\x22\x51\x2f\xc1\x5a\x9e\xc8\xf5\x7a\xdb\x17\x46\xe7\xf7\x59\x76\x8f\x6f\xfc\x4e\x2a\x0f\x6f\xf6\xd5\xf8\xfb\xbc\x3a\xaf\xfe\x09\x00\x00\xff\xff\x37\x89\x5b\xf1\x5b\x2a\x00\x00")
72
-
73
-func dataConfig_schema_v30JsonBytes() ([]byte, error) {
74
-	return bindataRead(
75
-		_dataConfig_schema_v30Json,
76
-		"data/config_schema_v3.0.json",
77
-	)
78
-}
79
-
80
-func dataConfig_schema_v30Json() (*asset, error) {
81
-	bytes, err := dataConfig_schema_v30JsonBytes()
82
-	if err != nil {
83
-		return nil, err
84
-	}
85
-
86
-	info := bindataFileInfo{name: "data/config_schema_v3.0.json", size: 10843, mode: os.FileMode(420), modTime: time.Unix(1479392593, 0)}
87
-	a := &asset{bytes: bytes, info: info}
88
-	return a, nil
89
-}
90
-
91
-// Asset loads and returns the asset for the given name.
92
-// It returns an error if the asset could not be found or
93
-// could not be loaded.
94
-func Asset(name string) ([]byte, error) {
95
-	cannonicalName := strings.Replace(name, "\\", "/", -1)
96
-	if f, ok := _bindata[cannonicalName]; ok {
97
-		a, err := f()
98
-		if err != nil {
99
-			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
100
-		}
101
-		return a.bytes, nil
102
-	}
103
-	return nil, fmt.Errorf("Asset %s not found", name)
104
-}
105
-
106
-// MustAsset is like Asset but panics when Asset would return an error.
107
-// It simplifies safe initialization of global variables.
108
-func MustAsset(name string) []byte {
109
-	a, err := Asset(name)
110
-	if err != nil {
111
-		panic("asset: Asset(" + name + "): " + err.Error())
112
-	}
113
-
114
-	return a
115
-}
116
-
117
-// AssetInfo loads and returns the asset info for the given name.
118
-// It returns an error if the asset could not be found or
119
-// could not be loaded.
120
-func AssetInfo(name string) (os.FileInfo, error) {
121
-	cannonicalName := strings.Replace(name, "\\", "/", -1)
122
-	if f, ok := _bindata[cannonicalName]; ok {
123
-		a, err := f()
124
-		if err != nil {
125
-			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
126
-		}
127
-		return a.info, nil
128
-	}
129
-	return nil, fmt.Errorf("AssetInfo %s not found", name)
130
-}
131
-
132
-// AssetNames returns the names of the assets.
133
-func AssetNames() []string {
134
-	names := make([]string, 0, len(_bindata))
135
-	for name := range _bindata {
136
-		names = append(names, name)
137
-	}
138
-	return names
139
-}
140
-
141
-// _bindata is a table, holding each asset generator, mapped to its name.
142
-var _bindata = map[string]func() (*asset, error){
143
-	"data/config_schema_v3.0.json": dataConfig_schema_v30Json,
144
-}
145
-
146
-// AssetDir returns the file names below a certain
147
-// directory embedded in the file by go-bindata.
148
-// For example if you run go-bindata on data/... and data contains the
149
-// following hierarchy:
150
-//     data/
151
-//       foo.txt
152
-//       img/
153
-//         a.png
154
-//         b.png
155
-// then AssetDir("data") would return []string{"foo.txt", "img"}
156
-// AssetDir("data/img") would return []string{"a.png", "b.png"}
157
-// AssetDir("foo.txt") and AssetDir("notexist") would return an error
158
-// AssetDir("") will return []string{"data"}.
159
-func AssetDir(name string) ([]string, error) {
160
-	node := _bintree
161
-	if len(name) != 0 {
162
-		cannonicalName := strings.Replace(name, "\\", "/", -1)
163
-		pathList := strings.Split(cannonicalName, "/")
164
-		for _, p := range pathList {
165
-			node = node.Children[p]
166
-			if node == nil {
167
-				return nil, fmt.Errorf("Asset %s not found", name)
168
-			}
169
-		}
170
-	}
171
-	if node.Func != nil {
172
-		return nil, fmt.Errorf("Asset %s not found", name)
173
-	}
174
-	rv := make([]string, 0, len(node.Children))
175
-	for childName := range node.Children {
176
-		rv = append(rv, childName)
177
-	}
178
-	return rv, nil
179
-}
180
-
181
-type bintree struct {
182
-	Func     func() (*asset, error)
183
-	Children map[string]*bintree
184
-}
185
-var _bintree = &bintree{nil, map[string]*bintree{
186
-	"data": &bintree{nil, map[string]*bintree{
187
-		"config_schema_v3.0.json": &bintree{dataConfig_schema_v30Json, map[string]*bintree{}},
188
-	}},
189
-}}
190
-
191
-// RestoreAsset restores an asset under the given directory
192
-func RestoreAsset(dir, name string) error {
193
-	data, err := Asset(name)
194
-	if err != nil {
195
-		return err
196
-	}
197
-	info, err := AssetInfo(name)
198
-	if err != nil {
199
-		return err
200
-	}
201
-	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
202
-	if err != nil {
203
-		return err
204
-	}
205
-	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
206
-	if err != nil {
207
-		return err
208
-	}
209
-	err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
210
-	if err != nil {
211
-		return err
212
-	}
213
-	return nil
214
-}
215
-
216
-// RestoreAssets restores an asset under the given directory recursively
217
-func RestoreAssets(dir, name string) error {
218
-	children, err := AssetDir(name)
219
-	// File
220
-	if err != nil {
221
-		return RestoreAsset(dir, name)
222
-	}
223
-	// Dir
224
-	for _, child := range children {
225
-		err = RestoreAssets(dir, filepath.Join(name, child))
226
-		if err != nil {
227
-			return err
228
-		}
229
-	}
230
-	return nil
231
-}
232
-
233
-func _filePath(dir, name string) string {
234
-	cannonicalName := strings.Replace(name, "\\", "/", -1)
235
-	return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
236
-}
237
-
238 1
deleted file mode 100644
... ...
@@ -1,113 +0,0 @@
1
-package schema
2
-
3
-//go:generate go-bindata -pkg schema data
4
-
5
-import (
6
-	"fmt"
7
-	"strings"
8
-	"time"
9
-
10
-	"github.com/xeipuuv/gojsonschema"
11
-)
12
-
13
-type portsFormatChecker struct{}
14
-
15
-func (checker portsFormatChecker) IsFormat(input string) bool {
16
-	// TODO: implement this
17
-	return true
18
-}
19
-
20
-type durationFormatChecker struct{}
21
-
22
-func (checker durationFormatChecker) IsFormat(input string) bool {
23
-	_, err := time.ParseDuration(input)
24
-	return err == nil
25
-}
26
-
27
-func init() {
28
-	gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{})
29
-	gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{})
30
-	gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{})
31
-}
32
-
33
-// Validate uses the jsonschema to validate the configuration
34
-func Validate(config map[string]interface{}) error {
35
-	schemaData, err := Asset("data/config_schema_v3.0.json")
36
-	if err != nil {
37
-		return err
38
-	}
39
-
40
-	schemaLoader := gojsonschema.NewStringLoader(string(schemaData))
41
-	dataLoader := gojsonschema.NewGoLoader(config)
42
-
43
-	result, err := gojsonschema.Validate(schemaLoader, dataLoader)
44
-	if err != nil {
45
-		return err
46
-	}
47
-
48
-	if !result.Valid() {
49
-		return toError(result)
50
-	}
51
-
52
-	return nil
53
-}
54
-
55
-func toError(result *gojsonschema.Result) error {
56
-	err := getMostSpecificError(result.Errors())
57
-	description := getDescription(err)
58
-	return fmt.Errorf("%s %s", err.Field(), description)
59
-}
60
-
61
-func getDescription(err gojsonschema.ResultError) string {
62
-	if err.Type() == "invalid_type" {
63
-		if expectedType, ok := err.Details()["expected"].(string); ok {
64
-			return fmt.Sprintf("must be a %s", humanReadableType(expectedType))
65
-		}
66
-	}
67
-
68
-	return err.Description()
69
-}
70
-
71
-func humanReadableType(definition string) string {
72
-	if definition[0:1] == "[" {
73
-		allTypes := strings.Split(definition[1:len(definition)-1], ",")
74
-		for i, t := range allTypes {
75
-			allTypes[i] = humanReadableType(t)
76
-		}
77
-		return fmt.Sprintf(
78
-			"%s or %s",
79
-			strings.Join(allTypes[0:len(allTypes)-1], ", "),
80
-			allTypes[len(allTypes)-1],
81
-		)
82
-	}
83
-	if definition == "object" {
84
-		return "mapping"
85
-	}
86
-	if definition == "array" {
87
-		return "list"
88
-	}
89
-	return definition
90
-}
91
-
92
-func getMostSpecificError(errors []gojsonschema.ResultError) gojsonschema.ResultError {
93
-	var mostSpecificError gojsonschema.ResultError
94
-
95
-	for _, err := range errors {
96
-		if mostSpecificError == nil {
97
-			mostSpecificError = err
98
-		} else if specificity(err) > specificity(mostSpecificError) {
99
-			mostSpecificError = err
100
-		} else if specificity(err) == specificity(mostSpecificError) {
101
-			// Invalid type errors win in a tie-breaker for most specific field name
102
-			if err.Type() == "invalid_type" && mostSpecificError.Type() != "invalid_type" {
103
-				mostSpecificError = err
104
-			}
105
-		}
106
-	}
107
-
108
-	return mostSpecificError
109
-}
110
-
111
-func specificity(err gojsonschema.ResultError) int {
112
-	return len(strings.Split(err.Field(), "."))
113
-}
114 1
deleted file mode 100644
... ...
@@ -1,108 +0,0 @@
1
-package template
2
-
3
-import (
4
-	"fmt"
5
-	"regexp"
6
-	"strings"
7
-)
8
-
9
-var delimiter = "\\$"
10
-var substitution = "[_a-z][_a-z0-9]*(?::?-[^}]+)?"
11
-
12
-var patternString = fmt.Sprintf(
13
-	"%s(?i:(?P<escaped>%s)|(?P<named>%s)|{(?P<braced>%s)}|(?P<invalid>))",
14
-	delimiter, delimiter, substitution, substitution,
15
-)
16
-
17
-var pattern = regexp.MustCompile(patternString)
18
-
19
-type InvalidTemplateError struct {
20
-	Template string
21
-}
22
-
23
-func (e InvalidTemplateError) Error() string {
24
-	return fmt.Sprintf("Invalid template: %#v", e.Template)
25
-}
26
-
27
-// A user-supplied function which maps from variable names to values.
28
-// Returns the value as a string and a bool indicating whether
29
-// the value is present, to distinguish between an empty string
30
-// and the absence of a value.
31
-type Mapping func(string) (string, bool)
32
-
33
-func Substitute(template string, mapping Mapping) (result string, err *InvalidTemplateError) {
34
-	defer func() {
35
-		if r := recover(); r != nil {
36
-			if e, ok := r.(*InvalidTemplateError); ok {
37
-				err = e
38
-			} else {
39
-				panic(r)
40
-			}
41
-		}
42
-	}()
43
-
44
-	result = pattern.ReplaceAllStringFunc(template, func(substring string) string {
45
-		matches := pattern.FindStringSubmatch(substring)
46
-		groups := make(map[string]string)
47
-		for i, name := range pattern.SubexpNames() {
48
-			if i != 0 {
49
-				groups[name] = matches[i]
50
-			}
51
-		}
52
-
53
-		substitution := groups["named"]
54
-		if substitution == "" {
55
-			substitution = groups["braced"]
56
-		}
57
-		if substitution != "" {
58
-			// Soft default (fall back if unset or empty)
59
-			if strings.Contains(substitution, ":-") {
60
-				name, defaultValue := partition(substitution, ":-")
61
-				value, ok := mapping(name)
62
-				if !ok || value == "" {
63
-					return defaultValue
64
-				}
65
-				return value
66
-			}
67
-
68
-			// Hard default (fall back if-and-only-if empty)
69
-			if strings.Contains(substitution, "-") {
70
-				name, defaultValue := partition(substitution, "-")
71
-				value, ok := mapping(name)
72
-				if !ok {
73
-					return defaultValue
74
-				}
75
-				return value
76
-			}
77
-
78
-			// No default (fall back to empty string)
79
-			value, ok := mapping(substitution)
80
-			if !ok {
81
-				return ""
82
-			}
83
-			return value
84
-		}
85
-
86
-		if escaped := groups["escaped"]; escaped != "" {
87
-			return escaped
88
-		}
89
-
90
-		panic(&InvalidTemplateError{Template: template})
91
-		return ""
92
-	})
93
-
94
-	return
95
-}
96
-
97
-// Split the string at the first occurrence of sep, and return the part before the separator,
98
-// and the part after the separator.
99
-//
100
-// If the separator is not found, return the string itself, followed by an empty string.
101
-func partition(s, sep string) (string, string) {
102
-	if strings.Contains(s, sep) {
103
-		parts := strings.SplitN(s, sep, 2)
104
-		return parts[0], parts[1]
105
-	} else {
106
-		return s, ""
107
-	}
108
-}
109 1
deleted file mode 100644
... ...
@@ -1,207 +0,0 @@
1
-package types
2
-
3
-import (
4
-	"time"
5
-)
6
-
7
-var UnsupportedProperties = []string{
8
-	"build",
9
-	"cap_add",
10
-	"cap_drop",
11
-	"cgroup_parent",
12
-	"devices",
13
-	"dns",
14
-	"dns_search",
15
-	"domainname",
16
-	"external_links",
17
-	"ipc",
18
-	"links",
19
-	"mac_address",
20
-	"network_mode",
21
-	"privileged",
22
-	"read_only",
23
-	"restart",
24
-	"security_opt",
25
-	"shm_size",
26
-	"stop_signal",
27
-	"tmpfs",
28
-}
29
-
30
-var DeprecatedProperties = map[string]string{
31
-	"container_name": "Setting the container name is not supported.",
32
-	"expose":         "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.",
33
-}
34
-
35
-var ForbiddenProperties = map[string]string{
36
-	"extends":       "Support for `extends` is not implemented yet. Use `docker-compose config` to generate a configuration with all `extends` options resolved, and deploy from that.",
37
-	"volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.",
38
-	"volumes_from":  "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.",
39
-	"cpu_quota":     "Set resource limits using deploy.resources",
40
-	"cpu_shares":    "Set resource limits using deploy.resources",
41
-	"cpuset":        "Set resource limits using deploy.resources",
42
-	"mem_limit":     "Set resource limits using deploy.resources",
43
-	"memswap_limit": "Set resource limits using deploy.resources",
44
-}
45
-
46
-type Dict map[string]interface{}
47
-
48
-type ConfigFile struct {
49
-	Filename string
50
-	Config   Dict
51
-}
52
-
53
-type ConfigDetails struct {
54
-	WorkingDir  string
55
-	ConfigFiles []ConfigFile
56
-	Environment map[string]string
57
-}
58
-
59
-type Config struct {
60
-	Services []ServiceConfig
61
-	Networks map[string]NetworkConfig
62
-	Volumes  map[string]VolumeConfig
63
-}
64
-
65
-type ServiceConfig struct {
66
-	Name string
67
-
68
-	CapAdd          []string `mapstructure:"cap_add"`
69
-	CapDrop         []string `mapstructure:"cap_drop"`
70
-	CgroupParent    string   `mapstructure:"cgroup_parent"`
71
-	Command         []string `compose:"shell_command"`
72
-	ContainerName   string   `mapstructure:"container_name"`
73
-	DependsOn       []string `mapstructure:"depends_on"`
74
-	Deploy          DeployConfig
75
-	Devices         []string
76
-	Dns             []string          `compose:"string_or_list"`
77
-	DnsSearch       []string          `mapstructure:"dns_search" compose:"string_or_list"`
78
-	DomainName      string            `mapstructure:"domainname"`
79
-	Entrypoint      []string          `compose:"shell_command"`
80
-	Environment     map[string]string `compose:"list_or_dict_equals"`
81
-	Expose          []string          `compose:"list_of_strings_or_numbers"`
82
-	ExternalLinks   []string          `mapstructure:"external_links"`
83
-	ExtraHosts      map[string]string `mapstructure:"extra_hosts" compose:"list_or_dict_colon"`
84
-	Hostname        string
85
-	HealthCheck     *HealthCheckConfig
86
-	Image           string
87
-	Ipc             string
88
-	Labels          map[string]string `compose:"list_or_dict_equals"`
89
-	Links           []string
90
-	Logging         *LoggingConfig
91
-	MacAddress      string                           `mapstructure:"mac_address"`
92
-	NetworkMode     string                           `mapstructure:"network_mode"`
93
-	Networks        map[string]*ServiceNetworkConfig `compose:"list_or_struct_map"`
94
-	Pid             string
95
-	Ports           []string `compose:"list_of_strings_or_numbers"`
96
-	Privileged      bool
97
-	ReadOnly        bool `mapstructure:"read_only"`
98
-	Restart         string
99
-	SecurityOpt     []string       `mapstructure:"security_opt"`
100
-	StdinOpen       bool           `mapstructure:"stdin_open"`
101
-	StopGracePeriod *time.Duration `mapstructure:"stop_grace_period"`
102
-	StopSignal      string         `mapstructure:"stop_signal"`
103
-	Tmpfs           []string       `compose:"string_or_list"`
104
-	Tty             bool           `mapstructure:"tty"`
105
-	Ulimits         map[string]*UlimitsConfig
106
-	User            string
107
-	Volumes         []string
108
-	WorkingDir      string `mapstructure:"working_dir"`
109
-}
110
-
111
-type LoggingConfig struct {
112
-	Driver  string
113
-	Options map[string]string
114
-}
115
-
116
-type DeployConfig struct {
117
-	Mode          string
118
-	Replicas      *uint64
119
-	Labels        map[string]string `compose:"list_or_dict_equals"`
120
-	UpdateConfig  *UpdateConfig     `mapstructure:"update_config"`
121
-	Resources     Resources
122
-	RestartPolicy *RestartPolicy `mapstructure:"restart_policy"`
123
-	Placement     Placement
124
-}
125
-
126
-type HealthCheckConfig struct {
127
-	Test     []string `compose:"healthcheck"`
128
-	Timeout  string
129
-	Interval string
130
-	Retries  *uint64
131
-	Disable  bool
132
-}
133
-
134
-type UpdateConfig struct {
135
-	Parallelism     *uint64
136
-	Delay           time.Duration
137
-	FailureAction   string `mapstructure:"failure_action"`
138
-	Monitor         time.Duration
139
-	MaxFailureRatio float32 `mapstructure:"max_failure_ratio"`
140
-}
141
-
142
-type Resources struct {
143
-	Limits       *Resource
144
-	Reservations *Resource
145
-}
146
-
147
-type Resource struct {
148
-	// TODO: types to convert from units and ratios
149
-	NanoCPUs    string    `mapstructure:"cpus"`
150
-	MemoryBytes UnitBytes `mapstructure:"memory"`
151
-}
152
-
153
-type UnitBytes int64
154
-
155
-type RestartPolicy struct {
156
-	Condition   string
157
-	Delay       *time.Duration
158
-	MaxAttempts *uint64 `mapstructure:"max_attempts"`
159
-	Window      *time.Duration
160
-}
161
-
162
-type Placement struct {
163
-	Constraints []string
164
-}
165
-
166
-type ServiceNetworkConfig struct {
167
-	Aliases     []string
168
-	Ipv4Address string `mapstructure:"ipv4_address"`
169
-	Ipv6Address string `mapstructure:"ipv6_address"`
170
-}
171
-
172
-type UlimitsConfig struct {
173
-	Single int
174
-	Soft   int
175
-	Hard   int
176
-}
177
-
178
-type NetworkConfig struct {
179
-	Driver     string
180
-	DriverOpts map[string]string `mapstructure:"driver_opts"`
181
-	Ipam       IPAMConfig
182
-	External   External
183
-	Labels     map[string]string `compose:"list_or_dict_equals"`
184
-}
185
-
186
-type IPAMConfig struct {
187
-	Driver string
188
-	Config []*IPAMPool
189
-}
190
-
191
-type IPAMPool struct {
192
-	Subnet string
193
-}
194
-
195
-type VolumeConfig struct {
196
-	Driver     string
197
-	DriverOpts map[string]string `mapstructure:"driver_opts"`
198
-	External   External
199
-	Labels     map[string]string `compose:"list_or_dict_equals"`
200
-}
201
-
202
-// External identifies a Volume or Network as a reference to a resource that is
203
-// not managed, and should already exist.
204
-type External struct {
205
-	Name     string
206
-	External bool
207
-}
208 1
new file mode 100644
... ...
@@ -0,0 +1,27 @@
0
+Copyright (c) 2013, Patrick Mezard
1
+All rights reserved.
2
+
3
+Redistribution and use in source and binary forms, with or without
4
+modification, are permitted provided that the following conditions are
5
+met:
6
+
7
+    Redistributions of source code must retain the above copyright
8
+notice, this list of conditions and the following disclaimer.
9
+    Redistributions in binary form must reproduce the above copyright
10
+notice, this list of conditions and the following disclaimer in the
11
+documentation and/or other materials provided with the distribution.
12
+    The names of its contributors may not be used to endorse or promote
13
+products derived from this software without specific prior written
14
+permission.
15
+
16
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
17
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
19
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
22
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0 27
new file mode 100644
... ...
@@ -0,0 +1,772 @@
0
+// Package difflib is a partial port of Python difflib module.
1
+//
2
+// It provides tools to compare sequences of strings and generate textual diffs.
3
+//
4
+// The following class and functions have been ported:
5
+//
6
+// - SequenceMatcher
7
+//
8
+// - unified_diff
9
+//
10
+// - context_diff
11
+//
12
+// Getting unified diffs was the main goal of the port. Keep in mind this code
13
+// is mostly suitable to output text differences in a human friendly way, there
14
+// are no guarantees generated diffs are consumable by patch(1).
15
+package difflib
16
+
17
+import (
18
+	"bufio"
19
+	"bytes"
20
+	"fmt"
21
+	"io"
22
+	"strings"
23
+)
24
+
25
// min returns the smaller of a and b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
31
+
32
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
38
+
39
// calculateRatio computes 2.0*M/T, where M is the number of matches and T the
// total number of elements in both sequences. A non-positive total yields 1.0
// (the sequences are trivially identical when both are empty).
func calculateRatio(matches, length int) float64 {
	if length <= 0 {
		return 1.0
	}
	return 2.0 * float64(matches) / float64(length)
}
45
+
46
// Match describes a matching subsequence: a[A:A+Size] == b[B:B+Size].
type Match struct {
	A    int // start index of the match in sequence a
	B    int // start index of the match in sequence b
	Size int // number of matching elements
}
51
+
52
// OpCode describes one edit operation that turns a[I1:I2] into b[J1:J2].
// Tag is one of 'r' (replace), 'd' (delete), 'i' (insert) or 'e' (equal);
// see GetOpCodes for the exact meaning of each tag.
type OpCode struct {
	Tag byte
	I1  int
	I2  int
	J1  int
	J2  int
}
59
+
60
// SequenceMatcher compares sequence of strings. The basic
// algorithm predates, and is a little fancier than, an algorithm
// published in the late 1980's by Ratcliff and Obershelp under the
// hyperbolic name "gestalt pattern matching".  The basic idea is to find
// the longest contiguous matching subsequence that contains no "junk"
// elements (R-O doesn't address junk).  The same idea is then applied
// recursively to the pieces of the sequences to the left and to the right
// of the matching subsequence.  This does not yield minimal edit
// sequences, but does tend to yield matches that "look right" to people.
//
// SequenceMatcher tries to compute a "human-friendly diff" between two
// sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
// longest *contiguous* & junk-free matching subsequence.  That's what
// catches peoples' eyes.  The Windows(tm) windiff has another interesting
// notion, pairing up elements that appear uniquely in each sequence.
// That, and the method here, appear to yield more intuitive difference
// reports than does diff.  This method appears to be the least vulnerable
// to synching up on blocks of "junk lines", though (like blank lines in
// ordinary text files, or maybe "<P>" lines in HTML files).  That may be
// because this is the only method of the 3 that has a *concept* of
// "junk" <wink>.
//
// Timing:  Basic R-O is cubic time worst case and quadratic time expected
// case.  SequenceMatcher is quadratic time for the worst case and has
// expected-case behavior dependent in a complicated way on how many
// elements the sequences have in common; best case time is linear.
type SequenceMatcher struct {
	a              []string            // first sequence
	b              []string            // second sequence
	b2j            map[string][]int    // element of b -> indices at which it occurs (junk/popular removed)
	IsJunk         func(string) bool   // optional predicate marking elements of b as junk
	autoJunk       bool                // enables the "popular element" heuristic in chainB
	bJunk          map[string]struct{} // elements of b classified as junk by IsJunk
	matchingBlocks []Match             // cache for GetMatchingBlocks
	fullBCount     map[string]int      // cache of element counts of b, used by QuickRatio
	bPopular       map[string]struct{} // popular elements purged from b2j by autoJunk
	opCodes        []OpCode            // cache for GetOpCodes
}
98
+
99
// NewMatcher returns a SequenceMatcher comparing sequences a and b, with the
// automatic junk heuristic enabled and no explicit junk predicate.
func NewMatcher(a, b []string) *SequenceMatcher {
	m := SequenceMatcher{autoJunk: true}
	m.SetSeqs(a, b)
	return &m
}
104
+
105
// NewMatcherWithJunk returns a SequenceMatcher comparing sequences a and b,
// using isJunk to classify elements of b as junk; autoJunk toggles the
// automatic "popular element" heuristic.
func NewMatcherWithJunk(a, b []string, autoJunk bool,
	isJunk func(string) bool) *SequenceMatcher {

	m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
	m.SetSeqs(a, b)
	return &m
}
112
+
113
// SetSeqs sets the two sequences to be compared.
func (m *SequenceMatcher) SetSeqs(a, b []string) {
	m.SetSeq1(a)
	m.SetSeq2(b)
}
118
+
119
// SetSeq1 sets the first sequence to be compared. The second sequence to be
// compared is not changed.
//
// SequenceMatcher computes and caches detailed information about the second
// sequence, so if you want to compare one sequence S against many sequences,
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
// sequences.
//
// See also SetSeqs() and SetSeq2().
func (m *SequenceMatcher) SetSeq1(a []string) {
	// NOTE(review): this compares the address of the local parameter with
	// the address of the struct field — those are never equal, so the early
	// return is dead code (it looks like a literal port of Python's
	// "if a is self.a"). Harmless: the caches are just rebuilt.
	if &a == &m.a {
		return
	}
	m.a = a
	// Invalidate results derived from the previous first sequence.
	m.matchingBlocks = nil
	m.opCodes = nil
}
136
+
137
// SetSeq2 sets the second sequence to be compared. The first sequence to be
// compared is not changed.
func (m *SequenceMatcher) SetSeq2(b []string) {
	// NOTE(review): &b == &m.b compares two distinct addresses and is always
	// false, so this guard is dead code (literal port of Python's
	// "if b is self.b"). Harmless: the caches are just rebuilt.
	if &b == &m.b {
		return
	}
	m.b = b
	// Drop every cache that depends on b, then rebuild the b index.
	m.matchingBlocks = nil
	m.opCodes = nil
	m.fullBCount = nil
	m.chainB()
}
149
+
150
+func (m *SequenceMatcher) chainB() {
151
+	// Populate line -> index mapping
152
+	b2j := map[string][]int{}
153
+	for i, s := range m.b {
154
+		indices := b2j[s]
155
+		indices = append(indices, i)
156
+		b2j[s] = indices
157
+	}
158
+
159
+	// Purge junk elements
160
+	m.bJunk = map[string]struct{}{}
161
+	if m.IsJunk != nil {
162
+		junk := m.bJunk
163
+		for s, _ := range b2j {
164
+			if m.IsJunk(s) {
165
+				junk[s] = struct{}{}
166
+			}
167
+		}
168
+		for s, _ := range junk {
169
+			delete(b2j, s)
170
+		}
171
+	}
172
+
173
+	// Purge remaining popular elements
174
+	popular := map[string]struct{}{}
175
+	n := len(m.b)
176
+	if m.autoJunk && n >= 200 {
177
+		ntest := n/100 + 1
178
+		for s, indices := range b2j {
179
+			if len(indices) > ntest {
180
+				popular[s] = struct{}{}
181
+			}
182
+		}
183
+		for s, _ := range popular {
184
+			delete(b2j, s)
185
+		}
186
+	}
187
+	m.bPopular = popular
188
+	m.b2j = b2j
189
+}
190
+
191
+func (m *SequenceMatcher) isBJunk(s string) bool {
192
+	_, ok := m.bJunk[s]
193
+	return ok
194
+}
195
+
196
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
197
+//
198
+// If IsJunk is not defined:
199
+//
200
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
201
+//     alo <= i <= i+k <= ahi
202
+//     blo <= j <= j+k <= bhi
203
+// and for all (i',j',k') meeting those conditions,
204
+//     k >= k'
205
+//     i <= i'
206
+//     and if i == i', j <= j'
207
+//
208
+// In other words, of all maximal matching blocks, return one that
209
+// starts earliest in a, and of all those maximal matching blocks that
210
+// start earliest in a, return the one that starts earliest in b.
211
+//
212
+// If IsJunk is defined, first the longest matching block is
213
+// determined as above, but with the additional restriction that no
214
+// junk element appears in the block.  Then that block is extended as
215
+// far as possible by matching (only) junk elements on both sides.  So
216
+// the resulting block never matches on junk except as identical junk
217
+// happens to be adjacent to an "interesting" match.
218
+//
219
+// If no blocks match, return (alo, blo, 0).
220
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
221
+	// CAUTION:  stripping common prefix or suffix would be incorrect.
222
+	// E.g.,
223
+	//    ab
224
+	//    acab
225
+	// Longest matching block is "ab", but if common prefix is
226
+	// stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
227
+	// strip, so ends up claiming that ab is changed to acab by
228
+	// inserting "ca" in the middle.  That's minimal but unintuitive:
229
+	// "it's obvious" that someone inserted "ac" at the front.
230
+	// Windiff ends up at the same place as diff, but by pairing up
231
+	// the unique 'b's and then matching the first two 'a's.
232
+	besti, bestj, bestsize := alo, blo, 0
233
+
234
+	// find longest junk-free match
235
+	// during an iteration of the loop, j2len[j] = length of longest
236
+	// junk-free match ending with a[i-1] and b[j]
237
+	j2len := map[int]int{}
238
+	for i := alo; i != ahi; i++ {
239
+		// look at all instances of a[i] in b; note that because
240
+		// b2j has no junk keys, the loop is skipped if a[i] is junk
241
+		newj2len := map[int]int{}
242
+		for _, j := range m.b2j[m.a[i]] {
243
+			// a[i] matches b[j]
244
+			if j < blo {
245
+				continue
246
+			}
247
+			if j >= bhi {
248
+				break
249
+			}
250
+			k := j2len[j-1] + 1
251
+			newj2len[j] = k
252
+			if k > bestsize {
253
+				besti, bestj, bestsize = i-k+1, j-k+1, k
254
+			}
255
+		}
256
+		j2len = newj2len
257
+	}
258
+
259
+	// Extend the best by non-junk elements on each end.  In particular,
260
+	// "popular" non-junk elements aren't in b2j, which greatly speeds
261
+	// the inner loop above, but also means "the best" match so far
262
+	// doesn't contain any junk *or* popular non-junk elements.
263
+	for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
264
+		m.a[besti-1] == m.b[bestj-1] {
265
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
266
+	}
267
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
268
+		!m.isBJunk(m.b[bestj+bestsize]) &&
269
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
270
+		bestsize += 1
271
+	}
272
+
273
+	// Now that we have a wholly interesting match (albeit possibly
274
+	// empty!), we may as well suck up the matching junk on each
275
+	// side of it too.  Can't think of a good reason not to, and it
276
+	// saves post-processing the (possibly considerable) expense of
277
+	// figuring out what to do with it.  In the case of an empty
278
+	// interesting match, this is clearly the right thing to do,
279
+	// because no other kind of match is possible in the regions.
280
+	for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
281
+		m.a[besti-1] == m.b[bestj-1] {
282
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
283
+	}
284
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
285
+		m.isBJunk(m.b[bestj+bestsize]) &&
286
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
287
+		bestsize += 1
288
+	}
289
+
290
+	return Match{A: besti, B: bestj, Size: bestsize}
291
+}
292
+
293
// GetMatchingBlocks returns a list of triples describing matching
// subsequences.
//
// Each triple is of the form (i, j, n), and means that
// a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
// adjacent triples in the list, and the second is not the last triple in the
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
// adjacent equal blocks.
//
// The last triple is a dummy, (len(a), len(b), 0), and is the only
// triple with n==0.
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
	if m.matchingBlocks != nil {
		return m.matchingBlocks
	}

	// Recursively split each region around its longest match, collecting
	// matches in order.
	var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
	matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
		match := m.findLongestMatch(alo, ahi, blo, bhi)
		i, j, k := match.A, match.B, match.Size
		if match.Size > 0 {
			if alo < i && blo < j {
				matched = matchBlocks(alo, i, blo, j, matched)
			}
			matched = append(matched, match)
			if i+k < ahi && j+k < bhi {
				matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
			}
		}
		return matched
	}
	matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)

	// It's possible that we have adjacent equal blocks in the
	// matching_blocks list now.
	nonAdjacent := []Match{}
	i1, j1, k1 := 0, 0, 0
	for _, b := range matched {
		// Is this block adjacent to i1, j1, k1?
		i2, j2, k2 := b.A, b.B, b.Size
		if i1+k1 == i2 && j1+k1 == j2 {
			// Yes, so collapse them -- this just increases the length of
			// the first block by the length of the second, and the first
			// block so lengthened remains the block to compare against.
			k1 += k2
		} else {
			// Not adjacent.  Remember the first block (k1==0 means it's
			// the dummy we started with), and make the second block the
			// new block to compare against.
			if k1 > 0 {
				nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
			}
			i1, j1, k1 = i2, j2, k2
		}
	}
	if k1 > 0 {
		nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
	}

	// Terminating dummy block with Size 0.
	nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
	m.matchingBlocks = nonAdjacent
	return m.matchingBlocks
}
356
+
357
+// Return list of 5-tuples describing how to turn a into b.
358
+//
359
+// Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
360
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
361
+// tuple preceding it, and likewise for j1 == the previous j2.
362
+//
363
+// The tags are characters, with these meanings:
364
+//
365
+// 'r' (replace):  a[i1:i2] should be replaced by b[j1:j2]
366
+//
367
+// 'd' (delete):   a[i1:i2] should be deleted, j1==j2 in this case.
368
+//
369
+// 'i' (insert):   b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
370
+//
371
+// 'e' (equal):    a[i1:i2] == b[j1:j2]
372
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
373
+	if m.opCodes != nil {
374
+		return m.opCodes
375
+	}
376
+	i, j := 0, 0
377
+	matching := m.GetMatchingBlocks()
378
+	opCodes := make([]OpCode, 0, len(matching))
379
+	for _, m := range matching {
380
+		//  invariant:  we've pumped out correct diffs to change
381
+		//  a[:i] into b[:j], and the next matching block is
382
+		//  a[ai:ai+size] == b[bj:bj+size]. So we need to pump
383
+		//  out a diff to change a[i:ai] into b[j:bj], pump out
384
+		//  the matching block, and move (i,j) beyond the match
385
+		ai, bj, size := m.A, m.B, m.Size
386
+		tag := byte(0)
387
+		if i < ai && j < bj {
388
+			tag = 'r'
389
+		} else if i < ai {
390
+			tag = 'd'
391
+		} else if j < bj {
392
+			tag = 'i'
393
+		}
394
+		if tag > 0 {
395
+			opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
396
+		}
397
+		i, j = ai+size, bj+size
398
+		// the list of matching blocks is terminated by a
399
+		// sentinel with size 0
400
+		if size > 0 {
401
+			opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
402
+		}
403
+	}
404
+	m.opCodes = opCodes
405
+	return m.opCodes
406
+}
407
+
408
+// Isolate change clusters by eliminating ranges with no changes.
409
+//
410
+// Return a generator of groups with up to n lines of context.
411
+// Each group is in the same format as returned by GetOpCodes().
412
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
413
+	if n < 0 {
414
+		n = 3
415
+	}
416
+	codes := m.GetOpCodes()
417
+	if len(codes) == 0 {
418
+		codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
419
+	}
420
+	// Fixup leading and trailing groups if they show no changes.
421
+	if codes[0].Tag == 'e' {
422
+		c := codes[0]
423
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
424
+		codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
425
+	}
426
+	if codes[len(codes)-1].Tag == 'e' {
427
+		c := codes[len(codes)-1]
428
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
429
+		codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
430
+	}
431
+	nn := n + n
432
+	groups := [][]OpCode{}
433
+	group := []OpCode{}
434
+	for _, c := range codes {
435
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
436
+		// End the current group and start a new one whenever
437
+		// there is a large range with no changes.
438
+		if c.Tag == 'e' && i2-i1 > nn {
439
+			group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
440
+				j1, min(j2, j1+n)})
441
+			groups = append(groups, group)
442
+			group = []OpCode{}
443
+			i1, j1 = max(i1, i2-n), max(j1, j2-n)
444
+		}
445
+		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
446
+	}
447
+	if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
448
+		groups = append(groups, group)
449
+	}
450
+	return groups
451
+}
452
+
453
+// Return a measure of the sequences' similarity (float in [0,1]).
454
+//
455
+// Where T is the total number of elements in both sequences, and
456
+// M is the number of matches, this is 2.0*M / T.
457
+// Note that this is 1 if the sequences are identical, and 0 if
458
+// they have nothing in common.
459
+//
460
+// .Ratio() is expensive to compute if you haven't already computed
461
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
462
+// want to try .QuickRatio() or .RealQuickRation() first to get an
463
+// upper bound.
464
+func (m *SequenceMatcher) Ratio() float64 {
465
+	matches := 0
466
+	for _, m := range m.GetMatchingBlocks() {
467
+		matches += m.Size
468
+	}
469
+	return calculateRatio(matches, len(m.a)+len(m.b))
470
+}
471
+
472
+// Return an upper bound on ratio() relatively quickly.
473
+//
474
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
475
+// is faster to compute.
476
+func (m *SequenceMatcher) QuickRatio() float64 {
477
+	// viewing a and b as multisets, set matches to the cardinality
478
+	// of their intersection; this counts the number of matches
479
+	// without regard to order, so is clearly an upper bound
480
+	if m.fullBCount == nil {
481
+		m.fullBCount = map[string]int{}
482
+		for _, s := range m.b {
483
+			m.fullBCount[s] = m.fullBCount[s] + 1
484
+		}
485
+	}
486
+
487
+	// avail[x] is the number of times x appears in 'b' less the
488
+	// number of times we've seen it in 'a' so far ... kinda
489
+	avail := map[string]int{}
490
+	matches := 0
491
+	for _, s := range m.a {
492
+		n, ok := avail[s]
493
+		if !ok {
494
+			n = m.fullBCount[s]
495
+		}
496
+		avail[s] = n - 1
497
+		if n > 0 {
498
+			matches += 1
499
+		}
500
+	}
501
+	return calculateRatio(matches, len(m.a)+len(m.b))
502
+}
503
+
504
+// Return an upper bound on ratio() very quickly.
505
+//
506
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
507
+// is faster to compute than either .Ratio() or .QuickRatio().
508
+func (m *SequenceMatcher) RealQuickRatio() float64 {
509
+	la, lb := len(m.a), len(m.b)
510
+	return calculateRatio(min(la, lb), la+lb)
511
+}
512
+
513
// formatRangeUnified converts a half-open line range to the unified-diff
// header format ("start" or "start,length").
func formatRangeUnified(start, stop int) string {
	// Per the diff spec at http://www.unix.org/single_unix_specification/
	beginning := start + 1 // lines start numbering with one
	length := stop - start
	if length == 1 {
		return fmt.Sprintf("%d", beginning)
	}
	if length == 0 {
		beginning-- // empty ranges begin at line just before the range
	}
	return fmt.Sprintf("%d,%d", beginning, length)
}
526
+
527
// UnifiedDiff holds the parameters for generating a unified diff between two
// sequences of lines.
type UnifiedDiff struct {
	A        []string // First sequence lines
	FromFile string   // First file name
	FromDate string   // First file time
	B        []string // Second sequence lines
	ToFile   string   // Second file name
	ToDate   string   // Second file time
	Eol      string   // Headers end of line, defaults to LF
	Context  int      // Number of context lines
}
538
+
539
+// Compare two sequences of lines; generate the delta as a unified diff.
540
+//
541
+// Unified diffs are a compact way of showing line changes and a few
542
+// lines of context.  The number of context lines is set by 'n' which
543
+// defaults to three.
544
+//
545
+// By default, the diff control lines (those with ---, +++, or @@) are
546
+// created with a trailing newline.  This is helpful so that inputs
547
+// created from file.readlines() result in diffs that are suitable for
548
+// file.writelines() since both the inputs and outputs have trailing
549
+// newlines.
550
+//
551
+// For inputs that do not have trailing newlines, set the lineterm
552
+// argument to "" so that the output will be uniformly newline free.
553
+//
554
+// The unidiff format normally has a header for filenames and modification
555
+// times.  Any or all of these may be specified using strings for
556
+// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
557
+// The modification times are normally expressed in the ISO 8601 format.
558
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
559
+	buf := bufio.NewWriter(writer)
560
+	defer buf.Flush()
561
+	wf := func(format string, args ...interface{}) error {
562
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
563
+		return err
564
+	}
565
+	ws := func(s string) error {
566
+		_, err := buf.WriteString(s)
567
+		return err
568
+	}
569
+
570
+	if len(diff.Eol) == 0 {
571
+		diff.Eol = "\n"
572
+	}
573
+
574
+	started := false
575
+	m := NewMatcher(diff.A, diff.B)
576
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
577
+		if !started {
578
+			started = true
579
+			fromDate := ""
580
+			if len(diff.FromDate) > 0 {
581
+				fromDate = "\t" + diff.FromDate
582
+			}
583
+			toDate := ""
584
+			if len(diff.ToDate) > 0 {
585
+				toDate = "\t" + diff.ToDate
586
+			}
587
+			if diff.FromFile != "" || diff.ToFile != "" {
588
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
589
+				if err != nil {
590
+					return err
591
+				}
592
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
593
+				if err != nil {
594
+					return err
595
+				}
596
+			}
597
+		}
598
+		first, last := g[0], g[len(g)-1]
599
+		range1 := formatRangeUnified(first.I1, last.I2)
600
+		range2 := formatRangeUnified(first.J1, last.J2)
601
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
602
+			return err
603
+		}
604
+		for _, c := range g {
605
+			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
606
+			if c.Tag == 'e' {
607
+				for _, line := range diff.A[i1:i2] {
608
+					if err := ws(" " + line); err != nil {
609
+						return err
610
+					}
611
+				}
612
+				continue
613
+			}
614
+			if c.Tag == 'r' || c.Tag == 'd' {
615
+				for _, line := range diff.A[i1:i2] {
616
+					if err := ws("-" + line); err != nil {
617
+						return err
618
+					}
619
+				}
620
+			}
621
+			if c.Tag == 'r' || c.Tag == 'i' {
622
+				for _, line := range diff.B[j1:j2] {
623
+					if err := ws("+" + line); err != nil {
624
+						return err
625
+					}
626
+				}
627
+			}
628
+		}
629
+	}
630
+	return nil
631
+}
632
+
633
+// Like WriteUnifiedDiff but returns the diff a string.
634
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
635
+	w := &bytes.Buffer{}
636
+	err := WriteUnifiedDiff(w, diff)
637
+	return string(w.Bytes()), err
638
+}
639
+
640
// formatRangeContext converts a half-open line range to the context-diff
// header format ("start" or "first,last").
func formatRangeContext(start, stop int) string {
	// Per the diff spec at http://www.unix.org/single_unix_specification/
	beginning := start + 1 // lines start numbering with one
	length := stop - start
	if length == 0 {
		beginning-- // empty ranges begin at line just before the range
	}
	if length <= 1 {
		return fmt.Sprintf("%d", beginning)
	}
	return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
}
653
+
654
// ContextDiff holds the parameters for generating a context diff; it shares
// its field layout with UnifiedDiff.
type ContextDiff UnifiedDiff
655
+
656
+// Compare two sequences of lines; generate the delta as a context diff.
657
+//
658
+// Context diffs are a compact way of showing line changes and a few
659
+// lines of context. The number of context lines is set by diff.Context
660
+// which defaults to three.
661
+//
662
+// By default, the diff control lines (those with *** or ---) are
663
+// created with a trailing newline.
664
+//
665
+// For inputs that do not have trailing newlines, set the diff.Eol
666
+// argument to "" so that the output will be uniformly newline free.
667
+//
668
+// The context diff format normally has a header for filenames and
669
+// modification times.  Any or all of these may be specified using
670
+// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
671
+// The modification times are normally expressed in the ISO 8601 format.
672
+// If not specified, the strings default to blanks.
673
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
674
+	buf := bufio.NewWriter(writer)
675
+	defer buf.Flush()
676
+	var diffErr error
677
+	wf := func(format string, args ...interface{}) {
678
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
679
+		if diffErr == nil && err != nil {
680
+			diffErr = err
681
+		}
682
+	}
683
+	ws := func(s string) {
684
+		_, err := buf.WriteString(s)
685
+		if diffErr == nil && err != nil {
686
+			diffErr = err
687
+		}
688
+	}
689
+
690
+	if len(diff.Eol) == 0 {
691
+		diff.Eol = "\n"
692
+	}
693
+
694
+	prefix := map[byte]string{
695
+		'i': "+ ",
696
+		'd': "- ",
697
+		'r': "! ",
698
+		'e': "  ",
699
+	}
700
+
701
+	started := false
702
+	m := NewMatcher(diff.A, diff.B)
703
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
704
+		if !started {
705
+			started = true
706
+			fromDate := ""
707
+			if len(diff.FromDate) > 0 {
708
+				fromDate = "\t" + diff.FromDate
709
+			}
710
+			toDate := ""
711
+			if len(diff.ToDate) > 0 {
712
+				toDate = "\t" + diff.ToDate
713
+			}
714
+			if diff.FromFile != "" || diff.ToFile != "" {
715
+				wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
716
+				wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
717
+			}
718
+		}
719
+
720
+		first, last := g[0], g[len(g)-1]
721
+		ws("***************" + diff.Eol)
722
+
723
+		range1 := formatRangeContext(first.I1, last.I2)
724
+		wf("*** %s ****%s", range1, diff.Eol)
725
+		for _, c := range g {
726
+			if c.Tag == 'r' || c.Tag == 'd' {
727
+				for _, cc := range g {
728
+					if cc.Tag == 'i' {
729
+						continue
730
+					}
731
+					for _, line := range diff.A[cc.I1:cc.I2] {
732
+						ws(prefix[cc.Tag] + line)
733
+					}
734
+				}
735
+				break
736
+			}
737
+		}
738
+
739
+		range2 := formatRangeContext(first.J1, last.J2)
740
+		wf("--- %s ----%s", range2, diff.Eol)
741
+		for _, c := range g {
742
+			if c.Tag == 'r' || c.Tag == 'i' {
743
+				for _, cc := range g {
744
+					if cc.Tag == 'd' {
745
+						continue
746
+					}
747
+					for _, line := range diff.B[cc.J1:cc.J2] {
748
+						ws(prefix[cc.Tag] + line)
749
+					}
750
+				}
751
+				break
752
+			}
753
+		}
754
+	}
755
+	return diffErr
756
+}
757
+
758
+// Like WriteContextDiff but returns the diff a string.
759
+func GetContextDiffString(diff ContextDiff) (string, error) {
760
+	w := &bytes.Buffer{}
761
+	err := WriteContextDiff(w, diff)
762
+	return string(w.Bytes()), err
763
+}
764
+
765
// SplitLines splits s on "\n" while keeping the separators attached, so the
// result can be fed directly to UnifiedDiff and ContextDiff structures. Note
// that the final element always receives a trailing "\n", even when s does
// not end with one.
func SplitLines(s string) []string {
	split := strings.SplitAfter(s, "\n")
	last := len(split) - 1
	split[last] = split[last] + "\n"
	return split
}
0 772
new file mode 100644
... ...
@@ -0,0 +1,22 @@
0
+Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
1
+
2
+Please consider promoting this project if you find it useful.
3
+
4
+Permission is hereby granted, free of charge, to any person 
5
+obtaining a copy of this software and associated documentation 
6
+files (the "Software"), to deal in the Software without restriction, 
7
+including without limitation the rights to use, copy, modify, merge, 
8
+publish, distribute, sublicense, and/or sell copies of the Software, 
9
+and to permit persons to whom the Software is furnished to do so, 
10
+subject to the following conditions:
11
+
12
+The above copyright notice and this permission notice shall be included
13
+in all copies or substantial portions of the Software.
14
+
15
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
16
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 
17
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
18
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, 
19
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT 
20
+OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
21
+OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0 22
new file mode 100644
... ...
@@ -0,0 +1,387 @@
0
+/*
1
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
2
+* THIS FILE MUST NOT BE EDITED BY HAND
3
+*/
4
+
5
+package assert
6
+
7
+import (
8
+
9
+	http "net/http"
10
+	url "net/url"
11
+	time "time"
12
+)
13
+
14
+
15
+// Condition uses a Comparison to assert a complex condition.
16
+func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
17
+	return Condition(a.t, comp, msgAndArgs...)
18
+}
19
+
20
+
21
+// Contains asserts that the specified string, list(array, slice...) or map contains the
22
+// specified substring or element.
23
+// 
24
+//    a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
25
+//    a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
26
+//    a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
27
+// 
28
+// Returns whether the assertion was successful (true) or not (false).
29
+func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
30
+	return Contains(a.t, s, contains, msgAndArgs...)
31
+}
32
+
33
+
34
+// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
35
+// a slice or a channel with len == 0.
36
+// 
37
+//  a.Empty(obj)
38
+// 
39
+// Returns whether the assertion was successful (true) or not (false).
40
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
41
+	return Empty(a.t, object, msgAndArgs...)
42
+}
43
+
44
+
45
+// Equal asserts that two objects are equal.
46
+// 
47
+//    a.Equal(123, 123, "123 and 123 should be equal")
48
+// 
49
+// Returns whether the assertion was successful (true) or not (false).
50
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
51
+	return Equal(a.t, expected, actual, msgAndArgs...)
52
+}
53
+
54
+
55
+// EqualError asserts that a function returned an error (i.e. not `nil`)
56
+// and that it is equal to the provided error.
57
+// 
58
+//   actualObj, err := SomeFunction()
59
+//   if assert.Error(t, err, "An error was expected") {
60
+// 	   assert.Equal(t, err, expectedError)
61
+//   }
62
+// 
63
+// Returns whether the assertion was successful (true) or not (false).
64
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
65
+	return EqualError(a.t, theError, errString, msgAndArgs...)
66
+}
67
+
68
+
69
+// EqualValues asserts that two objects are equal or convertable to the same types
70
+// and equal.
71
+// 
72
+//    a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
73
+// 
74
+// Returns whether the assertion was successful (true) or not (false).
75
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
76
+	return EqualValues(a.t, expected, actual, msgAndArgs...)
77
+}
78
+
79
+
80
+// Error asserts that a function returned an error (i.e. not `nil`).
81
+// 
82
+//   actualObj, err := SomeFunction()
83
+//   if a.Error(err, "An error was expected") {
84
+// 	   assert.Equal(t, err, expectedError)
85
+//   }
86
+// 
87
+// Returns whether the assertion was successful (true) or not (false).
88
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
89
+	return Error(a.t, err, msgAndArgs...)
90
+}
91
+
92
+
93
+// Exactly asserts that two objects are equal is value and type.
94
+// 
95
+//    a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
96
+// 
97
+// Returns whether the assertion was successful (true) or not (false).
98
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
99
+	return Exactly(a.t, expected, actual, msgAndArgs...)
100
+}
101
+
102
+
103
+// Fail reports a failure through
104
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
105
+	return Fail(a.t, failureMessage, msgAndArgs...)
106
+}
107
+
108
+
109
+// FailNow fails test
110
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
111
+	return FailNow(a.t, failureMessage, msgAndArgs...)
112
+}
113
+
114
+
115
+// False asserts that the specified value is false.
116
+// 
117
+//    a.False(myBool, "myBool should be false")
118
+// 
119
+// Returns whether the assertion was successful (true) or not (false).
120
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
121
+	return False(a.t, value, msgAndArgs...)
122
+}
123
+
124
+
125
+// HTTPBodyContains asserts that a specified handler returns a
126
+// body that contains a string.
127
+// 
128
+//  a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
129
+// 
130
+// Returns whether the assertion was successful (true) or not (false).
131
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
132
+	return HTTPBodyContains(a.t, handler, method, url, values, str)
133
+}
134
+
135
+
136
+// HTTPBodyNotContains asserts that a specified handler returns a
137
+// body that does not contain a string.
138
+// 
139
+//  a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
140
+// 
141
+// Returns whether the assertion was successful (true) or not (false).
142
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
143
+	return HTTPBodyNotContains(a.t, handler, method, url, values, str)
144
+}
145
+
146
+
147
+// HTTPError asserts that a specified handler returns an error status code.
148
+// 
149
+//  a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
150
+// 
151
+// Returns whether the assertion was successful (true) or not (false).
152
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool {
153
+	return HTTPError(a.t, handler, method, url, values)
154
+}
155
+
156
+
157
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
158
+// 
159
+//  a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
160
+// 
161
+// Returns whether the assertion was successful (true) or not (false).
162
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool {
163
+	return HTTPRedirect(a.t, handler, method, url, values)
164
+}
165
+
166
+
167
+// HTTPSuccess asserts that a specified handler returns a success status code.
168
+// 
169
+//  a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
170
+// 
171
+// Returns whether the assertion was successful (true) or not (false).
172
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool {
173
+	return HTTPSuccess(a.t, handler, method, url, values)
174
+}
175
+
176
+
177
+// Implements asserts that an object is implemented by the specified interface.
178
+// 
179
+//    a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
180
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
181
+	return Implements(a.t, interfaceObject, object, msgAndArgs...)
182
+}
183
+
184
+
185
+// InDelta asserts that the two numerals are within delta of each other.
186
+// 
187
+// 	 a.InDelta(math.Pi, (22 / 7.0), 0.01)
188
+// 
189
+// Returns whether the assertion was successful (true) or not (false).
190
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
191
+	return InDelta(a.t, expected, actual, delta, msgAndArgs...)
192
+}
193
+
194
+
195
+// InDeltaSlice is the same as InDelta, except it compares two slices.
196
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
197
+	return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
198
+}
199
+
200
+
201
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
202
+// 
203
+// Returns whether the assertion was successful (true) or not (false).
204
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
205
+	return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
206
+}
207
+
208
+
209
+// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
210
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
211
+	return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
212
+}
213
+
214
+
215
+// IsType asserts that the specified objects are of the same type.
216
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
217
+	return IsType(a.t, expectedType, object, msgAndArgs...)
218
+}
219
+
220
+
221
+// JSONEq asserts that two JSON strings are equivalent.
222
+// 
223
+//  a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
224
+// 
225
+// Returns whether the assertion was successful (true) or not (false).
226
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
227
+	return JSONEq(a.t, expected, actual, msgAndArgs...)
228
+}
229
+
230
+
231
+// Len asserts that the specified object has specific length.
232
+// Len also fails if the object has a type that len() not accept.
233
+// 
234
+//    a.Len(mySlice, 3, "The size of slice is not 3")
235
+// 
236
+// Returns whether the assertion was successful (true) or not (false).
237
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
238
+	return Len(a.t, object, length, msgAndArgs...)
239
+}
240
+
241
+
242
+// Nil asserts that the specified object is nil.
243
+// 
244
+//    a.Nil(err, "err should be nothing")
245
+// 
246
+// Returns whether the assertion was successful (true) or not (false).
247
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
248
+	return Nil(a.t, object, msgAndArgs...)
249
+}
250
+
251
+
252
+// NoError asserts that a function returned no error (i.e. `nil`).
253
+// 
254
+//   actualObj, err := SomeFunction()
255
+//   if a.NoError(err) {
256
+// 	   assert.Equal(t, actualObj, expectedObj)
257
+//   }
258
+// 
259
+// Returns whether the assertion was successful (true) or not (false).
260
+func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
261
+	return NoError(a.t, err, msgAndArgs...)
262
+}
263
+
264
+
265
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
266
+// specified substring or element.
267
+// 
268
+//    a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
269
+//    a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
270
+//    a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
271
+// 
272
+// Returns whether the assertion was successful (true) or not (false).
273
+func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
274
+	return NotContains(a.t, s, contains, msgAndArgs...)
275
+}
276
+
277
+
278
+// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
279
+// a slice or a channel with len == 0.
280
+// 
281
+//  if a.NotEmpty(obj) {
282
+//    assert.Equal(t, "two", obj[1])
283
+//  }
284
+// 
285
+// Returns whether the assertion was successful (true) or not (false).
286
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
287
+	return NotEmpty(a.t, object, msgAndArgs...)
288
+}
289
+
290
+
291
+// NotEqual asserts that the specified values are NOT equal.
292
+// 
293
+//    a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
294
+// 
295
+// Returns whether the assertion was successful (true) or not (false).
296
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
297
+	return NotEqual(a.t, expected, actual, msgAndArgs...)
298
+}
299
+
300
+
301
+// NotNil asserts that the specified object is not nil.
302
+// 
303
+//    a.NotNil(err, "err should be something")
304
+// 
305
+// Returns whether the assertion was successful (true) or not (false).
306
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
307
+	return NotNil(a.t, object, msgAndArgs...)
308
+}
309
+
310
+
311
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
312
+// 
313
+//   a.NotPanics(func(){
314
+//     RemainCalm()
315
+//   }, "Calling RemainCalm() should NOT panic")
316
+// 
317
+// Returns whether the assertion was successful (true) or not (false).
318
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
319
+	return NotPanics(a.t, f, msgAndArgs...)
320
+}
321
+
322
+
323
+// NotRegexp asserts that a specified regexp does not match a string.
324
+// 
325
+//  a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
326
+//  a.NotRegexp("^start", "it's not starting")
327
+// 
328
+// Returns whether the assertion was successful (true) or not (false).
329
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
330
+	return NotRegexp(a.t, rx, str, msgAndArgs...)
331
+}
332
+
333
+
334
+// NotZero asserts that i is not the zero value for its type and returns the truth.
335
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
336
+	return NotZero(a.t, i, msgAndArgs...)
337
+}
338
+
339
+
340
+// Panics asserts that the code inside the specified PanicTestFunc panics.
341
+// 
342
+//   a.Panics(func(){
343
+//     GoCrazy()
344
+//   }, "Calling GoCrazy() should panic")
345
+// 
346
+// Returns whether the assertion was successful (true) or not (false).
347
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
348
+	return Panics(a.t, f, msgAndArgs...)
349
+}
350
+
351
+
352
+// Regexp asserts that a specified regexp matches a string.
353
+// 
354
+//  a.Regexp(regexp.MustCompile("start"), "it's starting")
355
+//  a.Regexp("start...$", "it's not starting")
356
+// 
357
+// Returns whether the assertion was successful (true) or not (false).
358
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
359
+	return Regexp(a.t, rx, str, msgAndArgs...)
360
+}
361
+
362
+
363
+// True asserts that the specified value is true.
364
+// 
365
+//    a.True(myBool, "myBool should be true")
366
+// 
367
+// Returns whether the assertion was successful (true) or not (false).
368
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
369
+	return True(a.t, value, msgAndArgs...)
370
+}
371
+
372
+
373
+// WithinDuration asserts that the two times are within duration delta of each other.
374
+// 
375
+//   a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
376
+// 
377
+// Returns whether the assertion was successful (true) or not (false).
378
+func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
379
+	return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
380
+}
381
+
382
+
383
+// Zero asserts that i is the zero value for its type and returns the truth.
384
+func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
385
+	return Zero(a.t, i, msgAndArgs...)
386
+}
0 387
new file mode 100644
... ...
@@ -0,0 +1,1004 @@
0
+package assert
1
+
2
+import (
3
+	"bufio"
4
+	"bytes"
5
+	"encoding/json"
6
+	"fmt"
7
+	"math"
8
+	"reflect"
9
+	"regexp"
10
+	"runtime"
11
+	"strings"
12
+	"time"
13
+	"unicode"
14
+	"unicode/utf8"
15
+
16
+	"github.com/davecgh/go-spew/spew"
17
+	"github.com/pmezard/go-difflib/difflib"
18
+)
19
+
20
+// TestingT is an interface wrapper around *testing.T
21
+type TestingT interface {
22
+	Errorf(format string, args ...interface{})
23
+}
24
+
25
+// Comparison a custom function that returns true on success and false on failure
26
+type Comparison func() (success bool)
27
+
28
+/*
29
+	Helper functions
30
+*/
31
+
32
+// ObjectsAreEqual determines if two objects are considered equal.
33
+//
34
+// This function does no assertion of any kind.
35
+func ObjectsAreEqual(expected, actual interface{}) bool {
36
+
37
+	if expected == nil || actual == nil {
38
+		return expected == actual
39
+	}
40
+
41
+	return reflect.DeepEqual(expected, actual)
42
+
43
+}
44
+
45
+// ObjectsAreEqualValues gets whether two objects are equal, or if their
46
+// values are equal.
47
+func ObjectsAreEqualValues(expected, actual interface{}) bool {
48
+	if ObjectsAreEqual(expected, actual) {
49
+		return true
50
+	}
51
+
52
+	actualType := reflect.TypeOf(actual)
53
+	if actualType == nil {
54
+		return false
55
+	}
56
+	expectedValue := reflect.ValueOf(expected)
57
+	if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
58
+		// Attempt comparison after type conversion
59
+		return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
60
+	}
61
+
62
+	return false
63
+}
64
+
65
+/* CallerInfo is necessary because the assert functions use the testing object
66
+internally, causing it to print the file:line of the assert method, rather than where
67
+the problem actually occured in calling code.*/
68
+
69
+// CallerInfo returns an array of strings containing the file and line number
70
+// of each stack frame leading from the current test to the assert call that
71
+// failed.
72
+func CallerInfo() []string {
73
+
74
+	pc := uintptr(0)
75
+	file := ""
76
+	line := 0
77
+	ok := false
78
+	name := ""
79
+
80
+	callers := []string{}
81
+	for i := 0; ; i++ {
82
+		pc, file, line, ok = runtime.Caller(i)
83
+		if !ok {
84
+			return nil
85
+		}
86
+
87
+		// This is a huge edge case, but it will panic if this is the case, see #180
88
+		if file == "<autogenerated>" {
89
+			break
90
+		}
91
+
92
+		parts := strings.Split(file, "/")
93
+		dir := parts[len(parts)-2]
94
+		file = parts[len(parts)-1]
95
+		if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
96
+			callers = append(callers, fmt.Sprintf("%s:%d", file, line))
97
+		}
98
+
99
+		f := runtime.FuncForPC(pc)
100
+		if f == nil {
101
+			break
102
+		}
103
+		name = f.Name()
104
+		// Drop the package
105
+		segments := strings.Split(name, ".")
106
+		name = segments[len(segments)-1]
107
+		if isTest(name, "Test") ||
108
+			isTest(name, "Benchmark") ||
109
+			isTest(name, "Example") {
110
+			break
111
+		}
112
+	}
113
+
114
+	return callers
115
+}
116
+
117
+// Stolen from the `go test` tool.
118
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
119
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
120
+// We don't want TesticularCancer.
121
+func isTest(name, prefix string) bool {
122
+	if !strings.HasPrefix(name, prefix) {
123
+		return false
124
+	}
125
+	if len(name) == len(prefix) { // "Test" is ok
126
+		return true
127
+	}
128
+	rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
129
+	return !unicode.IsLower(rune)
130
+}
131
+
132
+// getWhitespaceString returns a string that is long enough to overwrite the default
133
+// output from the go testing framework.
134
+func getWhitespaceString() string {
135
+
136
+	_, file, line, ok := runtime.Caller(1)
137
+	if !ok {
138
+		return ""
139
+	}
140
+	parts := strings.Split(file, "/")
141
+	file = parts[len(parts)-1]
142
+
143
+	return strings.Repeat(" ", len(fmt.Sprintf("%s:%d:      ", file, line)))
144
+
145
+}
146
+
147
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
148
+	if len(msgAndArgs) == 0 || msgAndArgs == nil {
149
+		return ""
150
+	}
151
+	if len(msgAndArgs) == 1 {
152
+		return msgAndArgs[0].(string)
153
+	}
154
+	if len(msgAndArgs) > 1 {
155
+		return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
156
+	}
157
+	return ""
158
+}
159
+
160
+// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's
161
+// test printing (see inner comment for specifics)
162
+func indentMessageLines(message string, tabs int) string {
163
+	outBuf := new(bytes.Buffer)
164
+
165
+	for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
166
+		if i != 0 {
167
+			outBuf.WriteRune('\n')
168
+		}
169
+		for ii := 0; ii < tabs; ii++ {
170
+			outBuf.WriteRune('\t')
171
+			// Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter
172
+			// by 1 prematurely.
173
+			if ii == 0 && i > 0 {
174
+				ii++
175
+			}
176
+		}
177
+		outBuf.WriteString(scanner.Text())
178
+	}
179
+
180
+	return outBuf.String()
181
+}
182
+
183
+type failNower interface {
184
+	FailNow()
185
+}
186
+
187
+// FailNow fails test
188
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
189
+	Fail(t, failureMessage, msgAndArgs...)
190
+
191
+	// We cannot extend TestingT with FailNow() and
192
+	// maintain backwards compatibility, so we fallback
193
+	// to panicking when FailNow is not available in
194
+	// TestingT.
195
+	// See issue #263
196
+
197
+	if t, ok := t.(failNower); ok {
198
+		t.FailNow()
199
+	} else {
200
+		panic("test failed and t is missing `FailNow()`")
201
+	}
202
+	return false
203
+}
204
+
205
+// Fail reports a failure through
206
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
207
+
208
+	message := messageFromMsgAndArgs(msgAndArgs...)
209
+
210
+	errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t")
211
+	if len(message) > 0 {
212
+		t.Errorf("\r%s\r\tError Trace:\t%s\n"+
213
+			"\r\tError:%s\n"+
214
+			"\r\tMessages:\t%s\n\r",
215
+			getWhitespaceString(),
216
+			errorTrace,
217
+			indentMessageLines(failureMessage, 2),
218
+			message)
219
+	} else {
220
+		t.Errorf("\r%s\r\tError Trace:\t%s\n"+
221
+			"\r\tError:%s\n\r",
222
+			getWhitespaceString(),
223
+			errorTrace,
224
+			indentMessageLines(failureMessage, 2))
225
+	}
226
+
227
+	return false
228
+}
229
+
230
+// Implements asserts that an object is implemented by the specified interface.
231
+//
232
+//    assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
233
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
234
+
235
+	interfaceType := reflect.TypeOf(interfaceObject).Elem()
236
+
237
+	if !reflect.TypeOf(object).Implements(interfaceType) {
238
+		return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
239
+	}
240
+
241
+	return true
242
+
243
+}
244
+
245
+// IsType asserts that the specified objects are of the same type.
246
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
247
+
248
+	if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
249
+		return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
250
+	}
251
+
252
+	return true
253
+}
254
+
255
+// Equal asserts that two objects are equal.
256
+//
257
+//    assert.Equal(t, 123, 123, "123 and 123 should be equal")
258
+//
259
+// Returns whether the assertion was successful (true) or not (false).
260
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
261
+
262
+	if !ObjectsAreEqual(expected, actual) {
263
+		diff := diff(expected, actual)
264
+		return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
265
+			"        != %#v (actual)%s", expected, actual, diff), msgAndArgs...)
266
+	}
267
+
268
+	return true
269
+
270
+}
271
+
272
+// EqualValues asserts that two objects are equal or convertable to the same types
273
+// and equal.
274
+//
275
+//    assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
276
+//
277
+// Returns whether the assertion was successful (true) or not (false).
278
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
279
+
280
+	if !ObjectsAreEqualValues(expected, actual) {
281
+		return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
282
+			"        != %#v (actual)", expected, actual), msgAndArgs...)
283
+	}
284
+
285
+	return true
286
+
287
+}
288
+
289
+// Exactly asserts that two objects are equal is value and type.
290
+//
291
+//    assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
292
+//
293
+// Returns whether the assertion was successful (true) or not (false).
294
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
295
+
296
+	aType := reflect.TypeOf(expected)
297
+	bType := reflect.TypeOf(actual)
298
+
299
+	if aType != bType {
300
+		return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...)
301
+	}
302
+
303
+	return Equal(t, expected, actual, msgAndArgs...)
304
+
305
+}
306
+
307
+// NotNil asserts that the specified object is not nil.
308
+//
309
+//    assert.NotNil(t, err, "err should be something")
310
+//
311
+// Returns whether the assertion was successful (true) or not (false).
312
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
313
+	if !isNil(object) {
314
+		return true
315
+	}
316
+	return Fail(t, "Expected value not to be nil.", msgAndArgs...)
317
+}
318
+
319
+// isNil checks if a specified object is nil or not, without Failing.
320
+func isNil(object interface{}) bool {
321
+	if object == nil {
322
+		return true
323
+	}
324
+
325
+	value := reflect.ValueOf(object)
326
+	kind := value.Kind()
327
+	if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
328
+		return true
329
+	}
330
+
331
+	return false
332
+}
333
+
334
+// Nil asserts that the specified object is nil.
335
+//
336
+//    assert.Nil(t, err, "err should be nothing")
337
+//
338
+// Returns whether the assertion was successful (true) or not (false).
339
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
340
+	if isNil(object) {
341
+		return true
342
+	}
343
+	return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
344
+}
345
+
346
+var numericZeros = []interface{}{
347
+	int(0),
348
+	int8(0),
349
+	int16(0),
350
+	int32(0),
351
+	int64(0),
352
+	uint(0),
353
+	uint8(0),
354
+	uint16(0),
355
+	uint32(0),
356
+	uint64(0),
357
+	float32(0),
358
+	float64(0),
359
+}
360
+
361
+// isEmpty gets whether the specified object is considered empty or not.
362
+func isEmpty(object interface{}) bool {
363
+
364
+	if object == nil {
365
+		return true
366
+	} else if object == "" {
367
+		return true
368
+	} else if object == false {
369
+		return true
370
+	}
371
+
372
+	for _, v := range numericZeros {
373
+		if object == v {
374
+			return true
375
+		}
376
+	}
377
+
378
+	objValue := reflect.ValueOf(object)
379
+
380
+	switch objValue.Kind() {
381
+	case reflect.Map:
382
+		fallthrough
383
+	case reflect.Slice, reflect.Chan:
384
+		{
385
+			return (objValue.Len() == 0)
386
+		}
387
+	case reflect.Struct:
388
+		switch object.(type) {
389
+		case time.Time:
390
+			return object.(time.Time).IsZero()
391
+		}
392
+	case reflect.Ptr:
393
+		{
394
+			if objValue.IsNil() {
395
+				return true
396
+			}
397
+			switch object.(type) {
398
+			case *time.Time:
399
+				return object.(*time.Time).IsZero()
400
+			default:
401
+				return false
402
+			}
403
+		}
404
+	}
405
+	return false
406
+}
407
+
408
+// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
409
+// a slice or a channel with len == 0.
410
+//
411
+//  assert.Empty(t, obj)
412
+//
413
+// Returns whether the assertion was successful (true) or not (false).
414
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
415
+
416
+	pass := isEmpty(object)
417
+	if !pass {
418
+		Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
419
+	}
420
+
421
+	return pass
422
+
423
+}
424
+
425
+// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
426
+// a slice or a channel with len == 0.
427
+//
428
+//  if assert.NotEmpty(t, obj) {
429
+//    assert.Equal(t, "two", obj[1])
430
+//  }
431
+//
432
+// Returns whether the assertion was successful (true) or not (false).
433
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
434
+
435
+	pass := !isEmpty(object)
436
+	if !pass {
437
+		Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
438
+	}
439
+
440
+	return pass
441
+
442
+}
443
+
444
+// getLen try to get length of object.
445
+// return (false, 0) if impossible.
446
+func getLen(x interface{}) (ok bool, length int) {
447
+	v := reflect.ValueOf(x)
448
+	defer func() {
449
+		if e := recover(); e != nil {
450
+			ok = false
451
+		}
452
+	}()
453
+	return true, v.Len()
454
+}
455
+
456
+// Len asserts that the specified object has specific length.
457
+// Len also fails if the object has a type that len() not accept.
458
+//
459
+//    assert.Len(t, mySlice, 3, "The size of slice is not 3")
460
+//
461
+// Returns whether the assertion was successful (true) or not (false).
462
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
463
+	ok, l := getLen(object)
464
+	if !ok {
465
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
466
+	}
467
+
468
+	if l != length {
469
+		return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
470
+	}
471
+	return true
472
+}
473
+
474
+// True asserts that the specified value is true.
475
+//
476
+//    assert.True(t, myBool, "myBool should be true")
477
+//
478
+// Returns whether the assertion was successful (true) or not (false).
479
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
480
+
481
+	if value != true {
482
+		return Fail(t, "Should be true", msgAndArgs...)
483
+	}
484
+
485
+	return true
486
+
487
+}
488
+
489
+// False asserts that the specified value is false.
490
+//
491
+//    assert.False(t, myBool, "myBool should be false")
492
+//
493
+// Returns whether the assertion was successful (true) or not (false).
494
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
495
+
496
+	if value != false {
497
+		return Fail(t, "Should be false", msgAndArgs...)
498
+	}
499
+
500
+	return true
501
+
502
+}
503
+
504
+// NotEqual asserts that the specified values are NOT equal.
505
+//
506
+//    assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
507
+//
508
+// Returns whether the assertion was successful (true) or not (false).
509
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
510
+
511
+	if ObjectsAreEqual(expected, actual) {
512
+		return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
513
+	}
514
+
515
+	return true
516
+
517
+}
518
+
519
+// containsElement try loop over the list check if the list includes the element.
520
+// return (false, false) if impossible.
521
+// return (true, false) if element was not found.
522
+// return (true, true) if element was found.
523
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
524
+
525
+	listValue := reflect.ValueOf(list)
526
+	elementValue := reflect.ValueOf(element)
527
+	defer func() {
528
+		if e := recover(); e != nil {
529
+			ok = false
530
+			found = false
531
+		}
532
+	}()
533
+
534
+	if reflect.TypeOf(list).Kind() == reflect.String {
535
+		return true, strings.Contains(listValue.String(), elementValue.String())
536
+	}
537
+
538
+	if reflect.TypeOf(list).Kind() == reflect.Map {
539
+		mapKeys := listValue.MapKeys()
540
+		for i := 0; i < len(mapKeys); i++ {
541
+			if ObjectsAreEqual(mapKeys[i].Interface(), element) {
542
+				return true, true
543
+			}
544
+		}
545
+		return true, false
546
+	}
547
+
548
+	for i := 0; i < listValue.Len(); i++ {
549
+		if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
550
+			return true, true
551
+		}
552
+	}
553
+	return true, false
554
+
555
+}
556
+
557
+// Contains asserts that the specified string, list(array, slice...) or map contains the
558
+// specified substring or element.
559
+//
560
+//    assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
561
+//    assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
562
+//    assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
563
+//
564
+// Returns whether the assertion was successful (true) or not (false).
565
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
566
+
567
+	ok, found := includeElement(s, contains)
568
+	if !ok {
569
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
570
+	}
571
+	if !found {
572
+		return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
573
+	}
574
+
575
+	return true
576
+
577
+}
578
+
579
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
580
+// specified substring or element.
581
+//
582
+//    assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
583
+//    assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
584
+//    assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
585
+//
586
+// Returns whether the assertion was successful (true) or not (false).
587
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
588
+
589
+	ok, found := includeElement(s, contains)
590
+	if !ok {
591
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
592
+	}
593
+	if found {
594
+		return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
595
+	}
596
+
597
+	return true
598
+
599
+}
600
+
601
+// Condition uses a Comparison to assert a complex condition.
602
+func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
603
+	result := comp()
604
+	if !result {
605
+		Fail(t, "Condition failed!", msgAndArgs...)
606
+	}
607
+	return result
608
+}
609
+
610
+// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics
611
+// methods, and represents a simple func that takes no arguments, and returns nothing.
612
+type PanicTestFunc func()
613
+
614
+// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
615
+func didPanic(f PanicTestFunc) (bool, interface{}) {
616
+
617
+	didPanic := false
618
+	var message interface{}
619
+	func() {
620
+
621
+		defer func() {
622
+			if message = recover(); message != nil {
623
+				didPanic = true
624
+			}
625
+		}()
626
+
627
+		// call the target function
628
+		f()
629
+
630
+	}()
631
+
632
+	return didPanic, message
633
+
634
+}
635
+
636
+// Panics asserts that the code inside the specified PanicTestFunc panics.
637
+//
638
+//   assert.Panics(t, func(){
639
+//     GoCrazy()
640
+//   }, "Calling GoCrazy() should panic")
641
+//
642
+// Returns whether the assertion was successful (true) or not (false).
643
+func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
644
+
645
+	if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
646
+		return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
647
+	}
648
+
649
+	return true
650
+}
651
+
652
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
653
+//
654
+//   assert.NotPanics(t, func(){
655
+//     RemainCalm()
656
+//   }, "Calling RemainCalm() should NOT panic")
657
+//
658
+// Returns whether the assertion was successful (true) or not (false).
659
+func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
660
+
661
+	if funcDidPanic, panicValue := didPanic(f); funcDidPanic {
662
+		return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
663
+	}
664
+
665
+	return true
666
+}
667
+
668
+// WithinDuration asserts that the two times are within duration delta of each other.
669
+//
670
+//   assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
671
+//
672
+// Returns whether the assertion was successful (true) or not (false).
673
+func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
674
+
675
+	dt := expected.Sub(actual)
676
+	if dt < -delta || dt > delta {
677
+		return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
678
+	}
679
+
680
+	return true
681
+}
682
+
683
+func toFloat(x interface{}) (float64, bool) {
684
+	var xf float64
685
+	xok := true
686
+
687
+	switch xn := x.(type) {
688
+	case uint8:
689
+		xf = float64(xn)
690
+	case uint16:
691
+		xf = float64(xn)
692
+	case uint32:
693
+		xf = float64(xn)
694
+	case uint64:
695
+		xf = float64(xn)
696
+	case int:
697
+		xf = float64(xn)
698
+	case int8:
699
+		xf = float64(xn)
700
+	case int16:
701
+		xf = float64(xn)
702
+	case int32:
703
+		xf = float64(xn)
704
+	case int64:
705
+		xf = float64(xn)
706
+	case float32:
707
+		xf = float64(xn)
708
+	case float64:
709
+		xf = float64(xn)
710
+	default:
711
+		xok = false
712
+	}
713
+
714
+	return xf, xok
715
+}
716
+
717
+// InDelta asserts that the two numerals are within delta of each other.
718
+//
719
+// 	 assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
720
+//
721
+// Returns whether the assertion was successful (true) or not (false).
722
+func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
723
+
724
+	af, aok := toFloat(expected)
725
+	bf, bok := toFloat(actual)
726
+
727
+	if !aok || !bok {
728
+		return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
729
+	}
730
+
731
+	if math.IsNaN(af) {
732
+		return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...)
733
+	}
734
+
735
+	if math.IsNaN(bf) {
736
+		return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...)
737
+	}
738
+
739
+	dt := af - bf
740
+	if dt < -delta || dt > delta {
741
+		return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
742
+	}
743
+
744
+	return true
745
+}
746
+
747
+// InDeltaSlice is the same as InDelta, except it compares two slices.
748
+func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
749
+	if expected == nil || actual == nil ||
750
+		reflect.TypeOf(actual).Kind() != reflect.Slice ||
751
+		reflect.TypeOf(expected).Kind() != reflect.Slice {
752
+		return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
753
+	}
754
+
755
+	actualSlice := reflect.ValueOf(actual)
756
+	expectedSlice := reflect.ValueOf(expected)
757
+
758
+	for i := 0; i < actualSlice.Len(); i++ {
759
+		result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta)
760
+		if !result {
761
+			return result
762
+		}
763
+	}
764
+
765
+	return true
766
+}
767
+
768
+func calcRelativeError(expected, actual interface{}) (float64, error) {
769
+	af, aok := toFloat(expected)
770
+	if !aok {
771
+		return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
772
+	}
773
+	if af == 0 {
774
+		return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
775
+	}
776
+	bf, bok := toFloat(actual)
777
+	if !bok {
778
+		return 0, fmt.Errorf("expected value %q cannot be converted to float", actual)
779
+	}
780
+
781
+	return math.Abs(af-bf) / math.Abs(af), nil
782
+}
783
+
784
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
785
+//
786
+// Returns whether the assertion was successful (true) or not (false).
787
+func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
788
+	actualEpsilon, err := calcRelativeError(expected, actual)
789
+	if err != nil {
790
+		return Fail(t, err.Error(), msgAndArgs...)
791
+	}
792
+	if actualEpsilon > epsilon {
793
+		return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
794
+			"        < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...)
795
+	}
796
+
797
+	return true
798
+}
799
+
800
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
801
+func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
802
+	if expected == nil || actual == nil ||
803
+		reflect.TypeOf(actual).Kind() != reflect.Slice ||
804
+		reflect.TypeOf(expected).Kind() != reflect.Slice {
805
+		return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
806
+	}
807
+
808
+	actualSlice := reflect.ValueOf(actual)
809
+	expectedSlice := reflect.ValueOf(expected)
810
+
811
+	for i := 0; i < actualSlice.Len(); i++ {
812
+		result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
813
+		if !result {
814
+			return result
815
+		}
816
+	}
817
+
818
+	return true
819
+}
820
+
821
+/*
822
+	Errors
823
+*/
824
+
825
+// NoError asserts that a function returned no error (i.e. `nil`).
826
+//
827
+//   actualObj, err := SomeFunction()
828
+//   if assert.NoError(t, err) {
829
+//	   assert.Equal(t, actualObj, expectedObj)
830
+//   }
831
+//
832
+// Returns whether the assertion was successful (true) or not (false).
833
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
834
+	if isNil(err) {
835
+		return true
836
+	}
837
+
838
+	return Fail(t, fmt.Sprintf("Received unexpected error %q", err), msgAndArgs...)
839
+}
840
+
841
+// Error asserts that a function returned an error (i.e. not `nil`).
842
+//
843
+//   actualObj, err := SomeFunction()
844
+//   if assert.Error(t, err, "An error was expected") {
845
+//	   assert.Equal(t, err, expectedError)
846
+//   }
847
+//
848
+// Returns whether the assertion was successful (true) or not (false).
849
+func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
850
+
851
+	message := messageFromMsgAndArgs(msgAndArgs...)
852
+	return NotNil(t, err, "An error is expected but got nil. %s", message)
853
+
854
+}
855
+
856
+// EqualError asserts that a function returned an error (i.e. not `nil`)
857
+// and that it is equal to the provided error.
858
+//
859
+//   actualObj, err := SomeFunction()
860
+//   if assert.Error(t, err, "An error was expected") {
861
+//	   assert.Equal(t, err, expectedError)
862
+//   }
863
+//
864
+// Returns whether the assertion was successful (true) or not (false).
865
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
866
+
867
+	message := messageFromMsgAndArgs(msgAndArgs...)
868
+	if !NotNil(t, theError, "An error is expected but got nil. %s", message) {
869
+		return false
870
+	}
871
+	s := "An error with value \"%s\" is expected but got \"%s\". %s"
872
+	return Equal(t, errString, theError.Error(),
873
+		s, errString, theError.Error(), message)
874
+}
875
+
876
+// matchRegexp return true if a specified regexp matches a string.
877
+func matchRegexp(rx interface{}, str interface{}) bool {
878
+
879
+	var r *regexp.Regexp
880
+	if rr, ok := rx.(*regexp.Regexp); ok {
881
+		r = rr
882
+	} else {
883
+		r = regexp.MustCompile(fmt.Sprint(rx))
884
+	}
885
+
886
+	return (r.FindStringIndex(fmt.Sprint(str)) != nil)
887
+
888
+}
889
+
890
+// Regexp asserts that a specified regexp matches a string.
891
+//
892
+//  assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
893
+//  assert.Regexp(t, "start...$", "it's not starting")
894
+//
895
+// Returns whether the assertion was successful (true) or not (false).
896
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
897
+
898
+	match := matchRegexp(rx, str)
899
+
900
+	if !match {
901
+		Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
902
+	}
903
+
904
+	return match
905
+}
906
+
907
+// NotRegexp asserts that a specified regexp does not match a string.
908
+//
909
+//  assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
910
+//  assert.NotRegexp(t, "^start", "it's not starting")
911
+//
912
+// Returns whether the assertion was successful (true) or not (false).
913
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
914
+	match := matchRegexp(rx, str)
915
+
916
+	if match {
917
+		Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...)
918
+	}
919
+
920
+	return !match
921
+
922
+}
923
+
924
+// Zero asserts that i is the zero value for its type and returns the truth.
925
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
926
+	if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
927
+		return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...)
928
+	}
929
+	return true
930
+}
931
+
932
+// NotZero asserts that i is not the zero value for its type and returns the truth.
933
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
934
+	if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
935
+		return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...)
936
+	}
937
+	return true
938
+}
939
+
940
+// JSONEq asserts that two JSON strings are equivalent.
941
+//
942
+//  assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
943
+//
944
+// Returns whether the assertion was successful (true) or not (false).
945
+func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
946
+	var expectedJSONAsInterface, actualJSONAsInterface interface{}
947
+
948
+	if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
949
+		return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
950
+	}
951
+
952
+	if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
953
+		return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
954
+	}
955
+
956
+	return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)
957
+}
958
+
959
+func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
960
+	t := reflect.TypeOf(v)
961
+	k := t.Kind()
962
+
963
+	if k == reflect.Ptr {
964
+		t = t.Elem()
965
+		k = t.Kind()
966
+	}
967
+	return t, k
968
+}
969
+
970
+// diff returns a diff of both values as long as both are of the same type and
971
+// are a struct, map, slice or array. Otherwise it returns an empty string.
972
+func diff(expected interface{}, actual interface{}) string {
973
+	if expected == nil || actual == nil {
974
+		return ""
975
+	}
976
+
977
+	et, ek := typeAndKind(expected)
978
+	at, _ := typeAndKind(actual)
979
+
980
+	if et != at {
981
+		return ""
982
+	}
983
+
984
+	if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array {
985
+		return ""
986
+	}
987
+
988
+	spew.Config.SortKeys = true
989
+	e := spew.Sdump(expected)
990
+	a := spew.Sdump(actual)
991
+
992
+	diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
993
+		A:        difflib.SplitLines(e),
994
+		B:        difflib.SplitLines(a),
995
+		FromFile: "Expected",
996
+		FromDate: "",
997
+		ToFile:   "Actual",
998
+		ToDate:   "",
999
+		Context:  1,
1000
+	})
1001
+
1002
+	return "\n\nDiff:\n" + diff
1003
+}
0 1004
new file mode 100644
... ...
@@ -0,0 +1,45 @@
0
+// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
1
+//
2
+// Example Usage
3
+//
4
+// The following is a complete example using assert in a standard test function:
5
+//    import (
6
+//      "testing"
7
+//      "github.com/stretchr/testify/assert"
8
+//    )
9
+//
10
+//    func TestSomething(t *testing.T) {
11
+//
12
+//      var a string = "Hello"
13
+//      var b string = "Hello"
14
+//
15
+//      assert.Equal(t, a, b, "The two words should be the same.")
16
+//
17
+//    }
18
+//
19
+// if you assert many times, use the format below:
20
+//
21
+//    import (
22
+//      "testing"
23
+//      "github.com/stretchr/testify/assert"
24
+//    )
25
+//
26
+//    func TestSomething(t *testing.T) {
27
+//      assert := assert.New(t)
28
+//
29
+//      var a string = "Hello"
30
+//      var b string = "Hello"
31
+//
32
+//      assert.Equal(a, b, "The two words should be the same.")
33
+//    }
34
+//
35
+// Assertions
36
+//
37
+// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
38
+// All assertion functions take, as the first argument, the `*testing.T` object provided by the
39
+// testing framework. This allows the assertion funcs to write the failings and other details to
40
+// the correct place.
41
+//
42
+// Every assertion function also takes an optional string message as the final argument,
43
+// allowing custom error messages to be appended to the message the assertion method outputs.
44
+package assert
0 45
new file mode 100644
... ...
@@ -0,0 +1,10 @@
0
+package assert
1
+
2
+import (
3
+	"errors"
4
+)
5
+
6
+// AnError is an error instance useful for testing.  If the code does not care
7
+// about error specifics, and only needs to return the error for example, this
8
+// error should be used to make the test code more readable.
9
+var AnError = errors.New("assert.AnError general error for testing")
0 10
new file mode 100644
... ...
@@ -0,0 +1,16 @@
0
+package assert
1
+
2
+// Assertions provides assertion methods around the
3
+// TestingT interface.
4
+type Assertions struct {
5
+	t TestingT
6
+}
7
+
8
+// New makes a new Assertions object for the specified TestingT.
9
+func New(t TestingT) *Assertions {
10
+	return &Assertions{
11
+		t: t,
12
+	}
13
+}
14
+
15
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl
0 16
new file mode 100644
... ...
@@ -0,0 +1,106 @@
0
+package assert
1
+
2
+import (
3
+	"fmt"
4
+	"net/http"
5
+	"net/http/httptest"
6
+	"net/url"
7
+	"strings"
8
+)
9
+
10
+// httpCode is a helper that returns HTTP code of the response. It returns -1
11
+// if building a new request fails.
12
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int {
13
+	w := httptest.NewRecorder()
14
+	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
15
+	if err != nil {
16
+		return -1
17
+	}
18
+	handler(w, req)
19
+	return w.Code
20
+}
21
+
22
+// HTTPSuccess asserts that a specified handler returns a success status code.
23
+//
24
+//  assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
25
+//
26
+// Returns whether the assertion was successful (true) or not (false).
27
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
28
+	code := httpCode(handler, method, url, values)
29
+	if code == -1 {
30
+		return false
31
+	}
32
+	return code >= http.StatusOK && code <= http.StatusPartialContent
33
+}
34
+
35
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
36
+//
37
+//  assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
38
+//
39
+// Returns whether the assertion was successful (true) or not (false).
40
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
41
+	code := httpCode(handler, method, url, values)
42
+	if code == -1 {
43
+		return false
44
+	}
45
+	return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
46
+}
47
+
48
+// HTTPError asserts that a specified handler returns an error status code.
49
+//
50
+//  assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
51
+//
52
+// Returns whether the assertion was successful (true) or not (false).
53
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
54
+	code := httpCode(handler, method, url, values)
55
+	if code == -1 {
56
+		return false
57
+	}
58
+	return code >= http.StatusBadRequest
59
+}
60
+
61
+// HTTPBody is a helper that returns HTTP body of the response. It returns
62
+// empty string if building a new request fails.
63
+func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
64
+	w := httptest.NewRecorder()
65
+	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
66
+	if err != nil {
67
+		return ""
68
+	}
69
+	handler(w, req)
70
+	return w.Body.String()
71
+}
72
+
73
+// HTTPBodyContains asserts that a specified handler returns a
74
+// body that contains a string.
75
+//
76
+//  assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
77
+//
78
+// Returns whether the assertion was successful (true) or not (false).
79
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
80
+	body := HTTPBody(handler, method, url, values)
81
+
82
+	contains := strings.Contains(body, fmt.Sprint(str))
83
+	if !contains {
84
+		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
85
+	}
86
+
87
+	return contains
88
+}
89
+
90
+// HTTPBodyNotContains asserts that a specified handler returns a
91
+// body that does not contain a string.
92
+//
93
+//  assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
94
+//
95
+// Returns whether the assertion was successful (true) or not (false).
96
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
97
+	body := HTTPBody(handler, method, url, values)
98
+
99
+	contains := strings.Contains(body, fmt.Sprint(str))
100
+	if contains {
101
+		Fail(t, "Expected response body for %s to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)
102
+	}
103
+
104
+	return !contains
105
+}