Browse code

Notary delegation integration into docker

Signed-off-by: Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>

Riyaz Faizullabhoy authored on 2015/12/19 11:47:35
Showing 51 changed files
... ...
@@ -153,7 +153,7 @@ RUN set -x \
153 153
 	&& rm -rf "$GOPATH"
154 154
 
155 155
 # Install notary server
156
-ENV NOTARY_COMMIT 8e8122eb5528f621afcd4e2854c47302f17392f7
156
+ENV NOTARY_COMMIT 30c488b3b4c62fdbc2c1eae7cf3b62ca73f95fad
157 157
 RUN set -x \
158 158
 	&& export GOPATH="$(mktemp -d)" \
159 159
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
... ...
@@ -11,6 +11,7 @@ import (
11 11
 	"net/http"
12 12
 	"net/url"
13 13
 	"os"
14
+	"path"
14 15
 	"path/filepath"
15 16
 	"regexp"
16 17
 	"sort"
... ...
@@ -35,9 +36,14 @@ import (
35 35
 	"github.com/docker/notary/passphrase"
36 36
 	"github.com/docker/notary/trustmanager"
37 37
 	"github.com/docker/notary/tuf/data"
38
+	"github.com/docker/notary/tuf/signed"
39
+	"github.com/docker/notary/tuf/store"
38 40
 )
39 41
 
40
-var untrusted bool
42
+var (
43
+	releasesRole = path.Join(data.CanonicalTargetsRole, "releases")
44
+	untrusted    bool
45
+)
41 46
 
42 47
 func addTrustedFlags(fs *flag.FlagSet, verify bool) {
43 48
 	var trusted bool
... ...
@@ -238,11 +244,11 @@ func (cli *DockerCli) trustedReference(ref reference.NamedTagged) (reference.Can
238 238
 		return nil, err
239 239
 	}
240 240
 
241
-	t, err := notaryRepo.GetTargetByName(ref.Tag())
241
+	t, err := notaryRepo.GetTargetByName(ref.Tag(), releasesRole, data.CanonicalTargetsRole)
242 242
 	if err != nil {
243 243
 		return nil, err
244 244
 	}
245
-	r, err := convertTarget(*t)
245
+	r, err := convertTarget(t.Target)
246 246
 	if err != nil {
247 247
 		return nil, err
248 248
 
... ...
@@ -264,17 +270,27 @@ func (cli *DockerCli) tagTrusted(trustedRef reference.Canonical, ref reference.N
264 264
 	return cli.client.ImageTag(options)
265 265
 }
266 266
 
267
-func notaryError(err error) error {
267
+func notaryError(repoName string, err error) error {
268 268
 	switch err.(type) {
269 269
 	case *json.SyntaxError:
270 270
 		logrus.Debugf("Notary syntax error: %s", err)
271
-		return errors.New("no trust data available for remote repository")
271
+		return fmt.Errorf("Error: no trust data available for remote repository %s. Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName)
272 272
 	case client.ErrExpired:
273
-		return fmt.Errorf("remote repository out-of-date: %v", err)
273
+		return fmt.Errorf("Error: remote repository %s out-of-date: %v", repoName, err)
274 274
 	case trustmanager.ErrKeyNotFound:
275
-		return fmt.Errorf("signing keys not found: %v", err)
275
+		return fmt.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err)
276 276
 	case *net.OpError:
277
-		return fmt.Errorf("error contacting notary server: %v", err)
277
+		return fmt.Errorf("Error: error contacting notary server: %v", err)
278
+	case store.ErrMetaNotFound:
279
+		return fmt.Errorf("Error: trust data missing for remote repository %s: %v", repoName, err)
280
+	case signed.ErrInvalidKeyType:
281
+		return fmt.Errorf("Error: trust data mismatch for remote repository %s, could be malicious behavior: %v", repoName, err)
282
+	case signed.ErrNoKeys:
283
+		return fmt.Errorf("Error: could not find signing keys for remote repository %s: %v", repoName, err)
284
+	case signed.ErrLowVersion:
285
+		return fmt.Errorf("Error: trust data version is lower than expected for remote repository %s, could be malicious behavior: %v", repoName, err)
286
+	case signed.ErrInsufficientSignatures:
287
+		return fmt.Errorf("Error: trust data has insufficient signatures for remote repository %s, could be malicious behavior: %v", repoName, err)
278 288
 	}
279 289
 
280 290
 	return err
... ...
@@ -291,12 +307,12 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr
291 291
 
292 292
 	if ref.String() == "" {
293 293
 		// List all targets
294
-		targets, err := notaryRepo.ListTargets()
294
+		targets, err := notaryRepo.ListTargets(releasesRole, data.CanonicalTargetsRole)
295 295
 		if err != nil {
296
-			return notaryError(err)
296
+			return notaryError(repoInfo.FullName(), err)
297 297
 		}
298 298
 		for _, tgt := range targets {
299
-			t, err := convertTarget(*tgt)
299
+			t, err := convertTarget(tgt.Target)
300 300
 			if err != nil {
301 301
 				fmt.Fprintf(cli.out, "Skipping target for %q\n", repoInfo.Name())
302 302
 				continue
... ...
@@ -304,11 +320,11 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr
304 304
 			refs = append(refs, t)
305 305
 		}
306 306
 	} else {
307
-		t, err := notaryRepo.GetTargetByName(ref.String())
307
+		t, err := notaryRepo.GetTargetByName(ref.String(), releasesRole, data.CanonicalTargetsRole)
308 308
 		if err != nil {
309
-			return notaryError(err)
309
+			return notaryError(repoInfo.FullName(), err)
310 310
 		}
311
-		r, err := convertTarget(*t)
311
+		r, err := convertTarget(t.Target)
312 312
 		if err != nil {
313 313
 			return err
314 314
 
... ...
@@ -413,7 +429,7 @@ func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string,
413 413
 
414 414
 	repo, err := cli.getNotaryRepository(repoInfo, authConfig)
415 415
 	if err != nil {
416
-		fmt.Fprintf(cli.out, "Error establishing connection to notary repository: %s\n", err)
416
+		fmt.Fprintf(cli.out, "Error establishing connection to notary repository, has a notary server been setup and pointed to by the DOCKER_CONTENT_TRUST_SERVER environment variable?: %s\n", err)
417 417
 		return err
418 418
 	}
419 419
 
... ...
@@ -429,14 +445,14 @@ func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string,
429 429
 			},
430 430
 			Length: int64(target.size),
431 431
 		}
432
-		if err := repo.AddTarget(t); err != nil {
432
+		if err := repo.AddTarget(t, releasesRole); err != nil {
433 433
 			return err
434 434
 		}
435 435
 	}
436 436
 
437 437
 	err = repo.Publish()
438 438
 	if _, ok := err.(*client.ErrRepoNotInitialized); !ok {
439
-		return notaryError(err)
439
+		return notaryError(repoInfo.FullName(), err)
440 440
 	}
441 441
 
442 442
 	keys := repo.CryptoService.ListKeys(data.CanonicalRootRole)
... ...
@@ -455,9 +471,9 @@ func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string,
455 455
 	}
456 456
 
457 457
 	if err := repo.Initialize(rootKeyID); err != nil {
458
-		return notaryError(err)
458
+		return notaryError(repoInfo.FullName(), err)
459 459
 	}
460 460
 	fmt.Fprintf(cli.out, "Finished initializing %q\n", repoInfo.FullName())
461 461
 
462
-	return notaryError(repo.Publish())
462
+	return notaryError(repoInfo.FullName(), repo.Publish())
463 463
 }
... ...
@@ -46,7 +46,9 @@ clone git github.com/boltdb/bolt v1.1.0
46 46
 clone git github.com/docker/distribution 568bf038af6d65b376165d02886b1c7fcaef1f61
47 47
 clone git github.com/vbatts/tar-split v0.9.11
48 48
 
49
-clone git github.com/docker/notary 45de2828b5e0083bfb4e9a5a781eddb05e2ef9d0
49
+# get desired notary commit, might also need to be updated in Dockerfile
50
+clone git github.com/docker/notary 30c488b3b4c62fdbc2c1eae7cf3b62ca73f95fad
51
+
50 52
 clone git google.golang.org/grpc 174192fc93efcb188fc8f46ca447f0da606b6885 https://github.com/grpc/grpc-go.git
51 53
 clone git github.com/miekg/pkcs11 80f102b5cac759de406949c47f0928b99bd64cdf
52 54
 clone git github.com/jfrazelle/go v1.5.1-1
... ...
@@ -5805,7 +5805,7 @@ func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
5805 5805
 		c.Fatalf("Expected error on trusted build with untrusted tag: %s\n%s", err, out)
5806 5806
 	}
5807 5807
 
5808
-	if !strings.Contains(out, fmt.Sprintf("no trust data available")) {
5808
+	if !strings.Contains(out, fmt.Sprintf("trust data unavailable")) {
5809 5809
 		c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out)
5810 5810
 	}
5811 5811
 }
... ...
@@ -312,7 +312,7 @@ func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) {
312 312
 	s.trustedCmd(createCmd)
313 313
 	out, _, err := runCommandWithOutput(createCmd)
314 314
 	c.Assert(err, check.Not(check.IsNil))
315
-	c.Assert(string(out), checker.Contains, "no trust data available", check.Commentf("Missing expected output on trusted create:\n%s", out))
315
+	c.Assert(string(out), checker.Contains, "trust data unavailable", check.Commentf("Missing expected output on trusted create:\n%s", out))
316 316
 
317 317
 }
318 318
 
... ...
@@ -58,7 +58,7 @@ func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) {
58 58
 	out, _, err := runCommandWithOutput(pullCmd)
59 59
 
60 60
 	c.Assert(err, check.NotNil, check.Commentf(out))
61
-	c.Assert(string(out), checker.Contains, "no trust data available", check.Commentf(out))
61
+	c.Assert(string(out), checker.Contains, "trust data unavailable", check.Commentf(out))
62 62
 }
63 63
 
64 64
 func (s *DockerTrustSuite) TestPullWhenCertExpired(c *check.C) {
... ...
@@ -115,10 +115,17 @@ func (s *DockerTrustSuite) TestTrustedPush(c *check.C) {
115 115
 	out, _, err := runCommandWithOutput(pushCmd)
116 116
 	c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out))
117 117
 	c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push"))
118
+
119
+	// Try pull after push
120
+	pullCmd := exec.Command(dockerBinary, "pull", repoName)
121
+	s.trustedCmd(pullCmd)
122
+	out, _, err = runCommandWithOutput(pullCmd)
123
+	c.Assert(err, check.IsNil, check.Commentf(out))
124
+	c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out))
118 125
 }
119 126
 
120 127
 func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) {
121
-	repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL)
128
+	repoName := fmt.Sprintf("%v/dockerclienv/trusted:latest", privateRegistryURL)
122 129
 	// tag the image and upload it to the private registry
123 130
 	dockerCmd(c, "tag", "busybox", repoName)
124 131
 
... ...
@@ -127,6 +134,13 @@ func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) {
127 127
 	out, _, err := runCommandWithOutput(pushCmd)
128 128
 	c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out))
129 129
 	c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push"))
130
+
131
+	// Try pull after push
132
+	pullCmd := exec.Command(dockerBinary, "pull", repoName)
133
+	s.trustedCmd(pullCmd)
134
+	out, _, err = runCommandWithOutput(pullCmd)
135
+	c.Assert(err, check.IsNil, check.Commentf(out))
136
+	c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out))
130 137
 }
131 138
 
132 139
 // This test ensures backwards compatibility with old ENV variables. Should be
... ...
@@ -168,7 +182,7 @@ func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C)
168 168
 }
169 169
 
170 170
 func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) {
171
-	repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL)
171
+	repoName := fmt.Sprintf("%v/dockerclitag/trusted:latest", privateRegistryURL)
172 172
 	// tag the image and upload it to the private registry
173 173
 	dockerCmd(c, "tag", "busybox", repoName)
174 174
 	dockerCmd(c, "push", repoName)
... ...
@@ -178,6 +192,13 @@ func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) {
178 178
 	out, _, err := runCommandWithOutput(pushCmd)
179 179
 	c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out))
180 180
 	c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag"))
181
+
182
+	// Try pull after push
183
+	pullCmd := exec.Command(dockerBinary, "pull", repoName)
184
+	s.trustedCmd(pullCmd)
185
+	out, _, err = runCommandWithOutput(pullCmd)
186
+	c.Assert(err, check.IsNil, check.Commentf(out))
187
+	c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out))
181 188
 }
182 189
 
183 190
 func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) {
... ...
@@ -3087,7 +3087,7 @@ func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) {
3087 3087
 		c.Fatalf("Error expected when running trusted run with:\n%s", out)
3088 3088
 	}
3089 3089
 
3090
-	if !strings.Contains(string(out), "no trust data available") {
3090
+	if !strings.Contains(string(out), "trust data unavailable") {
3091 3091
 		c.Fatalf("Missing expected output on trusted run:\n%s", out)
3092 3092
 	}
3093 3093
 }
... ...
@@ -28,9 +28,9 @@ const notaryURL = "https://" + notaryHost
28 28
 func newTestNotary(c *check.C) (*testNotary, error) {
29 29
 	template := `{
30 30
 	"server": {
31
-		"addr": "%s",
32
-		"tls_key_file": "fixtures/notary/localhost.key",
33
-		"tls_cert_file": "fixtures/notary/localhost.cert"
31
+		"http_addr": "%s",
32
+		"tls_key_file": "%s",
33
+		"tls_cert_file": "%s"
34 34
 	},
35 35
 	"trust_service": {
36 36
 		"type": "local",
... ...
@@ -39,8 +39,11 @@ func newTestNotary(c *check.C) (*testNotary, error) {
39 39
 		"key_algorithm": "ed25519"
40 40
 	},
41 41
 	"logging": {
42
-		"level": 5
43
-	}
42
+		"level": "debug"
43
+	},
44
+	"storage": {
45
+        "backend": "memory"
46
+    }
44 47
 }`
45 48
 	tmp, err := ioutil.TempDir("", "notary-test-")
46 49
 	if err != nil {
... ...
@@ -51,7 +54,12 @@ func newTestNotary(c *check.C) (*testNotary, error) {
51 51
 	if err != nil {
52 52
 		return nil, err
53 53
 	}
54
-	if _, err := fmt.Fprintf(config, template, notaryHost); err != nil {
54
+
55
+	workingDir, err := os.Getwd()
56
+	if err != nil {
57
+		return nil, err
58
+	}
59
+	if _, err := fmt.Fprintf(config, template, notaryHost, filepath.Join(workingDir, "fixtures/notary/localhost.key"), filepath.Join(workingDir, "fixtures/notary/localhost.cert")); err != nil {
55 60
 		os.RemoveAll(tmp)
56 61
 		return nil, err
57 62
 	}
... ...
@@ -20,4 +20,4 @@ RUN go install \
20 20
     ${NOTARYPKG}/cmd/notary-server
21 21
 
22 22
 ENTRYPOINT [ "notary-server" ]
23
-CMD [ "-config", "cmd/notary-server/config.json" ]
23
+CMD [ "-config=fixtures/server-config-local.json" ]
... ...
@@ -38,4 +38,4 @@ RUN go install \
38 38
 
39 39
 
40 40
 ENTRYPOINT [ "notary-signer" ]
41
-CMD [ "-config=cmd/notary-signer/config.json" ]
41
+CMD [ "-config=fixtures/signer-config-local.json" ]
... ...
@@ -1,5 +1,52 @@
1
-David Lawrence <david.lawrence@docker.com> (@endophage)
2
-Ying Li <ying.li@docker.com> (@cyli)
3
-Nathan McCauley <nathan.mccauley@docker.com> (@NathanMcCauley)
4
-Derek McGowan <derek@docker.com> (@dmcgowan)
5
-Diogo Monica <diogo@docker.com> (@diogomonica)
1
+# Notary maintainers file
2
+#
3
+# This file describes who runs the docker/notary project and how.
4
+# This is a living document - if you see something out of date or missing, speak up!
5
+#
6
+# It is structured to be consumable by both humans and programs.
7
+# To extract its contents programmatically, use any TOML-compliant parser.
8
+#
9
+# This file is compiled into the MAINTAINERS file in docker/opensource.
10
+#
11
+[Org]
12
+	[Org."Core maintainers"]
13
+		people = [
14
+			"cyli",
15
+			"diogomonica",
16
+			"dmcgowan",
17
+			"endophage",
18
+			"nathanmccauley",
19
+		]
20
+
21
+[people]
22
+
23
+# A reference list of all people associated with the project.
24
+# All other sections should refer to people by their canonical key
25
+# in the people section.
26
+
27
+	# ADD YOURSELF HERE IN ALPHABETICAL ORDER
28
+
29
+	[people.cyli]
30
+	Name = "Ying Li"
31
+	Email = "ying.li@docker.com"
32
+	GitHub = "cyli"
33
+
34
+	[people.diogomonica]
35
+	Name = "Diogo Monica"
36
+	Email = "diogo@docker.com"
37
+	GitHub = "diogomonica"
38
+
39
+	[people.dmcgowan]
40
+	Name = "Derek McGowan"
41
+	Email = "derek@docker.com"
42
+	GitHub = "dmcgowan"
43
+
44
+	[people.endophage]
45
+	Name = "David Lawrence"
46
+	Email = "david.lawrence@docker.com"
47
+	GitHub = "endophage"
48
+
49
+	[people.nathanmccauley]
50
+	Name = "Nathan McCauley"
51
+	Email = "nathan.mccauley@docker.com"
52
+	GitHub = "nathanmccauley"
... ...
@@ -32,7 +32,7 @@ _space := $(empty) $(empty)
32 32
 
33 33
 # go cover test variables
34 34
 COVERDIR=.cover
35
-COVERPROFILE=$(COVERDIR)/cover.out
35
+COVERPROFILE?=$(COVERDIR)/cover.out
36 36
 COVERMODE=count
37 37
 PKGS = $(shell go list ./... | tr '\n' ' ')
38 38
 
... ...
@@ -43,8 +43,8 @@ GO_VERSION = $(shell go version | awk '{print $$3}')
43 43
 .DEFAULT: default
44 44
 
45 45
 go_version:
46
-ifneq ("$(GO_VERSION)", "go1.5.1")
47
-	$(error Requires go version 1.5.1 - found $(GO_VERSION))
46
+ifeq (,$(findstring go1.5.,$(GO_VERSION)))
47
+	$(error Requires go version 1.5.x - found $(GO_VERSION))
48 48
 else
49 49
 	@echo
50 50
 endif
... ...
@@ -73,6 +73,11 @@ ${PREFIX}/bin/notary-signer: NOTARY_VERSION $(shell find . -type f -name '*.go')
73 73
 
74 74
 vet: go_version
75 75
 	@echo "+ $@"
76
+ifeq ($(shell uname -s), Darwin)
77
+	@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v Godeps | xargs echo "This file should end with '_test':"  | tee /dev/stderr)"
78
+else
79
+	@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v Godeps | xargs -r echo "This file should end with '_test':"  | tee /dev/stderr)"
80
+endif
76 81
 	@test -z "$$(go tool vet -printf=false . 2>&1 | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"
77 82
 
78 83
 fmt:
... ...
@@ -1 +1 @@
1
-1.0-rc1
1
+0.2
... ...
@@ -87,7 +87,7 @@ curl example.com/install.sh | notary verify example.com/scripts v1 | sh
87 87
 # Notary Server
88 88
 
89 89
 Notary Server manages TUF data over an HTTP API compatible with the
90
-[notary client](../notary/).
90
+[notary client](cmd/notary).
91 91
 
92 92
 It may be configured to use either JWT or HTTP Basic Auth for authentication.
93 93
 Currently it only supports MySQL for storage of the TUF data, we intend to
94 94
new file mode 100644
... ...
@@ -0,0 +1,345 @@
0
+package certs
1
+
2
+import (
3
+	"crypto/x509"
4
+	"errors"
5
+	"fmt"
6
+	"path/filepath"
7
+	"time"
8
+
9
+	"github.com/Sirupsen/logrus"
10
+	"github.com/docker/notary/trustmanager"
11
+	"github.com/docker/notary/tuf/data"
12
+	"github.com/docker/notary/tuf/signed"
13
+)
14
+
15
+// Manager is an abstraction around trusted root CA stores
16
+type Manager struct {
17
+	trustedCAStore          trustmanager.X509Store
18
+	trustedCertificateStore trustmanager.X509Store
19
+}
20
+
21
+const trustDir = "trusted_certificates"
22
+
23
+// ErrValidationFail is returned when there is no valid trusted certificates
24
+// being served inside of the roots.json
25
+type ErrValidationFail struct {
26
+	Reason string
27
+}
28
+
29
+// ErrValidationFail is returned when there is no valid trusted certificates
30
+// being served inside of the roots.json
31
+func (err ErrValidationFail) Error() string {
32
+	return fmt.Sprintf("could not validate the path to a trusted root: %s", err.Reason)
33
+}
34
+
35
+// ErrRootRotationFail is returned when we fail to do a full root key rotation
36
+// by either failing to add the new root certificate, or delete the old ones
37
+type ErrRootRotationFail struct {
38
+	Reason string
39
+}
40
+
41
+// ErrRootRotationFail is returned when we fail to do a full root key rotation
42
+// by either failing to add the new root certificate, or delete the old ones
43
+func (err ErrRootRotationFail) Error() string {
44
+	return fmt.Sprintf("could not rotate trust to a new trusted root: %s", err.Reason)
45
+}
46
+
47
+// NewManager returns an initialized Manager, or an error
48
+// if it fails to load certificates
49
+func NewManager(baseDir string) (*Manager, error) {
50
+	trustPath := filepath.Join(baseDir, trustDir)
51
+
52
+	// Load all CAs that aren't expired and don't use SHA1
53
+	trustedCAStore, err := trustmanager.NewX509FilteredFileStore(trustPath, func(cert *x509.Certificate) bool {
54
+		return cert.IsCA && cert.BasicConstraintsValid && cert.SubjectKeyId != nil &&
55
+			time.Now().Before(cert.NotAfter) &&
56
+			cert.SignatureAlgorithm != x509.SHA1WithRSA &&
57
+			cert.SignatureAlgorithm != x509.DSAWithSHA1 &&
58
+			cert.SignatureAlgorithm != x509.ECDSAWithSHA1
59
+	})
60
+	if err != nil {
61
+		return nil, err
62
+	}
63
+
64
+	// Load all individual (non-CA) certificates that aren't expired and don't use SHA1
65
+	trustedCertificateStore, err := trustmanager.NewX509FilteredFileStore(trustPath, func(cert *x509.Certificate) bool {
66
+		return !cert.IsCA &&
67
+			time.Now().Before(cert.NotAfter) &&
68
+			cert.SignatureAlgorithm != x509.SHA1WithRSA &&
69
+			cert.SignatureAlgorithm != x509.DSAWithSHA1 &&
70
+			cert.SignatureAlgorithm != x509.ECDSAWithSHA1
71
+	})
72
+	if err != nil {
73
+		return nil, err
74
+	}
75
+
76
+	return &Manager{
77
+		trustedCAStore:          trustedCAStore,
78
+		trustedCertificateStore: trustedCertificateStore,
79
+	}, nil
80
+}
81
+
82
+// TrustedCertificateStore returns the trusted certificate store being managed
83
+// by this Manager
84
+func (m *Manager) TrustedCertificateStore() trustmanager.X509Store {
85
+	return m.trustedCertificateStore
86
+}
87
+
88
+// TrustedCAStore returns the CA store being managed by this Manager
89
+func (m *Manager) TrustedCAStore() trustmanager.X509Store {
90
+	return m.trustedCAStore
91
+}
92
+
93
+// AddTrustedCert adds a cert to the trusted certificate store (not the CA
94
+// store)
95
+func (m *Manager) AddTrustedCert(cert *x509.Certificate) {
96
+	m.trustedCertificateStore.AddCert(cert)
97
+}
98
+
99
+// AddTrustedCACert adds a cert to the trusted CA certificate store
100
+func (m *Manager) AddTrustedCACert(cert *x509.Certificate) {
101
+	m.trustedCAStore.AddCert(cert)
102
+}
103
+
104
+/*
105
+ValidateRoot receives a new root, validates its correctness and attempts to
106
+do root key rotation if needed.
107
+
108
+First we list the current trusted certificates we have for a particular GUN. If
109
+that list is non-empty, it means that we've already seen this repository before, and
110
+have a list of trusted certificates for it. In this case, we use this list of
111
+certificates to attempt to validate this root file.
112
+
113
+If the previous validation succeeds, or in the case where we found no trusted
114
+certificates for this particular GUN, we check the integrity of the root by
115
+making sure that it is validated by itself. This means that we will attempt to
116
+validate the root data with the certificates that are included in the root keys
117
+themselves.
118
+
119
+If this last steps succeeds, we attempt to do root rotation, by ensuring that
120
+we only trust the certificates that are present in the new root.
121
+
122
+This mechanism of operation is essentially Trust On First Use (TOFU): if we
123
+have never seen a certificate for a particular CN, we trust it. If later we see
124
+a different certificate for that CN, we return an ErrValidationFail error.
125
+
126
+Note that since we only allow trust data to be downloaded over an HTTPS channel
127
+we are using the current public PKI to validate the first download of the certificate
128
+adding an extra layer of security over the normal (SSH style) trust model.
129
+We shall call this: TOFUS.
130
+*/
131
+func (m *Manager) ValidateRoot(root *data.Signed, gun string) error {
132
+	logrus.Debugf("entered ValidateRoot with dns: %s", gun)
133
+	signedRoot, err := data.RootFromSigned(root)
134
+	if err != nil {
135
+		return err
136
+	}
137
+
138
+	// Retrieve all the leaf certificates in root for which the CN matches the GUN
139
+	allValidCerts, err := validRootLeafCerts(signedRoot, gun)
140
+	if err != nil {
141
+		logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err)
142
+		return &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
143
+	}
144
+
145
+	// Retrieve all the trusted certificates that match this gun
146
+	certsForCN, err := m.trustedCertificateStore.GetCertificatesByCN(gun)
147
+	if err != nil {
148
+		// If the error that we get back is different than ErrNoCertificatesFound
149
+		// we couldn't check if there are any certificates with this CN already
150
+		// trusted. Let's take the conservative approach and return a failed validation
151
+		if _, ok := err.(*trustmanager.ErrNoCertificatesFound); !ok {
152
+			logrus.Debugf("error retrieving trusted certificates for: %s, %v", gun, err)
153
+			return &ErrValidationFail{Reason: "unable to retrieve trusted certificates"}
154
+		}
155
+	}
156
+
157
+	// If we have certificates that match this specific GUN, let's make sure to
158
+	// use them first to validate that this new root is valid.
159
+	if len(certsForCN) != 0 {
160
+		logrus.Debugf("found %d valid root certificates for %s", len(certsForCN), gun)
161
+		err = signed.VerifyRoot(root, 0, trustmanager.CertsToKeys(certsForCN))
162
+		if err != nil {
163
+			logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
164
+			return &ErrValidationFail{Reason: "failed to validate data with current trusted certificates"}
165
+		}
166
+	} else {
167
+		logrus.Debugf("found no currently valid root certificates for %s", gun)
168
+	}
169
+
170
+	// Validate the integrity of the new root (does it have valid signatures)
171
+	err = signed.VerifyRoot(root, 0, trustmanager.CertsToKeys(allValidCerts))
172
+	if err != nil {
173
+		logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
174
+		return &ErrValidationFail{Reason: "failed to validate integrity of roots"}
175
+	}
176
+
177
+	// Getting here means A) we had trusted certificates and both the
178
+	// old and new validated this root; or B) we had no trusted certificates but
179
+	// the new set of certificates has integrity (self-signed)
180
+	logrus.Debugf("entering root certificate rotation for: %s", gun)
181
+
182
+	// Do root certificate rotation: we trust only the certs present in the new root
183
+	// First we add all the new certificates (even if they already exist)
184
+	for _, cert := range allValidCerts {
185
+		err := m.trustedCertificateStore.AddCert(cert)
186
+		if err != nil {
187
+			// If the error is already exists we don't fail the rotation
188
+			if _, ok := err.(*trustmanager.ErrCertExists); ok {
189
+				logrus.Debugf("ignoring certificate addition to: %s", gun)
190
+				continue
191
+			}
192
+			logrus.Debugf("error adding new trusted certificate for: %s, %v", gun, err)
193
+		}
194
+	}
195
+
196
+	// Now we delete old certificates that aren't present in the new root
197
+	for certID, cert := range certsToRemove(certsForCN, allValidCerts) {
198
+		logrus.Debugf("removing certificate with certID: %s", certID)
199
+		err = m.trustedCertificateStore.RemoveCert(cert)
200
+		if err != nil {
201
+			logrus.Debugf("failed to remove trusted certificate with keyID: %s, %v", certID, err)
202
+			return &ErrRootRotationFail{Reason: "failed to rotate root keys"}
203
+		}
204
+	}
205
+
206
+	logrus.Debugf("Root validation succeeded for %s", gun)
207
+	return nil
208
+}
209
+
210
+// validRootLeafCerts returns a list of non-expired, non-SHA1 certificates whose
211
+// Common-Names match the provided GUN
212
+func validRootLeafCerts(root *data.SignedRoot, gun string) ([]*x509.Certificate, error) {
213
+	// Get a list of all of the leaf certificates present in root
214
+	allLeafCerts, _ := parseAllCerts(root)
215
+	var validLeafCerts []*x509.Certificate
216
+
217
+	// Go through every leaf certificate and check that the CN matches the gun
218
+	for _, cert := range allLeafCerts {
219
+		// Validate that this leaf certificate has a CN that matches the exact gun
220
+		if cert.Subject.CommonName != gun {
221
+			logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s", cert.Subject.CommonName)
222
+			continue
223
+		}
224
+		// Make sure the certificate is not expired
225
+		if time.Now().After(cert.NotAfter) {
226
+			logrus.Debugf("error leaf certificate is expired")
227
+			continue
228
+		}
229
+
230
+		// We don't allow root certificates that use SHA1
231
+		if cert.SignatureAlgorithm == x509.SHA1WithRSA ||
232
+			cert.SignatureAlgorithm == x509.DSAWithSHA1 ||
233
+			cert.SignatureAlgorithm == x509.ECDSAWithSHA1 {
234
+
235
+			logrus.Debugf("error certificate uses deprecated hashing algorithm (SHA1)")
236
+			continue
237
+		}
238
+
239
+		validLeafCerts = append(validLeafCerts, cert)
240
+	}
241
+
242
+	if len(validLeafCerts) < 1 {
243
+		logrus.Debugf("didn't find any valid leaf certificates for %s", gun)
244
+		return nil, errors.New("no valid leaf certificates found in any of the root keys")
245
+	}
246
+
247
+	logrus.Debugf("found %d valid leaf certificates for %s", len(validLeafCerts), gun)
248
+	return validLeafCerts, nil
249
+}
250
+
251
+// parseAllCerts returns two maps, one with all of the leafCertificates and one
252
+// with all the intermediate certificates found in signedRoot
253
+func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, map[string][]*x509.Certificate) {
254
+	leafCerts := make(map[string]*x509.Certificate)
255
+	intCerts := make(map[string][]*x509.Certificate)
256
+
257
+	// Before we loop through all root keys available, make sure any exist
258
+	rootRoles, ok := signedRoot.Signed.Roles["root"]
259
+	if !ok {
260
+		logrus.Debugf("tried to parse certificates from invalid root signed data")
261
+		return nil, nil
262
+	}
263
+
264
+	logrus.Debugf("found the following root keys: %v", rootRoles.KeyIDs)
265
+	// Iterate over every keyID for the root role inside of roots.json
266
+	for _, keyID := range rootRoles.KeyIDs {
267
+		// check that the key exists in the signed root keys map
268
+		key, ok := signedRoot.Signed.Keys[keyID]
269
+		if !ok {
270
+			logrus.Debugf("error while getting data for keyID: %s", keyID)
271
+			continue
272
+		}
273
+
274
+		// Decode all the x509 certificates that were bundled with this
275
+		// Specific root key
276
+		decodedCerts, err := trustmanager.LoadCertBundleFromPEM(key.Public())
277
+		if err != nil {
278
+			logrus.Debugf("error while parsing root certificate with keyID: %s, %v", keyID, err)
279
+			continue
280
+		}
281
+
282
+		// Get all non-CA certificates in the decoded certificates
283
+		leafCertList := trustmanager.GetLeafCerts(decodedCerts)
284
+
285
+		// If we got no leaf certificates or we got more than one, fail
286
+		if len(leafCertList) != 1 {
287
+			logrus.Debugf("invalid chain due to leaf certificate missing or too many leaf certificates for keyID: %s", keyID)
288
+			continue
289
+		}
290
+
291
+		// Get the ID of the leaf certificate
292
+		leafCert := leafCertList[0]
293
+		leafID, err := trustmanager.FingerprintCert(leafCert)
294
+		if err != nil {
295
+			logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", keyID, err)
296
+			continue
297
+		}
298
+
299
+		// Store the leaf cert in the map
300
+		leafCerts[leafID] = leafCert
301
+
302
+		// Get all the remainder certificates marked as a CA to be used as intermediates
303
+		intermediateCerts := trustmanager.GetIntermediateCerts(decodedCerts)
304
+		intCerts[leafID] = intermediateCerts
305
+	}
306
+
307
+	return leafCerts, intCerts
308
+}
309
+
310
+// certsToRemove returns all the certificates from oldCerts that aren't present
311
+// in newCerts
312
+func certsToRemove(oldCerts, newCerts []*x509.Certificate) map[string]*x509.Certificate {
313
+	certsToRemove := make(map[string]*x509.Certificate)
314
+
315
+	// If no newCerts were provided
316
+	if len(newCerts) == 0 {
317
+		return certsToRemove
318
+	}
319
+
320
+	// Populate a map with all the IDs from newCert
321
+	var newCertMap = make(map[string]struct{})
322
+	for _, cert := range newCerts {
323
+		certID, err := trustmanager.FingerprintCert(cert)
324
+		if err != nil {
325
+			logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", certID, err)
326
+			continue
327
+		}
328
+		newCertMap[certID] = struct{}{}
329
+	}
330
+
331
+	// Iterate over all the old certificates and check to see if we should remove them
332
+	for _, cert := range oldCerts {
333
+		certID, err := trustmanager.FingerprintCert(cert)
334
+		if err != nil {
335
+			logrus.Debugf("error while fingerprinting root certificate with certID: %s, %v", certID, err)
336
+			continue
337
+		}
338
+		if _, ok := newCertMap[certID]; !ok {
339
+			certsToRemove[certID] = cert
340
+		}
341
+	}
342
+
343
+	return certsToRemove
344
+}
... ...
@@ -16,6 +16,8 @@ machine:
16 16
     BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
17 17
   # Workaround Circle parsing dumb bugs and/or YAML wonkyness
18 18
     CIRCLE_PAIN: "mode: set"
19
+  # Put the coverage profile somewhere codecov's script can find it
20
+    COVERPROFILE: coverage.out
19 21
 
20 22
   hosts:
21 23
   # Not used yet
... ...
@@ -72,8 +74,10 @@ test:
72 72
   post:
73 73
     - gvm use stable && make covmerge:
74 74
         timeout: 600
75
+        parallel: true
75 76
         pwd: $BASE_STABLE
76 77
 
77 78
   # Report to codecov.io
78
-    # - bash <(curl -s https://codecov.io/bash):
79
-    #     pwd: $BASE_STABLE
79
+    - bash <(curl -s https://codecov.io/bash):
80
+        parallel: true
81
+        pwd: $BASE_STABLE
... ...
@@ -77,3 +77,26 @@ func (c TufChange) Path() string {
77 77
 func (c TufChange) Content() []byte {
78 78
 	return c.Data
79 79
 }
80
+
81
+// TufDelegation represents a modification to a target delegation
82
+// this includes creating a delegation. This format is used to avoid
83
+// unexpected race conditions between humans modifying the same delegation
84
+type TufDelegation struct {
85
+	NewName                string       `json:"new_name,omitempty"`
86
+	NewThreshold           int          `json:"threshold, omitempty"`
87
+	AddKeys                data.KeyList `json:"add_keys, omitempty"`
88
+	RemoveKeys             []string     `json:"remove_keys,omitempty"`
89
+	AddPaths               []string     `json:"add_paths,omitempty"`
90
+	RemovePaths            []string     `json:"remove_paths,omitempty"`
91
+	AddPathHashPrefixes    []string     `json:"add_prefixes,omitempty"`
92
+	RemovePathHashPrefixes []string     `json:"remove_prefixes,omitempty"`
93
+}
94
+
95
+// ToNewRole creates a fresh role object from the TufDelegation data
96
+func (td TufDelegation) ToNewRole(scope string) (*data.Role, error) {
97
+	name := scope
98
+	if td.NewName != "" {
99
+		name = td.NewName
100
+	}
101
+	return data.NewRole(name, td.NewThreshold, td.AddKeys.IDs(), td.AddPaths, td.AddPathHashPrefixes)
102
+}
... ...
@@ -9,11 +9,13 @@ import (
9 9
 	"net/http"
10 10
 	"os"
11 11
 	"path/filepath"
12
+	"strings"
13
+	"time"
12 14
 
13 15
 	"github.com/Sirupsen/logrus"
16
+	"github.com/docker/notary/certs"
14 17
 	"github.com/docker/notary/client/changelist"
15 18
 	"github.com/docker/notary/cryptoservice"
16
-	"github.com/docker/notary/keystoremanager"
17 19
 	"github.com/docker/notary/trustmanager"
18 20
 	"github.com/docker/notary/tuf"
19 21
 	tufclient "github.com/docker/notary/tuf/client"
... ...
@@ -52,6 +54,17 @@ type ErrExpired struct {
52 52
 	signed.ErrExpired
53 53
 }
54 54
 
55
+// ErrInvalidRemoteRole is returned when the server is requested to manage
56
+// an unsupported key type
57
+type ErrInvalidRemoteRole struct {
58
+	Role string
59
+}
60
+
61
+func (e ErrInvalidRemoteRole) Error() string {
62
+	return fmt.Sprintf(
63
+		"notary does not support the server managing the %s key", e.Role)
64
+}
65
+
55 66
 const (
56 67
 	tufDir = "tuf"
57 68
 )
... ...
@@ -63,23 +76,67 @@ var ErrRepositoryNotExist = errors.New("repository does not exist")
63 63
 // NotaryRepository stores all the information needed to operate on a notary
64 64
 // repository.
65 65
 type NotaryRepository struct {
66
-	baseDir         string
67
-	gun             string
68
-	baseURL         string
69
-	tufRepoPath     string
70
-	fileStore       store.MetadataStore
71
-	CryptoService   signed.CryptoService
72
-	tufRepo         *tuf.Repo
73
-	roundTrip       http.RoundTripper
74
-	KeyStoreManager *keystoremanager.KeyStoreManager
66
+	baseDir       string
67
+	gun           string
68
+	baseURL       string
69
+	tufRepoPath   string
70
+	fileStore     store.MetadataStore
71
+	CryptoService signed.CryptoService
72
+	tufRepo       *tuf.Repo
73
+	roundTrip     http.RoundTripper
74
+	CertManager   *certs.Manager
75
+}
76
+
77
+// repositoryFromKeystores is a helper function for NewNotaryRepository that
78
+// takes some basic NotaryRepository parameters as well as keystores (in order
79
+// of usage preference), and returns a NotaryRepository.
80
+func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper,
81
+	keyStores []trustmanager.KeyStore) (*NotaryRepository, error) {
82
+
83
+	certManager, err := certs.NewManager(baseDir)
84
+	if err != nil {
85
+		return nil, err
86
+	}
87
+
88
+	cryptoService := cryptoservice.NewCryptoService(gun, keyStores...)
89
+
90
+	nRepo := &NotaryRepository{
91
+		gun:           gun,
92
+		baseDir:       baseDir,
93
+		baseURL:       baseURL,
94
+		tufRepoPath:   filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)),
95
+		CryptoService: cryptoService,
96
+		roundTrip:     rt,
97
+		CertManager:   certManager,
98
+	}
99
+
100
+	fileStore, err := store.NewFilesystemStore(
101
+		nRepo.tufRepoPath,
102
+		"metadata",
103
+		"json",
104
+		"",
105
+	)
106
+	if err != nil {
107
+		return nil, err
108
+	}
109
+	nRepo.fileStore = fileStore
110
+
111
+	return nRepo, nil
75 112
 }
76 113
 
77 114
 // Target represents a simplified version of the data TUF operates on, so external
78 115
 // applications don't have to depend on tuf data types.
79 116
 type Target struct {
80
-	Name   string
81
-	Hashes data.Hashes
82
-	Length int64
117
+	Name   string      // the name of the target
118
+	Hashes data.Hashes // the hash of the target
119
+	Length int64       // the size in bytes of the target
120
+}
121
+
122
+// TargetWithRole represents a Target that exists in a particular role - this is
123
+// produced by ListTargets and GetTargetByName
124
+type TargetWithRole struct {
125
+	Target
126
+	Role string
83 127
 }
84 128
 
85 129
 // NewTarget is a helper method that returns a Target
... ...
@@ -99,18 +156,48 @@ func NewTarget(targetName string, targetPath string) (*Target, error) {
99 99
 
100 100
 // Initialize creates a new repository by using rootKey as the root Key for the
101 101
 // TUF repository.
102
-func (r *NotaryRepository) Initialize(rootKeyID string) error {
102
+func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...string) error {
103 103
 	privKey, _, err := r.CryptoService.GetPrivateKey(rootKeyID)
104 104
 	if err != nil {
105 105
 		return err
106 106
 	}
107 107
 
108
-	rootCert, err := cryptoservice.GenerateCertificate(privKey, r.gun)
108
+	// currently we only support server managing timestamps and snapshots, and
109
+	// nothing else - timestamps are always managed by the server, and implicit
110
+	// (do not have to be passed in as part of `serverManagedRoles`, so that
111
+	// the API of Initialize doesn't change).
112
+	var serverManagesSnapshot bool
113
+	locallyManagedKeys := []string{
114
+		data.CanonicalTargetsRole,
115
+		data.CanonicalSnapshotRole,
116
+		// root is also locally managed, but that should have been created
117
+		// already
118
+	}
119
+	remotelyManagedKeys := []string{data.CanonicalTimestampRole}
120
+	for _, role := range serverManagedRoles {
121
+		switch role {
122
+		case data.CanonicalTimestampRole:
123
+			continue // timestamp is already in the right place
124
+		case data.CanonicalSnapshotRole:
125
+			// because we put Snapshot last
126
+			locallyManagedKeys = []string{data.CanonicalTargetsRole}
127
+			remotelyManagedKeys = append(
128
+				remotelyManagedKeys, data.CanonicalSnapshotRole)
129
+			serverManagesSnapshot = true
130
+		default:
131
+			return ErrInvalidRemoteRole{Role: role}
132
+		}
133
+	}
134
+
135
+	// Hard-coded policy: the generated certificate expires in 10 years.
136
+	startTime := time.Now()
137
+	rootCert, err := cryptoservice.GenerateCertificate(
138
+		privKey, r.gun, startTime, startTime.AddDate(10, 0, 0))
109 139
 
110 140
 	if err != nil {
111 141
 		return err
112 142
 	}
113
-	r.KeyStoreManager.AddTrustedCert(rootCert)
143
+	r.CertManager.AddTrustedCert(rootCert)
114 144
 
115 145
 	// The root key gets stored in the TUF metadata X509 encoded, linking
116 146
 	// the tuf root.json to our X509 PKI.
... ...
@@ -127,112 +214,211 @@ func (r *NotaryRepository) Initialize(rootKeyID string) error {
127 127
 		return fmt.Errorf("invalid format for root key: %s", privKey.Algorithm())
128 128
 	}
129 129
 
130
-	// All the timestamp keys are generated by the remote server.
131
-	remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
130
+	kdb := keys.NewDB()
131
+	err = addKeyForRole(kdb, data.CanonicalRootRole, rootKey)
132 132
 	if err != nil {
133 133
 		return err
134 134
 	}
135
-	rawTSKey, err := remote.GetKey("timestamp")
136
-	if err != nil {
137
-		return err
135
+
136
+	// we want to create all the local keys first so we don't have to
137
+	// make unnecessary network calls
138
+	for _, role := range locallyManagedKeys {
139
+		// This is currently hardcoding the keys to ECDSA.
140
+		key, err := r.CryptoService.Create(role, data.ECDSAKey)
141
+		if err != nil {
142
+			return err
143
+		}
144
+		if err := addKeyForRole(kdb, role, key); err != nil {
145
+			return err
146
+		}
147
+	}
148
+	for _, role := range remotelyManagedKeys {
149
+		// This key is generated by the remote server.
150
+		key, err := getRemoteKey(r.baseURL, r.gun, role, r.roundTrip)
151
+		if err != nil {
152
+			return err
153
+		}
154
+		logrus.Debugf("got remote %s %s key with keyID: %s",
155
+			role, key.Algorithm(), key.ID())
156
+		if err := addKeyForRole(kdb, role, key); err != nil {
157
+			return err
158
+		}
138 159
 	}
139 160
 
140
-	timestampKey, err := data.UnmarshalPublicKey(rawTSKey)
161
+	r.tufRepo = tuf.NewRepo(kdb, r.CryptoService)
162
+
163
+	err = r.tufRepo.InitRoot(false)
141 164
 	if err != nil {
165
+		logrus.Debug("Error on InitRoot: ", err.Error())
142 166
 		return err
143 167
 	}
144
-
145
-	logrus.Debugf("got remote %s timestamp key with keyID: %s", timestampKey.Algorithm(), timestampKey.ID())
146
-
147
-	// This is currently hardcoding the targets and snapshots keys to ECDSA
148
-	// Targets and snapshot keys are always generated locally.
149
-	targetsKey, err := r.CryptoService.Create("targets", data.ECDSAKey)
168
+	_, err = r.tufRepo.InitTargets(data.CanonicalTargetsRole)
150 169
 	if err != nil {
170
+		logrus.Debug("Error on InitTargets: ", err.Error())
151 171
 		return err
152 172
 	}
153
-	snapshotKey, err := r.CryptoService.Create("snapshot", data.ECDSAKey)
173
+	err = r.tufRepo.InitSnapshot()
154 174
 	if err != nil {
175
+		logrus.Debug("Error on InitSnapshot: ", err.Error())
155 176
 		return err
156 177
 	}
157 178
 
158
-	kdb := keys.NewDB()
179
+	return r.saveMetadata(serverManagesSnapshot)
180
+}
159 181
 
160
-	kdb.AddKey(rootKey)
161
-	kdb.AddKey(targetsKey)
162
-	kdb.AddKey(snapshotKey)
163
-	kdb.AddKey(timestampKey)
182
+// adds a TUF Change template to the given roles
183
+func addChange(cl *changelist.FileChangelist, c changelist.Change, roles ...string) error {
164 184
 
165
-	err = initRoles(kdb, rootKey, targetsKey, snapshotKey, timestampKey)
166
-	if err != nil {
167
-		return err
185
+	if len(roles) == 0 {
186
+		roles = []string{data.CanonicalTargetsRole}
168 187
 	}
169 188
 
170
-	r.tufRepo = tuf.NewRepo(kdb, r.CryptoService)
189
+	var changes []changelist.Change
190
+	for _, role := range roles {
191
+		role = strings.ToLower(role)
171 192
 
172
-	err = r.tufRepo.InitRoot(false)
173
-	if err != nil {
174
-		logrus.Debug("Error on InitRoot: ", err.Error())
175
-		switch err.(type) {
176
-		case signed.ErrInsufficientSignatures, trustmanager.ErrPasswordInvalid:
177
-		default:
193
+		// Ensure we can only add targets to the CanonicalTargetsRole,
194
+		// or a Delegation role (which is <CanonicalTargetsRole>/something else)
195
+		if role != data.CanonicalTargetsRole && !data.IsDelegation(role) {
196
+			return data.ErrInvalidRole{
197
+				Role:   role,
198
+				Reason: "cannot add targets to this role",
199
+			}
200
+		}
201
+
202
+		changes = append(changes, changelist.NewTufChange(
203
+			c.Action(),
204
+			role,
205
+			c.Type(),
206
+			c.Path(),
207
+			c.Content(),
208
+		))
209
+	}
210
+
211
+	for _, c := range changes {
212
+		if err := cl.Add(c); err != nil {
178 213
 			return err
179 214
 		}
180 215
 	}
181
-	err = r.tufRepo.InitTargets()
216
+	return nil
217
+}
218
+
219
+// AddDelegation creates a new changelist entry to add a delegation to the repository
220
+// when the changelist gets applied at publish time.  This does not do any validation
221
+// other than checking the name of the delegation to add - all that will happen
222
+// at publish time.
223
+func (r *NotaryRepository) AddDelegation(name string, threshold int,
224
+	delegationKeys []data.PublicKey, paths []string) error {
225
+
226
+	if !data.IsDelegation(name) {
227
+		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
228
+	}
229
+
230
+	cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
182 231
 	if err != nil {
183
-		logrus.Debug("Error on InitTargets: ", err.Error())
184 232
 		return err
185 233
 	}
186
-	err = r.tufRepo.InitSnapshot()
234
+	defer cl.Close()
235
+
236
+	logrus.Debugf(`Adding delegation "%s" with threshold %d, and %d keys\n`,
237
+		name, threshold, len(delegationKeys))
238
+
239
+	tdJSON, err := json.Marshal(&changelist.TufDelegation{
240
+		NewThreshold: threshold,
241
+		AddKeys:      data.KeyList(delegationKeys),
242
+		AddPaths:     paths,
243
+	})
187 244
 	if err != nil {
188
-		logrus.Debug("Error on InitSnapshot: ", err.Error())
189 245
 		return err
190 246
 	}
191 247
 
192
-	return r.saveMetadata()
248
+	template := changelist.NewTufChange(
249
+		changelist.ActionCreate,
250
+		name,
251
+		changelist.TypeTargetsDelegation,
252
+		"", // no path
253
+		tdJSON,
254
+	)
255
+
256
+	return addChange(cl, template, name)
193 257
 }
194 258
 
195
-// AddTarget adds a new target to the repository, forcing a timestamps check from TUF
196
-func (r *NotaryRepository) AddTarget(target *Target) error {
259
+// RemoveDelegation creates a new changelist entry to remove a delegation from
260
+// the repository when the changelist gets applied at publish time.
261
+// This does not validate that the delegation exists, since one might exist
262
+// after applying all changes.
263
+func (r *NotaryRepository) RemoveDelegation(name string) error {
264
+
265
+	if !data.IsDelegation(name) {
266
+		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
267
+	}
268
+
197 269
 	cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
198 270
 	if err != nil {
199 271
 		return err
200 272
 	}
201 273
 	defer cl.Close()
202
-	logrus.Debugf("Adding target \"%s\" with sha256 \"%x\" and size %d bytes.\n", target.Name, target.Hashes["sha256"], target.Length)
203 274
 
204
-	meta := data.FileMeta{Length: target.Length, Hashes: target.Hashes}
205
-	metaJSON, err := json.Marshal(meta)
275
+	logrus.Debugf(`Removing delegation "%s"\n`, name)
276
+
277
+	template := changelist.NewTufChange(
278
+		changelist.ActionDelete,
279
+		name,
280
+		changelist.TypeTargetsDelegation,
281
+		"", // no path
282
+		nil,
283
+	)
284
+
285
+	return addChange(cl, template, name)
286
+}
287
+
288
+// AddTarget creates new changelist entries to add a target to the given roles
289
+// in the repository when the changelist gets applied at publish time.
290
+// If roles are unspecified, the default role is "targets".
291
+func (r *NotaryRepository) AddTarget(target *Target, roles ...string) error {
292
+
293
+	cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
206 294
 	if err != nil {
207 295
 		return err
208 296
 	}
297
+	defer cl.Close()
298
+	logrus.Debugf("Adding target \"%s\" with sha256 \"%x\" and size %d bytes.\n", target.Name, target.Hashes["sha256"], target.Length)
209 299
 
210
-	c := changelist.NewTufChange(changelist.ActionCreate, changelist.ScopeTargets, "target", target.Name, metaJSON)
211
-	err = cl.Add(c)
300
+	meta := data.FileMeta{Length: target.Length, Hashes: target.Hashes}
301
+	metaJSON, err := json.Marshal(meta)
212 302
 	if err != nil {
213 303
 		return err
214 304
 	}
215
-	return nil
305
+
306
+	template := changelist.NewTufChange(
307
+		changelist.ActionCreate, "", changelist.TypeTargetsTarget,
308
+		target.Name, metaJSON)
309
+	return addChange(cl, template, roles...)
216 310
 }
217 311
 
218
-// RemoveTarget creates a new changelist entry to remove a target from the repository
219
-// when the changelist gets applied at publish time
220
-func (r *NotaryRepository) RemoveTarget(targetName string) error {
312
+// RemoveTarget creates new changelist entries to remove a target from the given
313
+// roles in the repository when the changelist gets applied at publish time.
314
+// If roles are unspecified, the default role is "targets".
315
+func (r *NotaryRepository) RemoveTarget(targetName string, roles ...string) error {
316
+
221 317
 	cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
222 318
 	if err != nil {
223 319
 		return err
224 320
 	}
225 321
 	logrus.Debugf("Removing target \"%s\"", targetName)
226
-	c := changelist.NewTufChange(changelist.ActionDelete, changelist.ScopeTargets, "target", targetName, nil)
227
-	err = cl.Add(c)
228
-	if err != nil {
229
-		return err
230
-	}
231
-	return nil
322
+	template := changelist.NewTufChange(changelist.ActionDelete, "",
323
+		changelist.TypeTargetsTarget, targetName, nil)
324
+	return addChange(cl, template, roles...)
232 325
 }
233 326
 
234
-// ListTargets lists all targets for the current repository
235
-func (r *NotaryRepository) ListTargets() ([]*Target, error) {
327
+// ListTargets lists all targets for the current repository. The list of
328
+// roles should be passed in order from highest to lowest priority.
329
+// IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x"
330
+// "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
331
+// its entries will be strictly shadowed by those in other parts of the "targets/a"
332
+// subtree and also the "targets/x" subtree, as we will defer parsing it until
333
+// we explicitly reach it in our iteration of the provided list of roles.
334
+func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, error) {
236 335
 	c, err := r.bootstrapClient()
237 336
 	if err != nil {
238 337
 		return nil, err
... ...
@@ -246,17 +432,61 @@ func (r *NotaryRepository) ListTargets() ([]*Target, error) {
246 246
 		return nil, err
247 247
 	}
248 248
 
249
-	var targetList []*Target
250
-	for name, meta := range r.tufRepo.Targets["targets"].Signed.Targets {
251
-		target := &Target{Name: name, Hashes: meta.Hashes, Length: meta.Length}
252
-		targetList = append(targetList, target)
249
+	if len(roles) == 0 {
250
+		roles = []string{data.CanonicalTargetsRole}
251
+	}
252
+	targets := make(map[string]*TargetWithRole)
253
+	for _, role := range roles {
254
+		// we don't need to do anything special with removing role from
255
+		// roles because listSubtree always processes role and only excludes
256
+		// descendent delegations that appear in roles.
257
+		r.listSubtree(targets, role, roles...)
258
+	}
259
+
260
+	var targetList []*TargetWithRole
261
+	for _, v := range targets {
262
+		targetList = append(targetList, v)
253 263
 	}
254 264
 
255 265
 	return targetList, nil
256 266
 }
257 267
 
258
-// GetTargetByName returns a target given a name
259
-func (r *NotaryRepository) GetTargetByName(name string) (*Target, error) {
268
+func (r *NotaryRepository) listSubtree(targets map[string]*TargetWithRole, role string, exclude ...string) {
269
+	excl := make(map[string]bool)
270
+	for _, r := range exclude {
271
+		excl[r] = true
272
+	}
273
+	roles := []string{role}
274
+	for len(roles) > 0 {
275
+		role = roles[0]
276
+		roles = roles[1:]
277
+		tgts, ok := r.tufRepo.Targets[role]
278
+		if !ok {
279
+			// not every role has to exist
280
+			continue
281
+		}
282
+		for name, meta := range tgts.Signed.Targets {
283
+			if _, ok := targets[name]; !ok {
284
+				targets[name] = &TargetWithRole{
285
+					Target: Target{Name: name, Hashes: meta.Hashes, Length: meta.Length}, Role: role}
286
+			}
287
+		}
288
+		for _, d := range tgts.Signed.Delegations.Roles {
289
+			if !excl[d.Name] {
290
+				roles = append(roles, d.Name)
291
+			}
292
+		}
293
+	}
294
+}
295
+
296
+// GetTargetByName returns a target given a name. If no roles are passed
297
+// it uses the targets role and does a search of the entire delegation
298
+// graph, finding the first entry in a breadth first search of the delegations.
299
+// If roles are passed, they should be passed in descending priority and
300
+// the target entry found in the subtree of the highest priority role
301
+// will be returned
302
+// See the IMPORTANT section on ListTargets above. Those roles also apply here.
303
+func (r *NotaryRepository) GetTargetByName(name string, roles ...string) (*TargetWithRole, error) {
260 304
 	c, err := r.bootstrapClient()
261 305
 	if err != nil {
262 306
 		return nil, err
... ...
@@ -270,14 +500,18 @@ func (r *NotaryRepository) GetTargetByName(name string) (*Target, error) {
270 270
 		return nil, err
271 271
 	}
272 272
 
273
-	meta, err := c.TargetMeta(name)
274
-	if meta == nil {
275
-		return nil, fmt.Errorf("No trust data for %s", name)
276
-	} else if err != nil {
277
-		return nil, err
273
+	if len(roles) == 0 {
274
+		roles = append(roles, data.CanonicalTargetsRole)
275
+	}
276
+	for _, role := range roles {
277
+		meta, foundRole := c.TargetMeta(role, name, roles...)
278
+		if meta != nil {
279
+			return &TargetWithRole{
280
+				Target: Target{Name: name, Hashes: meta.Hashes, Length: meta.Length}, Role: foundRole}, nil
281
+		}
278 282
 	}
283
+	return nil, fmt.Errorf("No trust data for %s", name)
279 284
 
280
-	return &Target{Name: name, Hashes: meta.Hashes, Length: meta.Length}, nil
281 285
 }
282 286
 
283 287
 // GetChangelist returns the list of the repository's unpublished changes
... ...
@@ -294,30 +528,30 @@ func (r *NotaryRepository) GetChangelist() (changelist.Changelist, error) {
294 294
 // Publish pushes the local changes in signed material to the remote notary-server
295 295
 // Conceptually it performs an operation similar to a `git rebase`
296 296
 func (r *NotaryRepository) Publish() error {
297
-	var updateRoot bool
298
-	var root *data.Signed
297
+	var initialPublish bool
299 298
 	// attempt to initialize the repo from the remote store
300 299
 	c, err := r.bootstrapClient()
301 300
 	if err != nil {
302 301
 		if _, ok := err.(store.ErrMetaNotFound); ok {
303 302
 			// if the remote store return a 404 (translated into ErrMetaNotFound),
304
-			// the repo hasn't been initialized yet. Attempt to load it from disk.
303
+			// there is no trust data for it yet. Attempt to load it from disk.
305 304
 			err := r.bootstrapRepo()
306 305
 			if err != nil {
307
-				// Repo hasn't been initialized, It must be initialized before
308
-				// it can be published. Return an error and let caller determine
309
-				// what it wants to do.
310
-				logrus.Debug(err.Error())
311
-				logrus.Debug("Repository not initialized during Publish")
312
-				return &ErrRepoNotInitialized{}
313
-			}
314
-			// We had local data but the server doesn't know about the repo yet,
315
-			// ensure we will push the initial root file
316
-			root, err = r.tufRepo.Root.ToSigned()
317
-			if err != nil {
306
+				// There are lots of reasons there might be an error, such as
307
+				// corrupt metadata.  We need better errors from bootstrapRepo.
308
+				logrus.Debugf("Unable to load repository from local files: %s",
309
+					err.Error())
310
+				if _, ok := err.(store.ErrMetaNotFound); ok {
311
+					return &ErrRepoNotInitialized{}
312
+				}
318 313
 				return err
319 314
 			}
320
-			updateRoot = true
315
+			// We had local data but the server doesn't know about the repo yet,
316
+			// ensure we will push the initial root and targets file.  Either or
317
+			// both of the root and targets may not be marked as Dirty, since
318
+			// there may not be any changes that update them, so use a
319
+			// different boolean.
320
+			initialPublish = true
321 321
 		} else {
322 322
 			// The remote store returned an error other than 404. We're
323 323
 			// unable to determine if the repo has been initialized or not.
... ...
@@ -326,8 +560,8 @@ func (r *NotaryRepository) Publish() error {
326 326
 		}
327 327
 	} else {
328 328
 		// If we were successfully able to bootstrap the client (which only pulls
329
-		// root.json), update it the rest of the tuf metadata in preparation for
330
-		// applying the changelist.
329
+		// root.json), update it with the rest of the tuf metadata in
330
+		// preparation for applying the changelist.
331 331
 		err = c.Update()
332 332
 		if err != nil {
333 333
 			if err, ok := err.(signed.ErrExpired); ok {
... ...
@@ -347,24 +581,53 @@ func (r *NotaryRepository) Publish() error {
347 347
 		return err
348 348
 	}
349 349
 
350
+	// these are the tuf files we will need to update, serialized as JSON before
351
+	// we send anything to remote
352
+	updatedFiles := make(map[string][]byte)
353
+
350 354
 	// check if our root file is nearing expiry. Resign if it is.
351
-	if nearExpiry(r.tufRepo.Root) || r.tufRepo.Root.Dirty {
355
+	if nearExpiry(r.tufRepo.Root) || r.tufRepo.Root.Dirty || initialPublish {
356
+		rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole)
352 357
 		if err != nil {
353 358
 			return err
354 359
 		}
355
-		root, err = r.tufRepo.SignRoot(data.DefaultExpires("root"))
356
-		if err != nil {
357
-			return err
360
+		updatedFiles[data.CanonicalRootRole] = rootJSON
361
+	}
362
+
363
+	// iterate through all the targets files - if they are dirty, sign and update
364
+	for roleName, roleObj := range r.tufRepo.Targets {
365
+		if roleObj.Dirty || (roleName == data.CanonicalTargetsRole && initialPublish) {
366
+			targetsJSON, err := serializeCanonicalRole(r.tufRepo, roleName)
367
+			if err != nil {
368
+				return err
369
+			}
370
+			updatedFiles[roleName] = targetsJSON
358 371
 		}
359
-		updateRoot = true
360 372
 	}
361
-	// we will always resign targets and snapshots
362
-	targets, err := r.tufRepo.SignTargets("targets", data.DefaultExpires("targets"))
363
-	if err != nil {
364
-		return err
373
+
374
+	// if we initialized the repo while designating the server as the snapshot
375
+	// signer, then there won't be a snapshots file.  However, we might now
376
+	// have a local key (if there was a rotation), so initialize one.
377
+	if r.tufRepo.Snapshot == nil {
378
+		if err := r.tufRepo.InitSnapshot(); err != nil {
379
+			return err
380
+		}
365 381
 	}
366
-	snapshot, err := r.tufRepo.SignSnapshot(data.DefaultExpires("snapshot"))
367
-	if err != nil {
382
+
383
+	snapshotJSON, err := serializeCanonicalRole(
384
+		r.tufRepo, data.CanonicalSnapshotRole)
385
+
386
+	if err == nil {
387
+		// Only update the snapshot if we've successfully signed it.
388
+		updatedFiles[data.CanonicalSnapshotRole] = snapshotJSON
389
+	} else if _, ok := err.(signed.ErrNoKeys); ok {
390
+		// If signing fails due to us not having the snapshot key, then
391
+		// assume the server is going to sign, and do not include any snapshot
392
+		// data.
393
+		logrus.Debugf("Client does not have the key to sign snapshot. " +
394
+			"Assuming that server should sign the snapshot.")
395
+	} else {
396
+		logrus.Debugf("Client was unable to sign the snapshot: %s", err.Error())
368 397
 		return err
369 398
 	}
370 399
 
... ...
@@ -373,27 +636,7 @@ func (r *NotaryRepository) Publish() error {
373 373
 		return err
374 374
 	}
375 375
 
376
-	// ensure we can marshal all the json before sending anything to remote
377
-	targetsJSON, err := json.Marshal(targets)
378
-	if err != nil {
379
-		return err
380
-	}
381
-	snapshotJSON, err := json.Marshal(snapshot)
382
-	if err != nil {
383
-		return err
384
-	}
385
-	update := make(map[string][]byte)
386
-	// if we need to update the root, marshal it and push the update to remote
387
-	if updateRoot {
388
-		rootJSON, err := json.Marshal(root)
389
-		if err != nil {
390
-			return err
391
-		}
392
-		update["root"] = rootJSON
393
-	}
394
-	update["targets"] = targetsJSON
395
-	update["snapshot"] = snapshotJSON
396
-	err = remote.SetMultiMeta(update)
376
+	err = remote.SetMultiMeta(updatedFiles)
397 377
 	if err != nil {
398 378
 		return err
399 379
 	}
... ...
@@ -407,6 +650,11 @@ func (r *NotaryRepository) Publish() error {
407 407
 	return nil
408 408
 }
409 409
 
410
+// bootstrapRepo loads the repository from the local file system.  This attempts
411
+// to load metadata for all roles.  Since server snapshots are supported,
412
+// if the snapshot metadata fails to load, that's ok.
413
+// This can also be unified with some cache reading tools from tuf/client.
414
+// This assumes that bootstrapRepo is only used by Publish()
410 415
 func (r *NotaryRepository) bootstrapRepo() error {
411 416
 	kdb := keys.NewDB()
412 417
 	tufRepo := tuf.NewRepo(kdb, r.CryptoService)
... ...
@@ -435,30 +683,32 @@ func (r *NotaryRepository) bootstrapRepo() error {
435 435
 		return err
436 436
 	}
437 437
 	tufRepo.SetTargets("targets", targets)
438
+
438 439
 	snapshotJSON, err := r.fileStore.GetMeta("snapshot", 0)
439
-	if err != nil {
440
-		return err
441
-	}
442
-	snapshot := &data.SignedSnapshot{}
443
-	err = json.Unmarshal(snapshotJSON, snapshot)
444
-	if err != nil {
440
+	if err == nil {
441
+		snapshot := &data.SignedSnapshot{}
442
+		err = json.Unmarshal(snapshotJSON, snapshot)
443
+		if err != nil {
444
+			return err
445
+		}
446
+		tufRepo.SetSnapshot(snapshot)
447
+	} else if _, ok := err.(store.ErrMetaNotFound); !ok {
445 448
 		return err
446 449
 	}
447
-	tufRepo.SetSnapshot(snapshot)
448 450
 
449 451
 	r.tufRepo = tufRepo
450 452
 
451 453
 	return nil
452 454
 }
453 455
 
454
-func (r *NotaryRepository) saveMetadata() error {
456
+func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error {
455 457
 	logrus.Debugf("Saving changes to Trusted Collection.")
456 458
 
457
-	signedRoot, err := r.tufRepo.SignRoot(data.DefaultExpires("root"))
459
+	rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole)
458 460
 	if err != nil {
459 461
 		return err
460 462
 	}
461
-	rootJSON, err := json.Marshal(signedRoot)
463
+	err = r.fileStore.SetMeta(data.CanonicalRootRole, rootJSON)
462 464
 	if err != nil {
463 465
 		return err
464 466
 	}
... ...
@@ -476,27 +726,22 @@ func (r *NotaryRepository) saveMetadata() error {
476 476
 		targetsToSave[t] = targetsJSON
477 477
 	}
478 478
 
479
-	signedSnapshot, err := r.tufRepo.SignSnapshot(data.DefaultExpires("snapshot"))
480
-	if err != nil {
481
-		return err
479
+	for role, blob := range targetsToSave {
480
+		parentDir := filepath.Dir(role)
481
+		os.MkdirAll(parentDir, 0755)
482
+		r.fileStore.SetMeta(role, blob)
482 483
 	}
483
-	snapshotJSON, err := json.Marshal(signedSnapshot)
484
-	if err != nil {
485
-		return err
484
+
485
+	if ignoreSnapshot {
486
+		return nil
486 487
 	}
487 488
 
488
-	err = r.fileStore.SetMeta("root", rootJSON)
489
+	snapshotJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalSnapshotRole)
489 490
 	if err != nil {
490 491
 		return err
491 492
 	}
492 493
 
493
-	for role, blob := range targetsToSave {
494
-		parentDir := filepath.Dir(role)
495
-		os.MkdirAll(parentDir, 0755)
496
-		r.fileStore.SetMeta(role, blob)
497
-	}
498
-
499
-	return r.fileStore.SetMeta("snapshot", snapshotJSON)
494
+	return r.fileStore.SetMeta(data.CanonicalSnapshotRole, snapshotJSON)
500 495
 }
501 496
 
502 497
 func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
... ...
@@ -515,11 +760,15 @@ func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
515 515
 			// the store and it doesn't know about the repo.
516 516
 			return nil, err
517 517
 		}
518
-		rootJSON, err = r.fileStore.GetMeta("root", maxSize)
519
-		if err != nil {
520
-			// if cache didn't return a root, we cannot proceed
521
-			return nil, store.ErrMetaNotFound{}
518
+		result, cacheErr := r.fileStore.GetMeta("root", maxSize)
519
+		if cacheErr != nil {
520
+			// if cache didn't return a root, we cannot proceed - just return
521
+			// the original error.
522
+			return nil, err
522 523
 		}
524
+		rootJSON = result
525
+		logrus.Debugf(
526
+			"Using local cache instead of remote due to failure: %s", err.Error())
523 527
 	}
524 528
 	// can't just unmarshal into SignedRoot because validate root
525 529
 	// needs the root.Signed field to still be []byte for signature
... ...
@@ -530,7 +779,7 @@ func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
530 530
 		return nil, err
531 531
 	}
532 532
 
533
-	err = r.KeyStoreManager.ValidateRoot(root, r.gun)
533
+	err = r.CertManager.ValidateRoot(root, r.gun)
534 534
 	if err != nil {
535 535
 		return nil, err
536 536
 	}
... ...
@@ -555,21 +804,32 @@ func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
555 555
 	), nil
556 556
 }
557 557
 
558
-// RotateKeys removes all existing keys associated with role and adds
559
-// the keys specified by keyIDs to the role. These changes are staged
560
-// in a changelist until publish is called.
561
-func (r *NotaryRepository) RotateKeys() error {
562
-	for _, role := range []string{"targets", "snapshot"} {
563
-		key, err := r.CryptoService.Create(role, data.ECDSAKey)
564
-		if err != nil {
565
-			return err
566
-		}
567
-		err = r.rootFileKeyChange(role, changelist.ActionCreate, key)
568
-		if err != nil {
569
-			return err
570
-		}
558
+// RotateKey removes all existing keys associated with the role, and either
559
+// creates and adds one new key or delegates managing the key to the server.
560
+// These changes are staged in a changelist until publish is called.
561
+func (r *NotaryRepository) RotateKey(role string, serverManagesKey bool) error {
562
+	if role == data.CanonicalRootRole || role == data.CanonicalTimestampRole {
563
+		return fmt.Errorf(
564
+			"notary does not currently support rotating the %s key", role)
571 565
 	}
572
-	return nil
566
+	if serverManagesKey && role == data.CanonicalTargetsRole {
567
+		return ErrInvalidRemoteRole{Role: data.CanonicalTargetsRole}
568
+	}
569
+
570
+	var (
571
+		pubKey data.PublicKey
572
+		err    error
573
+	)
574
+	if serverManagesKey {
575
+		pubKey, err = getRemoteKey(r.baseURL, r.gun, role, r.roundTrip)
576
+	} else {
577
+		pubKey, err = r.CryptoService.Create(role, data.ECDSAKey)
578
+	}
579
+	if err != nil {
580
+		return err
581
+	}
582
+
583
+	return r.rootFileKeyChange(role, changelist.ActionCreate, pubKey)
573 584
 }
574 585
 
575 586
 func (r *NotaryRepository) rootFileKeyChange(role, action string, key data.PublicKey) error {
... ...
@@ -2,7 +2,9 @@ package client
2 2
 
3 3
 import (
4 4
 	"encoding/json"
5
+	"fmt"
5 6
 	"net/http"
7
+	"path"
6 8
 	"time"
7 9
 
8 10
 	"github.com/Sirupsen/logrus"
... ...
@@ -36,10 +38,11 @@ func applyChangelist(repo *tuf.Repo, cl changelist.Changelist) error {
36 36
 		if err != nil {
37 37
 			return err
38 38
 		}
39
-		switch c.Scope() {
40
-		case changelist.ScopeTargets:
39
+		isDel := data.IsDelegation(c.Scope())
40
+		switch {
41
+		case c.Scope() == changelist.ScopeTargets || isDel:
41 42
 			err = applyTargetsChange(repo, c)
42
-		case changelist.ScopeRoot:
43
+		case c.Scope() == changelist.ScopeRoot:
43 44
 			err = applyRootChange(repo, c)
44 45
 		default:
45 46
 			logrus.Debug("scope not supported: ", c.Scope())
... ...
@@ -54,6 +57,89 @@ func applyChangelist(repo *tuf.Repo, cl changelist.Changelist) error {
54 54
 }
55 55
 
56 56
 func applyTargetsChange(repo *tuf.Repo, c changelist.Change) error {
57
+	switch c.Type() {
58
+	case changelist.TypeTargetsTarget:
59
+		return changeTargetMeta(repo, c)
60
+	case changelist.TypeTargetsDelegation:
61
+		return changeTargetsDelegation(repo, c)
62
+	default:
63
+		return fmt.Errorf("only target meta and delegations changes supported")
64
+	}
65
+}
66
+
67
+func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
68
+	switch c.Action() {
69
+	case changelist.ActionCreate:
70
+		td := changelist.TufDelegation{}
71
+		err := json.Unmarshal(c.Content(), &td)
72
+		if err != nil {
73
+			return err
74
+		}
75
+		r, err := repo.GetDelegation(c.Scope())
76
+		if _, ok := err.(data.ErrNoSuchRole); err != nil && !ok {
77
+			// error that wasn't ErrNoSuchRole
78
+			return err
79
+		}
80
+		if err == nil {
81
+			// role existed
82
+			return data.ErrInvalidRole{
83
+				Role:   c.Scope(),
84
+				Reason: "cannot create a role that already exists",
85
+			}
86
+		}
87
+		// role doesn't exist, create brand new
88
+		r, err = td.ToNewRole(c.Scope())
89
+		if err != nil {
90
+			return err
91
+		}
92
+		return repo.UpdateDelegations(r, td.AddKeys)
93
+	case changelist.ActionUpdate:
94
+		td := changelist.TufDelegation{}
95
+		err := json.Unmarshal(c.Content(), &td)
96
+		if err != nil {
97
+			return err
98
+		}
99
+		r, err := repo.GetDelegation(c.Scope())
100
+		if err != nil {
101
+			return err
102
+		}
103
+		// role exists, merge
104
+		if err := r.AddPaths(td.AddPaths); err != nil {
105
+			return err
106
+		}
107
+		if err := r.AddPathHashPrefixes(td.AddPathHashPrefixes); err != nil {
108
+			return err
109
+		}
110
+		r.RemoveKeys(td.RemoveKeys)
111
+		r.RemovePaths(td.RemovePaths)
112
+		r.RemovePathHashPrefixes(td.RemovePathHashPrefixes)
113
+		return repo.UpdateDelegations(r, td.AddKeys)
114
+	case changelist.ActionDelete:
115
+		r := data.Role{Name: c.Scope()}
116
+		return repo.DeleteDelegation(r)
117
+	default:
118
+		return fmt.Errorf("unsupported action against delegations: %s", c.Action())
119
+	}
120
+
121
+}
122
+
123
+// applies a function repeatedly, falling back on the parent role, until it no
124
+// longer can
125
+func doWithRoleFallback(role string, doFunc func(string) error) error {
126
+	for role == data.CanonicalTargetsRole || data.IsDelegation(role) {
127
+		err := doFunc(role)
128
+		if err == nil {
129
+			return nil
130
+		}
131
+		if _, ok := err.(data.ErrInvalidRole); !ok {
132
+			return err
133
+		}
134
+		role = path.Dir(role)
135
+	}
136
+	return data.ErrInvalidRole{Role: role}
137
+}
138
+
139
+func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error {
57 140
 	var err error
58 141
 	switch c.Action() {
59 142
 	case changelist.ActionCreate:
... ...
@@ -64,17 +150,29 @@ func applyTargetsChange(repo *tuf.Repo, c changelist.Change) error {
64 64
 			return err
65 65
 		}
66 66
 		files := data.Files{c.Path(): *meta}
67
-		_, err = repo.AddTargets(c.Scope(), files)
67
+
68
+		err = doWithRoleFallback(c.Scope(), func(role string) error {
69
+			_, e := repo.AddTargets(role, files)
70
+			return e
71
+		})
72
+		if err != nil {
73
+			logrus.Errorf("couldn't add target to %s: %s", c.Scope(), err.Error())
74
+		}
75
+
68 76
 	case changelist.ActionDelete:
69 77
 		logrus.Debug("changelist remove: ", c.Path())
70
-		err = repo.RemoveTargets(c.Scope(), c.Path())
78
+
79
+		err = doWithRoleFallback(c.Scope(), func(role string) error {
80
+			return repo.RemoveTargets(role, c.Path())
81
+		})
82
+		if err != nil {
83
+			logrus.Errorf("couldn't remove target from %s: %s", c.Scope(), err.Error())
84
+		}
85
+
71 86
 	default:
72 87
 		logrus.Debug("action not yet supported: ", c.Action())
73 88
 	}
74
-	if err != nil {
75
-		return err
76
-	}
77
-	return nil
89
+	return err
78 90
 }
79 91
 
80 92
 func applyRootChange(repo *tuf.Repo, c changelist.Change) error {
... ...
@@ -112,35 +210,56 @@ func nearExpiry(r *data.SignedRoot) bool {
112 112
 	return r.Signed.Expires.Before(plus6mo)
113 113
 }
114 114
 
115
-func initRoles(kdb *keys.KeyDB, rootKey, targetsKey, snapshotKey, timestampKey data.PublicKey) error {
116
-	rootRole, err := data.NewRole("root", 1, []string{rootKey.ID()}, nil, nil)
115
+// Fetches a public key from a remote store, given a gun and role
116
+func getRemoteKey(url, gun, role string, rt http.RoundTripper) (data.PublicKey, error) {
117
+	remote, err := getRemoteStore(url, gun, rt)
117 118
 	if err != nil {
118
-		return err
119
+		return nil, err
119 120
 	}
120
-	targetsRole, err := data.NewRole("targets", 1, []string{targetsKey.ID()}, nil, nil)
121
+	rawPubKey, err := remote.GetKey(role)
121 122
 	if err != nil {
122
-		return err
123
+		return nil, err
123 124
 	}
124
-	snapshotRole, err := data.NewRole("snapshot", 1, []string{snapshotKey.ID()}, nil, nil)
125
-	if err != nil {
126
-		return err
127
-	}
128
-	timestampRole, err := data.NewRole("timestamp", 1, []string{timestampKey.ID()}, nil, nil)
125
+
126
+	pubKey, err := data.UnmarshalPublicKey(rawPubKey)
129 127
 	if err != nil {
130
-		return err
128
+		return nil, err
131 129
 	}
132 130
 
133
-	if err := kdb.AddRole(rootRole); err != nil {
131
+	return pubKey, nil
132
+}
133
+
134
+// add a key to a KeyDB, and create a role for the key and add it.
135
+func addKeyForRole(kdb *keys.KeyDB, role string, key data.PublicKey) error {
136
+	theRole, err := data.NewRole(role, 1, []string{key.ID()}, nil, nil)
137
+	if err != nil {
134 138
 		return err
135 139
 	}
136
-	if err := kdb.AddRole(targetsRole); err != nil {
140
+	kdb.AddKey(key)
141
+	if err := kdb.AddRole(theRole); err != nil {
137 142
 		return err
138 143
 	}
139
-	if err := kdb.AddRole(snapshotRole); err != nil {
140
-		return err
144
+	return nil
145
+}
146
+
147
+// signs and serializes the metadata for a canonical role in a tuf repo to JSON
148
+func serializeCanonicalRole(tufRepo *tuf.Repo, role string) (out []byte, err error) {
149
+	var s *data.Signed
150
+	switch {
151
+	case role == data.CanonicalRootRole:
152
+		s, err = tufRepo.SignRoot(data.DefaultExpires(role))
153
+	case role == data.CanonicalSnapshotRole:
154
+		s, err = tufRepo.SignSnapshot(data.DefaultExpires(role))
155
+	case tufRepo.Targets[role] != nil:
156
+		s, err = tufRepo.SignTargets(
157
+			role, data.DefaultExpires(data.CanonicalTargetsRole))
158
+	default:
159
+		err = fmt.Errorf("%s not supported role to sign on the client", role)
141 160
 	}
142
-	if err := kdb.AddRole(timestampRole); err != nil {
143
-		return err
161
+
162
+	if err != nil {
163
+		return
144 164
 	}
145
-	return nil
165
+
166
+	return json.Marshal(s)
146 167
 }
... ...
@@ -5,52 +5,23 @@ package client
5 5
 import (
6 6
 	"fmt"
7 7
 	"net/http"
8
-	"path/filepath"
9 8
 
10
-	"github.com/docker/notary/cryptoservice"
11
-	"github.com/docker/notary/keystoremanager"
12 9
 	"github.com/docker/notary/passphrase"
13 10
 	"github.com/docker/notary/trustmanager"
14
-	"github.com/docker/notary/tuf/store"
15 11
 )
16 12
 
17 13
 // NewNotaryRepository is a helper method that returns a new notary repository.
18 14
 // It takes the base directory under where all the trust files will be stored
19 15
 // (usually ~/.docker/trust/).
20 16
 func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper,
21
-	retriever passphrase.Retriever) (*NotaryRepository, error) {
17
+	retriever passphrase.Retriever) (
18
+	*NotaryRepository, error) {
19
+
22 20
 	fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
23 21
 	if err != nil {
24 22
 		return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir)
25 23
 	}
26 24
 
27
-	keyStoreManager, err := keystoremanager.NewKeyStoreManager(baseDir, fileKeyStore)
28
-	if err != nil {
29
-		return nil, err
30
-	}
31
-
32
-	cryptoService := cryptoservice.NewCryptoService(gun, keyStoreManager.KeyStore)
33
-
34
-	nRepo := &NotaryRepository{
35
-		gun:             gun,
36
-		baseDir:         baseDir,
37
-		baseURL:         baseURL,
38
-		tufRepoPath:     filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)),
39
-		CryptoService:   cryptoService,
40
-		roundTrip:       rt,
41
-		KeyStoreManager: keyStoreManager,
42
-	}
43
-
44
-	fileStore, err := store.NewFilesystemStore(
45
-		nRepo.tufRepoPath,
46
-		"metadata",
47
-		"json",
48
-		"",
49
-	)
50
-	if err != nil {
51
-		return nil, err
52
-	}
53
-	nRepo.fileStore = fileStore
54
-
55
-	return nRepo, nil
25
+	return repositoryFromKeystores(baseDir, gun, baseURL, rt,
26
+		[]trustmanager.KeyStore{fileKeyStore})
56 27
 }
... ...
@@ -5,57 +5,29 @@ package client
5 5
 import (
6 6
 	"fmt"
7 7
 	"net/http"
8
-	"path/filepath"
9 8
 
10
-	"github.com/docker/notary/cryptoservice"
11
-	"github.com/docker/notary/keystoremanager"
12 9
 	"github.com/docker/notary/passphrase"
13 10
 	"github.com/docker/notary/trustmanager"
14 11
 	"github.com/docker/notary/trustmanager/yubikey"
15
-	"github.com/docker/notary/tuf/signed"
16
-	"github.com/docker/notary/tuf/store"
17 12
 )
18 13
 
19 14
 // NewNotaryRepository is a helper method that returns a new notary repository.
20 15
 // It takes the base directory under where all the trust files will be stored
21 16
 // (usually ~/.docker/trust/).
22 17
 func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper,
23
-	retriever passphrase.Retriever) (*NotaryRepository, error) {
18
+	retriever passphrase.Retriever) (
19
+	*NotaryRepository, error) {
24 20
 
25 21
 	fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
26 22
 	if err != nil {
27 23
 		return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir)
28 24
 	}
29 25
 
30
-	keyStoreManager, err := keystoremanager.NewKeyStoreManager(baseDir, fileKeyStore)
26
+	keyStores := []trustmanager.KeyStore{fileKeyStore}
31 27
 	yubiKeyStore, _ := yubikey.NewYubiKeyStore(fileKeyStore, retriever)
32
-	var cryptoService signed.CryptoService
33
-	if yubiKeyStore == nil {
34
-		cryptoService = cryptoservice.NewCryptoService(gun, keyStoreManager.KeyStore)
35
-	} else {
36
-		cryptoService = cryptoservice.NewCryptoService(gun, yubiKeyStore, keyStoreManager.KeyStore)
28
+	if yubiKeyStore != nil {
29
+		keyStores = append(keyStores, yubiKeyStore)
37 30
 	}
38 31
 
39
-	nRepo := &NotaryRepository{
40
-		gun:             gun,
41
-		baseDir:         baseDir,
42
-		baseURL:         baseURL,
43
-		tufRepoPath:     filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)),
44
-		CryptoService:   cryptoService,
45
-		roundTrip:       rt,
46
-		KeyStoreManager: keyStoreManager,
47
-	}
48
-
49
-	fileStore, err := store.NewFilesystemStore(
50
-		nRepo.tufRepoPath,
51
-		"metadata",
52
-		"json",
53
-		"",
54
-	)
55
-	if err != nil {
56
-		return nil, err
57
-	}
58
-	nRepo.fileStore = fileStore
59
-
60
-	return nRepo, nil
32
+	return repositoryFromKeystores(baseDir, gun, baseURL, rt, keyStores)
61 33
 }
... ...
@@ -1,22 +1,35 @@
1 1
 package cryptoservice
2 2
 
3 3
 import (
4
+	"crypto"
4 5
 	"crypto/rand"
5 6
 	"crypto/x509"
6 7
 	"fmt"
8
+	"time"
7 9
 
8 10
 	"github.com/docker/notary/trustmanager"
9 11
 	"github.com/docker/notary/tuf/data"
10 12
 )
11 13
 
12
-// GenerateCertificate generates an X509 Certificate from a template, given a GUN
13
-func GenerateCertificate(rootKey data.PrivateKey, gun string) (*x509.Certificate, error) {
14
+// GenerateCertificate generates an X509 Certificate from a template, given a GUN and validity interval
15
+func GenerateCertificate(rootKey data.PrivateKey, gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
14 16
 	signer := rootKey.CryptoSigner()
15 17
 	if signer == nil {
16 18
 		return nil, fmt.Errorf("key type not supported for Certificate generation: %s\n", rootKey.Algorithm())
17 19
 	}
18 20
 
19
-	template, err := trustmanager.NewCertificate(gun)
21
+	return generateCertificate(signer, gun, startTime, endTime)
22
+}
23
+
24
+// GenerateTestingCertificate generates a non-expired X509 Certificate from a template, given a GUN.
25
+// Good enough for tests where expiration does not really matter; do not use if you care about the policy.
26
+func GenerateTestingCertificate(signer crypto.Signer, gun string) (*x509.Certificate, error) {
27
+	startTime := time.Now()
28
+	return generateCertificate(signer, gun, startTime, startTime.AddDate(10, 0, 0))
29
+}
30
+
31
+func generateCertificate(signer crypto.Signer, gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
32
+	template, err := trustmanager.NewCertificate(gun, startTime, endTime)
20 33
 	if err != nil {
21 34
 		return nil, fmt.Errorf("failed to create the certificate template for: %s (%v)", gun, err)
22 35
 	}
... ...
@@ -26,7 +39,6 @@ func GenerateCertificate(rootKey data.PrivateKey, gun string) (*x509.Certificate
26 26
 		return nil, fmt.Errorf("failed to create the certificate for: %s (%v)", gun, err)
27 27
 	}
28 28
 
29
-	// Encode the new certificate into PEM
30 29
 	cert, err := x509.ParseCertificate(derBytes)
31 30
 	if err != nil {
32 31
 		return nil, fmt.Errorf("failed to parse the certificate for key: %s (%v)", gun, err)
... ...
@@ -82,10 +82,15 @@ func (cs *CryptoService) GetPrivateKey(keyID string) (k data.PrivateKey, role st
82 82
 	for _, ks := range cs.keyStores {
83 83
 		for _, keyPath := range keyPaths {
84 84
 			k, role, err = ks.GetKey(keyPath)
85
-			if err != nil {
85
+			if err == nil {
86
+				return
87
+			}
88
+			switch err.(type) {
89
+			case trustmanager.ErrPasswordInvalid, trustmanager.ErrAttemptsExceeded:
90
+				return
91
+			default:
86 92
 				continue
87 93
 			}
88
-			return
89 94
 		}
90 95
 	}
91 96
 	return // returns whatever the final values were
... ...
@@ -111,39 +116,6 @@ func (cs *CryptoService) RemoveKey(keyID string) (err error) {
111 111
 	return // returns whatever the final values were
112 112
 }
113 113
 
114
-// Sign returns the signatures for the payload with a set of keyIDs. It ignores
115
-// errors to sign and expects the called to validate if the number of returned
116
-// signatures is adequate.
117
-func (cs *CryptoService) Sign(keyIDs []string, payload []byte) ([]data.Signature, error) {
118
-	signatures := make([]data.Signature, 0, len(keyIDs))
119
-	for _, keyID := range keyIDs {
120
-		privKey, _, err := cs.GetPrivateKey(keyID)
121
-		if err != nil {
122
-			logrus.Debugf("error attempting to retrieve private key: %s, %v", keyID, err)
123
-			continue
124
-		}
125
-
126
-		sigAlgo := privKey.SignatureAlgorithm()
127
-		sig, err := privKey.Sign(rand.Reader, payload, nil)
128
-		if err != nil {
129
-			logrus.Debugf("ignoring error attempting to %s sign with keyID: %s, %v",
130
-				privKey.Algorithm(), keyID, err)
131
-			continue
132
-		}
133
-
134
-		logrus.Debugf("appending %s signature with Key ID: %s", privKey.Algorithm(), keyID)
135
-
136
-		// Append signatures to result array
137
-		signatures = append(signatures, data.Signature{
138
-			KeyID:     keyID,
139
-			Method:    sigAlgo,
140
-			Signature: sig[:],
141
-		})
142
-	}
143
-
144
-	return signatures, nil
145
-}
146
-
147 114
 // ListKeys returns a list of key IDs valid for the given role
148 115
 func (cs *CryptoService) ListKeys(role string) []string {
149 116
 	var res []string
... ...
@@ -261,12 +261,12 @@ func moveKeysByGUN(oldKeyStore, newKeyStore trustmanager.KeyStore, gun string) e
261 261
 
262 262
 func moveKeys(oldKeyStore, newKeyStore trustmanager.KeyStore) error {
263 263
 	for f := range oldKeyStore.ListKeys() {
264
-		privateKey, alias, err := oldKeyStore.GetKey(f)
264
+		privateKey, role, err := oldKeyStore.GetKey(f)
265 265
 		if err != nil {
266 266
 			return err
267 267
 		}
268 268
 
269
-		err = newKeyStore.AddKey(f, alias, privateKey)
269
+		err = newKeyStore.AddKey(f, role, privateKey)
270 270
 
271 271
 		if err != nil {
272 272
 			return err
... ...
@@ -278,7 +278,10 @@ func moveKeys(oldKeyStore, newKeyStore trustmanager.KeyStore) error {
278 278
 
279 279
 func addKeysToArchive(zipWriter *zip.Writer, newKeyStore *trustmanager.KeyFileStore) error {
280 280
 	for _, relKeyPath := range newKeyStore.ListFiles() {
281
-		fullKeyPath := filepath.Join(newKeyStore.BaseDir(), relKeyPath)
281
+		fullKeyPath, err := newKeyStore.GetPath(relKeyPath)
282
+		if err != nil {
283
+			return err
284
+		}
282 285
 
283 286
 		fi, err := os.Lstat(fullKeyPath)
284 287
 		if err != nil {
... ...
@@ -290,7 +293,11 @@ func addKeysToArchive(zipWriter *zip.Writer, newKeyStore *trustmanager.KeyFileSt
290 290
 			return err
291 291
 		}
292 292
 
293
-		infoHeader.Name = relKeyPath
293
+		relPath, err := filepath.Rel(newKeyStore.BaseDir(), fullKeyPath)
294
+		if err != nil {
295
+			return err
296
+		}
297
+		infoHeader.Name = relPath
294 298
 
295 299
 		zipFileEntryWriter, err := zipWriter.CreateHeader(infoHeader)
296 300
 		if err != nil {
... ...
@@ -8,7 +8,8 @@ notaryserver:
8 8
    - "8080"
9 9
    - "4443:4443"
10 10
   environment:
11
-    SERVICE_NAME: notary
11
+   - SERVICE_NAME=notary
12
+  command: -config=fixtures/server-config.json
12 13
 notarysigner:
13 14
   volumes:
14 15
    - /dev/bus/usb/003/010:/dev/bus/usb/002/010
... ...
@@ -17,7 +18,10 @@ notarysigner:
17 17
   dockerfile: Dockerfile.signer
18 18
   links:
19 19
    - notarymysql
20
+  command: -config=fixtures/signer-config.json
20 21
 notarymysql:
22
+  volumes:
23
+    - notarymysql:/var/lib/mysql
21 24
   build: ./notarymysql/
22 25
   ports:
23 26
     - "3306:3306"
24 27
deleted file mode 100644
... ...
@@ -1,380 +0,0 @@
1
-package keystoremanager
2
-
3
-import (
4
-	"crypto/rand"
5
-	"crypto/x509"
6
-	"errors"
7
-	"fmt"
8
-	"path/filepath"
9
-	"strings"
10
-	"time"
11
-
12
-	"github.com/Sirupsen/logrus"
13
-	"github.com/docker/notary/trustmanager"
14
-	"github.com/docker/notary/tuf/data"
15
-	"github.com/docker/notary/tuf/signed"
16
-)
17
-
18
-// KeyStoreManager is an abstraction around the root and non-root key stores,
19
-// and related CA stores
20
-type KeyStoreManager struct {
21
-	KeyStore                *trustmanager.KeyFileStore
22
-	trustedCAStore          trustmanager.X509Store
23
-	trustedCertificateStore trustmanager.X509Store
24
-}
25
-
26
-const (
27
-	trustDir       = "trusted_certificates"
28
-	rsaRootKeySize = 4096 // Used for new root keys
29
-)
30
-
31
-// ErrValidationFail is returned when there is no valid trusted certificates
32
-// being served inside of the roots.json
33
-type ErrValidationFail struct {
34
-	Reason string
35
-}
36
-
37
-// ErrValidationFail is returned when there is no valid trusted certificates
38
-// being served inside of the roots.json
39
-func (err ErrValidationFail) Error() string {
40
-	return fmt.Sprintf("could not validate the path to a trusted root: %s", err.Reason)
41
-}
42
-
43
-// ErrRootRotationFail is returned when we fail to do a full root key rotation
44
-// by either failing to add the new root certificate, or delete the old ones
45
-type ErrRootRotationFail struct {
46
-	Reason string
47
-}
48
-
49
-// ErrRootRotationFail is returned when we fail to do a full root key rotation
50
-// by either failing to add the new root certificate, or delete the old ones
51
-func (err ErrRootRotationFail) Error() string {
52
-	return fmt.Sprintf("could not rotate trust to a new trusted root: %s", err.Reason)
53
-}
54
-
55
-// NewKeyStoreManager returns an initialized KeyStoreManager, or an error
56
-// if it fails to create the KeyFileStores or load certificates
57
-func NewKeyStoreManager(baseDir string, keyStore *trustmanager.KeyFileStore) (*KeyStoreManager, error) {
58
-	trustPath := filepath.Join(baseDir, trustDir)
59
-
60
-	// Load all CAs that aren't expired and don't use SHA1
61
-	trustedCAStore, err := trustmanager.NewX509FilteredFileStore(trustPath, func(cert *x509.Certificate) bool {
62
-		return cert.IsCA && cert.BasicConstraintsValid && cert.SubjectKeyId != nil &&
63
-			time.Now().Before(cert.NotAfter) &&
64
-			cert.SignatureAlgorithm != x509.SHA1WithRSA &&
65
-			cert.SignatureAlgorithm != x509.DSAWithSHA1 &&
66
-			cert.SignatureAlgorithm != x509.ECDSAWithSHA1
67
-	})
68
-	if err != nil {
69
-		return nil, err
70
-	}
71
-
72
-	// Load all individual (non-CA) certificates that aren't expired and don't use SHA1
73
-	trustedCertificateStore, err := trustmanager.NewX509FilteredFileStore(trustPath, func(cert *x509.Certificate) bool {
74
-		return !cert.IsCA &&
75
-			time.Now().Before(cert.NotAfter) &&
76
-			cert.SignatureAlgorithm != x509.SHA1WithRSA &&
77
-			cert.SignatureAlgorithm != x509.DSAWithSHA1 &&
78
-			cert.SignatureAlgorithm != x509.ECDSAWithSHA1
79
-	})
80
-	if err != nil {
81
-		return nil, err
82
-	}
83
-
84
-	return &KeyStoreManager{
85
-		KeyStore:                keyStore,
86
-		trustedCAStore:          trustedCAStore,
87
-		trustedCertificateStore: trustedCertificateStore,
88
-	}, nil
89
-}
90
-
91
-// TrustedCertificateStore returns the trusted certificate store being managed
92
-// by this KeyStoreManager
93
-func (km *KeyStoreManager) TrustedCertificateStore() trustmanager.X509Store {
94
-	return km.trustedCertificateStore
95
-}
96
-
97
-// TrustedCAStore returns the CA store being managed by this KeyStoreManager
98
-func (km *KeyStoreManager) TrustedCAStore() trustmanager.X509Store {
99
-	return km.trustedCAStore
100
-}
101
-
102
-// AddTrustedCert adds a cert to the trusted certificate store (not the CA
103
-// store)
104
-func (km *KeyStoreManager) AddTrustedCert(cert *x509.Certificate) {
105
-	km.trustedCertificateStore.AddCert(cert)
106
-}
107
-
108
-// AddTrustedCACert adds a cert to the trusted CA certificate store
109
-func (km *KeyStoreManager) AddTrustedCACert(cert *x509.Certificate) {
110
-	km.trustedCAStore.AddCert(cert)
111
-}
112
-
113
-// GenRootKey generates a new root key
114
-func (km *KeyStoreManager) GenRootKey(algorithm string) (string, error) {
115
-	var err error
116
-	var privKey data.PrivateKey
117
-
118
-	// We don't want external API callers to rely on internal TUF data types, so
119
-	// the API here should continue to receive a string algorithm, and ensure
120
-	// that it is downcased
121
-	switch strings.ToLower(algorithm) {
122
-	case data.RSAKey:
123
-		privKey, err = trustmanager.GenerateRSAKey(rand.Reader, rsaRootKeySize)
124
-	case data.ECDSAKey:
125
-		privKey, err = trustmanager.GenerateECDSAKey(rand.Reader)
126
-	default:
127
-		return "", fmt.Errorf("only RSA or ECDSA keys are currently supported. Found: %s", algorithm)
128
-
129
-	}
130
-	if err != nil {
131
-		return "", fmt.Errorf("failed to generate private key: %v", err)
132
-	}
133
-
134
-	// Changing the root
135
-	km.KeyStore.AddKey(privKey.ID(), "root", privKey)
136
-
137
-	return privKey.ID(), nil
138
-}
139
-
140
-/*
141
-ValidateRoot receives a new root, validates its correctness and attempts to
142
-do root key rotation if needed.
143
-
144
-First we list the current trusted certificates we have for a particular GUN. If
145
-that list is non-empty means that we've already seen this repository before, and
146
-have a list of trusted certificates for it. In this case, we use this list of
147
-certificates to attempt to validate this root file.
148
-
149
-If the previous validation suceeds, or in the case where we found no trusted
150
-certificates for this particular GUN, we check the integrity of the root by
151
-making sure that it is validated by itself. This means that we will attempt to
152
-validate the root data with the certificates that are included in the root keys
153
-themselves.
154
-
155
-If this last steps succeeds, we attempt to do root rotation, by ensuring that
156
-we only trust the certificates that are present in the new root.
157
-
158
-This mechanism of operation is essentially Trust On First Use (TOFU): if we
159
-have never seen a certificate for a particular CN, we trust it. If later we see
160
-a different certificate for that certificate, we return an ErrValidationFailed error.
161
-
162
-Note that since we only allow trust data to be downloaded over an HTTPS channel
163
-we are using the current public PKI to validate the first download of the certificate
164
-adding an extra layer of security over the normal (SSH style) trust model.
165
-We shall call this: TOFUS.
166
-*/
167
-func (km *KeyStoreManager) ValidateRoot(root *data.Signed, gun string) error {
168
-	logrus.Debugf("entered ValidateRoot with dns: %s", gun)
169
-	signedRoot, err := data.RootFromSigned(root)
170
-	if err != nil {
171
-		return err
172
-	}
173
-
174
-	// Retrieve all the leaf certificates in root for which the CN matches the GUN
175
-	allValidCerts, err := validRootLeafCerts(signedRoot, gun)
176
-	if err != nil {
177
-		logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err)
178
-		return &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
179
-	}
180
-
181
-	// Retrieve all the trusted certificates that match this gun
182
-	certsForCN, err := km.trustedCertificateStore.GetCertificatesByCN(gun)
183
-	if err != nil {
184
-		// If the error that we get back is different than ErrNoCertificatesFound
185
-		// we couldn't check if there are any certificates with this CN already
186
-		// trusted. Let's take the conservative approach and return a failed validation
187
-		if _, ok := err.(*trustmanager.ErrNoCertificatesFound); !ok {
188
-			logrus.Debugf("error retrieving trusted certificates for: %s, %v", gun, err)
189
-			return &ErrValidationFail{Reason: "unable to retrieve trusted certificates"}
190
-		}
191
-	}
192
-
193
-	// If we have certificates that match this specific GUN, let's make sure to
194
-	// use them first to validate that this new root is valid.
195
-	if len(certsForCN) != 0 {
196
-		logrus.Debugf("found %d valid root certificates for %s", len(certsForCN), gun)
197
-		err = signed.VerifyRoot(root, 0, trustmanager.CertsToKeys(certsForCN))
198
-		if err != nil {
199
-			logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
200
-			return &ErrValidationFail{Reason: "failed to validate data with current trusted certificates"}
201
-		}
202
-	} else {
203
-		logrus.Debugf("found no currently valid root certificates for %s", gun)
204
-	}
205
-
206
-	// Validate the integrity of the new root (does it have valid signatures)
207
-	err = signed.VerifyRoot(root, 0, trustmanager.CertsToKeys(allValidCerts))
208
-	if err != nil {
209
-		logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
210
-		return &ErrValidationFail{Reason: "failed to validate integrity of roots"}
211
-	}
212
-
213
-	// Getting here means A) we had trusted certificates and both the
214
-	// old and new validated this root; or B) we had no trusted certificates but
215
-	// the new set of certificates has integrity (self-signed)
216
-	logrus.Debugf("entering root certificate rotation for: %s", gun)
217
-
218
-	// Do root certificate rotation: we trust only the certs present in the new root
219
-	// First we add all the new certificates (even if they already exist)
220
-	for _, cert := range allValidCerts {
221
-		err := km.trustedCertificateStore.AddCert(cert)
222
-		if err != nil {
223
-			// If the error is already exists we don't fail the rotation
224
-			if _, ok := err.(*trustmanager.ErrCertExists); ok {
225
-				logrus.Debugf("ignoring certificate addition to: %s", gun)
226
-				continue
227
-			}
228
-			logrus.Debugf("error adding new trusted certificate for: %s, %v", gun, err)
229
-		}
230
-	}
231
-
232
-	// Now we delete old certificates that aren't present in the new root
233
-	for certID, cert := range certsToRemove(certsForCN, allValidCerts) {
234
-		logrus.Debugf("removing certificate with certID: %s", certID)
235
-		err = km.trustedCertificateStore.RemoveCert(cert)
236
-		if err != nil {
237
-			logrus.Debugf("failed to remove trusted certificate with keyID: %s, %v", certID, err)
238
-			return &ErrRootRotationFail{Reason: "failed to rotate root keys"}
239
-		}
240
-	}
241
-
242
-	logrus.Debugf("Root validation succeeded for %s", gun)
243
-	return nil
244
-}
245
-
246
-// validRootLeafCerts returns a list of non-exipired, non-sha1 certificates whoose
247
-// Common-Names match the provided GUN
248
-func validRootLeafCerts(root *data.SignedRoot, gun string) ([]*x509.Certificate, error) {
249
-	// Get a list of all of the leaf certificates present in root
250
-	allLeafCerts, _ := parseAllCerts(root)
251
-	var validLeafCerts []*x509.Certificate
252
-
253
-	// Go through every leaf certificate and check that the CN matches the gun
254
-	for _, cert := range allLeafCerts {
255
-		// Validate that this leaf certificate has a CN that matches the exact gun
256
-		if cert.Subject.CommonName != gun {
257
-			logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s", cert.Subject.CommonName)
258
-			continue
259
-		}
260
-		// Make sure the certificate is not expired
261
-		if time.Now().After(cert.NotAfter) {
262
-			logrus.Debugf("error leaf certificate is expired")
263
-			continue
264
-		}
265
-
266
-		// We don't allow root certificates that use SHA1
267
-		if cert.SignatureAlgorithm == x509.SHA1WithRSA ||
268
-			cert.SignatureAlgorithm == x509.DSAWithSHA1 ||
269
-			cert.SignatureAlgorithm == x509.ECDSAWithSHA1 {
270
-
271
-			logrus.Debugf("error certificate uses deprecated hashing algorithm (SHA1)")
272
-			continue
273
-		}
274
-
275
-		validLeafCerts = append(validLeafCerts, cert)
276
-	}
277
-
278
-	if len(validLeafCerts) < 1 {
279
-		logrus.Debugf("didn't find any valid leaf certificates for %s", gun)
280
-		return nil, errors.New("no valid leaf certificates found in any of the root keys")
281
-	}
282
-
283
-	logrus.Debugf("found %d valid leaf certificates for %s", len(validLeafCerts), gun)
284
-	return validLeafCerts, nil
285
-}
286
-
287
-// parseAllCerts returns two maps, one with all of the leafCertificates and one
288
-// with all the intermediate certificates found in signedRoot
289
-func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, map[string][]*x509.Certificate) {
290
-	leafCerts := make(map[string]*x509.Certificate)
291
-	intCerts := make(map[string][]*x509.Certificate)
292
-
293
-	// Before we loop through all root keys available, make sure any exist
294
-	rootRoles, ok := signedRoot.Signed.Roles["root"]
295
-	if !ok {
296
-		logrus.Debugf("tried to parse certificates from invalid root signed data")
297
-		return nil, nil
298
-	}
299
-
300
-	logrus.Debugf("found the following root keys: %v", rootRoles.KeyIDs)
301
-	// Iterate over every keyID for the root role inside of roots.json
302
-	for _, keyID := range rootRoles.KeyIDs {
303
-		// check that the key exists in the signed root keys map
304
-		key, ok := signedRoot.Signed.Keys[keyID]
305
-		if !ok {
306
-			logrus.Debugf("error while getting data for keyID: %s", keyID)
307
-			continue
308
-		}
309
-
310
-		// Decode all the x509 certificates that were bundled with this
311
-		// Specific root key
312
-		decodedCerts, err := trustmanager.LoadCertBundleFromPEM(key.Public())
313
-		if err != nil {
314
-			logrus.Debugf("error while parsing root certificate with keyID: %s, %v", keyID, err)
315
-			continue
316
-		}
317
-
318
-		// Get all non-CA certificates in the decoded certificates
319
-		leafCertList := trustmanager.GetLeafCerts(decodedCerts)
320
-
321
-		// If we got no leaf certificates or we got more than one, fail
322
-		if len(leafCertList) != 1 {
323
-			logrus.Debugf("invalid chain due to leaf certificate missing or too many leaf certificates for keyID: %s", keyID)
324
-			continue
325
-		}
326
-
327
-		// Get the ID of the leaf certificate
328
-		leafCert := leafCertList[0]
329
-		leafID, err := trustmanager.FingerprintCert(leafCert)
330
-		if err != nil {
331
-			logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", keyID, err)
332
-			continue
333
-		}
334
-
335
-		// Store the leaf cert in the map
336
-		leafCerts[leafID] = leafCert
337
-
338
-		// Get all the remainder certificates marked as a CA to be used as intermediates
339
-		intermediateCerts := trustmanager.GetIntermediateCerts(decodedCerts)
340
-		intCerts[leafID] = intermediateCerts
341
-	}
342
-
343
-	return leafCerts, intCerts
344
-}
345
-
346
-// certsToRemove returns all the certifificates from oldCerts that aren't present
347
-// in newCerts
348
-func certsToRemove(oldCerts, newCerts []*x509.Certificate) map[string]*x509.Certificate {
349
-	certsToRemove := make(map[string]*x509.Certificate)
350
-
351
-	// If no newCerts were provided
352
-	if len(newCerts) == 0 {
353
-		return certsToRemove
354
-	}
355
-
356
-	// Populate a map with all the IDs from newCert
357
-	var newCertMap = make(map[string]struct{})
358
-	for _, cert := range newCerts {
359
-		certID, err := trustmanager.FingerprintCert(cert)
360
-		if err != nil {
361
-			logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", certID, err)
362
-			continue
363
-		}
364
-		newCertMap[certID] = struct{}{}
365
-	}
366
-
367
-	// Iterate over all the old certificates and check to see if we should remove them
368
-	for _, cert := range oldCerts {
369
-		certID, err := trustmanager.FingerprintCert(cert)
370
-		if err != nil {
371
-			logrus.Debugf("error while fingerprinting root certificate with certID: %s, %v", certID, err)
372
-			continue
373
-		}
374
-		if _, ok := newCertMap[certID]; !ok {
375
-			certsToRemove[certID] = cert
376
-		}
377
-	}
378
-
379
-	return certsToRemove
380
-}
... ...
@@ -99,12 +99,16 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]s
99 99
 			return "", true, ErrTooManyAttempts
100 100
 		}
101 101
 
102
-		state, err := term.SaveState(0)
103
-		if err != nil {
104
-			return "", false, err
102
+		// If typing on the terminal, we do not want the terminal to echo the
103
+		// password that is typed (so it doesn't display)
104
+		if term.IsTerminal(0) {
105
+			state, err := term.SaveState(0)
106
+			if err != nil {
107
+				return "", false, err
108
+			}
109
+			term.DisableEcho(0, state)
110
+			defer term.RestoreTerminal(0, state)
105 111
 		}
106
-		term.DisableEcho(0, state)
107
-		defer term.RestoreTerminal(0, state)
108 112
 
109 113
 		stdin := bufio.NewReader(in)
110 114
 
... ...
@@ -1,45 +1,13 @@
1 1
 package trustmanager
2 2
 
3 3
 import (
4
-	"errors"
5 4
 	"fmt"
6
-	"github.com/docker/notary"
7 5
 	"io/ioutil"
8 6
 	"os"
9 7
 	"path/filepath"
10 8
 	"strings"
11
-	"sync"
12 9
 )
13 10
 
14
-const (
15
-	visible = notary.PubCertPerms
16
-	private = notary.PrivKeyPerms
17
-)
18
-
19
-var (
20
-	// ErrPathOutsideStore indicates that the returned path would be
21
-	// outside the store
22
-	ErrPathOutsideStore = errors.New("path outside file store")
23
-)
24
-
25
-// LimitedFileStore implements the bare bones primitives (no hierarchy)
26
-type LimitedFileStore interface {
27
-	Add(fileName string, data []byte) error
28
-	Remove(fileName string) error
29
-	Get(fileName string) ([]byte, error)
30
-	ListFiles() []string
31
-}
32
-
33
-// FileStore is the interface for full-featured FileStores
34
-type FileStore interface {
35
-	LimitedFileStore
36
-
37
-	RemoveDir(directoryName string) error
38
-	GetPath(fileName string) (string, error)
39
-	ListDir(directoryName string) []string
40
-	BaseDir() string
41
-}
42
-
43 11
 // SimpleFileStore implements FileStore
44 12
 type SimpleFileStore struct {
45 13
 	baseDir string
... ...
@@ -55,6 +23,10 @@ func NewSimpleFileStore(baseDir string, fileExt string) (*SimpleFileStore, error
55 55
 		return nil, err
56 56
 	}
57 57
 
58
+	if !strings.HasPrefix(fileExt, ".") {
59
+		fileExt = "." + fileExt
60
+	}
61
+
58 62
 	return &SimpleFileStore{
59 63
 		baseDir: baseDir,
60 64
 		fileExt: fileExt,
... ...
@@ -68,6 +40,10 @@ func NewPrivateSimpleFileStore(baseDir string, fileExt string) (*SimpleFileStore
68 68
 		return nil, err
69 69
 	}
70 70
 
71
+	if !strings.HasPrefix(fileExt, ".") {
72
+		fileExt = "." + fileExt
73
+	}
74
+
71 75
 	return &SimpleFileStore{
72 76
 		baseDir: baseDir,
73 77
 		fileExt: fileExt,
... ...
@@ -176,7 +152,8 @@ func (f *SimpleFileStore) list(path string) []string {
176 176
 			if err != nil {
177 177
 				return err
178 178
 			}
179
-			files = append(files, fp)
179
+			trimmed := strings.TrimSuffix(fp, f.fileExt)
180
+			files = append(files, trimmed)
180 181
 		}
181 182
 		return nil
182 183
 	})
... ...
@@ -185,7 +162,7 @@ func (f *SimpleFileStore) list(path string) []string {
185 185
 
186 186
 // genFileName returns the name using the right extension
187 187
 func (f *SimpleFileStore) genFileName(name string) string {
188
-	return fmt.Sprintf("%s.%s", name, f.fileExt)
188
+	return fmt.Sprintf("%s%s", name, f.fileExt)
189 189
 }
190 190
 
191 191
 // BaseDir returns the base directory of the filestore
... ...
@@ -212,68 +189,3 @@ func createDirectory(dir string, perms os.FileMode) error {
212 212
 	dir = dir + "/"
213 213
 	return os.MkdirAll(dir, perms)
214 214
 }
215
-
216
-// MemoryFileStore is an implementation of LimitedFileStore that keeps
217
-// the contents in memory.
218
-type MemoryFileStore struct {
219
-	sync.Mutex
220
-
221
-	files map[string][]byte
222
-}
223
-
224
-// NewMemoryFileStore creates a MemoryFileStore
225
-func NewMemoryFileStore() *MemoryFileStore {
226
-	return &MemoryFileStore{
227
-		files: make(map[string][]byte),
228
-	}
229
-}
230
-
231
-// ErrMemFileNotFound is returned for a nonexistent "file" in the memory file
232
-// store
233
-var ErrMemFileNotFound = errors.New("key not found in memory file store")
234
-
235
-// Add writes data to a file with a given name
236
-func (f *MemoryFileStore) Add(name string, data []byte) error {
237
-	f.Lock()
238
-	defer f.Unlock()
239
-
240
-	f.files[name] = data
241
-	return nil
242
-}
243
-
244
-// Remove removes a file identified by name
245
-func (f *MemoryFileStore) Remove(name string) error {
246
-	f.Lock()
247
-	defer f.Unlock()
248
-
249
-	if _, present := f.files[name]; !present {
250
-		return ErrMemFileNotFound
251
-	}
252
-	delete(f.files, name)
253
-
254
-	return nil
255
-}
256
-
257
-// Get returns the data given a file name
258
-func (f *MemoryFileStore) Get(name string) ([]byte, error) {
259
-	f.Lock()
260
-	defer f.Unlock()
261
-
262
-	fileData, present := f.files[name]
263
-	if !present {
264
-		return nil, ErrMemFileNotFound
265
-	}
266
-
267
-	return fileData, nil
268
-}
269
-
270
-// ListFiles lists all the files inside of a store
271
-func (f *MemoryFileStore) ListFiles() []string {
272
-	var list []string
273
-
274
-	for name := range f.files {
275
-		list = append(list, name)
276
-	}
277
-
278
-	return list
279
-}
... ...
@@ -1,11 +1,13 @@
1 1
 package trustmanager
2 2
 
3 3
 import (
4
+	"encoding/pem"
4 5
 	"fmt"
5 6
 	"path/filepath"
6 7
 	"strings"
7 8
 	"sync"
8 9
 
10
+	"github.com/Sirupsen/logrus"
9 11
 	"github.com/docker/notary/passphrase"
10 12
 	"github.com/docker/notary/tuf/data"
11 13
 )
... ...
@@ -54,10 +56,10 @@ func (s *KeyFileStore) Name() string {
54 54
 }
55 55
 
56 56
 // AddKey stores the contents of a PEM-encoded private key as a PEM block
57
-func (s *KeyFileStore) AddKey(name, alias string, privKey data.PrivateKey) error {
57
+func (s *KeyFileStore) AddKey(name, role string, privKey data.PrivateKey) error {
58 58
 	s.Lock()
59 59
 	defer s.Unlock()
60
-	return addKey(s, s.Retriever, s.cachedKeys, name, alias, privKey)
60
+	return addKey(s, s.Retriever, s.cachedKeys, name, role, privKey)
61 61
 }
62 62
 
63 63
 // GetKey returns the PrivateKey given a KeyID
... ...
@@ -153,7 +155,7 @@ func (s *KeyMemoryStore) ImportKey(pemBytes []byte, alias string) error {
153 153
 	return importKey(s, s.Retriever, s.cachedKeys, alias, pemBytes)
154 154
 }
155 155
 
156
-func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name, alias string, privKey data.PrivateKey) error {
156
+func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name, role string, privKey data.PrivateKey) error {
157 157
 
158 158
 	var (
159 159
 		chosenPassphrase string
... ...
@@ -162,7 +164,7 @@ func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached
162 162
 	)
163 163
 
164 164
 	for attempts := 0; ; attempts++ {
165
-		chosenPassphrase, giveup, err = passphraseRetriever(name, alias, true, attempts)
165
+		chosenPassphrase, giveup, err = passphraseRetriever(name, role, true, attempts)
166 166
 		if err != nil {
167 167
 			continue
168 168
 		}
... ...
@@ -175,25 +177,37 @@ func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached
175 175
 		break
176 176
 	}
177 177
 
178
-	return encryptAndAddKey(s, chosenPassphrase, cachedKeys, name, alias, privKey)
178
+	return encryptAndAddKey(s, chosenPassphrase, cachedKeys, name, role, privKey)
179 179
 }
180 180
 
181
-func getKeyAlias(s LimitedFileStore, keyID string) (string, error) {
182
-	files := s.ListFiles()
183
-
181
+// getKeyRole finds the role for the given keyID. It attempts to look
182
+// both in the newer format PEM headers, and also in the legacy filename
183
+// format. It returns: the role, whether it was found in the legacy format
184
+// (true == legacy), and an error
185
+func getKeyRole(s LimitedFileStore, keyID string) (string, bool, error) {
184 186
 	name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID)))
185 187
 
186
-	for _, file := range files {
188
+	for _, file := range s.ListFiles() {
187 189
 		filename := filepath.Base(file)
188 190
 
189 191
 		if strings.HasPrefix(filename, name) {
190
-			aliasPlusDotKey := strings.TrimPrefix(filename, name+"_")
191
-			retVal := strings.TrimSuffix(aliasPlusDotKey, "."+keyExtension)
192
-			return retVal, nil
192
+			d, err := s.Get(file)
193
+			if err != nil {
194
+				return "", false, err
195
+			}
196
+			block, _ := pem.Decode(d)
197
+			if block != nil {
198
+				if role, ok := block.Headers["role"]; ok {
199
+					return role, false, nil
200
+				}
201
+			}
202
+
203
+			role := strings.TrimPrefix(filename, name+"_")
204
+			return role, true, nil
193 205
 		}
194 206
 	}
195 207
 
196
-	return "", &ErrKeyNotFound{KeyID: keyID}
208
+	return "", false, &ErrKeyNotFound{KeyID: keyID}
197 209
 }
198 210
 
199 211
 // GetKey returns the PrivateKey given a KeyID
... ...
@@ -208,14 +222,13 @@ func getKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached
208 208
 		return nil, "", err
209 209
 	}
210 210
 
211
-	var retErr error
212 211
 	// See if the key is encrypted. If its encrypted we'll fail to parse the private key
213 212
 	privKey, err := ParsePEMPrivateKey(keyBytes, "")
214 213
 	if err != nil {
215
-		privKey, _, retErr = GetPasswdDecryptBytes(passphraseRetriever, keyBytes, name, string(keyAlias))
216
-	}
217
-	if retErr != nil {
218
-		return nil, "", retErr
214
+		privKey, _, err = GetPasswdDecryptBytes(passphraseRetriever, keyBytes, name, string(keyAlias))
215
+		if err != nil {
216
+			return nil, "", err
217
+		}
219 218
 	}
220 219
 	cachedKeys[name] = &cachedKey{alias: keyAlias, key: privKey}
221 220
 	return privKey, keyAlias, nil
... ...
@@ -228,44 +241,58 @@ func listKeys(s LimitedFileStore) map[string]string {
228 228
 
229 229
 	for _, f := range s.ListFiles() {
230 230
 		// Remove the prefix of the directory from the filename
231
-		if f[:len(rootKeysSubdir)] == rootKeysSubdir {
232
-			f = strings.TrimPrefix(f, rootKeysSubdir+"/")
231
+		var keyIDFull string
232
+		if strings.HasPrefix(f, rootKeysSubdir+"/") {
233
+			keyIDFull = strings.TrimPrefix(f, rootKeysSubdir+"/")
233 234
 		} else {
234
-			f = strings.TrimPrefix(f, nonRootKeysSubdir+"/")
235
+			keyIDFull = strings.TrimPrefix(f, nonRootKeysSubdir+"/")
235 236
 		}
236 237
 
237
-		// Remove the extension from the full filename
238
-		// abcde_root.key becomes abcde_root
239
-		keyIDFull := strings.TrimSpace(strings.TrimSuffix(f, filepath.Ext(f)))
238
+		keyIDFull = strings.TrimSpace(keyIDFull)
240 239
 
241
-		// If the key does not have a _, it is malformed
240
+		// If the key does not have a _, we'll attempt to
241
+		// read it as a PEM
242 242
 		underscoreIndex := strings.LastIndex(keyIDFull, "_")
243 243
 		if underscoreIndex == -1 {
244
-			continue
244
+			d, err := s.Get(f)
245
+			if err != nil {
246
+				logrus.Error(err)
247
+				continue
248
+			}
249
+			block, _ := pem.Decode(d)
250
+			if block == nil {
251
+				continue
252
+			}
253
+			if role, ok := block.Headers["role"]; ok {
254
+				keyIDMap[keyIDFull] = role
255
+			}
256
+		} else {
257
+			// The keyID is the first part of the keyname
258
+			// The KeyAlias is the second part of the keyname
259
+			// in a key named abcde_root, abcde is the keyID and root is the KeyAlias
260
+			keyID := keyIDFull[:underscoreIndex]
261
+			keyAlias := keyIDFull[underscoreIndex+1:]
262
+			keyIDMap[keyID] = keyAlias
245 263
 		}
246
-
247
-		// The keyID is the first part of the keyname
248
-		// The KeyAlias is the second part of the keyname
249
-		// in a key named abcde_root, abcde is the keyID and root is the KeyAlias
250
-		keyID := keyIDFull[:underscoreIndex]
251
-		keyAlias := keyIDFull[underscoreIndex+1:]
252
-		keyIDMap[keyID] = keyAlias
253 264
 	}
254 265
 	return keyIDMap
255 266
 }
256 267
 
257 268
 // RemoveKey removes the key from the keyfilestore
258 269
 func removeKey(s LimitedFileStore, cachedKeys map[string]*cachedKey, name string) error {
259
-	keyAlias, err := getKeyAlias(s, name)
270
+	role, legacy, err := getKeyRole(s, name)
260 271
 	if err != nil {
261 272
 		return err
262 273
 	}
263 274
 
264 275
 	delete(cachedKeys, name)
265 276
 
277
+	if legacy {
278
+		name = name + "_" + role
279
+	}
280
+
266 281
 	// being in a subdirectory is for backwards compatibliity
267
-	filename := name + "_" + keyAlias
268
-	err = s.Remove(filepath.Join(getSubdir(keyAlias), filename))
282
+	err = s.Remove(filepath.Join(getSubdir(role), name))
269 283
 	if err != nil {
270 284
 		return err
271 285
 	}
... ...
@@ -283,18 +310,21 @@ func getSubdir(alias string) string {
283 283
 // Given a key ID, gets the bytes and alias belonging to that key if the key
284 284
 // exists
285 285
 func getRawKey(s LimitedFileStore, name string) ([]byte, string, error) {
286
-	keyAlias, err := getKeyAlias(s, name)
286
+	role, legacy, err := getKeyRole(s, name)
287 287
 	if err != nil {
288 288
 		return nil, "", err
289 289
 	}
290 290
 
291
-	filename := name + "_" + keyAlias
291
+	if legacy {
292
+		name = name + "_" + role
293
+	}
294
+
292 295
 	var keyBytes []byte
293
-	keyBytes, err = s.Get(filepath.Join(getSubdir(keyAlias), filename))
296
+	keyBytes, err = s.Get(filepath.Join(getSubdir(role), name))
294 297
 	if err != nil {
295 298
 		return nil, "", err
296 299
 	}
297
-	return keyBytes, keyAlias, nil
300
+	return keyBytes, role, nil
298 301
 }
299 302
 
300 303
 // GetPasswdDecryptBytes gets the password to decript the given pem bytes.
... ...
@@ -335,7 +365,7 @@ func GetPasswdDecryptBytes(passphraseRetriever passphrase.Retriever, pemBytes []
335 335
 	return privKey, passwd, nil
336 336
 }
337 337
 
338
-func encryptAndAddKey(s LimitedFileStore, passwd string, cachedKeys map[string]*cachedKey, name, alias string, privKey data.PrivateKey) error {
338
+func encryptAndAddKey(s LimitedFileStore, passwd string, cachedKeys map[string]*cachedKey, name, role string, privKey data.PrivateKey) error {
339 339
 
340 340
 	var (
341 341
 		pemPrivKey []byte
... ...
@@ -343,17 +373,17 @@ func encryptAndAddKey(s LimitedFileStore, passwd string, cachedKeys map[string]*
343 343
 	)
344 344
 
345 345
 	if passwd != "" {
346
-		pemPrivKey, err = EncryptPrivateKey(privKey, passwd)
346
+		pemPrivKey, err = EncryptPrivateKey(privKey, role, passwd)
347 347
 	} else {
348
-		pemPrivKey, err = KeyToPEM(privKey)
348
+		pemPrivKey, err = KeyToPEM(privKey, role)
349 349
 	}
350 350
 
351 351
 	if err != nil {
352 352
 		return err
353 353
 	}
354 354
 
355
-	cachedKeys[name] = &cachedKey{alias: alias, key: privKey}
356
-	return s.Add(filepath.Join(getSubdir(alias), name+"_"+alias), pemPrivKey)
355
+	cachedKeys[name] = &cachedKey{alias: role, key: privKey}
356
+	return s.Add(filepath.Join(getSubdir(role), name), pemPrivKey)
357 357
 }
358 358
 
359 359
 func importKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, alias string, pemBytes []byte) error {
360 360
new file mode 100644
... ...
@@ -0,0 +1,67 @@
0
+package trustmanager
1
+
2
+import (
3
+	"os"
4
+	"sync"
5
+)
6
+
7
+// MemoryFileStore is an implementation of LimitedFileStore that keeps
8
+// the contents in memory.
9
+type MemoryFileStore struct {
10
+	sync.Mutex
11
+
12
+	files map[string][]byte
13
+}
14
+
15
+// NewMemoryFileStore creates a MemoryFileStore
16
+func NewMemoryFileStore() *MemoryFileStore {
17
+	return &MemoryFileStore{
18
+		files: make(map[string][]byte),
19
+	}
20
+}
21
+
22
+// Add writes data to a file with a given name
23
+func (f *MemoryFileStore) Add(name string, data []byte) error {
24
+	f.Lock()
25
+	defer f.Unlock()
26
+
27
+	f.files[name] = data
28
+	return nil
29
+}
30
+
31
+// Remove removes a file identified by name
32
+func (f *MemoryFileStore) Remove(name string) error {
33
+	f.Lock()
34
+	defer f.Unlock()
35
+
36
+	if _, present := f.files[name]; !present {
37
+		return os.ErrNotExist
38
+	}
39
+	delete(f.files, name)
40
+
41
+	return nil
42
+}
43
+
44
+// Get returns the data given a file name
45
+func (f *MemoryFileStore) Get(name string) ([]byte, error) {
46
+	f.Lock()
47
+	defer f.Unlock()
48
+
49
+	fileData, present := f.files[name]
50
+	if !present {
51
+		return nil, os.ErrNotExist
52
+	}
53
+
54
+	return fileData, nil
55
+}
56
+
57
+// ListFiles lists all the files inside of a store
58
+func (f *MemoryFileStore) ListFiles() []string {
59
+	var list []string
60
+
61
+	for name := range f.files {
62
+		list = append(list, name)
63
+	}
64
+
65
+	return list
66
+}
0 67
new file mode 100644
... ...
@@ -0,0 +1,52 @@
0
+package trustmanager
1
+
2
+import (
3
+	"errors"
4
+
5
+	"github.com/docker/notary"
6
+)
7
+
8
+const (
9
+	visible = notary.PubCertPerms
10
+	private = notary.PrivKeyPerms
11
+)
12
+
13
+var (
14
+	// ErrPathOutsideStore indicates that the returned path would be
15
+	// outside the store
16
+	ErrPathOutsideStore = errors.New("path outside file store")
17
+)
18
+
19
+// LimitedFileStore implements the bare bones primitives (no hierarchy)
20
+type LimitedFileStore interface {
21
+	// Add writes a file to the specified location, returning an error if this
22
+	// is not possible (reasons may include permissions errors). The path is cleaned
23
+	// before being made absolute against the store's base dir.
24
+	Add(fileName string, data []byte) error
25
+
26
+	// Remove deletes a file from the store relative to the store's base directory.
27
+	// The path is cleaned before being made absolute to ensure no path traversal
28
+	// outside the base directory is possible.
29
+	Remove(fileName string) error
30
+
31
+	// Get returns the file content found at fileName relative to the base directory
32
+	// of the file store. The path is cleaned before being made absolute to ensure
33
+	// path traversal outside the store is not possible. If the file is not found
34
+	// an error to that effect is returned.
35
+	Get(fileName string) ([]byte, error)
36
+
37
+	// ListFiles returns a list of paths relative to the base directory of the
38
+	// filestore. Any of these paths must be retrievable via the
39
+	// LimitedFileStore.Get method.
40
+	ListFiles() []string
41
+}
42
+
43
+// FileStore is the interface for full-featured FileStores
44
+type FileStore interface {
45
+	LimitedFileStore
46
+
47
+	RemoveDir(directoryName string) error
48
+	GetPath(fileName string) (string, error)
49
+	ListDir(directoryName string) []string
50
+	BaseDir() string
51
+}
... ...
@@ -15,7 +15,6 @@ import (
15 15
 	"math/big"
16 16
 	"net/http"
17 17
 	"net/url"
18
-	"path/filepath"
19 18
 	"time"
20 19
 
21 20
 	"github.com/Sirupsen/logrus"
... ...
@@ -117,11 +116,15 @@ func fingerprintCert(cert *x509.Certificate) (CertID, error) {
117 117
 
118 118
 // loadCertsFromDir receives a store AddCertFromFile for each certificate found
119 119
 func loadCertsFromDir(s *X509FileStore) error {
120
-	certFiles := s.fileStore.ListFiles()
121
-	for _, f := range certFiles {
120
+	for _, f := range s.fileStore.ListFiles() {
122 121
 		// ListFiles returns relative paths
123
-		fullPath := filepath.Join(s.fileStore.BaseDir(), f)
124
-		err := s.AddCertFromFile(fullPath)
122
+		data, err := s.fileStore.Get(f)
123
+		if err != nil {
124
+			// the filestore told us it had a file that it then couldn't serve.
125
+			// this is a serious problem so error immediately
126
+			return err
127
+		}
128
+		err = s.AddCertFromPEM(data)
125 129
 		if err != nil {
126 130
 			if _, ok := err.(*ErrCertValidation); ok {
127 131
 				logrus.Debugf("ignoring certificate, did not pass validation: %s", f)
... ...
@@ -411,18 +414,26 @@ func blockType(k data.PrivateKey) (string, error) {
411 411
 }
412 412
 
413 413
 // KeyToPEM returns a PEM encoded key from a Private Key
414
-func KeyToPEM(privKey data.PrivateKey) ([]byte, error) {
414
+func KeyToPEM(privKey data.PrivateKey, role string) ([]byte, error) {
415 415
 	bt, err := blockType(privKey)
416 416
 	if err != nil {
417 417
 		return nil, err
418 418
 	}
419 419
 
420
-	return pem.EncodeToMemory(&pem.Block{Type: bt, Bytes: privKey.Private()}), nil
420
+	block := &pem.Block{
421
+		Type: bt,
422
+		Headers: map[string]string{
423
+			"role": role,
424
+		},
425
+		Bytes: privKey.Private(),
426
+	}
427
+
428
+	return pem.EncodeToMemory(block), nil
421 429
 }
422 430
 
423 431
 // EncryptPrivateKey returns an encrypted PEM key given a Privatekey
424 432
 // and a passphrase
425
-func EncryptPrivateKey(key data.PrivateKey, passphrase string) ([]byte, error) {
433
+func EncryptPrivateKey(key data.PrivateKey, role, passphrase string) ([]byte, error) {
426 434
 	bt, err := blockType(key)
427 435
 	if err != nil {
428 436
 		return nil, err
... ...
@@ -440,6 +451,11 @@ func EncryptPrivateKey(key data.PrivateKey, passphrase string) ([]byte, error) {
440 440
 		return nil, err
441 441
 	}
442 442
 
443
+	if encryptedPEMBlock.Headers == nil {
444
+		return nil, fmt.Errorf("unable to encrypt key - invalid PEM file produced")
445
+	}
446
+	encryptedPEMBlock.Headers["role"] = role
447
+
443 448
 	return pem.EncodeToMemory(encryptedPEMBlock), nil
444 449
 }
445 450
 
... ...
@@ -471,12 +487,8 @@ func CertsToKeys(certs []*x509.Certificate) map[string]data.PublicKey {
471 471
 	return keys
472 472
 }
473 473
 
474
-// NewCertificate returns an X509 Certificate following a template, given a GUN.
475
-func NewCertificate(gun string) (*x509.Certificate, error) {
476
-	notBefore := time.Now()
477
-	// Certificates will expire in 10 years
478
-	notAfter := notBefore.Add(time.Hour * 24 * 365 * 10)
479
-
474
+// NewCertificate returns an X509 Certificate following a template, given a GUN and validity interval.
475
+func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
480 476
 	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
481 477
 
482 478
 	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
... ...
@@ -489,8 +501,8 @@ func NewCertificate(gun string) (*x509.Certificate, error) {
489 489
 		Subject: pkix.Name{
490 490
 			CommonName: gun,
491 491
 		},
492
-		NotBefore: notBefore,
493
-		NotAfter:  notAfter,
492
+		NotBefore: startTime,
493
+		NotAfter:  endTime,
494 494
 
495 495
 		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
496 496
 		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
... ...
@@ -14,6 +14,7 @@ import (
14 14
 	"io"
15 15
 	"math/big"
16 16
 	"os"
17
+	"time"
17 18
 
18 19
 	"github.com/Sirupsen/logrus"
19 20
 	"github.com/docker/notary/passphrase"
... ...
@@ -217,7 +218,9 @@ func addECDSAKey(
217 217
 
218 218
 	ecdsaPrivKeyD := ensurePrivateKeySize(ecdsaPrivKey.D.Bytes())
219 219
 
220
-	template, err := trustmanager.NewCertificate(role)
220
+	// Hard-coded policy: the generated certificate expires in 10 years.
221
+	startTime := time.Now()
222
+	template, err := trustmanager.NewCertificate(role, startTime, startTime.AddDate(10, 0, 0))
221 223
 	if err != nil {
222 224
 		return fmt.Errorf("failed to create the certificate template: %v", err)
223 225
 	}
... ...
@@ -483,6 +486,12 @@ func yubiListKeys(ctx IPKCS11Ctx, session pkcs11.SessionHandle) (keys map[string
483 483
 				}
484 484
 			}
485 485
 		}
486
+
487
+		// we found nothing
488
+		if cert == nil {
489
+			continue
490
+		}
491
+
486 492
 		var ecdsaPubKey *ecdsa.PublicKey
487 493
 		switch cert.PublicKeyAlgorithm {
488 494
 		case x509.ECDSA:
... ...
@@ -790,28 +799,31 @@ func SetupHSMEnv(libraryPath string, libLoader pkcs11LibLoader) (
790 790
 	IPKCS11Ctx, pkcs11.SessionHandle, error) {
791 791
 
792 792
 	if libraryPath == "" {
793
-		return nil, 0, errors.New("No library found.")
793
+		return nil, 0, fmt.Errorf("no library found.")
794 794
 	}
795 795
 	p := libLoader(libraryPath)
796 796
 
797 797
 	if p == nil {
798
-		return nil, 0, errors.New("Failed to init library")
798
+		return nil, 0, fmt.Errorf("failed to load library %s", libraryPath)
799 799
 	}
800 800
 
801 801
 	if err := p.Initialize(); err != nil {
802 802
 		defer finalizeAndDestroy(p)
803
-		return nil, 0, fmt.Errorf("Initialize error %s", err.Error())
803
+		return nil, 0, fmt.Errorf(
804
+			"found library %s, but initialize error %s", libraryPath, err.Error())
804 805
 	}
805 806
 
806 807
 	slots, err := p.GetSlotList(true)
807 808
 	if err != nil {
808 809
 		defer finalizeAndDestroy(p)
809
-		return nil, 0, fmt.Errorf("Failed to list HSM slots %s", err)
810
+		return nil, 0, fmt.Errorf(
811
+			"loaded library %s, but failed to list HSM slots %s", libraryPath, err)
810 812
 	}
811 813
 	// Check to see if we got any slots from the HSM.
812 814
 	if len(slots) < 1 {
813 815
 		defer finalizeAndDestroy(p)
814
-		return nil, 0, fmt.Errorf("No HSM Slots found")
816
+		return nil, 0, fmt.Errorf(
817
+			"loaded library %s, but no HSM slots found", libraryPath)
815 818
 	}
816 819
 
817 820
 	// CKF_SERIAL_SESSION: TRUE if cryptographic functions are performed in serial with the application; FALSE if the functions may be performed in parallel with the application.
... ...
@@ -819,9 +831,12 @@ func SetupHSMEnv(libraryPath string, libLoader pkcs11LibLoader) (
819 819
 	session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
820 820
 	if err != nil {
821 821
 		defer cleanup(p, session)
822
-		return nil, 0, fmt.Errorf("Failed to Start Session with HSM %s", err)
822
+		return nil, 0, fmt.Errorf(
823
+			"loaded library %s, but failed to start session with HSM %s",
824
+			libraryPath, err)
823 825
 	}
824 826
 
827
+	logrus.Debugf("Initialized PKCS11 library %s and started HSM session", libraryPath)
825 828
 	return p, session, nil
826 829
 }
827 830
 
... ...
@@ -8,7 +8,6 @@ import (
8 8
 	"fmt"
9 9
 	"io"
10 10
 	"path"
11
-	"path/filepath"
12 11
 	"strings"
13 12
 
14 13
 	"github.com/Sirupsen/logrus"
... ...
@@ -82,7 +81,7 @@ func (c *Client) update() error {
82 82
 		// In this instance the root has not expired base on time, but is
83 83
 		// expired based on the snapshot dictating a new root has been produced.
84 84
 		logrus.Debug(err)
85
-		return tuf.ErrLocalRootExpired{}
85
+		return err
86 86
 	}
87 87
 	// will always need top level targets at a minimum
88 88
 	err = c.downloadTargets("targets")
... ...
@@ -369,34 +368,52 @@ func (c *Client) downloadSnapshot() error {
369 369
 	return nil
370 370
 }
371 371
 
372
-// downloadTargets is responsible for downloading any targets file
373
-// including delegates roles.
372
+// downloadTargets downloads all targets and delegated targets for the repository.
373
+// It uses a pre-order tree traversal as it's necessary to download parents first
374
+// to obtain the keys to validate children.
374 375
 func (c *Client) downloadTargets(role string) error {
375
-	role = data.RoleName(role) // this will really only do something for base targets role
376
-	if c.local.Snapshot == nil {
377
-		return ErrMissingMeta{role: role}
378
-	}
379
-	snap := c.local.Snapshot.Signed
380
-	root := c.local.Root.Signed
381
-	r := c.keysDB.GetRole(role)
382
-	if r == nil {
383
-		return fmt.Errorf("Invalid role: %s", role)
384
-	}
385
-	keyIDs := r.KeyIDs
386
-	s, err := c.getTargetsFile(role, keyIDs, snap.Meta, root.ConsistentSnapshot, r.Threshold)
387
-	if err != nil {
388
-		logrus.Error("Error getting targets file:", err)
389
-		return err
390
-	}
391
-	t, err := data.TargetsFromSigned(s)
392
-	if err != nil {
393
-		return err
394
-	}
395
-	err = c.local.SetTargets(role, t)
396
-	if err != nil {
397
-		return err
398
-	}
376
+	stack := utils.NewStack()
377
+	stack.Push(role)
378
+	for !stack.Empty() {
379
+		role, err := stack.PopString()
380
+		if err != nil {
381
+			return err
382
+		}
383
+		role = data.RoleName(role) // this will really only do something for base targets role
384
+		if c.local.Snapshot == nil {
385
+			return ErrMissingMeta{role: role}
386
+		}
387
+		snap := c.local.Snapshot.Signed
388
+		root := c.local.Root.Signed
389
+		r := c.keysDB.GetRole(role)
390
+		if r == nil {
391
+			return fmt.Errorf("Invalid role: %s", role)
392
+		}
393
+		keyIDs := r.KeyIDs
394
+		s, err := c.getTargetsFile(role, keyIDs, snap.Meta, root.ConsistentSnapshot, r.Threshold)
395
+		if err != nil {
396
+			if _, ok := err.(ErrMissingMeta); ok && role != data.CanonicalTargetsRole {
397
+				// if the role meta hasn't been published,
398
+				// that's ok, continue
399
+				continue
400
+			}
401
+			logrus.Error("Error getting targets file:", err)
402
+			return err
403
+		}
404
+		t, err := data.TargetsFromSigned(s)
405
+		if err != nil {
406
+			return err
407
+		}
408
+		err = c.local.SetTargets(role, t)
409
+		if err != nil {
410
+			return err
411
+		}
399 412
 
413
+		// push delegated roles contained in the targets file onto the stack
414
+		for _, r := range t.Signed.Delegations.Roles {
415
+			stack.Push(r.Name)
416
+		}
417
+	}
400 418
 	return nil
401 419
 }
402 420
 
... ...
@@ -482,17 +499,18 @@ func (c Client) getTargetsFile(role string, keyIDs []string, snapshotMeta data.F
482 482
 		// if we error when setting meta, we should continue.
483 483
 		err = c.cache.SetMeta(role, raw)
484 484
 		if err != nil {
485
-			logrus.Errorf("Failed to write snapshot to local cache: %s", err.Error())
485
+			logrus.Errorf("Failed to write %s to local cache: %s", role, err.Error())
486 486
 		}
487 487
 	}
488 488
 	return s, nil
489 489
 }
490 490
 
491
-// RoleTargetsPath generates the appropriate filename for the targets file,
491
+// RoleTargetsPath generates the appropriate HTTP URL for the targets file,
492 492
 // based on whether the repo is marked as consistent.
493 493
 func (c Client) RoleTargetsPath(role string, hashSha256 string, consistent bool) (string, error) {
494 494
 	if consistent {
495
-		dir := filepath.Dir(role)
495
+		// Use path instead of filepath since we refer to the TUF role directly instead of its target files
496
+		dir := path.Dir(role)
496 497
 		if strings.Contains(role, "/") {
497 498
 			lastSlashIdx := strings.LastIndex(role, "/")
498 499
 			role = role[lastSlashIdx+1:]
... ...
@@ -505,42 +523,41 @@ func (c Client) RoleTargetsPath(role string, hashSha256 string, consistent bool)
505 505
 	return role, nil
506 506
 }
507 507
 
508
-// TargetMeta ensures the repo is up to date, downloading the minimum
509
-// necessary metadata files
510
-func (c Client) TargetMeta(path string) (*data.FileMeta, error) {
511
-	c.Update()
512
-	var meta *data.FileMeta
508
+// TargetMeta ensures the repo is up to date. It assumes downloadTargets
509
+// has already downloaded all delegated roles
510
+func (c Client) TargetMeta(role, path string, excludeRoles ...string) (*data.FileMeta, string) {
511
+	excl := make(map[string]bool)
512
+	for _, r := range excludeRoles {
513
+		excl[r] = true
514
+	}
513 515
 
514 516
 	pathDigest := sha256.Sum256([]byte(path))
515 517
 	pathHex := hex.EncodeToString(pathDigest[:])
516 518
 
517 519
 	// FIFO list of targets delegations to inspect for target
518
-	roles := []string{data.ValidRoles["targets"]}
519
-	var role string
520
+	roles := []string{role}
521
+	var (
522
+		meta *data.FileMeta
523
+		curr string
524
+	)
520 525
 	for len(roles) > 0 {
521 526
 		// have to do these lines here because of order of execution in for statement
522
-		role = roles[0]
527
+		curr = roles[0]
523 528
 		roles = roles[1:]
524 529
 
525
-		// Download the target role file if necessary
526
-		err := c.downloadTargets(role)
527
-		if err != nil {
528
-			// as long as we find a valid target somewhere we're happy.
529
-			// continue and search other delegated roles if any
530
-			continue
531
-		}
532
-
533
-		meta = c.local.TargetMeta(role, path)
530
+		meta = c.local.TargetMeta(curr, path)
534 531
 		if meta != nil {
535 532
 			// we found the target!
536
-			return meta, nil
533
+			return meta, curr
537 534
 		}
538
-		delegations := c.local.TargetDelegations(role, path, pathHex)
535
+		delegations := c.local.TargetDelegations(curr, path, pathHex)
539 536
 		for _, d := range delegations {
540
-			roles = append(roles, d.Name)
537
+			if !excl[d.Name] {
538
+				roles = append(roles, d.Name)
539
+			}
541 540
 		}
542 541
 	}
543
-	return meta, nil
542
+	return meta, ""
544 543
 }
545 544
 
546 545
 // DownloadTarget downloads the target to dst from the remote
... ...
@@ -77,6 +77,15 @@ func (ks *KeyList) UnmarshalJSON(data []byte) error {
77 77
 	return nil
78 78
 }
79 79
 
80
+// IDs generates a list of the hex encoded key IDs in the KeyList
81
+func (ks KeyList) IDs() []string {
82
+	keyIDs := make([]string, 0, len(ks))
83
+	for _, k := range ks {
84
+		keyIDs = append(keyIDs, k.ID())
85
+	}
86
+	return keyIDs
87
+}
88
+
80 89
 func typedPublicKey(tk tufKey) PublicKey {
81 90
 	switch tk.Algorithm() {
82 91
 	case ECDSAKey:
... ...
@@ -2,6 +2,8 @@ package data
2 2
 
3 3
 import (
4 4
 	"fmt"
5
+	"path"
6
+	"regexp"
5 7
 	"strings"
6 8
 )
7 9
 
... ...
@@ -24,15 +26,28 @@ var ValidRoles = map[string]string{
24 24
 	CanonicalTimestampRole: CanonicalTimestampRole,
25 25
 }
26 26
 
27
+// ErrNoSuchRole indicates the roles doesn't exist
28
+type ErrNoSuchRole struct {
29
+	Role string
30
+}
31
+
32
+func (e ErrNoSuchRole) Error() string {
33
+	return fmt.Sprintf("role does not exist: %s", e.Role)
34
+}
35
+
27 36
 // ErrInvalidRole represents an error regarding a role. Typically
28 37
 // something like a role for which sone of the public keys were
29 38
 // not found in the TUF repo.
30 39
 type ErrInvalidRole struct {
31
-	Role string
40
+	Role   string
41
+	Reason string
32 42
 }
33 43
 
34 44
 func (e ErrInvalidRole) Error() string {
35
-	return fmt.Sprintf("tuf: invalid role %s", e.Role)
45
+	if e.Reason != "" {
46
+		return fmt.Sprintf("tuf: invalid role %s. %s", e.Role, e.Reason)
47
+	}
48
+	return fmt.Sprintf("tuf: invalid role %s.", e.Role)
36 49
 }
37 50
 
38 51
 // SetValidRoles is a utility function to override some or all of the roles
... ...
@@ -85,10 +100,11 @@ func ValidRole(name string) bool {
85 85
 	if v, ok := ValidRoles[name]; ok {
86 86
 		return name == v
87 87
 	}
88
-	targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole])
89
-	if strings.HasPrefix(name, targetsBase) {
88
+
89
+	if IsDelegation(name) {
90 90
 		return true
91 91
 	}
92
+
92 93
 	for _, v := range ValidRoles {
93 94
 		if name == v {
94 95
 			return true
... ...
@@ -97,6 +113,24 @@ func ValidRole(name string) bool {
97 97
 	return false
98 98
 }
99 99
 
100
+// IsDelegation checks if the role is a delegation or a root role
101
+func IsDelegation(role string) bool {
102
+	targetsBase := ValidRoles[CanonicalTargetsRole] + "/"
103
+
104
+	delegationRegexp := regexp.MustCompile("^[-a-z0-9_/]+$")
105
+	whitelistedChars := delegationRegexp.MatchString(role)
106
+
107
+	// Limit size of full role string to 255 chars for db column size limit
108
+	correctLength := len(role) < 256
109
+
110
+	// Removes ., .., extra slashes, and trailing slash
111
+	isClean := path.Clean(role) == role
112
+	return strings.HasPrefix(role, targetsBase) &&
113
+		whitelistedChars &&
114
+		correctLength &&
115
+		isClean
116
+}
117
+
100 118
 // RootRole is a cut down role as it appears in the root.json
101 119
 type RootRole struct {
102 120
 	KeyIDs    []string `json:"keyids"`
... ...
@@ -115,7 +149,18 @@ type Role struct {
115 115
 // NewRole creates a new Role object from the given parameters
116 116
 func NewRole(name string, threshold int, keyIDs, paths, pathHashPrefixes []string) (*Role, error) {
117 117
 	if len(paths) > 0 && len(pathHashPrefixes) > 0 {
118
-		return nil, ErrInvalidRole{Role: name}
118
+		return nil, ErrInvalidRole{
119
+			Role:   name,
120
+			Reason: "roles may not have both Paths and PathHashPrefixes",
121
+		}
122
+	}
123
+	if IsDelegation(name) {
124
+		if len(paths) == 0 && len(pathHashPrefixes) == 0 {
125
+			return nil, ErrInvalidRole{
126
+				Role:   name,
127
+				Reason: "roles with no Paths and no PathHashPrefixes will never be able to publish content",
128
+			}
129
+		}
119 130
 	}
120 131
 	if threshold < 1 {
121 132
 		return nil, ErrInvalidRole{Role: name}
... ...
@@ -173,6 +218,78 @@ func (r Role) CheckPrefixes(hash string) bool {
173 173
 
174 174
 // IsDelegation checks if the role is a delegation or a root role
175 175
 func (r Role) IsDelegation() bool {
176
-	targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole])
177
-	return strings.HasPrefix(r.Name, targetsBase)
176
+	return IsDelegation(r.Name)
177
+}
178
+
179
+// AddKeys merges the ids into the current list of role key ids
180
+func (r *Role) AddKeys(ids []string) {
181
+	r.KeyIDs = mergeStrSlices(r.KeyIDs, ids)
182
+}
183
+
184
+// AddPaths merges the paths into the current list of role paths
185
+func (r *Role) AddPaths(paths []string) error {
186
+	if len(paths) == 0 {
187
+		return nil
188
+	}
189
+	if len(r.PathHashPrefixes) > 0 {
190
+		return ErrInvalidRole{Role: r.Name, Reason: "attempted to add paths to role that already has hash prefixes"}
191
+	}
192
+	r.Paths = mergeStrSlices(r.Paths, paths)
193
+	return nil
194
+}
195
+
196
+// AddPathHashPrefixes merges the prefixes into the list of role path hash prefixes
197
+func (r *Role) AddPathHashPrefixes(prefixes []string) error {
198
+	if len(prefixes) == 0 {
199
+		return nil
200
+	}
201
+	if len(r.Paths) > 0 {
202
+		return ErrInvalidRole{Role: r.Name, Reason: "attempted to add hash prefixes to role that already has paths"}
203
+	}
204
+	r.PathHashPrefixes = mergeStrSlices(r.PathHashPrefixes, prefixes)
205
+	return nil
206
+}
207
+
208
+// RemoveKeys removes the ids from the current list of key ids
209
+func (r *Role) RemoveKeys(ids []string) {
210
+	r.KeyIDs = subtractStrSlices(r.KeyIDs, ids)
211
+}
212
+
213
+// RemovePaths removes the paths from the current list of role paths
214
+func (r *Role) RemovePaths(paths []string) {
215
+	r.Paths = subtractStrSlices(r.Paths, paths)
216
+}
217
+
218
+// RemovePathHashPrefixes removes the prefixes from the current list of path hash prefixes
219
+func (r *Role) RemovePathHashPrefixes(prefixes []string) {
220
+	r.PathHashPrefixes = subtractStrSlices(r.PathHashPrefixes, prefixes)
221
+}
222
+
223
+func mergeStrSlices(orig, new []string) []string {
224
+	have := make(map[string]bool)
225
+	for _, e := range orig {
226
+		have[e] = true
227
+	}
228
+	merged := make([]string, len(orig), len(orig)+len(new))
229
+	copy(merged, orig)
230
+	for _, e := range new {
231
+		if !have[e] {
232
+			merged = append(merged, e)
233
+		}
234
+	}
235
+	return merged
236
+}
237
+
238
+func subtractStrSlices(orig, remove []string) []string {
239
+	kill := make(map[string]bool)
240
+	for _, e := range remove {
241
+		kill[e] = true
242
+	}
243
+	var keep []string
244
+	for _, e := range orig {
245
+		if !kill[e] {
246
+			keep = append(keep, e)
247
+		}
248
+	}
249
+	return keep
178 250
 }
... ...
@@ -88,6 +88,15 @@ func (sp *SignedSnapshot) AddMeta(role string, meta FileMeta) {
88 88
 	sp.Dirty = true
89 89
 }
90 90
 
91
+// DeleteMeta removes a role from the snapshot. If the role doesn't
92
+// exist in the snapshot, it's a noop.
93
+func (sp *SignedSnapshot) DeleteMeta(role string) {
94
+	if _, ok := sp.Signed.Meta[role]; ok {
95
+		delete(sp.Signed.Meta, role)
96
+		sp.Dirty = true
97
+	}
98
+}
99
+
91 100
 // SnapshotFromSigned fully unpacks a Signed object into a SignedSnapshot
92 101
 func SnapshotFromSigned(s *Signed) (*SignedSnapshot, error) {
93 102
 	sp := Snapshot{}
... ...
@@ -3,6 +3,7 @@ package data
3 3
 import (
4 4
 	"crypto/sha256"
5 5
 	"encoding/hex"
6
+	"errors"
6 7
 
7 8
 	"github.com/jfrazelle/go/canonical/json"
8 9
 )
... ...
@@ -88,7 +89,7 @@ func (t *SignedTargets) AddTarget(path string, meta FileMeta) {
88 88
 // ensuring the keys either already exist, or are added to the map
89 89
 // of delegation keys
90 90
 func (t *SignedTargets) AddDelegation(role *Role, keys []*PublicKey) error {
91
-	return nil
91
+	return errors.New("Not Implemented")
92 92
 }
93 93
 
94 94
 // ToSigned partially serializes a SignedTargets for further signing
... ...
@@ -46,8 +46,10 @@ func (e *Ed25519) RemoveKey(keyID string) error {
46 46
 // ListKeys returns the list of keys IDs for the role
47 47
 func (e *Ed25519) ListKeys(role string) []string {
48 48
 	keyIDs := make([]string, 0, len(e.keys))
49
-	for id := range e.keys {
50
-		keyIDs = append(keyIDs, id)
49
+	for id, edCryptoKey := range e.keys {
50
+		if edCryptoKey.role == role {
51
+			keyIDs = append(keyIDs, id)
52
+		}
51 53
 	}
52 54
 	return keyIDs
53 55
 }
... ...
@@ -61,23 +63,6 @@ func (e *Ed25519) ListAllKeys() map[string]string {
61 61
 	return keys
62 62
 }
63 63
 
64
-// Sign generates an Ed25519 signature over the data
65
-func (e *Ed25519) Sign(keyIDs []string, toSign []byte) ([]data.Signature, error) {
66
-	signatures := make([]data.Signature, 0, len(keyIDs))
67
-	for _, keyID := range keyIDs {
68
-		priv := [ed25519.PrivateKeySize]byte{}
69
-		copy(priv[:], e.keys[keyID].privKey.Private())
70
-		sig := ed25519.Sign(&priv, toSign)
71
-		signatures = append(signatures, data.Signature{
72
-			KeyID:     keyID,
73
-			Method:    data.EDDSASignature,
74
-			Signature: sig[:],
75
-		})
76
-	}
77
-	return signatures, nil
78
-
79
-}
80
-
81 64
 // Create generates a new key and returns the public part
82 65
 func (e *Ed25519) Create(role, algorithm string) (data.PublicKey, error) {
83 66
 	if algorithm != data.ED25519Key {
... ...
@@ -63,10 +63,10 @@ func (e ErrInvalidKeyLength) Error() string {
63 63
 
64 64
 // ErrNoKeys indicates no signing keys were found when trying to sign
65 65
 type ErrNoKeys struct {
66
-	keyIDs []string
66
+	KeyIDs []string
67 67
 }
68 68
 
69 69
 func (e ErrNoKeys) Error() string {
70 70
 	return fmt.Sprintf("could not find necessary signing keys, at least one of these keys must be available: %s",
71
-		strings.Join(e.keyIDs, ", "))
71
+		strings.Join(e.KeyIDs, ", "))
72 72
 }
... ...
@@ -5,14 +5,6 @@ import (
5 5
 	"io"
6 6
 )
7 7
 
8
-// SigningService defines the necessary functions to determine
9
-// if a user is able to sign with a key, and to perform signing.
10
-type SigningService interface {
11
-	// Sign takes a slice of keyIDs and a piece of data to sign
12
-	// and returns a slice of signatures and an error
13
-	Sign(keyIDs []string, data []byte) ([]data.Signature, error)
14
-}
15
-
16 8
 // KeyService provides management of keys locally. It will never
17 9
 // accept or provide private keys. Communication between the KeyService
18 10
 // and a SigningService happen behind the Create function.
... ...
@@ -44,10 +36,9 @@ type KeyService interface {
44 44
 	ImportRootKey(source io.Reader) error
45 45
 }
46 46
 
47
-// CryptoService defines a unified Signing and Key Service as this
48
-// will be most useful for most applications.
47
+// CryptoService is deprecated and all instances of its use should be
48
+// replaced with KeyService
49 49
 type CryptoService interface {
50
-	SigningService
51 50
 	KeyService
52 51
 }
53 52
 
... ...
@@ -46,7 +46,7 @@ func Sign(service CryptoService, s *data.Signed, keys ...data.PublicKey) error {
46 46
 
47 47
 	// Check to ensure we have at least one signing key
48 48
 	if len(privKeys) == 0 {
49
-		return ErrNoKeys{keyIDs: ids}
49
+		return ErrNoKeys{KeyIDs: ids}
50 50
 	}
51 51
 
52 52
 	// Do signing and generate list of signatures
... ...
@@ -1,9 +1,13 @@
1 1
 package store
2 2
 
3
+import "fmt"
4
+
3 5
 // ErrMetaNotFound indicates we did not find a particular piece
4 6
 // of metadata in the store
5
-type ErrMetaNotFound struct{}
7
+type ErrMetaNotFound struct {
8
+	Role string
9
+}
6 10
 
7 11
 func (err ErrMetaNotFound) Error() string {
8
-	return "no trust data available"
12
+	return fmt.Sprintf("%s trust data unavailable", err.Role)
9 13
 }
... ...
@@ -45,6 +45,9 @@ func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) {
45 45
 	path := filepath.Join(f.metaDir, fileName)
46 46
 	meta, err := ioutil.ReadFile(path)
47 47
 	if err != nil {
48
+		if os.IsNotExist(err) {
49
+			err = ErrMetaNotFound{Role: name}
50
+		}
48 51
 		return nil, err
49 52
 	}
50 53
 	return meta, nil
... ...
@@ -65,7 +68,18 @@ func (f *FilesystemStore) SetMultiMeta(metas map[string][]byte) error {
65 65
 func (f *FilesystemStore) SetMeta(name string, meta []byte) error {
66 66
 	fileName := fmt.Sprintf("%s.%s", name, f.metaExtension)
67 67
 	path := filepath.Join(f.metaDir, fileName)
68
-	if err := ioutil.WriteFile(path, meta, 0600); err != nil {
68
+
69
+	// Ensures the parent directories of the file we are about to write exist
70
+	err := os.MkdirAll(filepath.Dir(path), 0700)
71
+	if err != nil {
72
+		return err
73
+	}
74
+
75
+	// if something already exists, just delete it and re-write it
76
+	os.RemoveAll(path)
77
+
78
+	// Write the file to disk
79
+	if err = ioutil.WriteFile(path, meta, 0600); err != nil {
69 80
 		return err
70 81
 	}
71 82
 	return nil
... ...
@@ -1,7 +1,18 @@
1
+// A Store that can fetch and set metadata on a remote server.
2
+// Some API constraints:
3
+// - Response bodies for error codes should be unmarshallable as:
4
+//   {"errors": [{..., "detail": <serialized validation error>}]}
5
+//   else validation error details, etc. will be unparsable.  The errors
6
+//   should have a github.com/docker/notary/tuf/validation/SerializableError
7
+//   in the Details field.
8
+//   If writing your own server, please have a look at
9
+//   github.com/docker/distribution/registry/api/errcode
10
+
1 11
 package store
2 12
 
3 13
 import (
4 14
 	"bytes"
15
+	"encoding/json"
5 16
 	"errors"
6 17
 	"fmt"
7 18
 	"io"
... ...
@@ -12,6 +23,7 @@ import (
12 12
 	"path"
13 13
 
14 14
 	"github.com/Sirupsen/logrus"
15
+	"github.com/docker/notary/tuf/validation"
15 16
 )
16 17
 
17 18
 // ErrServerUnavailable indicates an error from the server. code allows us to
... ...
@@ -21,7 +33,7 @@ type ErrServerUnavailable struct {
21 21
 }
22 22
 
23 23
 func (err ErrServerUnavailable) Error() string {
24
-	return fmt.Sprintf("Unable to reach trust server at this time: %d.", err.code)
24
+	return fmt.Sprintf("unable to reach trust server at this time: %d.", err.code)
25 25
 }
26 26
 
27 27
 // ErrMaliciousServer indicates the server returned a response that is highly suspected
... ...
@@ -30,7 +42,20 @@ func (err ErrServerUnavailable) Error() string {
30 30
 type ErrMaliciousServer struct{}
31 31
 
32 32
 func (err ErrMaliciousServer) Error() string {
33
-	return "Trust server returned a bad response."
33
+	return "trust server returned a bad response."
34
+}
35
+
36
+// ErrInvalidOperation indicates that the server returned a 400 response and
37
+// propagate any body we received.
38
+type ErrInvalidOperation struct {
39
+	msg string
40
+}
41
+
42
+func (err ErrInvalidOperation) Error() string {
43
+	if err.msg != "" {
44
+		return fmt.Sprintf("trust server rejected operation: %s", err.msg)
45
+	}
46
+	return "trust server rejected operation."
34 47
 }
35 48
 
36 49
 // HTTPStore manages pulling and pushing metadata from and to a remote
... ...
@@ -70,6 +95,42 @@ func NewHTTPStore(baseURL, metaPrefix, metaExtension, targetsPrefix, keyExtensio
70 70
 	}, nil
71 71
 }
72 72
 
73
+func tryUnmarshalError(resp *http.Response, defaultError error) error {
74
+	bodyBytes, err := ioutil.ReadAll(resp.Body)
75
+	if err != nil {
76
+		return defaultError
77
+	}
78
+	var parsedErrors struct {
79
+		Errors []struct {
80
+			Detail validation.SerializableError `json:"detail"`
81
+		} `json:"errors"`
82
+	}
83
+	if err := json.Unmarshal(bodyBytes, &parsedErrors); err != nil {
84
+		return defaultError
85
+	}
86
+	if len(parsedErrors.Errors) != 1 {
87
+		return defaultError
88
+	}
89
+	err = parsedErrors.Errors[0].Detail.Error
90
+	if err == nil {
91
+		return defaultError
92
+	}
93
+	return err
94
+}
95
+
96
+func translateStatusToError(resp *http.Response) error {
97
+	switch resp.StatusCode {
98
+	case http.StatusOK:
99
+		return nil
100
+	case http.StatusNotFound:
101
+		return ErrMetaNotFound{}
102
+	case http.StatusBadRequest:
103
+		return tryUnmarshalError(resp, ErrInvalidOperation{})
104
+	default:
105
+		return ErrServerUnavailable{code: resp.StatusCode}
106
+	}
107
+}
108
+
73 109
 // GetMeta downloads the named meta file with the given size. A short body
74 110
 // is acceptable because in the case of timestamp.json, the size is a cap,
75 111
 // not an exact length.
... ...
@@ -87,11 +148,9 @@ func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
87 87
 		return nil, err
88 88
 	}
89 89
 	defer resp.Body.Close()
90
-	if resp.StatusCode == http.StatusNotFound {
91
-		return nil, ErrMetaNotFound{}
92
-	} else if resp.StatusCode != http.StatusOK {
90
+	if err := translateStatusToError(resp); err != nil {
93 91
 		logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
94
-		return nil, ErrServerUnavailable{code: resp.StatusCode}
92
+		return nil, err
95 93
 	}
96 94
 	if resp.ContentLength > size {
97 95
 		return nil, ErrMaliciousServer{}
... ...
@@ -120,37 +179,42 @@ func (s HTTPStore) SetMeta(name string, blob []byte) error {
120 120
 		return err
121 121
 	}
122 122
 	defer resp.Body.Close()
123
-	if resp.StatusCode == http.StatusNotFound {
124
-		return ErrMetaNotFound{}
125
-	} else if resp.StatusCode != http.StatusOK {
126
-		return ErrServerUnavailable{code: resp.StatusCode}
127
-	}
128
-	return nil
123
+	return translateStatusToError(resp)
129 124
 }
130 125
 
131
-// SetMultiMeta does a single batch upload of multiple pieces of TUF metadata.
132
-// This should be preferred for updating a remote server as it enable the server
133
-// to remain consistent, either accepting or rejecting the complete update.
134
-func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
135
-	url, err := s.buildMetaURL("")
136
-	if err != nil {
137
-		return err
138
-	}
126
+// NewMultiPartMetaRequest builds a request with the provided metadata updates
127
+// in multipart form
128
+func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request, error) {
139 129
 	body := &bytes.Buffer{}
140 130
 	writer := multipart.NewWriter(body)
141 131
 	for role, blob := range metas {
142 132
 		part, err := writer.CreateFormFile("files", role)
143 133
 		_, err = io.Copy(part, bytes.NewBuffer(blob))
144 134
 		if err != nil {
145
-			return err
135
+			return nil, err
146 136
 		}
147 137
 	}
148
-	err = writer.Close()
138
+	err := writer.Close()
149 139
 	if err != nil {
150
-		return err
140
+		return nil, err
141
+	}
142
+	req, err := http.NewRequest("POST", url, body)
143
+	if err != nil {
144
+		return nil, err
151 145
 	}
152
-	req, err := http.NewRequest("POST", url.String(), body)
153 146
 	req.Header.Set("Content-Type", writer.FormDataContentType())
147
+	return req, nil
148
+}
149
+
150
+// SetMultiMeta does a single batch upload of multiple pieces of TUF metadata.
151
+// This should be preferred for updating a remote server as it enable the server
152
+// to remain consistent, either accepting or rejecting the complete update.
153
+func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
154
+	url, err := s.buildMetaURL("")
155
+	if err != nil {
156
+		return err
157
+	}
158
+	req, err := NewMultiPartMetaRequest(url.String(), metas)
154 159
 	if err != nil {
155 160
 		return err
156 161
 	}
... ...
@@ -159,12 +223,7 @@ func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
159 159
 		return err
160 160
 	}
161 161
 	defer resp.Body.Close()
162
-	if resp.StatusCode == http.StatusNotFound {
163
-		return ErrMetaNotFound{}
164
-	} else if resp.StatusCode != http.StatusOK {
165
-		return ErrServerUnavailable{code: resp.StatusCode}
166
-	}
167
-	return nil
162
+	return translateStatusToError(resp)
168 163
 }
169 164
 
170 165
 func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
... ...
@@ -212,10 +271,8 @@ func (s HTTPStore) GetTarget(path string) (io.ReadCloser, error) {
212 212
 		return nil, err
213 213
 	}
214 214
 	defer resp.Body.Close()
215
-	if resp.StatusCode == http.StatusNotFound {
216
-		return nil, ErrMetaNotFound{}
217
-	} else if resp.StatusCode != http.StatusOK {
218
-		return nil, ErrServerUnavailable{code: resp.StatusCode}
215
+	if err := translateStatusToError(resp); err != nil {
216
+		return nil, err
219 217
 	}
220 218
 	return resp.Body, nil
221 219
 }
... ...
@@ -235,10 +292,8 @@ func (s HTTPStore) GetKey(role string) ([]byte, error) {
235 235
 		return nil, err
236 236
 	}
237 237
 	defer resp.Body.Close()
238
-	if resp.StatusCode == http.StatusNotFound {
239
-		return nil, ErrMetaNotFound{}
240
-	} else if resp.StatusCode != http.StatusOK {
241
-		return nil, ErrServerUnavailable{code: resp.StatusCode}
238
+	if err := translateStatusToError(resp); err != nil {
239
+		return nil, err
242 240
 	}
243 241
 	body, err := ioutil.ReadAll(resp.Body)
244 242
 	if err != nil {
... ...
@@ -7,7 +7,7 @@ import (
7 7
 	"encoding/hex"
8 8
 	"encoding/json"
9 9
 	"fmt"
10
-	"path/filepath"
10
+	"path"
11 11
 	"strings"
12 12
 	"time"
13 13
 
... ...
@@ -99,8 +99,24 @@ func (tr *Repo) AddBaseKeys(role string, keys ...data.PublicKey) error {
99 99
 	}
100 100
 	tr.keysDB.AddRole(r)
101 101
 	tr.Root.Dirty = true
102
-	return nil
103 102
 
103
+	// also, whichever role was switched out needs to be re-signed
104
+	// root has already been marked dirty
105
+	switch role {
106
+	case data.CanonicalSnapshotRole:
107
+		if tr.Snapshot != nil {
108
+			tr.Snapshot.Dirty = true
109
+		}
110
+	case data.CanonicalTargetsRole:
111
+		if target, ok := tr.Targets[data.CanonicalTargetsRole]; ok {
112
+			target.Dirty = true
113
+		}
114
+	case data.CanonicalTimestampRole:
115
+		if tr.Timestamp != nil {
116
+			tr.Timestamp.Dirty = true
117
+		}
118
+	}
119
+	return nil
104 120
 }
105 121
 
106 122
 // ReplaceBaseKeys is used to replace all keys for the given role with the new keys
... ...
@@ -157,24 +173,58 @@ func (tr *Repo) RemoveBaseKeys(role string, keyIDs ...string) error {
157 157
 	return nil
158 158
 }
159 159
 
160
+// GetDelegation finds the role entry representing the provided
161
+// role name or ErrInvalidRole
162
+func (tr *Repo) GetDelegation(role string) (*data.Role, error) {
163
+	r := data.Role{Name: role}
164
+	if !r.IsDelegation() {
165
+		return nil, data.ErrInvalidRole{Role: role, Reason: "not a valid delegated role"}
166
+	}
167
+
168
+	parent := path.Dir(role)
169
+
170
+	// check the parent role
171
+	if parentRole := tr.keysDB.GetRole(parent); parentRole == nil {
172
+		return nil, data.ErrInvalidRole{Role: role, Reason: "parent role not found"}
173
+	}
174
+
175
+	// check the parent role's metadata
176
+	p, ok := tr.Targets[parent]
177
+	if !ok { // the parent targetfile may not exist yet, so it can't be in the list
178
+		return nil, data.ErrNoSuchRole{Role: role}
179
+	}
180
+
181
+	foundAt := utils.FindRoleIndex(p.Signed.Delegations.Roles, role)
182
+	if foundAt < 0 {
183
+		return nil, data.ErrNoSuchRole{Role: role}
184
+	}
185
+	return p.Signed.Delegations.Roles[foundAt], nil
186
+}
187
+
160 188
 // UpdateDelegations updates the appropriate delegations, either adding
161 189
 // a new delegation or updating an existing one. If keys are
162 190
 // provided, the IDs will be added to the role (if they do not exist
163 191
 // there already), and the keys will be added to the targets file.
164
-// The "before" argument specifies another role which this new role
165
-// will be added in front of (i.e. higher priority) in the delegation list.
166
-// An empty before string indicates to add the role to the end of the
167
-// delegation list.
168
-// A new, empty, targets file will be created for the new role.
169
-func (tr *Repo) UpdateDelegations(role *data.Role, keys []data.PublicKey, before string) error {
192
+func (tr *Repo) UpdateDelegations(role *data.Role, keys []data.PublicKey) error {
170 193
 	if !role.IsDelegation() || !role.IsValid() {
171
-		return data.ErrInvalidRole{Role: role.Name}
194
+		return data.ErrInvalidRole{Role: role.Name, Reason: "not a valid delegated role"}
172 195
 	}
173
-	parent := filepath.Dir(role.Name)
196
+	parent := path.Dir(role.Name)
197
+
198
+	if err := tr.VerifyCanSign(parent); err != nil {
199
+		return err
200
+	}
201
+
202
+	// check the parent role's metadata
174 203
 	p, ok := tr.Targets[parent]
175
-	if !ok {
176
-		return data.ErrInvalidRole{Role: role.Name}
204
+	if !ok { // the parent targetfile may not exist yet - if not, then create it
205
+		var err error
206
+		p, err = tr.InitTargets(parent)
207
+		if err != nil {
208
+			return err
209
+		}
177 210
 	}
211
+
178 212
 	for _, k := range keys {
179 213
 		if !utils.StrSliceContains(role.KeyIDs, k.ID()) {
180 214
 			role.KeyIDs = append(role.KeyIDs, k.ID())
... ...
@@ -183,24 +233,75 @@ func (tr *Repo) UpdateDelegations(role *data.Role, keys []data.PublicKey, before
183 183
 		tr.keysDB.AddKey(k)
184 184
 	}
185 185
 
186
-	i := -1
187
-	var r *data.Role
188
-	for i, r = range p.Signed.Delegations.Roles {
189
-		if r.Name == role.Name {
190
-			break
191
-		}
186
+	// if the role has fewer keys than the threshold, it
187
+	// will never be able to create a valid targets file
188
+	// and should be considered invalid.
189
+	if len(role.KeyIDs) < role.Threshold {
190
+		return data.ErrInvalidRole{Role: role.Name, Reason: "insufficient keys to meet threshold"}
192 191
 	}
193
-	if i >= 0 {
194
-		p.Signed.Delegations.Roles[i] = role
192
+
193
+	foundAt := utils.FindRoleIndex(p.Signed.Delegations.Roles, role.Name)
194
+
195
+	if foundAt >= 0 {
196
+		p.Signed.Delegations.Roles[foundAt] = role
195 197
 	} else {
196 198
 		p.Signed.Delegations.Roles = append(p.Signed.Delegations.Roles, role)
197 199
 	}
200
+	// We've made a change to parent. Set it to dirty
198 201
 	p.Dirty = true
199 202
 
200
-	roleTargets := data.NewTargets() // NewTargets always marked Dirty
201
-	tr.Targets[role.Name] = roleTargets
203
+	// We don't actually want to create the new delegation metadata yet.
204
+	// When we add a delegation, it may only be signable by a key we don't have
205
+	// (hence we are delegating signing).
202 206
 
203 207
 	tr.keysDB.AddRole(role)
208
+	utils.RemoveUnusedKeys(p)
209
+
210
+	return nil
211
+}
212
+
213
+// DeleteDelegation removes a delegated targets role from its parent
214
+// targets object. It also deletes the delegation from the snapshot.
215
+// DeleteDelegation will only make use of the role Name field.
216
+func (tr *Repo) DeleteDelegation(role data.Role) error {
217
+	if !role.IsDelegation() {
218
+		return data.ErrInvalidRole{Role: role.Name, Reason: "not a valid delegated role"}
219
+	}
220
+	// the role variable must not be used past this assignment for safety
221
+	name := role.Name
222
+
223
+	parent := path.Dir(name)
224
+	if err := tr.VerifyCanSign(parent); err != nil {
225
+		return err
226
+	}
227
+
228
+	// delete delegated data from Targets map and Snapshot - if they don't
229
+	// exist, these are no-op
230
+	delete(tr.Targets, name)
231
+	tr.Snapshot.DeleteMeta(name)
232
+
233
+	p, ok := tr.Targets[parent]
234
+	if !ok {
235
+		// if there is no parent metadata (the role exists though), then this
236
+		// is as good as done.
237
+		return nil
238
+	}
239
+
240
+	foundAt := utils.FindRoleIndex(p.Signed.Delegations.Roles, name)
241
+
242
+	if foundAt >= 0 {
243
+		var roles []*data.Role
244
+		// slice out deleted role
245
+		roles = append(roles, p.Signed.Delegations.Roles[:foundAt]...)
246
+		if foundAt+1 < len(p.Signed.Delegations.Roles) {
247
+			roles = append(roles, p.Signed.Delegations.Roles[foundAt+1:]...)
248
+		}
249
+		p.Signed.Delegations.Roles = roles
250
+
251
+		utils.RemoveUnusedKeys(p)
252
+
253
+		p.Dirty = true
254
+	} // if the role wasn't found, it's a good as deleted
204 255
 
205 256
 	return nil
206 257
 }
... ...
@@ -213,7 +314,7 @@ func (tr *Repo) InitRepo(consistent bool) error {
213 213
 	if err := tr.InitRoot(consistent); err != nil {
214 214
 		return err
215 215
 	}
216
-	if err := tr.InitTargets(); err != nil {
216
+	if _, err := tr.InitTargets(data.CanonicalTargetsRole); err != nil {
217 217
 		return err
218 218
 	}
219 219
 	if err := tr.InitSnapshot(); err != nil {
... ...
@@ -230,7 +331,7 @@ func (tr *Repo) InitRoot(consistent bool) error {
230 230
 	for _, r := range data.ValidRoles {
231 231
 		role := tr.keysDB.GetRole(r)
232 232
 		if role == nil {
233
-			return data.ErrInvalidRole{Role: data.CanonicalRootRole}
233
+			return data.ErrInvalidRole{Role: data.CanonicalRootRole, Reason: "root role not initialized in key database"}
234 234
 		}
235 235
 		rootRoles[r] = &role.RootRole
236 236
 		for _, kid := range role.KeyIDs {
... ...
@@ -248,20 +349,34 @@ func (tr *Repo) InitRoot(consistent bool) error {
248 248
 	return nil
249 249
 }
250 250
 
251
-// InitTargets initializes an empty targets
252
-func (tr *Repo) InitTargets() error {
251
+// InitTargets initializes an empty targets, and returns the new empty target
252
+func (tr *Repo) InitTargets(role string) (*data.SignedTargets, error) {
253
+	r := data.Role{Name: role}
254
+	if !r.IsDelegation() && data.CanonicalRole(role) != data.CanonicalTargetsRole {
255
+		return nil, data.ErrInvalidRole{
256
+			Role:   role,
257
+			Reason: fmt.Sprintf("role is not a valid targets role name: %s", role),
258
+		}
259
+	}
253 260
 	targets := data.NewTargets()
254
-	tr.Targets[data.ValidRoles["targets"]] = targets
255
-	return nil
261
+	tr.Targets[data.RoleName(role)] = targets
262
+	return targets, nil
256 263
 }
257 264
 
258 265
 // InitSnapshot initializes a snapshot based on the current root and targets
259 266
 func (tr *Repo) InitSnapshot() error {
267
+	if tr.Root == nil {
268
+		return ErrNotLoaded{role: "root"}
269
+	}
260 270
 	root, err := tr.Root.ToSigned()
261 271
 	if err != nil {
262 272
 		return err
263 273
 	}
264
-	targets, err := tr.Targets[data.ValidRoles["targets"]].ToSigned()
274
+
275
+	if _, ok := tr.Targets[data.RoleName(data.CanonicalTargetsRole)]; !ok {
276
+		return ErrNotLoaded{role: "targets"}
277
+	}
278
+	targets, err := tr.Targets[data.RoleName(data.CanonicalTargetsRole)].ToSigned()
265 279
 	if err != nil {
266 280
 		return err
267 281
 	}
... ...
@@ -403,19 +518,61 @@ func (tr Repo) FindTarget(path string) *data.FileMeta {
403 403
 	return walkTargets("targets")
404 404
 }
405 405
 
406
+// VerifyCanSign returns nil if the role exists and we have at least one
407
+// signing key for the role, false otherwise.  This does not check that we have
408
+// enough signing keys to meet the threshold, since we want to support the use
409
+// case of multiple signers for a role.  It returns an error if the role doesn't
410
+// exist or if there are no signing keys.
411
+func (tr *Repo) VerifyCanSign(roleName string) error {
412
+	role := tr.keysDB.GetRole(roleName)
413
+	if role == nil {
414
+		return data.ErrInvalidRole{Role: roleName, Reason: "does not exist"}
415
+	}
416
+
417
+	for _, keyID := range role.KeyIDs {
418
+		k := tr.keysDB.GetKey(keyID)
419
+		canonicalID, err := utils.CanonicalKeyID(k)
420
+		check := []string{keyID}
421
+		if err == nil {
422
+			check = append(check, canonicalID)
423
+		}
424
+		for _, id := range check {
425
+			p, _, err := tr.cryptoService.GetPrivateKey(id)
426
+			if err == nil && p != nil {
427
+				return nil
428
+			}
429
+		}
430
+	}
431
+	return signed.ErrNoKeys{KeyIDs: role.KeyIDs}
432
+}
433
+
406 434
 // AddTargets will attempt to add the given targets specifically to
407
-// the directed role. If the user does not have the signing keys for the role
408
-// the function will return an error and the full slice of targets.
435
+// the directed role. If the metadata for the role doesn't exist yet,
436
+// AddTargets will create one.
409 437
 func (tr *Repo) AddTargets(role string, targets data.Files) (data.Files, error) {
438
+
439
+	err := tr.VerifyCanSign(role)
440
+	if err != nil {
441
+		return nil, err
442
+	}
443
+
444
+	// check the role's metadata
410 445
 	t, ok := tr.Targets[role]
411
-	if !ok {
412
-		return targets, data.ErrInvalidRole{Role: role}
446
+	if !ok { // the targetfile may not exist yet - if not, then create it
447
+		var err error
448
+		t, err = tr.InitTargets(role)
449
+		if err != nil {
450
+			return nil, err
451
+		}
413 452
 	}
453
+
454
+	// VerifyCanSign already makes sure this is not nil
455
+	r := tr.keysDB.GetRole(role)
456
+
414 457
 	invalid := make(data.Files)
415 458
 	for path, target := range targets {
416 459
 		pathDigest := sha256.Sum256([]byte(path))
417 460
 		pathHex := hex.EncodeToString(pathDigest[:])
418
-		r := tr.keysDB.GetRole(role)
419 461
 		if role == data.ValidRoles["targets"] || (r.CheckPaths(path) || r.CheckPrefixes(pathHex)) {
420 462
 			t.Signed.Targets[path] = target
421 463
 		} else {
... ...
@@ -431,15 +588,19 @@ func (tr *Repo) AddTargets(role string, targets data.Files) (data.Files, error)
431 431
 
432 432
 // RemoveTargets removes the given target (paths) from the given target role (delegation)
433 433
 func (tr *Repo) RemoveTargets(role string, targets ...string) error {
434
-	t, ok := tr.Targets[role]
435
-	if !ok {
436
-		return data.ErrInvalidRole{Role: role}
434
+	if err := tr.VerifyCanSign(role); err != nil {
435
+		return err
437 436
 	}
438 437
 
439
-	for _, path := range targets {
440
-		delete(t.Signed.Targets, path)
438
+	// if the role exists but metadata does not yet, then our work is done
439
+	t, ok := tr.Targets[role]
440
+	if ok {
441
+		for _, path := range targets {
442
+			delete(t.Signed.Targets, path)
443
+		}
444
+		t.Dirty = true
441 445
 	}
442
-	t.Dirty = true
446
+
443 447
 	return nil
444 448
 }
445 449
 
... ...
@@ -494,6 +655,12 @@ func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) {
494 494
 // SignTargets signs the targets file for the given top level or delegated targets role
495 495
 func (tr *Repo) SignTargets(role string, expires time.Time) (*data.Signed, error) {
496 496
 	logrus.Debugf("sign targets called for role %s", role)
497
+	if _, ok := tr.Targets[role]; !ok {
498
+		return nil, data.ErrInvalidRole{
499
+			Role:   role,
500
+			Reason: "SignTargets called with non-existant targets role",
501
+		}
502
+	}
497 503
 	tr.Targets[role].Signed.Expires = expires
498 504
 	tr.Targets[role].Signed.Version++
499 505
 	signed, err := tr.Targets[role].ToSigned()
... ...
@@ -532,6 +699,7 @@ func (tr *Repo) SignSnapshot(expires time.Time) (*data.Signed, error) {
532 532
 		if err != nil {
533 533
 			return nil, err
534 534
 		}
535
+		targets.Dirty = false
535 536
 	}
536 537
 	tr.Snapshot.Signed.Expires = expires
537 538
 	tr.Snapshot.Signed.Version++
538 539
new file mode 100644
... ...
@@ -0,0 +1,31 @@
0
+package utils
1
+
2
+import (
3
+	"strings"
4
+)
5
+
6
+// RoleList is a list of roles
7
+type RoleList []string
8
+
9
+// Len returns the length of the list
10
+func (r RoleList) Len() int {
11
+	return len(r)
12
+}
13
+
14
+// Less returns true if the item at i should be sorted
15
+// before the item at j. It's an unstable partial ordering
16
+// based on the number of segments, separated by "/", in
17
+// the role name
18
+func (r RoleList) Less(i, j int) bool {
19
+	segsI := strings.Split(r[i], "/")
20
+	segsJ := strings.Split(r[j], "/")
21
+	if len(segsI) == len(segsJ) {
22
+		return r[i] < r[j]
23
+	}
24
+	return len(segsI) < len(segsJ)
25
+}
26
+
27
+// Swap the items at 2 locations in the list
28
+func (r RoleList) Swap(i, j int) {
29
+	r[i], r[j] = r[j], r[i]
30
+}
0 31
new file mode 100644
... ...
@@ -0,0 +1,85 @@
0
+package utils
1
+
2
+import (
3
+	"fmt"
4
+	"sync"
5
+)
6
+
7
+// ErrEmptyStack is used when an action that requires some
8
+// content is invoked and the stack is empty
9
+type ErrEmptyStack struct {
10
+	action string
11
+}
12
+
13
+func (err ErrEmptyStack) Error() string {
14
+	return fmt.Sprintf("attempted to %s with empty stack", err.action)
15
+}
16
+
17
+// ErrBadTypeCast is used by PopX functions when the item
18
+// cannot be typed to X
19
+type ErrBadTypeCast struct{}
20
+
21
+func (err ErrBadTypeCast) Error() string {
22
+	return "attempted to do a typed pop and item was not of type"
23
+}
24
+
25
+// Stack is a simple type agnostic stack implementation
26
+type Stack struct {
27
+	s []interface{}
28
+	l sync.Mutex
29
+}
30
+
31
+// NewStack create a new stack
32
+func NewStack() *Stack {
33
+	s := &Stack{
34
+		s: make([]interface{}, 0),
35
+	}
36
+	return s
37
+}
38
+
39
+// Push adds an item to the top of the stack.
40
+func (s *Stack) Push(item interface{}) {
41
+	s.l.Lock()
42
+	defer s.l.Unlock()
43
+	s.s = append(s.s, item)
44
+}
45
+
46
+// Pop removes and returns the top item on the stack, or returns
47
+// ErrEmptyStack if the stack has no content
48
+func (s *Stack) Pop() (interface{}, error) {
49
+	s.l.Lock()
50
+	defer s.l.Unlock()
51
+	l := len(s.s)
52
+	if l > 0 {
53
+		item := s.s[l-1]
54
+		s.s = s.s[:l-1]
55
+		return item, nil
56
+	}
57
+	return nil, ErrEmptyStack{action: "Pop"}
58
+}
59
+
60
+// PopString attempts to cast the top item on the stack to the string type.
61
+// If this succeeds, it removes and returns the top item. If the item
62
+// is not of the string type, ErrBadTypeCast is returned. If the stack
63
+// is empty, ErrEmptyStack is returned
64
+func (s *Stack) PopString() (string, error) {
65
+	s.l.Lock()
66
+	defer s.l.Unlock()
67
+	l := len(s.s)
68
+	if l > 0 {
69
+		item := s.s[l-1]
70
+		if item, ok := item.(string); ok {
71
+			s.s = s.s[:l-1]
72
+			return item, nil
73
+		}
74
+		return "", ErrBadTypeCast{}
75
+	}
76
+	return "", ErrEmptyStack{action: "PopString"}
77
+}
78
+
79
+// Empty returns true if the stack is empty
80
+func (s *Stack) Empty() bool {
81
+	s.l.Lock()
82
+	defer s.l.Unlock()
83
+	return len(s.s) == 0
84
+}
... ...
@@ -105,3 +105,44 @@ func DoHash(alg string, d []byte) []byte {
105 105
 	}
106 106
 	return nil
107 107
 }
108
+
109
+// UnusedDelegationKeys prunes a list of keys, returning those that are no
110
+// longer in use for a given targets file
111
+func UnusedDelegationKeys(t data.SignedTargets) []string {
112
+	// compare ids to all still active key ids in all active roles
113
+	// with the targets file
114
+	found := make(map[string]bool)
115
+	for _, r := range t.Signed.Delegations.Roles {
116
+		for _, id := range r.KeyIDs {
117
+			found[id] = true
118
+		}
119
+	}
120
+	var discard []string
121
+	for id := range t.Signed.Delegations.Keys {
122
+		if !found[id] {
123
+			discard = append(discard, id)
124
+		}
125
+	}
126
+	return discard
127
+}
128
+
129
+// RemoveUnusedKeys determines which keys in the slice of IDs are no longer
130
+// used in the given targets file and removes them from the delegated keys
131
+// map
132
+func RemoveUnusedKeys(t *data.SignedTargets) {
133
+	unusedIDs := UnusedDelegationKeys(*t)
134
+	for _, id := range unusedIDs {
135
+		delete(t.Signed.Delegations.Keys, id)
136
+	}
137
+}
138
+
139
+// FindRoleIndex returns the index of the role named <name> or -1 if no
140
+// matching role is found.
141
+func FindRoleIndex(rs []*data.Role, name string) int {
142
+	for i, r := range rs {
143
+		if r.Name == name {
144
+			return i
145
+		}
146
+	}
147
+	return -1
148
+}
108 149
new file mode 100644
... ...
@@ -0,0 +1,126 @@
0
+package validation
1
+
2
+import (
3
+	"encoding/json"
4
+	"fmt"
5
+)
6
+
7
+// VALIDATION ERRORS
8
+
9
+// ErrValidation represents a general validation error
10
+type ErrValidation struct {
11
+	Msg string
12
+}
13
+
14
+func (err ErrValidation) Error() string {
15
+	return fmt.Sprintf("An error occurred during validation: %s", err.Msg)
16
+}
17
+
18
+// ErrBadHierarchy represents missing metadata.  Currently: a missing snapshot
19
+// at this current time. When delegations are implemented it will also
20
+// represent a missing delegation parent
21
+type ErrBadHierarchy struct {
22
+	Missing string
23
+	Msg     string
24
+}
25
+
26
+func (err ErrBadHierarchy) Error() string {
27
+	return fmt.Sprintf("Metadata hierarchy is incomplete: %s", err.Msg)
28
+}
29
+
30
+// ErrBadRoot represents a failure validating the root
31
+type ErrBadRoot struct {
32
+	Msg string
33
+}
34
+
35
+func (err ErrBadRoot) Error() string {
36
+	return fmt.Sprintf("The root metadata is invalid: %s", err.Msg)
37
+}
38
+
39
+// ErrBadTargets represents a failure to validate a targets (incl delegations)
40
+type ErrBadTargets struct {
41
+	Msg string
42
+}
43
+
44
+func (err ErrBadTargets) Error() string {
45
+	return fmt.Sprintf("The targets metadata is invalid: %s", err.Msg)
46
+}
47
+
48
+// ErrBadSnapshot represents a failure to validate the snapshot
49
+type ErrBadSnapshot struct {
50
+	Msg string
51
+}
52
+
53
+func (err ErrBadSnapshot) Error() string {
54
+	return fmt.Sprintf("The snapshot metadata is invalid: %s", err.Msg)
55
+}
56
+
57
+// END VALIDATION ERRORS
58
+
59
+// SerializableError is a struct that can be used to serialize an error as JSON
60
+type SerializableError struct {
61
+	Name  string
62
+	Error error
63
+}
64
+
65
+// UnmarshalJSON attempts to unmarshal the error into the right type
66
+func (s *SerializableError) UnmarshalJSON(text []byte) (err error) {
67
+	var x struct{ Name string }
68
+	err = json.Unmarshal(text, &x)
69
+	if err != nil {
70
+		return
71
+	}
72
+	var theError error
73
+	switch x.Name {
74
+	case "ErrValidation":
75
+		var e struct{ Error ErrValidation }
76
+		err = json.Unmarshal(text, &e)
77
+		theError = e.Error
78
+	case "ErrBadHierarchy":
79
+		var e struct{ Error ErrBadHierarchy }
80
+		err = json.Unmarshal(text, &e)
81
+		theError = e.Error
82
+	case "ErrBadRoot":
83
+		var e struct{ Error ErrBadRoot }
84
+		err = json.Unmarshal(text, &e)
85
+		theError = e.Error
86
+	case "ErrBadTargets":
87
+		var e struct{ Error ErrBadTargets }
88
+		err = json.Unmarshal(text, &e)
89
+		theError = e.Error
90
+	case "ErrBadSnapshot":
91
+		var e struct{ Error ErrBadSnapshot }
92
+		err = json.Unmarshal(text, &e)
93
+		theError = e.Error
94
+	default:
95
+		err = fmt.Errorf("do not know how to unmarshal %s", x.Name)
96
+		return
97
+	}
98
+	if err != nil {
99
+		return
100
+	}
101
+	s.Name = x.Name
102
+	s.Error = theError
103
+	return nil
104
+}
105
+
106
+// NewSerializableError serializes one of the above errors into JSON
107
+func NewSerializableError(err error) (*SerializableError, error) {
108
+	// make sure it's one of our errors
109
+	var name string
110
+	switch err.(type) {
111
+	case ErrValidation:
112
+		name = "ErrValidation"
113
+	case ErrBadHierarchy:
114
+		name = "ErrBadHierarchy"
115
+	case ErrBadRoot:
116
+		name = "ErrBadRoot"
117
+	case ErrBadTargets:
118
+		name = "ErrBadTargets"
119
+	case ErrBadSnapshot:
120
+		name = "ErrBadSnapshot"
121
+	default:
122
+		return nil, fmt.Errorf("does not support serializing non-validation errors")
123
+	}
124
+	return &SerializableError{Name: name, Error: err}, nil
125
+}