Use updated notary to pick up updates from security review
Signed-off-by: Derek McGowan <derek@mcgstyle.net> (github: dmcgowan)
... | ... |
@@ -137,7 +137,7 @@ RUN set -x \ |
137 | 137 |
&& rm -rf "$GOPATH" |
138 | 138 |
|
139 | 139 |
# Install notary server |
140 |
-ENV NOTARY_COMMIT 77bced079e83d80f40c1f0a544b1a8a3b97fb052 |
|
140 |
+ENV NOTARY_COMMIT 8e8122eb5528f621afcd4e2854c47302f17392f7 |
|
141 | 141 |
RUN set -x \ |
142 | 142 |
&& export GOPATH="$(mktemp -d)" \ |
143 | 143 |
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ |
... | ... |
@@ -39,8 +39,8 @@ clone git github.com/hashicorp/consul v0.5.2 |
39 | 39 |
clone git github.com/docker/distribution 7dc8d4a26b689bd4892f2f2322dbce0b7119d686 |
40 | 40 |
clone git github.com/vbatts/tar-split v0.9.4 |
41 | 41 |
|
42 |
-clone git github.com/docker/notary 77bced079e83d80f40c1f0a544b1a8a3b97fb052 |
|
43 |
-clone git github.com/endophage/gotuf 374908abc8af7e953a2813c5c2b3944ab625ca68 |
|
42 |
+clone git github.com/docker/notary 8e8122eb5528f621afcd4e2854c47302f17392f7 |
|
43 |
+clone git github.com/endophage/gotuf 89ceb27829b9353dfee5ccccf7a3a9bb77008b05 |
|
44 | 44 |
clone git github.com/tent/canonical-json-go 96e4ba3a7613a1216cbd1badca4efe382adea337 |
45 | 45 |
clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c |
46 | 46 |
|
... | ... |
@@ -186,7 +186,7 @@ Apache License |
186 | 186 |
same "printed page" as the copyright notice for easier |
187 | 187 |
identification within third-party archives. |
188 | 188 |
|
189 |
- Copyright {yyyy} {name of copyright owner} |
|
189 |
+ Copyright 2015 Docker, Inc. |
|
190 | 190 |
|
191 | 191 |
Licensed under the Apache License, Version 2.0 (the "License"); |
192 | 192 |
you may not use this file except in compliance with the License. |
... | ... |
@@ -1,9 +1,19 @@ |
1 | 1 |
package changelist |
2 | 2 |
|
3 |
+// Scopes for TufChanges are simply the TUF roles. |
|
4 |
+// Unfortunately because of targets delegations, we can only |
|
5 |
+// cover the base roles. |
|
6 |
+const ( |
|
7 |
+ ScopeRoot = "root" |
|
8 |
+ ScopeTargets = "targets" |
|
9 |
+ ScopeSnapshot = "snapshot" |
|
10 |
+ ScopeTimestamp = "timestamp" |
|
11 |
+) |
|
12 |
+ |
|
3 | 13 |
// TufChange represents a change to a TUF repo |
4 | 14 |
type TufChange struct { |
5 | 15 |
// Abbreviated because Go doesn't permit a field and method of the same name |
6 |
- Actn int `json:"action"` |
|
16 |
+ Actn string `json:"action"` |
|
7 | 17 |
Role string `json:"role"` |
8 | 18 |
ChangeType string `json:"type"` |
9 | 19 |
ChangePath string `json:"path"` |
... | ... |
@@ -11,7 +21,7 @@ type TufChange struct { |
11 | 11 |
} |
12 | 12 |
|
13 | 13 |
// NewTufChange initializes a tufChange object |
14 |
-func NewTufChange(action int, role, changeType, changePath string, content []byte) *TufChange { |
|
14 |
+func NewTufChange(action string, role, changeType, changePath string, content []byte) *TufChange { |
|
15 | 15 |
return &TufChange{ |
16 | 16 |
Actn: action, |
17 | 17 |
Role: role, |
... | ... |
@@ -22,7 +32,7 @@ func NewTufChange(action int, role, changeType, changePath string, content []byt |
22 | 22 |
} |
23 | 23 |
|
24 | 24 |
// Action return c.Actn |
25 |
-func (c TufChange) Action() int { |
|
25 |
+func (c TufChange) Action() string { |
|
26 | 26 |
return c.Actn |
27 | 27 |
} |
28 | 28 |
|
... | ... |
@@ -5,6 +5,11 @@ type memChangelist struct { |
5 | 5 |
changes []Change |
6 | 6 |
} |
7 | 7 |
|
8 |
+// NewMemChangelist instantiates a new in-memory changelist |
|
9 |
+func NewMemChangelist() Changelist { |
|
10 |
+ return &memChangelist{} |
|
11 |
+} |
|
12 |
+ |
|
8 | 13 |
// List returns a list of Changes |
9 | 14 |
func (cl memChangelist) List() []Change { |
10 | 15 |
return cl.changes |
11 | 16 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,114 @@ |
0 |
+package changelist |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "encoding/json" |
|
4 |
+ "fmt" |
|
5 |
+ "io/ioutil" |
|
6 |
+ "os" |
|
7 |
+ "path" |
|
8 |
+ "sort" |
|
9 |
+ "time" |
|
10 |
+ |
|
11 |
+ "github.com/Sirupsen/logrus" |
|
12 |
+ "github.com/docker/distribution/uuid" |
|
13 |
+) |
|
14 |
+ |
|
15 |
+// FileChangelist stores all the changes as files |
|
16 |
+type FileChangelist struct { |
|
17 |
+ dir string |
|
18 |
+} |
|
19 |
+ |
|
20 |
+// NewFileChangelist is a convenience function that returns a new FileChangelist |
|
21 |
+func NewFileChangelist(dir string) (*FileChangelist, error) { |
|
22 |
+ logrus.Debug("Making dir path: ", dir) |
|
23 |
+ err := os.MkdirAll(dir, 0700) |
|
24 |
+ if err != nil { |
|
25 |
+ return nil, err |
|
26 |
+ } |
|
27 |
+ return &FileChangelist{dir: dir}, nil |
|
28 |
+} |
|
29 |
+ |
|
30 |
+// List returns a list of sorted changes |
|
31 |
+func (cl FileChangelist) List() []Change { |
|
32 |
+ var changes []Change |
|
33 |
+ dir, err := os.Open(cl.dir) |
|
34 |
+ if err != nil { |
|
35 |
+ return changes |
|
36 |
+ } |
|
37 |
+ defer dir.Close() |
|
38 |
+ fileInfos, err := dir.Readdir(0) |
|
39 |
+ if err != nil { |
|
40 |
+ return changes |
|
41 |
+ } |
|
42 |
+ sort.Sort(fileChanges(fileInfos)) |
|
43 |
+ for _, f := range fileInfos { |
|
44 |
+ if f.IsDir() { |
|
45 |
+ continue |
|
46 |
+ } |
|
47 |
+ raw, err := ioutil.ReadFile(path.Join(cl.dir, f.Name())) |
|
48 |
+ if err != nil { |
|
49 |
+ logrus.Warn(err.Error()) |
|
50 |
+ continue |
|
51 |
+ } |
|
52 |
+ c := &TufChange{} |
|
53 |
+ err = json.Unmarshal(raw, c) |
|
54 |
+ if err != nil { |
|
55 |
+ logrus.Warn(err.Error()) |
|
56 |
+ continue |
|
57 |
+ } |
|
58 |
+ changes = append(changes, c) |
|
59 |
+ } |
|
60 |
+ return changes |
|
61 |
+} |
|
62 |
+ |
|
63 |
+// Add adds a change to the file change list |
|
64 |
+func (cl FileChangelist) Add(c Change) error { |
|
65 |
+ cJSON, err := json.Marshal(c) |
|
66 |
+ if err != nil { |
|
67 |
+ return err |
|
68 |
+ } |
|
69 |
+ filename := fmt.Sprintf("%020d_%s.change", time.Now().UnixNano(), uuid.Generate()) |
|
70 |
+ return ioutil.WriteFile(path.Join(cl.dir, filename), cJSON, 0644) |
|
71 |
+} |
|
72 |
+ |
|
73 |
+// Clear clears the change list |
|
74 |
+func (cl FileChangelist) Clear(archive string) error { |
|
75 |
+ dir, err := os.Open(cl.dir) |
|
76 |
+ if err != nil { |
|
77 |
+ return err |
|
78 |
+ } |
|
79 |
+ defer dir.Close() |
|
80 |
+ files, err := dir.Readdir(0) |
|
81 |
+ if err != nil { |
|
82 |
+ return err |
|
83 |
+ } |
|
84 |
+ for _, f := range files { |
|
85 |
+ os.Remove(path.Join(cl.dir, f.Name())) |
|
86 |
+ } |
|
87 |
+ return nil |
|
88 |
+} |
|
89 |
+ |
|
90 |
+// Close is a no-op |
|
91 |
+func (cl FileChangelist) Close() error { |
|
92 |
+ // Nothing to do here |
|
93 |
+ return nil |
|
94 |
+} |
|
95 |
+ |
|
96 |
+type fileChanges []os.FileInfo |
|
97 |
+ |
|
98 |
+// Len returns the length of a file change list |
|
99 |
+func (cs fileChanges) Len() int { |
|
100 |
+ return len(cs) |
|
101 |
+} |
|
102 |
+ |
|
103 |
+// Less compares the names of two different file changes |
|
104 |
+func (cs fileChanges) Less(i, j int) bool { |
|
105 |
+ return cs[i].Name() < cs[j].Name() |
|
106 |
+} |
|
107 |
+ |
|
108 |
+// Swap swaps the position of two file changes |
|
109 |
+func (cs fileChanges) Swap(i, j int) { |
|
110 |
+ tmp := cs[i] |
|
111 |
+ cs[i] = cs[j] |
|
112 |
+ cs[j] = tmp |
|
113 |
+} |
0 | 114 |
deleted file mode 100644 |
... | ... |
@@ -1,114 +0,0 @@ |
1 |
-package changelist |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "encoding/json" |
|
5 |
- "fmt" |
|
6 |
- "io/ioutil" |
|
7 |
- "os" |
|
8 |
- "path" |
|
9 |
- "sort" |
|
10 |
- "time" |
|
11 |
- |
|
12 |
- "github.com/Sirupsen/logrus" |
|
13 |
- "github.com/docker/distribution/uuid" |
|
14 |
-) |
|
15 |
- |
|
16 |
-// FileChangelist stores all the changes as files |
|
17 |
-type FileChangelist struct { |
|
18 |
- dir string |
|
19 |
-} |
|
20 |
- |
|
21 |
-// NewFileChangelist is a convenience method for returning FileChangeLists |
|
22 |
-func NewFileChangelist(dir string) (*FileChangelist, error) { |
|
23 |
- logrus.Debug("Making dir path: ", dir) |
|
24 |
- err := os.MkdirAll(dir, 0700) |
|
25 |
- if err != nil { |
|
26 |
- return nil, err |
|
27 |
- } |
|
28 |
- return &FileChangelist{dir: dir}, nil |
|
29 |
-} |
|
30 |
- |
|
31 |
-// List returns a list of sorted changes |
|
32 |
-func (cl FileChangelist) List() []Change { |
|
33 |
- var changes []Change |
|
34 |
- dir, err := os.Open(cl.dir) |
|
35 |
- if err != nil { |
|
36 |
- return changes |
|
37 |
- } |
|
38 |
- defer dir.Close() |
|
39 |
- fileInfos, err := dir.Readdir(0) |
|
40 |
- if err != nil { |
|
41 |
- return changes |
|
42 |
- } |
|
43 |
- sort.Sort(fileChanges(fileInfos)) |
|
44 |
- for _, f := range fileInfos { |
|
45 |
- if f.IsDir() { |
|
46 |
- continue |
|
47 |
- } |
|
48 |
- raw, err := ioutil.ReadFile(path.Join(cl.dir, f.Name())) |
|
49 |
- if err != nil { |
|
50 |
- logrus.Warn(err.Error()) |
|
51 |
- continue |
|
52 |
- } |
|
53 |
- c := &TufChange{} |
|
54 |
- err = json.Unmarshal(raw, c) |
|
55 |
- if err != nil { |
|
56 |
- logrus.Warn(err.Error()) |
|
57 |
- continue |
|
58 |
- } |
|
59 |
- changes = append(changes, c) |
|
60 |
- } |
|
61 |
- return changes |
|
62 |
-} |
|
63 |
- |
|
64 |
-// Add adds a change to the file change list |
|
65 |
-func (cl FileChangelist) Add(c Change) error { |
|
66 |
- cJSON, err := json.Marshal(c) |
|
67 |
- if err != nil { |
|
68 |
- return err |
|
69 |
- } |
|
70 |
- filename := fmt.Sprintf("%020d_%s.change", time.Now().UnixNano(), uuid.Generate()) |
|
71 |
- return ioutil.WriteFile(path.Join(cl.dir, filename), cJSON, 0644) |
|
72 |
-} |
|
73 |
- |
|
74 |
-// Clear clears the change list |
|
75 |
-func (cl FileChangelist) Clear(archive string) error { |
|
76 |
- dir, err := os.Open(cl.dir) |
|
77 |
- if err != nil { |
|
78 |
- return err |
|
79 |
- } |
|
80 |
- defer dir.Close() |
|
81 |
- files, err := dir.Readdir(0) |
|
82 |
- if err != nil { |
|
83 |
- return err |
|
84 |
- } |
|
85 |
- for _, f := range files { |
|
86 |
- os.Remove(path.Join(cl.dir, f.Name())) |
|
87 |
- } |
|
88 |
- return nil |
|
89 |
-} |
|
90 |
- |
|
91 |
-// Close is a no-op |
|
92 |
-func (cl FileChangelist) Close() error { |
|
93 |
- // Nothing to do here |
|
94 |
- return nil |
|
95 |
-} |
|
96 |
- |
|
97 |
-type fileChanges []os.FileInfo |
|
98 |
- |
|
99 |
-// Len returns the length of a file change list |
|
100 |
-func (cs fileChanges) Len() int { |
|
101 |
- return len(cs) |
|
102 |
-} |
|
103 |
- |
|
104 |
-// Less compares the names of two different file changes |
|
105 |
-func (cs fileChanges) Less(i, j int) bool { |
|
106 |
- return cs[i].Name() < cs[j].Name() |
|
107 |
-} |
|
108 |
- |
|
109 |
-// Swap swaps the position of two file changes |
|
110 |
-func (cs fileChanges) Swap(i, j int) { |
|
111 |
- tmp := cs[i] |
|
112 |
- cs[i] = cs[j] |
|
113 |
- cs[j] = tmp |
|
114 |
-} |
... | ... |
@@ -22,17 +22,17 @@ type Changelist interface { |
22 | 22 |
|
23 | 23 |
const ( |
24 | 24 |
// ActionCreate represents a Create action |
25 |
- ActionCreate = iota |
|
25 |
+ ActionCreate = "create" |
|
26 | 26 |
// ActionUpdate represents an Update action |
27 |
- ActionUpdate |
|
27 |
+ ActionUpdate = "update" |
|
28 | 28 |
// ActionDelete represents a Delete action |
29 |
- ActionDelete |
|
29 |
+ ActionDelete = "delete" |
|
30 | 30 |
) |
31 | 31 |
|
32 | 32 |
// Change is the interface for a TUF Change |
33 | 33 |
type Change interface { |
34 | 34 |
// "create","update", or "delete" |
35 |
- Action() int |
|
35 |
+ Action() string |
|
36 | 36 |
|
37 | 37 |
// Where the change should be made. |
38 | 38 |
// For TUF this will be the role |
... | ... |
@@ -250,7 +250,7 @@ func (r *NotaryRepository) AddTarget(target *Target) error { |
250 | 250 |
return err |
251 | 251 |
} |
252 | 252 |
|
253 |
- c := changelist.NewTufChange(changelist.ActionCreate, "targets", "target", target.Name, metaJSON) |
|
253 |
+ c := changelist.NewTufChange(changelist.ActionCreate, changelist.ScopeTargets, "target", target.Name, metaJSON) |
|
254 | 254 |
err = cl.Add(c) |
255 | 255 |
if err != nil { |
256 | 256 |
return err |
... | ... |
@@ -258,6 +258,22 @@ func (r *NotaryRepository) AddTarget(target *Target) error { |
258 | 258 |
return cl.Close() |
259 | 259 |
} |
260 | 260 |
|
261 |
+// RemoveTarget creates a new changelist entry to remove a target from the repository |
|
262 |
+// when the changelist gets applied at publish time |
|
263 |
+func (r *NotaryRepository) RemoveTarget(targetName string) error { |
|
264 |
+ cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) |
|
265 |
+ if err != nil { |
|
266 |
+ return err |
|
267 |
+ } |
|
268 |
+ logrus.Debugf("Removing target \"%s\"", targetName) |
|
269 |
+ c := changelist.NewTufChange(changelist.ActionDelete, changelist.ScopeTargets, "target", targetName, nil) |
|
270 |
+ err = cl.Add(c) |
|
271 |
+ if err != nil { |
|
272 |
+ return err |
|
273 |
+ } |
|
274 |
+ return nil |
|
275 |
+} |
|
276 |
+ |
|
261 | 277 |
// ListTargets lists all targets for the current repository |
262 | 278 |
func (r *NotaryRepository) ListTargets() ([]*Target, error) { |
263 | 279 |
c, err := r.bootstrapClient() |
... | ... |
@@ -5,6 +5,7 @@ import ( |
5 | 5 |
"net/http" |
6 | 6 |
"time" |
7 | 7 |
|
8 |
+ "github.com/Sirupsen/logrus" |
|
8 | 9 |
"github.com/docker/notary/client/changelist" |
9 | 10 |
"github.com/endophage/gotuf" |
10 | 11 |
"github.com/endophage/gotuf/data" |
... | ... |
@@ -26,13 +27,16 @@ func getRemoteStore(baseURL, gun string, rt http.RoundTripper) (store.RemoteStor |
26 | 26 |
|
27 | 27 |
func applyChangelist(repo *tuf.TufRepo, cl changelist.Changelist) error { |
28 | 28 |
changes := cl.List() |
29 |
- var err error |
|
29 |
+ logrus.Debugf("applying %d changes", len(changes)) |
|
30 | 30 |
for _, c := range changes { |
31 |
- if c.Scope() == "targets" { |
|
32 |
- applyTargetsChange(repo, c) |
|
33 |
- } |
|
34 |
- if err != nil { |
|
35 |
- return err |
|
31 |
+ switch c.Scope() { |
|
32 |
+ case changelist.ScopeTargets: |
|
33 |
+ err := applyTargetsChange(repo, c) |
|
34 |
+ if err != nil { |
|
35 |
+ return err |
|
36 |
+ } |
|
37 |
+ default: |
|
38 |
+ logrus.Debug("scope not supported: ", c.Scope()) |
|
36 | 39 |
} |
37 | 40 |
} |
38 | 41 |
return nil |
... | ... |
@@ -40,16 +44,21 @@ func applyChangelist(repo *tuf.TufRepo, cl changelist.Changelist) error { |
40 | 40 |
|
41 | 41 |
func applyTargetsChange(repo *tuf.TufRepo, c changelist.Change) error { |
42 | 42 |
var err error |
43 |
- meta := &data.FileMeta{} |
|
44 |
- err = json.Unmarshal(c.Content(), meta) |
|
45 |
- if err != nil { |
|
46 |
- return nil |
|
47 |
- } |
|
48 |
- if c.Action() == changelist.ActionCreate { |
|
43 |
+ switch c.Action() { |
|
44 |
+ case changelist.ActionCreate: |
|
45 |
+ logrus.Debug("changelist add: ", c.Path()) |
|
46 |
+ meta := &data.FileMeta{} |
|
47 |
+ err = json.Unmarshal(c.Content(), meta) |
|
48 |
+ if err != nil { |
|
49 |
+ return err |
|
50 |
+ } |
|
49 | 51 |
files := data.Files{c.Path(): *meta} |
50 |
- _, err = repo.AddTargets("targets", files) |
|
51 |
- } else if c.Action() == changelist.ActionDelete { |
|
52 |
- err = repo.RemoveTargets("targets", c.Path()) |
|
52 |
+ _, err = repo.AddTargets(c.Scope(), files) |
|
53 |
+ case changelist.ActionDelete: |
|
54 |
+ logrus.Debug("changelist remove: ", c.Path()) |
|
55 |
+ err = repo.RemoveTargets(c.Scope(), c.Path()) |
|
56 |
+ default: |
|
57 |
+ logrus.Debug("action not yet supported: ", c.Action()) |
|
53 | 58 |
} |
54 | 59 |
if err != nil { |
55 | 60 |
return err |
... | ... |
@@ -42,6 +42,39 @@ func (km *KeyStoreManager) ExportRootKey(dest io.Writer, keyID string) error { |
42 | 42 |
return err |
43 | 43 |
} |
44 | 44 |
|
45 |
+// ExportRootKeyReencrypt exports the specified root key to an io.Writer in |
|
46 |
+// PEM format. The key is reencrypted with a new passphrase. |
|
47 |
+func (km *KeyStoreManager) ExportRootKeyReencrypt(dest io.Writer, keyID string, newPassphraseRetriever passphrase.Retriever) error { |
|
48 |
+ privateKey, alias, err := km.rootKeyStore.GetKey(keyID) |
|
49 |
+ if err != nil { |
|
50 |
+ return err |
|
51 |
+ } |
|
52 |
+ |
|
53 |
+ // Create temporary keystore to use as a staging area |
|
54 |
+ tempBaseDir, err := ioutil.TempDir("", "notary-key-export-") |
|
55 |
+ defer os.RemoveAll(tempBaseDir) |
|
56 |
+ |
|
57 |
+ privRootKeysSubdir := filepath.Join(privDir, rootKeysSubdir) |
|
58 |
+ tempRootKeysPath := filepath.Join(tempBaseDir, privRootKeysSubdir) |
|
59 |
+ tempRootKeyStore, err := trustmanager.NewKeyFileStore(tempRootKeysPath, newPassphraseRetriever) |
|
60 |
+ if err != nil { |
|
61 |
+ return err |
|
62 |
+ } |
|
63 |
+ |
|
64 |
+ err = tempRootKeyStore.AddKey(keyID, alias, privateKey) |
|
65 |
+ if err != nil { |
|
66 |
+ return err |
|
67 |
+ } |
|
68 |
+ |
|
69 |
+ pemBytes, err := tempRootKeyStore.Get(keyID + "_" + alias) |
|
70 |
+ if err != nil { |
|
71 |
+ return err |
|
72 |
+ } |
|
73 |
+ |
|
74 |
+ _, err = dest.Write(pemBytes) |
|
75 |
+ return err |
|
76 |
+} |
|
77 |
+ |
|
45 | 78 |
// checkRootKeyIsEncrypted makes sure the root key is encrypted. We have |
46 | 79 |
// internal assumptions that depend on this. |
47 | 80 |
func checkRootKeyIsEncrypted(pemBytes []byte) error { |
... | ... |
@@ -80,13 +113,13 @@ func (km *KeyStoreManager) ImportRootKey(source io.Reader, keyID string) error { |
80 | 80 |
|
81 | 81 |
func moveKeys(oldKeyStore, newKeyStore *trustmanager.KeyFileStore) error { |
82 | 82 |
// List all files but no symlinks |
83 |
- for _, f := range oldKeyStore.ListKeys() { |
|
84 |
- pemBytes, alias, err := oldKeyStore.GetKey(f) |
|
83 |
+ for f := range oldKeyStore.ListKeys() { |
|
84 |
+ privateKey, alias, err := oldKeyStore.GetKey(f) |
|
85 | 85 |
if err != nil { |
86 | 86 |
return err |
87 | 87 |
} |
88 | 88 |
|
89 |
- err = newKeyStore.AddKey(f, alias, pemBytes) |
|
89 |
+ err = newKeyStore.AddKey(f, alias, privateKey) |
|
90 | 90 |
|
91 | 91 |
if err != nil { |
92 | 92 |
return err |
... | ... |
@@ -247,7 +280,7 @@ func (km *KeyStoreManager) ImportKeysZip(zipReader zip.Reader) error { |
247 | 247 |
|
248 | 248 |
func moveKeysByGUN(oldKeyStore, newKeyStore *trustmanager.KeyFileStore, gun string) error { |
249 | 249 |
// List all files but no symlinks |
250 |
- for _, relKeyPath := range oldKeyStore.ListKeys() { |
|
250 |
+ for relKeyPath := range oldKeyStore.ListKeys() { |
|
251 | 251 |
|
252 | 252 |
// Skip keys that aren't associated with this GUN |
253 | 253 |
if !strings.HasPrefix(relKeyPath, filepath.FromSlash(gun)) { |
... | ... |
@@ -22,28 +22,45 @@ import ( |
22 | 22 |
type Retriever func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error) |
23 | 23 |
|
24 | 24 |
const ( |
25 |
- idBytesToDisplay = 5 |
|
25 |
+ idBytesToDisplay = 7 |
|
26 | 26 |
tufRootAlias = "root" |
27 | 27 |
tufTargetsAlias = "targets" |
28 | 28 |
tufSnapshotAlias = "snapshot" |
29 |
- tufRootKeyGenerationWarning = `You are about to create a new root signing key passphrase. This passphrase will be used to protect |
|
30 |
-the most sensitive key in your signing system. Please choose a long, complex passphrase and be careful |
|
31 |
-to keep the password and the key file itself secure and backed up. It is highly recommended that you use |
|
32 |
-a password manager to generate the passphrase and keep it safe. There will be no way to recover this key. |
|
33 |
-You can find the key in your config directory.` |
|
29 |
+ tufRootKeyGenerationWarning = `You are about to create a new root signing key passphrase. This passphrase |
|
30 |
+will be used to protect the most sensitive key in your signing system. Please |
|
31 |
+choose a long, complex passphrase and be careful to keep the password and the |
|
32 |
+key file itself secure and backed up. It is highly recommended that you use a |
|
33 |
+password manager to generate the passphrase and keep it safe. There will be no |
|
34 |
+way to recover this key. You can find the key in your config directory.` |
|
35 |
+) |
|
36 |
+ |
|
37 |
+var ( |
|
38 |
+ // ErrTooShort is returned if the passphrase entered for a new key is |
|
39 |
+ // below the minimum length |
|
40 |
+ ErrTooShort = errors.New("Passphrase too short") |
|
41 |
+ |
|
42 |
+ // ErrDontMatch is returned if the two entered passphrases don't match. |
|
43 |
+ // The caller may prompt the user to retry. |
|
44 |
+ ErrDontMatch = errors.New("The entered passphrases do not match") |
|
45 |
+ |
|
46 |
+ // ErrTooManyAttempts is returned if the maximum number of passphrase |
|
47 |
+ // entry attempts is reached. |
|
48 |
+ ErrTooManyAttempts = errors.New("Too many attempts") |
|
34 | 49 |
) |
35 | 50 |
|
36 | 51 |
// PromptRetriever returns a new Retriever which will provide a prompt on stdin |
37 | 52 |
// and stdout to retrieve a passphrase. The passphrase will be cached such that |
38 | 53 |
// subsequent prompts will produce the same passphrase. |
39 | 54 |
func PromptRetriever() Retriever { |
40 |
- return PromptRetrieverWithInOut(os.Stdin, os.Stdout) |
|
55 |
+ return PromptRetrieverWithInOut(os.Stdin, os.Stdout, nil) |
|
41 | 56 |
} |
42 | 57 |
|
43 | 58 |
// PromptRetrieverWithInOut returns a new Retriever which will provide a |
44 | 59 |
// prompt using the given in and out readers. The passphrase will be cached |
45 | 60 |
// such that subsequent prompts will produce the same passphrase. |
46 |
-func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever { |
|
61 |
+// aliasMap can be used to specify display names for TUF key aliases. If aliasMap |
|
62 |
+// is nil, a sensible default will be used. |
|
63 |
+func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]string) Retriever { |
|
47 | 64 |
userEnteredTargetsSnapshotsPass := false |
48 | 65 |
targetsSnapshotsPass := "" |
49 | 66 |
userEnteredRootsPass := false |
... | ... |
@@ -54,14 +71,20 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever { |
54 | 54 |
fmt.Fprintln(out, tufRootKeyGenerationWarning) |
55 | 55 |
} |
56 | 56 |
if numAttempts > 0 { |
57 |
- if createNew { |
|
58 |
- fmt.Fprintln(out, "Passphrases do not match. Please retry.") |
|
59 |
- |
|
60 |
- } else { |
|
57 |
+ if !createNew { |
|
61 | 58 |
fmt.Fprintln(out, "Passphrase incorrect. Please retry.") |
62 | 59 |
} |
63 | 60 |
} |
64 | 61 |
|
62 |
+ // Figure out if we should display a different string for this alias |
|
63 |
+ displayAlias := alias |
|
64 |
+ if aliasMap != nil { |
|
65 |
+ if val, ok := aliasMap[alias]; ok { |
|
66 |
+ displayAlias = val |
|
67 |
+ } |
|
68 |
+ |
|
69 |
+ } |
|
70 |
+ |
|
65 | 71 |
// First, check if we have a password cached for this alias. |
66 | 72 |
if numAttempts == 0 { |
67 | 73 |
if userEnteredTargetsSnapshotsPass && (alias == tufSnapshotAlias || alias == tufTargetsAlias) { |
... | ... |
@@ -73,7 +96,7 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever { |
73 | 73 |
} |
74 | 74 |
|
75 | 75 |
if numAttempts > 3 && !createNew { |
76 |
- return "", true, errors.New("Too many attempts") |
|
76 |
+ return "", true, ErrTooManyAttempts |
|
77 | 77 |
} |
78 | 78 |
|
79 | 79 |
state, err := term.SaveState(0) |
... | ... |
@@ -86,15 +109,24 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever { |
86 | 86 |
stdin := bufio.NewReader(in) |
87 | 87 |
|
88 | 88 |
indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator)) |
89 |
+ if indexOfLastSeparator == -1 { |
|
90 |
+ indexOfLastSeparator = 0 |
|
91 |
+ } |
|
89 | 92 |
|
90 |
- if len(keyName) > indexOfLastSeparator+idBytesToDisplay+1 { |
|
91 |
- keyName = keyName[:indexOfLastSeparator+idBytesToDisplay+1] |
|
93 |
+ if len(keyName) > indexOfLastSeparator+idBytesToDisplay { |
|
94 |
+ if indexOfLastSeparator > 0 { |
|
95 |
+ keyNamePrefix := keyName[:indexOfLastSeparator] |
|
96 |
+ keyNameID := keyName[indexOfLastSeparator+1 : indexOfLastSeparator+idBytesToDisplay+1] |
|
97 |
+ keyName = keyNamePrefix + " (" + keyNameID + ")" |
|
98 |
+ } else { |
|
99 |
+ keyName = keyName[indexOfLastSeparator : indexOfLastSeparator+idBytesToDisplay] |
|
100 |
+ } |
|
92 | 101 |
} |
93 | 102 |
|
94 | 103 |
if createNew { |
95 |
- fmt.Fprintf(out, "Enter passphrase for new %s key with id %s: ", alias, keyName) |
|
104 |
+ fmt.Fprintf(out, "Enter passphrase for new %s key with id %s: ", displayAlias, keyName) |
|
96 | 105 |
} else { |
97 |
- fmt.Fprintf(out, "Enter key passphrase for %s key with id %s: ", alias, keyName) |
|
106 |
+ fmt.Fprintf(out, "Enter key passphrase for %s key with id %s: ", displayAlias, keyName) |
|
98 | 107 |
} |
99 | 108 |
|
100 | 109 |
passphrase, err := stdin.ReadBytes('\n') |
... | ... |
@@ -119,10 +151,10 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever { |
119 | 119 |
|
120 | 120 |
if len(retPass) < 8 { |
121 | 121 |
fmt.Fprintln(out, "Please use a password manager to generate and store a good random passphrase.") |
122 |
- return "", false, errors.New("Passphrase too short") |
|
122 |
+ return "", false, ErrTooShort |
|
123 | 123 |
} |
124 | 124 |
|
125 |
- fmt.Fprintf(out, "Repeat passphrase for new %s key with id %s: ", alias, keyName) |
|
125 |
+ fmt.Fprintf(out, "Repeat passphrase for new %s key with id %s: ", displayAlias, keyName) |
|
126 | 126 |
confirmation, err := stdin.ReadBytes('\n') |
127 | 127 |
fmt.Fprintln(out) |
128 | 128 |
if err != nil { |
... | ... |
@@ -131,7 +163,8 @@ func PromptRetrieverWithInOut(in io.Reader, out io.Writer) Retriever { |
131 | 131 |
confirmationStr := strings.TrimSpace(string(confirmation)) |
132 | 132 |
|
133 | 133 |
if retPass != confirmationStr { |
134 |
- return "", false, errors.New("The entered passphrases do not match") |
|
134 |
+ fmt.Fprintln(out, "Passphrases do not match. Please retry.") |
|
135 |
+ return "", false, ErrDontMatch |
|
135 | 136 |
} |
136 | 137 |
|
137 | 138 |
if alias == tufSnapshotAlias || alias == tufTargetsAlias { |
... | ... |
@@ -5,65 +5,10 @@ import ( |
5 | 5 |
"strings" |
6 | 6 |
"sync" |
7 | 7 |
|
8 |
- "fmt" |
|
9 |
- |
|
10 | 8 |
"github.com/docker/notary/pkg/passphrase" |
11 | 9 |
"github.com/endophage/gotuf/data" |
12 | 10 |
) |
13 | 11 |
|
14 |
-const ( |
|
15 |
- keyExtension = "key" |
|
16 |
-) |
|
17 |
- |
|
18 |
-// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key |
|
19 |
-type ErrAttemptsExceeded struct{} |
|
20 |
- |
|
21 |
-// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key |
|
22 |
-func (err ErrAttemptsExceeded) Error() string { |
|
23 |
- return "maximum number of passphrase attempts exceeded" |
|
24 |
-} |
|
25 |
- |
|
26 |
-// ErrPasswordInvalid is returned when signing fails. It could also mean the signing |
|
27 |
-// key file was corrupted, but we have no way to distinguish. |
|
28 |
-type ErrPasswordInvalid struct{} |
|
29 |
- |
|
30 |
-// ErrPasswordInvalid is returned when signing fails. It could also mean the signing |
|
31 |
-// key file was corrupted, but we have no way to distinguish. |
|
32 |
-func (err ErrPasswordInvalid) Error() string { |
|
33 |
- return "password invalid, operation has failed." |
|
34 |
-} |
|
35 |
- |
|
36 |
-// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. |
|
37 |
-type ErrKeyNotFound struct { |
|
38 |
- KeyID string |
|
39 |
-} |
|
40 |
- |
|
41 |
-// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. |
|
42 |
-func (err ErrKeyNotFound) Error() string { |
|
43 |
- return fmt.Sprintf("signing key not found: %s", err.KeyID) |
|
44 |
-} |
|
45 |
- |
|
46 |
-// KeyStore is a generic interface for private key storage |
|
47 |
-type KeyStore interface { |
|
48 |
- LimitedFileStore |
|
49 |
- |
|
50 |
- AddKey(name, alias string, privKey data.PrivateKey) error |
|
51 |
- GetKey(name string) (data.PrivateKey, string, error) |
|
52 |
- ListKeys() []string |
|
53 |
- RemoveKey(name string) error |
|
54 |
-} |
|
55 |
- |
|
56 |
-type cachedKey struct { |
|
57 |
- alias string |
|
58 |
- key data.PrivateKey |
|
59 |
-} |
|
60 |
- |
|
61 |
-// PassphraseRetriever is a callback function that should retrieve a passphrase |
|
62 |
-// for a given named key. If it should be treated as new passphrase (e.g. with |
|
63 |
-// confirmation), createNew will be true. Attempts is passed in so that implementers |
|
64 |
-// decide how many chances to give to a human, for example. |
|
65 |
-type PassphraseRetriever func(keyId, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error) |
|
66 |
- |
|
67 | 12 |
// KeyFileStore persists and manages private keys on disk |
68 | 13 |
type KeyFileStore struct { |
69 | 14 |
sync.Mutex |
... | ... |
@@ -111,7 +56,7 @@ func (s *KeyFileStore) GetKey(name string) (data.PrivateKey, string, error) { |
111 | 111 |
// ListKeys returns a list of unique PublicKeys present on the KeyFileStore. |
112 | 112 |
// There might be symlinks associating Certificate IDs to Public Keys, so this |
113 | 113 |
// method only returns the IDs that aren't symlinks |
114 |
-func (s *KeyFileStore) ListKeys() []string { |
|
114 |
+func (s *KeyFileStore) ListKeys() map[string]string { |
|
115 | 115 |
return listKeys(s) |
116 | 116 |
} |
117 | 117 |
|
... | ... |
@@ -149,7 +94,7 @@ func (s *KeyMemoryStore) GetKey(name string) (data.PrivateKey, string, error) { |
149 | 149 |
// ListKeys returns a list of unique PublicKeys present on the KeyFileStore. |
150 | 150 |
// There might be symlinks associating Certificate IDs to Public Keys, so this |
151 | 151 |
// method only returns the IDs that aren't symlinks |
152 |
-func (s *KeyMemoryStore) ListKeys() []string { |
|
152 |
+func (s *KeyMemoryStore) ListKeys() map[string]string { |
|
153 | 153 |
return listKeys(s) |
154 | 154 |
} |
155 | 155 |
|
... | ... |
@@ -167,10 +112,10 @@ func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached |
167 | 167 |
} |
168 | 168 |
|
169 | 169 |
attempts := 0 |
170 |
- passphrase := "" |
|
170 |
+ chosenPassphrase := "" |
|
171 | 171 |
giveup := false |
172 | 172 |
for { |
173 |
- passphrase, giveup, err = passphraseRetriever(name, alias, true, attempts) |
|
173 |
+ chosenPassphrase, giveup, err = passphraseRetriever(name, alias, true, attempts) |
|
174 | 174 |
if err != nil { |
175 | 175 |
attempts++ |
176 | 176 |
continue |
... | ... |
@@ -184,8 +129,8 @@ func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached |
184 | 184 |
break |
185 | 185 |
} |
186 | 186 |
|
187 |
- if passphrase != "" { |
|
188 |
- pemPrivKey, err = EncryptPrivateKey(privKey, passphrase) |
|
187 |
+ if chosenPassphrase != "" { |
|
188 |
+ pemPrivKey, err = EncryptPrivateKey(privKey, chosenPassphrase) |
|
189 | 189 |
if err != nil { |
190 | 190 |
return err |
191 | 191 |
} |
... | ... |
@@ -261,18 +206,20 @@ func getKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cached |
261 | 261 |
return privKey, keyAlias, nil |
262 | 262 |
} |
263 | 263 |
|
264 |
-// ListKeys returns a list of unique PublicKeys present on the KeyFileStore. |
|
264 |
+// ListKeys returns a map of unique PublicKeys present on the KeyFileStore and |
|
265 |
+// their corresponding aliases. |
|
265 | 266 |
// There might be symlinks associating Certificate IDs to Public Keys, so this |
266 | 267 |
// method only returns the IDs that aren't symlinks |
267 |
-func listKeys(s LimitedFileStore) []string { |
|
268 |
- var keyIDList []string |
|
268 |
+func listKeys(s LimitedFileStore) map[string]string { |
|
269 |
+ keyIDMap := make(map[string]string) |
|
269 | 270 |
|
270 | 271 |
for _, f := range s.ListFiles(false) { |
271 |
- keyID := strings.TrimSpace(strings.TrimSuffix(f, filepath.Ext(f))) |
|
272 |
- keyID = keyID[:strings.LastIndex(keyID, "_")] |
|
273 |
- keyIDList = append(keyIDList, keyID) |
|
272 |
+ keyIDFull := strings.TrimSpace(strings.TrimSuffix(f, filepath.Ext(f))) |
|
273 |
+ keyID := keyIDFull[:strings.LastIndex(keyIDFull, "_")] |
|
274 |
+ keyAlias := keyIDFull[strings.LastIndex(keyIDFull, "_")+1:] |
|
275 |
+ keyIDMap[keyID] = keyAlias |
|
274 | 276 |
} |
275 |
- return keyIDList |
|
277 |
+ return keyIDMap |
|
276 | 278 |
} |
277 | 279 |
|
278 | 280 |
// RemoveKey removes the key from the keyfilestore |
279 | 281 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,52 @@ |
0 |
+package trustmanager |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "fmt" |
|
4 |
+ |
|
5 |
+ "github.com/endophage/gotuf/data" |
|
6 |
+) |
|
7 |
+ |
|
8 |
+// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key |
|
9 |
+type ErrAttemptsExceeded struct{} |
|
10 |
+ |
|
11 |
+// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key |
|
12 |
+func (err ErrAttemptsExceeded) Error() string { |
|
13 |
+ return "maximum number of passphrase attempts exceeded" |
|
14 |
+} |
|
15 |
+ |
|
16 |
+// ErrPasswordInvalid is returned when signing fails. It could also mean the signing |
|
17 |
+// key file was corrupted, but we have no way to distinguish. |
|
18 |
+type ErrPasswordInvalid struct{} |
|
19 |
+ |
|
20 |
+// ErrPasswordInvalid is returned when signing fails. It could also mean the signing |
|
21 |
+// key file was corrupted, but we have no way to distinguish. |
|
22 |
+func (err ErrPasswordInvalid) Error() string { |
|
23 |
+ return "password invalid, operation has failed." |
|
24 |
+} |
|
25 |
+ |
|
26 |
+// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. |
|
27 |
+type ErrKeyNotFound struct { |
|
28 |
+ KeyID string |
|
29 |
+} |
|
30 |
+ |
|
31 |
+// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. |
|
32 |
+func (err ErrKeyNotFound) Error() string { |
|
33 |
+ return fmt.Sprintf("signing key not found: %s", err.KeyID) |
|
34 |
+} |
|
35 |
+ |
|
36 |
+const ( |
|
37 |
+ keyExtension = "key" |
|
38 |
+) |
|
39 |
+ |
|
40 |
+// KeyStore is a generic interface for private key storage |
|
41 |
+type KeyStore interface { |
|
42 |
+ AddKey(name, alias string, privKey data.PrivateKey) error |
|
43 |
+ GetKey(name string) (data.PrivateKey, string, error) |
|
44 |
+ ListKeys() map[string]string |
|
45 |
+ RemoveKey(name string) error |
|
46 |
+} |
|
47 |
+ |
|
48 |
+type cachedKey struct { |
|
49 |
+ alias string |
|
50 |
+ key data.PrivateKey |
|
51 |
+} |
... | ... |
@@ -351,7 +351,7 @@ func GenerateECDSAKey(random io.Reader) (data.PrivateKey, error) { |
351 | 351 |
// PrivateKey. The serialization format we use is just the public key bytes |
352 | 352 |
// followed by the private key bytes |
353 | 353 |
func GenerateED25519Key(random io.Reader) (data.PrivateKey, error) { |
354 |
- pub, priv, err := ed25519.GenerateKey(rand.Reader) |
|
354 |
+ pub, priv, err := ed25519.GenerateKey(random) |
|
355 | 355 |
if err != nil { |
356 | 356 |
return nil, err |
357 | 357 |
} |
... | ... |
@@ -50,15 +50,9 @@ func (c *Client) Update() error { |
50 | 50 |
logrus.Debug("updating TUF client") |
51 | 51 |
err := c.update() |
52 | 52 |
if err != nil { |
53 |
- switch err.(type) { |
|
54 |
- case signed.ErrRoleThreshold, signed.ErrExpired, tuf.ErrLocalRootExpired: |
|
55 |
- logrus.Debug("retryable error occurred. Root will be downloaded and another update attempted") |
|
56 |
- if err := c.downloadRoot(); err != nil { |
|
57 |
- logrus.Errorf("client Update (Root):", err) |
|
58 |
- return err |
|
59 |
- } |
|
60 |
- default: |
|
61 |
- logrus.Error("an unexpected error occurred while updating TUF client") |
|
53 |
+ logrus.Debug("Error occurred. Root will be downloaded and another update attempted") |
|
54 |
+ if err := c.downloadRoot(); err != nil { |
|
55 |
+ logrus.Errorf("client Update (Root):", err) |
|
62 | 56 |
return err |
63 | 57 |
} |
64 | 58 |
// If we error again, we now have the latest root and just want to fail |
... | ... |
@@ -114,6 +108,20 @@ func (c Client) checkRoot() error { |
114 | 114 |
if !bytes.Equal(hash[:], hashSha256) { |
115 | 115 |
return fmt.Errorf("Cached root sha256 did not match snapshot root sha256") |
116 | 116 |
} |
117 |
+ |
|
118 |
+ if int64(len(raw)) != size { |
|
119 |
+ return fmt.Errorf("Cached root size did not match snapshot size") |
|
120 |
+ } |
|
121 |
+ |
|
122 |
+ root := &data.SignedRoot{} |
|
123 |
+ err = json.Unmarshal(raw, root) |
|
124 |
+ if err != nil { |
|
125 |
+ return ErrCorruptedCache{file: "root.json"} |
|
126 |
+ } |
|
127 |
+ |
|
128 |
+ if signed.IsExpired(root.Signed.Expires) { |
|
129 |
+ return tuf.ErrLocalRootExpired{} |
|
130 |
+ } |
|
117 | 131 |
return nil |
118 | 132 |
} |
119 | 133 |
|
... | ... |
@@ -104,3 +104,11 @@ type ErrInvalidURL struct { |
104 | 104 |
func (e ErrInvalidURL) Error() string { |
105 | 105 |
return fmt.Sprintf("tuf: invalid repository URL %s", e.URL) |
106 | 106 |
} |
107 |
+ |
|
108 |
+type ErrCorruptedCache struct { |
|
109 |
+ file string |
|
110 |
+} |
|
111 |
+ |
|
112 |
+func (e ErrCorruptedCache) Error() string { |
|
113 |
+ return fmt.Sprintf("cache is corrupted: %s", e.file) |
|
114 |
+} |
... | ... |
@@ -7,16 +7,27 @@ import ( |
7 | 7 |
"github.com/endophage/gotuf/errors" |
8 | 8 |
) |
9 | 9 |
|
10 |
+// Canonical base role names |
|
11 |
+const ( |
|
12 |
+ CanonicalRootRole = "root" |
|
13 |
+ CanonicalTargetsRole = "targets" |
|
14 |
+ CanonicalSnapshotRole = "snapshot" |
|
15 |
+ CanonicalTimestampRole = "timestamp" |
|
16 |
+) |
|
17 |
+ |
|
10 | 18 |
var ValidRoles = map[string]string{ |
11 |
- "root": "root", |
|
12 |
- "targets": "targets", |
|
13 |
- "snapshot": "snapshot", |
|
14 |
- "timestamp": "timestamp", |
|
19 |
+ CanonicalRootRole: CanonicalRootRole, |
|
20 |
+ CanonicalTargetsRole: CanonicalTargetsRole, |
|
21 |
+ CanonicalSnapshotRole: CanonicalSnapshotRole, |
|
22 |
+ CanonicalTimestampRole: CanonicalTimestampRole, |
|
15 | 23 |
} |
16 | 24 |
|
17 | 25 |
func SetValidRoles(rs map[string]string) { |
18 |
- for k, v := range rs { |
|
19 |
- ValidRoles[strings.ToLower(k)] = strings.ToLower(v) |
|
26 |
+ // iterate ValidRoles |
|
27 |
+ for k, _ := range ValidRoles { |
|
28 |
+ if v, ok := rs[k]; ok { |
|
29 |
+ ValidRoles[k] = v |
|
30 |
+ } |
|
20 | 31 |
} |
21 | 32 |
} |
22 | 33 |
|
... | ... |
@@ -27,6 +38,27 @@ func RoleName(role string) string { |
27 | 27 |
return role |
28 | 28 |
} |
29 | 29 |
|
30 |
+func CanonicalRole(role string) string { |
|
31 |
+ name := strings.ToLower(role) |
|
32 |
+ if _, ok := ValidRoles[name]; ok { |
|
33 |
+ // The canonical version is always lower case |
|
34 |
+ // se ensure we return name, not role |
|
35 |
+ return name |
|
36 |
+ } |
|
37 |
+ targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole]) |
|
38 |
+ if strings.HasPrefix(name, targetsBase) { |
|
39 |
+ role = strings.TrimPrefix(role, targetsBase) |
|
40 |
+ role = fmt.Sprintf("%s/%s", CanonicalTargetsRole, role) |
|
41 |
+ return role |
|
42 |
+ } |
|
43 |
+ for r, v := range ValidRoles { |
|
44 |
+ if role == v { |
|
45 |
+ return r |
|
46 |
+ } |
|
47 |
+ } |
|
48 |
+ return "" |
|
49 |
+} |
|
50 |
+ |
|
30 | 51 |
// ValidRole only determines the name is semantically |
31 | 52 |
// correct. For target delegated roles, it does NOT check |
32 | 53 |
// the the appropriate parent roles exist. |
... | ... |
@@ -35,7 +67,7 @@ func ValidRole(name string) bool { |
35 | 35 |
if v, ok := ValidRoles[name]; ok { |
36 | 36 |
return name == v |
37 | 37 |
} |
38 |
- targetsBase := fmt.Sprintf("%s/", ValidRoles["targets"]) |
|
38 |
+ targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole]) |
|
39 | 39 |
if strings.HasPrefix(name, targetsBase) { |
40 | 40 |
return true |
41 | 41 |
} |
... | ... |
@@ -112,6 +144,6 @@ func (r Role) CheckPrefixes(hash string) bool { |
112 | 112 |
} |
113 | 113 |
|
114 | 114 |
func (r Role) IsDelegation() bool { |
115 |
- targetsBase := fmt.Sprintf("%s/", ValidRoles["targets"]) |
|
115 |
+ targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole]) |
|
116 | 116 |
return strings.HasPrefix(r.Name, targetsBase) |
117 | 117 |
} |
... | ... |
@@ -43,10 +43,10 @@ const ( |
43 | 43 |
) |
44 | 44 |
|
45 | 45 |
var TUFTypes = map[string]string{ |
46 |
- "targets": "Targets", |
|
47 |
- "root": "Root", |
|
48 |
- "snapshot": "Snapshot", |
|
49 |
- "timestamp": "Timestamp", |
|
46 |
+ CanonicalRootRole: "Root", |
|
47 |
+ CanonicalTargetsRole: "Targets", |
|
48 |
+ CanonicalSnapshotRole: "Snapshot", |
|
49 |
+ CanonicalTimestampRole: "Timestamp", |
|
50 | 50 |
} |
51 | 51 |
|
52 | 52 |
// SetTUFTypes allows one to override some or all of the default |
... | ... |
@@ -57,19 +57,25 @@ func SetTUFTypes(ts map[string]string) { |
57 | 57 |
} |
58 | 58 |
} |
59 | 59 |
|
60 |
-// Checks if type is correct. |
|
61 |
-func ValidTUFType(t string) bool { |
|
60 |
+func ValidTUFType(typ, role string) bool { |
|
61 |
+ if ValidRole(role) { |
|
62 |
+ // All targets delegation roles must have |
|
63 |
+ // the valid type is for targets. |
|
64 |
+ role = CanonicalRole(role) |
|
65 |
+ if role == "" { |
|
66 |
+ // role is unknown and does not map to |
|
67 |
+ // a type |
|
68 |
+ return false |
|
69 |
+ } |
|
70 |
+ if strings.HasPrefix(role, CanonicalTargetsRole+"/") { |
|
71 |
+ role = CanonicalTargetsRole |
|
72 |
+ } |
|
73 |
+ } |
|
62 | 74 |
// most people will just use the defaults so have this optimal check |
63 | 75 |
// first. Do comparison just in case there is some unknown vulnerability |
64 | 76 |
// if a key and value in the map differ. |
65 |
- if v, ok := TUFTypes[t]; ok { |
|
66 |
- return t == v |
|
67 |
- } |
|
68 |
- // For people that feel the need to change the default type names. |
|
69 |
- for _, v := range TUFTypes { |
|
70 |
- if t == v { |
|
71 |
- return true |
|
72 |
- } |
|
77 |
+ if v, ok := TUFTypes[role]; ok { |
|
78 |
+ return typ == v |
|
73 | 79 |
} |
74 | 80 |
return false |
75 | 81 |
} |
... | ... |
@@ -138,10 +144,10 @@ func NewDelegations() *Delegations { |
138 | 138 |
|
139 | 139 |
// defines number of days in which something should expire |
140 | 140 |
var defaultExpiryTimes = map[string]int{ |
141 |
- "root": 365, |
|
142 |
- "targets": 90, |
|
143 |
- "snapshot": 7, |
|
144 |
- "timestamp": 1, |
|
141 |
+ CanonicalRootRole: 365, |
|
142 |
+ CanonicalTargetsRole: 90, |
|
143 |
+ CanonicalSnapshotRole: 7, |
|
144 |
+ CanonicalTimestampRole: 1, |
|
145 | 145 |
} |
146 | 146 |
|
147 | 147 |
// SetDefaultExpiryTimes allows one to change the default expiries. |
... | ... |
@@ -27,3 +27,17 @@ type ErrRoleThreshold struct{} |
27 | 27 |
func (e ErrRoleThreshold) Error() string { |
28 | 28 |
return "valid signatures did not meet threshold" |
29 | 29 |
} |
30 |
+ |
|
31 |
+type ErrInvalidKeyType struct{} |
|
32 |
+ |
|
33 |
+func (e ErrInvalidKeyType) Error() string { |
|
34 |
+ return "key type is not valid for signature" |
|
35 |
+} |
|
36 |
+ |
|
37 |
+type ErrInvalidKeyLength struct { |
|
38 |
+ msg string |
|
39 |
+} |
|
40 |
+ |
|
41 |
+func (e ErrInvalidKeyLength) Error() string { |
|
42 |
+ return fmt.Sprintf("key length is not supported: %s", e.msg) |
|
43 |
+} |
... | ... |
@@ -7,6 +7,7 @@ import ( |
7 | 7 |
"crypto/sha256" |
8 | 8 |
"crypto/x509" |
9 | 9 |
"encoding/pem" |
10 |
+ "fmt" |
|
10 | 11 |
"math/big" |
11 | 12 |
"reflect" |
12 | 13 |
|
... | ... |
@@ -15,6 +16,11 @@ import ( |
15 | 15 |
"github.com/endophage/gotuf/data" |
16 | 16 |
) |
17 | 17 |
|
18 |
+const ( |
|
19 |
+ minRSAKeySizeBit = 2048 // 2048 bits = 256 bytes |
|
20 |
+ minRSAKeySizeByte = minRSAKeySizeBit / 8 |
|
21 |
+) |
|
22 |
+ |
|
18 | 23 |
// Verifiers serves as a map of all verifiers available on the system and |
19 | 24 |
// can be injected into a verificationService. For testing and configuration |
20 | 25 |
// purposes, it will not be used by default. |
... | ... |
@@ -47,15 +53,27 @@ func RegisterVerifier(algorithm data.SigAlgorithm, v Verifier) { |
47 | 47 |
type Ed25519Verifier struct{} |
48 | 48 |
|
49 | 49 |
func (v Ed25519Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) error { |
50 |
+ if key.Algorithm() != data.ED25519Key { |
|
51 |
+ return ErrInvalidKeyType{} |
|
52 |
+ } |
|
50 | 53 |
var sigBytes [ed25519.SignatureSize]byte |
51 |
- if len(sig) != len(sigBytes) { |
|
54 |
+ if len(sig) != ed25519.SignatureSize { |
|
52 | 55 |
logrus.Infof("signature length is incorrect, must be %d, was %d.", ed25519.SignatureSize, len(sig)) |
53 | 56 |
return ErrInvalid |
54 | 57 |
} |
55 | 58 |
copy(sigBytes[:], sig) |
56 | 59 |
|
57 | 60 |
var keyBytes [ed25519.PublicKeySize]byte |
58 |
- copy(keyBytes[:], key.Public()) |
|
61 |
+ pub := key.Public() |
|
62 |
+ if len(pub) != ed25519.PublicKeySize { |
|
63 |
+ logrus.Errorf("public key is incorrect size, must be %d, was %d.", ed25519.PublicKeySize, len(pub)) |
|
64 |
+ return ErrInvalidKeyLength{msg: fmt.Sprintf("ed25519 public key must be %d bytes.", ed25519.PublicKeySize)} |
|
65 |
+ } |
|
66 |
+ n := copy(keyBytes[:], key.Public()) |
|
67 |
+ if n < ed25519.PublicKeySize { |
|
68 |
+ logrus.Errorf("failed to copy the key, must have %d bytes, copied %d bytes.", ed25519.PublicKeySize, n) |
|
69 |
+ return ErrInvalid |
|
70 |
+ } |
|
59 | 71 |
|
60 | 72 |
if !ed25519.Verify(&keyBytes, msg, &sigBytes) { |
61 | 73 |
logrus.Infof("failed ed25519 verification") |
... | ... |
@@ -71,6 +89,16 @@ func verifyPSS(key interface{}, digest, sig []byte) error { |
71 | 71 |
return ErrInvalid |
72 | 72 |
} |
73 | 73 |
|
74 |
+ if rsaPub.N.BitLen() < minRSAKeySizeBit { |
|
75 |
+ logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen()) |
|
76 |
+ return ErrInvalidKeyLength{msg: fmt.Sprintf("RSA key must be at least %d bits.", minRSAKeySizeBit)} |
|
77 |
+ } |
|
78 |
+ |
|
79 |
+ if len(sig) < minRSAKeySizeByte { |
|
80 |
+ logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig)) |
|
81 |
+ return ErrInvalid |
|
82 |
+ } |
|
83 |
+ |
|
74 | 84 |
opts := rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256} |
75 | 85 |
if err := rsa.VerifyPSS(rsaPub, crypto.SHA256, digest[:], sig, &opts); err != nil { |
76 | 86 |
logrus.Infof("failed RSAPSS verification: %s", err) |
... | ... |
@@ -104,8 +132,9 @@ func getRSAPubKey(key data.PublicKey) (crypto.PublicKey, error) { |
104 | 104 |
return nil, ErrInvalid |
105 | 105 |
} |
106 | 106 |
default: |
107 |
+ // only accept RSA keys |
|
107 | 108 |
logrus.Infof("invalid key type for RSAPSS verifier: %s", algorithm) |
108 |
- return nil, ErrInvalid |
|
109 |
+ return nil, ErrInvalidKeyType{} |
|
109 | 110 |
} |
110 | 111 |
|
111 | 112 |
return pubKey, nil |
... | ... |
@@ -116,6 +145,7 @@ type RSAPSSVerifier struct{} |
116 | 116 |
|
117 | 117 |
// Verify does the actual check. |
118 | 118 |
func (v RSAPSSVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error { |
119 |
+ // will return err if keytype is not a recognized RSA type |
|
119 | 120 |
pubKey, err := getRSAPubKey(key) |
120 | 121 |
if err != nil { |
121 | 122 |
return err |
... | ... |
@@ -130,6 +160,7 @@ func (v RSAPSSVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error |
130 | 130 |
type RSAPKCS1v15Verifier struct{} |
131 | 131 |
|
132 | 132 |
func (v RSAPKCS1v15Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) error { |
133 |
+ // will return err if keytype is not a recognized RSA type |
|
133 | 134 |
pubKey, err := getRSAPubKey(key) |
134 | 135 |
if err != nil { |
135 | 136 |
return err |
... | ... |
@@ -142,6 +173,16 @@ func (v RSAPKCS1v15Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) |
142 | 142 |
return ErrInvalid |
143 | 143 |
} |
144 | 144 |
|
145 |
+ if rsaPub.N.BitLen() < minRSAKeySizeBit { |
|
146 |
+ logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen()) |
|
147 |
+ return ErrInvalidKeyLength{msg: fmt.Sprintf("RSA key must be at least %d bits.", minRSAKeySizeBit)} |
|
148 |
+ } |
|
149 |
+ |
|
150 |
+ if len(sig) < minRSAKeySizeByte { |
|
151 |
+ logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig)) |
|
152 |
+ return ErrInvalid |
|
153 |
+ } |
|
154 |
+ |
|
145 | 155 |
if err = rsa.VerifyPKCS1v15(rsaPub, crypto.SHA256, digest[:], sig); err != nil { |
146 | 156 |
logrus.Errorf("Failed verification: %s", err.Error()) |
147 | 157 |
return ErrInvalid |
... | ... |
@@ -157,6 +198,9 @@ type RSAPyCryptoVerifier struct{} |
157 | 157 |
// with PyCrypto. |
158 | 158 |
func (v RSAPyCryptoVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error { |
159 | 159 |
digest := sha256.Sum256(msg) |
160 |
+ if key.Algorithm() != data.RSAKey { |
|
161 |
+ return ErrInvalidKeyType{} |
|
162 |
+ } |
|
160 | 163 |
|
161 | 164 |
k, _ := pem.Decode([]byte(key.Public())) |
162 | 165 |
if k == nil { |
... | ... |
@@ -203,8 +247,9 @@ func (v ECDSAVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error |
203 | 203 |
return ErrInvalid |
204 | 204 |
} |
205 | 205 |
default: |
206 |
+ // only accept ECDSA keys. |
|
206 | 207 |
logrus.Infof("invalid key type for ECDSA verifier: %s", algorithm) |
207 |
- return ErrInvalid |
|
208 |
+ return ErrInvalidKeyType{} |
|
208 | 209 |
} |
209 | 210 |
|
210 | 211 |
ecdsaPubKey, ok := pubKey.(*ecdsa.PublicKey) |
... | ... |
@@ -22,9 +22,9 @@ var ( |
22 | 22 |
) |
23 | 23 |
|
24 | 24 |
type signedMeta struct { |
25 |
- Type string `json:"_type"` |
|
26 |
- Expires string `json:"expires"` |
|
27 |
- Version int `json:"version"` |
|
25 |
+ Type string `json:"_type"` |
|
26 |
+ Expires time.Time `json:"expires"` |
|
27 |
+ Version int `json:"version"` |
|
28 | 28 |
} |
29 | 29 |
|
30 | 30 |
// VerifyRoot checks if a given root file is valid against a known set of keys. |
... | ... |
@@ -80,12 +80,12 @@ func verifyMeta(s *data.Signed, role string, minVersion int) error { |
80 | 80 |
if err := json.Unmarshal(s.Signed, sm); err != nil { |
81 | 81 |
return err |
82 | 82 |
} |
83 |
- if !data.ValidTUFType(sm.Type) { |
|
83 |
+ if !data.ValidTUFType(sm.Type, role) { |
|
84 | 84 |
return ErrWrongType |
85 | 85 |
} |
86 | 86 |
if IsExpired(sm.Expires) { |
87 | 87 |
logrus.Errorf("Metadata for %s expired", role) |
88 |
- return ErrExpired{Role: role, Expired: sm.Expires} |
|
88 |
+ return ErrExpired{Role: role, Expired: sm.Expires.Format("Mon Jan 2 15:04:05 MST 2006")} |
|
89 | 89 |
} |
90 | 90 |
if sm.Version < minVersion { |
91 | 91 |
return ErrLowVersion{sm.Version, minVersion} |
... | ... |
@@ -94,15 +94,8 @@ func verifyMeta(s *data.Signed, role string, minVersion int) error { |
94 | 94 |
return nil |
95 | 95 |
} |
96 | 96 |
|
97 |
-var IsExpired = func(t string) bool { |
|
98 |
- ts, err := time.Parse(time.RFC3339, t) |
|
99 |
- if err != nil { |
|
100 |
- ts, err = time.Parse("2006-01-02 15:04:05 MST", t) |
|
101 |
- if err != nil { |
|
102 |
- return false |
|
103 |
- } |
|
104 |
- } |
|
105 |
- return ts.Sub(time.Now()) <= 0 |
|
97 |
+var IsExpired = func(t time.Time) bool { |
|
98 |
+ return t.Before(time.Now()) |
|
106 | 99 |
} |
107 | 100 |
|
108 | 101 |
func VerifySignatures(s *data.Signed, role string, db *keys.KeyDB) error { |