Browse code

Bump notary version up to 0.3.0 and re-vendor.

Signed-off-by: cyli <cyli@twistedmatrix.com>

cyli authored on 2016/05/12 07:25:05
Showing 38 changed files
... ...
@@ -186,7 +186,7 @@ RUN set -x \
186 186
 	&& rm -rf "$GOPATH"
187 187
 
188 188
 # Install notary and notary-server
189
-ENV NOTARY_VERSION v0.3.0-RC1
189
+ENV NOTARY_VERSION v0.3.0
190 190
 RUN set -x \
191 191
 	&& export GO15VENDOREXPERIMENT=1 \
192 192
 	&& export GOPATH="$(mktemp -d)" \
... ...
@@ -117,7 +117,7 @@ RUN set -x \
117 117
 	&& rm -rf "$GOPATH"
118 118
 
119 119
 # Install notary and notary-server
120
-ENV NOTARY_VERSION v0.3.0-RC1
120
+ENV NOTARY_VERSION v0.3.0
121 121
 RUN set -x \
122 122
 	&& export GO15VENDOREXPERIMENT=1 \
123 123
 	&& export GOPATH="$(mktemp -d)" \
... ...
@@ -128,7 +128,7 @@ RUN set -x \
128 128
 	&& rm -rf "$GOPATH"
129 129
 
130 130
 # Install notary and notary-server
131
-ENV NOTARY_VERSION v0.3.0-RC1
131
+ENV NOTARY_VERSION v0.3.0
132 132
 RUN set -x \
133 133
 	&& export GO15VENDOREXPERIMENT=1 \
134 134
 	&& export GOPATH="$(mktemp -d)" \
... ...
@@ -141,7 +141,7 @@ RUN set -x \
141 141
 	&& rm -rf "$GOPATH"
142 142
 
143 143
 # Install notary and notary-server
144
-ENV NOTARY_VERSION v0.3.0-RC1
144
+ENV NOTARY_VERSION v0.3.0
145 145
 RUN set -x \
146 146
 	&& export GOPATH="$(mktemp -d)" \
147 147
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
... ...
@@ -130,7 +130,7 @@ RUN set -x \
130 130
 	&& rm -rf "$GOPATH"
131 131
 
132 132
 # Install notary and notary-server
133
-ENV NOTARY_VERSION v0.3.0-RC1
133
+ENV NOTARY_VERSION v0.3.0
134 134
 RUN set -x \
135 135
 	&& export GO15VENDOREXPERIMENT=1 \
136 136
 	&& export GOPATH="$(mktemp -d)" \
... ...
@@ -86,7 +86,7 @@ the tagged image prior to the loss. Image consumers would get an error for
86 86
 content that they already downloaded:
87 87
 
88 88
 ```
89
-could not validate the path to a trusted root: failed to validate data with current trusted certificates
89
+Warning: potential malicious behavior - trust data has insufficient signatures for remote repository docker.io/my/image: valid signatures did not meet threshold
90 90
 ```
91 91
 
92 92
 To correct this, they need to download a new image tag that is signed with
... ...
@@ -56,7 +56,7 @@ clone git github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
56 56
 clone git github.com/pborman/uuid v1.0
57 57
 
58 58
 # get desired notary commit, might also need to be updated in Dockerfile
59
-clone git github.com/docker/notary v0.3.0-RC1
59
+clone git github.com/docker/notary v0.3.0
60 60
 
61 61
 clone git google.golang.org/grpc a22b6611561e9f0a3e0919690dd2caf48f14c517 https://github.com/grpc/grpc-go.git
62 62
 clone git github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
... ...
@@ -73,6 +73,9 @@ ${PREFIX}/bin/static/notary-server:
73 73
 
74 74
 ${PREFIX}/bin/static/notary-signer:
75 75
 	@echo "notary-signer: static builds not supported on OS X"
76
+
77
+${PREFIX}/bin/static/notary:
78
+	@echo "notary: static builds not supported on OS X"
76 79
 else
77 80
 ${PREFIX}/bin/static/notary-server: NOTARY_VERSION $(shell find . -type f -name '*.go')
78 81
 	@echo "+ $@"
... ...
@@ -81,6 +84,10 @@ ${PREFIX}/bin/static/notary-server: NOTARY_VERSION $(shell find . -type f -name
81 81
 ${PREFIX}/bin/static/notary-signer: NOTARY_VERSION $(shell find . -type f -name '*.go')
82 82
 	@echo "+ $@"
83 83
 	@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary-signer
84
+
85
+${PREFIX}/bin/static/notary:
86
+	@echo "+ $@"
87
+	@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary
84 88
 endif
85 89
 
86 90
 vet:
... ...
@@ -179,7 +186,7 @@ client: ${PREFIX}/bin/notary
179 179
 binaries: ${PREFIX}/bin/notary-server ${PREFIX}/bin/notary ${PREFIX}/bin/notary-signer
180 180
 	@echo "+ $@"
181 181
 
182
-static: ${PREFIX}/bin/static/notary-server ${PREFIX}/bin/static/notary-signer
182
+static: ${PREFIX}/bin/static/notary-server ${PREFIX}/bin/static/notary-signer ${PREFIX}/bin/static/notary
183 183
 	@echo "+ $@"
184 184
 
185 185
 notary-dockerfile:
... ...
@@ -83,8 +83,17 @@ Prerequisites:
83 83
 - Go >= 1.6.1
84 84
 - [godep](https://github.com/tools/godep) installed
85 85
 - libtool development headers installed
86
-    - Ubuntu: `apt-get install libtool-dev`
86
+    - Ubuntu: `apt-get install libltdl-dev`
87 87
     - CentOS/RedHat: `yum install libtool-ltdl-devel`
88 88
     - Mac OS ([Homebrew](http://brew.sh/)): `brew install libtool`
89 89
 
90 90
 Run `make binaries`, which creates the Notary Client CLI binary at `bin/notary`.
91
+Note that `make binaries` assumes a standard Go directory structure, in which
92
+Notary is checked out to the `src` directory in your `GOPATH`. For example:
93
+```
94
+$GOPATH/
95
+    src/
96
+        github.com/
97
+            docker/
98
+                notary/
99
+```
... ...
@@ -2,7 +2,6 @@ package client
2 2
 
3 3
 import (
4 4
 	"bytes"
5
-	"crypto/x509"
6 5
 	"encoding/json"
7 6
 	"fmt"
8 7
 	"io/ioutil"
... ...
@@ -87,7 +86,6 @@ type NotaryRepository struct {
87 87
 	CryptoService signed.CryptoService
88 88
 	tufRepo       *tuf.Repo
89 89
 	roundTrip     http.RoundTripper
90
-	CertStore     trustmanager.X509Store
91 90
 	trustPinning  trustpinning.TrustPinConfig
92 91
 }
93 92
 
... ...
@@ -97,15 +95,6 @@ type NotaryRepository struct {
97 97
 func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper,
98 98
 	keyStores []trustmanager.KeyStore, trustPin trustpinning.TrustPinConfig) (*NotaryRepository, error) {
99 99
 
100
-	certPath := filepath.Join(baseDir, notary.TrustedCertsDir)
101
-	certStore, err := trustmanager.NewX509FilteredFileStore(
102
-		certPath,
103
-		trustmanager.FilterCertsExpiredSha1,
104
-	)
105
-	if err != nil {
106
-		return nil, err
107
-	}
108
-
109 100
 	cryptoService := cryptoservice.NewCryptoService(keyStores...)
110 101
 
111 102
 	nRepo := &NotaryRepository{
... ...
@@ -115,7 +104,6 @@ func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper,
115 115
 		tufRepoPath:   filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)),
116 116
 		CryptoService: cryptoService,
117 117
 		roundTrip:     rt,
118
-		CertStore:     certStore,
119 118
 		trustPinning:  trustPin,
120 119
 	}
121 120
 
... ...
@@ -162,22 +150,22 @@ func NewTarget(targetName string, targetPath string) (*Target, error) {
162 162
 	return &Target{Name: targetName, Hashes: meta.Hashes, Length: meta.Length}, nil
163 163
 }
164 164
 
165
-func rootCertKey(gun string, privKey data.PrivateKey) (*x509.Certificate, data.PublicKey, error) {
165
+func rootCertKey(gun string, privKey data.PrivateKey) (data.PublicKey, error) {
166 166
 	// Hard-coded policy: the generated certificate expires in 10 years.
167 167
 	startTime := time.Now()
168 168
 	cert, err := cryptoservice.GenerateCertificate(
169 169
 		privKey, gun, startTime, startTime.Add(notary.Year*10))
170 170
 	if err != nil {
171
-		return nil, nil, err
171
+		return nil, err
172 172
 	}
173 173
 
174 174
 	x509PublicKey := trustmanager.CertToKey(cert)
175 175
 	if x509PublicKey == nil {
176
-		return nil, nil, fmt.Errorf(
176
+		return nil, fmt.Errorf(
177 177
 			"cannot use regenerated certificate: format %s", cert.PublicKeyAlgorithm)
178 178
 	}
179 179
 
180
-	return cert, x509PublicKey, nil
180
+	return x509PublicKey, nil
181 181
 }
182 182
 
183 183
 // Initialize creates a new repository by using rootKey as the root Key for the
... ...
@@ -218,11 +206,10 @@ func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...st
218 218
 		}
219 219
 	}
220 220
 
221
-	rootCert, rootKey, err := rootCertKey(r.gun, privKey)
221
+	rootKey, err := rootCertKey(r.gun, privKey)
222 222
 	if err != nil {
223 223
 		return err
224 224
 	}
225
-	r.CertStore.AddCert(rootCert)
226 225
 
227 226
 	var (
228 227
 		rootRole = data.NewBaseRole(
... ...
@@ -394,8 +381,7 @@ func (r *NotaryRepository) RemoveTarget(targetName string, roles ...string) erro
394 394
 // subtree and also the "targets/x" subtree, as we will defer parsing it until
395 395
 // we explicitly reach it in our iteration of the provided list of roles.
396 396
 func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, error) {
397
-	err := r.Update(false)
398
-	if err != nil {
397
+	if err := r.Update(false); err != nil {
399 398
 		return nil, err
400 399
 	}
401 400
 
... ...
@@ -432,12 +418,12 @@ func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, erro
432 432
 	return targetList, nil
433 433
 }
434 434
 
435
-// GetTargetByName returns a target given a name. If no roles are passed
435
+// GetTargetByName returns a target by the given name. If no roles are passed
436 436
 // it uses the targets role and does a search of the entire delegation
437 437
 // graph, finding the first entry in a breadth first search of the delegations.
438 438
 // If roles are passed, they should be passed in descending priority and
439 439
 // the target entry found in the subtree of the highest priority role
440
-// will be returned
440
+// will be returned.
441 441
 // See the IMPORTANT section on ListTargets above. Those roles also apply here.
442 442
 func (r *NotaryRepository) GetTargetByName(name string, roles ...string) (*TargetWithRole, error) {
443 443
 	if err := r.Update(false); err != nil {
... ...
@@ -656,50 +642,33 @@ func (r *NotaryRepository) publish(cl changelist.Changelist) error {
656 656
 // a not yet published repo or a possibly obsolete local copy) into
657 657
 // r.tufRepo.  This attempts to load metadata for all roles.  Since server
658 658
 // snapshots are supported, if the snapshot metadata fails to load, that's ok.
659
-// This can also be unified with some cache reading tools from tuf/client.
660 659
 // This assumes that bootstrapRepo is only used by Publish() or RotateKey()
661 660
 func (r *NotaryRepository) bootstrapRepo() error {
662
-	tufRepo := tuf.NewRepo(r.CryptoService)
661
+	b := tuf.NewRepoBuilder(r.gun, r.CryptoService, r.trustPinning)
663 662
 
664 663
 	logrus.Debugf("Loading trusted collection.")
665
-	rootJSON, err := r.fileStore.GetMeta(data.CanonicalRootRole, -1)
666
-	if err != nil {
667
-		return err
668
-	}
669
-	root := &data.SignedRoot{}
670
-	err = json.Unmarshal(rootJSON, root)
671
-	if err != nil {
672
-		return err
673
-	}
674
-	err = tufRepo.SetRoot(root)
675
-	if err != nil {
676
-		return err
677
-	}
678
-	targetsJSON, err := r.fileStore.GetMeta(data.CanonicalTargetsRole, -1)
679
-	if err != nil {
680
-		return err
681
-	}
682
-	targets := &data.SignedTargets{}
683
-	err = json.Unmarshal(targetsJSON, targets)
684
-	if err != nil {
685
-		return err
686
-	}
687
-	tufRepo.SetTargets(data.CanonicalTargetsRole, targets)
688 664
 
689
-	snapshotJSON, err := r.fileStore.GetMeta(data.CanonicalSnapshotRole, -1)
690
-	if err == nil {
691
-		snapshot := &data.SignedSnapshot{}
692
-		err = json.Unmarshal(snapshotJSON, snapshot)
665
+	for _, role := range data.BaseRoles {
666
+		jsonBytes, err := r.fileStore.GetMeta(role, store.NoSizeLimit)
693 667
 		if err != nil {
668
+			if _, ok := err.(store.ErrMetaNotFound); ok &&
669
+				// server snapshots are supported, and server timestamp management
670
+				// is required, so if either of these fail to load that's ok - especially
671
+				// if the repo is new
672
+				role == data.CanonicalSnapshotRole || role == data.CanonicalTimestampRole {
673
+				continue
674
+			}
675
+			return err
676
+		}
677
+		if err := b.Load(role, jsonBytes, 1, true); err != nil {
694 678
 			return err
695 679
 		}
696
-		tufRepo.SetSnapshot(snapshot)
697
-	} else if _, ok := err.(store.ErrMetaNotFound); !ok {
698
-		return err
699 680
 	}
700 681
 
701
-	r.tufRepo = tufRepo
702
-
682
+	tufRepo, err := b.Finish()
683
+	if err == nil {
684
+		r.tufRepo = tufRepo
685
+	}
703 686
 	return nil
704 687
 }
705 688
 
... ...
@@ -769,15 +738,17 @@ func (r *NotaryRepository) Update(forWrite bool) error {
769 769
 		}
770 770
 		return err
771 771
 	}
772
-	if err := c.Update(); err != nil {
772
+	repo, err := c.Update()
773
+	if err != nil {
773 774
 		// notFound.Resource may include a checksum so when the role is root,
774
-		// it will be root.json or root.<checksum>.json. Therefore best we can
775
+		// it will be root or root.<checksum>. Therefore best we can
775 776
 		// do it match a "root." prefix
776 777
 		if notFound, ok := err.(store.ErrMetaNotFound); ok && strings.HasPrefix(notFound.Resource, data.CanonicalRootRole+".") {
777 778
 			return r.errRepositoryNotExist()
778 779
 		}
779 780
 		return err
780 781
 	}
782
+	r.tufRepo = repo
781 783
 	return nil
782 784
 }
783 785
 
... ...
@@ -787,12 +758,9 @@ func (r *NotaryRepository) Update(forWrite bool) error {
787 787
 // is initialized or not. If set to true, we will always attempt to download
788 788
 // and return an error if the remote repository errors.
789 789
 //
790
-// Partially populates r.tufRepo with this root metadata (only; use
790
+// Populates a tuf.RepoBuilder with this root metadata (only use
791 791
 // tufclient.Client.Update to load the rest).
792 792
 //
793
-// As another side effect, r.CertManager's list of trusted certificates
794
-// is updated with data from the loaded root.json.
795
-//
796 793
 // Fails if the remote server is reachable and does not know the repo
797 794
 // (i.e. before the first r.Publish()), in which case the error is
798 795
 // store.ErrMetaNotFound, or if the root metadata (from whichever source is used)
... ...
@@ -801,40 +769,55 @@ func (r *NotaryRepository) Update(forWrite bool) error {
801 801
 // Returns a tufclient.Client for the remote server, which may not be actually
802 802
 // operational (if the URL is invalid but a root.json is cached).
803 803
 func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Client, error) {
804
-	var (
805
-		rootJSON   []byte
806
-		err        error
807
-		signedRoot *data.SignedRoot
808
-	)
809
-	// try to read root from cache first. We will trust this root
810
-	// until we detect a problem during update which will cause
811
-	// us to download a new root and perform a rotation.
812
-	rootJSON, cachedRootErr := r.fileStore.GetMeta(data.CanonicalRootRole, -1)
804
+	minVersion := 1
805
+	// the old root on disk should not be validated against any trust pinning configuration
806
+	// because if we have an old root, it itself is the thing that pins trust
807
+	oldBuilder := tuf.NewRepoBuilder(r.gun, r.CryptoService, trustpinning.TrustPinConfig{})
808
+
809
+	// by default, we want to use the trust pinning configuration on any new root that we download
810
+	newBuilder := tuf.NewRepoBuilder(r.gun, r.CryptoService, r.trustPinning)
811
+
812
+	// Try to read root from cache first. We will trust this root until we detect a problem
813
+	// during update which will cause us to download a new root and perform a rotation.
814
+	// If we have an old root, and it's valid, then we overwrite the newBuilder to be one
815
+	// preloaded with the old root or one which uses the old root for trust bootstrapping.
816
+	if rootJSON, err := r.fileStore.GetMeta(data.CanonicalRootRole, store.NoSizeLimit); err == nil {
817
+		// if we can't load the cached root, fail hard because that is how we pin trust
818
+		if err := oldBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, true); err != nil {
819
+			return nil, err
820
+		}
813 821
 
814
-	if cachedRootErr == nil {
815
-		signedRoot, cachedRootErr = r.validateRoot(rootJSON)
822
+		// again, the root on disk is the source of trust pinning, so use an empty trust
823
+		// pinning configuration
824
+		newBuilder = tuf.NewRepoBuilder(r.gun, r.CryptoService, trustpinning.TrustPinConfig{})
825
+
826
+		if err := newBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, false); err != nil {
827
+			// Ok, the old root is expired - we want to download a new one.  But we want to use the
828
+			// old root to verify the new root, so bootstrap a new builder with the old builder
829
+			minVersion = oldBuilder.GetLoadedVersion(data.CanonicalRootRole)
830
+			newBuilder = oldBuilder.BootstrapNewBuilder()
831
+		}
816 832
 	}
817 833
 
818 834
 	remote, remoteErr := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
819 835
 	if remoteErr != nil {
820 836
 		logrus.Error(remoteErr)
821
-	} else if cachedRootErr != nil || checkInitialized {
822
-		// remoteErr was nil and we had a cachedRootErr (or are specifically
823
-		// checking for initialization of the repo).
837
+	} else if !newBuilder.IsLoaded(data.CanonicalRootRole) || checkInitialized {
838
+		// remoteErr was nil and we were not able to load a root from cache or
839
+		// are specifically checking for initialization of the repo.
824 840
 
825 841
 		// if remote store successfully set up, try and get root from remote
826 842
 		// We don't have any local data to determine the size of root, so try the maximum (though it is restricted at 100MB)
827
-		tmpJSON, err := remote.GetMeta(data.CanonicalRootRole, -1)
843
+		tmpJSON, err := remote.GetMeta(data.CanonicalRootRole, store.NoSizeLimit)
828 844
 		if err != nil {
829 845
 			// we didn't have a root in cache and were unable to load one from
830 846
 			// the server. Nothing we can do but error.
831 847
 			return nil, err
832 848
 		}
833
-		if cachedRootErr != nil {
834
-			// we always want to use the downloaded root if there was a cache
835
-			// error.
836
-			signedRoot, err = r.validateRoot(tmpJSON)
837
-			if err != nil {
849
+
850
+		if !newBuilder.IsLoaded(data.CanonicalRootRole) {
851
+			// we always want to use the downloaded root if we couldn't load from cache
852
+			if err := newBuilder.Load(data.CanonicalRootRole, tmpJSON, minVersion, false); err != nil {
838 853
 				return nil, err
839 854
 			}
840 855
 
... ...
@@ -846,44 +829,13 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
846 846
 		}
847 847
 	}
848 848
 
849
-	r.tufRepo = tuf.NewRepo(r.CryptoService)
850
-
851
-	if signedRoot == nil {
849
+	// We can only get here if remoteErr != nil (hence we don't download any new root),
850
+	// and there was no root on disk
851
+	if !newBuilder.IsLoaded(data.CanonicalRootRole) {
852 852
 		return nil, ErrRepoNotInitialized{}
853 853
 	}
854 854
 
855
-	err = r.tufRepo.SetRoot(signedRoot)
856
-	if err != nil {
857
-		return nil, err
858
-	}
859
-
860
-	return tufclient.NewClient(
861
-		r.tufRepo,
862
-		remote,
863
-		r.fileStore,
864
-	), nil
865
-}
866
-
867
-// validateRoot MUST only be used during bootstrapping. It will only validate
868
-// signatures of the root based on known keys, not expiry or other metadata.
869
-// This is so that an out of date root can be loaded to be used in a rotation
870
-// should the TUF update process detect a problem.
871
-func (r *NotaryRepository) validateRoot(rootJSON []byte) (*data.SignedRoot, error) {
872
-	// can't just unmarshal into SignedRoot because validate root
873
-	// needs the root.Signed field to still be []byte for signature
874
-	// validation
875
-	root := &data.Signed{}
876
-	err := json.Unmarshal(rootJSON, root)
877
-	if err != nil {
878
-		return nil, err
879
-	}
880
-
881
-	err = trustpinning.ValidateRoot(r.CertStore, root, r.gun, r.trustPinning)
882
-	if err != nil {
883
-		return nil, err
884
-	}
885
-
886
-	return data.RootFromSigned(root)
855
+	return tufclient.NewClient(oldBuilder, newBuilder, remote, r.fileStore), nil
887 856
 }
888 857
 
889 858
 // RotateKey removes all existing keys associated with the role, and either
... ...
@@ -929,7 +881,7 @@ func (r *NotaryRepository) RotateKey(role string, serverManagesKey bool) error {
929 929
 		if err != nil {
930 930
 			return err
931 931
 		}
932
-		_, pubKey, err = rootCertKey(r.gun, privKey)
932
+		pubKey, err = rootCertKey(r.gun, privKey)
933 933
 		if err != nil {
934 934
 			return err
935 935
 		}
... ...
@@ -964,26 +916,12 @@ func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role, act
964 964
 	return cl.Add(c)
965 965
 }
966 966
 
967
-// DeleteTrustData removes the trust data stored for this repo in the TUF cache and certificate store on the client side
967
+// DeleteTrustData removes the trust data stored for this repo in the TUF cache on the client side
968 968
 func (r *NotaryRepository) DeleteTrustData() error {
969 969
 	// Clear TUF files and cache
970 970
 	if err := r.fileStore.RemoveAll(); err != nil {
971 971
 		return fmt.Errorf("error clearing TUF repo data: %v", err)
972 972
 	}
973 973
 	r.tufRepo = tuf.NewRepo(nil)
974
-	// Clear certificates
975
-	certificates, err := r.CertStore.GetCertificatesByCN(r.gun)
976
-	if err != nil {
977
-		// If there were no certificates to delete, we're done
978
-		if _, ok := err.(*trustmanager.ErrNoCertificatesFound); ok {
979
-			return nil
980
-		}
981
-		return fmt.Errorf("error retrieving certificates for %s: %v", r.gun, err)
982
-	}
983
-	for _, cert := range certificates {
984
-		if err := r.CertStore.RemoveCert(cert); err != nil {
985
-			return fmt.Errorf("error removing certificate: %v: %v", cert, err)
986
-		}
987
-	}
988 974
 	return nil
989 975
 }
... ...
@@ -13,7 +13,8 @@ import (
13 13
 
14 14
 // NewNotaryRepository is a helper method that returns a new notary repository.
15 15
 // It takes the base directory under where all the trust files will be stored
16
-// (usually ~/.docker/trust/).
16
+// (This normally defaults to "~/.notary" or "~/.docker/trust" when enabling
17
+// docker content trust).
17 18
 func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper,
18 19
 	retriever passphrase.Retriever, trustPinning trustpinning.TrustPinConfig) (
19 20
 	*NotaryRepository, error) {
20 21
new file mode 100644
... ...
@@ -0,0 +1,18 @@
0
+codecov:
1
+  notify:
2
+    # 2 builds on circleci, 1 jenkins build
3
+    after_n_builds: 3
4
+coverage:
5
+  status:
6
+    # project will give us the diff in the total code coverage between a commit
7
+    # and its parent
8
+    project:
9
+      default:
10
+        target: auto
11
+    # patch would give us the code coverage of the diff only
12
+    patch: false
13
+    # changes tells us if there are unexpected code coverage changes in other files
14
+    # which were not changed by the diff
15
+    changes: false
16
+comment: off
17
+
... ...
@@ -39,7 +39,7 @@ services:
39 39
       depends_on:
40 40
         - rdb-proxy
41 41
     rdb-01:
42
-      image: jlhawn/rethinkdb-tls
42
+      image: jlhawn/rethinkdb:2.3.0
43 43
       volumes:
44 44
         - ./fixtures/rethinkdb:/tls
45 45
         - rdb-01-data:/var/data
... ...
@@ -49,9 +49,9 @@ services:
49 49
             - rdb
50 50
             - rdb.rdb
51 51
             - rdb-01.rdb
52
-      command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
52
+      command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
53 53
     rdb-02:
54
-      image: jlhawn/rethinkdb-tls
54
+      image: jlhawn/rethinkdb:2.3.0
55 55
       volumes:
56 56
         - ./fixtures/rethinkdb:/tls
57 57
         - rdb-02-data:/var/data
... ...
@@ -61,9 +61,9 @@ services:
61 61
             - rdb
62 62
             - rdb.rdb
63 63
             - rdb-02.rdb
64
-      command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
64
+      command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
65 65
     rdb-03:
66
-      image: jlhawn/rethinkdb-tls
66
+      image: jlhawn/rethinkdb:2.3.0
67 67
       volumes:
68 68
         - ./fixtures/rethinkdb:/tls
69 69
         - rdb-03-data:/var/data
... ...
@@ -73,9 +73,9 @@ services:
73 73
             - rdb
74 74
             - rdb.rdb
75 75
             - rdb-03.rdb
76
-      command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
76
+      command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
77 77
     rdb-proxy:
78
-      image: jlhawn/rethinkdb-tls
78
+      image: jlhawn/rethinkdb:2.3.0
79 79
       ports:
80 80
         - "8080:8080"
81 81
       volumes:
... ...
@@ -85,7 +85,7 @@ services:
85 85
           aliases:
86 86
             - rdb-proxy
87 87
             - rdb-proxy.rdp
88
-      command: "proxy --bind all --join rdb.rdb --web-tls --web-tls-key /tls/key.pem --web-tls-cert /tls/cert.pem --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
88
+      command: "proxy --bind all --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
89 89
       depends_on:
90 90
         - rdb-01
91 91
         - rdb-02
... ...
@@ -39,7 +39,7 @@ services:
39 39
       depends_on:
40 40
         - rdb-proxy
41 41
     rdb-01:
42
-      image: jlhawn/rethinkdb-tls
42
+      image: jlhawn/rethinkdb:2.3.0
43 43
       volumes:
44 44
         - ./fixtures/rethinkdb:/tls
45 45
         - rdb-01-data:/var/data
... ...
@@ -49,9 +49,9 @@ services:
49 49
             - rdb
50 50
             - rdb.rdb
51 51
             - rdb-01.rdb
52
-      command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
52
+      command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
53 53
     rdb-02:
54
-      image: jlhawn/rethinkdb-tls
54
+      image: jlhawn/rethinkdb:2.3.0
55 55
       volumes:
56 56
         - ./fixtures/rethinkdb:/tls
57 57
         - rdb-02-data:/var/data
... ...
@@ -61,9 +61,9 @@ services:
61 61
             - rdb
62 62
             - rdb.rdb
63 63
             - rdb-02.rdb
64
-      command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
64
+      command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
65 65
     rdb-03:
66
-      image: jlhawn/rethinkdb-tls
66
+      image: jlhawn/rethinkdb:2.3.0
67 67
       volumes:
68 68
         - ./fixtures/rethinkdb:/tls
69 69
         - rdb-03-data:/var/data
... ...
@@ -73,9 +73,9 @@ services:
73 73
             - rdb
74 74
             - rdb.rdb
75 75
             - rdb-03.rdb
76
-      command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
76
+      command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
77 77
     rdb-proxy:
78
-      image: jlhawn/rethinkdb-tls
78
+      image: jlhawn/rethinkdb:2.3.0
79 79
       ports:
80 80
         - "8080:8080"
81 81
       volumes:
... ...
@@ -85,7 +85,7 @@ services:
85 85
           aliases:
86 86
             - rdb-proxy
87 87
             - rdb-proxy.rdp
88
-      command: "proxy --bind all --join rdb.rdb --web-tls --web-tls-key /tls/key.pem --web-tls-cert /tls/cert.pem --driver-tls --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
88
+      command: "proxy --bind all --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
89 89
       depends_on:
90 90
         - rdb-01
91 91
         - rdb-02
92 92
deleted file mode 100644
... ...
@@ -1,272 +0,0 @@
1
-package trustmanager
2
-
3
-import (
4
-	"crypto/x509"
5
-	"errors"
6
-	"os"
7
-	"path"
8
-
9
-	"github.com/Sirupsen/logrus"
10
-)
11
-
12
-// X509FileStore implements X509Store that persists on disk
13
-type X509FileStore struct {
14
-	validate       Validator
15
-	fileMap        map[CertID]string
16
-	fingerprintMap map[CertID]*x509.Certificate
17
-	nameMap        map[string][]CertID
18
-	fileStore      Storage
19
-}
20
-
21
-// NewX509FileStore returns a new X509FileStore.
22
-func NewX509FileStore(directory string) (*X509FileStore, error) {
23
-	validate := ValidatorFunc(func(cert *x509.Certificate) bool { return true })
24
-	return newX509FileStore(directory, validate)
25
-}
26
-
27
-// NewX509FilteredFileStore returns a new X509FileStore that validates certificates
28
-// that are added.
29
-func NewX509FilteredFileStore(directory string, validate func(*x509.Certificate) bool) (*X509FileStore, error) {
30
-	return newX509FileStore(directory, validate)
31
-}
32
-
33
-func newX509FileStore(directory string, validate func(*x509.Certificate) bool) (*X509FileStore, error) {
34
-	fileStore, err := NewSimpleFileStore(directory, certExtension)
35
-	if err != nil {
36
-		return nil, err
37
-	}
38
-
39
-	s := &X509FileStore{
40
-		validate:       ValidatorFunc(validate),
41
-		fileMap:        make(map[CertID]string),
42
-		fingerprintMap: make(map[CertID]*x509.Certificate),
43
-		nameMap:        make(map[string][]CertID),
44
-		fileStore:      fileStore,
45
-	}
46
-
47
-	err = loadCertsFromDir(s)
48
-	if err != nil {
49
-		return nil, err
50
-	}
51
-
52
-	return s, nil
53
-}
54
-
55
-// AddCert creates a filename for a given cert and adds a certificate with that name
56
-func (s *X509FileStore) AddCert(cert *x509.Certificate) error {
57
-	if cert == nil {
58
-		return errors.New("adding nil Certificate to X509Store")
59
-	}
60
-
61
-	// Check if this certificate meets our validation criteria
62
-	if !s.validate.Validate(cert) {
63
-		return &ErrCertValidation{}
64
-	}
65
-	// Attempt to write the certificate to the file
66
-	if err := s.addNamedCert(cert); err != nil {
67
-		return err
68
-	}
69
-
70
-	return nil
71
-}
72
-
73
-// addNamedCert allows adding a certificate while controlling the filename it gets
74
-// stored under. If the file does not exist on disk, saves it.
75
-func (s *X509FileStore) addNamedCert(cert *x509.Certificate) error {
76
-	fileName, certID, err := fileName(cert)
77
-	if err != nil {
78
-		return err
79
-	}
80
-
81
-	logrus.Debug("Adding cert with certID: ", certID)
82
-	// Validate if we already added this certificate before
83
-	if _, ok := s.fingerprintMap[certID]; ok {
84
-		return &ErrCertExists{}
85
-	}
86
-
87
-	// Convert certificate to PEM
88
-	certBytes := CertToPEM(cert)
89
-
90
-	// Save the file to disk if not already there.
91
-	if _, err = s.fileStore.Get(fileName); os.IsNotExist(err) {
92
-		if err := s.fileStore.Add(fileName, certBytes); err != nil {
93
-			return err
94
-		}
95
-	} else if err != nil {
96
-		return err
97
-	}
98
-
99
-	// We wrote the certificate succcessfully, add it to our in-memory storage
100
-	s.fingerprintMap[certID] = cert
101
-	s.fileMap[certID] = fileName
102
-
103
-	name := string(cert.Subject.CommonName)
104
-	s.nameMap[name] = append(s.nameMap[name], certID)
105
-
106
-	return nil
107
-}
108
-
109
-// RemoveCert removes a certificate from a X509FileStore.
110
-func (s *X509FileStore) RemoveCert(cert *x509.Certificate) error {
111
-	if cert == nil {
112
-		return errors.New("removing nil Certificate from X509Store")
113
-	}
114
-
115
-	certID, err := fingerprintCert(cert)
116
-	if err != nil {
117
-		return err
118
-	}
119
-	delete(s.fingerprintMap, certID)
120
-	filename := s.fileMap[certID]
121
-	delete(s.fileMap, certID)
122
-
123
-	name := string(cert.Subject.CommonName)
124
-
125
-	// Filter the fingerprint out of this name entry
126
-	fpList := s.nameMap[name]
127
-	newfpList := fpList[:0]
128
-	for _, x := range fpList {
129
-		if x != certID {
130
-			newfpList = append(newfpList, x)
131
-		}
132
-	}
133
-
134
-	s.nameMap[name] = newfpList
135
-
136
-	if err := s.fileStore.Remove(filename); err != nil {
137
-		return err
138
-	}
139
-
140
-	return nil
141
-}
142
-
143
-// RemoveAll removes all the certificates from the store
144
-func (s *X509FileStore) RemoveAll() error {
145
-	for _, filename := range s.fileMap {
146
-		if err := s.fileStore.Remove(filename); err != nil {
147
-			return err
148
-		}
149
-	}
150
-	s.fileMap = make(map[CertID]string)
151
-	s.fingerprintMap = make(map[CertID]*x509.Certificate)
152
-	s.nameMap = make(map[string][]CertID)
153
-
154
-	return nil
155
-}
156
-
157
-// AddCertFromPEM adds the first certificate that it finds in the byte[], returning
158
-// an error if no Certificates are found
159
-func (s X509FileStore) AddCertFromPEM(pemBytes []byte) error {
160
-	cert, err := LoadCertFromPEM(pemBytes)
161
-	if err != nil {
162
-		return err
163
-	}
164
-	return s.AddCert(cert)
165
-}
166
-
167
-// AddCertFromFile tries to adds a X509 certificate to the store given a filename
168
-func (s *X509FileStore) AddCertFromFile(filename string) error {
169
-	cert, err := LoadCertFromFile(filename)
170
-	if err != nil {
171
-		return err
172
-	}
173
-
174
-	return s.AddCert(cert)
175
-}
176
-
177
-// GetCertificates returns an array with all of the current X509 Certificates.
178
-func (s *X509FileStore) GetCertificates() []*x509.Certificate {
179
-	certs := make([]*x509.Certificate, len(s.fingerprintMap))
180
-	i := 0
181
-	for _, v := range s.fingerprintMap {
182
-		certs[i] = v
183
-		i++
184
-	}
185
-	return certs
186
-}
187
-
188
-// GetCertificatePool returns an x509 CertPool loaded with all the certificates
189
-// in the store.
190
-func (s *X509FileStore) GetCertificatePool() *x509.CertPool {
191
-	pool := x509.NewCertPool()
192
-
193
-	for _, v := range s.fingerprintMap {
194
-		pool.AddCert(v)
195
-	}
196
-	return pool
197
-}
198
-
199
-// GetCertificateByCertID returns the certificate that matches a certain certID
200
-func (s *X509FileStore) GetCertificateByCertID(certID string) (*x509.Certificate, error) {
201
-	return s.getCertificateByCertID(CertID(certID))
202
-}
203
-
204
-// getCertificateByCertID returns the certificate that matches a certain certID
205
-func (s *X509FileStore) getCertificateByCertID(certID CertID) (*x509.Certificate, error) {
206
-	// If it does not look like a hex encoded sha256 hash, error
207
-	if len(certID) != 64 {
208
-		return nil, errors.New("invalid Subject Key Identifier")
209
-	}
210
-
211
-	// Check to see if this subject key identifier exists
212
-	if cert, ok := s.fingerprintMap[CertID(certID)]; ok {
213
-		return cert, nil
214
-
215
-	}
216
-	return nil, &ErrNoCertificatesFound{query: string(certID)}
217
-}
218
-
219
-// GetCertificatesByCN returns all the certificates that match a specific
220
-// CommonName
221
-func (s *X509FileStore) GetCertificatesByCN(cn string) ([]*x509.Certificate, error) {
222
-	var certs []*x509.Certificate
223
-	if ids, ok := s.nameMap[cn]; ok {
224
-		for _, v := range ids {
225
-			cert, err := s.getCertificateByCertID(v)
226
-			if err != nil {
227
-				// This error should never happen. This would mean that we have
228
-				// an inconsistent X509FileStore
229
-				return nil, &ErrBadCertificateStore{}
230
-			}
231
-			certs = append(certs, cert)
232
-		}
233
-	}
234
-	if len(certs) == 0 {
235
-		return nil, &ErrNoCertificatesFound{query: cn}
236
-	}
237
-
238
-	return certs, nil
239
-}
240
-
241
-// GetVerifyOptions returns VerifyOptions with the certificates within the KeyStore
242
-// as part of the roots list. This never allows the use of system roots, returning
243
-// an error if there are no root CAs.
244
-func (s *X509FileStore) GetVerifyOptions(dnsName string) (x509.VerifyOptions, error) {
245
-	// If we have no Certificates loaded return error (we don't want to revert to using
246
-	// system CAs).
247
-	if len(s.fingerprintMap) == 0 {
248
-		return x509.VerifyOptions{}, errors.New("no root CAs available")
249
-	}
250
-
251
-	opts := x509.VerifyOptions{
252
-		DNSName: dnsName,
253
-		Roots:   s.GetCertificatePool(),
254
-	}
255
-
256
-	return opts, nil
257
-}
258
-
259
-// Empty returns true if there are no certificates in the X509FileStore, false
260
-// otherwise.
261
-func (s *X509FileStore) Empty() bool {
262
-	return len(s.fingerprintMap) == 0
263
-}
264
-
265
-func fileName(cert *x509.Certificate) (string, CertID, error) {
266
-	certID, err := fingerprintCert(cert)
267
-	if err != nil {
268
-		return "", "", err
269
-	}
270
-
271
-	return path.Join(cert.Subject.CommonName, string(certID)), certID, nil
272
-}
273 1
deleted file mode 100644
... ...
@@ -1,203 +0,0 @@
1
-package trustmanager
2
-
3
-import (
4
-	"crypto/x509"
5
-	"errors"
6
-
7
-	"github.com/Sirupsen/logrus"
8
-)
9
-
10
-// X509MemStore implements X509Store as an in-memory object with no persistence
11
-type X509MemStore struct {
12
-	validate       Validator
13
-	fingerprintMap map[CertID]*x509.Certificate
14
-	nameMap        map[string][]CertID
15
-}
16
-
17
-// NewX509MemStore returns a new X509MemStore.
18
-func NewX509MemStore() *X509MemStore {
19
-	validate := ValidatorFunc(func(cert *x509.Certificate) bool { return true })
20
-
21
-	return &X509MemStore{
22
-		validate:       validate,
23
-		fingerprintMap: make(map[CertID]*x509.Certificate),
24
-		nameMap:        make(map[string][]CertID),
25
-	}
26
-}
27
-
28
-// NewX509FilteredMemStore returns a new X509Memstore that validates certificates
29
-// that are added.
30
-func NewX509FilteredMemStore(validate func(*x509.Certificate) bool) *X509MemStore {
31
-	s := &X509MemStore{
32
-
33
-		validate:       ValidatorFunc(validate),
34
-		fingerprintMap: make(map[CertID]*x509.Certificate),
35
-		nameMap:        make(map[string][]CertID),
36
-	}
37
-
38
-	return s
39
-}
40
-
41
-// AddCert adds a certificate to the store
42
-func (s *X509MemStore) AddCert(cert *x509.Certificate) error {
43
-	if cert == nil {
44
-		return errors.New("adding nil Certificate to X509Store")
45
-	}
46
-
47
-	if !s.validate.Validate(cert) {
48
-		return &ErrCertValidation{}
49
-	}
50
-
51
-	certID, err := fingerprintCert(cert)
52
-	if err != nil {
53
-		return err
54
-	}
55
-
56
-	logrus.Debug("Adding cert with certID: ", certID)
57
-
58
-	// In this store we overwrite the certificate if it already exists
59
-	s.fingerprintMap[certID] = cert
60
-	name := string(cert.RawSubject)
61
-	s.nameMap[name] = append(s.nameMap[name], certID)
62
-
63
-	return nil
64
-}
65
-
66
-// RemoveCert removes a certificate from a X509MemStore.
67
-func (s *X509MemStore) RemoveCert(cert *x509.Certificate) error {
68
-	if cert == nil {
69
-		return errors.New("removing nil Certificate to X509Store")
70
-	}
71
-
72
-	certID, err := fingerprintCert(cert)
73
-	if err != nil {
74
-		return err
75
-	}
76
-	delete(s.fingerprintMap, certID)
77
-	name := string(cert.RawSubject)
78
-
79
-	// Filter the fingerprint out of this name entry
80
-	fpList := s.nameMap[name]
81
-	newfpList := fpList[:0]
82
-	for _, x := range fpList {
83
-		if x != certID {
84
-			newfpList = append(newfpList, x)
85
-		}
86
-	}
87
-
88
-	s.nameMap[name] = newfpList
89
-	return nil
90
-}
91
-
92
-// RemoveAll removes all the certificates from the store
93
-func (s *X509MemStore) RemoveAll() error {
94
-
95
-	for _, cert := range s.fingerprintMap {
96
-		if err := s.RemoveCert(cert); err != nil {
97
-			return err
98
-		}
99
-	}
100
-
101
-	return nil
102
-}
103
-
104
-// AddCertFromPEM adds a certificate to the store from a PEM blob
105
-func (s *X509MemStore) AddCertFromPEM(pemBytes []byte) error {
106
-	cert, err := LoadCertFromPEM(pemBytes)
107
-	if err != nil {
108
-		return err
109
-	}
110
-	return s.AddCert(cert)
111
-}
112
-
113
-// AddCertFromFile tries to adds a X509 certificate to the store given a filename
114
-func (s *X509MemStore) AddCertFromFile(originFilname string) error {
115
-	cert, err := LoadCertFromFile(originFilname)
116
-	if err != nil {
117
-		return err
118
-	}
119
-
120
-	return s.AddCert(cert)
121
-}
122
-
123
-// GetCertificates returns an array with all of the current X509 Certificates.
124
-func (s *X509MemStore) GetCertificates() []*x509.Certificate {
125
-	certs := make([]*x509.Certificate, len(s.fingerprintMap))
126
-	i := 0
127
-	for _, v := range s.fingerprintMap {
128
-		certs[i] = v
129
-		i++
130
-	}
131
-	return certs
132
-}
133
-
134
-// GetCertificatePool returns an x509 CertPool loaded with all the certificates
135
-// in the store.
136
-func (s *X509MemStore) GetCertificatePool() *x509.CertPool {
137
-	pool := x509.NewCertPool()
138
-
139
-	for _, v := range s.fingerprintMap {
140
-		pool.AddCert(v)
141
-	}
142
-	return pool
143
-}
144
-
145
-// GetCertificateByCertID returns the certificate that matches a certain certID
146
-func (s *X509MemStore) GetCertificateByCertID(certID string) (*x509.Certificate, error) {
147
-	return s.getCertificateByCertID(CertID(certID))
148
-}
149
-
150
-// getCertificateByCertID returns the certificate that matches a certain certID or error
151
-func (s *X509MemStore) getCertificateByCertID(certID CertID) (*x509.Certificate, error) {
152
-	// If it does not look like a hex encoded sha256 hash, error
153
-	if len(certID) != 64 {
154
-		return nil, errors.New("invalid Subject Key Identifier")
155
-	}
156
-
157
-	// Check to see if this subject key identifier exists
158
-	if cert, ok := s.fingerprintMap[CertID(certID)]; ok {
159
-		return cert, nil
160
-
161
-	}
162
-	return nil, &ErrNoCertificatesFound{query: string(certID)}
163
-}
164
-
165
-// GetCertificatesByCN returns all the certificates that match a specific
166
-// CommonName
167
-func (s *X509MemStore) GetCertificatesByCN(cn string) ([]*x509.Certificate, error) {
168
-	var certs []*x509.Certificate
169
-	if ids, ok := s.nameMap[cn]; ok {
170
-		for _, v := range ids {
171
-			cert, err := s.getCertificateByCertID(v)
172
-			if err != nil {
173
-				// This error should never happen. This would mean that we have
174
-				// an inconsistent X509MemStore
175
-				return nil, err
176
-			}
177
-			certs = append(certs, cert)
178
-		}
179
-	}
180
-	if len(certs) == 0 {
181
-		return nil, &ErrNoCertificatesFound{query: cn}
182
-	}
183
-
184
-	return certs, nil
185
-}
186
-
187
-// GetVerifyOptions returns VerifyOptions with the certificates within the KeyStore
188
-// as part of the roots list. This never allows the use of system roots, returning
189
-// an error if there are no root CAs.
190
-func (s *X509MemStore) GetVerifyOptions(dnsName string) (x509.VerifyOptions, error) {
191
-	// If we have no Certificates loaded return error (we don't want to revert to using
192
-	// system CAs).
193
-	if len(s.fingerprintMap) == 0 {
194
-		return x509.VerifyOptions{}, errors.New("no root CAs available")
195
-	}
196
-
197
-	opts := x509.VerifyOptions{
198
-		DNSName: dnsName,
199
-		Roots:   s.GetCertificatePool(),
200
-	}
201
-
202
-	return opts, nil
203
-}
204 1
deleted file mode 100644
... ...
@@ -1,144 +0,0 @@
1
-package trustmanager
2
-
3
-import (
4
-	"crypto/x509"
5
-	"errors"
6
-	"fmt"
7
-)
8
-
9
-const certExtension string = "crt"
10
-
11
-// ErrNoCertificatesFound is returned when no certificates are found for a
12
-// GetCertificatesBy*
13
-type ErrNoCertificatesFound struct {
14
-	query string
15
-}
16
-
17
-// ErrNoCertificatesFound is returned when no certificates are found for a
18
-// GetCertificatesBy*
19
-func (err ErrNoCertificatesFound) Error() string {
20
-	return fmt.Sprintf("error, no certificates found in the keystore match: %s", err.query)
21
-}
22
-
23
-// ErrCertValidation is returned when a certificate doesn't pass the store specific
24
-// validations
25
-type ErrCertValidation struct {
26
-}
27
-
28
-// ErrCertValidation is returned when a certificate doesn't pass the store specific
29
-// validations
30
-func (err ErrCertValidation) Error() string {
31
-	return fmt.Sprintf("store-specific certificate validations failed")
32
-}
33
-
34
-// ErrCertExists is returned when a Certificate already exists in the key store
35
-type ErrCertExists struct {
36
-}
37
-
38
-// ErrCertExists is returned when a Certificate already exists in the key store
39
-func (err ErrCertExists) Error() string {
40
-	return fmt.Sprintf("certificate already in the store")
41
-}
42
-
43
-// ErrBadCertificateStore is returned when there is an internal inconsistency
44
-// in our x509 store
45
-type ErrBadCertificateStore struct {
46
-}
47
-
48
-// ErrBadCertificateStore is returned when there is an internal inconsistency
49
-// in our x509 store
50
-func (err ErrBadCertificateStore) Error() string {
51
-	return fmt.Sprintf("inconsistent certificate store")
52
-}
53
-
54
-// X509Store is the interface for all X509Stores
55
-type X509Store interface {
56
-	AddCert(cert *x509.Certificate) error
57
-	AddCertFromPEM(pemCerts []byte) error
58
-	AddCertFromFile(filename string) error
59
-	RemoveCert(cert *x509.Certificate) error
60
-	RemoveAll() error
61
-	GetCertificateByCertID(certID string) (*x509.Certificate, error)
62
-	GetCertificatesByCN(cn string) ([]*x509.Certificate, error)
63
-	GetCertificates() []*x509.Certificate
64
-	GetCertificatePool() *x509.CertPool
65
-	GetVerifyOptions(dnsName string) (x509.VerifyOptions, error)
66
-}
67
-
68
-// CertID represent the ID used to identify certificates
69
-type CertID string
70
-
71
-// Validator is a convenience type to create validating function that filters
72
-// certificates that get added to the store
73
-type Validator interface {
74
-	Validate(cert *x509.Certificate) bool
75
-}
76
-
77
-// ValidatorFunc is a convenience type to create functions that implement
78
-// the Validator interface
79
-type ValidatorFunc func(cert *x509.Certificate) bool
80
-
81
-// Validate implements the Validator interface to allow for any func() bool method
82
-// to be passed as a Validator
83
-func (vf ValidatorFunc) Validate(cert *x509.Certificate) bool {
84
-	return vf(cert)
85
-}
86
-
87
-// Verify operates on an X509Store and validates the existence of a chain of trust
88
-// between a leafCertificate and a CA present inside of the X509 Store.
89
-// It requires at least two certificates in certList, a leaf Certificate and an
90
-// intermediate CA certificate.
91
-func Verify(s X509Store, dnsName string, certList []*x509.Certificate) error {
92
-	// If we have no Certificates loaded return error (we don't want to revert to using
93
-	// system CAs).
94
-	if len(s.GetCertificates()) == 0 {
95
-		return errors.New("no root CAs available")
96
-	}
97
-
98
-	// At a minimum we should be provided a leaf cert and an intermediate.
99
-	if len(certList) < 2 {
100
-		return errors.New("certificate and at least one intermediate needed")
101
-	}
102
-
103
-	// Get the VerifyOptions from the keystore for a base dnsName
104
-	opts, err := s.GetVerifyOptions(dnsName)
105
-	if err != nil {
106
-		return err
107
-	}
108
-
109
-	// Create a Certificate Pool for our intermediate certificates
110
-	intPool := x509.NewCertPool()
111
-	var leafCert *x509.Certificate
112
-
113
-	// Iterate through all the certificates
114
-	for _, c := range certList {
115
-		// If the cert is a CA, we add it to the intermediates pool. If not, we call
116
-		// it the leaf cert
117
-		if c.IsCA {
118
-			intPool.AddCert(c)
119
-			continue
120
-		}
121
-		// Certificate is not a CA, it must be our leaf certificate.
122
-		// If we already found one, bail with error
123
-		if leafCert != nil {
124
-			return errors.New("more than one leaf certificate found")
125
-		}
126
-		leafCert = c
127
-	}
128
-
129
-	// We exited the loop with no leaf certificates
130
-	if leafCert == nil {
131
-		return errors.New("no leaf certificates found")
132
-	}
133
-
134
-	// We have one leaf certificate and at least one intermediate. Lets add this
135
-	// Cert Pool as the Intermediates list on our VerifyOptions
136
-	opts.Intermediates = intPool
137
-
138
-	// Finally, let's call Verify on our leafCert with our fully configured options
139
-	chains, err := leafCert.Verify(opts)
140
-	if len(chains) == 0 || err != nil {
141
-		return fmt.Errorf("certificate verification failed: %v", err)
142
-	}
143
-	return nil
144
-}
... ...
@@ -14,8 +14,6 @@ import (
14 14
 	"io"
15 15
 	"io/ioutil"
16 16
 	"math/big"
17
-	"net/http"
18
-	"net/url"
19 17
 	"time"
20 18
 
21 19
 	"github.com/Sirupsen/logrus"
... ...
@@ -24,40 +22,6 @@ import (
24 24
 	"github.com/docker/notary/tuf/data"
25 25
 )
26 26
 
27
-// GetCertFromURL tries to get a X509 certificate given a HTTPS URL
28
-func GetCertFromURL(urlStr string) (*x509.Certificate, error) {
29
-	url, err := url.Parse(urlStr)
30
-	if err != nil {
31
-		return nil, err
32
-	}
33
-
34
-	// Check if we are adding via HTTPS
35
-	if url.Scheme != "https" {
36
-		return nil, errors.New("only HTTPS URLs allowed")
37
-	}
38
-
39
-	// Download the certificate and write to directory
40
-	resp, err := http.Get(url.String())
41
-	if err != nil {
42
-		return nil, err
43
-	}
44
-
45
-	// Copy the content to certBytes
46
-	defer resp.Body.Close()
47
-	certBytes, err := ioutil.ReadAll(resp.Body)
48
-	if err != nil {
49
-		return nil, err
50
-	}
51
-
52
-	// Try to extract the first valid PEM certificate from the bytes
53
-	cert, err := LoadCertFromPEM(certBytes)
54
-	if err != nil {
55
-		return nil, err
56
-	}
57
-
58
-	return cert, nil
59
-}
60
-
61 27
 // CertToPEM is a utility function returns a PEM encoded x509 Certificate
62 28
 func CertToPEM(cert *x509.Certificate) []byte {
63 29
 	pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
... ...
@@ -100,60 +64,6 @@ func LoadCertFromPEM(pemBytes []byte) (*x509.Certificate, error) {
100 100
 	return nil, errors.New("no certificates found in PEM data")
101 101
 }
102 102
 
103
-// FingerprintCert returns a TUF compliant fingerprint for a X509 Certificate
104
-func FingerprintCert(cert *x509.Certificate) (string, error) {
105
-	certID, err := fingerprintCert(cert)
106
-	if err != nil {
107
-		return "", err
108
-	}
109
-
110
-	return string(certID), nil
111
-}
112
-
113
-func fingerprintCert(cert *x509.Certificate) (CertID, error) {
114
-	block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
115
-	pemdata := pem.EncodeToMemory(&block)
116
-
117
-	var tufKey data.PublicKey
118
-	switch cert.PublicKeyAlgorithm {
119
-	case x509.RSA:
120
-		tufKey = data.NewRSAx509PublicKey(pemdata)
121
-	case x509.ECDSA:
122
-		tufKey = data.NewECDSAx509PublicKey(pemdata)
123
-	default:
124
-		return "", fmt.Errorf("got Unknown key type while fingerprinting certificate")
125
-	}
126
-
127
-	return CertID(tufKey.ID()), nil
128
-}
129
-
130
-// loadCertsFromDir receives a store AddCertFromFile for each certificate found
131
-func loadCertsFromDir(s *X509FileStore) error {
132
-	for _, f := range s.fileStore.ListFiles() {
133
-		// ListFiles returns relative paths
134
-		data, err := s.fileStore.Get(f)
135
-		if err != nil {
136
-			// the filestore told us it had a file that it then couldn't serve.
137
-			// this is a serious problem so error immediately
138
-			return err
139
-		}
140
-		err = s.AddCertFromPEM(data)
141
-		if err != nil {
142
-			if _, ok := err.(*ErrCertValidation); ok {
143
-				logrus.Debugf("ignoring certificate, did not pass validation: %s", f)
144
-				continue
145
-			}
146
-			if _, ok := err.(*ErrCertExists); ok {
147
-				logrus.Debugf("ignoring certificate, already exists in the store: %s", f)
148
-				continue
149
-			}
150
-
151
-			return err
152
-		}
153
-	}
154
-	return nil
155
-}
156
-
157 103
 // LoadCertFromFile loads the first certificate from the file provided. The
158 104
 // data is expected to be PEM Encoded and contain one of more certificates
159 105
 // with PEM type "CERTIFICATE"
... ...
@@ -533,37 +443,39 @@ func CertToKey(cert *x509.Certificate) data.PublicKey {
533 533
 
534 534
 // CertsToKeys transforms each of the input certificate chains into its corresponding
535 535
 // PublicKey
536
-func CertsToKeys(leafCerts []*x509.Certificate, intCerts map[string][]*x509.Certificate) map[string]data.PublicKey {
536
+func CertsToKeys(leafCerts map[string]*x509.Certificate, intCerts map[string][]*x509.Certificate) map[string]data.PublicKey {
537 537
 	keys := make(map[string]data.PublicKey)
538
-	for _, leafCert := range leafCerts {
539
-		certBundle := []*x509.Certificate{leafCert}
540
-		certID, err := FingerprintCert(leafCert)
541
-		if err != nil {
542
-			continue
543
-		}
544
-		if intCertsForLeafs, ok := intCerts[certID]; ok {
545
-			certBundle = append(certBundle, intCertsForLeafs...)
538
+	for id, leafCert := range leafCerts {
539
+		if key, err := CertBundleToKey(leafCert, intCerts[id]); err == nil {
540
+			keys[key.ID()] = key
546 541
 		}
547
-		certChainPEM, err := CertChainToPEM(certBundle)
548
-		if err != nil {
549
-			continue
550
-		}
551
-		var newKey data.PublicKey
552
-		// Use the leaf cert's public key algorithm for typing
553
-		switch leafCert.PublicKeyAlgorithm {
554
-		case x509.RSA:
555
-			newKey = data.NewRSAx509PublicKey(certChainPEM)
556
-		case x509.ECDSA:
557
-			newKey = data.NewECDSAx509PublicKey(certChainPEM)
558
-		default:
559
-			logrus.Debugf("Unknown key type parsed from certificate: %v", leafCert.PublicKeyAlgorithm)
560
-			continue
561
-		}
562
-		keys[newKey.ID()] = newKey
563 542
 	}
564 543
 	return keys
565 544
 }
566 545
 
546
+// CertBundleToKey creates a TUF key from a leaf certs and a list of
547
+// intermediates
548
+func CertBundleToKey(leafCert *x509.Certificate, intCerts []*x509.Certificate) (data.PublicKey, error) {
549
+	certBundle := []*x509.Certificate{leafCert}
550
+	certBundle = append(certBundle, intCerts...)
551
+	certChainPEM, err := CertChainToPEM(certBundle)
552
+	if err != nil {
553
+		return nil, err
554
+	}
555
+	var newKey data.PublicKey
556
+	// Use the leaf cert's public key algorithm for typing
557
+	switch leafCert.PublicKeyAlgorithm {
558
+	case x509.RSA:
559
+		newKey = data.NewRSAx509PublicKey(certChainPEM)
560
+	case x509.ECDSA:
561
+		newKey = data.NewECDSAx509PublicKey(certChainPEM)
562
+	default:
563
+		logrus.Debugf("Unknown key type parsed from certificate: %v", leafCert.PublicKeyAlgorithm)
564
+		return nil, x509.ErrUnsupportedAlgorithm
565
+	}
566
+	return newKey, nil
567
+}
568
+
567 569
 // NewCertificate returns an X509 Certificate following a template, given a GUN and validity interval.
568 570
 func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
569 571
 	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
... ...
@@ -610,14 +522,3 @@ func X509PublicKeyID(certPubKey data.PublicKey) (string, error) {
610 610
 
611 611
 	return key.ID(), nil
612 612
 }
613
-
614
-// FilterCertsExpiredSha1 can be used as the filter function to cert store
615
-// initializers to filter out all expired or SHA-1 certificate that we
616
-// shouldn't load.
617
-func FilterCertsExpiredSha1(cert *x509.Certificate) bool {
618
-	return !cert.IsCA &&
619
-		time.Now().Before(cert.NotAfter) &&
620
-		cert.SignatureAlgorithm != x509.SHA1WithRSA &&
621
-		cert.SignatureAlgorithm != x509.DSAWithSHA1 &&
622
-		cert.SignatureAlgorithm != x509.ECDSAWithSHA1
623
-}
... ...
@@ -137,7 +137,7 @@ type YubiPrivateKey struct {
137 137
 	libLoader     pkcs11LibLoader
138 138
 }
139 139
 
140
-// YubiKeySigner wraps a YubiPrivateKey and implements the crypto.Signer interface
140
+// yubikeySigner wraps a YubiPrivateKey and implements the crypto.Signer interface
141 141
 type yubikeySigner struct {
142 142
 	YubiPrivateKey
143 143
 }
... ...
@@ -344,7 +344,7 @@ func getECDSAKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byt
344 344
 	return data.NewECDSAPublicKey(pubBytes), data.CanonicalRootRole, nil
345 345
 }
346 346
 
347
-// Sign returns a signature for a given signature request
347
+// sign returns a signature for a given signature request
348 348
 func sign(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever passphrase.Retriever, payload []byte) ([]byte, error) {
349 349
 	err := login(ctx, session, passRetriever, pkcs11.CKU_USER, UserPin)
350 350
 	if err != nil {
... ...
@@ -37,13 +37,9 @@ func (err ErrRootRotationFail) Error() string {
37 37
 	return fmt.Sprintf("could not rotate trust to a new trusted root: %s", err.Reason)
38 38
 }
39 39
 
40
-func prettyFormatCertIDs(certs []*x509.Certificate) string {
40
+func prettyFormatCertIDs(certs map[string]*x509.Certificate) string {
41 41
 	ids := make([]string, 0, len(certs))
42
-	for _, cert := range certs {
43
-		id, err := trustmanager.FingerprintCert(cert)
44
-		if err != nil {
45
-			id = fmt.Sprintf("[Error %s]", err)
46
-		}
42
+	for id := range certs {
47 43
 		ids = append(ids, id)
48 44
 	}
49 45
 	return strings.Join(ids, ", ")
... ...
@@ -53,8 +49,9 @@ func prettyFormatCertIDs(certs []*x509.Certificate) string {
53 53
 ValidateRoot receives a new root, validates its correctness and attempts to
54 54
 do root key rotation if needed.
55 55
 
56
-First we list the current trusted certificates we have for a particular GUN. If
57
-that list is non-empty means that we've already seen this repository before, and
56
+First we check if we have any trusted certificates for a particular GUN in
57
+a previous root, if we have one. If the previous root is not nil and we find
58
+certificates for this GUN, we've already seen this repository before, and
58 59
 have a list of trusted certificates for it. In this case, we use this list of
59 60
 certificates to attempt to validate this root file.
60 61
 
... ...
@@ -86,68 +83,67 @@ We shall call this: TOFUS.
86 86
 
87 87
 Validation failure at any step will result in an ErrValidationFailed error.
88 88
 */
89
-func ValidateRoot(certStore trustmanager.X509Store, root *data.Signed, gun string, trustPinning TrustPinConfig) error {
89
+func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trustPinning TrustPinConfig) (*data.SignedRoot, error) {
90 90
 	logrus.Debugf("entered ValidateRoot with dns: %s", gun)
91 91
 	signedRoot, err := data.RootFromSigned(root)
92 92
 	if err != nil {
93
-		return err
93
+		return nil, err
94 94
 	}
95 95
 
96 96
 	rootRole, err := signedRoot.BuildBaseRole(data.CanonicalRootRole)
97 97
 	if err != nil {
98
-		return err
98
+		return nil, err
99 99
 	}
100 100
 
101 101
 	// Retrieve all the leaf and intermediate certificates in root for which the CN matches the GUN
102 102
 	allLeafCerts, allIntCerts := parseAllCerts(signedRoot)
103
-	certsFromRoot, err := validRootLeafCerts(allLeafCerts, gun)
103
+	certsFromRoot, err := validRootLeafCerts(allLeafCerts, gun, true)
104
+
104 105
 	if err != nil {
105 106
 		logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err)
106
-		return &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
107
+		return nil, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
107 108
 	}
108 109
 
109
-	// Retrieve all the trusted certificates that match this gun
110
-	trustedCerts, err := certStore.GetCertificatesByCN(gun)
111
-	if err != nil {
112
-		// If the error that we get back is different than ErrNoCertificatesFound
113
-		// we couldn't check if there are any certificates with this CN already
114
-		// trusted. Let's take the conservative approach and return a failed validation
115
-		if _, ok := err.(*trustmanager.ErrNoCertificatesFound); !ok {
116
-			logrus.Debugf("error retrieving trusted certificates for: %s, %v", gun, err)
117
-			return &ErrValidationFail{Reason: "unable to retrieve trusted certificates"}
110
+	// If we have a previous root, let's try to use it to validate that this new root is valid.
111
+	if prevRoot != nil {
112
+		// Retrieve all the trusted certificates from our previous root
113
+		// Note that we do not validate expiries here since our originally trusted root might have expired certs
114
+		allTrustedLeafCerts, allTrustedIntCerts := parseAllCerts(prevRoot)
115
+		trustedLeafCerts, err := validRootLeafCerts(allTrustedLeafCerts, gun, false)
116
+
117
+		// Use the certificates we found in the previous root for the GUN to verify its signatures
118
+		// This could potentially be an empty set, in which case we will fail to verify
119
+		logrus.Debugf("found %d valid root leaf certificates for %s: %s", len(trustedLeafCerts), gun,
120
+			prettyFormatCertIDs(trustedLeafCerts))
121
+
122
+		// Extract the previous root's threshold for signature verification
123
+		prevRootRoleData, ok := prevRoot.Signed.Roles[data.CanonicalRootRole]
124
+		if !ok {
125
+			return nil, &ErrValidationFail{Reason: "could not retrieve previous root role data"}
118 126
 		}
119
-	}
120
-	// If we have certificates that match this specific GUN, let's make sure to
121
-	// use them first to validate that this new root is valid.
122
-	if len(trustedCerts) != 0 {
123
-		logrus.Debugf("found %d valid root certificates for %s: %s", len(trustedCerts), gun,
124
-			prettyFormatCertIDs(trustedCerts))
127
+
125 128
 		err = signed.VerifySignatures(
126
-			root, data.BaseRole{Keys: trustmanager.CertsToKeys(trustedCerts, allIntCerts), Threshold: 1})
129
+			root, data.BaseRole{Keys: trustmanager.CertsToKeys(trustedLeafCerts, allTrustedIntCerts), Threshold: prevRootRoleData.Threshold})
127 130
 		if err != nil {
128 131
 			logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
129
-			return &ErrValidationFail{Reason: "failed to validate data with current trusted certificates"}
132
+			return nil, &ErrRootRotationFail{Reason: "failed to validate data with current trusted certificates"}
130 133
 		}
131 134
 	} else {
132 135
 		logrus.Debugf("found no currently valid root certificates for %s, using trust_pinning config to bootstrap trust", gun)
133 136
 		trustPinCheckFunc, err := NewTrustPinChecker(trustPinning, gun)
134 137
 		if err != nil {
135
-			return &ErrValidationFail{Reason: err.Error()}
138
+			return nil, &ErrValidationFail{Reason: err.Error()}
136 139
 		}
137 140
 
138
-		validPinnedCerts := []*x509.Certificate{}
139
-		for _, cert := range certsFromRoot {
140
-			certID, err := trustmanager.FingerprintCert(cert)
141
-			if err != nil {
141
+		validPinnedCerts := map[string]*x509.Certificate{}
142
+		for id, cert := range certsFromRoot {
143
+			if ok := trustPinCheckFunc(cert, allIntCerts[id]); !ok {
142 144
 				continue
143 145
 			}
144
-			if ok := trustPinCheckFunc(cert, allIntCerts[certID]); !ok {
145
-				continue
146
-			}
147
-			validPinnedCerts = append(validPinnedCerts, cert)
146
+			validPinnedCerts[id] = cert
148 147
 		}
149 148
 		if len(validPinnedCerts) == 0 {
150
-			return &ErrValidationFail{Reason: "unable to match any certificates to trust_pinning config"}
149
+			return nil, &ErrValidationFail{Reason: "unable to match any certificates to trust_pinning config"}
151 150
 		}
152 151
 		certsFromRoot = validPinnedCerts
153 152
 	}
... ...
@@ -159,64 +155,29 @@ func ValidateRoot(certStore trustmanager.X509Store, root *data.Signed, gun strin
159 159
 		Keys: trustmanager.CertsToKeys(certsFromRoot, allIntCerts), Threshold: rootRole.Threshold})
160 160
 	if err != nil {
161 161
 		logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
162
-		return &ErrValidationFail{Reason: "failed to validate integrity of roots"}
163
-	}
164
-
165
-	// Getting here means:
166
-	// A) we had trusted certificates and both the old and new validated this root.
167
-	// or
168
-	// B) we had no trusted certificates but the new set of certificates has integrity (self-signed).
169
-	logrus.Debugf("entering root certificate rotation for: %s", gun)
170
-
171
-	// Do root certificate rotation: we trust only the certs present in the new root
172
-	// First we add all the new certificates (even if they already exist)
173
-	for _, cert := range certsFromRoot {
174
-		err := certStore.AddCert(cert)
175
-		if err != nil {
176
-			// If the error is already exists we don't fail the rotation
177
-			if _, ok := err.(*trustmanager.ErrCertExists); ok {
178
-				logrus.Debugf("ignoring certificate addition to: %s", gun)
179
-				continue
180
-			}
181
-			logrus.Debugf("error adding new trusted certificate for: %s, %v", gun, err)
182
-		}
183
-	}
184
-
185
-	// Now we delete old certificates that aren't present in the new root
186
-	oldCertsToRemove, err := certsToRemove(trustedCerts, certsFromRoot)
187
-	if err != nil {
188
-		logrus.Debugf("inconsistency when removing old certificates: %v", err)
189
-		return err
190
-	}
191
-	for certID, cert := range oldCertsToRemove {
192
-		logrus.Debugf("removing certificate with certID: %s", certID)
193
-		err = certStore.RemoveCert(cert)
194
-		if err != nil {
195
-			logrus.Debugf("failed to remove trusted certificate with keyID: %s, %v", certID, err)
196
-			return &ErrRootRotationFail{Reason: "failed to rotate root keys"}
197
-		}
162
+		return nil, &ErrValidationFail{Reason: "failed to validate integrity of roots"}
198 163
 	}
199 164
 
200 165
 	logrus.Debugf("Root validation succeeded for %s", gun)
201
-	return nil
166
+	return signedRoot, nil
202 167
 }
203 168
 
204
-// validRootLeafCerts returns a list of non-expired, non-sha1 certificates
169
+// validRootLeafCerts returns a list of possibly (if checkExpiry is true) non-expired, non-sha1 certificates
205 170
 // found in root whose Common-Names match the provided GUN. Note that this
206 171
 // "validity" alone does not imply any measure of trust.
207
-func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string) ([]*x509.Certificate, error) {
208
-	var validLeafCerts []*x509.Certificate
172
+func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string, checkExpiry bool) (map[string]*x509.Certificate, error) {
173
+	validLeafCerts := make(map[string]*x509.Certificate)
209 174
 
210 175
 	// Go through every leaf certificate and check that the CN matches the gun
211
-	for _, cert := range allLeafCerts {
176
+	for id, cert := range allLeafCerts {
212 177
 		// Validate that this leaf certificate has a CN that matches the exact gun
213 178
 		if cert.Subject.CommonName != gun {
214 179
 			logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s",
215 180
 				cert.Subject.CommonName, gun)
216 181
 			continue
217 182
 		}
218
-		// Make sure the certificate is not expired
219
-		if time.Now().After(cert.NotAfter) {
183
+		// Make sure the certificate is not expired if checkExpiry is true
184
+		if checkExpiry && time.Now().After(cert.NotAfter) {
220 185
 			logrus.Debugf("error leaf certificate is expired")
221 186
 			continue
222 187
 		}
... ...
@@ -230,7 +191,7 @@ func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string) (
230 230
 			continue
231 231
 		}
232 232
 
233
-		validLeafCerts = append(validLeafCerts, cert)
233
+		validLeafCerts[id] = cert
234 234
 	}
235 235
 
236 236
 	if len(validLeafCerts) < 1 {
... ...
@@ -246,11 +207,15 @@ func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string) (
246 246
 // parseAllCerts returns two maps, one with all of the leafCertificates and one
247 247
 // with all the intermediate certificates found in signedRoot
248 248
 func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, map[string][]*x509.Certificate) {
249
+	if signedRoot == nil {
250
+		return nil, nil
251
+	}
252
+
249 253
 	leafCerts := make(map[string]*x509.Certificate)
250 254
 	intCerts := make(map[string][]*x509.Certificate)
251 255
 
252 256
 	// Before we loop through all root keys available, make sure any exist
253
-	rootRoles, ok := signedRoot.Signed.Roles["root"]
257
+	rootRoles, ok := signedRoot.Signed.Roles[data.CanonicalRootRole]
254 258
 	if !ok {
255 259
 		logrus.Debugf("tried to parse certificates from invalid root signed data")
256 260
 		return nil, nil
... ...
@@ -290,59 +255,14 @@ func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, m
290 290
 
291 291
 		// Get the ID of the leaf certificate
292 292
 		leafCert := leafCertList[0]
293
-		leafID, err := trustmanager.FingerprintCert(leafCert)
294
-		if err != nil {
295
-			logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", keyID, err)
296
-			continue
297
-		}
298 293
 
299 294
 		// Store the leaf cert in the map
300
-		leafCerts[leafID] = leafCert
295
+		leafCerts[key.ID()] = leafCert
301 296
 
302 297
 		// Get all the remainder certificates marked as a CA to be used as intermediates
303 298
 		intermediateCerts := trustmanager.GetIntermediateCerts(decodedCerts)
304
-		intCerts[leafID] = intermediateCerts
299
+		intCerts[key.ID()] = intermediateCerts
305 300
 	}
306 301
 
307 302
 	return leafCerts, intCerts
308 303
 }
309
-
310
-// certsToRemove returns all the certificates from oldCerts that aren't present
311
-// in newCerts.  Note that newCerts should never be empty, else this function will error.
312
-// We expect newCerts to come from validateRootLeafCerts, which does not return empty sets.
313
-func certsToRemove(oldCerts, newCerts []*x509.Certificate) (map[string]*x509.Certificate, error) {
314
-	certsToRemove := make(map[string]*x509.Certificate)
315
-
316
-	// Populate a map with all the IDs from newCert
317
-	var newCertMap = make(map[string]struct{})
318
-	for _, cert := range newCerts {
319
-		certID, err := trustmanager.FingerprintCert(cert)
320
-		if err != nil {
321
-			logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", certID, err)
322
-			continue
323
-		}
324
-		newCertMap[certID] = struct{}{}
325
-	}
326
-
327
-	// We don't want to "rotate" certificates to an empty set, nor keep old certificates if the
328
-	// new root does not trust them.  newCerts should come from validRootLeafCerts, which refuses
329
-	// to return an empty set, and they should all be fingerprintable, so this should never happen
330
-	// - fail just to be sure.
331
-	if len(newCertMap) == 0 {
332
-		return nil, &ErrRootRotationFail{Reason: "internal error, got no certificates to rotate to"}
333
-	}
334
-
335
-	// Iterate over all the old certificates and check to see if we should remove them
336
-	for _, cert := range oldCerts {
337
-		certID, err := trustmanager.FingerprintCert(cert)
338
-		if err != nil {
339
-			logrus.Debugf("error while fingerprinting root certificate with certID: %s, %v", certID, err)
340
-			continue
341
-		}
342
-		if _, ok := newCertMap[certID]; !ok {
343
-			certsToRemove[certID] = cert
344
-		}
345
-	}
346
-
347
-	return certsToRemove, nil
348
-}
... ...
@@ -3,6 +3,7 @@ package trustpinning
3 3
 import (
4 4
 	"crypto/x509"
5 5
 	"fmt"
6
+	"github.com/Sirupsen/logrus"
6 7
 	"github.com/docker/notary/trustmanager"
7 8
 	"github.com/docker/notary/tuf/utils"
8 9
 	"strings"
... ...
@@ -67,17 +68,12 @@ func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun string) (CertChecker,
67 67
 func (t trustPinChecker) certsCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool {
68 68
 	// reconstruct the leaf + intermediate cert chain, which is bundled as {leaf, intermediates...},
69 69
 	// in order to get the matching id in the root file
70
-	leafCertID, err := trustmanager.FingerprintCert(leafCert)
70
+	key, err := trustmanager.CertBundleToKey(leafCert, intCerts)
71 71
 	if err != nil {
72
+		logrus.Debug("error creating cert bundle: ", err.Error())
72 73
 		return false
73 74
 	}
74
-	rootKeys := trustmanager.CertsToKeys([]*x509.Certificate{leafCert}, map[string][]*x509.Certificate{leafCertID: intCerts})
75
-	for keyID := range rootKeys {
76
-		if utils.StrSliceContains(t.pinnedCertIDs, keyID) {
77
-			return true
78
-		}
79
-	}
80
-	return false
75
+	return utils.StrSliceContains(t.pinnedCertIDs, key.ID())
81 76
 }
82 77
 
83 78
 func (t trustPinChecker) caCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool {
84 79
new file mode 100644
... ...
@@ -0,0 +1,673 @@
0
+package tuf
1
+
2
+import (
3
+	"fmt"
4
+
5
+	"github.com/docker/go/canonical/json"
6
+	"github.com/docker/notary"
7
+
8
+	"github.com/docker/notary/trustpinning"
9
+	"github.com/docker/notary/tuf/data"
10
+	"github.com/docker/notary/tuf/signed"
11
+	"github.com/docker/notary/tuf/utils"
12
+)
13
+
14
+// ErrBuildDone is returned when any functions are called on RepoBuilder, and it
15
+// is already finished building
16
+var ErrBuildDone = fmt.Errorf(
17
+	"the builder has finished building and cannot accept any more input or produce any more output")
18
+
19
+// ErrInvalidBuilderInput is returned when RepoBuilder.Load is called
20
+// with the wrong type of metadata for thes tate that it's in
21
+type ErrInvalidBuilderInput struct{ msg string }
22
+
23
+func (e ErrInvalidBuilderInput) Error() string {
24
+	return e.msg
25
+}
26
+
27
+// ConsistentInfo is the consistent name and size of a role, or just the name
28
+// of the role and a -1 if no file metadata for the role is known
29
+type ConsistentInfo struct {
30
+	RoleName string
31
+	fileMeta data.FileMeta
32
+}
33
+
34
+// ChecksumKnown determines whether or not we know enough to provide a size and
35
+// consistent name
36
+func (c ConsistentInfo) ChecksumKnown() bool {
37
+	// empty hash, no size : this is the zero value
38
+	return len(c.fileMeta.Hashes) > 0 || c.fileMeta.Length != 0
39
+}
40
+
41
+// ConsistentName returns the consistent name (rolename.sha256) for the role
42
+// given this consistent information
43
+func (c ConsistentInfo) ConsistentName() string {
44
+	return utils.ConsistentName(c.RoleName, c.fileMeta.Hashes[notary.SHA256])
45
+}
46
+
47
+// Length returns the expected length of the role as per this consistent
48
+// information - if no checksum information is known, the size is -1.
49
+func (c ConsistentInfo) Length() int64 {
50
+	if c.ChecksumKnown() {
51
+		return c.fileMeta.Length
52
+	}
53
+	return -1
54
+}
55
+
56
+// RepoBuilder is an interface for an object which builds a tuf.Repo
57
+type RepoBuilder interface {
58
+	Load(roleName string, content []byte, minVersion int, allowExpired bool) error
59
+	GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error)
60
+	GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error)
61
+	Finish() (*Repo, error)
62
+	BootstrapNewBuilder() RepoBuilder
63
+
64
+	// informative functions
65
+	IsLoaded(roleName string) bool
66
+	GetLoadedVersion(roleName string) int
67
+	GetConsistentInfo(roleName string) ConsistentInfo
68
+}
69
+
70
+// finishedBuilder refuses any more input or output
71
+type finishedBuilder struct{}
72
+
73
+func (f finishedBuilder) Load(roleName string, content []byte, minVersion int, allowExpired bool) error {
74
+	return ErrBuildDone
75
+}
76
+func (f finishedBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) {
77
+	return nil, 0, ErrBuildDone
78
+}
79
+func (f finishedBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) {
80
+	return nil, 0, ErrBuildDone
81
+}
82
+func (f finishedBuilder) Finish() (*Repo, error)               { return nil, ErrBuildDone }
83
+func (f finishedBuilder) BootstrapNewBuilder() RepoBuilder     { return f }
84
+func (f finishedBuilder) IsLoaded(roleName string) bool        { return false }
85
+func (f finishedBuilder) GetLoadedVersion(roleName string) int { return 0 }
86
+func (f finishedBuilder) GetConsistentInfo(roleName string) ConsistentInfo {
87
+	return ConsistentInfo{RoleName: roleName}
88
+}
89
+
90
+// NewRepoBuilder is the only way to get a pre-built RepoBuilder
91
+func NewRepoBuilder(gun string, cs signed.CryptoService, trustpin trustpinning.TrustPinConfig) RepoBuilder {
92
+	return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
93
+		repo:                 NewRepo(cs),
94
+		gun:                  gun,
95
+		trustpin:             trustpin,
96
+		loadedNotChecksummed: make(map[string][]byte),
97
+	}}
98
+}
99
+
100
+// repoBuilderWrapper embeds a repoBuilder, but once Finish is called, swaps
101
+// the embed out with a finishedBuilder
102
+type repoBuilderWrapper struct {
103
+	RepoBuilder
104
+}
105
+
106
+func (rbw *repoBuilderWrapper) Finish() (*Repo, error) {
107
+	switch rbw.RepoBuilder.(type) {
108
+	case finishedBuilder:
109
+		return rbw.RepoBuilder.Finish()
110
+	default:
111
+		old := rbw.RepoBuilder
112
+		rbw.RepoBuilder = finishedBuilder{}
113
+		return old.Finish()
114
+	}
115
+}
116
+
117
+// repoBuilder actually builds a tuf.Repo
118
+type repoBuilder struct {
119
+	repo *Repo
120
+
121
+	// needed for root trust pininng verification
122
+	gun      string
123
+	trustpin trustpinning.TrustPinConfig
124
+
125
+	// in case we load root and/or targets before snapshot and timestamp (
126
+	// or snapshot and not timestamp), so we know what to verify when the
127
+	// data with checksums come in
128
+	loadedNotChecksummed map[string][]byte
129
+
130
+	// bootstrapped values to validate a new root
131
+	prevRoot                 *data.SignedRoot
132
+	bootstrappedRootChecksum *data.FileMeta
133
+
134
+	// for bootstrapping the next builder
135
+	nextRootChecksum *data.FileMeta
136
+}
137
+
138
+func (rb *repoBuilder) Finish() (*Repo, error) {
139
+	return rb.repo, nil
140
+}
141
+
142
+func (rb *repoBuilder) BootstrapNewBuilder() RepoBuilder {
143
+	return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
144
+		repo:                 NewRepo(rb.repo.cryptoService),
145
+		gun:                  rb.gun,
146
+		loadedNotChecksummed: make(map[string][]byte),
147
+		trustpin:             rb.trustpin,
148
+
149
+		prevRoot:                 rb.repo.Root,
150
+		bootstrappedRootChecksum: rb.nextRootChecksum,
151
+	}}
152
+}
153
+
154
+// IsLoaded returns whether a particular role has already been loaded
155
+func (rb *repoBuilder) IsLoaded(roleName string) bool {
156
+	switch roleName {
157
+	case data.CanonicalRootRole:
158
+		return rb.repo.Root != nil
159
+	case data.CanonicalSnapshotRole:
160
+		return rb.repo.Snapshot != nil
161
+	case data.CanonicalTimestampRole:
162
+		return rb.repo.Timestamp != nil
163
+	default:
164
+		return rb.repo.Targets[roleName] != nil
165
+	}
166
+}
167
+
168
+// GetLoadedVersion returns the metadata version, if it is loaded, or 1 (the
169
+// minimum valid version number) otherwise
170
+func (rb *repoBuilder) GetLoadedVersion(roleName string) int {
171
+	switch {
172
+	case roleName == data.CanonicalRootRole && rb.repo.Root != nil:
173
+		return rb.repo.Root.Signed.Version
174
+	case roleName == data.CanonicalSnapshotRole && rb.repo.Snapshot != nil:
175
+		return rb.repo.Snapshot.Signed.Version
176
+	case roleName == data.CanonicalTimestampRole && rb.repo.Timestamp != nil:
177
+		return rb.repo.Timestamp.Signed.Version
178
+	default:
179
+		if tgts, ok := rb.repo.Targets[roleName]; ok {
180
+			return tgts.Signed.Version
181
+		}
182
+	}
183
+
184
+	return 1
185
+}
186
+
187
+// GetConsistentInfo returns the consistent name and size of a role, if it is known,
188
+// otherwise just the rolename and a -1 for size (both of which are inside a
189
+// ConsistentInfo object)
190
+func (rb *repoBuilder) GetConsistentInfo(roleName string) ConsistentInfo {
191
+	info := ConsistentInfo{RoleName: roleName} // starts out with unknown filemeta
192
+	switch roleName {
193
+	case data.CanonicalTimestampRole:
194
+		// we do not want to get a consistent timestamp, but we do want to
195
+		// limit its size
196
+		info.fileMeta.Length = notary.MaxTimestampSize
197
+	case data.CanonicalSnapshotRole:
198
+		if rb.repo.Timestamp != nil {
199
+			info.fileMeta = rb.repo.Timestamp.Signed.Meta[roleName]
200
+		}
201
+	case data.CanonicalRootRole:
202
+		switch {
203
+		case rb.bootstrappedRootChecksum != nil:
204
+			info.fileMeta = *rb.bootstrappedRootChecksum
205
+		case rb.repo.Snapshot != nil:
206
+			info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName]
207
+		}
208
+	default:
209
+		if rb.repo.Snapshot != nil {
210
+			info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName]
211
+		}
212
+	}
213
+	return info
214
+}
215
+
216
+func (rb *repoBuilder) Load(roleName string, content []byte, minVersion int, allowExpired bool) error {
217
+	if !data.ValidRole(roleName) {
218
+		return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s is an invalid role", roleName)}
219
+	}
220
+
221
+	if rb.IsLoaded(roleName) {
222
+		return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s has already been loaded", roleName)}
223
+	}
224
+
225
+	var err error
226
+	switch roleName {
227
+	case data.CanonicalRootRole:
228
+		break
229
+	case data.CanonicalTimestampRole, data.CanonicalSnapshotRole, data.CanonicalTargetsRole:
230
+		err = rb.checkPrereqsLoaded([]string{data.CanonicalRootRole})
231
+	default: // delegations
232
+		err = rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalTargetsRole})
233
+	}
234
+	if err != nil {
235
+		return err
236
+	}
237
+
238
+	switch roleName {
239
+	case data.CanonicalRootRole:
240
+		return rb.loadRoot(content, minVersion, allowExpired)
241
+	case data.CanonicalSnapshotRole:
242
+		return rb.loadSnapshot(content, minVersion, allowExpired)
243
+	case data.CanonicalTimestampRole:
244
+		return rb.loadTimestamp(content, minVersion, allowExpired)
245
+	case data.CanonicalTargetsRole:
246
+		return rb.loadTargets(content, minVersion, allowExpired)
247
+	default:
248
+		return rb.loadDelegation(roleName, content, minVersion, allowExpired)
249
+	}
250
+}
251
+
252
+func (rb *repoBuilder) checkPrereqsLoaded(prereqRoles []string) error {
253
+	for _, req := range prereqRoles {
254
+		if !rb.IsLoaded(req) {
255
+			return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s must be loaded first", req)}
256
+		}
257
+	}
258
+	return nil
259
+}
260
+
261
+// GenerateSnapshot generates a new snapshot given a previous (optional) snapshot
262
+// We can't just load the previous snapshot, because it may have been signed by a different
263
+// snapshot key (maybe from a previous root version).  Note that we need the root role and
264
+// targets role to be loaded, because we need to generate metadata for both (and we need
265
+// the root to be loaded so we can get the snapshot role to sign with)
266
+func (rb *repoBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) {
267
+	switch {
268
+	case rb.repo.cryptoService == nil:
269
+		return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate snapshot without a cryptoservice"}
270
+	case rb.IsLoaded(data.CanonicalSnapshotRole):
271
+		return nil, 0, ErrInvalidBuilderInput{msg: "snapshot has already been loaded"}
272
+	case rb.IsLoaded(data.CanonicalTimestampRole):
273
+		return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate snapshot if timestamp has already been loaded"}
274
+	}
275
+
276
+	if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole}); err != nil {
277
+		return nil, 0, err
278
+	}
279
+
280
+	// If there is no previous snapshot, we need to generate one, and so the targets must
281
+	// have already been loaded.  Otherwise, so long as the previous snapshot structure is
282
+	// valid (it has a targets meta), we're good.
283
+	switch prev {
284
+	case nil:
285
+		if err := rb.checkPrereqsLoaded([]string{data.CanonicalTargetsRole}); err != nil {
286
+			return nil, 0, err
287
+		}
288
+
289
+		if err := rb.repo.InitSnapshot(); err != nil {
290
+			rb.repo.Snapshot = nil
291
+			return nil, 0, err
292
+		}
293
+	default:
294
+		if err := data.IsValidSnapshotStructure(prev.Signed); err != nil {
295
+			return nil, 0, err
296
+		}
297
+		rb.repo.Snapshot = prev
298
+	}
299
+
300
+	sgnd, err := rb.repo.SignSnapshot(data.DefaultExpires(data.CanonicalSnapshotRole))
301
+	if err != nil {
302
+		rb.repo.Snapshot = nil
303
+		return nil, 0, err
304
+	}
305
+
306
+	sgndJSON, err := json.Marshal(sgnd)
307
+	if err != nil {
308
+		rb.repo.Snapshot = nil
309
+		return nil, 0, err
310
+	}
311
+
312
+	// loadedNotChecksummed should currently contain the root awaiting checksumming,
313
+	// since it has to have been loaded.  Since the snapshot was generated using
314
+	// the root and targets data (there may not be any) that that have been loaded,
315
+	// remove all of them from rb.loadedNotChecksummed
316
+	for tgtName := range rb.repo.Targets {
317
+		delete(rb.loadedNotChecksummed, tgtName)
318
+	}
319
+	delete(rb.loadedNotChecksummed, data.CanonicalRootRole)
320
+
321
+	// The timestamp can't have been loaded yet, so we want to cache the snapshot
322
+	// bytes so we can validate the checksum when a timestamp gets generated or
323
+	// loaded later.
324
+	rb.loadedNotChecksummed[data.CanonicalSnapshotRole] = sgndJSON
325
+
326
+	return sgndJSON, rb.repo.Snapshot.Signed.Version, nil
327
+}
328
+
329
+// GenerateTimestamp generates a new timestamp given a previous (optional) timestamp
330
+// We can't just load the previous timestamp, because it may have been signed by a different
331
+// timestamp key (maybe from a previous root version)
332
+func (rb *repoBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) {
333
+	switch {
334
+	case rb.repo.cryptoService == nil:
335
+		return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate timestamp without a cryptoservice"}
336
+	case rb.IsLoaded(data.CanonicalTimestampRole):
337
+		return nil, 0, ErrInvalidBuilderInput{msg: "timestamp has already been loaded"}
338
+	}
339
+
340
+	// SignTimetamp always serializes the loaded snapshot and signs in the data, so we must always
341
+	// have the snapshot loaded first
342
+	if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalSnapshotRole}); err != nil {
343
+		return nil, 0, err
344
+	}
345
+
346
+	switch prev {
347
+	case nil:
348
+		if err := rb.repo.InitTimestamp(); err != nil {
349
+			rb.repo.Timestamp = nil
350
+			return nil, 0, err
351
+		}
352
+	default:
353
+		if err := data.IsValidTimestampStructure(prev.Signed); err != nil {
354
+			return nil, 0, err
355
+		}
356
+		rb.repo.Timestamp = prev
357
+	}
358
+
359
+	sgnd, err := rb.repo.SignTimestamp(data.DefaultExpires(data.CanonicalTimestampRole))
360
+	if err != nil {
361
+		rb.repo.Timestamp = nil
362
+		return nil, 0, err
363
+	}
364
+
365
+	sgndJSON, err := json.Marshal(sgnd)
366
+	if err != nil {
367
+		rb.repo.Timestamp = nil
368
+		return nil, 0, err
369
+	}
370
+
371
+	// The snapshot should have been loaded (and not checksummed, since a timestamp
372
+	// cannot have been loaded), so it is awaiting checksumming. Since this
373
+	// timestamp was generated using the snapshot awaiting checksumming, we can
374
+	// remove it from rb.loadedNotChecksummed. There should be no other items
375
+	// awaiting checksumming now since loading/generating a snapshot should have
376
+	// cleared out everything else in `loadNotChecksummed`.
377
+	delete(rb.loadedNotChecksummed, data.CanonicalSnapshotRole)
378
+
379
+	return sgndJSON, rb.repo.Timestamp.Signed.Version, nil
380
+}
381
+
382
+// loadRoot loads a root if one has not been loaded
383
+func (rb *repoBuilder) loadRoot(content []byte, minVersion int, allowExpired bool) error {
384
+	roleName := data.CanonicalRootRole
385
+
386
+	signedObj, err := rb.bytesToSigned(content, data.CanonicalRootRole)
387
+	if err != nil {
388
+		return err
389
+	}
390
+	// ValidateRoot validates against the previous root's role, as well as validates that the root
391
+	// itself is self-consistent with its own signatures and thresholds.
392
+	// This assumes that ValidateRoot calls data.RootFromSigned, which validates
393
+	// the metadata, rather than just unmarshalling signedObject into a SignedRoot object itself.
394
+	signedRoot, err := trustpinning.ValidateRoot(rb.prevRoot, signedObj, rb.gun, rb.trustpin)
395
+	if err != nil {
396
+		return err
397
+	}
398
+
399
+	if err := signed.VerifyVersion(&(signedRoot.Signed.SignedCommon), minVersion); err != nil {
400
+		return err
401
+	}
402
+
403
+	if !allowExpired { // check must go at the end because all other validation should pass
404
+		if err := signed.VerifyExpiry(&(signedRoot.Signed.SignedCommon), roleName); err != nil {
405
+			return err
406
+		}
407
+	}
408
+
409
+	rootRole, err := signedRoot.BuildBaseRole(data.CanonicalRootRole)
410
+	if err != nil { // this should never happen since the root has been validated
411
+		return err
412
+	}
413
+
414
+	rb.repo.Root = signedRoot
415
+	rb.repo.originalRootRole = rootRole
416
+	return nil
417
+}
418
+
419
+func (rb *repoBuilder) loadTimestamp(content []byte, minVersion int, allowExpired bool) error {
420
+	roleName := data.CanonicalTimestampRole
421
+
422
+	timestampRole, err := rb.repo.Root.BuildBaseRole(roleName)
423
+	if err != nil { // this should never happen, since it's already been validated
424
+		return err
425
+	}
426
+
427
+	signedObj, err := rb.bytesToSignedAndValidateSigs(timestampRole, content)
428
+	if err != nil {
429
+		return err
430
+	}
431
+
432
+	signedTimestamp, err := data.TimestampFromSigned(signedObj)
433
+	if err != nil {
434
+		return err
435
+	}
436
+
437
+	if err := signed.VerifyVersion(&(signedTimestamp.Signed.SignedCommon), minVersion); err != nil {
438
+		return err
439
+	}
440
+
441
+	if !allowExpired { // check must go at the end because all other validation should pass
442
+		if err := signed.VerifyExpiry(&(signedTimestamp.Signed.SignedCommon), roleName); err != nil {
443
+			return err
444
+		}
445
+	}
446
+
447
+	if err := rb.validateChecksumsFromTimestamp(signedTimestamp); err != nil {
448
+		return err
449
+	}
450
+
451
+	rb.repo.Timestamp = signedTimestamp
452
+	return nil
453
+}
454
+
455
+func (rb *repoBuilder) loadSnapshot(content []byte, minVersion int, allowExpired bool) error {
456
+	roleName := data.CanonicalSnapshotRole
457
+
458
+	snapshotRole, err := rb.repo.Root.BuildBaseRole(roleName)
459
+	if err != nil { // this should never happen, since it's already been validated
460
+		return err
461
+	}
462
+
463
+	signedObj, err := rb.bytesToSignedAndValidateSigs(snapshotRole, content)
464
+	if err != nil {
465
+		return err
466
+	}
467
+
468
+	signedSnapshot, err := data.SnapshotFromSigned(signedObj)
469
+	if err != nil {
470
+		return err
471
+	}
472
+
473
+	if err := signed.VerifyVersion(&(signedSnapshot.Signed.SignedCommon), minVersion); err != nil {
474
+		return err
475
+	}
476
+
477
+	if !allowExpired { // check must go at the end because all other validation should pass
478
+		if err := signed.VerifyExpiry(&(signedSnapshot.Signed.SignedCommon), roleName); err != nil {
479
+			return err
480
+		}
481
+	}
482
+
483
+	// at this point, the only thing left to validate is existing checksums - we can use
484
+	// this snapshot to bootstrap the next builder if needed - and we don't need to do
485
+	// the 2-value assignment since we've already validated the signedSnapshot, which MUST
486
+	// have root metadata
487
+	rootMeta := signedSnapshot.Signed.Meta[data.CanonicalRootRole]
488
+	rb.nextRootChecksum = &rootMeta
489
+
490
+	if err := rb.validateChecksumsFromSnapshot(signedSnapshot); err != nil {
491
+		return err
492
+	}
493
+
494
+	rb.repo.Snapshot = signedSnapshot
495
+	return nil
496
+}
497
+
498
+func (rb *repoBuilder) loadTargets(content []byte, minVersion int, allowExpired bool) error {
499
+	roleName := data.CanonicalTargetsRole
500
+
501
+	targetsRole, err := rb.repo.Root.BuildBaseRole(roleName)
502
+	if err != nil { // this should never happen, since it's already been validated
503
+		return err
504
+	}
505
+
506
+	signedObj, err := rb.bytesToSignedAndValidateSigs(targetsRole, content)
507
+	if err != nil {
508
+		return err
509
+	}
510
+
511
+	signedTargets, err := data.TargetsFromSigned(signedObj, roleName)
512
+	if err != nil {
513
+		return err
514
+	}
515
+
516
+	if err := signed.VerifyVersion(&(signedTargets.Signed.SignedCommon), minVersion); err != nil {
517
+		return err
518
+	}
519
+
520
+	if !allowExpired { // check must go at the end because all other validation should pass
521
+		if err := signed.VerifyExpiry(&(signedTargets.Signed.SignedCommon), roleName); err != nil {
522
+			return err
523
+		}
524
+	}
525
+
526
+	rb.repo.Targets[roleName] = signedTargets
527
+	return nil
528
+}
529
+
530
+func (rb *repoBuilder) loadDelegation(roleName string, content []byte, minVersion int, allowExpired bool) error {
531
+	delegationRole, err := rb.repo.GetDelegationRole(roleName)
532
+	if err != nil {
533
+		return err
534
+	}
535
+
536
+	signedObj, err := rb.bytesToSignedAndValidateSigs(delegationRole.BaseRole, content)
537
+	if err != nil {
538
+		return err
539
+	}
540
+
541
+	signedTargets, err := data.TargetsFromSigned(signedObj, roleName)
542
+	if err != nil {
543
+		return err
544
+	}
545
+
546
+	if err := signed.VerifyVersion(&(signedTargets.Signed.SignedCommon), minVersion); err != nil {
547
+		return err
548
+	}
549
+
550
+	if !allowExpired { // check must go at the end because all other validation should pass
551
+		if err := signed.VerifyExpiry(&(signedTargets.Signed.SignedCommon), roleName); err != nil {
552
+			return err
553
+		}
554
+	}
555
+
556
+	rb.repo.Targets[roleName] = signedTargets
557
+	return nil
558
+}
559
+
560
+func (rb *repoBuilder) validateChecksumsFromTimestamp(ts *data.SignedTimestamp) error {
561
+	sn, ok := rb.loadedNotChecksummed[data.CanonicalSnapshotRole]
562
+	if ok {
563
+		// by this point, the SignedTimestamp has been validated so it must have a snapshot hash
564
+		snMeta := ts.Signed.Meta[data.CanonicalSnapshotRole].Hashes
565
+		if err := data.CheckHashes(sn, data.CanonicalSnapshotRole, snMeta); err != nil {
566
+			return err
567
+		}
568
+		delete(rb.loadedNotChecksummed, data.CanonicalSnapshotRole)
569
+	}
570
+	return nil
571
+}
572
+
573
+func (rb *repoBuilder) validateChecksumsFromSnapshot(sn *data.SignedSnapshot) error {
574
+	var goodRoles []string
575
+	for roleName, loadedBytes := range rb.loadedNotChecksummed {
576
+		switch roleName {
577
+		case data.CanonicalSnapshotRole, data.CanonicalTimestampRole:
578
+			break
579
+		default:
580
+			if err := data.CheckHashes(loadedBytes, roleName, sn.Signed.Meta[roleName].Hashes); err != nil {
581
+				return err
582
+			}
583
+			goodRoles = append(goodRoles, roleName)
584
+		}
585
+	}
586
+	for _, roleName := range goodRoles {
587
+		delete(rb.loadedNotChecksummed, roleName)
588
+	}
589
+	return nil
590
+}
591
+
592
+func (rb *repoBuilder) validateChecksumFor(content []byte, roleName string) error {
593
+	// validate the bootstrap checksum for root, if provided
594
+	if roleName == data.CanonicalRootRole && rb.bootstrappedRootChecksum != nil {
595
+		if err := data.CheckHashes(content, roleName, rb.bootstrappedRootChecksum.Hashes); err != nil {
596
+			return err
597
+		}
598
+	}
599
+
600
+	// but we also want to cache the root content, so that when the snapshot is
601
+	// loaded it is validated (to make sure everything in the repo is self-consistent)
602
+	checksums := rb.getChecksumsFor(roleName)
603
+	if checksums != nil { // as opposed to empty, in which case hash check should fail
604
+		if err := data.CheckHashes(content, roleName, *checksums); err != nil {
605
+			return err
606
+		}
607
+	} else if roleName != data.CanonicalTimestampRole {
608
+		// timestamp is the only role which does not need to be checksummed, but
609
+		// for everything else, cache the contents in the list of roles that have
610
+		// not been checksummed by the snapshot/timestamp yet
611
+		rb.loadedNotChecksummed[roleName] = content
612
+	}
613
+
614
+	return nil
615
+}
616
+
617
+// Checksums the given bytes, and if they validate, convert to a data.Signed object.
618
+// If a checksums are nil (as opposed to empty), adds the bytes to the list of roles that
619
+// haven't been checksummed (unless it's a timestamp, which has no checksum reference).
620
+func (rb *repoBuilder) bytesToSigned(content []byte, roleName string) (*data.Signed, error) {
621
+	if err := rb.validateChecksumFor(content, roleName); err != nil {
622
+		return nil, err
623
+	}
624
+
625
+	// unmarshal to signed
626
+	signedObj := &data.Signed{}
627
+	if err := json.Unmarshal(content, signedObj); err != nil {
628
+		return nil, err
629
+	}
630
+
631
+	return signedObj, nil
632
+}
633
+
634
+func (rb *repoBuilder) bytesToSignedAndValidateSigs(role data.BaseRole, content []byte) (*data.Signed, error) {
635
+
636
+	signedObj, err := rb.bytesToSigned(content, role.Name)
637
+	if err != nil {
638
+		return nil, err
639
+	}
640
+
641
+	// verify signature
642
+	if err := signed.VerifySignatures(signedObj, role); err != nil {
643
+		return nil, err
644
+	}
645
+
646
+	return signedObj, nil
647
+}
648
+
649
+// If the checksum reference (the loaded timestamp for the snapshot role, and
650
+// the loaded snapshot for every other role except timestamp and snapshot) is nil,
651
+// then return nil for the checksums, meaning that the checksum is not yet
652
+// available.  If the checksum reference *is* loaded, then always returns the
653
+// Hashes object for the given role - if it doesn't exist, returns an empty Hash
654
+// object (against which any checksum validation would fail).
655
+func (rb *repoBuilder) getChecksumsFor(role string) *data.Hashes {
656
+	var hashes data.Hashes
657
+	switch role {
658
+	case data.CanonicalTimestampRole:
659
+		return nil
660
+	case data.CanonicalSnapshotRole:
661
+		if rb.repo.Timestamp == nil {
662
+			return nil
663
+		}
664
+		hashes = rb.repo.Timestamp.Signed.Meta[data.CanonicalSnapshotRole].Hashes
665
+	default:
666
+		if rb.repo.Snapshot == nil {
667
+			return nil
668
+		}
669
+		hashes = rb.repo.Snapshot.Signed.Meta[role].Hashes
670
+	}
671
+	return &hashes
672
+}
... ...
@@ -2,36 +2,34 @@ package client
2 2
 
3 3
 import (
4 4
 	"encoding/json"
5
-	"fmt"
6
-	"path"
7 5
 
8 6
 	"github.com/Sirupsen/logrus"
9 7
 	"github.com/docker/notary"
10 8
 	tuf "github.com/docker/notary/tuf"
11 9
 	"github.com/docker/notary/tuf/data"
12
-	"github.com/docker/notary/tuf/signed"
13 10
 	"github.com/docker/notary/tuf/store"
14
-	"github.com/docker/notary/tuf/utils"
15 11
 )
16 12
 
17 13
 // Client is a usability wrapper around a raw TUF repo
18 14
 type Client struct {
19
-	local  *tuf.Repo
20
-	remote store.RemoteStore
21
-	cache  store.MetadataStore
15
+	remote     store.RemoteStore
16
+	cache      store.MetadataStore
17
+	oldBuilder tuf.RepoBuilder
18
+	newBuilder tuf.RepoBuilder
22 19
 }
23 20
 
24 21
 // NewClient initialized a Client with the given repo, remote source of content, and cache
25
-func NewClient(local *tuf.Repo, remote store.RemoteStore, cache store.MetadataStore) *Client {
22
+func NewClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *Client {
26 23
 	return &Client{
27
-		local:  local,
28
-		remote: remote,
29
-		cache:  cache,
24
+		oldBuilder: oldBuilder,
25
+		newBuilder: newBuilder,
26
+		remote:     remote,
27
+		cache:      cache,
30 28
 	}
31 29
 }
32 30
 
33 31
 // Update performs an update to the TUF repo as defined by the TUF spec
34
-func (c *Client) Update() error {
32
+func (c *Client) Update() (*tuf.Repo, error) {
35 33
 	// 1. Get timestamp
36 34
 	//   a. If timestamp error (verification, expired, etc...) download new root and return to 1.
37 35
 	// 2. Check if local snapshot is up to date
... ...
@@ -44,503 +42,188 @@ func (c *Client) Update() error {
44 44
 	err := c.update()
45 45
 	if err != nil {
46 46
 		logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
47
+		logrus.Debug("Resetting the TUF builder...")
48
+
49
+		c.newBuilder = c.newBuilder.BootstrapNewBuilder()
50
+
47 51
 		if err := c.downloadRoot(); err != nil {
48 52
 			logrus.Debug("Client Update (Root):", err)
49
-			return err
53
+			return nil, err
50 54
 		}
51 55
 		// If we error again, we now have the latest root and just want to fail
52 56
 		// out as there's no expectation the problem can be resolved automatically
53 57
 		logrus.Debug("retrying TUF client update")
54
-		return c.update()
58
+		if err := c.update(); err != nil {
59
+			return nil, err
60
+		}
55 61
 	}
56
-	return nil
62
+	return c.newBuilder.Finish()
57 63
 }
58 64
 
59 65
 func (c *Client) update() error {
60
-	err := c.downloadTimestamp()
61
-	if err != nil {
66
+	if err := c.downloadTimestamp(); err != nil {
62 67
 		logrus.Debugf("Client Update (Timestamp): %s", err.Error())
63 68
 		return err
64 69
 	}
65
-	err = c.downloadSnapshot()
66
-	if err != nil {
70
+	if err := c.downloadSnapshot(); err != nil {
67 71
 		logrus.Debugf("Client Update (Snapshot): %s", err.Error())
68 72
 		return err
69 73
 	}
70
-	err = c.checkRoot()
71
-	if err != nil {
72
-		// In this instance the root has not expired base on time, but is
73
-		// expired based on the snapshot dictating a new root has been produced.
74
-		logrus.Debug(err)
75
-		return err
76
-	}
77 74
 	// will always need top level targets at a minimum
78
-	err = c.downloadTargets(data.CanonicalTargetsRole)
79
-	if err != nil {
75
+	if err := c.downloadTargets(); err != nil {
80 76
 		logrus.Debugf("Client Update (Targets): %s", err.Error())
81 77
 		return err
82 78
 	}
83 79
 	return nil
84 80
 }
85 81
 
86
-// checkRoot determines if the hash, and size are still those reported
87
-// in the snapshot file. It will also check the expiry, however, if the
88
-// hash and size in snapshot are unchanged but the root file has expired,
89
-// there is little expectation that the situation can be remedied.
90
-func (c Client) checkRoot() error {
91
-	role := data.CanonicalRootRole
92
-	size := c.local.Snapshot.Signed.Meta[role].Length
93
-
94
-	expectedHashes := c.local.Snapshot.Signed.Meta[role].Hashes
95
-
96
-	raw, err := c.cache.GetMeta(data.CanonicalRootRole, size)
97
-	if err != nil {
98
-		return err
99
-	}
100
-
101
-	if err := data.CheckHashes(raw, expectedHashes); err != nil {
102
-		return fmt.Errorf("Cached root hashes did not match snapshot root hashes")
103
-	}
104
-
105
-	if int64(len(raw)) != size {
106
-		return fmt.Errorf("Cached root size did not match snapshot size")
107
-	}
108
-
109
-	root := &data.SignedRoot{}
110
-	err = json.Unmarshal(raw, root)
111
-	if err != nil {
112
-		return ErrCorruptedCache{file: "root.json"}
113
-	}
114
-
115
-	if signed.IsExpired(root.Signed.Expires) {
116
-		return tuf.ErrLocalRootExpired{}
117
-	}
118
-	return nil
119
-}
120
-
121 82
 // downloadRoot is responsible for downloading the root.json
122 83
 func (c *Client) downloadRoot() error {
123
-	logrus.Debug("Downloading Root...")
124 84
 	role := data.CanonicalRootRole
85
+	consistentInfo := c.newBuilder.GetConsistentInfo(role)
86
+
125 87
 	// We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle
126 88
 	// since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch
127
-	var size int64 = -1
128
-
129
-	// We could not expect what the "snapshot" meta has specified.
130
-	//
131
-	// In some old clients, there is only the "sha256",
132
-	// but both "sha256" and "sha512" in the newer ones.
133
-	//
134
-	// And possibly more in the future.
135
-	var expectedHashes data.Hashes
136
-
137
-	if c.local.Snapshot != nil {
138
-		if prevRootMeta, ok := c.local.Snapshot.Signed.Meta[role]; ok {
139
-			size = prevRootMeta.Length
140
-			expectedHashes = prevRootMeta.Hashes
141
-		}
142
-	}
143
-
144
-	// if we're bootstrapping we may not have a cached root, an
145
-	// error will result in the "previous root version" being
146
-	// interpreted as 0.
147
-	var download bool
148
-	var err error
149
-	var cachedRoot []byte
150
-	old := &data.Signed{}
151
-	version := 0
152
-
153
-	// Due to the same reason, we don't really know how many hashes are there.
154
-	if len(expectedHashes) != 0 {
155
-		// can only trust cache if we have an expected sha256(for example) to trust
156
-		cachedRoot, err = c.cache.GetMeta(role, size)
157
-	}
158
-
159
-	if cachedRoot == nil || err != nil {
160
-		logrus.Debug("didn't find a cached root, must download")
161
-		download = true
162
-	} else {
163
-		if err := data.CheckHashes(cachedRoot, expectedHashes); err != nil {
164
-			logrus.Debug("cached root's hash didn't match expected, must download")
165
-			download = true
166
-		}
167
-
168
-		err := json.Unmarshal(cachedRoot, old)
169
-		if err == nil {
170
-			root, err := data.RootFromSigned(old)
171
-			if err == nil {
172
-				version = root.Signed.Version
173
-			} else {
174
-				logrus.Debug("couldn't parse Signed part of cached root, must download")
175
-				download = true
176
-			}
177
-		} else {
178
-			logrus.Debug("couldn't parse cached root, must download")
179
-			download = true
180
-		}
181
-	}
182
-	var s *data.Signed
183
-	var raw []byte
184
-	if download {
185
-		// use consistent download if we have the checksum.
186
-		raw, s, err = c.downloadSigned(role, size, expectedHashes)
187
-		if err != nil {
188
-			return err
189
-		}
190
-	} else {
191
-		logrus.Debug("using cached root")
192
-		s = old
193
-	}
194
-	if err := c.verifyRoot(role, s, version); err != nil {
195
-		return err
196
-	}
197
-	if download {
198
-		logrus.Debug("caching downloaded root")
199
-		// Now that we have accepted new root, write it to cache
200
-		if err = c.cache.SetMeta(role, raw); err != nil {
201
-			logrus.Errorf("Failed to write root to local cache: %s", err.Error())
202
-		}
203
-	}
204
-	return nil
205
-}
89
+	if !consistentInfo.ChecksumKnown() {
90
+		logrus.Debugf("Loading root with no expected checksum")
206 91
 
207
-func (c Client) verifyRoot(role string, s *data.Signed, minVersion int) error {
208
-	// this will confirm that the root has been signed by the old root role
209
-	// with the root keys we bootstrapped with.
210
-	// Still need to determine if there has been a root key update and
211
-	// confirm signature with new root key
212
-	logrus.Debug("verifying root with existing keys")
213
-	rootRole, err := c.local.GetBaseRole(role)
214
-	if err != nil {
215
-		logrus.Debug("no previous root role loaded")
216
-		return err
217
-	}
218
-	// Verify using the rootRole loaded from the known root.json
219
-	if err = signed.Verify(s, rootRole, minVersion); err != nil {
220
-		logrus.Debug("root did not verify with existing keys")
221
-		return err
92
+		// get the cached root, if it exists, just for version checking
93
+		cachedRoot, _ := c.cache.GetMeta(role, -1)
94
+		// prefer to download a new root
95
+		_, remoteErr := c.tryLoadRemote(consistentInfo, cachedRoot)
96
+		return remoteErr
222 97
 	}
223 98
 
224
-	logrus.Debug("updating known root roles and keys")
225
-	root, err := data.RootFromSigned(s)
226
-	if err != nil {
227
-		logrus.Error(err.Error())
228
-		return err
229
-	}
230
-	// replace the existing root.json with the new one (just in memory, we
231
-	// have another validation step before we fully accept the new root)
232
-	err = c.local.SetRoot(root)
233
-	if err != nil {
234
-		logrus.Error(err.Error())
235
-		return err
236
-	}
237
-	// Verify the new root again having loaded the rootRole out of this new
238
-	// file (verifies self-referential integrity)
239
-	// TODO(endophage): be more intelligent and only re-verify if we detect
240
-	//                  there has been a change in root keys
241
-	logrus.Debug("verifying root with updated keys")
242
-	rootRole, err = c.local.GetBaseRole(role)
243
-	if err != nil {
244
-		logrus.Debug("root role with new keys not loaded")
245
-		return err
246
-	}
247
-	err = signed.Verify(s, rootRole, minVersion)
248
-	if err != nil {
249
-		logrus.Debug("root did not verify with new keys")
250
-		return err
251
-	}
252
-	logrus.Debug("successfully verified root")
253
-	return nil
99
+	_, err := c.tryLoadCacheThenRemote(consistentInfo)
100
+	return err
254 101
 }
255 102
 
256 103
 // downloadTimestamp is responsible for downloading the timestamp.json
257 104
 // Timestamps are special in that we ALWAYS attempt to download and only
258 105
 // use cache if the download fails (and the cache is still valid).
259 106
 func (c *Client) downloadTimestamp() error {
260
-	logrus.Debug("Downloading Timestamp...")
107
+	logrus.Debug("Loading timestamp...")
261 108
 	role := data.CanonicalTimestampRole
109
+	consistentInfo := c.newBuilder.GetConsistentInfo(role)
262 110
 
263
-	// We may not have a cached timestamp if this is the first time
264
-	// we're interacting with the repo. This will result in the
265
-	// version being 0
266
-	var (
267
-		old     *data.Signed
268
-		ts      *data.SignedTimestamp
269
-		version = 0
270
-	)
271
-	cachedTS, err := c.cache.GetMeta(role, notary.MaxTimestampSize)
272
-	if err == nil {
273
-		cached := &data.Signed{}
274
-		err := json.Unmarshal(cachedTS, cached)
275
-		if err == nil {
276
-			ts, err := data.TimestampFromSigned(cached)
277
-			if err == nil {
278
-				version = ts.Signed.Version
279
-			}
280
-			old = cached
281
-		}
282
-	}
283
-	// unlike root, targets and snapshot, always try and download timestamps
284
-	// from remote, only using the cache one if we couldn't reach remote.
285
-	raw, s, err := c.downloadSigned(role, notary.MaxTimestampSize, nil)
286
-	if err == nil {
287
-		ts, err = c.verifyTimestamp(s, version)
111
+	// get the cached timestamp, if it exists
112
+	cachedTS, cachedErr := c.cache.GetMeta(role, notary.MaxTimestampSize)
113
+	// always get the remote timestamp, since it supercedes the local one
114
+	_, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS)
115
+
116
+	switch {
117
+	case remoteErr == nil:
118
+		return nil
119
+	case cachedErr == nil:
120
+		logrus.Debug(remoteErr.Error())
121
+		logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
122
+
123
+		err := c.newBuilder.Load(role, cachedTS, 1, false)
288 124
 		if err == nil {
289
-			logrus.Debug("successfully verified downloaded timestamp")
290
-			c.cache.SetMeta(role, raw)
291
-			c.local.SetTimestamp(ts)
292
-			return nil
125
+			logrus.Debug("successfully verified cached timestamp")
293 126
 		}
294
-	}
295
-	if old == nil {
296
-		// couldn't retrieve valid data from server and don't have unmarshallable data in cache.
297
-		logrus.Debug("no cached timestamp available")
298 127
 		return err
128
+	default:
129
+		logrus.Debug("no cached or remote timestamp available")
130
+		return remoteErr
299 131
 	}
300
-	logrus.Debug(err.Error())
301
-	logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
302
-	ts, err = c.verifyTimestamp(old, version)
303
-	if err != nil {
304
-		return err
305
-	}
306
-	logrus.Debug("successfully verified cached timestamp")
307
-	c.local.SetTimestamp(ts)
308
-	return nil
309
-}
310
-
311
-// verifies that a timestamp is valid, and returned the SignedTimestamp object to add to the tuf repo
312
-func (c *Client) verifyTimestamp(s *data.Signed, minVersion int) (*data.SignedTimestamp, error) {
313
-	timestampRole, err := c.local.GetBaseRole(data.CanonicalTimestampRole)
314
-	if err != nil {
315
-		logrus.Debug("no timestamp role loaded")
316
-		return nil, err
317
-	}
318
-	if err := signed.Verify(s, timestampRole, minVersion); err != nil {
319
-		return nil, err
320
-	}
321
-	return data.TimestampFromSigned(s)
322 132
 }
323 133
 
324 134
 // downloadSnapshot is responsible for downloading the snapshot.json
325 135
 func (c *Client) downloadSnapshot() error {
326
-	logrus.Debug("Downloading Snapshot...")
136
+	logrus.Debug("Loading snapshot...")
327 137
 	role := data.CanonicalSnapshotRole
328
-	if c.local.Timestamp == nil {
329
-		return tuf.ErrNotLoaded{Role: data.CanonicalTimestampRole}
330
-	}
331
-	size := c.local.Timestamp.Signed.Meta[role].Length
332
-	expectedHashes := c.local.Timestamp.Signed.Meta[role].Hashes
333
-	if len(expectedHashes) == 0 {
334
-		return data.ErrMissingMeta{Role: data.CanonicalSnapshotRole}
335
-	}
336
-
337
-	var download bool
338
-	old := &data.Signed{}
339
-	version := 0
340
-	raw, err := c.cache.GetMeta(role, size)
341
-	if raw == nil || err != nil {
342
-		logrus.Debug("no snapshot in cache, must download")
343
-		download = true
344
-	} else {
345
-		// file may have been tampered with on disk. Always check the hash!
346
-		if err := data.CheckHashes(raw, expectedHashes); err != nil {
347
-			logrus.Debug("hash of snapshot in cache did not match expected hash, must download")
348
-			download = true
349
-		}
350
-
351
-		err := json.Unmarshal(raw, old)
352
-		if err == nil {
353
-			snap, err := data.SnapshotFromSigned(old)
354
-			if err == nil {
355
-				version = snap.Signed.Version
356
-			} else {
357
-				logrus.Debug("Could not parse Signed part of snapshot, must download")
358
-				download = true
359
-			}
360
-		} else {
361
-			logrus.Debug("Could not parse snapshot, must download")
362
-			download = true
363
-		}
364
-	}
365
-	var s *data.Signed
366
-	if download {
367
-		raw, s, err = c.downloadSigned(role, size, expectedHashes)
368
-		if err != nil {
369
-			return err
370
-		}
371
-	} else {
372
-		logrus.Debug("using cached snapshot")
373
-		s = old
374
-	}
138
+	consistentInfo := c.newBuilder.GetConsistentInfo(role)
375 139
 
376
-	snapshotRole, err := c.local.GetBaseRole(role)
377
-	if err != nil {
378
-		logrus.Debug("no snapshot role loaded")
379
-		return err
380
-	}
381
-	err = signed.Verify(s, snapshotRole, version)
382
-	if err != nil {
383
-		return err
384
-	}
385
-	logrus.Debug("successfully verified snapshot")
386
-	snap, err := data.SnapshotFromSigned(s)
387
-	if err != nil {
388
-		return err
389
-	}
390
-	c.local.SetSnapshot(snap)
391
-	if download {
392
-		err = c.cache.SetMeta(role, raw)
393
-		if err != nil {
394
-			logrus.Errorf("Failed to write snapshot to local cache: %s", err.Error())
395
-		}
396
-	}
397
-	return nil
140
+	_, err := c.tryLoadCacheThenRemote(consistentInfo)
141
+	return err
398 142
 }
399 143
 
400 144
 // downloadTargets downloads all targets and delegated targets for the repository.
401 145
 // It uses a pre-order tree traversal as it's necessary to download parents first
402 146
 // to obtain the keys to validate children.
403
-func (c *Client) downloadTargets(role string) error {
404
-	logrus.Debug("Downloading Targets...")
405
-	stack := utils.NewStack()
406
-	stack.Push(role)
407
-	for !stack.Empty() {
408
-		role, err := stack.PopString()
147
+func (c *Client) downloadTargets() error {
148
+	toDownload := []data.DelegationRole{{
149
+		BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole},
150
+		Paths:    []string{""},
151
+	}}
152
+	for len(toDownload) > 0 {
153
+		role := toDownload[0]
154
+		toDownload = toDownload[1:]
155
+
156
+		consistentInfo := c.newBuilder.GetConsistentInfo(role.Name)
157
+		if !consistentInfo.ChecksumKnown() {
158
+			logrus.Debugf("skipping %s because there is no checksum for it", role.Name)
159
+			continue
160
+		}
161
+
162
+		children, err := c.getTargetsFile(role, consistentInfo)
409 163
 		if err != nil {
410
-			return err
411
-		}
412
-		if c.local.Snapshot == nil {
413
-			return tuf.ErrNotLoaded{Role: data.CanonicalSnapshotRole}
414
-		}
415
-		snap := c.local.Snapshot.Signed
416
-		root := c.local.Root.Signed
417
-
418
-		s, err := c.getTargetsFile(role, snap.Meta, root.ConsistentSnapshot)
419
-		if err != nil {
420
-			if _, ok := err.(data.ErrMissingMeta); ok && role != data.CanonicalTargetsRole {
164
+			if _, ok := err.(data.ErrMissingMeta); ok && role.Name != data.CanonicalTargetsRole {
421 165
 				// if the role meta hasn't been published,
422 166
 				// that's ok, continue
423 167
 				continue
424 168
 			}
425
-			logrus.Error("Error getting targets file:", err)
426
-			return err
427
-		}
428
-		t, err := data.TargetsFromSigned(s, role)
429
-		if err != nil {
169
+			logrus.Debugf("Error getting %s: %s", role.Name, err)
430 170
 			return err
431 171
 		}
432
-		err = c.local.SetTargets(role, t)
433
-		if err != nil {
434
-			return err
435
-		}
436
-
437
-		// push delegated roles contained in the targets file onto the stack
438
-		for _, r := range t.Signed.Delegations.Roles {
439
-			if path.Dir(r.Name) == role {
440
-				// only load children that are direct 1st generation descendants
441
-				// of the role we've just downloaded
442
-				stack.Push(r.Name)
443
-			}
444
-		}
172
+		toDownload = append(children, toDownload...)
445 173
 	}
446 174
 	return nil
447 175
 }
448 176
 
449
-func (c *Client) downloadSigned(role string, size int64, expectedHashes data.Hashes) ([]byte, *data.Signed, error) {
450
-	rolePath := utils.ConsistentName(role, expectedHashes["sha256"])
451
-	raw, err := c.remote.GetMeta(rolePath, size)
177
+func (c Client) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) {
178
+	logrus.Debugf("Loading %s...", role.Name)
179
+	tgs := &data.SignedTargets{}
180
+
181
+	raw, err := c.tryLoadCacheThenRemote(ci)
452 182
 	if err != nil {
453
-		return nil, nil, err
183
+		return nil, err
454 184
 	}
455 185
 
456
-	if expectedHashes != nil {
457
-		if err := data.CheckHashes(raw, expectedHashes); err != nil {
458
-			return nil, nil, ErrChecksumMismatch{role: role}
459
-		}
460
-	}
186
+	// we know it unmarshals because if `tryLoadCacheThenRemote` didn't fail, then
187
+	// the raw has already been loaded into the builder
188
+	json.Unmarshal(raw, tgs)
189
+	return tgs.GetValidDelegations(role), nil
190
+}
461 191
 
462
-	s := &data.Signed{}
463
-	err = json.Unmarshal(raw, s)
192
+func (c *Client) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) {
193
+	cachedTS, err := c.cache.GetMeta(consistentInfo.RoleName, consistentInfo.Length())
464 194
 	if err != nil {
465
-		return nil, nil, err
195
+		logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName)
196
+		return c.tryLoadRemote(consistentInfo, nil)
466 197
 	}
467
-	return raw, s, nil
468
-}
469 198
 
470
-func (c Client) getTargetsFile(role string, snapshotMeta data.Files, consistent bool) (*data.Signed, error) {
471
-	// require role exists in snapshots
472
-	roleMeta, ok := snapshotMeta[role]
473
-	if !ok {
474
-		return nil, data.ErrMissingMeta{Role: role}
475
-	}
476
-	expectedHashes := snapshotMeta[role].Hashes
477
-	if len(expectedHashes) == 0 {
478
-		return nil, data.ErrMissingMeta{Role: role}
199
+	if err = c.newBuilder.Load(consistentInfo.RoleName, cachedTS, 1, false); err == nil {
200
+		logrus.Debugf("successfully verified cached %s", consistentInfo.RoleName)
201
+		return cachedTS, nil
479 202
 	}
480 203
 
481
-	// try to get meta file from content addressed cache
482
-	var download bool
483
-	old := &data.Signed{}
484
-	version := 0
485
-	raw, err := c.cache.GetMeta(role, roleMeta.Length)
486
-	if err != nil || raw == nil {
487
-		logrus.Debugf("Couldn't not find cached %s, must download", role)
488
-		download = true
489
-	} else {
490
-		// file may have been tampered with on disk. Always check the hash!
491
-		if err := data.CheckHashes(raw, expectedHashes); err != nil {
492
-			download = true
493
-		}
204
+	logrus.Debugf("cached %s is invalid (must download): %s", consistentInfo.RoleName, err)
205
+	return c.tryLoadRemote(consistentInfo, cachedTS)
206
+}
494 207
 
495
-		err := json.Unmarshal(raw, old)
496
-		if err == nil {
497
-			targ, err := data.TargetsFromSigned(old, role)
498
-			if err == nil {
499
-				version = targ.Signed.Version
500
-			} else {
501
-				download = true
502
-			}
503
-		} else {
504
-			download = true
505
-		}
208
+func (c *Client) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) {
209
+	consistentName := consistentInfo.ConsistentName()
210
+	raw, err := c.remote.GetMeta(consistentName, consistentInfo.Length())
211
+	if err != nil {
212
+		logrus.Debugf("error downloading %s: %s", consistentName, err)
213
+		return old, err
506 214
 	}
507 215
 
508
-	size := snapshotMeta[role].Length
509
-	var s *data.Signed
510
-	if download {
511
-		raw, s, err = c.downloadSigned(role, size, expectedHashes)
512
-		if err != nil {
513
-			return nil, err
514
-		}
515
-	} else {
516
-		logrus.Debug("using cached ", role)
517
-		s = old
518
-	}
519
-	var targetOrDelgRole data.BaseRole
520
-	if data.IsDelegation(role) {
521
-		delgRole, err := c.local.GetDelegationRole(role)
522
-		if err != nil {
523
-			logrus.Debugf("no %s delegation role loaded", role)
524
-			return nil, err
525
-		}
526
-		targetOrDelgRole = delgRole.BaseRole
527
-	} else {
528
-		targetOrDelgRole, err = c.local.GetBaseRole(role)
529
-		if err != nil {
530
-			logrus.Debugf("no %s role loaded", role)
531
-			return nil, err
532
-		}
533
-	}
534
-	if err = signed.Verify(s, targetOrDelgRole, version); err != nil {
535
-		return nil, err
216
+	// try to load the old data into the old builder - only use it to validate
217
+	// versions if it loads successfully.  If it errors, then the loaded version
218
+	// will be 1
219
+	c.oldBuilder.Load(consistentInfo.RoleName, old, 1, true)
220
+	minVersion := c.oldBuilder.GetLoadedVersion(consistentInfo.RoleName)
221
+
222
+	if err := c.newBuilder.Load(consistentInfo.RoleName, raw, minVersion, false); err != nil {
223
+		logrus.Debugf("downloaded %s is invalid: %s", consistentName, err)
224
+		return raw, err
536 225
 	}
537
-	logrus.Debugf("successfully verified %s", role)
538
-	if download {
539
-		// if we error when setting meta, we should continue.
540
-		err = c.cache.SetMeta(role, raw)
541
-		if err != nil {
542
-			logrus.Errorf("Failed to write %s to local cache: %s", role, err.Error())
543
-		}
226
+	logrus.Debugf("successfully verified downloaded %s", consistentName)
227
+	if err := c.cache.SetMeta(consistentInfo.RoleName, raw); err != nil {
228
+		logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err)
544 229
 	}
545
-	return s, nil
230
+	return raw, nil
546 231
 }
... ...
@@ -4,15 +4,6 @@ import (
4 4
 	"fmt"
5 5
 )
6 6
 
7
-// ErrChecksumMismatch - a checksum failed verification
8
-type ErrChecksumMismatch struct {
9
-	role string
10
-}
11
-
12
-func (e ErrChecksumMismatch) Error() string {
13
-	return fmt.Sprintf("tuf: checksum for %s did not match", e.role)
14
-}
15
-
16 7
 // ErrCorruptedCache - local data is incorrect
17 8
 type ErrCorruptedCache struct {
18 9
 	file string
... ...
@@ -12,13 +12,14 @@ func (e ErrInvalidMetadata) Error() string {
12 12
 	return fmt.Sprintf("%s type metadata invalid: %s", e.role, e.msg)
13 13
 }
14 14
 
15
-// ErrMissingMeta - couldn't find the FileMeta object for a role or target
15
+// ErrMissingMeta - couldn't find the FileMeta object for the given Role, or
16
+// the FileMeta object contained no supported checksums
16 17
 type ErrMissingMeta struct {
17 18
 	Role string
18 19
 }
19 20
 
20 21
 func (e ErrMissingMeta) Error() string {
21
-	return fmt.Sprintf("tuf: sha256 checksum required for %s", e.Role)
22
+	return fmt.Sprintf("no checksums for supported algorithms were provided for %s", e.Role)
22 23
 }
23 24
 
24 25
 // ErrInvalidChecksum is the error to be returned when checksum is invalid
... ...
@@ -32,9 +33,12 @@ func (e ErrInvalidChecksum) Error() string {
32 32
 
33 33
 // ErrMismatchedChecksum is the error to be returned when checksum is mismatched
34 34
 type ErrMismatchedChecksum struct {
35
-	alg string
35
+	alg      string
36
+	name     string
37
+	expected string
36 38
 }
37 39
 
38 40
 func (e ErrMismatchedChecksum) Error() string {
39
-	return fmt.Sprintf("%s checksum mismatched", e.alg)
41
+	return fmt.Sprintf("%s checksum for %s did not match: expected %s", e.alg, e.name,
42
+		e.expected)
40 43
 }
... ...
@@ -31,9 +31,9 @@ func isValidRootStructure(r Root) error {
31 31
 			role: CanonicalRootRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, r.Type)}
32 32
 	}
33 33
 
34
-	if r.Version < 0 {
34
+	if r.Version < 1 {
35 35
 		return ErrInvalidMetadata{
36
-			role: CanonicalRootRole, msg: "version cannot be negative"}
36
+			role: CanonicalRootRole, msg: "version cannot be less than 1"}
37 37
 	}
38 38
 
39 39
 	// all the base roles MUST appear in the root.json - other roles are allowed,
... ...
@@ -22,19 +22,19 @@ type Snapshot struct {
22 22
 	Meta Files `json:"meta"`
23 23
 }
24 24
 
25
-// isValidSnapshotStructure returns an error, or nil, depending on whether the content of the
25
+// IsValidSnapshotStructure returns an error, or nil, depending on whether the content of the
26 26
 // struct is valid for snapshot metadata.  This does not check signatures or expiry, just that
27 27
 // the metadata content is valid.
28
-func isValidSnapshotStructure(s Snapshot) error {
28
+func IsValidSnapshotStructure(s Snapshot) error {
29 29
 	expectedType := TUFTypes[CanonicalSnapshotRole]
30 30
 	if s.Type != expectedType {
31 31
 		return ErrInvalidMetadata{
32 32
 			role: CanonicalSnapshotRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, s.Type)}
33 33
 	}
34 34
 
35
-	if s.Version < 0 {
35
+	if s.Version < 1 {
36 36
 		return ErrInvalidMetadata{
37
-			role: CanonicalSnapshotRole, msg: "version cannot be negative"}
37
+			role: CanonicalSnapshotRole, msg: "version cannot be less than one"}
38 38
 	}
39 39
 
40 40
 	for _, role := range []string{CanonicalRootRole, CanonicalTargetsRole} {
... ...
@@ -126,7 +126,9 @@ func (sp *SignedSnapshot) AddMeta(role string, meta FileMeta) {
126 126
 // not found
127 127
 func (sp *SignedSnapshot) GetMeta(role string) (*FileMeta, error) {
128 128
 	if meta, ok := sp.Signed.Meta[role]; ok {
129
-		return &meta, nil
129
+		if _, ok := meta.Hashes["sha256"]; ok {
130
+			return &meta, nil
131
+		}
130 132
 	}
131 133
 	return nil, ErrMissingMeta{Role: role}
132 134
 }
... ...
@@ -155,7 +157,7 @@ func SnapshotFromSigned(s *Signed) (*SignedSnapshot, error) {
155 155
 	if err := defaultSerializer.Unmarshal(*s.Signed, &sp); err != nil {
156 156
 		return nil, err
157 157
 	}
158
-	if err := isValidSnapshotStructure(sp); err != nil {
158
+	if err := IsValidSnapshotStructure(sp); err != nil {
159 159
 		return nil, err
160 160
 	}
161 161
 	sigs := make([]Signature, len(s.Signatures))
... ...
@@ -38,8 +38,8 @@ func isValidTargetsStructure(t Targets, roleName string) error {
38 38
 			role: roleName, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
39 39
 	}
40 40
 
41
-	if t.Version < 0 {
42
-		return ErrInvalidMetadata{role: roleName, msg: "version cannot be negative"}
41
+	if t.Version < 1 {
42
+		return ErrInvalidMetadata{role: roleName, msg: "version cannot be less than one"}
43 43
 	}
44 44
 
45 45
 	for _, roleObj := range t.Delegations.Roles {
... ...
@@ -21,19 +21,19 @@ type Timestamp struct {
21 21
 	Meta Files `json:"meta"`
22 22
 }
23 23
 
24
-// isValidTimestampStructure returns an error, or nil, depending on whether the content of the struct
24
+// IsValidTimestampStructure returns an error, or nil, depending on whether the content of the struct
25 25
 // is valid for timestamp metadata.  This does not check signatures or expiry, just that
26 26
 // the metadata content is valid.
27
-func isValidTimestampStructure(t Timestamp) error {
27
+func IsValidTimestampStructure(t Timestamp) error {
28 28
 	expectedType := TUFTypes[CanonicalTimestampRole]
29 29
 	if t.Type != expectedType {
30 30
 		return ErrInvalidMetadata{
31 31
 			role: CanonicalTimestampRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
32 32
 	}
33 33
 
34
-	if t.Version < 0 {
34
+	if t.Version < 1 {
35 35
 		return ErrInvalidMetadata{
36
-			role: CanonicalTimestampRole, msg: "version cannot be negative"}
36
+			role: CanonicalTimestampRole, msg: "version cannot be less than one"}
37 37
 	}
38 38
 
39 39
 	// Meta is a map of FileMeta, so if the role isn't in the map it returns
... ...
@@ -124,7 +124,7 @@ func TimestampFromSigned(s *Signed) (*SignedTimestamp, error) {
124 124
 	if err := defaultSerializer.Unmarshal(*s.Signed, &ts); err != nil {
125 125
 		return nil, err
126 126
 	}
127
-	if err := isValidTimestampStructure(ts); err != nil {
127
+	if err := IsValidTimestampStructure(ts); err != nil {
128 128
 		return nil, err
129 129
 	}
130 130
 	sigs := make([]Signature, len(s.Signatures))
... ...
@@ -4,6 +4,7 @@ import (
4 4
 	"crypto/sha256"
5 5
 	"crypto/sha512"
6 6
 	"crypto/subtle"
7
+	"encoding/hex"
7 8
 	"fmt"
8 9
 	"hash"
9 10
 	"io"
... ...
@@ -132,7 +133,7 @@ type FileMeta struct {
132 132
 }
133 133
 
134 134
 // CheckHashes verifies all the checksums specified by the "hashes" of the payload.
135
-func CheckHashes(payload []byte, hashes Hashes) error {
135
+func CheckHashes(payload []byte, name string, hashes Hashes) error {
136 136
 	cnt := 0
137 137
 
138 138
 	// k, v indicate the hash algorithm and the corresponding value
... ...
@@ -141,20 +142,20 @@ func CheckHashes(payload []byte, hashes Hashes) error {
141 141
 		case notary.SHA256:
142 142
 			checksum := sha256.Sum256(payload)
143 143
 			if subtle.ConstantTimeCompare(checksum[:], v) == 0 {
144
-				return ErrMismatchedChecksum{alg: notary.SHA256}
144
+				return ErrMismatchedChecksum{alg: notary.SHA256, name: name, expected: hex.EncodeToString(v)}
145 145
 			}
146 146
 			cnt++
147 147
 		case notary.SHA512:
148 148
 			checksum := sha512.Sum512(payload)
149 149
 			if subtle.ConstantTimeCompare(checksum[:], v) == 0 {
150
-				return ErrMismatchedChecksum{alg: notary.SHA512}
150
+				return ErrMismatchedChecksum{alg: notary.SHA512, name: name, expected: hex.EncodeToString(v)}
151 151
 			}
152 152
 			cnt++
153 153
 		}
154 154
 	}
155 155
 
156 156
 	if cnt == 0 {
157
-		return fmt.Errorf("at least one supported hash needed")
157
+		return ErrMissingMeta{Role: name}
158 158
 	}
159 159
 
160 160
 	return nil
... ...
@@ -44,10 +44,15 @@ func (e ErrLowVersion) Error() string {
44 44
 }
45 45
 
46 46
 // ErrRoleThreshold indicates we did not validate enough signatures to meet the threshold
47
-type ErrRoleThreshold struct{}
47
+type ErrRoleThreshold struct {
48
+	Msg string
49
+}
48 50
 
49 51
 func (e ErrRoleThreshold) Error() string {
50
-	return "valid signatures did not meet threshold"
52
+	if e.Msg == "" {
53
+		return "valid signatures did not meet threshold"
54
+	}
55
+	return e.Msg
51 56
 }
52 57
 
53 58
 // ErrInvalidKeyType indicates the types for the key and signature it's associated with are
... ...
@@ -18,17 +18,20 @@ type KeyService interface {
18 18
 	// GetKey retrieves the public key if present, otherwise it returns nil
19 19
 	GetKey(keyID string) data.PublicKey
20 20
 
21
-	// GetPrivateKey retrieves the private key and role if present, otherwise
22
-	// it returns nil
21
+	// GetPrivateKey retrieves the private key and role if present and retrievable,
22
+	// otherwise it returns nil and an error
23 23
 	GetPrivateKey(keyID string) (data.PrivateKey, string, error)
24 24
 
25
-	// RemoveKey deletes the specified key
25
+	// RemoveKey deletes the specified key, and returns an error only if the key
26
+	// removal fails. If the key doesn't exist, no error should be returned.
26 27
 	RemoveKey(keyID string) error
27 28
 
28
-	// ListKeys returns a list of key IDs for the role
29
+	// ListKeys returns a list of key IDs for the role, or an empty list or
30
+	// nil if there are no keys.
29 31
 	ListKeys(role string) []string
30 32
 
31
-	// ListAllKeys returns a map of all available signing key IDs to role
33
+	// ListAllKeys returns a map of all available signing key IDs to role, or
34
+	// an empty map or nil if there are no keys.
32 35
 	ListAllKeys() map[string]string
33 36
 }
34 37
 
... ...
@@ -21,37 +21,26 @@ var (
21 21
 	ErrWrongType    = errors.New("tuf: meta file has wrong type")
22 22
 )
23 23
 
24
-// Verify checks the signatures and metadata (expiry, version) for the signed role
25
-// data
26
-func Verify(s *data.Signed, role data.BaseRole, minVersion int) error {
27
-	if err := verifyMeta(s, role.Name, minVersion); err != nil {
28
-		return err
29
-	}
30
-	return VerifySignatures(s, role)
24
+// IsExpired checks if the given time passed before the present time
25
+func IsExpired(t time.Time) bool {
26
+	return t.Before(time.Now())
31 27
 }
32 28
 
33
-func verifyMeta(s *data.Signed, role string, minVersion int) error {
34
-	sm := &data.SignedCommon{}
35
-	if err := json.Unmarshal(*s.Signed, sm); err != nil {
36
-		return err
37
-	}
38
-	if !data.ValidTUFType(sm.Type, role) {
39
-		return ErrWrongType
40
-	}
41
-	if IsExpired(sm.Expires) {
29
+// VerifyExpiry returns ErrExpired if the metadata is expired
30
+func VerifyExpiry(s *data.SignedCommon, role string) error {
31
+	if IsExpired(s.Expires) {
42 32
 		logrus.Errorf("Metadata for %s expired", role)
43
-		return ErrExpired{Role: role, Expired: sm.Expires.Format("Mon Jan 2 15:04:05 MST 2006")}
33
+		return ErrExpired{Role: role, Expired: s.Expires.Format("Mon Jan 2 15:04:05 MST 2006")}
44 34
 	}
45
-	if sm.Version < minVersion {
46
-		return ErrLowVersion{sm.Version, minVersion}
47
-	}
48
-
49 35
 	return nil
50 36
 }
51 37
 
52
-// IsExpired checks if the given time passed before the present time
53
-func IsExpired(t time.Time) bool {
54
-	return t.Before(time.Now())
38
+// VerifyVersion returns ErrLowVersion if the metadata version is lower than the min version
39
+func VerifyVersion(s *data.SignedCommon, minVersion int) error {
40
+	if s.Version < minVersion {
41
+		return ErrLowVersion{Actual: s.Version, Current: minVersion}
42
+	}
43
+	return nil
55 44
 }
56 45
 
57 46
 // VerifySignatures checks the we have sufficient valid signatures for the given role
... ...
@@ -39,7 +39,7 @@ func (f *FilesystemStore) getPath(name string) string {
39 39
 }
40 40
 
41 41
 // GetMeta returns the meta for the given name (a role) up to size bytes
42
-// If size is -1, this corresponds to "infinite," but we cut off at the
42
+// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
43 43
 // predefined threshold "notary.MaxDownloadSize".
44 44
 func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) {
45 45
 	meta, err := ioutil.ReadFile(f.getPath(name))
... ...
@@ -49,7 +49,7 @@ func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) {
49 49
 		}
50 50
 		return nil, err
51 51
 	}
52
-	if size == -1 {
52
+	if size == NoSizeLimit {
53 53
 		size = notary.MaxDownloadSize
54 54
 	}
55 55
 	// Only return up to size bytes
... ...
@@ -139,7 +139,8 @@ func translateStatusToError(resp *http.Response, resource string) error {
139 139
 // GetMeta downloads the named meta file with the given size. A short body
140 140
 // is acceptable because in the case of timestamp.json, the size is a cap,
141 141
 // not an exact length.
142
-// If size is -1, this corresponds to "infinite," but we cut off at 100MB
142
+// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
143
+// predefined threshold "notary.MaxDownloadSize".
143 144
 func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
144 145
 	url, err := s.buildMetaURL(name)
145 146
 	if err != nil {
... ...
@@ -158,7 +159,7 @@ func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
158 158
 		logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
159 159
 		return nil, err
160 160
 	}
161
-	if size == -1 {
161
+	if size == NoSizeLimit {
162 162
 		size = notary.MaxDownloadSize
163 163
 	}
164 164
 	if resp.ContentLength > size {
... ...
@@ -1,5 +1,8 @@
1 1
 package store
2 2
 
3
+// NoSizeLimit is represented as -1 for arguments to GetMeta
4
+const NoSizeLimit int64 = -1
5
+
3 6
 // MetadataStore must be implemented by anything that intends to interact
4 7
 // with a store of TUF files
5 8
 type MetadataStore interface {
... ...
@@ -39,13 +39,14 @@ type MemoryStore struct {
39 39
 }
40 40
 
41 41
 // GetMeta returns up to size bytes of data references by name.
42
-// If size is -1, this corresponds to "infinite," but we cut off at 100MB
43
-// as we will always know the size for everything but a timestamp and
44
-// sometimes a root, neither of which should be exceptionally large
42
+// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
43
+// predefined threshold "notary.MaxDownloadSize", as we will always know the
44
+// size for everything but a timestamp and sometimes a root,
45
+// neither of which should be exceptionally large
45 46
 func (m *MemoryStore) GetMeta(name string, size int64) ([]byte, error) {
46 47
 	d, ok := m.meta[name]
47 48
 	if ok {
48
-		if size == -1 {
49
+		if size == NoSizeLimit {
49 50
 			size = notary.MaxDownloadSize
50 51
 		}
51 52
 		if int64(len(d)) < size {
... ...
@@ -549,37 +549,6 @@ func (tr *Repo) InitTimestamp() error {
549 549
 	return nil
550 550
 }
551 551
 
552
-// SetRoot sets the Repo.Root field to the SignedRoot object.
553
-func (tr *Repo) SetRoot(s *data.SignedRoot) error {
554
-	tr.Root = s
555
-	var err error
556
-	// originalRootRole is the root role prior to any mutations that might
557
-	// occur on tr.Root.
558
-	tr.originalRootRole, err = tr.Root.BuildBaseRole(data.CanonicalRootRole)
559
-	return err
560
-}
561
-
562
-// SetTimestamp parses the Signed object into a SignedTimestamp object
563
-// and sets the Repo.Timestamp field.
564
-func (tr *Repo) SetTimestamp(s *data.SignedTimestamp) error {
565
-	tr.Timestamp = s
566
-	return nil
567
-}
568
-
569
-// SetSnapshot parses the Signed object into a SignedSnapshots object
570
-// and sets the Repo.Snapshot field.
571
-func (tr *Repo) SetSnapshot(s *data.SignedSnapshot) error {
572
-	tr.Snapshot = s
573
-	return nil
574
-}
575
-
576
-// SetTargets sets the SignedTargets object agaist the role in the
577
-// Repo.Targets map.
578
-func (tr *Repo) SetTargets(role string, s *data.SignedTargets) error {
579
-	tr.Targets[role] = s
580
-	return nil
581
-}
582
-
583 552
 // TargetMeta returns the FileMeta entry for the given path in the
584 553
 // targets file associated with the given role. This may be nil if
585 554
 // the target isn't found in the targets file.
... ...
@@ -876,7 +845,15 @@ func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) {
876 876
 		}
877 877
 	}
878 878
 
879
-	// if the root role has changed and original role had not been saved as a previous role, save it now
879
+	// If the root role (root keys or root threshold) has changed, save the
880
+	// previous role under the role name "root.<n>", such that the "n" is the
881
+	// latest root.json version for which the previous root role was valid.
882
+	// Also, guard against re-saving the previous role if the latest
883
+	// saved role is the same (which should not happen).
884
+	// n   = root.json version of the originalRootRole (previous role)
885
+	// n+1 = root.json version of the currRoot (current role)
886
+	// n-m = root.json version of latestSavedRole (not necessarily n-1, because the
887
+	//       last root rotation could have happened several root.json versions ago)
880 888
 	if !tr.originalRootRole.Equals(currRoot) && !tr.originalRootRole.Equals(latestSavedRole) {
881 889
 		rolesToSignWith = append(rolesToSignWith, tr.originalRootRole)
882 890
 		latestSavedRole = tr.originalRootRole
... ...
@@ -884,20 +861,11 @@ func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) {
884 884
 		versionName := oldRootVersionName(tempRoot.Signed.Version)
885 885
 		tempRoot.Signed.Roles[versionName] = &data.RootRole{
886 886
 			KeyIDs: latestSavedRole.ListKeyIDs(), Threshold: latestSavedRole.Threshold}
887
-
888 887
 	}
889 888
 
890 889
 	tempRoot.Signed.Expires = expires
891 890
 	tempRoot.Signed.Version++
892
-
893
-	// if the current role doesn't match with the latest saved role, save it
894
-	if !currRoot.Equals(latestSavedRole) {
895
-		rolesToSignWith = append(rolesToSignWith, currRoot)
896
-
897
-		versionName := oldRootVersionName(tempRoot.Signed.Version)
898
-		tempRoot.Signed.Roles[versionName] = &data.RootRole{
899
-			KeyIDs: currRoot.ListKeyIDs(), Threshold: currRoot.Threshold}
900
-	}
891
+	rolesToSignWith = append(rolesToSignWith, currRoot)
901 892
 
902 893
 	signed, err := tempRoot.ToSigned()
903 894
 	if err != nil {
... ...
@@ -914,7 +882,7 @@ func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) {
914 914
 	return signed, nil
915 915
 }
916 916
 
917
-// get all the saved previous roles <= the current root version
917
+// get all the saved previous roles < the current root version
918 918
 func (tr *Repo) getOldRootRoles() versionedRootRoles {
919 919
 	oldRootRoles := make(versionedRootRoles, 0, len(tr.Root.Signed.Roles))
920 920
 
... ...
@@ -930,7 +898,7 @@ func (tr *Repo) getOldRootRoles() versionedRootRoles {
930 930
 			continue
931 931
 		}
932 932
 		version, err := strconv.Atoi(nameTokens[1])
933
-		if err != nil || version > tr.Root.Signed.Version {
933
+		if err != nil || version >= tr.Root.Signed.Version {
934 934
 			continue
935 935
 		}
936 936