Signed-off-by: cyli <cyli@twistedmatrix.com>
@@ -176,7 +176,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install notary and notary-server
-ENV NOTARY_VERSION v0.3.0
+ENV NOTARY_VERSION v0.4.2
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

@@ -121,7 +121,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install notary and notary-server
-ENV NOTARY_VERSION v0.3.0
+ENV NOTARY_VERSION v0.4.2
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

@@ -120,7 +120,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install notary and notary-server
-ENV NOTARY_VERSION v0.3.0
+ENV NOTARY_VERSION v0.4.2
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

@@ -139,7 +139,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install notary and notary-server
-ENV NOTARY_VERSION v0.3.0
+ENV NOTARY_VERSION v0.4.2
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

@@ -131,7 +131,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install notary and notary-server
-ENV NOTARY_VERSION v0.3.0
+ENV NOTARY_VERSION v0.4.2
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \

@@ -99,7 +99,7 @@ clone git github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
 clone git github.com/pborman/uuid v1.0
 
 # get desired notary commit, might also need to be updated in Dockerfile
-clone git github.com/docker/notary v0.3.0
+clone git github.com/docker/notary v0.4.2
 
 clone git google.golang.org/grpc v1.0.1-GA https://github.com/grpc/grpc-go.git
 clone git github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
@@ -1,5 +1,66 @@
 # Changelog
 
+## [v0.4.2](https://github.com/docker/notary/releases/tag/v0.4.2) 9/30/2016
++ Bump the cross compiler to golang 1.7.1, since [1.6.3 builds binaries that could have non-deterministic bugs in OS X Sierra](https://groups.google.com/forum/#!msg/golang-dev/Jho5sBHZgAg/cq6d97S1AwAJ) [#984](https://github.com/docker/notary/pull/984)
+
+## [v0.4.1](https://github.com/docker/notary/releases/tag/v0.4.1) 9/27/2016
++ Preliminary Windows support for notary client [#970](https://github.com/docker/notary/pull/970)
++ Output message to CLI when repo changes have been successfully published [#974](https://github.com/docker/notary/pull/974)
++ Improved error messages for client authentication errors and for the witness command [#972](https://github.com/docker/notary/pull/972)
++ Support for finding keys that are anywhere in the notary directory's "private" directory, not just under "private/root_keys" or "private/tuf_keys" [#981](https://github.com/docker/notary/pull/981)
++ Previously, on any error updating, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail - for example if there was an invalid root rotation. [#982](https://github.com/docker/notary/pull/982)
+
+## [v0.4.0](https://github.com/docker/notary/releases/tag/v0.4.0) 9/21/2016
++ Server-managed key rotations [#889](https://github.com/docker/notary/pull/889)
++ Remove `timestamp_keys` table, which stored redundant information [#889](https://github.com/docker/notary/pull/889)
++ Introduce `notary delete` command to delete local and/or remote repo data [#895](https://github.com/docker/notary/pull/895)
++ Introduce `notary witness` command to stage signatures for specified roles [#875](https://github.com/docker/notary/pull/875)
++ Add `-p` flag to offline commands to attempt auto-publish [#886](https://github.com/docker/notary/pull/886) [#912](https://github.com/docker/notary/pull/912) [#923](https://github.com/docker/notary/pull/923)
++ Introduce `notary reset` command to manage staged changes [#959](https://github.com/docker/notary/pull/959) [#856](https://github.com/docker/notary/pull/856)
++ Add `--rootkey` flag to `notary init` to provide a private root key for a repo [#801](https://github.com/docker/notary/pull/801)
++ Introduce `notary delegation purge` command to remove a specified key from all delegations [#855](https://github.com/docker/notary/pull/855)
++ Removed HTTP endpoint from notary-signer [#870](https://github.com/docker/notary/pull/870)
++ Refactored and unified key storage [#825](https://github.com/docker/notary/pull/825)
++ Batched key import and export now operate on PEM files (potentially with multiple blocks) instead of ZIP [#825](https://github.com/docker/notary/pull/825) [#882](https://github.com/docker/notary/pull/882)
++ Add full database integration test-suite [#824](https://github.com/docker/notary/pull/824) [#854](https://github.com/docker/notary/pull/854) [#863](https://github.com/docker/notary/pull/863)
++ Improve notary-server, trust pinning, and yubikey logging [#798](https://github.com/docker/notary/pull/798) [#858](https://github.com/docker/notary/pull/858) [#891](https://github.com/docker/notary/pull/891)
++ Warn if certificates for root or delegations are near expiry [#802](https://github.com/docker/notary/pull/802)
++ Warn if role metadata is near expiry [#786](https://github.com/docker/notary/pull/786)
++ Reformat CLI table output to use the `text/tabwriter` package [#809](https://github.com/docker/notary/pull/809)
++ Fix passphrase retrieval attempt counting and terminal detection [#906](https://github.com/docker/notary/pull/906)
++ Fix listing nested delegations [#864](https://github.com/docker/notary/pull/864)
++ Bump go version to 1.6.3, fix go1.7 compatibility [#851](https://github.com/docker/notary/pull/851) [#793](https://github.com/docker/notary/pull/793)
++ Convert docker-compose files to v2 format [#755](https://github.com/docker/notary/pull/755)
++ Validate root rotations against trust pinning [#800](https://github.com/docker/notary/pull/800)
++ Update fixture certificates for two-year expiry window [#951](https://github.com/docker/notary/pull/951)
+
+## [v0.3.0](https://github.com/docker/notary/releases/tag/v0.3.0) 5/11/2016
++ Root rotations
++ RethinkDB support as a storage backend for Server and Signer
++ A new TUF repo builder that merges server and client validation
++ Trust Pinning: configure known good key IDs and CAs to replace TOFU.
++ Add --input, --output, and --quiet flags to notary verify command
++ Remove local certificate store. It was redundant as all certs were also stored in the cached root.json
++ Cleanup of dead code in client side key storage logic
++ Update project to Go 1.6.1
++ Reorganize vendoring to meet Go 1.6+ standard. Still using Godeps to manage vendored packages
++ Add targets by hash, no longer necessary to have the original target data available
++ Active Key ID verification during signature verification
++ Switch all testing from assert to require, reduces noise in test runs
++ Use alpine based images for smaller downloads and faster setup times
++ Clean up out of date signatures when re-signing content
++ Set cache control headers on HTTP responses from Notary Server
++ Add sha512 support for targets
++ Add environment variable for delegation key passphrase
++ Reduce permissions requested by client from token server
++ Update formatting for delegation list output
++ Move SQLite dependency to tests only so it doesn't get built into official images
++ Fixed asking for password to list private repositories
++ Enable using notary client with username/password in a scripted fashion
++ Fix static compilation of client
++ Enforce TUF version to be >= 1, previously 0 was acceptable although unused
++ json.RawMessage should always be used as *json.RawMessage due to concepts of addressability in Go and effects on encoding
+
 ## [v0.2](https://github.com/docker/notary/releases/tag/v0.2.0) 2/24/2016
 + Add support for delegation roles in `notary` server and client
 + Add `notary CLI` commands for managing delegation roles: `notary delegation`
@@ -1,4 +1,4 @@
-FROM golang:1.6.1
+FROM golang:1.7.1
 
 RUN apt-get update && apt-get install -y \
 	curl \

@@ -8,10 +8,14 @@ RUN apt-get update && apt-get install -y \
 	patch \
 	tar \
 	xz-utils \
+	python \
+	python-pip \
 	--no-install-recommends \
 	&& rm -rf /var/lib/apt/lists/*
 
-RUN go get golang.org/x/tools/cmd/cover
+RUN useradd -ms /bin/bash notary \
+	&& pip install codecov \
+	&& go get golang.org/x/tools/cmd/cover github.com/golang/lint/golint github.com/client9/misspell/cmd/misspell github.com/gordonklaus/ineffassign
 
 # Configure the container for OSX cross compilation
 ENV OSX_SDK MacOSX10.11.sdk

@@ -27,8 +31,7 @@ ENV PATH /osxcross/target/bin:$PATH
 ENV NOTARYDIR /go/src/github.com/docker/notary
 
 COPY . ${NOTARYDIR}
-
-ENV GOPATH ${NOTARYDIR}/Godeps/_workspace:$GOPATH
+RUN chmod -R a+rw /go
 
 WORKDIR ${NOTARYDIR}
 
@@ -13,13 +13,15 @@ endif
 CTIMEVAR=-X $(NOTARY_PKG)/version.GitCommit=$(GITCOMMIT) -X $(NOTARY_PKG)/version.NotaryVersion=$(NOTARY_VERSION)
 GO_LDFLAGS=-ldflags "-w $(CTIMEVAR)"
 GO_LDFLAGS_STATIC=-ldflags "-w $(CTIMEVAR) -extldflags -static"
-GOOSES = darwin linux
+GOOSES = darwin linux windows
 NOTARY_BUILDTAGS ?= pkcs11
 NOTARYDIR := /go/src/github.com/docker/notary
 
-GO_VERSION := $(shell go version | grep "1\.[6-9]\(\.[0-9]+\)*")
-# check to make sure we have the right version
-ifeq ($(strip $(GO_VERSION)),)
+GO_VERSION := $(shell go version | grep "1\.[6-9]\(\.[0-9]+\)*\|devel")
+# check to make sure we have the right version. development versions of Go are
+# not officially supported, but allowed for building
+
+ifeq ($(strip $(GO_VERSION))$(SKIPENVCHECK),)
 $(error Bad Go version - please install Go >= 1.6)
 endif
 

@@ -40,13 +42,11 @@ COVERPROFILE?=$(COVERDIR)/cover.out
 COVERMODE=count
 PKGS ?= $(shell go list -tags "${NOTARY_BUILDTAGS}" ./... | grep -v /vendor/ | tr '\n' ' ')
 
-GO_VERSION = $(shell go version | awk '{print $$3}')
-
-.PHONY: clean all fmt vet lint build test binaries cross cover docker-images notary-dockerfile
+.PHONY: clean all lint build test binaries cross cover docker-images notary-dockerfile
 .DELETE_ON_ERROR: cover
 .DEFAULT: default
 
-all: AUTHORS clean fmt vet fmt lint build test binaries
+all: AUTHORS clean lint build test binaries
 
 AUTHORS: .git/HEAD
 	git log --format='%aN <%aE>' | sort -fu > $@

@@ -90,32 +90,27 @@ ${PREFIX}/bin/static/notary:
 	@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary
 endif
 
-vet:
-	@echo "+ $@"
+
+# run all lint functionality - excludes Godep directory, vendoring, binaries, python tests, and git files
+lint:
+	@echo "+ $@: golint, go vet, go fmt, misspell, ineffassign"
+	# golint
+	@test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec golint {} \; | tee /dev/stderr)"
+	# gofmt
+	@test -z "$$(gofmt -s -l .| grep -v .pb. | grep -v vendor/ | tee /dev/stderr)"
+	# govet
 ifeq ($(shell uname -s), Darwin)
 	@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v vendor | xargs echo "This file should end with '_test':" | tee /dev/stderr)"
 else
 	@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v vendor | xargs -r echo "This file should end with '_test':" | tee /dev/stderr)"
 endif
 	@test -z "$$(go tool vet -printf=false . 2>&1 | grep -v vendor/ | tee /dev/stderr)"
-
-fmt:
-	@echo "+ $@"
-	@test -z "$$(gofmt -s -l .| grep -v .pb. | grep -v vendor/ | tee /dev/stderr)"
-
-lint:
-	@echo "+ $@"
-	@test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec golint {} \; | tee /dev/stderr)"
-
-# Requires that the following:
-# go get -u github.com/client9/misspell/cmd/misspell
-#
-# be run first
-
-# misspell target, don't include Godeps, binaries, python tests, or git files
-misspell:
-	@echo "+ $@"
-	@test -z "$$(find . -name '*' | grep -v vendor/ | grep -v bin/ | grep -v misc/ | grep -v .git/ | xargs misspell | tee /dev/stderr)"
+	# misspell - requires that the following be run first:
+	# go get -u github.com/client9/misspell/cmd/misspell
+	@test -z "$$(find . -type f | grep -v vendor/ | grep -v bin/ | grep -v misc/ | grep -v .git/ | grep -v \.pdf | xargs misspell | tee /dev/stderr)"
+	# ineffassign - requires that the following be run first:
+	# go get -u github.com/gordonklaus/ineffassign
+	@test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec ineffassign {} \; | tee /dev/stderr)"
 
 build:
 	@echo "+ $@"
@@ -130,15 +125,13 @@ test:
 	@echo
 	go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) $(PKGS)
 
-test-full: TESTOPTS =
-test-full: vet lint
-	@echo Note: when testing with a yubikey plugged in, make sure to include 'TESTOPTS="-p 1"'
-	@echo "+ $@"
-	@echo
-	go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) -v $(PKGS)
+integration: TESTDB = mysql
+integration: clean
+	buildscripts/integrationtest.sh $(TESTDB)
 
-integration:
-	buildscripts/integrationtest.sh development.yml
+testdb: TESTDB = mysql
+testdb:
+	buildscripts/dbtests.sh $(TESTDB)
 
 protos:
 	@protoc --go_out=plugins=grpc:. proto/*.proto
@@ -148,25 +141,19 @@ protos:
 # go get github.com/wadey/gocovmerge; go install github.com/wadey/gocovmerge
 #
 # be run first
-
-define gocover
-go test $(OPTS) $(TESTOPTS) -covermode="$(COVERMODE)" -coverprofile="$(COVERDIR)/$(subst /,-,$(1)).$(subst $(_space),.,$(NOTARY_BUILDTAGS)).coverage.txt" "$(1)" || exit 1;
-endef
-
 gen-cover:
 	@mkdir -p "$(COVERDIR)"
-	$(foreach PKG,$(PKGS),$(call gocover,$(PKG)))
-	rm -f "$(COVERDIR)"/*testutils*.coverage.txt
+	python -u buildscripts/covertest.py --coverdir "$(COVERDIR)" --tags "$(NOTARY_BUILDTAGS)" --pkgs="$(PKGS)" --testopts="${TESTOPTS}"
 
 # Generates the cover binaries and runs them all in serial, so this can be used
 # run all tests with a yubikey without any problems
-cover: OPTS = -tags "${NOTARY_BUILDTAGS}" -coverpkg "$(shell ./coverpkg.sh $(1) $(NOTARY_PKG))"
 cover: gen-cover covmerge
 	@go tool cover -html="$(COVERPROFILE)"
 
 # Generates the cover binaries and runs them all in serial, so this can be used
 # run all tests with a yubikey without any problems
-ci: OPTS = -tags "${NOTARY_BUILDTAGS}" -race -coverpkg "$(shell ./coverpkg.sh $(1) $(NOTARY_PKG))"
+ci: override TESTOPTS = -race
 # Codecov knows how to merge multiple coverage files, so covmerge is not needed
 ci: gen-cover
 
@@ -205,10 +192,9 @@ shell: notary-dockerfile
 
 cross: notary-dockerfile
 	@rm -rf $(CURDIR)/cross
-	docker run --rm -v $(CURDIR)/cross:$(NOTARYDIR)/cross -e NOTARY_BUILDTAGS=$(NOTARY_BUILDTAGS) notary buildscripts/cross.sh $(GOOSES)
-
+	docker run --rm -v $(CURDIR)/cross:$(NOTARYDIR)/cross -e CTIMEVAR="${CTIMEVAR}" -e NOTARY_BUILDTAGS=$(NOTARY_BUILDTAGS) notary buildscripts/cross.sh $(GOOSES)
 
 clean:
 	@echo "+ $@"
-	@rm -rf "$(COVERDIR)"
+	@rm -rf "$(COVERDIR)" cross
 	@rm -rf "${PREFIX}/bin/notary-server" "${PREFIX}/bin/notary" "${PREFIX}/bin/notary-signer"
@@ -1,5 +1,5 @@
 # Notary
-[Circle CI](https://circleci.com/gh/docker/notary/tree/master) [CodeCov](https://codecov.io/github/docker/notary)
+[Circle CI](https://circleci.com/gh/docker/notary/tree/master) [CodeCov](https://codecov.io/github/docker/notary) [Go Report Card](https://goreportcard.com/report/github.com/docker/notary)
 
 The Notary project comprises a [server](cmd/notary-server) and a [client](cmd/notary) for running and interacting
 with trusted collections. Please see the [service architecture](docs/service_architecture.md) documentation

@@ -80,7 +80,8 @@ to use `notary` with Docker images.
 
 Prerequisites:
 
-- Go >= 1.6.1
+- Go >= 1.7
+
 - [godep](https://github.com/tools/godep) installed
 - libtool development headers installed
   - Ubuntu: `apt-get install libltdl-dev`
@@ -1,87 +1,23 @@
-# Pony-up!
 machine:
   pre:
-  # Install gvm
-    - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer)
   # Upgrade docker
-    - sudo curl -L -o /usr/bin/docker 'https://s3-external-1.amazonaws.com/circle-downloads/docker-1.9.1-circleci'
-    - sudo chmod 0755 /usr/bin/docker
-
-  post:
-  # Install many go versions
-    - gvm install go1.6.1 -B --name=stable
+    - curl -sSL https://s3.amazonaws.com/circle-downloads/install-circleci-docker.sh | bash -s -- 1.10.0
   # upgrade compose
     - sudo pip install --upgrade docker-compose
 
   services:
     - docker
 
-  environment:
-  # Convenient shortcuts to "common" locations
-    CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME
-    BASE_DIR: src/github.com/docker/notary
-  # Trick circle brainflat "no absolute path" behavior
-    BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
-  # Workaround Circle parsing dumb bugs and/or YAML wonkyness
-    CIRCLE_PAIN: "mode: set"
-  # Put the coverage profile somewhere codecov's script can find it
-    COVERPROFILE: coverage.out
-
-  hosts:
-  # Not used yet
-    fancy: 127.0.0.1
-
 dependencies:
-  pre:
-  # Copy the code to the gopath of all go versions
-    - >
-      gvm use stable &&
-      mkdir -p "$(dirname $BASE_STABLE)" &&
-      cp -R "$CHECKOUT" "$BASE_STABLE"
-
   override:
-  # don't use circleci's default dependency installation step of `go get -d -u ./...`
-  # since we already vendor everything; additionally install linting and misspell tools
-    - >
-      gvm use stable &&
-      go get github.com/golang/lint/golint &&
-      go get -u github.com/client9/misspell/cmd/misspell
+    - docker build -t notary_client .
 
 test:
-  pre:
-  # Output the go versions we are going to test
-    - gvm use stable && go version
-
-  # CLEAN
-    - gvm use stable && make clean:
-        pwd: $BASE_STABLE
-
-  # FMT
-    - gvm use stable && make fmt:
-        pwd: $BASE_STABLE
-
-  # VET
-    - gvm use stable && make vet:
-        pwd: $BASE_STABLE
-
-  # LINT
-    - gvm use stable && make lint:
-        pwd: $BASE_STABLE
-
-  # MISSPELL
-    - gvm use stable && make misspell:
-        pwd: $BASE_STABLE
-
   override:
-  # Test stable, and report
-  # hacking this to be parallel
-    - case $CIRCLE_NODE_INDEX in 0) gvm use stable && NOTARY_BUILDTAGS=pkcs11 make ci ;; 1) gvm use stable && NOTARY_BUILDTAGS=none make ci ;; 2) gvm use stable && make integration ;; esac:
+  # circleci only supports manual parallelism
+    - buildscripts/circle_parallelism.sh:
         parallel: true
         timeout: 600
-        pwd: $BASE_STABLE
-
   post:
-  # Report to codecov.io
-    - case $CIRCLE_NODE_INDEX in 0) bash <(curl -s https://codecov.io/bash) ;; 1) bash <(curl -s https://codecov.io/bash) ;; esac:
-        parallel: true
-        pwd: $BASE_STABLE
+    - docker-compose -f docker-compose.yml down -v
+    - docker-compose -f docker-compose.rethink.yml down -v
@@ -4,7 +4,7 @@ import (
 	"github.com/docker/notary/tuf/data"
 )
 
-// Scopes for TufChanges are simply the TUF roles.
+// Scopes for TUFChanges are simply the TUF roles.
 // Unfortunately because of targets delegations, we can only
 // cover the base roles.
 const (

@@ -14,7 +14,7 @@ const (
 	ScopeTimestamp = "timestamp"
 )
 
-// Types for TufChanges are namespaced by the Role they
+// Types for TUFChanges are namespaced by the Role they
 // are relevant for. The Root and Targets roles are the
 // only ones for which user action can cause a change, as
 // all changes in Snapshot and Timestamp are programmatically

@@ -23,10 +23,11 @@ const (
 	TypeRootRole          = "role"
 	TypeTargetsTarget     = "target"
 	TypeTargetsDelegation = "delegation"
+	TypeWitness           = "witness"
 )
 
-// TufChange represents a change to a TUF repo
-type TufChange struct {
+// TUFChange represents a change to a TUF repo
+type TUFChange struct {
 	// Abbreviated because Go doesn't permit a field and method of the same name
 	Actn       string `json:"action"`
 	Role       string `json:"role"`

@@ -35,16 +36,16 @@ type TufChange struct {
 	Data       []byte `json:"data"`
 }
 
-// TufRootData represents a modification of the keys associated
+// TUFRootData represents a modification of the keys associated
 // with a role that appears in the root.json
-type TufRootData struct {
+type TUFRootData struct {
 	Keys     data.KeyList `json:"keys"`
 	RoleName string       `json:"role"`
 }
 
-// NewTufChange initializes a tufChange object
-func NewTufChange(action string, role, changeType, changePath string, content []byte) *TufChange {
-	return &TufChange{
+// NewTUFChange initializes a TUFChange object
+func NewTUFChange(action string, role, changeType, changePath string, content []byte) *TUFChange {
+	return &TUFChange{

@@ -54,34 +55,34 @@ func NewTufChange(action string, role, changeType, changePath string, content []
 }
 
 // Action return c.Actn
-func (c TufChange) Action() string {
+func (c TUFChange) Action() string {
 	return c.Actn
 }
 
 // Scope returns c.Role
-func (c TufChange) Scope() string {
+func (c TUFChange) Scope() string {
 	return c.Role
 }
 
 // Type returns c.ChangeType
-func (c TufChange) Type() string {
+func (c TUFChange) Type() string {
 	return c.ChangeType
 }
 
 // Path return c.ChangePath
-func (c TufChange) Path() string {
+func (c TUFChange) Path() string {
 	return c.ChangePath
 }
 
 // Content returns c.Data
-func (c TufChange) Content() []byte {
+func (c TUFChange) Content() []byte {
 	return c.Data
 }
 
-// TufDelegation represents a modification to a target delegation
+// TUFDelegation represents a modification to a target delegation
 // this includes creating a delegations. This format is used to avoid
 // unexpected race conditions between humans modifying the same delegation
-type TufDelegation struct {
+type TUFDelegation struct {
 	NewName       string       `json:"new_name,omitempty"`
 	NewThreshold  int          `json:"threshold, omitempty"`
 	AddKeys       data.KeyList `json:"add_keys, omitempty"`

@@ -91,8 +92,8 @@ type TufDelegation struct {
 	ClearAllPaths bool         `json:"clear_paths,omitempty"`
 }
 
-// ToNewRole creates a fresh role object from the TufDelegation data
-func (td TufDelegation) ToNewRole(scope string) (*data.Role, error) {
+// ToNewRole creates a fresh role object from the TUFDelegation data
+func (td TUFDelegation) ToNewRole(scope string) (*data.Role, error) {
 	name := scope
 	if td.NewName != "" {
 		name = td.NewName
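The Tuf-to-TUF renames above break source compatibility for code that builds changes directly. A minimal sketch of the renamed constructor in use, outside this diff; the scope string, target name, and payload are illustrative placeholders:

package main

import (
	"fmt"

	"github.com/docker/notary/client/changelist"
)

func main() {
	// Placeholder payload; real callers marshal target metadata here.
	contentJSON := []byte(`{}`)

	// changelist.NewTufChange(...) becomes changelist.NewTUFChange(...).
	c := changelist.NewTUFChange(
		changelist.ActionCreate,      // action
		"targets",                    // scope: the role the change applies to
		changelist.TypeTargetsTarget, // type of change
		"library/hello-world",        // path: the target name
		contentJSON,
	)
	fmt.Println(c.Action(), c.Scope(), c.Type(), c.Path())
}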
@@ -21,6 +21,24 @@ func (cl *memChangelist) Add(c Change) error {
 	return nil
 }
 
+// Remove deletes the changes found at the given indices
+func (cl *memChangelist) Remove(idxs []int) error {
+	remove := make(map[int]struct{})
+	for _, i := range idxs {
+		remove[i] = struct{}{}
+	}
+	var keep []Change
+
+	for i, c := range cl.changes {
+		if _, ok := remove[i]; ok {
+			continue
+		}
+		keep = append(keep, c)
+	}
+	cl.changes = keep
+	return nil
+}
+
 // Clear empties the changelist file.
 func (cl *memChangelist) Clear(archive string) error {
 	// appending to a nil list initializes it.
@@ -5,12 +5,12 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
-	"path"
 	"sort"
 	"time"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/uuid"
+	"path/filepath"
 )
 
 // FileChangelist stores all the changes as files

@@ -46,13 +46,14 @@ func getFileNames(dirName string) ([]os.FileInfo, error) {
 		}
 		fileInfos = append(fileInfos, f)
 	}
+	sort.Sort(fileChanges(fileInfos))
 	return fileInfos, nil
 }
 
-// Read a JSON formatted file from disk; convert to TufChange struct
-func unmarshalFile(dirname string, f os.FileInfo) (*TufChange, error) {
-	c := &TufChange{}
-	raw, err := ioutil.ReadFile(path.Join(dirname, f.Name()))
+// Read a JSON formatted file from disk; convert to TUFChange struct
+func unmarshalFile(dirname string, f os.FileInfo) (*TUFChange, error) {
+	c := &TUFChange{}
+	raw, err := ioutil.ReadFile(filepath.Join(dirname, f.Name()))
 	if err != nil {
 		return c, err
 	}

@@ -70,7 +71,6 @@ func (cl FileChangelist) List() []Change {
 	if err != nil {
 		return changes
 	}
-	sort.Sort(fileChanges(fileInfos))
 	for _, f := range fileInfos {
 		c, err := unmarshalFile(cl.dir, f)
 		if err != nil {

@@ -89,10 +89,32 @@ func (cl FileChangelist) Add(c Change) error {
 		return err
 	}
 	filename := fmt.Sprintf("%020d_%s.change", time.Now().UnixNano(), uuid.Generate())
-	return ioutil.WriteFile(path.Join(cl.dir, filename), cJSON, 0644)
+	return ioutil.WriteFile(filepath.Join(cl.dir, filename), cJSON, 0644)
+}
+
+// Remove deletes the changes found at the given indices
+func (cl FileChangelist) Remove(idxs []int) error {
+	fileInfos, err := getFileNames(cl.dir)
+	if err != nil {
+		return err
+	}
+	remove := make(map[int]struct{})
+	for _, i := range idxs {
+		remove[i] = struct{}{}
+	}
+	for i, c := range fileInfos {
+		if _, ok := remove[i]; ok {
+			file := filepath.Join(cl.dir, c.Name())
+			if err := os.Remove(file); err != nil {
+				logrus.Errorf("could not remove change %d: %s", i, err.Error())
+			}
+		}
+	}
+	return nil
 }
 
 // Clear clears the change list
+// N.B. archiving not currently implemented
 func (cl FileChangelist) Clear(archive string) error {
 	dir, err := os.Open(cl.dir)
 	if err != nil {

@@ -104,7 +126,7 @@ func (cl FileChangelist) Clear(archive string) error {
 		return err
 	}
 	for _, f := range files {
-		os.Remove(path.Join(cl.dir, f.Name()))
+		os.Remove(filepath.Join(cl.dir, f.Name()))
 	}
 	return nil
 }

@@ -121,7 +143,6 @@ func (cl FileChangelist) NewIterator() (ChangeIterator, error) {
 	if err != nil {
 		return &FileChangeListIterator{}, err
 	}
-	sort.Sort(fileChanges(fileInfos))
 	return &FileChangeListIterator{dirname: cl.dir, collection: fileInfos}, nil
 }
 
@@ -15,6 +15,9 @@ type Changelist interface {
 	// to save a copy of the changelist in that location
 	Clear(archive string) error
 
+	// Remove deletes the changes corresponding with the indices given
+	Remove(idxs []int) error
+
 	// Close syncronizes any pending writes to the underlying
 	// storage and closes the file/connection
 	Close() error
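A sketch of how the new Remove method might be driven, in the spirit of a reset-by-index flow; NewMemChangelist is assumed here to be the package's in-memory constructor, and the staged changes are illustrative:

package main

import (
	"fmt"

	"github.com/docker/notary/client/changelist"
)

func main() {
	// Assumed constructor for the in-memory Changelist implementation.
	cl := changelist.NewMemChangelist()

	// Stage three illustrative changes.
	for _, name := range []string{"a", "b", "c"} {
		cl.Add(changelist.NewTUFChange(changelist.ActionCreate, "targets",
			changelist.TypeTargetsTarget, name, nil))
	}

	// Drop the change at index 1 ("b"); indices refer to the current
	// ordering of the staged changes, as returned by List.
	if err := cl.Remove([]int{1}); err != nil {
		fmt.Println("remove failed:", err)
	}

	for i, c := range cl.List() {
		fmt.Println(i, c.Path()) // prints 0 a, then 1 c
	}
}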
@@ -16,13 +16,12 @@ import (
 	"github.com/docker/notary"
 	"github.com/docker/notary/client/changelist"
 	"github.com/docker/notary/cryptoservice"
+	store "github.com/docker/notary/storage"
 	"github.com/docker/notary/trustmanager"
 	"github.com/docker/notary/trustpinning"
 	"github.com/docker/notary/tuf"
-	tufclient "github.com/docker/notary/tuf/client"
 	"github.com/docker/notary/tuf/data"
 	"github.com/docker/notary/tuf/signed"
-	"github.com/docker/notary/tuf/store"
 	"github.com/docker/notary/tuf/utils"
 )
 
@@ -85,6 +84,7 @@ type NotaryRepository struct {
 	fileStore     store.MetadataStore
 	CryptoService signed.CryptoService
 	tufRepo       *tuf.Repo
+	invalid       *tuf.Repo // known data that was parsable but deemed invalid
 	roundTrip     http.RoundTripper
 	trustPinning  trustpinning.TrustPinConfig
 }

@@ -121,7 +121,7 @@ func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper,
 }
 
 // Target represents a simplified version of the data TUF operates on, so external
-// applications don't have to depend on tuf data types.
+// applications don't have to depend on TUF data types.
 type Target struct {
 	Name   string      // the name of the target
 	Hashes data.Hashes // the hash of the target

@@ -159,7 +159,7 @@ func rootCertKey(gun string, privKey data.PrivateKey) (data.PublicKey, error) {
 		return nil, err
 	}
 
-	x509PublicKey := trustmanager.CertToKey(cert)
+	x509PublicKey := utils.CertToKey(cert)
 	if x509PublicKey == nil {
 		return nil, fmt.Errorf(
 			"cannot use regenerated certificate: format %s", cert.PublicKeyAlgorithm)
@@ -173,10 +173,14 @@ func rootCertKey(gun string, privKey data.PrivateKey) (data.PublicKey, error) {
 // timestamp key and possibly other serverManagedRoles), but the created repository
 // result is only stored on local disk, not published to the server. To do that,
 // use r.Publish() eventually.
-func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...string) error {
-	privKey, _, err := r.CryptoService.GetPrivateKey(rootKeyID)
-	if err != nil {
-		return err
+func (r *NotaryRepository) Initialize(rootKeyIDs []string, serverManagedRoles ...string) error {
+	privKeys := make([]data.PrivateKey, 0, len(rootKeyIDs))
+	for _, keyID := range rootKeyIDs {
+		privKey, _, err := r.CryptoService.GetPrivateKey(keyID)
+		if err != nil {
+			return err
+		}
+		privKeys = append(privKeys, privKey)
 	}
 
 	// currently we only support server managing timestamps and snapshots, and

@@ -206,16 +210,20 @@ func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...st
 		}
 	}
 
-	rootKey, err := rootCertKey(r.gun, privKey)
-	if err != nil {
-		return err
+	rootKeys := make([]data.PublicKey, 0, len(privKeys))
+	for _, privKey := range privKeys {
+		rootKey, err := rootCertKey(r.gun, privKey)
+		if err != nil {
+			return err
+		}
+		rootKeys = append(rootKeys, rootKey)
 	}
 
 	var (
 		rootRole = data.NewBaseRole(
 			data.CanonicalRootRole,
 			notary.MinThreshold,
-			rootKey,
+			rootKeys...,
 		)
 		timestampRole data.BaseRole
 		snapshotRole  data.BaseRole

@@ -271,7 +279,7 @@ func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...st
 
 	r.tufRepo = tuf.NewRepo(r.CryptoService)
 
-	err = r.tufRepo.InitRoot(
+	err := r.tufRepo.InitRoot(
 		rootRole,
 		timestampRole,
 		snapshotRole,
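Initialize now accepts a slice of root key IDs, so one root can be certified by several keys at once. A hedged sketch of a caller adapting to the new signature; the helper and its arguments are illustrative, and the key IDs would normally come from repo.CryptoService:

package example

import (
	"github.com/docker/notary/client"
	"github.com/docker/notary/tuf/data"
)

// initWithTwoRootKeys assumes repo is an already-constructed
// *client.NotaryRepository.
func initWithTwoRootKeys(repo *client.NotaryRepository, keyID1, keyID2 string) error {
	// Before this change: repo.Initialize(keyID1, data.CanonicalSnapshotRole).
	// Now a slice of root key IDs is passed; the server still manages the
	// timestamp role plus any roles listed here (snapshot, in this case).
	return repo.Initialize([]string{keyID1, keyID2}, data.CanonicalSnapshotRole)
}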
@@ -307,14 +315,14 @@ func addChange(cl *changelist.FileChangelist, c changelist.Change, roles ...stri
 	for _, role := range roles {
 		// Ensure we can only add targets to the CanonicalTargetsRole,
 		// or a Delegation role (which is <CanonicalTargetsRole>/something else)
-		if role != data.CanonicalTargetsRole && !data.IsDelegation(role) {
+		if role != data.CanonicalTargetsRole && !data.IsDelegation(role) && !data.IsWildDelegation(role) {
 			return data.ErrInvalidRole{
 				Role:   role,
 				Reason: "cannot add targets to this role",
 			}
 		}
 
-		changes = append(changes, changelist.NewTufChange(
+		changes = append(changes, changelist.NewTUFChange(
 			c.Action(),
 			role,
 			c.Type(),

@@ -352,7 +360,7 @@ func (r *NotaryRepository) AddTarget(target *Target, roles ...string) error {
 		return err
 	}
 
-	template := changelist.NewTufChange(
+	template := changelist.NewTUFChange(
 		changelist.ActionCreate, "", changelist.TypeTargetsTarget,
 		target.Name, metaJSON)
 	return addChange(cl, template, roles...)

@@ -368,13 +376,14 @@ func (r *NotaryRepository) RemoveTarget(targetName string, roles ...string) erro
 		return err
 	}
 	logrus.Debugf("Removing target \"%s\"", targetName)
-	template := changelist.NewTufChange(changelist.ActionDelete, "",
+	template := changelist.NewTUFChange(changelist.ActionDelete, "",
 		changelist.TypeTargetsTarget, targetName, nil)
 	return addChange(cl, template, roles...)
 }
 
 // ListTargets lists all targets for the current repository. The list of
 // roles should be passed in order from highest to lowest priority.
+//
 // IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x"
 // "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
 // its entries will be strictly shadowed by those in other parts of the "targets/a"
@@ -402,11 +411,18 @@ func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, erro
 			if _, ok := targets[targetName]; ok || !validRole.CheckPaths(targetName) {
 				continue
 			}
-			targets[targetName] =
-				&TargetWithRole{Target: Target{Name: targetName, Hashes: targetMeta.Hashes, Length: targetMeta.Length}, Role: validRole.Name}
+			targets[targetName] = &TargetWithRole{
+				Target: Target{
+					Name:   targetName,
+					Hashes: targetMeta.Hashes,
+					Length: targetMeta.Length,
+				},
+				Role: validRole.Name,
+			}
 		}
 		return nil
 	}
+
 	r.tufRepo.WalkTargets("", role, listVisitorFunc, skipRoles...)
 }
 
@@ -462,6 +478,62 @@ func (r *NotaryRepository) GetTargetByName(name string, roles ...string) (*Targe
 
 }
 
+// TargetSignedStruct is a struct that contains a Target, the role it was found in, and the list of signatures for that role
+type TargetSignedStruct struct {
+	Role       data.DelegationRole
+	Target     Target
+	Signatures []data.Signature
+}
+
+// GetAllTargetMetadataByName searches the entire delegation role tree to find the specified target by name for all
+// roles, and returns a list of TargetSignedStructs for each time it finds the specified target.
+// If given an empty string for a target name, it will return back all targets signed into the repository in every role
+func (r *NotaryRepository) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) {
+	if err := r.Update(false); err != nil {
+		return nil, err
+	}
+
+	var targetInfoList []TargetSignedStruct
+
+	// Define a visitor function to find the specified target
+	getAllTargetInfoByNameVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
+		if tgt == nil {
+			return nil
+		}
+		// We found a target and validated path compatibility in our walk,
+		// so add it to our list if we have a match
+		// if we have an empty name, add all targets, else check if we have it
+		var targetMetaToAdd data.Files
+		if name == "" {
+			targetMetaToAdd = tgt.Signed.Targets
+		} else {
+			if meta, ok := tgt.Signed.Targets[name]; ok {
+				targetMetaToAdd = data.Files{name: meta}
+			}
+		}
+
+		for targetName, resultMeta := range targetMetaToAdd {
+			targetInfo := TargetSignedStruct{
+				Role:       validRole,
+				Target:     Target{Name: targetName, Hashes: resultMeta.Hashes, Length: resultMeta.Length},
+				Signatures: tgt.Signatures,
+			}
+			targetInfoList = append(targetInfoList, targetInfo)
+		}
+		// continue walking to all child roles
+		return nil
+	}
+
+	// Check that we didn't error, and that we found the target at least once
+	if err := r.tufRepo.WalkTargets(name, "", getAllTargetInfoByNameVisitorFunc); err != nil {
+		return nil, err
+	}
+	if len(targetInfoList) == 0 {
+		return nil, fmt.Errorf("No valid trust data for %s", name)
+	}
+	return targetInfoList, nil
+}
+
 // GetChangelist returns the list of the repository's unpublished changes
 func (r *NotaryRepository) GetChangelist() (changelist.Changelist, error) {
 	changelistDir := filepath.Join(r.tufRepoPath, "changelist")
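A brief usage sketch of the new GetAllTargetMetadataByName API; the package name and printSigners helper are illustrative:

package example

import (
	"fmt"

	"github.com/docker/notary/client"
)

// printSigners lists every role in which targetName appears, along with the
// signature count for that role; passing "" would list all targets instead.
func printSigners(repo *client.NotaryRepository, targetName string) error {
	results, err := repo.GetAllTargetMetadataByName(targetName)
	if err != nil {
		return err // "No valid trust data for ..." when nothing matched
	}
	for _, res := range results {
		fmt.Printf("%s: role %s, %d signature(s), %d bytes\n",
			res.Target.Name, res.Role.Name, len(res.Signatures), res.Target.Length)
	}
	return nil
}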
@@ -567,19 +639,19 @@ func (r *NotaryRepository) publish(cl changelist.Changelist) error {
 		}
 	}
 	// apply the changelist to the repo
-	if err := applyChangelist(r.tufRepo, cl); err != nil {
+	if err := applyChangelist(r.tufRepo, r.invalid, cl); err != nil {
 		logrus.Debug("Error applying changelist")
 		return err
 	}
 
-	// these are the tuf files we will need to update, serialized as JSON before
+	// these are the TUF files we will need to update, serialized as JSON before
 	// we send anything to remote
 	updatedFiles := make(map[string][]byte)
 
 	// check if our root file is nearing expiry or dirty. Resign if it is. If
 	// root is not dirty but we are publishing for the first time, then just
 	// publish the existing root we have.
-	if nearExpiry(r.tufRepo.Root) || r.tufRepo.Root.Dirty {
+	if nearExpiry(r.tufRepo.Root.Signed.SignedCommon) || r.tufRepo.Root.Dirty {
 		rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole)
 		if err != nil {
 			return err

@@ -635,7 +707,7 @@ func (r *NotaryRepository) publish(cl changelist.Changelist) error {
 		return err
 	}
 
-	return remote.SetMultiMeta(updatedFiles)
+	return remote.SetMulti(updatedFiles)
 }
 
 // bootstrapRepo loads the repository from the local file system (i.e.

@@ -649,7 +721,7 @@ func (r *NotaryRepository) bootstrapRepo() error {
 	logrus.Debugf("Loading trusted collection.")
 
 	for _, role := range data.BaseRoles {
-		jsonBytes, err := r.fileStore.GetMeta(role, store.NoSizeLimit)
+		jsonBytes, err := r.fileStore.GetSized(role, store.NoSizeLimit)
 		if err != nil {
 			if _, ok := err.(store.ErrMetaNotFound); ok &&
 				// server snapshots are supported, and server timestamp management

@@ -665,7 +737,7 @@ func (r *NotaryRepository) bootstrapRepo() error {
 		}
 	}
 
-	tufRepo, err := b.Finish()
+	tufRepo, _, err := b.Finish()
 	if err == nil {
 		r.tufRepo = tufRepo
 	}

@@ -681,7 +753,7 @@ func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error {
 	if err != nil {
 		return err
 	}
-	err = r.fileStore.SetMeta(data.CanonicalRootRole, rootJSON)
+	err = r.fileStore.Set(data.CanonicalRootRole, rootJSON)
 	if err != nil {
 		return err
 	}

@@ -702,7 +774,7 @@ func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error {
 	for role, blob := range targetsToSave {
 		parentDir := filepath.Dir(role)
 		os.MkdirAll(parentDir, 0755)
-		r.fileStore.SetMeta(role, blob)
+		r.fileStore.Set(role, blob)
 	}
 
 	if ignoreSnapshot {

@@ -714,7 +786,7 @@ func (r *NotaryRepository) saveMetadata(ignoreSnapshot bool) error {
 		return err
 	}
 
-	return r.fileStore.SetMeta(data.CanonicalSnapshotRole, snapshotJSON)
+	return r.fileStore.Set(data.CanonicalSnapshotRole, snapshotJSON)
 }
 
 // returns a properly constructed ErrRepositoryNotExist error based on this
@@ -738,7 +810,7 @@ func (r *NotaryRepository) Update(forWrite bool) error {
 		}
 		return err
 	}
-	repo, err := c.Update()
+	repo, invalid, err := c.Update()
 	if err != nil {
 		// notFound.Resource may include a checksum so when the role is root,
 		// it will be root or root.<checksum>. Therefore best we can

@@ -748,7 +820,11 @@ func (r *NotaryRepository) Update(forWrite bool) error {
 		}
 		return err
 	}
+	// we can be assured if we are at this stage that the repo we built is good
+	// no need to test the following function call for an error as it will always be fine should the repo be good- it is!
 	r.tufRepo = repo
+	r.invalid = invalid
+	warnRolesNearExpiry(repo)
 	return nil
 }
 
@@ -759,16 +835,16 @@ func (r *NotaryRepository) Update(forWrite bool) error {
 // and return an error if the remote repository errors.
 //
 // Populates a tuf.RepoBuilder with this root metadata (only use
-// tufclient.Client.Update to load the rest).
+// TUFClient.Update to load the rest).
 //
 // Fails if the remote server is reachable and does not know the repo
 // (i.e. before the first r.Publish()), in which case the error is
 // store.ErrMetaNotFound, or if the root metadata (from whichever source is used)
 // is not trusted.
 //
-// Returns a tufclient.Client for the remote server, which may not be actually
+// Returns a TUFClient for the remote server, which may not be actually
 // operational (if the URL is invalid but a root.json is cached).
-func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Client, error) {
+func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*TUFClient, error) {
 	minVersion := 1
 	// the old root on disk should not be validated against any trust pinning configuration
 	// because if we have an old root, it itself is the thing that pins trust

@@ -781,7 +857,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
 	// during update which will cause us to download a new root and perform a rotation.
 	// If we have an old root, and it's valid, then we overwrite the newBuilder to be one
 	// preloaded with the old root or one which uses the old root for trust bootstrapping.
-	if rootJSON, err := r.fileStore.GetMeta(data.CanonicalRootRole, store.NoSizeLimit); err == nil {
+	if rootJSON, err := r.fileStore.GetSized(data.CanonicalRootRole, store.NoSizeLimit); err == nil {
 		// if we can't load the cached root, fail hard because that is how we pin trust
 		if err := oldBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, true); err != nil {
 			return nil, err

@@ -794,8 +870,9 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
 		if err := newBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, false); err != nil {
 			// Ok, the old root is expired - we want to download a new one. But we want to use the
 			// old root to verify the new root, so bootstrap a new builder with the old builder
+			// but use the trustpinning to validate the new root
 			minVersion = oldBuilder.GetLoadedVersion(data.CanonicalRootRole)
-			newBuilder = oldBuilder.BootstrapNewBuilder()
+			newBuilder = oldBuilder.BootstrapNewBuilderWithNewTrustpin(r.trustPinning)
 		}
 	}
 

@@ -808,7 +885,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
 
 	// if remote store successfully set up, try and get root from remote
 	// We don't have any local data to determine the size of root, so try the maximum (though it is restricted at 100MB)
-	tmpJSON, err := remote.GetMeta(data.CanonicalRootRole, store.NoSizeLimit)
+	tmpJSON, err := remote.GetSized(data.CanonicalRootRole, store.NoSizeLimit)
 	if err != nil {
 		// we didn't have a root in cache and were unable to load one from
 		// the server. Nothing we can do but error.

@@ -821,7 +898,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
 		return nil, err
 	}
 
-	err = r.fileStore.SetMeta(data.CanonicalRootRole, tmpJSON)
+	err = r.fileStore.Set(data.CanonicalRootRole, tmpJSON)
 	if err != nil {
 		// if we can't write cache we should still continue, just log error
 		logrus.Errorf("could not save root to cache: %s", err.Error())

@@ -835,7 +912,7 @@ func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Cl
 		return nil, ErrRepoNotInitialized{}
 	}
 
-	return tufclient.NewClient(oldBuilder, newBuilder, remote, r.fileStore), nil
+	return NewTUFClient(oldBuilder, newBuilder, remote, r.fileStore), nil
 }
 
 // RotateKey removes all existing keys associated with the role, and either

@@ -864,7 +941,7 @@ func (r *NotaryRepository) RotateKey(role string, serverManagesKey bool) error {
 	)
 	switch serverManagesKey {
 	case true:
-		pubKey, err = getRemoteKey(r.baseURL, r.gun, role, r.roundTrip)
+		pubKey, err = rotateRemoteKey(r.baseURL, r.gun, role, r.roundTrip)
 		errFmtMsg = "unable to rotate remote key: %s"
 	default:
 		pubKey, err = r.CryptoService.Create(role, r.gun, data.ECDSAKey)

@@ -897,7 +974,7 @@ func (r *NotaryRepository) RotateKey(role string, serverManagesKey bool) error {
 func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role, action string, key data.PublicKey) error {
 	kl := make(data.KeyList, 0, 1)
 	kl = append(kl, key)
-	meta := changelist.TufRootData{
+	meta := changelist.TUFRootData{
 		RoleName: role,
 		Keys:     kl,
 	}

@@ -906,7 +983,7 @@ func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role, act
 		return err
 	}
 
-	c := changelist.NewTufChange(
+	c := changelist.NewTUFChange(
 		action,
 		changelist.ScopeRoot,
 		changelist.TypeRootRole,

@@ -917,11 +994,21 @@ func (r *NotaryRepository) rootFileKeyChange(cl changelist.Changelist, role, act
 }
 
 // DeleteTrustData removes the trust data stored for this repo in the TUF cache on the client side
-func (r *NotaryRepository) DeleteTrustData() error {
-	// Clear TUF files and cache
-	if err := r.fileStore.RemoveAll(); err != nil {
+// Note that we will not delete any private key material from local storage
+func (r *NotaryRepository) DeleteTrustData(deleteRemote bool) error {
+	// Remove the tufRepoPath directory, which includes local TUF metadata files and changelist information
+	if err := os.RemoveAll(r.tufRepoPath); err != nil {
 		return fmt.Errorf("error clearing TUF repo data: %v", err)
 	}
-	r.tufRepo = tuf.NewRepo(nil)
+	// Note that this will require admin permission in this NotaryRepository's roundtripper
+	if deleteRemote {
+		remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
+		if err != nil {
+			return err
+		}
+		if err := remote.RemoveAll(); err != nil {
+			return err
+		}
+	}
 	return nil
 }
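DeleteTrustData now separates local deletion from deletion of the server-side copy. A minimal sketch, assuming repo is an already-constructed *client.NotaryRepository:

package example

import "github.com/docker/notary/client"

// wipe removes local TUF metadata and the changelist; private keys are kept.
// Passing deleteRemote=true also asks the server to remove its copy, which
// requires admin rights on the repository's transport.
func wipe(repo *client.NotaryRepository, deleteRemote bool) error {
	return repo.DeleteTrustData(deleteRemote)
}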
@@ -8,8 +8,8 @@ import ( |
| 8 | 8 |
"github.com/Sirupsen/logrus" |
| 9 | 9 |
"github.com/docker/notary" |
| 10 | 10 |
"github.com/docker/notary/client/changelist" |
| 11 |
+ store "github.com/docker/notary/storage" |
|
| 11 | 12 |
"github.com/docker/notary/tuf/data" |
| 12 |
- "github.com/docker/notary/tuf/store" |
|
| 13 | 13 |
"github.com/docker/notary/tuf/utils" |
| 14 | 14 |
) |
| 15 | 15 |
|
| ... | ... |
@@ -50,7 +50,7 @@ func (r *NotaryRepository) AddDelegationRoleAndKeys(name string, delegationKeys |
| 50 | 50 |
name, notary.MinThreshold, len(delegationKeys)) |
| 51 | 51 |
|
| 52 | 52 |
// Defaulting to threshold of 1, since we don't allow for larger thresholds at the moment. |
| 53 |
- tdJSON, err := json.Marshal(&changelist.TufDelegation{
|
|
| 53 |
+ tdJSON, err := json.Marshal(&changelist.TUFDelegation{
|
|
| 54 | 54 |
NewThreshold: notary.MinThreshold, |
| 55 | 55 |
AddKeys: data.KeyList(delegationKeys), |
| 56 | 56 |
}) |
| ... | ... |
@@ -78,7 +78,7 @@ func (r *NotaryRepository) AddDelegationPaths(name string, paths []string) error |
| 78 | 78 |
|
| 79 | 79 |
logrus.Debugf(`Adding %s paths to delegation %s\n`, paths, name) |
| 80 | 80 |
|
| 81 |
- tdJSON, err := json.Marshal(&changelist.TufDelegation{
|
|
| 81 |
+ tdJSON, err := json.Marshal(&changelist.TUFDelegation{
|
|
| 82 | 82 |
AddPaths: paths, |
| 83 | 83 |
}) |
| 84 | 84 |
if err != nil {
|
| ... | ... |
@@ -141,7 +141,7 @@ func (r *NotaryRepository) RemoveDelegationPaths(name string, paths []string) er |
| 141 | 141 |
|
| 142 | 142 |
logrus.Debugf(`Removing %s paths from delegation "%s"\n`, paths, name) |
| 143 | 143 |
|
| 144 |
- tdJSON, err := json.Marshal(&changelist.TufDelegation{
|
|
| 144 |
+ tdJSON, err := json.Marshal(&changelist.TUFDelegation{
|
|
| 145 | 145 |
RemovePaths: paths, |
| 146 | 146 |
}) |
| 147 | 147 |
if err != nil {
|
| ... | ... |
@@ -155,9 +155,11 @@ func (r *NotaryRepository) RemoveDelegationPaths(name string, paths []string) er |
| 155 | 155 |
// RemoveDelegationKeys creates a changelist entry to remove provided keys from an existing delegation. |
| 156 | 156 |
// When this changelist is applied, if the specified keys are the only keys left in the role, |
| 157 | 157 |
// the role itself will be deleted in its entirety. |
| 158 |
+// It can also delete a key from all delegations under a parent using a name |
|
| 159 |
+// with a wildcard at the end. |
|
| 158 | 160 |
func (r *NotaryRepository) RemoveDelegationKeys(name string, keyIDs []string) error {
|
| 159 | 161 |
|
| 160 |
- if !data.IsDelegation(name) {
|
|
| 162 |
+ if !data.IsDelegation(name) && !data.IsWildDelegation(name) {
|
|
| 161 | 163 |
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
|
| 162 | 164 |
} |
| 163 | 165 |
|
| ... | ... |
@@ -169,7 +171,7 @@ func (r *NotaryRepository) RemoveDelegationKeys(name string, keyIDs []string) er |
| 169 | 169 |
|
| 170 | 170 |
logrus.Debugf(`Removing %s keys from delegation "%s"\n`, keyIDs, name) |
| 171 | 171 |
|
| 172 |
- tdJSON, err := json.Marshal(&changelist.TufDelegation{
|
|
| 172 |
+ tdJSON, err := json.Marshal(&changelist.TUFDelegation{
|
|
| 173 | 173 |
RemoveKeys: keyIDs, |
| 174 | 174 |
}) |
| 175 | 175 |
if err != nil {
|
| ... | ... |
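With wildcard names now accepted, a single `RemoveDelegationKeys` call can purge a key from every delegation under a parent role. A hedged sketch, assuming notary's trailing-`/*` wildcard convention and an illustrative key ID:

    // Queue removal of one key ID from all delegations under targets/releases.
    // "targets/releases/*" is a wildcard delegation name; when the changelist
    // is applied, this routes through PurgeDelegationKeys.
    err := repo.RemoveDelegationKeys("targets/releases/*", []string{"abcd1234"})
    if err != nil {
        return err
    }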
@@ -195,7 +197,7 @@ func (r *NotaryRepository) ClearDelegationPaths(name string) error {
|
| 195 | 195 |
|
| 196 | 196 |
logrus.Debugf(`Removing all paths from delegation "%s"\n`, name) |
| 197 | 197 |
|
| 198 |
- tdJSON, err := json.Marshal(&changelist.TufDelegation{
|
|
| 198 |
+ tdJSON, err := json.Marshal(&changelist.TUFDelegation{
|
|
| 199 | 199 |
ClearAllPaths: true, |
| 200 | 200 |
}) |
| 201 | 201 |
if err != nil {
|
| ... | ... |
@@ -206,8 +208,8 @@ func (r *NotaryRepository) ClearDelegationPaths(name string) error {
|
| 206 | 206 |
return addChange(cl, template, name) |
| 207 | 207 |
} |
| 208 | 208 |
|
| 209 |
-func newUpdateDelegationChange(name string, content []byte) *changelist.TufChange {
|
|
| 210 |
- return changelist.NewTufChange( |
|
| 209 |
+func newUpdateDelegationChange(name string, content []byte) *changelist.TUFChange {
|
|
| 210 |
+ return changelist.NewTUFChange( |
|
| 211 | 211 |
changelist.ActionUpdate, |
| 212 | 212 |
name, |
| 213 | 213 |
changelist.TypeTargetsDelegation, |
| ... | ... |
@@ -216,8 +218,8 @@ func newUpdateDelegationChange(name string, content []byte) *changelist.TufChang |
| 216 | 216 |
) |
| 217 | 217 |
} |
| 218 | 218 |
|
| 219 |
-func newCreateDelegationChange(name string, content []byte) *changelist.TufChange {
|
|
| 220 |
- return changelist.NewTufChange( |
|
| 219 |
+func newCreateDelegationChange(name string, content []byte) *changelist.TUFChange {
|
|
| 220 |
+ return changelist.NewTUFChange( |
|
| 221 | 221 |
changelist.ActionCreate, |
| 222 | 222 |
name, |
| 223 | 223 |
changelist.TypeTargetsDelegation, |
| ... | ... |
@@ -226,8 +228,8 @@ func newCreateDelegationChange(name string, content []byte) *changelist.TufChang |
| 226 | 226 |
) |
| 227 | 227 |
} |
| 228 | 228 |
|
| 229 |
-func newDeleteDelegationChange(name string, content []byte) *changelist.TufChange {
|
|
| 230 |
- return changelist.NewTufChange( |
|
| 229 |
+func newDeleteDelegationChange(name string, content []byte) *changelist.TUFChange {
|
|
| 230 |
+ return changelist.NewTUFChange( |
|
| 231 | 231 |
changelist.ActionDelete, |
| 232 | 232 |
name, |
| 233 | 233 |
changelist.TypeTargetsDelegation, |
| ... | ... |
@@ -238,7 +240,7 @@ func newDeleteDelegationChange(name string, content []byte) *changelist.TufChang |
| 238 | 238 |
|
| 239 | 239 |
// GetDelegationRoles returns the keys and roles of the repository's delegations |
| 240 | 240 |
// Also converts key IDs to canonical key IDs to keep consistent with signing prompts |
| 241 |
-func (r *NotaryRepository) GetDelegationRoles() ([]*data.Role, error) {
|
|
| 241 |
+func (r *NotaryRepository) GetDelegationRoles() ([]data.Role, error) {
|
|
| 242 | 242 |
// Update state of the repo to latest |
| 243 | 243 |
if err := r.Update(false); err != nil {
|
| 244 | 244 |
return nil, err |
| ... | ... |
@@ -251,7 +253,7 @@ func (r *NotaryRepository) GetDelegationRoles() ([]*data.Role, error) {
|
| 251 | 251 |
} |
| 252 | 252 |
|
| 253 | 253 |
// make a copy for traversing nested delegations |
| 254 |
- allDelegations := []*data.Role{}
|
|
| 254 |
+ allDelegations := []data.Role{}
|
|
| 255 | 255 |
|
| 256 | 256 |
// Define a visitor function to populate the delegations list and translate their key IDs to canonical IDs |
| 257 | 257 |
delegationCanonicalListVisitor := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
|
| ... | ... |
@@ -271,20 +273,23 @@ func (r *NotaryRepository) GetDelegationRoles() ([]*data.Role, error) {
|
| 271 | 271 |
return allDelegations, nil |
| 272 | 272 |
} |
| 273 | 273 |
|
| 274 |
-func translateDelegationsToCanonicalIDs(delegationInfo data.Delegations) ([]*data.Role, error) {
|
|
| 275 |
- canonicalDelegations := make([]*data.Role, len(delegationInfo.Roles)) |
|
| 276 |
- copy(canonicalDelegations, delegationInfo.Roles) |
|
| 274 |
+func translateDelegationsToCanonicalIDs(delegationInfo data.Delegations) ([]data.Role, error) {
|
|
| 275 |
+ canonicalDelegations := make([]data.Role, len(delegationInfo.Roles)) |
|
| 276 |
+ // Do a copy by value to ensure local delegation metadata is untouched |
|
| 277 |
+ for idx, origRole := range delegationInfo.Roles {
|
|
| 278 |
+ canonicalDelegations[idx] = *origRole |
|
| 279 |
+ } |
|
| 277 | 280 |
delegationKeys := delegationInfo.Keys |
| 278 | 281 |
for i, delegation := range canonicalDelegations {
|
| 279 | 282 |
canonicalKeyIDs := []string{}
|
| 280 | 283 |
for _, keyID := range delegation.KeyIDs {
|
| 281 | 284 |
pubKey, ok := delegationKeys[keyID] |
| 282 | 285 |
if !ok {
|
| 283 |
- return nil, fmt.Errorf("Could not translate canonical key IDs for %s", delegation.Name)
|
|
| 286 |
+ return []data.Role{}, fmt.Errorf("Could not translate canonical key IDs for %s", delegation.Name)
|
|
| 284 | 287 |
} |
| 285 | 288 |
canonicalKeyID, err := utils.CanonicalKeyID(pubKey) |
| 286 | 289 |
if err != nil {
|
| 287 |
- return nil, fmt.Errorf("Could not translate canonical key IDs for %s: %v", delegation.Name, err)
|
|
| 290 |
+ return []data.Role{}, fmt.Errorf("Could not translate canonical key IDs for %s: %v", delegation.Name, err)
|
|
| 288 | 291 |
} |
| 289 | 292 |
canonicalKeyIDs = append(canonicalKeyIDs, canonicalKeyID) |
| 290 | 293 |
} |
| ... | ... |
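The switch from `[]*data.Role` to `[]data.Role`, plus the explicit per-element dereference, keeps callers from mutating the repository's own delegation metadata through returned pointers. A self-contained sketch of the aliasing problem, using a stand-in struct:

    package main

    import "fmt"

    type role struct{ Name string }

    func main() {
        orig := []*role{{Name: "targets/a"}}

        // copy(dst, orig) on a []*role would only copy pointers, so edits to
        // the "copy" still reach the originals. Dereferencing copies values:
        copied := make([]role, len(orig))
        for i, r := range orig {
            copied[i] = *r
        }

        copied[0].Name = "changed"
        fmt.Println(orig[0].Name) // "targets/a" — the original is untouched
    }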
@@ -4,14 +4,13 @@ import ( |
| 4 | 4 |
"encoding/json" |
| 5 | 5 |
"fmt" |
| 6 | 6 |
"net/http" |
| 7 |
- "strings" |
|
| 8 | 7 |
"time" |
| 9 | 8 |
|
| 10 | 9 |
"github.com/Sirupsen/logrus" |
| 11 | 10 |
"github.com/docker/notary/client/changelist" |
| 12 |
- tuf "github.com/docker/notary/tuf" |
|
| 11 |
+ store "github.com/docker/notary/storage" |
|
| 12 |
+ "github.com/docker/notary/tuf" |
|
| 13 | 13 |
"github.com/docker/notary/tuf/data" |
| 14 |
- "github.com/docker/notary/tuf/store" |
|
| 15 | 14 |
"github.com/docker/notary/tuf/utils" |
| 16 | 15 |
) |
| 17 | 16 |
|
| ... | ... |
@@ -30,7 +29,7 @@ func getRemoteStore(baseURL, gun string, rt http.RoundTripper) (store.RemoteStor |
| 30 | 30 |
return s, err |
| 31 | 31 |
} |
| 32 | 32 |
|
| 33 |
-func applyChangelist(repo *tuf.Repo, cl changelist.Changelist) error {
|
|
| 33 |
+func applyChangelist(repo *tuf.Repo, invalid *tuf.Repo, cl changelist.Changelist) error {
|
|
| 34 | 34 |
it, err := cl.NewIterator() |
| 35 | 35 |
if err != nil {
|
| 36 | 36 |
return err |
| ... | ... |
@@ -41,30 +40,33 @@ func applyChangelist(repo *tuf.Repo, cl changelist.Changelist) error {
|
| 41 | 41 |
if err != nil {
|
| 42 | 42 |
return err |
| 43 | 43 |
} |
| 44 |
- isDel := data.IsDelegation(c.Scope()) |
|
| 44 |
+ isDel := data.IsDelegation(c.Scope()) || data.IsWildDelegation(c.Scope()) |
|
| 45 | 45 |
switch {
|
| 46 | 46 |
case c.Scope() == changelist.ScopeTargets || isDel: |
| 47 |
- err = applyTargetsChange(repo, c) |
|
| 47 |
+ err = applyTargetsChange(repo, invalid, c) |
|
| 48 | 48 |
case c.Scope() == changelist.ScopeRoot: |
| 49 | 49 |
err = applyRootChange(repo, c) |
| 50 | 50 |
default: |
| 51 |
- logrus.Debug("scope not supported: ", c.Scope())
|
|
| 51 |
+ return fmt.Errorf("scope not supported: %s", c.Scope())
|
|
| 52 | 52 |
} |
| 53 |
- index++ |
|
| 54 | 53 |
if err != nil {
|
| 54 |
+ logrus.Debugf("error attempting to apply change #%d: %s, on scope: %s path: %s type: %s", index, c.Action(), c.Scope(), c.Path(), c.Type())
|
|
| 55 | 55 |
return err |
| 56 | 56 |
} |
| 57 |
+ index++ |
|
| 57 | 58 |
} |
| 58 | 59 |
logrus.Debugf("applied %d change(s)", index)
|
| 59 | 60 |
return nil |
| 60 | 61 |
} |
| 61 | 62 |
|
| 62 |
-func applyTargetsChange(repo *tuf.Repo, c changelist.Change) error {
|
|
| 63 |
+func applyTargetsChange(repo *tuf.Repo, invalid *tuf.Repo, c changelist.Change) error {
|
|
| 63 | 64 |
switch c.Type() {
|
| 64 | 65 |
case changelist.TypeTargetsTarget: |
| 65 | 66 |
return changeTargetMeta(repo, c) |
| 66 | 67 |
case changelist.TypeTargetsDelegation: |
| 67 | 68 |
return changeTargetsDelegation(repo, c) |
| 69 |
+ case changelist.TypeWitness: |
|
| 70 |
+ return witnessTargets(repo, invalid, c.Scope()) |
|
| 68 | 71 |
default: |
| 69 | 72 |
return fmt.Errorf("only target meta and delegations changes supported")
|
| 70 | 73 |
} |
| ... | ... |
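The behavioral change above is that unsupported scopes and actions are now hard errors instead of debug-logged no-ops, and `index` only counts changes that actually applied. A small self-contained sketch of the resulting fail-fast loop shape (the `change` type and `apply` function are stand-ins):

    package main

    import "fmt"

    type change struct{ scope string }

    // apply stands in for applyTargetsChange/applyRootChange.
    func apply(c change) error {
        switch c.scope {
        case "targets", "root":
            return nil
        default:
            return fmt.Errorf("scope not supported: %s", c.scope)
        }
    }

    func main() {
        changes := []change{{"targets"}, {"bogus"}, {"root"}}
        index := 0
        for _, c := range changes {
            if err := apply(c); err != nil {
                fmt.Printf("error attempting to apply change #%d: %v\n", index, err)
                return // fail fast; later changes are never attempted
            }
            index++
        }
    }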
@@ -73,7 +75,7 @@ func applyTargetsChange(repo *tuf.Repo, c changelist.Change) error {
|
| 73 | 73 |
func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
|
| 74 | 74 |
switch c.Action() {
|
| 75 | 75 |
case changelist.ActionCreate: |
| 76 |
- td := changelist.TufDelegation{}
|
|
| 76 |
+ td := changelist.TUFDelegation{}
|
|
| 77 | 77 |
err := json.Unmarshal(c.Content(), &td) |
| 78 | 78 |
if err != nil {
|
| 79 | 79 |
return err |
| ... | ... |
@@ -87,11 +89,15 @@ func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
|
| 87 | 87 |
} |
| 88 | 88 |
return repo.UpdateDelegationPaths(c.Scope(), td.AddPaths, []string{}, false)
|
| 89 | 89 |
case changelist.ActionUpdate: |
| 90 |
- td := changelist.TufDelegation{}
|
|
| 90 |
+ td := changelist.TUFDelegation{}
|
|
| 91 | 91 |
err := json.Unmarshal(c.Content(), &td) |
| 92 | 92 |
if err != nil {
|
| 93 | 93 |
return err |
| 94 | 94 |
} |
| 95 |
+ if data.IsWildDelegation(c.Scope()) {
|
|
| 96 |
+ return repo.PurgeDelegationKeys(c.Scope(), td.RemoveKeys) |
|
| 97 |
+ } |
|
| 98 |
+ |
|
| 95 | 99 |
delgRole, err := repo.GetDelegationRole(c.Scope()) |
| 96 | 100 |
if err != nil {
|
| 97 | 101 |
return err |
| ... | ... |
@@ -112,10 +118,6 @@ func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
|
| 112 | 112 |
removeTUFKeyIDs = append(removeTUFKeyIDs, canonicalToTUFID[canonID]) |
| 113 | 113 |
} |
| 114 | 114 |
|
| 115 |
- // If we specify the only keys left delete the role, else just delete specified keys |
|
| 116 |
- if strings.Join(delgRole.ListKeyIDs(), ";") == strings.Join(removeTUFKeyIDs, ";") && len(td.AddKeys) == 0 {
|
|
| 117 |
- return repo.DeleteDelegation(c.Scope()) |
|
| 118 |
- } |
|
| 119 | 115 |
err = repo.UpdateDelegationKeys(c.Scope(), td.AddKeys, removeTUFKeyIDs, td.NewThreshold) |
| 120 | 116 |
if err != nil {
|
| 121 | 117 |
return err |
| ... | ... |
@@ -155,7 +157,7 @@ func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error {
|
| 155 | 155 |
} |
| 156 | 156 |
|
| 157 | 157 |
default: |
| 158 |
- logrus.Debug("action not yet supported: ", c.Action())
|
|
| 158 |
+ err = fmt.Errorf("action not yet supported: %s", c.Action())
|
|
| 159 | 159 |
} |
| 160 | 160 |
return err |
| 161 | 161 |
} |
| ... | ... |
@@ -166,7 +168,7 @@ func applyRootChange(repo *tuf.Repo, c changelist.Change) error {
|
| 166 | 166 |
case changelist.TypeRootRole: |
| 167 | 167 |
err = applyRootRoleChange(repo, c) |
| 168 | 168 |
default: |
| 169 |
- logrus.Debug("type of root change not yet supported: ", c.Type())
|
|
| 169 |
+ err = fmt.Errorf("type of root change not yet supported: %s", c.Type())
|
|
| 170 | 170 |
} |
| 171 | 171 |
return err // might be nil |
| 172 | 172 |
} |
| ... | ... |
@@ -175,7 +177,7 @@ func applyRootRoleChange(repo *tuf.Repo, c changelist.Change) error {
|
| 175 | 175 |
switch c.Action() {
|
| 176 | 176 |
case changelist.ActionCreate: |
| 177 | 177 |
// replaces all keys for a role |
| 178 |
- d := &changelist.TufRootData{}
|
|
| 178 |
+ d := &changelist.TUFRootData{}
|
|
| 179 | 179 |
err := json.Unmarshal(c.Content(), d) |
| 180 | 180 |
if err != nil {
|
| 181 | 181 |
return err |
| ... | ... |
@@ -185,14 +187,34 @@ func applyRootRoleChange(repo *tuf.Repo, c changelist.Change) error {
|
| 185 | 185 |
return err |
| 186 | 186 |
} |
| 187 | 187 |
default: |
| 188 |
- logrus.Debug("action not yet supported for root: ", c.Action())
|
|
| 188 |
+ return fmt.Errorf("action not yet supported for root: %s", c.Action())
|
|
| 189 | 189 |
} |
| 190 | 190 |
return nil |
| 191 | 191 |
} |
| 192 | 192 |
|
| 193 |
-func nearExpiry(r *data.SignedRoot) bool {
|
|
| 193 |
+func nearExpiry(r data.SignedCommon) bool {
|
|
| 194 | 194 |
plus6mo := time.Now().AddDate(0, 6, 0) |
| 195 |
- return r.Signed.Expires.Before(plus6mo) |
|
| 195 |
+ return r.Expires.Before(plus6mo) |
|
| 196 |
+} |
|
| 197 |
+ |
|
| 198 |
+func warnRolesNearExpiry(r *tuf.Repo) {
|
|
| 199 |
+ // get every role and its respective SignedCommon and call nearExpiry on it |
|
| 200 |
+ // Root check |
|
| 201 |
+ if nearExpiry(r.Root.Signed.SignedCommon) {
|
|
| 202 |
+ logrus.Warn("root is nearing expiry, you should re-sign the role metadata")
|
|
| 203 |
+ } |
|
| 204 |
+ // Targets and delegations check |
|
| 205 |
+ for role, signedTOrD := range r.Targets {
|
|
| 206 |
+ // signedTOrD is of type *data.SignedTargets |
|
| 207 |
+ if nearExpiry(signedTOrD.Signed.SignedCommon) {
|
|
| 208 |
+ logrus.Warn(role, " metadata is nearing expiry, you should re-sign the role metadata") |
|
| 209 |
+ } |
|
| 210 |
+ } |
|
| 211 |
+ // Snapshot check |
|
| 212 |
+ if nearExpiry(r.Snapshot.Signed.SignedCommon) {
|
|
| 213 |
+ logrus.Warn("snapshot is nearing expiry, you should re-sign the role metadata")
|
|
| 214 |
+ } |
|
| 215 |
+ // no need to worry about Timestamp; the notary signer will re-sign with the timestamp key |
|
| 196 | 216 |
} |
| 197 | 217 |
|
| 198 | 218 |
// Fetches a public key from a remote store, given a gun and role |
| ... | ... |
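Since `nearExpiry` now takes a plain `data.SignedCommon`, the same six-month check covers root, targets, delegations, and snapshot. A self-contained sketch of the window logic:

    package main

    import (
        "fmt"
        "time"
    )

    // nearExpiry-style check: metadata expiring within six months should be
    // re-signed soon.
    func nearExpiry(expires time.Time) bool {
        plus6mo := time.Now().AddDate(0, 6, 0)
        return expires.Before(plus6mo)
    }

    func main() {
        fmt.Println(nearExpiry(time.Now().AddDate(0, 1, 0))) // true: warn
        fmt.Println(nearExpiry(time.Now().AddDate(1, 0, 0))) // false
    }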
@@ -214,7 +236,26 @@ func getRemoteKey(url, gun, role string, rt http.RoundTripper) (data.PublicKey, |
| 214 | 214 |
return pubKey, nil |
| 215 | 215 |
} |
| 216 | 216 |
|
| 217 |
-// signs and serializes the metadata for a canonical role in a tuf repo to JSON |
|
| 217 |
+// Rotates a private key in a remote store and returns the public key component |
|
| 218 |
+func rotateRemoteKey(url, gun, role string, rt http.RoundTripper) (data.PublicKey, error) {
|
|
| 219 |
+ remote, err := getRemoteStore(url, gun, rt) |
|
| 220 |
+ if err != nil {
|
|
| 221 |
+ return nil, err |
|
| 222 |
+ } |
|
| 223 |
+ rawPubKey, err := remote.RotateKey(role) |
|
| 224 |
+ if err != nil {
|
|
| 225 |
+ return nil, err |
|
| 226 |
+ } |
|
| 227 |
+ |
|
| 228 |
+ pubKey, err := data.UnmarshalPublicKey(rawPubKey) |
|
| 229 |
+ if err != nil {
|
|
| 230 |
+ return nil, err |
|
| 231 |
+ } |
|
| 232 |
+ |
|
| 233 |
+ return pubKey, nil |
|
| 234 |
+} |
|
| 235 |
+ |
|
| 236 |
+// signs and serializes the metadata for a canonical role in a TUF repo to JSON |
|
| 218 | 237 |
func serializeCanonicalRole(tufRepo *tuf.Repo, role string) (out []byte, err error) {
|
| 219 | 238 |
var s *data.Signed |
| 220 | 239 |
switch {
|
| ... | ... |
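The `rotateRemoteKey` helper is what `RotateKey` now calls on the server-managed path, so rotation is a real server-side operation rather than a plain key fetch. A usage sketch against the public API, assuming an initialized `repo`:

    // true => the server manages the key, so the new rotateRemoteKey path
    // asks the server to rotate it and returns the fresh public key.
    if err := repo.RotateKey(data.CanonicalSnapshotRole, true); err != nil {
        return fmt.Errorf("unable to rotate remote key: %v", err)
    }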
@@ -6,7 +6,7 @@ import ( |
| 6 | 6 |
"fmt" |
| 7 | 7 |
"net/http" |
| 8 | 8 |
|
| 9 |
- "github.com/docker/notary/passphrase" |
|
| 9 |
+ "github.com/docker/notary" |
|
| 10 | 10 |
"github.com/docker/notary/trustmanager" |
| 11 | 11 |
"github.com/docker/notary/trustpinning" |
| 12 | 12 |
) |
| ... | ... |
@@ -16,7 +16,7 @@ import ( |
| 16 | 16 |
// (This is normally defaults to "~/.notary" or "~/.docker/trust" when enabling |
| 17 | 17 |
// docker content trust). |
| 18 | 18 |
func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper, |
| 19 |
- retriever passphrase.Retriever, trustPinning trustpinning.TrustPinConfig) ( |
|
| 19 |
+ retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) ( |
|
| 20 | 20 |
*NotaryRepository, error) {
|
| 21 | 21 |
|
| 22 | 22 |
fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever) |
| ... | ... |
@@ -6,7 +6,7 @@ import ( |
| 6 | 6 |
"fmt" |
| 7 | 7 |
"net/http" |
| 8 | 8 |
|
| 9 |
- "github.com/docker/notary/passphrase" |
|
| 9 |
+ "github.com/docker/notary" |
|
| 10 | 10 |
"github.com/docker/notary/trustmanager" |
| 11 | 11 |
"github.com/docker/notary/trustmanager/yubikey" |
| 12 | 12 |
"github.com/docker/notary/trustpinning" |
| ... | ... |
@@ -16,7 +16,7 @@ import ( |
| 16 | 16 |
// It takes the base directory under where all the trust files will be stored |
| 17 | 17 |
// (usually ~/.docker/trust/). |
| 18 | 18 |
func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper, |
| 19 |
- retriever passphrase.Retriever, trustPinning trustpinning.TrustPinConfig) ( |
|
| 19 |
+ retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) ( |
|
| 20 | 20 |
*NotaryRepository, error) {
|
| 21 | 21 |
|
| 22 | 22 |
fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever) |
| 23 | 23 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,239 @@ |
| 0 |
+package client |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "encoding/json" |
|
| 4 |
+ |
|
| 5 |
+ "github.com/Sirupsen/logrus" |
|
| 6 |
+ "github.com/docker/notary" |
|
| 7 |
+ store "github.com/docker/notary/storage" |
|
| 8 |
+ "github.com/docker/notary/tuf" |
|
| 9 |
+ "github.com/docker/notary/tuf/data" |
|
| 10 |
+ "github.com/docker/notary/tuf/signed" |
|
| 11 |
+) |
|
| 12 |
+ |
|
| 13 |
+// TUFClient is a usability wrapper around a raw TUF repo |
|
| 14 |
+type TUFClient struct {
|
|
| 15 |
+ remote store.RemoteStore |
|
| 16 |
+ cache store.MetadataStore |
|
| 17 |
+ oldBuilder tuf.RepoBuilder |
|
| 18 |
+ newBuilder tuf.RepoBuilder |
|
| 19 |
+} |
|
| 20 |
+ |
|
| 21 |
+// NewTUFClient initializes a TUFClient with the given repo, remote source of content, and cache |
|
| 22 |
+func NewTUFClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *TUFClient {
|
|
| 23 |
+ return &TUFClient{
|
|
| 24 |
+ oldBuilder: oldBuilder, |
|
| 25 |
+ newBuilder: newBuilder, |
|
| 26 |
+ remote: remote, |
|
| 27 |
+ cache: cache, |
|
| 28 |
+ } |
|
| 29 |
+} |
|
| 30 |
+ |
|
| 31 |
+// Update performs an update to the TUF repo as defined by the TUF spec |
|
| 32 |
+func (c *TUFClient) Update() (*tuf.Repo, *tuf.Repo, error) {
|
|
| 33 |
+ // 1. Get timestamp |
|
| 34 |
+ // a. If timestamp error (verification, expired, etc...) download new root and return to 1. |
|
| 35 |
+ // 2. Check if local snapshot is up to date |
|
| 36 |
+ // a. If out of date, get updated snapshot |
|
| 37 |
+ // i. If snapshot error, download new root and return to 1. |
|
| 38 |
+ // 3. Check if root correct against snapshot |
|
| 39 |
+ // a. If incorrect, download new root and return to 1. |
|
| 40 |
+ // 4. Iteratively download and search targets and delegations to find target meta |
|
| 41 |
+ logrus.Debug("updating TUF client")
|
|
| 42 |
+ err := c.update() |
|
| 43 |
+ if err != nil {
|
|
| 44 |
+ logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
|
|
| 45 |
+ logrus.Debug("Resetting the TUF builder...")
|
|
| 46 |
+ |
|
| 47 |
+ c.newBuilder = c.newBuilder.BootstrapNewBuilder() |
|
| 48 |
+ |
|
| 49 |
+ if err := c.downloadRoot(); err != nil {
|
|
| 50 |
+ logrus.Debug("Client Update (Root):", err)
|
|
| 51 |
+ return nil, nil, err |
|
| 52 |
+ } |
|
| 53 |
+ // If we error again, we now have the latest root and just want to fail |
|
| 54 |
+ // out as there's no expectation the problem can be resolved automatically |
|
| 55 |
+ logrus.Debug("retrying TUF client update")
|
|
| 56 |
+ if err := c.update(); err != nil {
|
|
| 57 |
+ return nil, nil, err |
|
| 58 |
+ } |
|
| 59 |
+ } |
|
| 60 |
+ return c.newBuilder.Finish() |
|
| 61 |
+} |
|
| 62 |
+ |
|
| 63 |
+func (c *TUFClient) update() error {
|
|
| 64 |
+ if err := c.downloadTimestamp(); err != nil {
|
|
| 65 |
+ logrus.Debugf("Client Update (Timestamp): %s", err.Error())
|
|
| 66 |
+ return err |
|
| 67 |
+ } |
|
| 68 |
+ if err := c.downloadSnapshot(); err != nil {
|
|
| 69 |
+ logrus.Debugf("Client Update (Snapshot): %s", err.Error())
|
|
| 70 |
+ return err |
|
| 71 |
+ } |
|
| 72 |
+ // will always need top level targets at a minimum |
|
| 73 |
+ if err := c.downloadTargets(); err != nil {
|
|
| 74 |
+ logrus.Debugf("Client Update (Targets): %s", err.Error())
|
|
| 75 |
+ return err |
|
| 76 |
+ } |
|
| 77 |
+ return nil |
|
| 78 |
+} |
|
| 79 |
+ |
|
| 80 |
+// downloadRoot is responsible for downloading the root.json |
|
| 81 |
+func (c *TUFClient) downloadRoot() error {
|
|
| 82 |
+ role := data.CanonicalRootRole |
|
| 83 |
+ consistentInfo := c.newBuilder.GetConsistentInfo(role) |
|
| 84 |
+ |
|
| 85 |
+ // We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle |
|
| 86 |
+ // since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch |
|
| 87 |
+ if !consistentInfo.ChecksumKnown() {
|
|
| 88 |
+ logrus.Debugf("Loading root with no expected checksum")
|
|
| 89 |
+ |
|
| 90 |
+ // get the cached root, if it exists, just for version checking |
|
| 91 |
+ cachedRoot, _ := c.cache.GetSized(role, -1) |
|
| 92 |
+ // prefer to download a new root |
|
| 93 |
+ _, remoteErr := c.tryLoadRemote(consistentInfo, cachedRoot) |
|
| 94 |
+ return remoteErr |
|
| 95 |
+ } |
|
| 96 |
+ |
|
| 97 |
+ _, err := c.tryLoadCacheThenRemote(consistentInfo) |
|
| 98 |
+ return err |
|
| 99 |
+} |
|
| 100 |
+ |
|
| 101 |
+// downloadTimestamp is responsible for downloading the timestamp.json |
|
| 102 |
+// Timestamps are special in that we ALWAYS attempt to download and only |
|
| 103 |
+// use cache if the download fails (and the cache is still valid). |
|
| 104 |
+func (c *TUFClient) downloadTimestamp() error {
|
|
| 105 |
+ logrus.Debug("Loading timestamp...")
|
|
| 106 |
+ role := data.CanonicalTimestampRole |
|
| 107 |
+ consistentInfo := c.newBuilder.GetConsistentInfo(role) |
|
| 108 |
+ |
|
| 109 |
+ // always get the remote timestamp, since it supersedes the local one |
|
| 110 |
+ cachedTS, cachedErr := c.cache.GetSized(role, notary.MaxTimestampSize) |
|
| 111 |
+ _, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS) |
|
| 112 |
+ |
|
| 113 |
+ // check that there was no remote error, or if there was a network problem |
|
| 114 |
+ // If there was a validation error, we should error out so we can download a new root or fail the update |
|
| 115 |
+ switch remoteErr.(type) {
|
|
| 116 |
+ case nil: |
|
| 117 |
+ return nil |
|
| 118 |
+ case store.ErrMetaNotFound, store.ErrServerUnavailable, store.ErrOffline, store.NetworkError: |
|
| 119 |
+ break |
|
| 120 |
+ default: |
|
| 121 |
+ return remoteErr |
|
| 122 |
+ } |
|
| 123 |
+ |
|
| 124 |
+ // since it was a network error: get the cached timestamp, if it exists |
|
| 125 |
+ if cachedErr != nil {
|
|
| 126 |
+ logrus.Debug("no cached or remote timestamp available")
|
|
| 127 |
+ return remoteErr |
|
| 128 |
+ } |
|
| 129 |
+ |
|
| 130 |
+ logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
|
|
| 131 |
+ err := c.newBuilder.Load(role, cachedTS, 1, false) |
|
| 132 |
+ if err == nil {
|
|
| 133 |
+ logrus.Debug("successfully verified cached timestamp")
|
|
| 134 |
+ } |
|
| 135 |
+ return err |
|
| 136 |
+ |
|
| 137 |
+} |
|
| 138 |
+ |
|
| 139 |
+// downloadSnapshot is responsible for downloading the snapshot.json |
|
| 140 |
+func (c *TUFClient) downloadSnapshot() error {
|
|
| 141 |
+ logrus.Debug("Loading snapshot...")
|
|
| 142 |
+ role := data.CanonicalSnapshotRole |
|
| 143 |
+ consistentInfo := c.newBuilder.GetConsistentInfo(role) |
|
| 144 |
+ |
|
| 145 |
+ _, err := c.tryLoadCacheThenRemote(consistentInfo) |
|
| 146 |
+ return err |
|
| 147 |
+} |
|
| 148 |
+ |
|
| 149 |
+// downloadTargets downloads all targets and delegated targets for the repository. |
|
| 150 |
+// It uses a pre-order tree traversal as it's necessary to download parents first |
|
| 151 |
+// to obtain the keys to validate children. |
|
| 152 |
+func (c *TUFClient) downloadTargets() error {
|
|
| 153 |
+ toDownload := []data.DelegationRole{{
|
|
| 154 |
+ BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole},
|
|
| 155 |
+ Paths: []string{""},
|
|
| 156 |
+ }} |
|
| 157 |
+ |
|
| 158 |
+ for len(toDownload) > 0 {
|
|
| 159 |
+ role := toDownload[0] |
|
| 160 |
+ toDownload = toDownload[1:] |
|
| 161 |
+ |
|
| 162 |
+ consistentInfo := c.newBuilder.GetConsistentInfo(role.Name) |
|
| 163 |
+ if !consistentInfo.ChecksumKnown() {
|
|
| 164 |
+ logrus.Debugf("skipping %s because there is no checksum for it", role.Name)
|
|
| 165 |
+ continue |
|
| 166 |
+ } |
|
| 167 |
+ |
|
| 168 |
+ children, err := c.getTargetsFile(role, consistentInfo) |
|
| 169 |
+ switch err.(type) {
|
|
| 170 |
+ case signed.ErrExpired, signed.ErrRoleThreshold: |
|
| 171 |
+ if role.Name == data.CanonicalTargetsRole {
|
|
| 172 |
+ return err |
|
| 173 |
+ } |
|
| 174 |
+ logrus.Warnf("Error getting %s: %s", role.Name, err)
|
|
| 175 |
+ break |
|
| 176 |
+ case nil: |
|
| 177 |
+ toDownload = append(children, toDownload...) |
|
| 178 |
+ default: |
|
| 179 |
+ return err |
|
| 180 |
+ } |
|
| 181 |
+ } |
|
| 182 |
+ return nil |
|
| 183 |
+} |
|
| 184 |
+ |
|
| 185 |
+func (c TUFClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) {
|
|
| 186 |
+ logrus.Debugf("Loading %s...", role.Name)
|
|
| 187 |
+ tgs := &data.SignedTargets{}
|
|
| 188 |
+ |
|
| 189 |
+ raw, err := c.tryLoadCacheThenRemote(ci) |
|
| 190 |
+ if err != nil {
|
|
| 191 |
+ return nil, err |
|
| 192 |
+ } |
|
| 193 |
+ |
|
| 194 |
+ // we know it unmarshals because if `tryLoadCacheThenRemote` didn't fail, then |
|
| 195 |
+ // the raw has already been loaded into the builder |
|
| 196 |
+ json.Unmarshal(raw, tgs) |
|
| 197 |
+ return tgs.GetValidDelegations(role), nil |
|
| 198 |
+} |
|
| 199 |
+ |
|
| 200 |
+func (c *TUFClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) {
|
|
| 201 |
+ cachedTS, err := c.cache.GetSized(consistentInfo.RoleName, consistentInfo.Length()) |
|
| 202 |
+ if err != nil {
|
|
| 203 |
+ logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName)
|
|
| 204 |
+ return c.tryLoadRemote(consistentInfo, nil) |
|
| 205 |
+ } |
|
| 206 |
+ |
|
| 207 |
+ if err = c.newBuilder.Load(consistentInfo.RoleName, cachedTS, 1, false); err == nil {
|
|
| 208 |
+ logrus.Debugf("successfully verified cached %s", consistentInfo.RoleName)
|
|
| 209 |
+ return cachedTS, nil |
|
| 210 |
+ } |
|
| 211 |
+ |
|
| 212 |
+ logrus.Debugf("cached %s is invalid (must download): %s", consistentInfo.RoleName, err)
|
|
| 213 |
+ return c.tryLoadRemote(consistentInfo, cachedTS) |
|
| 214 |
+} |
|
| 215 |
+ |
|
| 216 |
+func (c *TUFClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) {
|
|
| 217 |
+ consistentName := consistentInfo.ConsistentName() |
|
| 218 |
+ raw, err := c.remote.GetSized(consistentName, consistentInfo.Length()) |
|
| 219 |
+ if err != nil {
|
|
| 220 |
+ logrus.Debugf("error downloading %s: %s", consistentName, err)
|
|
| 221 |
+ return old, err |
|
| 222 |
+ } |
|
| 223 |
+ |
|
| 224 |
+ // try to load the old data into the old builder - only use it to validate |
|
| 225 |
+ // versions if it loads successfully. If it errors, then the loaded version |
|
| 226 |
+ // will be 1 |
|
| 227 |
+ c.oldBuilder.Load(consistentInfo.RoleName, old, 1, true) |
|
| 228 |
+ minVersion := c.oldBuilder.GetLoadedVersion(consistentInfo.RoleName) |
|
| 229 |
+ if err := c.newBuilder.Load(consistentInfo.RoleName, raw, minVersion, false); err != nil {
|
|
| 230 |
+ logrus.Debugf("downloaded %s is invalid: %s", consistentName, err)
|
|
| 231 |
+ return raw, err |
|
| 232 |
+ } |
|
| 233 |
+ logrus.Debugf("successfully verified downloaded %s", consistentName)
|
|
| 234 |
+ if err := c.cache.Set(consistentInfo.RoleName, raw); err != nil {
|
|
| 235 |
+ logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err)
|
|
| 236 |
+ } |
|
| 237 |
+ return raw, nil |
|
| 238 |
+} |
| 0 | 239 |
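Putting the new file together, the exported surface is small: construct a `TUFClient` and call `Update`, which retries once with a freshly downloaded root on failure. A hedged sketch (builder and store construction is elided; in the diff above, `bootstrapClient` performs it), where the second returned repo appears to carry recognized-but-unverifiable roles, as consumed by the witnessing code below:

    client := NewTUFClient(oldBuilder, newBuilder, remote, cache)
    valid, invalid, err := client.Update()
    if err != nil {
        return err // Update already retried once after re-downloading root
    }
    // valid is the verified repo; invalid holds roles that loaded but did
    // not verify, which witnessing can restore to validity.
    _, _ = valid, invalid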
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,69 @@ |
| 0 |
+package client |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "path/filepath" |
|
| 4 |
+ |
|
| 5 |
+ "github.com/docker/notary/client/changelist" |
|
| 6 |
+ "github.com/docker/notary/tuf" |
|
| 7 |
+ "github.com/docker/notary/tuf/data" |
|
| 8 |
+) |
|
| 9 |
+ |
|
| 10 |
+// Witness creates change objects to witness (i.e. re-sign) the given |
|
| 11 |
+// roles on the next publish. One change is created per role. |
|
| 12 |
+func (r *NotaryRepository) Witness(roles ...string) ([]string, error) {
|
|
| 13 |
+ cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) |
|
| 14 |
+ if err != nil {
|
|
| 15 |
+ return nil, err |
|
| 16 |
+ } |
|
| 17 |
+ defer cl.Close() |
|
| 18 |
+ |
|
| 19 |
+ successful := make([]string, 0, len(roles)) |
|
| 20 |
+ for _, role := range roles {
|
|
| 21 |
+ // scope is role |
|
| 22 |
+ c := changelist.NewTUFChange( |
|
| 23 |
+ changelist.ActionUpdate, |
|
| 24 |
+ role, |
|
| 25 |
+ changelist.TypeWitness, |
|
| 26 |
+ "", |
|
| 27 |
+ nil, |
|
| 28 |
+ ) |
|
| 29 |
+ err = cl.Add(c) |
|
| 30 |
+ if err != nil {
|
|
| 31 |
+ break |
|
| 32 |
+ } |
|
| 33 |
+ successful = append(successful, role) |
|
| 34 |
+ } |
|
| 35 |
+ return successful, err |
|
| 36 |
+} |
|
| 37 |
+ |
|
| 38 |
+func witnessTargets(repo *tuf.Repo, invalid *tuf.Repo, role string) error {
|
|
| 39 |
+ if r, ok := repo.Targets[role]; ok {
|
|
| 40 |
+ // role is already valid, mark for re-signing/updating |
|
| 41 |
+ r.Dirty = true |
|
| 42 |
+ return nil |
|
| 43 |
+ } |
|
| 44 |
+ |
|
| 45 |
+ if roleObj, err := repo.GetDelegationRole(role); err == nil && invalid != nil {
|
|
| 46 |
+ // A role with a threshold > len(keys) is technically invalid, but we let it build in the builder because |
|
| 47 |
+ // we want to be able to download the role (which may still have targets on it), add more keys, and then |
|
| 48 |
+ // witness the role, thus bringing it back to valid. However, if no keys have been added before witnessing, |
|
| 49 |
+ // then it is still an invalid role, and can't be witnessed because nothing can bring it back to valid. |
|
| 50 |
+ if roleObj.Threshold > len(roleObj.Keys) {
|
|
| 51 |
+ return data.ErrInvalidRole{
|
|
| 52 |
+ Role: role, |
|
| 53 |
+ Reason: "role does not specify enough valid signing keys to meet its required threshold", |
|
| 54 |
+ } |
|
| 55 |
+ } |
|
| 56 |
+ if r, ok := invalid.Targets[role]; ok {
|
|
| 57 |
+ // role is recognized but invalid, move to valid data and mark for re-signing |
|
| 58 |
+ repo.Targets[role] = r |
|
| 59 |
+ r.Dirty = true |
|
| 60 |
+ return nil |
|
| 61 |
+ } |
|
| 62 |
+ } |
|
| 63 |
+ // role isn't recognized, even as invalid |
|
| 64 |
+ return data.ErrInvalidRole{
|
|
| 65 |
+ Role: role, |
|
| 66 |
+ Reason: "this role is not known", |
|
| 67 |
+ } |
|
| 68 |
+} |
| ... | ... |
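A usage sketch for the new witness flow, with illustrative role names; `Witness` returns the roles it successfully queued even when it errors partway, and the changes only take effect on the next publish:

    witnessed, err := repo.Witness("targets/releases", "targets/qa")
    if err != nil {
        logrus.Warnf("only queued witnesses for %v: %v", witnessed, err)
    }
    // repo.Publish() then re-signs the witnessed roles, pulling any
    // recognized-but-invalid ones back into the valid repo.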
@@ -3,16 +3,20 @@ codecov: |
| 3 | 3 |
# 2 builds on circleci, 1 jenkins build |
| 4 | 4 |
after_n_builds: 3 |
| 5 | 5 |
coverage: |
| 6 |
+ range: "50...100" |
|
| 6 | 7 |
status: |
| 7 | 8 |
# project will give us the diff in the total code coverage between a commit |
| 8 | 9 |
# and its parent |
| 9 | 10 |
project: |
| 10 | 11 |
default: |
| 11 | 12 |
target: auto |
| 13 |
+ threshold: "0.05%" |
|
| 12 | 14 |
# patch would give us the code coverage of the diff only |
| 13 | 15 |
patch: false |
| 14 | 16 |
# changes tells us if there are unexpected code coverage changes in other files |
| 15 | 17 |
# which were not changed by the diff |
| 16 | 18 |
changes: false |
| 19 |
+ ignore: # ignore testutils for coverage |
|
| 20 |
+ - "tuf/testutils/*" |
|
| 17 | 21 |
comment: off |
| 18 | 22 |
|
| ... | ... |
@@ -1,8 +1,6 @@ |
| 1 | 1 |
package notary |
| 2 | 2 |
|
| 3 |
-import ( |
|
| 4 |
- "time" |
|
| 5 |
-) |
|
| 3 |
+import "time" |
|
| 6 | 4 |
|
| 7 | 5 |
// application wide constants |
| 8 | 6 |
const ( |
| ... | ... |
@@ -34,6 +32,8 @@ const ( |
| 34 | 34 |
RootKeysSubdir = "root_keys" |
| 35 | 35 |
// NonRootKeysSubdir is the subdirectory under PrivDir where non-root private keys are stored |
| 36 | 36 |
NonRootKeysSubdir = "tuf_keys" |
| 37 |
+ // KeyExtension is the file extension to use for private key files |
|
| 38 |
+ KeyExtension = "key" |
|
| 37 | 39 |
|
| 38 | 40 |
// Day is a duration of one day |
| 39 | 41 |
Day = 24 * time.Hour |
| ... | ... |
@@ -56,6 +56,8 @@ const ( |
| 56 | 56 |
MemoryBackend = "memory" |
| 57 | 57 |
SQLiteBackend = "sqlite3" |
| 58 | 58 |
RethinkDBBackend = "rethinkdb" |
| 59 |
+ |
|
| 60 |
+ DefaultImportRole = "delegation" |
|
| 59 | 61 |
) |
| 60 | 62 |
|
| 61 | 63 |
// NotaryDefaultExpiries is the construct used to configure the default expiry times of |
| 62 | 64 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,16 @@ |
| 0 |
+// +build !windows |
|
| 1 |
+ |
|
| 2 |
+package notary |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "os" |
|
| 6 |
+ "syscall" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+// NotarySupportedSignals contains the signals we would like to capture: |
|
| 10 |
+// - SIGUSR1, indicates an increment of the log level. |
|
| 11 |
+// - SIGUSR2, indicates a decrement of the log level. |
|
| 12 |
+var NotarySupportedSignals = []os.Signal{
|
|
| 13 |
+ syscall.SIGUSR1, |
|
| 14 |
+ syscall.SIGUSR2, |
|
| 15 |
+} |
| 0 | 8 |
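A sketch of how a daemon might consume `NotarySupportedSignals`; the handler bodies are illustrative, matching the documented intent of raising or lowering the log level:

    sigs := make(chan os.Signal, 1)
    signal.Notify(sigs, notary.NotarySupportedSignals...)
    go func() {
        for sig := range sigs {
            switch sig {
            case syscall.SIGUSR1:
                // bump the log level up (e.g. info -> debug)
            case syscall.SIGUSR2:
                // bump the log level down (e.g. debug -> info)
            }
        }
    }()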
deleted file mode 100755 |
| ... | ... |
@@ -1,10 +0,0 @@ |
| 1 |
-#!/usr/bin/env bash |
|
| 2 |
- |
|
| 3 |
-# Given a subpackage and the containing package, figures out which packages |
|
| 4 |
-# need to be passed to `go test -coverpkg`: this includes all of the |
|
| 5 |
-# subpackage's dependencies within the containing package, as well as the |
|
| 6 |
-# subpackage itself. |
|
| 7 |
- |
|
| 8 |
-DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v ${2}/vendor)"
|
|
| 9 |
- |
|
| 10 |
-echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ','
|
| ... | ... |
@@ -7,8 +7,8 @@ import ( |
| 7 | 7 |
"fmt" |
| 8 | 8 |
"time" |
| 9 | 9 |
|
| 10 |
- "github.com/docker/notary/trustmanager" |
|
| 11 | 10 |
"github.com/docker/notary/tuf/data" |
| 11 |
+ "github.com/docker/notary/tuf/utils" |
|
| 12 | 12 |
) |
| 13 | 13 |
|
| 14 | 14 |
// GenerateCertificate generates an X509 Certificate from a template, given a GUN and validity interval |
| ... | ... |
@@ -22,7 +22,7 @@ func GenerateCertificate(rootKey data.PrivateKey, gun string, startTime, endTime |
| 22 | 22 |
} |
| 23 | 23 |
|
| 24 | 24 |
func generateCertificate(signer crypto.Signer, gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
|
| 25 |
- template, err := trustmanager.NewCertificate(gun, startTime, endTime) |
|
| 25 |
+ template, err := utils.NewCertificate(gun, startTime, endTime) |
|
| 26 | 26 |
if err != nil {
|
| 27 | 27 |
return nil, fmt.Errorf("failed to create the certificate template for: %s (%v)", gun, err)
|
| 28 | 28 |
} |
| ... | ... |
@@ -4,13 +4,24 @@ import ( |
| 4 | 4 |
"crypto/rand" |
| 5 | 5 |
"fmt" |
| 6 | 6 |
|
| 7 |
+ "crypto/x509" |
|
| 8 |
+ "encoding/pem" |
|
| 9 |
+ "errors" |
|
| 7 | 10 |
"github.com/Sirupsen/logrus" |
| 11 |
+ "github.com/docker/notary" |
|
| 8 | 12 |
"github.com/docker/notary/trustmanager" |
| 9 | 13 |
"github.com/docker/notary/tuf/data" |
| 14 |
+ "github.com/docker/notary/tuf/utils" |
|
| 10 | 15 |
) |
| 11 | 16 |
|
| 12 |
-const ( |
|
| 13 |
- rsaKeySize = 2048 // Used for snapshots and targets keys |
|
| 17 |
+var ( |
|
| 18 |
+ // ErrNoValidPrivateKey is returned if a key being imported doesn't |
|
| 19 |
+ // look like a private key |
|
| 20 |
+ ErrNoValidPrivateKey = errors.New("no valid private key found")
|
|
| 21 |
+ |
|
| 22 |
+ // ErrRootKeyNotEncrypted is returned if a root key being imported is |
|
| 23 |
+ // unencrypted |
|
| 24 |
+ ErrRootKeyNotEncrypted = errors.New("only encrypted root keys may be imported")
|
|
| 14 | 25 |
) |
| 15 | 26 |
|
| 16 | 27 |
// CryptoService implements Sign and Create, holding a specific GUN and keystore to |
| ... | ... |
@@ -31,17 +42,17 @@ func (cs *CryptoService) Create(role, gun, algorithm string) (data.PublicKey, er |
| 31 | 31 |
|
| 32 | 32 |
switch algorithm {
|
| 33 | 33 |
case data.RSAKey: |
| 34 |
- privKey, err = trustmanager.GenerateRSAKey(rand.Reader, rsaKeySize) |
|
| 34 |
+ privKey, err = utils.GenerateRSAKey(rand.Reader, notary.MinRSABitSize) |
|
| 35 | 35 |
if err != nil {
|
| 36 | 36 |
return nil, fmt.Errorf("failed to generate RSA key: %v", err)
|
| 37 | 37 |
} |
| 38 | 38 |
case data.ECDSAKey: |
| 39 |
- privKey, err = trustmanager.GenerateECDSAKey(rand.Reader) |
|
| 39 |
+ privKey, err = utils.GenerateECDSAKey(rand.Reader) |
|
| 40 | 40 |
if err != nil {
|
| 41 | 41 |
return nil, fmt.Errorf("failed to generate EC key: %v", err)
|
| 42 | 42 |
} |
| 43 | 43 |
case data.ED25519Key: |
| 44 |
- privKey, err = trustmanager.GenerateED25519Key(rand.Reader) |
|
| 44 |
+ privKey, err = utils.GenerateED25519Key(rand.Reader) |
|
| 45 | 45 |
if err != nil {
|
| 46 | 46 |
return nil, fmt.Errorf("failed to generate ED25519 key: %v", err)
|
| 47 | 47 |
} |
| ... | ... |
@@ -153,3 +164,18 @@ func (cs *CryptoService) ListAllKeys() map[string]string {
|
| 153 | 153 |
} |
| 154 | 154 |
return res |
| 155 | 155 |
} |
| 156 |
+ |
|
| 157 |
+// CheckRootKeyIsEncrypted makes sure the root key is encrypted. We have |
|
| 158 |
+// internal assumptions that depend on this. |
|
| 159 |
+func CheckRootKeyIsEncrypted(pemBytes []byte) error {
|
|
| 160 |
+ block, _ := pem.Decode(pemBytes) |
|
| 161 |
+ if block == nil {
|
|
| 162 |
+ return ErrNoValidPrivateKey |
|
| 163 |
+ } |
|
| 164 |
+ |
|
| 165 |
+ if !x509.IsEncryptedPEMBlock(block) {
|
|
| 166 |
+ return ErrRootKeyNotEncrypted |
|
| 167 |
+ } |
|
| 168 |
+ |
|
| 169 |
+ return nil |
|
| 170 |
+} |
| 156 | 171 |
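With the zip import/export machinery removed from cryptoservice (see the deleted file below), `CheckRootKeyIsEncrypted` is the piece that survives. A sketch of guarding a root key import with it (the file path is illustrative):

    pemBytes, err := ioutil.ReadFile("root.key")
    if err != nil {
        return err
    }
    // Rejects non-PEM input (ErrNoValidPrivateKey) and unencrypted root
    // keys (ErrRootKeyNotEncrypted) before any import proceeds.
    if err := cryptoservice.CheckRootKeyIsEncrypted(pemBytes); err != nil {
        return err
    }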
deleted file mode 100644 |
| ... | ... |
@@ -1,313 +0,0 @@ |
| 1 |
-package cryptoservice |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "archive/zip" |
|
| 5 |
- "crypto/x509" |
|
| 6 |
- "encoding/pem" |
|
| 7 |
- "errors" |
|
| 8 |
- "io" |
|
| 9 |
- "io/ioutil" |
|
| 10 |
- "os" |
|
| 11 |
- "path/filepath" |
|
| 12 |
- "strings" |
|
| 13 |
- |
|
| 14 |
- "github.com/docker/notary/passphrase" |
|
| 15 |
- "github.com/docker/notary/trustmanager" |
|
| 16 |
-) |
|
| 17 |
- |
|
| 18 |
-const zipMadeByUNIX = 3 << 8 |
|
| 19 |
- |
|
| 20 |
-var ( |
|
| 21 |
- // ErrNoValidPrivateKey is returned if a key being imported doesn't |
|
| 22 |
- // look like a private key |
|
| 23 |
- ErrNoValidPrivateKey = errors.New("no valid private key found")
|
|
| 24 |
- |
|
| 25 |
- // ErrRootKeyNotEncrypted is returned if a root key being imported is |
|
| 26 |
- // unencrypted |
|
| 27 |
- ErrRootKeyNotEncrypted = errors.New("only encrypted root keys may be imported")
|
|
| 28 |
- |
|
| 29 |
- // ErrNoKeysFoundForGUN is returned if no keys are found for the |
|
| 30 |
- // specified GUN during export |
|
| 31 |
- ErrNoKeysFoundForGUN = errors.New("no keys found for specified GUN")
|
|
| 32 |
-) |
|
| 33 |
- |
|
| 34 |
-// ExportKey exports the specified private key to an io.Writer in PEM format. |
|
| 35 |
-// The key's existing encryption is preserved. |
|
| 36 |
-func (cs *CryptoService) ExportKey(dest io.Writer, keyID, role string) error {
|
|
| 37 |
- var ( |
|
| 38 |
- pemBytes []byte |
|
| 39 |
- err error |
|
| 40 |
- ) |
|
| 41 |
- |
|
| 42 |
- for _, ks := range cs.keyStores {
|
|
| 43 |
- pemBytes, err = ks.ExportKey(keyID) |
|
| 44 |
- if err != nil {
|
|
| 45 |
- continue |
|
| 46 |
- } |
|
| 47 |
- } |
|
| 48 |
- if err != nil {
|
|
| 49 |
- return err |
|
| 50 |
- } |
|
| 51 |
- |
|
| 52 |
- nBytes, err := dest.Write(pemBytes) |
|
| 53 |
- if err != nil {
|
|
| 54 |
- return err |
|
| 55 |
- } |
|
| 56 |
- if nBytes != len(pemBytes) {
|
|
| 57 |
- return errors.New("Unable to finish writing exported key.")
|
|
| 58 |
- } |
|
| 59 |
- return nil |
|
| 60 |
-} |
|
| 61 |
- |
|
| 62 |
-// ExportKeyReencrypt exports the specified private key to an io.Writer in |
|
| 63 |
-// PEM format. The key is reencrypted with a new passphrase. |
|
| 64 |
-func (cs *CryptoService) ExportKeyReencrypt(dest io.Writer, keyID string, newPassphraseRetriever passphrase.Retriever) error {
|
|
| 65 |
- privateKey, _, err := cs.GetPrivateKey(keyID) |
|
| 66 |
- if err != nil {
|
|
| 67 |
- return err |
|
| 68 |
- } |
|
| 69 |
- |
|
| 70 |
- keyInfo, err := cs.GetKeyInfo(keyID) |
|
| 71 |
- if err != nil {
|
|
| 72 |
- return err |
|
| 73 |
- } |
|
| 74 |
- |
|
| 75 |
- // Create temporary keystore to use as a staging area |
|
| 76 |
- tempBaseDir, err := ioutil.TempDir("", "notary-key-export-")
|
|
| 77 |
- defer os.RemoveAll(tempBaseDir) |
|
| 78 |
- |
|
| 79 |
- tempKeyStore, err := trustmanager.NewKeyFileStore(tempBaseDir, newPassphraseRetriever) |
|
| 80 |
- if err != nil {
|
|
| 81 |
- return err |
|
| 82 |
- } |
|
| 83 |
- |
|
| 84 |
- err = tempKeyStore.AddKey(keyInfo, privateKey) |
|
| 85 |
- if err != nil {
|
|
| 86 |
- return err |
|
| 87 |
- } |
|
| 88 |
- |
|
| 89 |
- pemBytes, err := tempKeyStore.ExportKey(keyID) |
|
| 90 |
- if err != nil {
|
|
| 91 |
- return err |
|
| 92 |
- } |
|
| 93 |
- nBytes, err := dest.Write(pemBytes) |
|
| 94 |
- if err != nil {
|
|
| 95 |
- return err |
|
| 96 |
- } |
|
| 97 |
- if nBytes != len(pemBytes) {
|
|
| 98 |
- return errors.New("Unable to finish writing exported key.")
|
|
| 99 |
- } |
|
| 100 |
- return nil |
|
| 101 |
-} |
|
| 102 |
- |
|
| 103 |
-// ExportAllKeys exports all keys to an io.Writer in zip format. |
|
| 104 |
-// newPassphraseRetriever will be used to obtain passphrases to use to encrypt the existing keys. |
|
| 105 |
-func (cs *CryptoService) ExportAllKeys(dest io.Writer, newPassphraseRetriever passphrase.Retriever) error {
|
|
| 106 |
- tempBaseDir, err := ioutil.TempDir("", "notary-key-export-")
|
|
| 107 |
- defer os.RemoveAll(tempBaseDir) |
|
| 108 |
- |
|
| 109 |
- // Create temporary keystore to use as a staging area |
|
| 110 |
- tempKeyStore, err := trustmanager.NewKeyFileStore(tempBaseDir, newPassphraseRetriever) |
|
| 111 |
- if err != nil {
|
|
| 112 |
- return err |
|
| 113 |
- } |
|
| 114 |
- |
|
| 115 |
- for _, ks := range cs.keyStores {
|
|
| 116 |
- if err := moveKeys(ks, tempKeyStore); err != nil {
|
|
| 117 |
- return err |
|
| 118 |
- } |
|
| 119 |
- } |
|
| 120 |
- |
|
| 121 |
- zipWriter := zip.NewWriter(dest) |
|
| 122 |
- |
|
| 123 |
- if err := addKeysToArchive(zipWriter, tempKeyStore); err != nil {
|
|
| 124 |
- return err |
|
| 125 |
- } |
|
| 126 |
- |
|
| 127 |
- zipWriter.Close() |
|
| 128 |
- |
|
| 129 |
- return nil |
|
| 130 |
-} |
|
| 131 |
- |
|
| 132 |
-// ImportKeysZip imports keys from a zip file provided as an zip.Reader. The |
|
| 133 |
-// keys in the root_keys directory are left encrypted, but the other keys are |
|
| 134 |
-// decrypted with the specified passphrase. |
|
| 135 |
-func (cs *CryptoService) ImportKeysZip(zipReader zip.Reader, retriever passphrase.Retriever) error {
|
|
| 136 |
- // Temporarily store the keys in maps, so we can bail early if there's |
|
| 137 |
- // an error (for example, wrong passphrase), without leaving the key |
|
| 138 |
- // store in an inconsistent state |
|
| 139 |
- newKeys := make(map[string][]byte) |
|
| 140 |
- |
|
| 141 |
- // Iterate through the files in the archive. Don't add the keys |
|
| 142 |
- for _, f := range zipReader.File {
|
|
| 143 |
- fNameTrimmed := strings.TrimSuffix(f.Name, filepath.Ext(f.Name)) |
|
| 144 |
- rc, err := f.Open() |
|
| 145 |
- if err != nil {
|
|
| 146 |
- return err |
|
| 147 |
- } |
|
| 148 |
- defer rc.Close() |
|
| 149 |
- |
|
| 150 |
- fileBytes, err := ioutil.ReadAll(rc) |
|
| 151 |
- if err != nil {
|
|
| 152 |
- return nil |
|
| 153 |
- } |
|
| 154 |
- |
|
| 155 |
- // Note that using / as a separator is okay here - the zip |
|
| 156 |
- // package guarantees that the separator will be / |
|
| 157 |
- if fNameTrimmed[len(fNameTrimmed)-5:] == "_root" {
|
|
| 158 |
- if err = CheckRootKeyIsEncrypted(fileBytes); err != nil {
|
|
| 159 |
- return err |
|
| 160 |
- } |
|
| 161 |
- } |
|
| 162 |
- newKeys[fNameTrimmed] = fileBytes |
|
| 163 |
- } |
|
| 164 |
- |
|
| 165 |
- for keyName, pemBytes := range newKeys {
|
|
| 166 |
- // Get the key role information as well as its data.PrivateKey representation |
|
| 167 |
- _, keyInfo, err := trustmanager.KeyInfoFromPEM(pemBytes, keyName) |
|
| 168 |
- if err != nil {
|
|
| 169 |
- return err |
|
| 170 |
- } |
|
| 171 |
- privKey, err := trustmanager.ParsePEMPrivateKey(pemBytes, "") |
|
| 172 |
- if err != nil {
|
|
| 173 |
- privKey, _, err = trustmanager.GetPasswdDecryptBytes(retriever, pemBytes, "", "imported "+keyInfo.Role) |
|
| 174 |
- if err != nil {
|
|
| 175 |
- return err |
|
| 176 |
- } |
|
| 177 |
- } |
|
| 178 |
- // Add the key to our cryptoservice, will add to the first successful keystore |
|
| 179 |
- if err = cs.AddKey(keyInfo.Role, keyInfo.Gun, privKey); err != nil {
|
|
| 180 |
- return err |
|
| 181 |
- } |
|
| 182 |
- } |
|
| 183 |
- |
|
| 184 |
- return nil |
|
| 185 |
-} |
|
| 186 |
- |
|
| 187 |
-// ExportKeysByGUN exports all keys associated with a specified GUN to an |
|
| 188 |
-// io.Writer in zip format. passphraseRetriever is used to select new passphrases to use to |
|
| 189 |
-// encrypt the keys. |
|
| 190 |
-func (cs *CryptoService) ExportKeysByGUN(dest io.Writer, gun string, passphraseRetriever passphrase.Retriever) error {
|
|
| 191 |
- tempBaseDir, err := ioutil.TempDir("", "notary-key-export-")
|
|
| 192 |
- defer os.RemoveAll(tempBaseDir) |
|
| 193 |
- |
|
| 194 |
- // Create temporary keystore to use as a staging area |
|
| 195 |
- tempKeyStore, err := trustmanager.NewKeyFileStore(tempBaseDir, passphraseRetriever) |
|
| 196 |
- if err != nil {
|
|
| 197 |
- return err |
|
| 198 |
- } |
|
| 199 |
- |
|
| 200 |
- for _, ks := range cs.keyStores {
|
|
| 201 |
- if err := moveKeysByGUN(ks, tempKeyStore, gun); err != nil {
|
|
| 202 |
- return err |
|
| 203 |
- } |
|
| 204 |
- } |
|
| 205 |
- |
|
| 206 |
- zipWriter := zip.NewWriter(dest) |
|
| 207 |
- |
|
| 208 |
- if len(tempKeyStore.ListKeys()) == 0 {
|
|
| 209 |
- return ErrNoKeysFoundForGUN |
|
| 210 |
- } |
|
| 211 |
- |
|
| 212 |
- if err := addKeysToArchive(zipWriter, tempKeyStore); err != nil {
|
|
| 213 |
- return err |
|
| 214 |
- } |
|
| 215 |
- |
|
| 216 |
- zipWriter.Close() |
|
| 217 |
- |
|
| 218 |
- return nil |
|
| 219 |
-} |
|
| 220 |
- |
|
| 221 |
-func moveKeysByGUN(oldKeyStore, newKeyStore trustmanager.KeyStore, gun string) error {
|
|
| 222 |
- for keyID, keyInfo := range oldKeyStore.ListKeys() {
|
|
| 223 |
- // Skip keys that aren't associated with this GUN |
|
| 224 |
- if keyInfo.Gun != gun {
|
|
| 225 |
- continue |
|
| 226 |
- } |
|
| 227 |
- |
|
| 228 |
- privKey, _, err := oldKeyStore.GetKey(keyID) |
|
| 229 |
- if err != nil {
|
|
| 230 |
- return err |
|
| 231 |
- } |
|
| 232 |
- |
|
| 233 |
- err = newKeyStore.AddKey(keyInfo, privKey) |
|
| 234 |
- if err != nil {
|
|
| 235 |
- return err |
|
| 236 |
- } |
|
| 237 |
- } |
|
| 238 |
- |
|
| 239 |
- return nil |
|
| 240 |
-} |
|
| 241 |
- |
|
| 242 |
-func moveKeys(oldKeyStore, newKeyStore trustmanager.KeyStore) error {
|
|
| 243 |
- for keyID, keyInfo := range oldKeyStore.ListKeys() {
|
|
| 244 |
- privateKey, _, err := oldKeyStore.GetKey(keyID) |
|
| 245 |
- if err != nil {
|
|
| 246 |
- return err |
|
| 247 |
- } |
|
| 248 |
- |
|
| 249 |
- err = newKeyStore.AddKey(keyInfo, privateKey) |
|
| 250 |
- |
|
| 251 |
- if err != nil {
|
|
| 252 |
- return err |
|
| 253 |
- } |
|
| 254 |
- } |
|
| 255 |
- |
|
| 256 |
- return nil |
|
| 257 |
-} |
|
| 258 |
- |
|
| 259 |
-func addKeysToArchive(zipWriter *zip.Writer, newKeyStore *trustmanager.KeyFileStore) error {
|
|
| 260 |
- for _, relKeyPath := range newKeyStore.ListFiles() {
|
|
| 261 |
- fullKeyPath, err := newKeyStore.GetPath(relKeyPath) |
|
| 262 |
- if err != nil {
|
|
| 263 |
- return err |
|
| 264 |
- } |
|
| 265 |
- |
|
| 266 |
- fi, err := os.Lstat(fullKeyPath) |
|
| 267 |
- if err != nil {
|
|
| 268 |
- return err |
|
| 269 |
- } |
|
| 270 |
- |
|
| 271 |
- infoHeader, err := zip.FileInfoHeader(fi) |
|
| 272 |
- if err != nil {
|
|
| 273 |
- return err |
|
| 274 |
- } |
|
| 275 |
- |
|
| 276 |
- relPath, err := filepath.Rel(newKeyStore.BaseDir(), fullKeyPath) |
|
| 277 |
- if err != nil {
|
|
| 278 |
- return err |
|
| 279 |
- } |
|
| 280 |
- infoHeader.Name = relPath |
|
| 281 |
- |
|
| 282 |
- zipFileEntryWriter, err := zipWriter.CreateHeader(infoHeader) |
|
| 283 |
- if err != nil {
|
|
| 284 |
- return err |
|
| 285 |
- } |
|
| 286 |
- |
|
| 287 |
- fileContents, err := ioutil.ReadFile(fullKeyPath) |
|
| 288 |
- if err != nil {
|
|
| 289 |
- return err |
|
| 290 |
- } |
|
| 291 |
- |
|
| 292 |
- if _, err = zipFileEntryWriter.Write(fileContents); err != nil {
|
|
| 293 |
- return err |
|
| 294 |
- } |
|
| 295 |
- } |
|
| 296 |
- |
|
| 297 |
- return nil |
|
| 298 |
-} |
|
| 299 |
- |
|
| 300 |
-// CheckRootKeyIsEncrypted makes sure the root key is encrypted. We have |
|
| 301 |
-// internal assumptions that depend on this. |
|
| 302 |
-func CheckRootKeyIsEncrypted(pemBytes []byte) error {
|
|
| 303 |
- block, _ := pem.Decode(pemBytes) |
|
| 304 |
- if block == nil {
|
|
| 305 |
- return ErrNoValidPrivateKey |
|
| 306 |
- } |
|
| 307 |
- |
|
| 308 |
- if !x509.IsEncryptedPEMBlock(block) {
|
|
| 309 |
- return ErrRootKeyNotEncrypted |
|
| 310 |
- } |
|
| 311 |
- |
|
| 312 |
- return nil |
|
| 313 |
-} |
| 314 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,60 @@ |
| 0 |
+version: "2" |
|
| 1 |
+services: |
|
| 2 |
+ server: |
|
| 3 |
+ build: |
|
| 4 |
+ context: . |
|
| 5 |
+ dockerfile: server.Dockerfile |
|
| 6 |
+ networks: |
|
| 7 |
+ mdb: |
|
| 8 |
+ sig: |
|
| 9 |
+ srv: |
|
| 10 |
+ aliases: |
|
| 11 |
+ - notary-server |
|
| 12 |
+ entrypoint: /usr/bin/env sh |
|
| 13 |
+ command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.json" |
|
| 14 |
+ depends_on: |
|
| 15 |
+ - mysql |
|
| 16 |
+ - signer |
|
| 17 |
+ signer: |
|
| 18 |
+ build: |
|
| 19 |
+ context: . |
|
| 20 |
+ dockerfile: signer.Dockerfile |
|
| 21 |
+ networks: |
|
| 22 |
+ mdb: |
|
| 23 |
+ sig: |
|
| 24 |
+ aliases: |
|
| 25 |
+ - notarysigner |
|
| 26 |
+ entrypoint: /usr/bin/env sh |
|
| 27 |
+ command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.json" |
|
| 28 |
+ depends_on: |
|
| 29 |
+ - mysql |
|
| 30 |
+ mysql: |
|
| 31 |
+ networks: |
|
| 32 |
+ - mdb |
|
| 33 |
+ volumes: |
|
| 34 |
+ - ./notarymysql/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d |
|
| 35 |
+ image: mariadb:10.1.10 |
|
| 36 |
+ environment: |
|
| 37 |
+ - TERM=dumb |
|
| 38 |
+ - MYSQL_ALLOW_EMPTY_PASSWORD="true" |
|
| 39 |
+ command: mysqld --innodb_file_per_table |
|
| 40 |
+ client: |
|
| 41 |
+ build: |
|
| 42 |
+ context: . |
|
| 43 |
+ dockerfile: Dockerfile |
|
| 44 |
+ env_file: buildscripts/env.list |
|
| 45 |
+ command: buildscripts/testclient.py |
|
| 46 |
+ volumes: |
|
| 47 |
+ - ./test_output:/test_output |
|
| 48 |
+ networks: |
|
| 49 |
+ - mdb |
|
| 50 |
+ - srv |
|
| 51 |
+ depends_on: |
|
| 52 |
+ - server |
|
| 53 |
+networks: |
|
| 54 |
+ mdb: |
|
| 55 |
+ external: false |
|
| 56 |
+ sig: |
|
| 57 |
+ external: false |
|
| 58 |
+ srv: |
|
| 59 |
+ external: false |
| ... | ... |
@@ -11,8 +11,6 @@ services: |
| 11 | 11 |
links: |
| 12 | 12 |
- rdb-proxy:rdb-proxy.rdb |
| 13 | 13 |
- signer |
| 14 |
- environment: |
|
| 15 |
- - SERVICE_NAME=notary_server |
|
| 16 | 14 |
ports: |
| 17 | 15 |
- "8080" |
| 18 | 16 |
- "4443:4443" |
| ... | ... |
@@ -32,14 +30,12 @@ services: |
| 32 | 32 |
- notarysigner |
| 33 | 33 |
links: |
| 34 | 34 |
- rdb-proxy:rdb-proxy.rdb |
| 35 |
- environment: |
|
| 36 |
- - SERVICE_NAME=notary_signer |
|
| 37 | 35 |
entrypoint: /usr/bin/env sh |
| 38 | 36 |
command: -c "sh migrations/rethink_migrate.sh && notary-signer -config=fixtures/signer-config.rethink.json" |
| 39 | 37 |
depends_on: |
| 40 | 38 |
- rdb-proxy |
| 41 | 39 |
rdb-01: |
| 42 |
- image: jlhawn/rethinkdb:2.3.0 |
|
| 40 |
+ image: jlhawn/rethinkdb:2.3.4 |
|
| 43 | 41 |
volumes: |
| 44 | 42 |
- ./fixtures/rethinkdb:/tls |
| 45 | 43 |
- rdb-01-data:/var/data |
| ... | ... |
@@ -51,7 +47,7 @@ services: |
| 51 | 51 |
- rdb-01.rdb |
| 52 | 52 |
command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem" |
| 53 | 53 |
rdb-02: |
| 54 |
- image: jlhawn/rethinkdb:2.3.0 |
|
| 54 |
+ image: jlhawn/rethinkdb:2.3.4 |
|
| 55 | 55 |
volumes: |
| 56 | 56 |
- ./fixtures/rethinkdb:/tls |
| 57 | 57 |
- rdb-02-data:/var/data |
| ... | ... |
@@ -63,7 +59,7 @@ services: |
| 63 | 63 |
- rdb-02.rdb |
| 64 | 64 |
command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem" |
| 65 | 65 |
rdb-03: |
| 66 |
- image: jlhawn/rethinkdb:2.3.0 |
|
| 66 |
+ image: jlhawn/rethinkdb:2.3.4 |
|
| 67 | 67 |
volumes: |
| 68 | 68 |
- ./fixtures/rethinkdb:/tls |
| 69 | 69 |
- rdb-03-data:/var/data |
| ... | ... |
@@ -75,7 +71,7 @@ services: |
| 75 | 75 |
- rdb-03.rdb |
| 76 | 76 |
command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem" |
| 77 | 77 |
rdb-proxy: |
| 78 |
- image: jlhawn/rethinkdb:2.3.0 |
|
| 78 |
+ image: jlhawn/rethinkdb:2.3.4 |
|
| 79 | 79 |
ports: |
| 80 | 80 |
- "8080:8080" |
| 81 | 81 |
volumes: |
| ... | ... |
@@ -91,16 +87,17 @@ services: |
| 91 | 91 |
- rdb-02 |
| 92 | 92 |
- rdb-03 |
| 93 | 93 |
client: |
| 94 |
+ build: |
|
| 95 |
+ context: . |
|
| 96 |
+ dockerfile: Dockerfile |
|
| 94 | 97 |
volumes: |
| 95 | 98 |
- ./test_output:/test_output |
| 96 | 99 |
networks: |
| 97 | 100 |
- rdb |
| 98 |
- build: |
|
| 99 |
- context: . |
|
| 100 |
- dockerfile: Dockerfile |
|
| 101 |
+ env_file: buildscripts/env.list |
|
| 101 | 102 |
links: |
| 102 | 103 |
- server:notary-server |
| 103 |
- command: buildscripts/testclient.sh |
|
| 104 |
+ command: buildscripts/testclient.py |
|
| 104 | 105 |
volumes: |
| 105 | 106 |
rdb-01-data: |
| 106 | 107 |
external: false |
| ... | ... |
@@ -110,4 +107,4 @@ volumes: |
| 110 | 110 |
external: false |
| 111 | 111 |
networks: |
| 112 | 112 |
rdb: |
| 113 |
- external: false |
|
| 114 | 113 |
\ No newline at end of file |
| 114 |
+ external: false |
| 115 | 115 |
deleted file mode 100644 |
| ... | ... |
@@ -1,36 +0,0 @@ |
| 1 |
-server: |
|
| 2 |
- build: . |
|
| 3 |
- dockerfile: server.Dockerfile |
|
| 4 |
- links: |
|
| 5 |
- - mysql |
|
| 6 |
- - signer |
|
| 7 |
- - signer:notarysigner |
|
| 8 |
- environment: |
|
| 9 |
- - SERVICE_NAME=notary_server |
|
| 10 |
- entrypoint: /usr/bin/env sh |
|
| 11 |
- command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.json" |
|
| 12 |
-signer: |
|
| 13 |
- build: . |
|
| 14 |
- dockerfile: signer.Dockerfile |
|
| 15 |
- links: |
|
| 16 |
- - mysql |
|
| 17 |
- environment: |
|
| 18 |
- - SERVICE_NAME=notary_signer |
|
| 19 |
- entrypoint: /usr/bin/env sh |
|
| 20 |
- command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.json" |
|
| 21 |
-mysql: |
|
| 22 |
- volumes: |
|
| 23 |
- - ./notarymysql/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d |
|
| 24 |
- image: mariadb:10.1.10 |
|
| 25 |
- environment: |
|
| 26 |
- - TERM=dumb |
|
| 27 |
- - MYSQL_ALLOW_EMPTY_PASSWORD="true" |
|
| 28 |
- command: mysqld --innodb_file_per_table |
|
| 29 |
-client: |
|
| 30 |
- volumes: |
|
| 31 |
- - ./test_output:/test_output |
|
| 32 |
- build: . |
|
| 33 |
- dockerfile: Dockerfile |
|
| 34 |
- links: |
|
| 35 |
- - server:notary-server |
|
| 36 |
- command: buildscripts/testclient.sh |
| ... | ... |
@@ -1,7 +1,7 @@ |
| 1 | 1 |
version: "2" |
| 2 | 2 |
services: |
| 3 | 3 |
server: |
| 4 |
- build: |
|
| 4 |
+ build: |
|
| 5 | 5 |
context: . |
| 6 | 6 |
dockerfile: server.Dockerfile |
| 7 | 7 |
volumes: |
| ... | ... |
@@ -11,17 +11,14 @@ services: |
| 11 | 11 |
links: |
| 12 | 12 |
- rdb-proxy:rdb-proxy.rdb |
| 13 | 13 |
- signer |
| 14 |
- environment: |
|
| 15 |
- - SERVICE_NAME=notary_server |
|
| 16 | 14 |
ports: |
| 17 |
- - "8080" |
|
| 18 | 15 |
- "4443:4443" |
| 19 | 16 |
entrypoint: /usr/bin/env sh |
| 20 | 17 |
command: -c "sh migrations/rethink_migrate.sh && notary-server -config=fixtures/server-config.rethink.json" |
| 21 | 18 |
depends_on: |
| 22 | 19 |
- rdb-proxy |
| 23 | 20 |
signer: |
| 24 |
- build: |
|
| 21 |
+ build: |
|
| 25 | 22 |
context: . |
| 26 | 23 |
dockerfile: signer.Dockerfile |
| 27 | 24 |
volumes: |
| ... | ... |
@@ -32,50 +29,47 @@ services: |
| 32 | 32 |
- notarysigner |
| 33 | 33 |
links: |
| 34 | 34 |
- rdb-proxy:rdb-proxy.rdb |
| 35 |
- environment: |
|
| 36 |
- - SERVICE_NAME=notary_signer |
|
| 37 | 35 |
entrypoint: /usr/bin/env sh |
| 38 | 36 |
command: -c "sh migrations/rethink_migrate.sh && notary-signer -config=fixtures/signer-config.rethink.json" |
| 39 | 37 |
depends_on: |
| 40 | 38 |
- rdb-proxy |
| 41 | 39 |
rdb-01: |
| 42 |
- image: jlhawn/rethinkdb:2.3.0 |
|
| 40 |
+ image: jlhawn/rethinkdb:2.3.4 |
|
| 43 | 41 |
volumes: |
| 44 | 42 |
- ./fixtures/rethinkdb:/tls |
| 45 | 43 |
- rdb-01-data:/var/data |
| 46 | 44 |
networks: |
| 47 | 45 |
rdb: |
| 48 | 46 |
aliases: |
| 49 |
- - rdb |
|
| 50 |
- - rdb.rdb |
|
| 51 | 47 |
- rdb-01.rdb |
| 52 |
- command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem" |
|
| 48 |
+ command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem" |
|
| 53 | 49 |
rdb-02: |
| 54 |
- image: jlhawn/rethinkdb:2.3.0 |
|
| 50 |
+ image: jlhawn/rethinkdb:2.3.4 |
|
| 55 | 51 |
volumes: |
| 56 | 52 |
- ./fixtures/rethinkdb:/tls |
| 57 | 53 |
- rdb-02-data:/var/data |
| 58 | 54 |
networks: |
| 59 | 55 |
rdb: |
| 60 | 56 |
aliases: |
| 61 |
- - rdb |
|
| 62 |
- - rdb.rdb |
|
| 63 | 57 |
- rdb-02.rdb |
| 64 |
- command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem" |
|
| 58 |
+ command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb-01 --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem" |
|
| 59 |
+ depends_on: |
|
| 60 |
+ - rdb-01 |
|
| 65 | 61 |
rdb-03: |
| 66 |
- image: jlhawn/rethinkdb:2.3.0 |
|
| 62 |
+ image: jlhawn/rethinkdb:2.3.4 |
|
| 67 | 63 |
volumes: |
| 68 | 64 |
- ./fixtures/rethinkdb:/tls |
| 69 | 65 |
- rdb-03-data:/var/data |
| 70 | 66 |
networks: |
| 71 | 67 |
rdb: |
| 72 | 68 |
aliases: |
| 73 |
- - rdb |
|
| 74 |
- - rdb.rdb |
|
| 75 | 69 |
- rdb-03.rdb |
| 76 |
- command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem" |
|
| 70 |
+ command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb-02 --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem" |
|
| 71 |
+ depends_on: |
|
| 72 |
+ - rdb-01 |
|
| 73 |
+ - rdb-02 |
|
| 77 | 74 |
rdb-proxy: |
| 78 |
- image: jlhawn/rethinkdb:2.3.0 |
|
| 75 |
+ image: jlhawn/rethinkdb:2.3.4 |
|
| 79 | 76 |
ports: |
| 80 | 77 |
- "8080:8080" |
| 81 | 78 |
volumes: |
| ... | ... |
@@ -85,7 +79,7 @@ services: |
| 85 | 85 |
aliases: |
| 86 | 86 |
- rdb-proxy |
| 87 | 87 |
- rdb-proxy.rdp |
| 88 |
- command: "proxy --bind all --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem" |
|
| 88 |
+ command: "proxy --bind all --join rdb-03 --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem" |
|
| 89 | 89 |
depends_on: |
| 90 | 90 |
- rdb-01 |
| 91 | 91 |
- rdb-02 |
| ... | ... |
@@ -1,34 +1,49 @@ |
| 1 |
-server: |
|
| 2 |
- build: . |
|
| 3 |
- dockerfile: server.Dockerfile |
|
| 4 |
- links: |
|
| 5 |
- - mysql |
|
| 6 |
- - signer |
|
| 7 |
- - signer:notarysigner |
|
| 8 |
- environment: |
|
| 9 |
- - SERVICE_NAME=notary_server |
|
| 10 |
- ports: |
|
| 11 |
- - "8080" |
|
| 12 |
- - "4443:4443" |
|
| 13 |
- entrypoint: /usr/bin/env sh |
|
| 14 |
- command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.json" |
|
| 15 |
-signer: |
|
| 16 |
- build: . |
|
| 17 |
- dockerfile: signer.Dockerfile |
|
| 18 |
- links: |
|
| 19 |
- - mysql |
|
| 20 |
- environment: |
|
| 21 |
- - SERVICE_NAME=notary_signer |
|
| 22 |
- entrypoint: /usr/bin/env sh |
|
| 23 |
- command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.json" |
|
| 24 |
-mysql: |
|
| 25 |
- volumes: |
|
| 26 |
- - ./notarymysql/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d |
|
| 27 |
- - notary_data:/var/lib/mysql |
|
| 28 |
- image: mariadb:10.1.10 |
|
| 29 |
- ports: |
|
| 30 |
- - "3306:3306" |
|
| 31 |
- environment: |
|
| 32 |
- - TERM=dumb |
|
| 33 |
- - MYSQL_ALLOW_EMPTY_PASSWORD="true" |
|
| 34 |
- command: mysqld --innodb_file_per_table |
|
| 1 |
+version: "2" |
|
| 2 |
+services: |
|
| 3 |
+ server: |
|
| 4 |
+ build: |
|
| 5 |
+ context: . |
|
| 6 |
+ dockerfile: server.Dockerfile |
|
| 7 |
+ networks: |
|
| 8 |
+ - mdb |
|
| 9 |
+ - sig |
|
| 10 |
+ ports: |
|
| 11 |
+ - "8080" |
|
| 12 |
+ - "4443:4443" |
|
| 13 |
+ entrypoint: /usr/bin/env sh |
|
| 14 |
+ command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.json" |
|
| 15 |
+ depends_on: |
|
| 16 |
+ - mysql |
|
| 17 |
+ - signer |
|
| 18 |
+ signer: |
|
| 19 |
+ build: |
|
| 20 |
+ context: . |
|
| 21 |
+ dockerfile: signer.Dockerfile |
|
| 22 |
+ networks: |
|
| 23 |
+ mdb: |
|
| 24 |
+ sig: |
|
| 25 |
+ aliases: |
|
| 26 |
+ - notarysigner |
|
| 27 |
+ entrypoint: /usr/bin/env sh |
|
| 28 |
+ command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.json" |
|
| 29 |
+ depends_on: |
|
| 30 |
+ - mysql |
|
| 31 |
+ mysql: |
|
| 32 |
+ networks: |
|
| 33 |
+ - mdb |
|
| 34 |
+ volumes: |
|
| 35 |
+ - ./notarymysql/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d |
|
| 36 |
+ - notary_data:/var/lib/mysql |
|
| 37 |
+ image: mariadb:10.1.10 |
|
| 38 |
+ environment: |
|
| 39 |
+ - TERM=dumb |
|
| 40 |
+ - MYSQL_ALLOW_EMPTY_PASSWORD="true" |
|
| 41 |
+ command: mysqld --innodb_file_per_table |
|
| 42 |
+volumes: |
|
| 43 |
+ notary_data: |
|
| 44 |
+ external: false |
|
| 45 |
+networks: |
|
| 46 |
+ mdb: |
|
| 47 |
+ external: false |
|
| 48 |
+ sig: |
|
| 49 |
+ external: false |
| 35 | 50 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,7 @@ |
| 0 |
+package notary |
|
| 1 |
+ |
|
| 2 |
+// PassRetriever is a callback function that should retrieve a passphrase |
|
| 3 |
+// for a given named key. If it should be treated as a new passphrase (e.g. with |
|
| 4 |
+// confirmation), createNew will be true. Attempts is passed in so that implementers can |
|
| 5 |
+// decide how many chances to give to a human, for example. |
|
| 6 |
+type PassRetriever func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error) |
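For illustration, here is a minimal non-interactive implementation of this callback. This is only a sketch: the import path github.com/docker/notary and the NOTARY_TEST_PASSPHRASE variable name are assumptions, not part of this diff.

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/docker/notary"
)

// envRetriever returns a notary.PassRetriever that reads the passphrase from
// an environment variable (NOTARY_TEST_PASSPHRASE is a hypothetical name) and
// gives up after one failed attempt, since a fixed value cannot change.
func envRetriever() notary.PassRetriever {
	return func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
		if attempts > 0 {
			// The same value already failed once; retrying is pointless.
			return "", true, nil
		}
		pass := os.Getenv("NOTARY_TEST_PASSPHRASE")
		if pass == "" {
			return "", true, errors.New("no passphrase available for key " + keyName)
		}
		return pass, false, nil
	}
}

func main() {
	retriever := envRetriever()
	pass, giveup, err := retriever("abc123", "targets", false, 0)
	fmt.Println(pass, giveup, err)
}
```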
| ... | ... |
@@ -8,19 +8,13 @@ import ( |
| 8 | 8 |
"fmt" |
| 9 | 9 |
"io" |
| 10 | 10 |
"os" |
| 11 |
- "strings" |
|
| 12 |
- |
|
| 13 | 11 |
"path/filepath" |
| 12 |
+ "strings" |
|
| 14 | 13 |
|
| 15 | 14 |
"github.com/docker/docker/pkg/term" |
| 15 |
+ "github.com/docker/notary" |
|
| 16 | 16 |
) |
| 17 | 17 |
|
| 18 |
-// Retriever is a callback function that should retrieve a passphrase |
|
| 19 |
-// for a given named key. If it should be treated as new passphrase (e.g. with |
|
| 20 |
-// confirmation), createNew will be true. Attempts is passed in so that implementers |
|
| 21 |
-// decide how many chances to give to a human, for example. |
|
| 22 |
-type Retriever func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error) |
|
| 23 |
- |
|
| 24 | 18 |
const ( |
| 25 | 19 |
idBytesToDisplay = 7 |
| 26 | 20 |
tufRootAlias = "root" |
| ... | ... |
@@ -46,155 +40,165 @@ var ( |
| 46 | 46 |
// ErrTooManyAttempts is returned if the maximum number of passphrase |
| 47 | 47 |
// entry attempts is reached. |
| 48 | 48 |
ErrTooManyAttempts = errors.New("Too many attempts")
|
| 49 |
+ |
|
| 50 |
+ // ErrNoInput is returned if we do not have a valid input method for passphrases |
|
| 51 |
+ ErrNoInput = errors.New("Please either use environment variables or STDIN with a terminal to provide key passphrases")
|
|
| 49 | 52 |
) |
| 50 | 53 |
|
| 51 | 54 |
// PromptRetriever returns a new Retriever which will provide a prompt on stdin |
| 52 |
-// and stdout to retrieve a passphrase. The passphrase will be cached such that |
|
| 55 |
+// and stdout to retrieve a passphrase. stdin will be checked to see if it is a terminal; |
|
| 56 |
+// if not, the PromptRetriever will error when attempting to retrieve a passphrase. |
|
| 57 |
+// Upon successful passphrase retrieval, the passphrase will be cached such that |
|
| 53 | 58 |
// subsequent prompts will produce the same passphrase. |
| 54 |
-func PromptRetriever() Retriever {
|
|
| 59 |
+func PromptRetriever() notary.PassRetriever {
|
|
| 60 |
+ if !term.IsTerminal(os.Stdin.Fd()) {
|
|
| 61 |
+ return func(string, string, bool, int) (string, bool, error) {
|
|
| 62 |
+ return "", false, ErrNoInput |
|
| 63 |
+ } |
|
| 64 |
+ } |
|
| 55 | 65 |
return PromptRetrieverWithInOut(os.Stdin, os.Stdout, nil) |
| 56 | 66 |
} |
| 57 | 67 |
|
| 58 |
-// PromptRetrieverWithInOut returns a new Retriever which will provide a |
|
| 59 |
-// prompt using the given in and out readers. The passphrase will be cached |
|
| 60 |
-// such that subsequent prompts will produce the same passphrase. |
|
| 61 |
-// aliasMap can be used to specify display names for TUF key aliases. If aliasMap |
|
| 62 |
-// is nil, a sensible default will be used. |
|
| 63 |
-func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]string) Retriever {
|
|
| 64 |
- userEnteredTargetsSnapshotsPass := false |
|
| 65 |
- targetsSnapshotsPass := "" |
|
| 66 |
- userEnteredRootsPass := false |
|
| 67 |
- rootsPass := "" |
|
| 68 |
- |
|
| 69 |
- return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) {
|
|
| 70 |
- if alias == tufRootAlias && createNew && numAttempts == 0 {
|
|
| 71 |
- fmt.Fprintln(out, tufRootKeyGenerationWarning) |
|
| 72 |
- } |
|
| 73 |
- if numAttempts > 0 {
|
|
| 74 |
- if !createNew {
|
|
| 75 |
- fmt.Fprintln(out, "Passphrase incorrect. Please retry.") |
|
| 76 |
- } |
|
| 77 |
- } |
|
| 78 |
- |
|
| 79 |
- // Figure out if we should display a different string for this alias |
|
| 80 |
- displayAlias := alias |
|
| 81 |
- if aliasMap != nil {
|
|
| 82 |
- if val, ok := aliasMap[alias]; ok {
|
|
| 83 |
- displayAlias = val |
|
| 84 |
- } |
|
| 68 |
+type boundRetriever struct {
|
|
| 69 |
+ in io.Reader |
|
| 70 |
+ out io.Writer |
|
| 71 |
+ aliasMap map[string]string |
|
| 72 |
+ passphraseCache map[string]string |
|
| 73 |
+} |
|
| 85 | 74 |
|
| 75 |
+func (br *boundRetriever) getPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
|
|
| 76 |
+ if numAttempts == 0 {
|
|
| 77 |
+ if alias == tufRootAlias && createNew {
|
|
| 78 |
+ fmt.Fprintln(br.out, tufRootKeyGenerationWarning) |
|
| 86 | 79 |
} |
| 87 | 80 |
|
| 88 |
- // First, check if we have a password cached for this alias. |
|
| 89 |
- if numAttempts == 0 {
|
|
| 90 |
- if userEnteredTargetsSnapshotsPass && (alias == tufSnapshotAlias || alias == tufTargetsAlias) {
|
|
| 91 |
- return targetsSnapshotsPass, false, nil |
|
| 92 |
- } |
|
| 93 |
- if userEnteredRootsPass && (alias == "root") {
|
|
| 94 |
- return rootsPass, false, nil |
|
| 95 |
- } |
|
| 81 |
+ if pass, ok := br.passphraseCache[alias]; ok {
|
|
| 82 |
+ return pass, false, nil |
|
| 96 | 83 |
} |
| 97 |
- |
|
| 98 |
- if numAttempts > 3 && !createNew {
|
|
| 84 |
+ } else if !createNew { // per `if`, numAttempts > 0 if we're at this `else`
|
|
| 85 |
+ if numAttempts > 3 {
|
|
| 99 | 86 |
return "", true, ErrTooManyAttempts |
| 100 | 87 |
} |
| 88 |
+ fmt.Fprintln(br.out, "Passphrase incorrect. Please retry.") |
|
| 89 |
+ } |
|
| 101 | 90 |
|
| 102 |
- // If typing on the terminal, we do not want the terminal to echo the |
|
| 103 |
- // password that is typed (so it doesn't display) |
|
| 104 |
- if term.IsTerminal(0) {
|
|
| 105 |
- state, err := term.SaveState(0) |
|
| 106 |
- if err != nil {
|
|
| 107 |
- return "", false, err |
|
| 108 |
- } |
|
| 109 |
- term.DisableEcho(0, state) |
|
| 110 |
- defer term.RestoreTerminal(0, state) |
|
| 111 |
- } |
|
| 112 |
- |
|
| 113 |
- stdin := bufio.NewReader(in) |
|
| 91 |
+ // passphrase not cached and we're not aborting, get passphrase from user! |
|
| 92 |
+ return br.requestPassphrase(keyName, alias, createNew, numAttempts) |
|
| 93 |
+} |
|
| 114 | 94 |
|
| 115 |
- indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator)) |
|
| 116 |
- if indexOfLastSeparator == -1 {
|
|
| 117 |
- indexOfLastSeparator = 0 |
|
| 118 |
- } |
|
| 95 |
+func (br *boundRetriever) requestPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
|
|
| 96 |
+ // Figure out if we should display a different string for this alias |
|
| 97 |
+ displayAlias := alias |
|
| 98 |
+ if val, ok := br.aliasMap[alias]; ok {
|
|
| 99 |
+ displayAlias = val |
|
| 100 |
+ } |
|
| 119 | 101 |
|
| 120 |
- var shortName string |
|
| 121 |
- if len(keyName) > indexOfLastSeparator+idBytesToDisplay {
|
|
| 122 |
- if indexOfLastSeparator > 0 {
|
|
| 123 |
- keyNamePrefix := keyName[:indexOfLastSeparator] |
|
| 124 |
- keyNameID := keyName[indexOfLastSeparator+1 : indexOfLastSeparator+idBytesToDisplay+1] |
|
| 125 |
- shortName = keyNameID + " (" + keyNamePrefix + ")"
|
|
| 126 |
- } else {
|
|
| 127 |
- shortName = keyName[indexOfLastSeparator : indexOfLastSeparator+idBytesToDisplay] |
|
| 128 |
- } |
|
| 102 |
+ // If typing on the terminal, we do not want the terminal to echo the |
|
| 103 |
+ // password that is typed (so it doesn't display) |
|
| 104 |
+ if term.IsTerminal(os.Stdin.Fd()) {
|
|
| 105 |
+ state, err := term.SaveState(os.Stdin.Fd()) |
|
| 106 |
+ if err != nil {
|
|
| 107 |
+ return "", false, err |
|
| 129 | 108 |
} |
| 109 |
+ term.DisableEcho(os.Stdin.Fd(), state) |
|
| 110 |
+ defer term.RestoreTerminal(os.Stdin.Fd(), state) |
|
| 111 |
+ } |
|
| 130 | 112 |
|
| 131 |
- withID := fmt.Sprintf(" with ID %s", shortName)
|
|
| 132 |
- if shortName == "" {
|
|
| 133 |
- withID = "" |
|
| 134 |
- } |
|
| 113 |
+ indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator)) |
|
| 114 |
+ if indexOfLastSeparator == -1 {
|
|
| 115 |
+ indexOfLastSeparator = 0 |
|
| 116 |
+ } |
|
| 135 | 117 |
|
| 136 |
- if createNew {
|
|
| 137 |
- fmt.Fprintf(out, "Enter passphrase for new %s key%s: ", displayAlias, withID) |
|
| 138 |
- } else if displayAlias == "yubikey" {
|
|
| 139 |
- fmt.Fprintf(out, "Enter the %s for the attached Yubikey: ", keyName) |
|
| 118 |
+ var shortName string |
|
| 119 |
+ if len(keyName) > indexOfLastSeparator+idBytesToDisplay {
|
|
| 120 |
+ if indexOfLastSeparator > 0 {
|
|
| 121 |
+ keyNamePrefix := keyName[:indexOfLastSeparator] |
|
| 122 |
+ keyNameID := keyName[indexOfLastSeparator+1 : indexOfLastSeparator+idBytesToDisplay+1] |
|
| 123 |
+ shortName = keyNameID + " (" + keyNamePrefix + ")"
|
|
| 140 | 124 |
} else {
|
| 141 |
- fmt.Fprintf(out, "Enter passphrase for %s key%s: ", displayAlias, withID) |
|
| 125 |
+ shortName = keyName[indexOfLastSeparator : indexOfLastSeparator+idBytesToDisplay] |
|
| 142 | 126 |
} |
| 127 |
+ } |
|
| 143 | 128 |
|
| 144 |
- passphrase, err := stdin.ReadBytes('\n')
|
|
| 145 |
- fmt.Fprintln(out) |
|
| 146 |
- if err != nil {
|
|
| 147 |
- return "", false, err |
|
| 148 |
- } |
|
| 129 |
+ withID := fmt.Sprintf(" with ID %s", shortName)
|
|
| 130 |
+ if shortName == "" {
|
|
| 131 |
+ withID = "" |
|
| 132 |
+ } |
|
| 149 | 133 |
|
| 150 |
- retPass := strings.TrimSpace(string(passphrase)) |
|
| 151 |
- |
|
| 152 |
- if !createNew {
|
|
| 153 |
- if alias == tufSnapshotAlias || alias == tufTargetsAlias {
|
|
| 154 |
- userEnteredTargetsSnapshotsPass = true |
|
| 155 |
- targetsSnapshotsPass = retPass |
|
| 156 |
- } |
|
| 157 |
- if alias == tufRootAlias {
|
|
| 158 |
- userEnteredRootsPass = true |
|
| 159 |
- rootsPass = retPass |
|
| 160 |
- } |
|
| 161 |
- return retPass, false, nil |
|
| 162 |
- } |
|
| 134 |
+ switch {
|
|
| 135 |
+ case createNew: |
|
| 136 |
+ fmt.Fprintf(br.out, "Enter passphrase for new %s key%s: ", displayAlias, withID) |
|
| 137 |
+ case displayAlias == "yubikey": |
|
| 138 |
+ fmt.Fprintf(br.out, "Enter the %s for the attached Yubikey: ", keyName) |
|
| 139 |
+ default: |
|
| 140 |
+ fmt.Fprintf(br.out, "Enter passphrase for %s key%s: ", displayAlias, withID) |
|
| 141 |
+ } |
|
| 163 | 142 |
|
| 164 |
- if len(retPass) < 8 {
|
|
| 165 |
- fmt.Fprintln(out, "Passphrase is too short. Please use a password manager to generate and store a good random passphrase.") |
|
| 166 |
- return "", false, ErrTooShort |
|
| 167 |
- } |
|
| 143 |
+ stdin := bufio.NewReader(br.in) |
|
| 144 |
+ passphrase, err := stdin.ReadBytes('\n')
|
|
| 145 |
+ fmt.Fprintln(br.out) |
|
| 146 |
+ if err != nil {
|
|
| 147 |
+ return "", false, err |
|
| 148 |
+ } |
|
| 168 | 149 |
|
| 169 |
- fmt.Fprintf(out, "Repeat passphrase for new %s key%s: ", displayAlias, withID) |
|
| 170 |
- confirmation, err := stdin.ReadBytes('\n')
|
|
| 171 |
- fmt.Fprintln(out) |
|
| 150 |
+ retPass := strings.TrimSpace(string(passphrase)) |
|
| 151 |
+ |
|
| 152 |
+ if createNew {
|
|
| 153 |
+ err = br.verifyAndConfirmPassword(stdin, retPass, displayAlias, withID) |
|
| 172 | 154 |
if err != nil {
|
| 173 | 155 |
return "", false, err |
| 174 | 156 |
} |
| 175 |
- confirmationStr := strings.TrimSpace(string(confirmation)) |
|
| 157 |
+ } |
|
| 176 | 158 |
|
| 177 |
- if retPass != confirmationStr {
|
|
| 178 |
- fmt.Fprintln(out, "Passphrases do not match. Please retry.") |
|
| 179 |
- return "", false, ErrDontMatch |
|
| 180 |
- } |
|
| 159 |
+ br.cachePassword(alias, retPass) |
|
| 181 | 160 |
|
| 182 |
- if alias == tufSnapshotAlias || alias == tufTargetsAlias {
|
|
| 183 |
- userEnteredTargetsSnapshotsPass = true |
|
| 184 |
- targetsSnapshotsPass = retPass |
|
| 185 |
- } |
|
| 186 |
- if alias == tufRootAlias {
|
|
| 187 |
- userEnteredRootsPass = true |
|
| 188 |
- rootsPass = retPass |
|
| 189 |
- } |
|
| 161 |
+ return retPass, false, nil |
|
| 162 |
+} |
|
| 190 | 163 |
|
| 191 |
- return retPass, false, nil |
|
| 164 |
+func (br *boundRetriever) verifyAndConfirmPassword(stdin *bufio.Reader, retPass, displayAlias, withID string) error {
|
|
| 165 |
+ if len(retPass) < 8 {
|
|
| 166 |
+ fmt.Fprintln(br.out, "Passphrase is too short. Please use a password manager to generate and store a good random passphrase.") |
|
| 167 |
+ return ErrTooShort |
|
| 192 | 168 |
} |
| 169 |
+ |
|
| 170 |
+ fmt.Fprintf(br.out, "Repeat passphrase for new %s key%s: ", displayAlias, withID) |
|
| 171 |
+ confirmation, err := stdin.ReadBytes('\n')
|
|
| 172 |
+ fmt.Fprintln(br.out) |
|
| 173 |
+ if err != nil {
|
|
| 174 |
+ return err |
|
| 175 |
+ } |
|
| 176 |
+ confirmationStr := strings.TrimSpace(string(confirmation)) |
|
| 177 |
+ |
|
| 178 |
+ if retPass != confirmationStr {
|
|
| 179 |
+ fmt.Fprintln(br.out, "Passphrases do not match. Please retry.") |
|
| 180 |
+ return ErrDontMatch |
|
| 181 |
+ } |
|
| 182 |
+ return nil |
|
| 183 |
+} |
|
| 184 |
+ |
|
| 185 |
+func (br *boundRetriever) cachePassword(alias, retPass string) {
|
|
| 186 |
+ br.passphraseCache[alias] = retPass |
|
| 187 |
+} |
|
| 188 |
+ |
|
| 189 |
+// PromptRetrieverWithInOut returns a new Retriever which will provide a |
|
| 190 |
+// prompt using the given in and out readers. The passphrase will be cached |
|
| 191 |
+// such that subsequent prompts will produce the same passphrase. |
|
| 192 |
+// aliasMap can be used to specify display names for TUF key aliases. If aliasMap |
|
| 193 |
+// is nil, a sensible default will be used. |
|
| 194 |
+func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]string) notary.PassRetriever {
|
|
| 195 |
+ bound := &boundRetriever{
|
|
| 196 |
+ in: in, |
|
| 197 |
+ out: out, |
|
| 198 |
+ aliasMap: aliasMap, |
|
| 199 |
+ passphraseCache: make(map[string]string), |
|
| 200 |
+ } |
|
| 201 |
+ |
|
| 202 |
+ return bound.getPassphrase |
|
| 193 | 203 |
} |
| 194 | 204 |
|
| 195 | 205 |
// ConstantRetriever returns a new Retriever which will return a constant string |
| 196 | 206 |
// as a passphrase. |
| 197 |
-func ConstantRetriever(constantPassphrase string) Retriever {
|
|
| 207 |
+func ConstantRetriever(constantPassphrase string) notary.PassRetriever {
|
|
| 198 | 208 |
return func(k, a string, c bool, n int) (string, bool, error) {
|
| 199 | 209 |
return constantPassphrase, false, nil |
| 200 | 210 |
} |
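A usage sketch for the refactored retriever, assuming this file lives in the github.com/docker/notary/passphrase package as in the notary repository. It scripts a passphrase (entered twice, since creating a new key requires confirmation) through PromptRetrieverWithInOut and shows the per-alias cache introduced by boundRetriever:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/docker/notary/passphrase"
)

func main() {
	// Simulate a user entering (and confirming) a passphrase for a new
	// targets key; passphrases must be at least 8 characters long.
	in := strings.NewReader("horsebatterystaple\nhorsebatterystaple\n")
	out := &bytes.Buffer{}

	retriever := passphrase.PromptRetrieverWithInOut(in, out, nil)
	pass, giveup, err := retriever("abc123", "targets", true, 0)
	fmt.Printf("pass=%q giveup=%v err=%v\n", pass, giveup, err)

	// A second call for the same alias is served from the passphrase cache.
	cached, _, _ := retriever("abc123", "targets", true, 0)
	fmt.Println(cached == pass) // true
}
```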
| ... | ... |
@@ -1,4 +1,4 @@ |
| 1 |
-FROM golang:1.6.1-alpine |
|
| 1 |
+FROM golang:1.7.1-alpine |
|
| 2 | 2 |
MAINTAINER David Lawrence "david.lawrence@docker.com" |
| 3 | 3 |
|
| 4 | 4 |
RUN apk add --update git gcc libc-dev && rm -rf /var/cache/apk/* |
| ... | ... |
@@ -13,6 +13,7 @@ COPY . /go/src/${NOTARYPKG}
|
| 13 | 13 |
|
| 14 | 14 |
WORKDIR /go/src/${NOTARYPKG}
|
| 15 | 15 |
|
| 16 |
+ENV SERVICE_NAME=notary_server |
|
| 16 | 17 |
EXPOSE 4443 |
| 17 | 18 |
|
| 18 | 19 |
# Install notary-server |
| ... | ... |
@@ -1,4 +1,4 @@ |
| 1 |
-FROM golang:1.6.1-alpine |
|
| 1 |
+FROM golang:1.7.1-alpine |
|
| 2 | 2 |
MAINTAINER David Lawrence "david.lawrence@docker.com" |
| 3 | 3 |
|
| 4 | 4 |
RUN apk add --update git gcc libc-dev && rm -rf /var/cache/apk/* |
| ... | ... |
@@ -13,11 +13,10 @@ COPY . /go/src/${NOTARYPKG}
|
| 13 | 13 |
|
| 14 | 14 |
WORKDIR /go/src/${NOTARYPKG}
|
| 15 | 15 |
|
| 16 |
+ENV SERVICE_NAME=notary_signer |
|
| 16 | 17 |
ENV NOTARY_SIGNER_DEFAULT_ALIAS="timestamp_1" |
| 17 | 18 |
ENV NOTARY_SIGNER_TIMESTAMP_1="testpassword" |
| 18 | 19 |
|
| 19 |
-EXPOSE 4444 |
|
| 20 |
- |
|
| 21 | 20 |
# Install notary-signer |
| 22 | 21 |
RUN go install \ |
| 23 | 22 |
-tags pkcs11 \ |
| 24 | 23 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,22 @@ |
| 0 |
+package storage |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "errors" |
|
| 4 |
+ "fmt" |
|
| 5 |
+) |
|
| 6 |
+ |
|
| 7 |
+var ( |
|
| 8 |
+ // ErrPathOutsideStore indicates that the returned path would be |
|
| 9 |
+ // outside the store |
|
| 10 |
+ ErrPathOutsideStore = errors.New("path outside file store")
|
|
| 11 |
+) |
|
| 12 |
+ |
|
| 13 |
+// ErrMetaNotFound indicates we did not find a particular piece |
|
| 14 |
+// of metadata in the store |
|
| 15 |
+type ErrMetaNotFound struct {
|
|
| 16 |
+ Resource string |
|
| 17 |
+} |
|
| 18 |
+ |
|
| 19 |
+func (err ErrMetaNotFound) Error() string {
|
|
| 20 |
+ return fmt.Sprintf("%s trust data unavailable. Has a notary repository been initialized?", err.Resource)
|
|
| 21 |
+} |
| 0 | 22 |
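Callers can branch on these error types with a type assertion. A sketch, with the import path github.com/docker/notary/storage assumed (the diff shows only the package name):

```go
package main

import (
	"fmt"

	// Assumed import path; the diff shows only "package storage".
	"github.com/docker/notary/storage"
)

// describeStoreError shows how a caller might distinguish the error
// types defined above.
func describeStoreError(err error) string {
	if e, ok := err.(storage.ErrMetaNotFound); ok {
		return fmt.Sprintf("missing trust data for %s; initialize the repository first", e.Resource)
	}
	if err == storage.ErrPathOutsideStore {
		return "refusing to read or write outside the file store"
	}
	return err.Error()
}

func main() {
	fmt.Println(describeStoreError(storage.ErrMetaNotFound{Resource: "root"}))
}
```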
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,222 @@ |
| 0 |
+package storage |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "io" |
|
| 5 |
+ "io/ioutil" |
|
| 6 |
+ "os" |
|
| 7 |
+ "path/filepath" |
|
| 8 |
+ "strings" |
|
| 9 |
+ |
|
| 10 |
+ "github.com/docker/notary" |
|
| 11 |
+) |
|
| 12 |
+ |
|
| 13 |
+// NewFilesystemStore creates a new store in a directory tree |
|
| 14 |
+func NewFilesystemStore(baseDir, subDir, extension string) (*FilesystemStore, error) {
|
|
| 15 |
+ baseDir = filepath.Join(baseDir, subDir) |
|
| 16 |
+ |
|
| 17 |
+ return NewFileStore(baseDir, extension, notary.PrivKeyPerms) |
|
| 18 |
+} |
|
| 19 |
+ |
|
| 20 |
+// NewFileStore creates a fully configurable file store |
|
| 21 |
+func NewFileStore(baseDir, fileExt string, perms os.FileMode) (*FilesystemStore, error) {
|
|
| 22 |
+ baseDir = filepath.Clean(baseDir) |
|
| 23 |
+ if err := createDirectory(baseDir, perms); err != nil {
|
|
| 24 |
+ return nil, err |
|
| 25 |
+ } |
|
| 26 |
+ if !strings.HasPrefix(fileExt, ".") {
|
|
| 27 |
+ fileExt = "." + fileExt |
|
| 28 |
+ } |
|
| 29 |
+ |
|
| 30 |
+ return &FilesystemStore{
|
|
| 31 |
+ baseDir: baseDir, |
|
| 32 |
+ ext: fileExt, |
|
| 33 |
+ perms: perms, |
|
| 34 |
+ }, nil |
|
| 35 |
+} |
|
| 36 |
+ |
|
| 37 |
+// NewSimpleFileStore is a convenience wrapper to create a world readable, |
|
| 38 |
+// owner writeable filestore |
|
| 39 |
+func NewSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) {
|
|
| 40 |
+ return NewFileStore(baseDir, fileExt, notary.PubCertPerms) |
|
| 41 |
+} |
|
| 42 |
+ |
|
| 43 |
+// NewPrivateKeyFileStorage initializes a new filestore for private keys, appending |
|
| 44 |
+// the notary.PrivDir to the baseDir. |
|
| 45 |
+func NewPrivateKeyFileStorage(baseDir, fileExt string) (*FilesystemStore, error) {
|
|
| 46 |
+ baseDir = filepath.Join(baseDir, notary.PrivDir) |
|
| 47 |
+ return NewFileStore(baseDir, fileExt, notary.PrivKeyPerms) |
|
| 48 |
+} |
|
| 49 |
+ |
|
| 50 |
+// NewPrivateSimpleFileStore is a wrapper to create an owner readable/writeable |
|
| 51 |
+// _only_ filestore |
|
| 52 |
+func NewPrivateSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) {
|
|
| 53 |
+ return NewFileStore(baseDir, fileExt, notary.PrivKeyPerms) |
|
| 54 |
+} |
|
| 55 |
+ |
|
| 56 |
+// FilesystemStore is a store in a locally accessible directory |
|
| 57 |
+type FilesystemStore struct {
|
|
| 58 |
+ baseDir string |
|
| 59 |
+ ext string |
|
| 60 |
+ perms os.FileMode |
|
| 61 |
+} |
|
| 62 |
+ |
|
| 63 |
+func (f *FilesystemStore) getPath(name string) (string, error) {
|
|
| 64 |
+ fileName := fmt.Sprintf("%s%s", name, f.ext)
|
|
| 65 |
+ fullPath := filepath.Join(f.baseDir, fileName) |
|
| 66 |
+ |
|
| 67 |
+ if !strings.HasPrefix(fullPath, f.baseDir) {
|
|
| 68 |
+ return "", ErrPathOutsideStore |
|
| 69 |
+ } |
|
| 70 |
+ return fullPath, nil |
|
| 71 |
+} |
|
| 72 |
+ |
|
| 73 |
+// GetSized returns the meta for the given name (a role) up to size bytes |
|
| 74 |
+// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a |
|
| 75 |
+// predefined threshold "notary.MaxDownloadSize". If the file is larger than size |
|
| 76 |
+// we return ErrMaliciousServer for consistency with the HTTPStore |
|
| 77 |
+func (f *FilesystemStore) GetSized(name string, size int64) ([]byte, error) {
|
|
| 78 |
+ p, err := f.getPath(name) |
|
| 79 |
+ if err != nil {
|
|
| 80 |
+ return nil, err |
|
| 81 |
+ } |
|
| 82 |
+ file, err := os.OpenFile(p, os.O_RDONLY, f.perms) |
|
| 83 |
+ if err != nil {
|
|
| 84 |
+ if os.IsNotExist(err) {
|
|
| 85 |
+ err = ErrMetaNotFound{Resource: name}
|
|
| 86 |
+ } |
|
| 87 |
+ return nil, err |
|
| 88 |
+ } |
|
| 89 |
+ defer file.Close() |
|
| 90 |
+ |
|
| 91 |
+ if size == NoSizeLimit {
|
|
| 92 |
+ size = notary.MaxDownloadSize |
|
| 93 |
+ } |
|
| 94 |
+ |
|
| 95 |
+ stat, err := file.Stat() |
|
| 96 |
+ if err != nil {
|
|
| 97 |
+ return nil, err |
|
| 98 |
+ } |
|
| 99 |
+ if stat.Size() > size {
|
|
| 100 |
+ return nil, ErrMaliciousServer{}
|
|
| 101 |
+ } |
|
| 102 |
+ |
|
| 103 |
+ l := io.LimitReader(file, size) |
|
| 104 |
+ return ioutil.ReadAll(l) |
|
| 105 |
+} |
|
| 106 |
+ |
|
| 107 |
+// Get returns the meta for the given name. |
|
| 108 |
+func (f *FilesystemStore) Get(name string) ([]byte, error) {
|
|
| 109 |
+ p, err := f.getPath(name) |
|
| 110 |
+ if err != nil {
|
|
| 111 |
+ return nil, err |
|
| 112 |
+ } |
|
| 113 |
+ meta, err := ioutil.ReadFile(p) |
|
| 114 |
+ if err != nil {
|
|
| 115 |
+ if os.IsNotExist(err) {
|
|
| 116 |
+ err = ErrMetaNotFound{Resource: name}
|
|
| 117 |
+ } |
|
| 118 |
+ return nil, err |
|
| 119 |
+ } |
|
| 120 |
+ return meta, nil |
|
| 121 |
+} |
|
| 122 |
+ |
|
| 123 |
+// SetMulti sets the metadata for multiple roles in one operation |
|
| 124 |
+func (f *FilesystemStore) SetMulti(metas map[string][]byte) error {
|
|
| 125 |
+ for role, blob := range metas {
|
|
| 126 |
+ err := f.Set(role, blob) |
|
| 127 |
+ if err != nil {
|
|
| 128 |
+ return err |
|
| 129 |
+ } |
|
| 130 |
+ } |
|
| 131 |
+ return nil |
|
| 132 |
+} |
|
| 133 |
+ |
|
| 134 |
+// Set sets the meta for a single role |
|
| 135 |
+func (f *FilesystemStore) Set(name string, meta []byte) error {
|
|
| 136 |
+ fp, err := f.getPath(name) |
|
| 137 |
+ if err != nil {
|
|
| 138 |
+ return err |
|
| 139 |
+ } |
|
| 140 |
+ |
|
| 141 |
+ // Ensures the parent directories of the file we are about to write exist |
|
| 142 |
+ err = os.MkdirAll(filepath.Dir(fp), f.perms) |
|
| 143 |
+ if err != nil {
|
|
| 144 |
+ return err |
|
| 145 |
+ } |
|
| 146 |
+ |
|
| 147 |
+ // if something already exists, just delete it and re-write it |
|
| 148 |
+ os.RemoveAll(fp) |
|
| 149 |
+ |
|
| 150 |
+ // Write the file to disk |
|
| 151 |
+ if err = ioutil.WriteFile(fp, meta, f.perms); err != nil {
|
|
| 152 |
+ return err |
|
| 153 |
+ } |
|
| 154 |
+ return nil |
|
| 155 |
+} |
|
| 156 |
+ |
|
| 157 |
+// RemoveAll clears the existing filestore by removing its base directory |
|
| 158 |
+func (f *FilesystemStore) RemoveAll() error {
|
|
| 159 |
+ return os.RemoveAll(f.baseDir) |
|
| 160 |
+} |
|
| 161 |
+ |
|
| 162 |
+// Remove removes the metadata for a single role - if the metadata doesn't |
|
| 163 |
+// exist, no error is returned |
|
| 164 |
+func (f *FilesystemStore) Remove(name string) error {
|
|
| 165 |
+ p, err := f.getPath(name) |
|
| 166 |
+ if err != nil {
|
|
| 167 |
+ return err |
|
| 168 |
+ } |
|
| 169 |
+ return os.RemoveAll(p) // RemoveAll succeeds if path doesn't exist |
|
| 170 |
+} |
|
| 171 |
+ |
|
| 172 |
+// Location returns a human readable name for the storage location |
|
| 173 |
+func (f FilesystemStore) Location() string {
|
|
| 174 |
+ return f.baseDir |
|
| 175 |
+} |
|
| 176 |
+ |
|
| 177 |
+// ListFiles returns a list of all the filenames that can be used with Get* |
|
| 178 |
+// to retrieve content from this filestore |
|
| 179 |
+func (f FilesystemStore) ListFiles() []string {
|
|
| 180 |
+ files := make([]string, 0, 0) |
|
| 181 |
+ filepath.Walk(f.baseDir, func(fp string, fi os.FileInfo, err error) error {
|
|
| 182 |
+ // If there are errors, ignore this particular file |
|
| 183 |
+ if err != nil {
|
|
| 184 |
+ return nil |
|
| 185 |
+ } |
|
| 186 |
+ // Ignore if it is a directory |
|
| 187 |
+ if fi.IsDir() {
|
|
| 188 |
+ return nil |
|
| 189 |
+ } |
|
| 190 |
+ |
|
| 191 |
+ // If this is a symlink, ignore it |
|
| 192 |
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
|
| 193 |
+ return nil |
|
| 194 |
+ } |
|
| 195 |
+ |
|
| 196 |
+ // Only allow matches that end with our certificate extension (e.g. *.crt) |
|
| 197 |
+ matched, _ := filepath.Match("*"+f.ext, fi.Name())
|
|
| 198 |
+ |
|
| 199 |
+ if matched {
|
|
| 200 |
+ // Find the relative path for this file relative to the base path. |
|
| 201 |
+ fp, err = filepath.Rel(f.baseDir, fp) |
|
| 202 |
+ if err != nil {
|
|
| 203 |
+ return err |
|
| 204 |
+ } |
|
| 205 |
+ trimmed := strings.TrimSuffix(fp, f.ext) |
|
| 206 |
+ files = append(files, trimmed) |
|
| 207 |
+ } |
|
| 208 |
+ return nil |
|
| 209 |
+ }) |
|
| 210 |
+ return files |
|
| 211 |
+} |
|
| 212 |
+ |
|
| 213 |
+// createDirectory receives a string of the path to a directory. |
|
| 214 |
+// It does not support passing files, so the caller has to remove |
|
| 215 |
+// the filename by doing filepath.Dir(full_path_to_file) |
|
| 216 |
+func createDirectory(dir string, perms os.FileMode) error {
|
|
| 217 |
+ // This prevents someone passing /path/to/dir and 'dir' not being created |
|
| 218 |
+ // If two '//' exist, MkdirAll deals with it correctly |
|
| 219 |
+ dir = dir + "/" |
|
| 220 |
+ return os.MkdirAll(dir, perms) |
|
| 221 |
+} |
| 0 | 222 |
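A round-trip sketch of the new FilesystemStore, again assuming the package is importable as github.com/docker/notary/storage:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	// Assumed import path; the diff shows only "package storage".
	"github.com/docker/notary/storage"
)

func main() {
	dir, err := ioutil.TempDir("", "tuf-metadata")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// World-readable store whose files all carry a .json extension.
	s, err := storage.NewSimpleFileStore(dir, "json")
	if err != nil {
		panic(err)
	}
	if err := s.Set("root", []byte(`{"signed": {}}`)); err != nil {
		panic(err)
	}

	meta, _ := s.Get("root")   // reads <dir>/root.json
	fmt.Println(string(meta))  // {"signed": {}}
	fmt.Println(s.ListFiles()) // [root]
}
```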
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,339 @@ |
| 0 |
+// A Store that can fetch and set metadata on a remote server. |
|
| 1 |
+// Some API constraints: |
|
| 2 |
+// - Response bodies for error codes should be unmarshallable as: |
|
| 3 |
+// {"errors": [{..., "detail": <serialized validation error>}]}
|
|
| 4 |
+// else validation error details, etc. will be unparsable. The errors |
|
| 5 |
+// should have a github.com/docker/notary/tuf/validation/SerializableError |
|
| 6 |
+// in the Details field. |
|
| 7 |
+// If writing your own server, please have a look at |
|
| 8 |
+// github.com/docker/distribution/registry/api/errcode |
|
| 9 |
+ |
|
| 10 |
+package storage |
|
| 11 |
+ |
|
| 12 |
+import ( |
|
| 13 |
+ "bytes" |
|
| 14 |
+ "encoding/json" |
|
| 15 |
+ "errors" |
|
| 16 |
+ "fmt" |
|
| 17 |
+ "io" |
|
| 18 |
+ "io/ioutil" |
|
| 19 |
+ "mime/multipart" |
|
| 20 |
+ "net/http" |
|
| 21 |
+ "net/url" |
|
| 22 |
+ "path" |
|
| 23 |
+ |
|
| 24 |
+ "github.com/Sirupsen/logrus" |
|
| 25 |
+ "github.com/docker/notary" |
|
| 26 |
+ "github.com/docker/notary/tuf/validation" |
|
| 27 |
+) |
|
| 28 |
+ |
|
| 29 |
+// ErrServerUnavailable indicates an error from the server. code allows us to |
|
| 30 |
+// populate the http error we received |
|
| 31 |
+type ErrServerUnavailable struct {
|
|
| 32 |
+ code int |
|
| 33 |
+} |
|
| 34 |
+ |
|
| 35 |
+// NetworkError represents any kind of network error when attempting to make a request |
|
| 36 |
+type NetworkError struct {
|
|
| 37 |
+ Wrapped error |
|
| 38 |
+} |
|
| 39 |
+ |
|
| 40 |
+func (n NetworkError) Error() string {
|
|
| 41 |
+ return n.Wrapped.Error() |
|
| 42 |
+} |
|
| 43 |
+ |
|
| 44 |
+func (err ErrServerUnavailable) Error() string {
|
|
| 45 |
+ if err.code == 401 {
|
|
| 46 |
+ return fmt.Sprintf("you are not authorized to perform this operation: server returned 401.")
|
|
| 47 |
+ } |
|
| 48 |
+ return fmt.Sprintf("unable to reach trust server at this time: %d.", err.code)
|
|
| 49 |
+} |
|
| 50 |
+ |
|
| 51 |
+// ErrMaliciousServer indicates the server returned a response that is highly suspected |
|
| 52 |
+// of being malicious. i.e. it attempted to send us more data than the known size of a |
|
| 53 |
+// particular role metadata. |
|
| 54 |
+type ErrMaliciousServer struct{}
|
|
| 55 |
+ |
|
| 56 |
+func (err ErrMaliciousServer) Error() string {
|
|
| 57 |
+ return "trust server returned a bad response." |
|
| 58 |
+} |
|
| 59 |
+ |
|
| 60 |
+// ErrInvalidOperation indicates that the server returned a 400 response and |
|
| 61 |
+// propagates any body we received. |
|
| 62 |
+type ErrInvalidOperation struct {
|
|
| 63 |
+ msg string |
|
| 64 |
+} |
|
| 65 |
+ |
|
| 66 |
+func (err ErrInvalidOperation) Error() string {
|
|
| 67 |
+ if err.msg != "" {
|
|
| 68 |
+ return fmt.Sprintf("trust server rejected operation: %s", err.msg)
|
|
| 69 |
+ } |
|
| 70 |
+ return "trust server rejected operation." |
|
| 71 |
+} |
|
| 72 |
+ |
|
| 73 |
+// HTTPStore manages pulling and pushing metadata from and to a remote |
|
| 74 |
+// service over HTTP. It assumes the URL structure of the remote service |
|
| 75 |
+// maps identically to the structure of the TUF repo: |
|
| 76 |
+// <baseURL>/<metaPrefix>/(root|targets|snapshot|timestamp).json |
|
| 77 |
+// <baseURL>/<targetsPrefix>/foo.sh |
|
| 78 |
+// |
|
| 79 |
+// If consistent snapshots are disabled, it is advised that caching is not |
|
| 80 |
+// enabled. Simply set a cachePath (and ensure it's writeable) to enable |
|
| 81 |
+// caching. |
|
| 82 |
+type HTTPStore struct {
|
|
| 83 |
+ baseURL url.URL |
|
| 84 |
+ metaPrefix string |
|
| 85 |
+ metaExtension string |
|
| 86 |
+ keyExtension string |
|
| 87 |
+ roundTrip http.RoundTripper |
|
| 88 |
+} |
|
| 89 |
+ |
|
| 90 |
+// NewHTTPStore initializes a new store against a URL and a number of configuration options |
|
| 91 |
+func NewHTTPStore(baseURL, metaPrefix, metaExtension, keyExtension string, roundTrip http.RoundTripper) (RemoteStore, error) {
|
|
| 92 |
+ base, err := url.Parse(baseURL) |
|
| 93 |
+ if err != nil {
|
|
| 94 |
+ return nil, err |
|
| 95 |
+ } |
|
| 96 |
+ if !base.IsAbs() {
|
|
| 97 |
+ return nil, errors.New("HTTPStore requires an absolute baseURL")
|
|
| 98 |
+ } |
|
| 99 |
+ if roundTrip == nil {
|
|
| 100 |
+ return &OfflineStore{}, nil
|
|
| 101 |
+ } |
|
| 102 |
+ return &HTTPStore{
|
|
| 103 |
+ baseURL: *base, |
|
| 104 |
+ metaPrefix: metaPrefix, |
|
| 105 |
+ metaExtension: metaExtension, |
|
| 106 |
+ keyExtension: keyExtension, |
|
| 107 |
+ roundTrip: roundTrip, |
|
| 108 |
+ }, nil |
|
| 109 |
+} |
|
| 110 |
+ |
|
| 111 |
+func tryUnmarshalError(resp *http.Response, defaultError error) error {
|
|
| 112 |
+ bodyBytes, err := ioutil.ReadAll(resp.Body) |
|
| 113 |
+ if err != nil {
|
|
| 114 |
+ return defaultError |
|
| 115 |
+ } |
|
| 116 |
+ var parsedErrors struct {
|
|
| 117 |
+ Errors []struct {
|
|
| 118 |
+ Detail validation.SerializableError `json:"detail"` |
|
| 119 |
+ } `json:"errors"` |
|
| 120 |
+ } |
|
| 121 |
+ if err := json.Unmarshal(bodyBytes, &parsedErrors); err != nil {
|
|
| 122 |
+ return defaultError |
|
| 123 |
+ } |
|
| 124 |
+ if len(parsedErrors.Errors) != 1 {
|
|
| 125 |
+ return defaultError |
|
| 126 |
+ } |
|
| 127 |
+ err = parsedErrors.Errors[0].Detail.Error |
|
| 128 |
+ if err == nil {
|
|
| 129 |
+ return defaultError |
|
| 130 |
+ } |
|
| 131 |
+ return err |
|
| 132 |
+} |
|
| 133 |
+ |
|
| 134 |
+func translateStatusToError(resp *http.Response, resource string) error {
|
|
| 135 |
+ switch resp.StatusCode {
|
|
| 136 |
+ case http.StatusOK: |
|
| 137 |
+ return nil |
|
| 138 |
+ case http.StatusNotFound: |
|
| 139 |
+ return ErrMetaNotFound{Resource: resource}
|
|
| 140 |
+ case http.StatusBadRequest: |
|
| 141 |
+ return tryUnmarshalError(resp, ErrInvalidOperation{})
|
|
| 142 |
+ default: |
|
| 143 |
+ return ErrServerUnavailable{code: resp.StatusCode}
|
|
| 144 |
+ } |
|
| 145 |
+} |
|
| 146 |
+ |
|
| 147 |
+// GetSized downloads the named meta file with the given size. A short body |
|
| 148 |
+// is acceptable because in the case of timestamp.json, the size is a cap, |
|
| 149 |
+// not an exact length. |
|
| 150 |
+// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a |
|
| 151 |
+// predefined threshold "notary.MaxDownloadSize". |
|
| 152 |
+func (s HTTPStore) GetSized(name string, size int64) ([]byte, error) {
|
|
| 153 |
+ url, err := s.buildMetaURL(name) |
|
| 154 |
+ if err != nil {
|
|
| 155 |
+ return nil, err |
|
| 156 |
+ } |
|
| 157 |
+ req, err := http.NewRequest("GET", url.String(), nil)
|
|
| 158 |
+ if err != nil {
|
|
| 159 |
+ return nil, err |
|
| 160 |
+ } |
|
| 161 |
+ resp, err := s.roundTrip.RoundTrip(req) |
|
| 162 |
+ if err != nil {
|
|
| 163 |
+ return nil, NetworkError{Wrapped: err}
|
|
| 164 |
+ } |
|
| 165 |
+ defer resp.Body.Close() |
|
| 166 |
+ if err := translateStatusToError(resp, name); err != nil {
|
|
| 167 |
+ logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
|
|
| 168 |
+ return nil, err |
|
| 169 |
+ } |
|
| 170 |
+ if size == NoSizeLimit {
|
|
| 171 |
+ size = notary.MaxDownloadSize |
|
| 172 |
+ } |
|
| 173 |
+ if resp.ContentLength > size {
|
|
| 174 |
+ return nil, ErrMaliciousServer{}
|
|
| 175 |
+ } |
|
| 176 |
+ logrus.Debugf("%d when retrieving metadata for %s", resp.StatusCode, name)
|
|
| 177 |
+ b := io.LimitReader(resp.Body, size) |
|
| 178 |
+ body, err := ioutil.ReadAll(b) |
|
| 179 |
+ if err != nil {
|
|
| 180 |
+ return nil, err |
|
| 181 |
+ } |
|
| 182 |
+ return body, nil |
|
| 183 |
+} |
|
| 184 |
+ |
|
| 185 |
+// Set sends a single piece of metadata to the TUF server |
|
| 186 |
+func (s HTTPStore) Set(name string, blob []byte) error {
|
|
| 187 |
+ return s.SetMulti(map[string][]byte{name: blob})
|
|
| 188 |
+} |
|
| 189 |
+ |
|
| 190 |
+// Remove always fails, because we should never be able to delete metadata |
|
| 191 |
+// remotely |
|
| 192 |
+func (s HTTPStore) Remove(name string) error {
|
|
| 193 |
+ return ErrInvalidOperation{msg: "cannot delete individual metadata files"}
|
|
| 194 |
+} |
|
| 195 |
+ |
|
| 196 |
+// NewMultiPartMetaRequest builds a request with the provided metadata updates |
|
| 197 |
+// in multipart form |
|
| 198 |
+func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request, error) {
|
|
| 199 |
+ body := &bytes.Buffer{}
|
|
| 200 |
+ writer := multipart.NewWriter(body) |
|
| 201 |
+ for role, blob := range metas {
|
|
| 202 |
+ part, err := writer.CreateFormFile("files", role)
|
|
| 203 |
+ if err != nil {
|
|
| 204 |
+ return nil, err |
|
| 205 |
+ } |
|
| 206 |
+ _, err = io.Copy(part, bytes.NewBuffer(blob)) |
|
| 207 |
+ if err != nil {
|
|
| 208 |
+ return nil, err |
|
| 209 |
+ } |
|
| 210 |
+ } |
|
| 211 |
+ err := writer.Close() |
|
| 212 |
+ if err != nil {
|
|
| 213 |
+ return nil, err |
|
| 214 |
+ } |
|
| 215 |
+ req, err := http.NewRequest("POST", url, body)
|
|
| 216 |
+ if err != nil {
|
|
| 217 |
+ return nil, err |
|
| 218 |
+ } |
|
| 219 |
+ req.Header.Set("Content-Type", writer.FormDataContentType())
|
|
| 220 |
+ return req, nil |
|
| 221 |
+} |
|
| 222 |
+ |
|
| 223 |
+// SetMulti does a single batch upload of multiple pieces of TUF metadata. |
|
| 224 |
+// This should be preferred for updating a remote server as it enables the server |
|
| 225 |
+// to remain consistent, either accepting or rejecting the complete update. |
|
| 226 |
+func (s HTTPStore) SetMulti(metas map[string][]byte) error {
|
|
| 227 |
+ url, err := s.buildMetaURL("")
|
|
| 228 |
+ if err != nil {
|
|
| 229 |
+ return err |
|
| 230 |
+ } |
|
| 231 |
+ req, err := NewMultiPartMetaRequest(url.String(), metas) |
|
| 232 |
+ if err != nil {
|
|
| 233 |
+ return err |
|
| 234 |
+ } |
|
| 235 |
+ resp, err := s.roundTrip.RoundTrip(req) |
|
| 236 |
+ if err != nil {
|
|
| 237 |
+ return NetworkError{Wrapped: err}
|
|
| 238 |
+ } |
|
| 239 |
+ defer resp.Body.Close() |
|
| 240 |
+ // if this 404s, something is pretty wrong |
|
| 241 |
+ return translateStatusToError(resp, "POST metadata endpoint") |
|
| 242 |
+} |
|
| 243 |
+ |
|
| 244 |
+// RemoveAll will attempt to delete all TUF metadata for a GUN |
|
| 245 |
+func (s HTTPStore) RemoveAll() error {
|
|
| 246 |
+ url, err := s.buildMetaURL("")
|
|
| 247 |
+ if err != nil {
|
|
| 248 |
+ return err |
|
| 249 |
+ } |
|
| 250 |
+ req, err := http.NewRequest("DELETE", url.String(), nil)
|
|
| 251 |
+ if err != nil {
|
|
| 252 |
+ return err |
|
| 253 |
+ } |
|
| 254 |
+ resp, err := s.roundTrip.RoundTrip(req) |
|
| 255 |
+ if err != nil {
|
|
| 256 |
+ return NetworkError{Wrapped: err}
|
|
| 257 |
+ } |
|
| 258 |
+ defer resp.Body.Close() |
|
| 259 |
+ return translateStatusToError(resp, "DELETE metadata for GUN endpoint") |
|
| 260 |
+} |
|
| 261 |
+ |
|
| 262 |
+func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
|
|
| 263 |
+ var filename string |
|
| 264 |
+ if name != "" {
|
|
| 265 |
+ filename = fmt.Sprintf("%s.%s", name, s.metaExtension)
|
|
| 266 |
+ } |
|
| 267 |
+ uri := path.Join(s.metaPrefix, filename) |
|
| 268 |
+ return s.buildURL(uri) |
|
| 269 |
+} |
|
| 270 |
+ |
|
| 271 |
+func (s HTTPStore) buildKeyURL(name string) (*url.URL, error) {
|
|
| 272 |
+ filename := fmt.Sprintf("%s.%s", name, s.keyExtension)
|
|
| 273 |
+ uri := path.Join(s.metaPrefix, filename) |
|
| 274 |
+ return s.buildURL(uri) |
|
| 275 |
+} |
|
| 276 |
+ |
|
| 277 |
+func (s HTTPStore) buildURL(uri string) (*url.URL, error) {
|
|
| 278 |
+ sub, err := url.Parse(uri) |
|
| 279 |
+ if err != nil {
|
|
| 280 |
+ return nil, err |
|
| 281 |
+ } |
|
| 282 |
+ return s.baseURL.ResolveReference(sub), nil |
|
| 283 |
+} |
|
| 284 |
+ |
|
| 285 |
+// GetKey retrieves a public key from the remote server |
|
| 286 |
+func (s HTTPStore) GetKey(role string) ([]byte, error) {
|
|
| 287 |
+ url, err := s.buildKeyURL(role) |
|
| 288 |
+ if err != nil {
|
|
| 289 |
+ return nil, err |
|
| 290 |
+ } |
|
| 291 |
+ req, err := http.NewRequest("GET", url.String(), nil)
|
|
| 292 |
+ if err != nil {
|
|
| 293 |
+ return nil, err |
|
| 294 |
+ } |
|
| 295 |
+ resp, err := s.roundTrip.RoundTrip(req) |
|
| 296 |
+ if err != nil {
|
|
| 297 |
+ return nil, NetworkError{Wrapped: err}
|
|
| 298 |
+ } |
|
| 299 |
+ defer resp.Body.Close() |
|
| 300 |
+ if err := translateStatusToError(resp, role+" key"); err != nil {
|
|
| 301 |
+ return nil, err |
|
| 302 |
+ } |
|
| 303 |
+ body, err := ioutil.ReadAll(resp.Body) |
|
| 304 |
+ if err != nil {
|
|
| 305 |
+ return nil, err |
|
| 306 |
+ } |
|
| 307 |
+ return body, nil |
|
| 308 |
+} |
|
| 309 |
+ |
|
| 310 |
+// RotateKey rotates a private key and returns the public component from the remote server |
|
| 311 |
+func (s HTTPStore) RotateKey(role string) ([]byte, error) {
|
|
| 312 |
+ url, err := s.buildKeyURL(role) |
|
| 313 |
+ if err != nil {
|
|
| 314 |
+ return nil, err |
|
| 315 |
+ } |
|
| 316 |
+ req, err := http.NewRequest("POST", url.String(), nil)
|
|
| 317 |
+ if err != nil {
|
|
| 318 |
+ return nil, err |
|
| 319 |
+ } |
|
| 320 |
+ resp, err := s.roundTrip.RoundTrip(req) |
|
| 321 |
+ if err != nil {
|
|
| 322 |
+ return nil, NetworkError{Wrapped: err}
|
|
| 323 |
+ } |
|
| 324 |
+ defer resp.Body.Close() |
|
| 325 |
+ if err := translateStatusToError(resp, role+" key"); err != nil {
|
|
| 326 |
+ return nil, err |
|
| 327 |
+ } |
|
| 328 |
+ body, err := ioutil.ReadAll(resp.Body) |
|
| 329 |
+ if err != nil {
|
|
| 330 |
+ return nil, err |
|
| 331 |
+ } |
|
| 332 |
+ return body, nil |
|
| 333 |
+} |
|
| 334 |
+ |
|
| 335 |
+// Location returns a human readable name for the storage location |
|
| 336 |
+func (s HTTPStore) Location() string {
|
|
| 337 |
+ return s.baseURL.String() |
|
| 338 |
+} |
| 0 | 339 |
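A construction sketch for HTTPStore against the URL layout documented above. Here notary.example.com and the GUN are placeholders, and the import path is assumed:

```go
package main

import (
	"fmt"
	"net/http"

	// Assumed import path; the diff shows only "package storage".
	"github.com/docker/notary/storage"
)

func main() {
	// notary.example.com is a placeholder; a notary server serves TUF
	// metadata under /v2/<gun>/_trust/tuf/<role>.json.
	remote, err := storage.NewHTTPStore(
		"https://notary.example.com/v2/example/repo/_trust/tuf/",
		"",     // metaPrefix: metadata sits directly under the base URL
		"json", // metaExtension
		"key",  // keyExtension
		http.DefaultTransport,
	)
	if err != nil {
		panic(err)
	}

	// Fetch root.json, capped at notary.MaxDownloadSize bytes.
	root, err := remote.GetSized("root", storage.NoSizeLimit)
	fmt.Println(len(root), err)
}
```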
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,34 @@ |
| 0 |
+package storage |
|
| 1 |
+ |
|
| 2 |
+// NoSizeLimit is represented as -1 for arguments to GetSized |
|
| 3 |
+const NoSizeLimit int64 = -1 |
|
| 4 |
+ |
|
| 5 |
+// MetadataStore must be implemented by anything that intends to interact |
|
| 6 |
+// with a store of TUF files |
|
| 7 |
+type MetadataStore interface {
|
|
| 8 |
+ GetSized(name string, size int64) ([]byte, error) |
|
| 9 |
+ Set(name string, blob []byte) error |
|
| 10 |
+ SetMulti(map[string][]byte) error |
|
| 11 |
+ RemoveAll() error |
|
| 12 |
+ Remove(name string) error |
|
| 13 |
+} |
|
| 14 |
+ |
|
| 15 |
+// PublicKeyStore must be implemented by a key service |
|
| 16 |
+type PublicKeyStore interface {
|
|
| 17 |
+ GetKey(role string) ([]byte, error) |
|
| 18 |
+ RotateKey(role string) ([]byte, error) |
|
| 19 |
+} |
|
| 20 |
+ |
|
| 21 |
+// RemoteStore is similar to LocalStore with the added expectation that it should |
|
| 22 |
+// provide a way to download targets once located |
|
| 23 |
+type RemoteStore interface {
|
|
| 24 |
+ MetadataStore |
|
| 25 |
+ PublicKeyStore |
|
| 26 |
+} |
|
| 27 |
+ |
|
| 28 |
+// Bootstrapper is a thing that can set itself up |
|
| 29 |
+type Bootstrapper interface {
|
|
| 30 |
+ // Bootstrap instructs a configured Bootstrapper to perform |
|
| 31 |
+ // its setup operations. |
|
| 32 |
+ Bootstrap() error |
|
| 33 |
+} |
| 0 | 34 |
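One way to document that the concrete stores in this diff satisfy these interfaces is a set of compile-time assertions; a sketch that would sit inside the same storage package:

```go
package storage

// Compile-time checks (discarded at runtime) that the concrete stores in
// this package satisfy the interfaces above. HTTPStore and OfflineStore
// also implement PublicKeyStore, so they qualify as RemoteStores; the
// file and memory stores are metadata-only.
var (
	_ RemoteStore   = &HTTPStore{}
	_ RemoteStore   = OfflineStore{}
	_ MetadataStore = &FilesystemStore{}
	_ MetadataStore = &MemoryStore{}
)
```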
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,124 @@ |
| 0 |
+package storage |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "crypto/sha256" |
|
| 4 |
+ |
|
| 5 |
+ "github.com/docker/notary" |
|
| 6 |
+ "github.com/docker/notary/tuf/utils" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+// NewMemoryStore returns a MetadataStore that operates entirely in memory. |
|
| 10 |
+// Very useful for testing |
|
| 11 |
+func NewMemoryStore(initial map[string][]byte) *MemoryStore {
|
|
| 12 |
+ var consistent = make(map[string][]byte) |
|
| 13 |
+ if initial == nil {
|
|
| 14 |
+ initial = make(map[string][]byte) |
|
| 15 |
+ } else {
|
|
| 16 |
+ // add all seed meta to consistent |
|
| 17 |
+ for name, data := range initial {
|
|
| 18 |
+ checksum := sha256.Sum256(data) |
|
| 19 |
+ path := utils.ConsistentName(name, checksum[:]) |
|
| 20 |
+ consistent[path] = data |
|
| 21 |
+ } |
|
| 22 |
+ } |
|
| 23 |
+ return &MemoryStore{
|
|
| 24 |
+ data: initial, |
|
| 25 |
+ consistent: consistent, |
|
| 26 |
+ } |
|
| 27 |
+} |
|
| 28 |
+ |
|
| 29 |
+// MemoryStore implements a mock RemoteStore entirely in memory. |
|
| 30 |
+// For testing purposes only. |
|
| 31 |
+type MemoryStore struct {
|
|
| 32 |
+ data map[string][]byte |
|
| 33 |
+ consistent map[string][]byte |
|
| 34 |
+} |
|
| 35 |
+ |
|
| 36 |
+// GetSized returns up to size bytes of data referenced by name. |
|
| 37 |
+// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a |
|
| 38 |
+// predefined threshold "notary.MaxDownloadSize", as we will always know the |
|
| 39 |
+// size for everything but a timestamp and sometimes a root, |
|
| 40 |
+// neither of which should be exceptionally large |
|
| 41 |
+func (m MemoryStore) GetSized(name string, size int64) ([]byte, error) {
|
|
| 42 |
+ d, ok := m.data[name] |
|
| 43 |
+ if ok {
|
|
| 44 |
+ if size == NoSizeLimit {
|
|
| 45 |
+ size = notary.MaxDownloadSize |
|
| 46 |
+ } |
|
| 47 |
+ if int64(len(d)) < size {
|
|
| 48 |
+ return d, nil |
|
| 49 |
+ } |
|
| 50 |
+ return d[:size], nil |
|
| 51 |
+ } |
|
| 52 |
+ d, ok = m.consistent[name] |
|
| 53 |
+ if ok {
|
|
| 54 |
+ if int64(len(d)) < size {
|
|
| 55 |
+ return d, nil |
|
| 56 |
+ } |
|
| 57 |
+ return d[:size], nil |
|
| 58 |
+ } |
|
| 59 |
+ return nil, ErrMetaNotFound{Resource: name}
|
|
| 60 |
+} |
|
| 61 |
+ |
|
| 62 |
+// Get returns the data associated with name |
|
| 63 |
+func (m MemoryStore) Get(name string) ([]byte, error) {
|
|
| 64 |
+ if d, ok := m.data[name]; ok {
|
|
| 65 |
+ return d, nil |
|
| 66 |
+ } |
|
| 67 |
+ if d, ok := m.consistent[name]; ok {
|
|
| 68 |
+ return d, nil |
|
| 69 |
+ } |
|
| 70 |
+ return nil, ErrMetaNotFound{Resource: name}
|
|
| 71 |
+} |
|
| 72 |
+ |
|
| 73 |
+// Set sets the metadata value for the given name |
|
| 74 |
+func (m *MemoryStore) Set(name string, meta []byte) error {
|
|
| 75 |
+ m.data[name] = meta |
|
| 76 |
+ |
|
| 77 |
+ checksum := sha256.Sum256(meta) |
|
| 78 |
+ path := utils.ConsistentName(name, checksum[:]) |
|
| 79 |
+ m.consistent[path] = meta |
|
| 80 |
+ return nil |
|
| 81 |
+} |
|
| 82 |
+ |
|
| 83 |
+// SetMulti sets multiple pieces of metadata for multiple names |
|
| 84 |
+// in a single operation. |
|
| 85 |
+func (m *MemoryStore) SetMulti(metas map[string][]byte) error {
|
|
| 86 |
+ for role, blob := range metas {
|
|
| 87 |
+ m.Set(role, blob) |
|
| 88 |
+ } |
|
| 89 |
+ return nil |
|
| 90 |
+} |
|
| 91 |
+ |
|
| 92 |
+// Remove removes the metadata for a single role - if the metadata doesn't |
|
| 93 |
+// exist, no error is returned |
|
| 94 |
+func (m *MemoryStore) Remove(name string) error {
|
|
| 95 |
+ if meta, ok := m.data[name]; ok {
|
|
| 96 |
+ checksum := sha256.Sum256(meta) |
|
| 97 |
+ path := utils.ConsistentName(name, checksum[:]) |
|
| 98 |
+ delete(m.data, name) |
|
| 99 |
+ delete(m.consistent, path) |
|
| 100 |
+ } |
|
| 101 |
+ return nil |
|
| 102 |
+} |
|
| 103 |
+ |
|
| 104 |
+// RemoveAll clears the existing memory store by setting this store to a new empty one |
|
| 105 |
+func (m *MemoryStore) RemoveAll() error {
|
|
| 106 |
+ *m = *NewMemoryStore(nil) |
|
| 107 |
+ return nil |
|
| 108 |
+} |
|
| 109 |
+ |
|
| 110 |
+// Location provides a human readable name for the storage location |
|
| 111 |
+func (m MemoryStore) Location() string {
|
|
| 112 |
+ return "memory" |
|
| 113 |
+} |
|
| 114 |
+ |
|
| 115 |
+// ListFiles returns a list of all files. The names returned should be |
|
| 116 |
+// usable with Get directly, with no modification. |
|
| 117 |
+func (m *MemoryStore) ListFiles() []string {
|
|
| 118 |
+ names := make([]string, 0, len(m.data)) |
|
| 119 |
+ for n := range m.data {
|
|
| 120 |
+ names = append(names, n) |
|
| 121 |
+ } |
|
| 122 |
+ return names |
|
| 123 |
+} |
| 0 | 124 |
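A usage sketch for MemoryStore showing seeding and the size-capping behavior of GetSized (import path assumed as before):

```go
package main

import (
	"fmt"

	// Assumed import path; the diff shows only "package storage".
	"github.com/docker/notary/storage"
)

func main() {
	// Seed the store; consistent (checksum-suffixed) names for the seed
	// data are computed automatically in NewMemoryStore.
	s := storage.NewMemoryStore(map[string][]byte{
		"timestamp": []byte(`{"signed": {"version": 1}}`),
	})

	meta, err := s.GetSized("timestamp", storage.NoSizeLimit)
	fmt.Println(string(meta), err)

	// Requesting fewer bytes than are stored truncates the result.
	head, _ := s.GetSized("timestamp", 4)
	fmt.Println(string(head)) // {"si
}
```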
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,54 @@ |
| 0 |
+package storage |
|
| 1 |
+ |
|
| 2 |
+// ErrOffline is used to indicate we are operating offline |
|
| 3 |
+type ErrOffline struct{}
|
|
| 4 |
+ |
|
| 5 |
+func (e ErrOffline) Error() string {
|
|
| 6 |
+ return "client is offline" |
|
| 7 |
+} |
|
| 8 |
+ |
|
| 9 |
+var err = ErrOffline{}
|
|
| 10 |
+ |
|
| 11 |
+// OfflineStore is to be used as a placeholder for a nil store. It simply |
|
| 12 |
+// returns ErrOffline for every operation |
|
| 13 |
+type OfflineStore struct{}
|
|
| 14 |
+ |
|
| 15 |
+// GetSized returns ErrOffline |
|
| 16 |
+func (es OfflineStore) GetSized(name string, size int64) ([]byte, error) {
|
|
| 17 |
+ return nil, err |
|
| 18 |
+} |
|
| 19 |
+ |
|
| 20 |
+// Set returns ErrOffline |
|
| 21 |
+func (es OfflineStore) Set(name string, blob []byte) error {
|
|
| 22 |
+ return err |
|
| 23 |
+} |
|
| 24 |
+ |
|
| 25 |
+// SetMulti returns ErrOffline |
|
| 26 |
+func (es OfflineStore) SetMulti(map[string][]byte) error {
|
|
| 27 |
+ return err |
|
| 28 |
+} |
|
| 29 |
+ |
|
| 30 |
+// Remove returns ErrOffline |
|
| 31 |
+func (es OfflineStore) Remove(name string) error {
|
|
| 32 |
+ return err |
|
| 33 |
+} |
|
| 34 |
+ |
|
| 35 |
+// GetKey returns ErrOffline |
|
| 36 |
+func (es OfflineStore) GetKey(role string) ([]byte, error) {
|
|
| 37 |
+ return nil, err |
|
| 38 |
+} |
|
| 39 |
+ |
|
| 40 |
+// RotateKey returns ErrOffline |
|
| 41 |
+func (es OfflineStore) RotateKey(role string) ([]byte, error) {
|
|
| 42 |
+ return nil, err |
|
| 43 |
+} |
|
| 44 |
+ |
|
| 45 |
+// RemoveAll returns ErrOffline |
|
| 46 |
+func (es OfflineStore) RemoveAll() error {
|
|
| 47 |
+ return err |
|
| 48 |
+} |
|
| 49 |
+ |
|
| 50 |
+// Location returns a human readable name for the storage location |
|
| 51 |
+func (es OfflineStore) Location() string {
|
|
| 52 |
+ return "offline" |
|
| 53 |
+} |
| 0 | 54 |
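OfflineStore gives callers a concrete placeholder to hold instead of a nil store when no remote is configured, so they can branch on the error type rather than nil-check. A short sketch under that reading (the main wrapper is illustrative):

    package main

    import (
        "fmt"

        "github.com/docker/notary/storage"
    )

    func main() {
        var remote storage.OfflineStore

        // Every operation fails uniformly with ErrOffline.
        _, err := remote.GetSized("root", -1)
        if _, ok := err.(storage.ErrOffline); ok {
            fmt.Println("working offline:", err) // client is offline
        }
    }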
deleted file mode 100644 |
| ... | ... |
@@ -1,150 +0,0 @@ |
| 1 |
-package trustmanager |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "fmt" |
|
| 5 |
- "io/ioutil" |
|
| 6 |
- "os" |
|
| 7 |
- "path/filepath" |
|
| 8 |
- "strings" |
|
| 9 |
-) |
|
| 10 |
- |
|
| 11 |
-// SimpleFileStore implements FileStore |
|
| 12 |
-type SimpleFileStore struct {
|
|
| 13 |
- baseDir string |
|
| 14 |
- fileExt string |
|
| 15 |
- perms os.FileMode |
|
| 16 |
-} |
|
| 17 |
- |
|
| 18 |
-// NewFileStore creates a fully configurable file store |
|
| 19 |
-func NewFileStore(baseDir, fileExt string, perms os.FileMode) (*SimpleFileStore, error) {
|
|
| 20 |
- baseDir = filepath.Clean(baseDir) |
|
| 21 |
- if err := createDirectory(baseDir, perms); err != nil {
|
|
| 22 |
- return nil, err |
|
| 23 |
- } |
|
| 24 |
- if !strings.HasPrefix(fileExt, ".") {
|
|
| 25 |
- fileExt = "." + fileExt |
|
| 26 |
- } |
|
| 27 |
- |
|
| 28 |
- return &SimpleFileStore{
|
|
| 29 |
- baseDir: baseDir, |
|
| 30 |
- fileExt: fileExt, |
|
| 31 |
- perms: perms, |
|
| 32 |
- }, nil |
|
| 33 |
-} |
|
| 34 |
- |
|
| 35 |
-// NewSimpleFileStore is a convenience wrapper to create a world readable, |
|
| 36 |
-// owner writeable filestore |
|
| 37 |
-func NewSimpleFileStore(baseDir, fileExt string) (*SimpleFileStore, error) {
|
|
| 38 |
- return NewFileStore(baseDir, fileExt, visible) |
|
| 39 |
-} |
|
| 40 |
- |
|
| 41 |
-// NewPrivateSimpleFileStore is a wrapper to create an owner readable/writeable |
|
| 42 |
-// _only_ filestore |
|
| 43 |
-func NewPrivateSimpleFileStore(baseDir, fileExt string) (*SimpleFileStore, error) {
|
|
| 44 |
- return NewFileStore(baseDir, fileExt, private) |
|
| 45 |
-} |
|
| 46 |
- |
|
| 47 |
-// Add writes data to a file with a given name |
|
| 48 |
-func (f *SimpleFileStore) Add(name string, data []byte) error {
|
|
| 49 |
- filePath, err := f.GetPath(name) |
|
| 50 |
- if err != nil {
|
|
| 51 |
- return err |
|
| 52 |
- } |
|
| 53 |
- createDirectory(filepath.Dir(filePath), f.perms) |
|
| 54 |
- return ioutil.WriteFile(filePath, data, f.perms) |
|
| 55 |
-} |
|
| 56 |
- |
|
| 57 |
-// Remove removes a file identified by name |
|
| 58 |
-func (f *SimpleFileStore) Remove(name string) error {
|
|
| 59 |
- // Attempt to remove |
|
| 60 |
- filePath, err := f.GetPath(name) |
|
| 61 |
- if err != nil {
|
|
| 62 |
- return err |
|
| 63 |
- } |
|
| 64 |
- return os.Remove(filePath) |
|
| 65 |
-} |
|
| 66 |
- |
|
| 67 |
-// Get returns the data given a file name |
|
| 68 |
-func (f *SimpleFileStore) Get(name string) ([]byte, error) {
|
|
| 69 |
- filePath, err := f.GetPath(name) |
|
| 70 |
- if err != nil {
|
|
| 71 |
- return nil, err |
|
| 72 |
- } |
|
| 73 |
- data, err := ioutil.ReadFile(filePath) |
|
| 74 |
- if err != nil {
|
|
| 75 |
- return nil, err |
|
| 76 |
- } |
|
| 77 |
- |
|
| 78 |
- return data, nil |
|
| 79 |
-} |
|
| 80 |
- |
|
| 81 |
-// GetPath returns the full final path of a file with a given name |
|
| 82 |
-func (f *SimpleFileStore) GetPath(name string) (string, error) {
|
|
| 83 |
- fileName := f.genFileName(name) |
|
| 84 |
- fullPath := filepath.Clean(filepath.Join(f.baseDir, fileName)) |
|
| 85 |
- |
|
| 86 |
- if !strings.HasPrefix(fullPath, f.baseDir) {
|
|
| 87 |
- return "", ErrPathOutsideStore |
|
| 88 |
- } |
|
| 89 |
- return fullPath, nil |
|
| 90 |
-} |
|
| 91 |
- |
|
| 92 |
-// ListFiles lists all the files inside of a store |
|
| 93 |
-func (f *SimpleFileStore) ListFiles() []string {
|
|
| 94 |
- return f.list(f.baseDir) |
|
| 95 |
-} |
|
| 96 |
- |
|
| 97 |
-// list lists all the files in a directory given a full path. Ignores symlinks. |
|
| 98 |
-func (f *SimpleFileStore) list(path string) []string {
|
|
| 99 |
- files := make([]string, 0, 0) |
|
| 100 |
- filepath.Walk(path, func(fp string, fi os.FileInfo, err error) error {
|
|
| 101 |
- // If there are errors, ignore this particular file |
|
| 102 |
- if err != nil {
|
|
| 103 |
- return nil |
|
| 104 |
- } |
|
| 105 |
- // Ignore if it is a directory |
|
| 106 |
- if fi.IsDir() {
|
|
| 107 |
- return nil |
|
| 108 |
- } |
|
| 109 |
- |
|
| 110 |
- // If this is a symlink, ignore it |
|
| 111 |
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
|
| 112 |
- return nil |
|
| 113 |
- } |
|
| 114 |
- |
|
| 115 |
- // Only allow matches that end with our certificate extension (e.g. *.crt) |
|
| 116 |
- matched, _ := filepath.Match("*"+f.fileExt, fi.Name())
|
|
| 117 |
- |
|
| 118 |
- if matched {
|
|
| 119 |
- // Find the relative path for this file relative to the base path. |
|
| 120 |
- fp, err = filepath.Rel(path, fp) |
|
| 121 |
- if err != nil {
|
|
| 122 |
- return err |
|
| 123 |
- } |
|
| 124 |
- trimmed := strings.TrimSuffix(fp, f.fileExt) |
|
| 125 |
- files = append(files, trimmed) |
|
| 126 |
- } |
|
| 127 |
- return nil |
|
| 128 |
- }) |
|
| 129 |
- return files |
|
| 130 |
-} |
|
| 131 |
- |
|
| 132 |
-// genFileName returns the name using the right extension |
|
| 133 |
-func (f *SimpleFileStore) genFileName(name string) string {
|
|
| 134 |
- return fmt.Sprintf("%s%s", name, f.fileExt)
|
|
| 135 |
-} |
|
| 136 |
- |
|
| 137 |
-// BaseDir returns the base directory of the filestore |
|
| 138 |
-func (f *SimpleFileStore) BaseDir() string {
|
|
| 139 |
- return f.baseDir |
|
| 140 |
-} |
|
| 141 |
- |
|
| 142 |
-// createDirectory receives a string of the path to a directory. |
|
| 143 |
-// It does not support passing files, so the caller has to remove |
|
| 144 |
-// the filename by doing filepath.Dir(full_path_to_file) |
|
| 145 |
-func createDirectory(dir string, perms os.FileMode) error {
|
|
| 146 |
- // This prevents someone passing /path/to/dir and 'dir' not being created |
|
| 147 |
- // If two '//' exist, MkdirAll deals it with correctly |
|
| 148 |
- dir = dir + "/" |
|
| 149 |
- return os.MkdirAll(dir, perms) |
|
| 150 |
-} |
| 151 | 1 |
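Worth noting before this file disappears: the load-bearing part of SimpleFileStore was the GetPath prefix check after cleaning, which is what blocked path traversal out of baseDir. The same guard in isolation, as a standalone sketch (the function and error names here are illustrative, not part of the deleted file):

    package main

    import (
        "errors"
        "fmt"
        "path/filepath"
        "strings"
    )

    var errPathOutsideStore = errors.New("path outside file store")

    // safePath mirrors the deleted GetPath logic: join, clean, then verify
    // the result still lives under the store's base directory.
    func safePath(baseDir, name, ext string) (string, error) {
        full := filepath.Clean(filepath.Join(baseDir, name+ext))
        if !strings.HasPrefix(full, filepath.Clean(baseDir)) {
            return "", errPathOutsideStore
        }
        return full, nil
    }

    func main() {
        fmt.Println(safePath("/tmp/store", "root", ".key"))         // allowed
        fmt.Println(safePath("/tmp/store", "../../etc/passwd", "")) // rejected
    }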
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,82 @@ |
| 0 |
+package trustmanager |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ |
|
| 5 |
+ "github.com/docker/notary/tuf/data" |
|
| 6 |
+) |
|
| 7 |
+ |
|
| 8 |
+// Storage implements the bare bones primitives (no hierarchy) |
|
| 9 |
+type Storage interface {
|
|
| 10 |
+ // Set writes a file to the specified location, returning an error if this |
|
| 11 |
+ // is not possible (reasons may include permissions errors). The path is cleaned |
|
| 12 |
+ // before being made absolute against the store's base dir. |
|
| 13 |
+ Set(fileName string, data []byte) error |
|
| 14 |
+ |
|
| 15 |
+ // Remove deletes a file from the store relative to the store's base directory. |
|
| 16 |
+ // The path is cleaned before being made absolute to ensure no path traversal |
|
| 17 |
+ // outside the base directory is possible. |
|
| 18 |
+ Remove(fileName string) error |
|
| 19 |
+ |
|
| 20 |
+ // Get returns the file content found at fileName relative to the base directory |
|
| 21 |
+ // of the file store. The path is cleaned before being made absolute to ensure |
|
| 22 |
+ // path traversal outside the store is not possible. If the file is not found |
|
| 23 |
+ // an error to that effect is returned. |
|
| 24 |
+ Get(fileName string) ([]byte, error) |
|
| 25 |
+ |
|
| 26 |
+ // ListFiles returns a list of paths relative to the base directory of the |
|
| 27 |
+ // filestore. Any of these paths must be retrievable via the |
|
| 28 |
+ // Storage.Get method. |
|
| 29 |
+ ListFiles() []string |
|
| 30 |
+ |
|
| 31 |
+ // Location returns a human readable name indicating where the implementer |
|
| 32 |
+ // is storing keys |
|
| 33 |
+ Location() string |
|
| 34 |
+} |
|
| 35 |
+ |
|
| 36 |
+// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key |
|
| 37 |
+type ErrAttemptsExceeded struct{}
|
|
| 38 |
+ |
|
| 39 |
+// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key |
|
| 40 |
+func (err ErrAttemptsExceeded) Error() string {
|
|
| 41 |
+ return "maximum number of passphrase attempts exceeded" |
|
| 42 |
+} |
|
| 43 |
+ |
|
| 44 |
+// ErrPasswordInvalid is returned when signing fails. It could also mean the signing |
|
| 45 |
+// key file was corrupted, but we have no way to distinguish. |
|
| 46 |
+type ErrPasswordInvalid struct{}
|
|
| 47 |
+ |
|
| 48 |
+// ErrPasswordInvalid is returned when signing fails. It could also mean the signing |
|
| 49 |
+// key file was corrupted, but we have no way to distinguish. |
|
| 50 |
+func (err ErrPasswordInvalid) Error() string {
|
|
| 51 |
+ return "password invalid, operation has failed." |
|
| 52 |
+} |
|
| 53 |
+ |
|
| 54 |
+// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. |
|
| 55 |
+type ErrKeyNotFound struct {
|
|
| 56 |
+ KeyID string |
|
| 57 |
+} |
|
| 58 |
+ |
|
| 59 |
+// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. |
|
| 60 |
+func (err ErrKeyNotFound) Error() string {
|
|
| 61 |
+ return fmt.Sprintf("signing key not found: %s", err.KeyID)
|
|
| 62 |
+} |
|
| 63 |
+ |
|
| 64 |
+// KeyStore is a generic interface for private key storage |
|
| 65 |
+type KeyStore interface {
|
|
| 66 |
+ // AddKey adds a key to the KeyStore, and if the key already exists, |
|
| 67 |
+ // succeeds. Otherwise, returns an error if it cannot add. |
|
| 68 |
+ AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error |
|
| 69 |
+ // Should fail with ErrKeyNotFound if the keystore is operating normally |
|
| 70 |
+ // and knows that it does not store the requested key. |
|
| 71 |
+ GetKey(keyID string) (data.PrivateKey, string, error) |
|
| 72 |
+ GetKeyInfo(keyID string) (KeyInfo, error) |
|
| 73 |
+ ListKeys() map[string]KeyInfo |
|
| 74 |
+ RemoveKey(keyID string) error |
|
| 75 |
+ Name() string |
|
| 76 |
+} |
|
| 77 |
+ |
|
| 78 |
+type cachedKey struct {
|
|
| 79 |
+ alias string |
|
| 80 |
+ key data.PrivateKey |
|
| 81 |
+} |
| 0 | 82 |
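Any backend satisfying the five Storage methods above can sit underneath the key stores defined later in this diff. A toy map-backed implementation makes the contract concrete (mapStorage is illustrative; the real in-memory backend is storage.MemoryStore):

    package trustmanager

    import "os"

    // mapStorage is a minimal, non-concurrent Storage implementation.
    type mapStorage map[string][]byte

    func (m mapStorage) Set(name string, data []byte) error { m[name] = data; return nil }

    func (m mapStorage) Remove(name string) error { delete(m, name); return nil }

    func (m mapStorage) Get(name string) ([]byte, error) {
        if d, ok := m[name]; ok {
            return d, nil
        }
        return nil, os.ErrNotExist
    }

    func (m mapStorage) ListFiles() []string {
        names := make([]string, 0, len(m))
        for n := range m {
            names = append(names, n)
        }
        return names
    }

    func (m mapStorage) Location() string { return "map (example)" }

    // Compile-time check that mapStorage satisfies Storage.
    var _ Storage = mapStorage{}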
deleted file mode 100644 |
| ... | ... |
@@ -1,497 +0,0 @@ |
| 1 |
-package trustmanager |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "encoding/pem" |
|
| 5 |
- "fmt" |
|
| 6 |
- "path/filepath" |
|
| 7 |
- "strings" |
|
| 8 |
- "sync" |
|
| 9 |
- |
|
| 10 |
- "github.com/Sirupsen/logrus" |
|
| 11 |
- "github.com/docker/notary" |
|
| 12 |
- "github.com/docker/notary/passphrase" |
|
| 13 |
- "github.com/docker/notary/tuf/data" |
|
| 14 |
-) |
|
| 15 |
- |
|
| 16 |
-type keyInfoMap map[string]KeyInfo |
|
| 17 |
- |
|
| 18 |
-// KeyFileStore persists and manages private keys on disk |
|
| 19 |
-type KeyFileStore struct {
|
|
| 20 |
- sync.Mutex |
|
| 21 |
- SimpleFileStore |
|
| 22 |
- passphrase.Retriever |
|
| 23 |
- cachedKeys map[string]*cachedKey |
|
| 24 |
- keyInfoMap |
|
| 25 |
-} |
|
| 26 |
- |
|
| 27 |
-// KeyMemoryStore manages private keys in memory |
|
| 28 |
-type KeyMemoryStore struct {
|
|
| 29 |
- sync.Mutex |
|
| 30 |
- MemoryFileStore |
|
| 31 |
- passphrase.Retriever |
|
| 32 |
- cachedKeys map[string]*cachedKey |
|
| 33 |
- keyInfoMap |
|
| 34 |
-} |
|
| 35 |
- |
|
| 36 |
-// KeyInfo stores the role, path, and gun for a corresponding private key ID |
|
| 37 |
-// It is assumed that each private key ID is unique |
|
| 38 |
-type KeyInfo struct {
|
|
| 39 |
- Gun string |
|
| 40 |
- Role string |
|
| 41 |
-} |
|
| 42 |
- |
|
| 43 |
-// NewKeyFileStore returns a new KeyFileStore creating a private directory to |
|
| 44 |
-// hold the keys. |
|
| 45 |
-func NewKeyFileStore(baseDir string, passphraseRetriever passphrase.Retriever) (*KeyFileStore, error) {
|
|
| 46 |
- baseDir = filepath.Join(baseDir, notary.PrivDir) |
|
| 47 |
- fileStore, err := NewPrivateSimpleFileStore(baseDir, keyExtension) |
|
| 48 |
- if err != nil {
|
|
| 49 |
- return nil, err |
|
| 50 |
- } |
|
| 51 |
- cachedKeys := make(map[string]*cachedKey) |
|
| 52 |
- keyInfoMap := make(keyInfoMap) |
|
| 53 |
- |
|
| 54 |
- keyStore := &KeyFileStore{SimpleFileStore: *fileStore,
|
|
| 55 |
- Retriever: passphraseRetriever, |
|
| 56 |
- cachedKeys: cachedKeys, |
|
| 57 |
- keyInfoMap: keyInfoMap, |
|
| 58 |
- } |
|
| 59 |
- |
|
| 60 |
- // Load this keystore's ID --> gun/role map |
|
| 61 |
- keyStore.loadKeyInfo() |
|
| 62 |
- return keyStore, nil |
|
| 63 |
-} |
|
| 64 |
- |
|
| 65 |
-func generateKeyInfoMap(s Storage) map[string]KeyInfo {
|
|
| 66 |
- keyInfoMap := make(map[string]KeyInfo) |
|
| 67 |
- for _, keyPath := range s.ListFiles() {
|
|
| 68 |
- d, err := s.Get(keyPath) |
|
| 69 |
- if err != nil {
|
|
| 70 |
- logrus.Error(err) |
|
| 71 |
- continue |
|
| 72 |
- } |
|
| 73 |
- keyID, keyInfo, err := KeyInfoFromPEM(d, keyPath) |
|
| 74 |
- if err != nil {
|
|
| 75 |
- logrus.Error(err) |
|
| 76 |
- continue |
|
| 77 |
- } |
|
| 78 |
- keyInfoMap[keyID] = keyInfo |
|
| 79 |
- } |
|
| 80 |
- return keyInfoMap |
|
| 81 |
-} |
|
| 82 |
- |
|
| 83 |
-// Attempts to infer the keyID, role, and GUN from the specified key path. |
|
| 84 |
-// Note that non-root roles can only be inferred if this is a legacy style filename: KEYID_ROLE.key |
|
| 85 |
-func inferKeyInfoFromKeyPath(keyPath string) (string, string, string) {
|
|
| 86 |
- var keyID, role, gun string |
|
| 87 |
- keyID = filepath.Base(keyPath) |
|
| 88 |
- underscoreIndex := strings.LastIndex(keyID, "_") |
|
| 89 |
- |
|
| 90 |
- // This is the legacy KEYID_ROLE filename |
|
| 91 |
- // The keyID is the first part of the keyname |
|
| 92 |
- // The keyRole is the second part of the keyname |
|
| 93 |
- // in a key named abcde_root, abcde is the keyID and root is the KeyAlias |
|
| 94 |
- if underscoreIndex != -1 {
|
|
| 95 |
- role = keyID[underscoreIndex+1:] |
|
| 96 |
- keyID = keyID[:underscoreIndex] |
|
| 97 |
- } |
|
| 98 |
- |
|
| 99 |
- if filepath.HasPrefix(keyPath, notary.RootKeysSubdir+"/") {
|
|
| 100 |
- return keyID, data.CanonicalRootRole, "" |
|
| 101 |
- } |
|
| 102 |
- |
|
| 103 |
- keyPath = strings.TrimPrefix(keyPath, notary.NonRootKeysSubdir+"/") |
|
| 104 |
- gun = getGunFromFullID(keyPath) |
|
| 105 |
- return keyID, role, gun |
|
| 106 |
-} |
|
| 107 |
- |
|
| 108 |
-func getGunFromFullID(fullKeyID string) string {
|
|
| 109 |
- keyGun := filepath.Dir(fullKeyID) |
|
| 110 |
- // If the gun is empty, Dir will return . |
|
| 111 |
- if keyGun == "." {
|
|
| 112 |
- keyGun = "" |
|
| 113 |
- } |
|
| 114 |
- return keyGun |
|
| 115 |
-} |
|
| 116 |
- |
|
| 117 |
-func (s *KeyFileStore) loadKeyInfo() {
|
|
| 118 |
- s.keyInfoMap = generateKeyInfoMap(s) |
|
| 119 |
-} |
|
| 120 |
- |
|
| 121 |
-func (s *KeyMemoryStore) loadKeyInfo() {
|
|
| 122 |
- s.keyInfoMap = generateKeyInfoMap(s) |
|
| 123 |
-} |
|
| 124 |
- |
|
| 125 |
-// GetKeyInfo returns the corresponding gun and role key info for a keyID |
|
| 126 |
-func (s *KeyFileStore) GetKeyInfo(keyID string) (KeyInfo, error) {
|
|
| 127 |
- if info, ok := s.keyInfoMap[keyID]; ok {
|
|
| 128 |
- return info, nil |
|
| 129 |
- } |
|
| 130 |
- return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
|
|
| 131 |
-} |
|
| 132 |
- |
|
| 133 |
-// GetKeyInfo returns the corresponding gun and role key info for a keyID |
|
| 134 |
-func (s *KeyMemoryStore) GetKeyInfo(keyID string) (KeyInfo, error) {
|
|
| 135 |
- if info, ok := s.keyInfoMap[keyID]; ok {
|
|
| 136 |
- return info, nil |
|
| 137 |
- } |
|
| 138 |
- return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
|
|
| 139 |
-} |
|
| 140 |
- |
|
| 141 |
-// Name returns a user friendly name for the location this store |
|
| 142 |
-// keeps its data |
|
| 143 |
-func (s *KeyFileStore) Name() string {
|
|
| 144 |
- return fmt.Sprintf("file (%s)", s.SimpleFileStore.BaseDir())
|
|
| 145 |
-} |
|
| 146 |
- |
|
| 147 |
-// AddKey stores the contents of a PEM-encoded private key as a PEM block |
|
| 148 |
-func (s *KeyFileStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error {
|
|
| 149 |
- s.Lock() |
|
| 150 |
- defer s.Unlock() |
|
| 151 |
- if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) {
|
|
| 152 |
- keyInfo.Gun = "" |
|
| 153 |
- } |
|
| 154 |
- err := addKey(s, s.Retriever, s.cachedKeys, filepath.Join(keyInfo.Gun, privKey.ID()), keyInfo.Role, privKey) |
|
| 155 |
- if err != nil {
|
|
| 156 |
- return err |
|
| 157 |
- } |
|
| 158 |
- s.keyInfoMap[privKey.ID()] = keyInfo |
|
| 159 |
- return nil |
|
| 160 |
-} |
|
| 161 |
- |
|
| 162 |
-// GetKey returns the PrivateKey given a KeyID |
|
| 163 |
-func (s *KeyFileStore) GetKey(name string) (data.PrivateKey, string, error) {
|
|
| 164 |
- s.Lock() |
|
| 165 |
- defer s.Unlock() |
|
| 166 |
- // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds |
|
| 167 |
- if keyInfo, ok := s.keyInfoMap[name]; ok {
|
|
| 168 |
- name = filepath.Join(keyInfo.Gun, name) |
|
| 169 |
- } |
|
| 170 |
- return getKey(s, s.Retriever, s.cachedKeys, name) |
|
| 171 |
-} |
|
| 172 |
- |
|
| 173 |
-// ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by returning a copy of the keyInfoMap |
|
| 174 |
-func (s *KeyFileStore) ListKeys() map[string]KeyInfo {
|
|
| 175 |
- return copyKeyInfoMap(s.keyInfoMap) |
|
| 176 |
-} |
|
| 177 |
- |
|
| 178 |
-// RemoveKey removes the key from the keyfilestore |
|
| 179 |
-func (s *KeyFileStore) RemoveKey(keyID string) error {
|
|
| 180 |
- s.Lock() |
|
| 181 |
- defer s.Unlock() |
|
| 182 |
- // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds |
|
| 183 |
- if keyInfo, ok := s.keyInfoMap[keyID]; ok {
|
|
| 184 |
- keyID = filepath.Join(keyInfo.Gun, keyID) |
|
| 185 |
- } |
|
| 186 |
- err := removeKey(s, s.cachedKeys, keyID) |
|
| 187 |
- if err != nil {
|
|
| 188 |
- return err |
|
| 189 |
- } |
|
| 190 |
- // Remove this key from our keyInfo map if we removed from our filesystem |
|
| 191 |
- delete(s.keyInfoMap, filepath.Base(keyID)) |
|
| 192 |
- return nil |
|
| 193 |
-} |
|
| 194 |
- |
|
| 195 |
-// ExportKey exports the encrypted bytes from the keystore |
|
| 196 |
-func (s *KeyFileStore) ExportKey(keyID string) ([]byte, error) {
|
|
| 197 |
- if keyInfo, ok := s.keyInfoMap[keyID]; ok {
|
|
| 198 |
- keyID = filepath.Join(keyInfo.Gun, keyID) |
|
| 199 |
- } |
|
| 200 |
- keyBytes, _, err := getRawKey(s, keyID) |
|
| 201 |
- if err != nil {
|
|
| 202 |
- return nil, err |
|
| 203 |
- } |
|
| 204 |
- return keyBytes, nil |
|
| 205 |
-} |
|
| 206 |
- |
|
| 207 |
-// NewKeyMemoryStore returns a new KeyMemoryStore which holds keys in memory |
|
| 208 |
-func NewKeyMemoryStore(passphraseRetriever passphrase.Retriever) *KeyMemoryStore {
|
|
| 209 |
- memStore := NewMemoryFileStore() |
|
| 210 |
- cachedKeys := make(map[string]*cachedKey) |
|
| 211 |
- |
|
| 212 |
- keyInfoMap := make(keyInfoMap) |
|
| 213 |
- |
|
| 214 |
- keyStore := &KeyMemoryStore{MemoryFileStore: *memStore,
|
|
| 215 |
- Retriever: passphraseRetriever, |
|
| 216 |
- cachedKeys: cachedKeys, |
|
| 217 |
- keyInfoMap: keyInfoMap, |
|
| 218 |
- } |
|
| 219 |
- |
|
| 220 |
- // Load this keystore's ID --> gun/role map |
|
| 221 |
- keyStore.loadKeyInfo() |
|
| 222 |
- return keyStore |
|
| 223 |
-} |
|
| 224 |
- |
|
| 225 |
-// Name returns a user friendly name for the location this store |
|
| 226 |
-// keeps its data |
|
| 227 |
-func (s *KeyMemoryStore) Name() string {
|
|
| 228 |
- return "memory" |
|
| 229 |
-} |
|
| 230 |
- |
|
| 231 |
-// AddKey stores the contents of a PEM-encoded private key as a PEM block |
|
| 232 |
-func (s *KeyMemoryStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error {
|
|
| 233 |
- s.Lock() |
|
| 234 |
- defer s.Unlock() |
|
| 235 |
- if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) {
|
|
| 236 |
- keyInfo.Gun = "" |
|
| 237 |
- } |
|
| 238 |
- err := addKey(s, s.Retriever, s.cachedKeys, filepath.Join(keyInfo.Gun, privKey.ID()), keyInfo.Role, privKey) |
|
| 239 |
- if err != nil {
|
|
| 240 |
- return err |
|
| 241 |
- } |
|
| 242 |
- s.keyInfoMap[privKey.ID()] = keyInfo |
|
| 243 |
- return nil |
|
| 244 |
-} |
|
| 245 |
- |
|
| 246 |
-// GetKey returns the PrivateKey given a KeyID |
|
| 247 |
-func (s *KeyMemoryStore) GetKey(name string) (data.PrivateKey, string, error) {
|
|
| 248 |
- s.Lock() |
|
| 249 |
- defer s.Unlock() |
|
| 250 |
- // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds |
|
| 251 |
- if keyInfo, ok := s.keyInfoMap[name]; ok {
|
|
| 252 |
- name = filepath.Join(keyInfo.Gun, name) |
|
| 253 |
- } |
|
| 254 |
- return getKey(s, s.Retriever, s.cachedKeys, name) |
|
| 255 |
-} |
|
| 256 |
- |
|
| 257 |
-// ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by returning a copy of the keyInfoMap |
|
| 258 |
-func (s *KeyMemoryStore) ListKeys() map[string]KeyInfo {
|
|
| 259 |
- return copyKeyInfoMap(s.keyInfoMap) |
|
| 260 |
-} |
|
| 261 |
- |
|
| 262 |
-// copyKeyInfoMap returns a deep copy of the passed-in keyInfoMap |
|
| 263 |
-func copyKeyInfoMap(keyInfoMap map[string]KeyInfo) map[string]KeyInfo {
|
|
| 264 |
- copyMap := make(map[string]KeyInfo) |
|
| 265 |
- for keyID, keyInfo := range keyInfoMap {
|
|
| 266 |
- copyMap[keyID] = KeyInfo{Role: keyInfo.Role, Gun: keyInfo.Gun}
|
|
| 267 |
- } |
|
| 268 |
- return copyMap |
|
| 269 |
-} |
|
| 270 |
- |
|
| 271 |
-// RemoveKey removes the key from the keystore |
|
| 272 |
-func (s *KeyMemoryStore) RemoveKey(keyID string) error {
|
|
| 273 |
- s.Lock() |
|
| 274 |
- defer s.Unlock() |
|
| 275 |
- // If this is a bare key ID without the gun, prepend the gun so the filestore lookup succeeds |
|
| 276 |
- if keyInfo, ok := s.keyInfoMap[keyID]; ok {
|
|
| 277 |
- keyID = filepath.Join(keyInfo.Gun, keyID) |
|
| 278 |
- } |
|
| 279 |
- err := removeKey(s, s.cachedKeys, keyID) |
|
| 280 |
- if err != nil {
|
|
| 281 |
- return err |
|
| 282 |
- } |
|
| 283 |
- // Remove this key from our keyInfo map if we removed from our filesystem |
|
| 284 |
- delete(s.keyInfoMap, filepath.Base(keyID)) |
|
| 285 |
- return nil |
|
| 286 |
-} |
|
| 287 |
- |
|
| 288 |
-// ExportKey exports the encrypted bytes from the keystore |
|
| 289 |
-func (s *KeyMemoryStore) ExportKey(keyID string) ([]byte, error) {
|
|
| 290 |
- keyBytes, _, err := getRawKey(s, keyID) |
|
| 291 |
- if err != nil {
|
|
| 292 |
- return nil, err |
|
| 293 |
- } |
|
| 294 |
- return keyBytes, nil |
|
| 295 |
-} |
|
| 296 |
- |
|
| 297 |
-// KeyInfoFromPEM attempts to get a keyID and KeyInfo from the filename and PEM bytes of a key |
|
| 298 |
-func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) {
|
|
| 299 |
- keyID, role, gun := inferKeyInfoFromKeyPath(filename) |
|
| 300 |
- if role == "" {
|
|
| 301 |
- block, _ := pem.Decode(pemBytes) |
|
| 302 |
- if block == nil {
|
|
| 303 |
- return "", KeyInfo{}, fmt.Errorf("could not decode PEM block for key %s", filename)
|
|
| 304 |
- } |
|
| 305 |
- if keyRole, ok := block.Headers["role"]; ok {
|
|
| 306 |
- role = keyRole |
|
| 307 |
- } |
|
| 308 |
- } |
|
| 309 |
- return keyID, KeyInfo{Gun: gun, Role: role}, nil
|
|
| 310 |
-} |
|
| 311 |
- |
|
| 312 |
-func addKey(s Storage, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name, role string, privKey data.PrivateKey) error {
|
|
| 313 |
- |
|
| 314 |
- var ( |
|
| 315 |
- chosenPassphrase string |
|
| 316 |
- giveup bool |
|
| 317 |
- err error |
|
| 318 |
- ) |
|
| 319 |
- |
|
| 320 |
- for attempts := 0; ; attempts++ {
|
|
| 321 |
- chosenPassphrase, giveup, err = passphraseRetriever(name, role, true, attempts) |
|
| 322 |
- if err != nil {
|
|
| 323 |
- continue |
|
| 324 |
- } |
|
| 325 |
- if giveup {
|
|
| 326 |
- return ErrAttemptsExceeded{}
|
|
| 327 |
- } |
|
| 328 |
- if attempts > 10 {
|
|
| 329 |
- return ErrAttemptsExceeded{}
|
|
| 330 |
- } |
|
| 331 |
- break |
|
| 332 |
- } |
|
| 333 |
- |
|
| 334 |
- return encryptAndAddKey(s, chosenPassphrase, cachedKeys, name, role, privKey) |
|
| 335 |
-} |
|
| 336 |
- |
|
| 337 |
-// getKeyRole finds the role for the given keyID. It attempts to look |
|
| 338 |
-// both in the newer format PEM headers, and also in the legacy filename |
|
| 339 |
-// format. It returns: the role, whether it was found in the legacy format |
|
| 340 |
-// (true == legacy), and an error |
|
| 341 |
-func getKeyRole(s Storage, keyID string) (string, bool, error) {
|
|
| 342 |
- name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID))) |
|
| 343 |
- |
|
| 344 |
- for _, file := range s.ListFiles() {
|
|
| 345 |
- filename := filepath.Base(file) |
|
| 346 |
- |
|
| 347 |
- if strings.HasPrefix(filename, name) {
|
|
| 348 |
- d, err := s.Get(file) |
|
| 349 |
- if err != nil {
|
|
| 350 |
- return "", false, err |
|
| 351 |
- } |
|
| 352 |
- block, _ := pem.Decode(d) |
|
| 353 |
- if block != nil {
|
|
| 354 |
- if role, ok := block.Headers["role"]; ok {
|
|
| 355 |
- return role, false, nil |
|
| 356 |
- } |
|
| 357 |
- } |
|
| 358 |
- |
|
| 359 |
- role := strings.TrimPrefix(filename, name+"_") |
|
| 360 |
- return role, true, nil |
|
| 361 |
- } |
|
| 362 |
- } |
|
| 363 |
- |
|
| 364 |
- return "", false, ErrKeyNotFound{KeyID: keyID}
|
|
| 365 |
-} |
|
| 366 |
- |
|
| 367 |
-// GetKey returns the PrivateKey given a KeyID |
|
| 368 |
-func getKey(s Storage, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name string) (data.PrivateKey, string, error) {
|
|
| 369 |
- cachedKeyEntry, ok := cachedKeys[name] |
|
| 370 |
- if ok {
|
|
| 371 |
- return cachedKeyEntry.key, cachedKeyEntry.alias, nil |
|
| 372 |
- } |
|
| 373 |
- |
|
| 374 |
- keyBytes, keyAlias, err := getRawKey(s, name) |
|
| 375 |
- if err != nil {
|
|
| 376 |
- return nil, "", err |
|
| 377 |
- } |
|
| 378 |
- |
|
| 379 |
- // See if the key is encrypted. If its encrypted we'll fail to parse the private key |
|
| 380 |
- privKey, err := ParsePEMPrivateKey(keyBytes, "") |
|
| 381 |
- if err != nil {
|
|
| 382 |
- privKey, _, err = GetPasswdDecryptBytes(passphraseRetriever, keyBytes, name, string(keyAlias)) |
|
| 383 |
- if err != nil {
|
|
| 384 |
- return nil, "", err |
|
| 385 |
- } |
|
| 386 |
- } |
|
| 387 |
- cachedKeys[name] = &cachedKey{alias: keyAlias, key: privKey}
|
|
| 388 |
- return privKey, keyAlias, nil |
|
| 389 |
-} |
|
| 390 |
- |
|
| 391 |
-// RemoveKey removes the key from the keyfilestore |
|
| 392 |
-func removeKey(s Storage, cachedKeys map[string]*cachedKey, name string) error {
|
|
| 393 |
- role, legacy, err := getKeyRole(s, name) |
|
| 394 |
- if err != nil {
|
|
| 395 |
- return err |
|
| 396 |
- } |
|
| 397 |
- |
|
| 398 |
- delete(cachedKeys, name) |
|
| 399 |
- |
|
| 400 |
- if legacy {
|
|
| 401 |
- name = name + "_" + role |
|
| 402 |
- } |
|
| 403 |
- |
|
| 404 |
- // being in a subdirectory is for backwards compatibliity |
|
| 405 |
- err = s.Remove(filepath.Join(getSubdir(role), name)) |
|
| 406 |
- if err != nil {
|
|
| 407 |
- return err |
|
| 408 |
- } |
|
| 409 |
- return nil |
|
| 410 |
-} |
|
| 411 |
- |
|
| 412 |
-// Assumes 2 subdirectories, 1 containing root keys and 1 containing tuf keys |
|
| 413 |
-func getSubdir(alias string) string {
|
|
| 414 |
- if alias == data.CanonicalRootRole {
|
|
| 415 |
- return notary.RootKeysSubdir |
|
| 416 |
- } |
|
| 417 |
- return notary.NonRootKeysSubdir |
|
| 418 |
-} |
|
| 419 |
- |
|
| 420 |
-// Given a key ID, gets the bytes and alias belonging to that key if the key |
|
| 421 |
-// exists |
|
| 422 |
-func getRawKey(s Storage, name string) ([]byte, string, error) {
|
|
| 423 |
- role, legacy, err := getKeyRole(s, name) |
|
| 424 |
- if err != nil {
|
|
| 425 |
- return nil, "", err |
|
| 426 |
- } |
|
| 427 |
- |
|
| 428 |
- if legacy {
|
|
| 429 |
- name = name + "_" + role |
|
| 430 |
- } |
|
| 431 |
- |
|
| 432 |
- var keyBytes []byte |
|
| 433 |
- keyBytes, err = s.Get(filepath.Join(getSubdir(role), name)) |
|
| 434 |
- if err != nil {
|
|
| 435 |
- return nil, "", err |
|
| 436 |
- } |
|
| 437 |
- return keyBytes, role, nil |
|
| 438 |
-} |
|
| 439 |
- |
|
| 440 |
-// GetPasswdDecryptBytes gets the password to decrypt the given pem bytes. |
|
| 441 |
-// Returns the password and private key |
|
| 442 |
-func GetPasswdDecryptBytes(passphraseRetriever passphrase.Retriever, pemBytes []byte, name, alias string) (data.PrivateKey, string, error) {
|
|
| 443 |
- var ( |
|
| 444 |
- passwd string |
|
| 445 |
- retErr error |
|
| 446 |
- privKey data.PrivateKey |
|
| 447 |
- ) |
|
| 448 |
- for attempts := 0; ; attempts++ {
|
|
| 449 |
- var ( |
|
| 450 |
- giveup bool |
|
| 451 |
- err error |
|
| 452 |
- ) |
|
| 453 |
- passwd, giveup, err = passphraseRetriever(name, alias, false, attempts) |
|
| 454 |
- // Check if the passphrase retriever got an error or if it is telling us to give up |
|
| 455 |
- if giveup || err != nil {
|
|
| 456 |
- return nil, "", ErrPasswordInvalid{}
|
|
| 457 |
- } |
|
| 458 |
- if attempts > 10 {
|
|
| 459 |
- return nil, "", ErrAttemptsExceeded{}
|
|
| 460 |
- } |
|
| 461 |
- |
|
| 462 |
- // Try to convert PEM encoded bytes back to a PrivateKey using the passphrase |
|
| 463 |
- privKey, err = ParsePEMPrivateKey(pemBytes, passwd) |
|
| 464 |
- if err != nil {
|
|
| 465 |
- retErr = ErrPasswordInvalid{}
|
|
| 466 |
- } else {
|
|
| 467 |
- // We managed to parse the PrivateKey. We've succeeded! |
|
| 468 |
- retErr = nil |
|
| 469 |
- break |
|
| 470 |
- } |
|
| 471 |
- } |
|
| 472 |
- if retErr != nil {
|
|
| 473 |
- return nil, "", retErr |
|
| 474 |
- } |
|
| 475 |
- return privKey, passwd, nil |
|
| 476 |
-} |
|
| 477 |
- |
|
| 478 |
-func encryptAndAddKey(s Storage, passwd string, cachedKeys map[string]*cachedKey, name, role string, privKey data.PrivateKey) error {
|
|
| 479 |
- |
|
| 480 |
- var ( |
|
| 481 |
- pemPrivKey []byte |
|
| 482 |
- err error |
|
| 483 |
- ) |
|
| 484 |
- |
|
| 485 |
- if passwd != "" {
|
|
| 486 |
- pemPrivKey, err = EncryptPrivateKey(privKey, role, passwd) |
|
| 487 |
- } else {
|
|
| 488 |
- pemPrivKey, err = KeyToPEM(privKey, role) |
|
| 489 |
- } |
|
| 490 |
- |
|
| 491 |
- if err != nil {
|
|
| 492 |
- return err |
|
| 493 |
- } |
|
| 494 |
- |
|
| 495 |
- cachedKeys[name] = &cachedKey{alias: role, key: privKey}
|
|
| 496 |
- return s.Add(filepath.Join(getSubdir(role), name), pemPrivKey) |
|
| 497 |
-} |
| ... | ... |
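A worked example of the legacy filename inference that both the deleted keystore above and its replacement below perform: for a listed path such as tuf_keys/docker.io/library/alpine/abcd123_targets (ListFiles has already trimmed the key extension), the basename splits at the last underscore into keyID abcd123 and role targets, and the gun is the remaining directory portion, docker.io/library/alpine. Any path under the root keys subdirectory instead maps to role root with an empty gun. The tuf_keys and root_keys directory names are assumptions read off the notary.NonRootKeysSubdir and notary.RootKeysSubdir constants referenced in the code.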
@@ -1,59 +1,325 @@ |
| 1 | 1 |
package trustmanager |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "encoding/pem" |
|
| 4 | 5 |
"fmt" |
| 6 |
+ "path/filepath" |
|
| 7 |
+ "strings" |
|
| 8 |
+ "sync" |
|
| 5 | 9 |
|
| 10 |
+ "github.com/Sirupsen/logrus" |
|
| 11 |
+ "github.com/docker/notary" |
|
| 12 |
+ store "github.com/docker/notary/storage" |
|
| 6 | 13 |
"github.com/docker/notary/tuf/data" |
| 14 |
+ "github.com/docker/notary/tuf/utils" |
|
| 7 | 15 |
) |
| 8 | 16 |
|
| 9 |
-// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key |
|
| 10 |
-type ErrAttemptsExceeded struct{}
|
|
| 17 |
+type keyInfoMap map[string]KeyInfo |
|
| 11 | 18 |
|
| 12 |
-// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key |
|
| 13 |
-func (err ErrAttemptsExceeded) Error() string {
|
|
| 14 |
- return "maximum number of passphrase attempts exceeded" |
|
| 19 |
+// KeyInfo stores the role, path, and gun for a corresponding private key ID |
|
| 20 |
+// It is assumed that each private key ID is unique |
|
| 21 |
+type KeyInfo struct {
|
|
| 22 |
+ Gun string |
|
| 23 |
+ Role string |
|
| 15 | 24 |
} |
| 16 | 25 |
|
| 17 |
-// ErrPasswordInvalid is returned when signing fails. It could also mean the signing |
|
| 18 |
-// key file was corrupted, but we have no way to distinguish. |
|
| 19 |
-type ErrPasswordInvalid struct{}
|
|
| 26 |
+// GenericKeyStore is a wrapper for Storage instances that provides |
|
| 27 |
+// translation between the []byte form and Public/PrivateKey objects |
|
| 28 |
+type GenericKeyStore struct {
|
|
| 29 |
+ store Storage |
|
| 30 |
+ sync.Mutex |
|
| 31 |
+ notary.PassRetriever |
|
| 32 |
+ cachedKeys map[string]*cachedKey |
|
| 33 |
+ keyInfoMap |
|
| 34 |
+} |
|
| 20 | 35 |
|
| 21 |
-// ErrPasswordInvalid is returned when signing fails. It could also mean the signing |
|
| 22 |
-// key file was corrupted, but we have no way to distinguish. |
|
| 23 |
-func (err ErrPasswordInvalid) Error() string {
|
|
| 24 |
- return "password invalid, operation has failed." |
|
| 36 |
+// NewKeyFileStore returns a new GenericKeyStore backed by a private directory to |
|
| 37 |
+// hold the keys. |
|
| 38 |
+func NewKeyFileStore(baseDir string, p notary.PassRetriever) (*GenericKeyStore, error) {
|
|
| 39 |
+ fileStore, err := store.NewPrivateKeyFileStorage(baseDir, notary.KeyExtension) |
|
| 40 |
+ if err != nil {
|
|
| 41 |
+ return nil, err |
|
| 42 |
+ } |
|
| 43 |
+ return NewGenericKeyStore(fileStore, p), nil |
|
| 25 | 44 |
} |
| 26 | 45 |
|
| 27 |
-// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. |
|
| 28 |
-type ErrKeyNotFound struct {
|
|
| 29 |
- KeyID string |
|
| 46 |
+// NewKeyMemoryStore returns a new KeyMemoryStore which holds keys in memory |
|
| 47 |
+func NewKeyMemoryStore(p notary.PassRetriever) *GenericKeyStore {
|
|
| 48 |
+ memStore := store.NewMemoryStore(nil) |
|
| 49 |
+ return NewGenericKeyStore(memStore, p) |
|
| 30 | 50 |
} |
| 31 | 51 |
|
| 32 |
-// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. |
|
| 33 |
-func (err ErrKeyNotFound) Error() string {
|
|
| 34 |
- return fmt.Sprintf("signing key not found: %s", err.KeyID)
|
|
| 52 |
+// NewGenericKeyStore creates a GenericKeyStore wrapping the provided |
|
| 53 |
+// Storage instance, using the PassRetriever to enc/decrypt keys |
|
| 54 |
+func NewGenericKeyStore(s Storage, p notary.PassRetriever) *GenericKeyStore {
|
|
| 55 |
+ ks := GenericKeyStore{
|
|
| 56 |
+ store: s, |
|
| 57 |
+ PassRetriever: p, |
|
| 58 |
+ cachedKeys: make(map[string]*cachedKey), |
|
| 59 |
+ keyInfoMap: make(keyInfoMap), |
|
| 60 |
+ } |
|
| 61 |
+ ks.loadKeyInfo() |
|
| 62 |
+ return &ks |
|
| 35 | 63 |
} |
| 36 | 64 |
|
| 37 |
-const ( |
|
| 38 |
- keyExtension = "key" |
|
| 39 |
-) |
|
| 65 |
+func generateKeyInfoMap(s Storage) map[string]KeyInfo {
|
|
| 66 |
+ keyInfoMap := make(map[string]KeyInfo) |
|
| 67 |
+ for _, keyPath := range s.ListFiles() {
|
|
| 68 |
+ d, err := s.Get(keyPath) |
|
| 69 |
+ if err != nil {
|
|
| 70 |
+ logrus.Error(err) |
|
| 71 |
+ continue |
|
| 72 |
+ } |
|
| 73 |
+ keyID, keyInfo, err := KeyInfoFromPEM(d, keyPath) |
|
| 74 |
+ if err != nil {
|
|
| 75 |
+ logrus.Error(err) |
|
| 76 |
+ continue |
|
| 77 |
+ } |
|
| 78 |
+ keyInfoMap[keyID] = keyInfo |
|
| 79 |
+ } |
|
| 80 |
+ return keyInfoMap |
|
| 81 |
+} |
|
| 82 |
+ |
|
| 83 |
+// Attempts to infer the keyID, role, and GUN from the specified key path. |
|
| 84 |
+// Note that non-root roles can only be inferred if this is a legacy style filename: KEYID_ROLE.key |
|
| 85 |
+func inferKeyInfoFromKeyPath(keyPath string) (string, string, string) {
|
|
| 86 |
+ var keyID, role, gun string |
|
| 87 |
+ keyID = filepath.Base(keyPath) |
|
| 88 |
+ underscoreIndex := strings.LastIndex(keyID, "_") |
|
| 89 |
+ |
|
| 90 |
+ // This is the legacy KEYID_ROLE filename |
|
| 91 |
+ // The keyID is the first part of the keyname |
|
| 92 |
+ // The keyRole is the second part of the keyname |
|
| 93 |
+ // in a key named abcde_root, abcde is the keyID and root is the KeyAlias |
|
| 94 |
+ if underscoreIndex != -1 {
|
|
| 95 |
+ role = keyID[underscoreIndex+1:] |
|
| 96 |
+ keyID = keyID[:underscoreIndex] |
|
| 97 |
+ } |
|
| 98 |
+ |
|
| 99 |
+ if filepath.HasPrefix(keyPath, notary.RootKeysSubdir+"/") {
|
|
| 100 |
+ return keyID, data.CanonicalRootRole, "" |
|
| 101 |
+ } |
|
| 102 |
+ |
|
| 103 |
+ keyPath = strings.TrimPrefix(keyPath, notary.NonRootKeysSubdir+"/") |
|
| 104 |
+ gun = getGunFromFullID(keyPath) |
|
| 105 |
+ return keyID, role, gun |
|
| 106 |
+} |
|
| 107 |
+ |
|
| 108 |
+func getGunFromFullID(fullKeyID string) string {
|
|
| 109 |
+ keyGun := filepath.Dir(fullKeyID) |
|
| 110 |
+ // If the gun is empty, Dir will return . |
|
| 111 |
+ if keyGun == "." {
|
|
| 112 |
+ keyGun = "" |
|
| 113 |
+ } |
|
| 114 |
+ return keyGun |
|
| 115 |
+} |
|
| 116 |
+ |
|
| 117 |
+func (s *GenericKeyStore) loadKeyInfo() {
|
|
| 118 |
+ s.keyInfoMap = generateKeyInfoMap(s.store) |
|
| 119 |
+} |
|
| 120 |
+ |
|
| 121 |
+// GetKeyInfo returns the corresponding gun and role key info for a keyID |
|
| 122 |
+func (s *GenericKeyStore) GetKeyInfo(keyID string) (KeyInfo, error) {
|
|
| 123 |
+ if info, ok := s.keyInfoMap[keyID]; ok {
|
|
| 124 |
+ return info, nil |
|
| 125 |
+ } |
|
| 126 |
+ return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
|
|
| 127 |
+} |
|
| 128 |
+ |
|
| 129 |
+// AddKey stores the contents of a PEM-encoded private key as a PEM block |
|
| 130 |
+func (s *GenericKeyStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error {
|
|
| 131 |
+ var ( |
|
| 132 |
+ chosenPassphrase string |
|
| 133 |
+ giveup bool |
|
| 134 |
+ err error |
|
| 135 |
+ pemPrivKey []byte |
|
| 136 |
+ ) |
|
| 137 |
+ s.Lock() |
|
| 138 |
+ defer s.Unlock() |
|
| 139 |
+ if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) {
|
|
| 140 |
+ keyInfo.Gun = "" |
|
| 141 |
+ } |
|
| 142 |
+ name := filepath.Join(keyInfo.Gun, privKey.ID()) |
|
| 143 |
+ for attempts := 0; ; attempts++ {
|
|
| 144 |
+ chosenPassphrase, giveup, err = s.PassRetriever(name, keyInfo.Role, true, attempts) |
|
| 145 |
+ if err == nil {
|
|
| 146 |
+ break |
|
| 147 |
+ } |
|
| 148 |
+ if giveup || attempts > 10 {
|
|
| 149 |
+ return ErrAttemptsExceeded{}
|
|
| 150 |
+ } |
|
| 151 |
+ } |
|
| 152 |
+ |
|
| 153 |
+ if chosenPassphrase != "" {
|
|
| 154 |
+ pemPrivKey, err = utils.EncryptPrivateKey(privKey, keyInfo.Role, keyInfo.Gun, chosenPassphrase) |
|
| 155 |
+ } else {
|
|
| 156 |
+ pemPrivKey, err = utils.KeyToPEM(privKey, keyInfo.Role) |
|
| 157 |
+ } |
|
| 158 |
+ |
|
| 159 |
+ if err != nil {
|
|
| 160 |
+ return err |
|
| 161 |
+ } |
|
| 162 |
+ |
|
| 163 |
+ s.cachedKeys[name] = &cachedKey{alias: keyInfo.Role, key: privKey}
|
|
| 164 |
+ err = s.store.Set(filepath.Join(getSubdir(keyInfo.Role), name), pemPrivKey) |
|
| 165 |
+ if err != nil {
|
|
| 166 |
+ return err |
|
| 167 |
+ } |
|
| 168 |
+ s.keyInfoMap[privKey.ID()] = keyInfo |
|
| 169 |
+ return nil |
|
| 170 |
+} |
|
| 171 |
+ |
|
| 172 |
+// GetKey returns the PrivateKey given a KeyID |
|
| 173 |
+func (s *GenericKeyStore) GetKey(name string) (data.PrivateKey, string, error) {
|
|
| 174 |
+ s.Lock() |
|
| 175 |
+ defer s.Unlock() |
|
| 176 |
+ cachedKeyEntry, ok := s.cachedKeys[name] |
|
| 177 |
+ if ok {
|
|
| 178 |
+ return cachedKeyEntry.key, cachedKeyEntry.alias, nil |
|
| 179 |
+ } |
|
| 180 |
+ |
|
| 181 |
+ keyBytes, _, keyAlias, err := getKey(s.store, name) |
|
| 182 |
+ if err != nil {
|
|
| 183 |
+ return nil, "", err |
|
| 184 |
+ } |
|
| 185 |
+ |
|
| 186 |
+ // See if the key is encrypted. If it's encrypted we'll fail to parse the private key |
|
| 187 |
+ privKey, err := utils.ParsePEMPrivateKey(keyBytes, "") |
|
| 188 |
+ if err != nil {
|
|
| 189 |
+ privKey, _, err = GetPasswdDecryptBytes(s.PassRetriever, keyBytes, name, string(keyAlias)) |
|
| 190 |
+ if err != nil {
|
|
| 191 |
+ return nil, "", err |
|
| 192 |
+ } |
|
| 193 |
+ } |
|
| 194 |
+ s.cachedKeys[name] = &cachedKey{alias: keyAlias, key: privKey}
|
|
| 195 |
+ return privKey, keyAlias, nil |
|
| 196 |
+} |
|
| 197 |
+ |
|
| 198 |
+// ListKeys returns the IDs and KeyInfo of the unique keys present in the store, by returning a copy of the keyInfoMap |
|
| 199 |
+func (s *GenericKeyStore) ListKeys() map[string]KeyInfo {
|
|
| 200 |
+ return copyKeyInfoMap(s.keyInfoMap) |
|
| 201 |
+} |
|
| 202 |
+ |
|
| 203 |
+// RemoveKey removes the key from the keyfilestore |
|
| 204 |
+func (s *GenericKeyStore) RemoveKey(keyID string) error {
|
|
| 205 |
+ s.Lock() |
|
| 206 |
+ defer s.Unlock() |
|
| 207 |
+ |
|
| 208 |
+ _, filename, _, err := getKey(s.store, keyID) |
|
| 209 |
+ switch err.(type) {
|
|
| 210 |
+ case ErrKeyNotFound, nil: |
|
| 211 |
+ break |
|
| 212 |
+ default: |
|
| 213 |
+ return err |
|
| 214 |
+ } |
|
| 215 |
+ |
|
| 216 |
+ delete(s.cachedKeys, keyID) |
|
| 217 |
+ |
|
| 218 |
+ err = s.store.Remove(filename) // removing a file that doesn't exist doesn't fail |
|
| 219 |
+ if err != nil {
|
|
| 220 |
+ return err |
|
| 221 |
+ } |
|
| 222 |
+ |
|
| 223 |
+ // Remove this key from our keyInfo map if we removed from our filesystem |
|
| 224 |
+ delete(s.keyInfoMap, filepath.Base(keyID)) |
|
| 225 |
+ return nil |
|
| 226 |
+} |
|
| 227 |
+ |
|
| 228 |
+// Name returns a user friendly name for the location this store |
|
| 229 |
+// keeps its data |
|
| 230 |
+func (s *GenericKeyStore) Name() string {
|
|
| 231 |
+ return s.store.Location() |
|
| 232 |
+} |
|
| 233 |
+ |
|
| 234 |
+// copyKeyInfoMap returns a deep copy of the passed-in keyInfoMap |
|
| 235 |
+func copyKeyInfoMap(keyInfoMap map[string]KeyInfo) map[string]KeyInfo {
|
|
| 236 |
+ copyMap := make(map[string]KeyInfo) |
|
| 237 |
+ for keyID, keyInfo := range keyInfoMap {
|
|
| 238 |
+ copyMap[keyID] = KeyInfo{Role: keyInfo.Role, Gun: keyInfo.Gun}
|
|
| 239 |
+ } |
|
| 240 |
+ return copyMap |
|
| 241 |
+} |
|
| 242 |
+ |
|
| 243 |
+// KeyInfoFromPEM attempts to get a keyID and KeyInfo from the filename and PEM bytes of a key |
|
| 244 |
+func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) {
|
|
| 245 |
+ keyID, role, gun := inferKeyInfoFromKeyPath(filename) |
|
| 246 |
+ if role == "" {
|
|
| 247 |
+ block, _ := pem.Decode(pemBytes) |
|
| 248 |
+ if block == nil {
|
|
| 249 |
+ return "", KeyInfo{}, fmt.Errorf("could not decode PEM block for key %s", filename)
|
|
| 250 |
+ } |
|
| 251 |
+ if keyRole, ok := block.Headers["role"]; ok {
|
|
| 252 |
+ role = keyRole |
|
| 253 |
+ } |
|
| 254 |
+ } |
|
| 255 |
+ return keyID, KeyInfo{Gun: gun, Role: role}, nil
|
|
| 256 |
+} |
|
| 257 |
+ |
|
| 258 |
+// getKey finds the key and role for the given keyID. It attempts to |
|
| 259 |
+// look both in the newer format PEM headers, and also in the legacy filename |
|
| 260 |
+// format. It returns: the key bytes, the filename it was found under, the role, |
|
| 261 |
+// and an error |
|
| 262 |
+func getKey(s Storage, keyID string) ([]byte, string, string, error) {
|
|
| 263 |
+ name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID))) |
|
| 264 |
+ |
|
| 265 |
+ for _, file := range s.ListFiles() {
|
|
| 266 |
+ filename := filepath.Base(file) |
|
| 267 |
+ |
|
| 268 |
+ if strings.HasPrefix(filename, name) {
|
|
| 269 |
+ d, err := s.Get(file) |
|
| 270 |
+ if err != nil {
|
|
| 271 |
+ return nil, "", "", err |
|
| 272 |
+ } |
|
| 273 |
+ block, _ := pem.Decode(d) |
|
| 274 |
+ if block != nil {
|
|
| 275 |
+ if role, ok := block.Headers["role"]; ok {
|
|
| 276 |
+ return d, file, role, nil |
|
| 277 |
+ } |
|
| 278 |
+ } |
|
| 279 |
+ |
|
| 280 |
+ role := strings.TrimPrefix(filename, name+"_") |
|
| 281 |
+ return d, file, role, nil |
|
| 282 |
+ } |
|
| 283 |
+ } |
|
| 284 |
+ |
|
| 285 |
+ return nil, "", "", ErrKeyNotFound{KeyID: keyID}
|
|
| 286 |
+} |
|
| 287 |
+ |
|
| 288 |
+// Assumes 2 subdirectories, 1 containing root keys and 1 containing TUF keys |
|
| 289 |
+func getSubdir(alias string) string {
|
|
| 290 |
+ if alias == data.CanonicalRootRole {
|
|
| 291 |
+ return notary.RootKeysSubdir |
|
| 292 |
+ } |
|
| 293 |
+ return notary.NonRootKeysSubdir |
|
| 294 |
+} |
|
| 295 |
+ |
|
| 296 |
+// GetPasswdDecryptBytes gets the password to decrypt the given pem bytes. |
|
| 297 |
+// Returns the private key and the password used to decrypt it |
|
| 298 |
+func GetPasswdDecryptBytes(passphraseRetriever notary.PassRetriever, pemBytes []byte, name, alias string) (data.PrivateKey, string, error) {
|
|
| 299 |
+ var ( |
|
| 300 |
+ passwd string |
|
| 301 |
+ privKey data.PrivateKey |
|
| 302 |
+ ) |
|
| 303 |
+ for attempts := 0; ; attempts++ {
|
|
| 304 |
+ var ( |
|
| 305 |
+ giveup bool |
|
| 306 |
+ err error |
|
| 307 |
+ ) |
|
| 308 |
+ if attempts > 10 {
|
|
| 309 |
+ return nil, "", ErrAttemptsExceeded{}
|
|
| 310 |
+ } |
|
| 311 |
+ passwd, giveup, err = passphraseRetriever(name, alias, false, attempts) |
|
| 312 |
+ // Check if the passphrase retriever got an error or if it is telling us to give up |
|
| 313 |
+ if giveup || err != nil {
|
|
| 314 |
+ return nil, "", ErrPasswordInvalid{}
|
|
| 315 |
+ } |
|
| 40 | 316 |
|
| 41 |
-// KeyStore is a generic interface for private key storage |
|
| 42 |
-type KeyStore interface {
|
|
| 43 |
- // AddKey adds a key to the KeyStore, and if the key already exists, |
|
| 44 |
- // succeeds. Otherwise, returns an error if it cannot add. |
|
| 45 |
- AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error |
|
| 46 |
- // Should fail with ErrKeyNotFound if the keystore is operating normally |
|
| 47 |
- // and knows that it does not store the requested key. |
|
| 48 |
- GetKey(keyID string) (data.PrivateKey, string, error) |
|
| 49 |
- GetKeyInfo(keyID string) (KeyInfo, error) |
|
| 50 |
- ListKeys() map[string]KeyInfo |
|
| 51 |
- RemoveKey(keyID string) error |
|
| 52 |
- ExportKey(keyID string) ([]byte, error) |
|
| 53 |
- Name() string |
|
| 54 |
-} |
|
| 55 |
- |
|
| 56 |
-type cachedKey struct {
|
|
| 57 |
- alias string |
|
| 58 |
- key data.PrivateKey |
|
| 317 |
+ // Try to convert PEM encoded bytes back to a PrivateKey using the passphrase |
|
| 318 |
+ privKey, err = utils.ParsePEMPrivateKey(pemBytes, passwd) |
|
| 319 |
+ if err == nil {
|
|
| 320 |
+ // We managed to parse the PrivateKey. We've succeeded! |
|
| 321 |
+ break |
|
| 322 |
+ } |
|
| 323 |
+ } |
|
| 324 |
+ return privKey, passwd, nil |
|
| 59 | 325 |
} |
| 60 | 326 |
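The net effect of this hunk is that KeyFileStore and KeyMemoryStore collapse into one GenericKeyStore over a pluggable Storage backend. A sketch of the resulting API, assuming a data.PrivateKey obtained elsewhere (for example from notary's key-generation helpers); constRetriever, storeAndFetch, and the gun value are illustrative:

    package example

    import (
        "fmt"

        "github.com/docker/notary/trustmanager"
        "github.com/docker/notary/tuf/data"
    )

    // constRetriever always supplies the same passphrase and never gives up;
    // its shape matches how the store invokes notary.PassRetriever above.
    func constRetriever(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
        return "opensesame", false, nil
    }

    // storeAndFetch round-trips a private key through an in-memory key store.
    func storeAndFetch(privKey data.PrivateKey) error {
        ks := trustmanager.NewKeyMemoryStore(constRetriever)

        info := trustmanager.KeyInfo{Gun: "docker.io/library/alpine", Role: "targets"}
        if err := ks.AddKey(info, privKey); err != nil {
            return err
        }

        _, role, err := ks.GetKey(privKey.ID())
        fmt.Println("fetched key with role:", role)
        return err
    }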
deleted file mode 100644 |
| ... | ... |
@@ -1,67 +0,0 @@ |
| 1 |
-package trustmanager |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "os" |
|
| 5 |
- "sync" |
|
| 6 |
-) |
|
| 7 |
- |
|
| 8 |
-// MemoryFileStore is an implementation of Storage that keeps |
|
| 9 |
-// the contents in memory. |
|
| 10 |
-type MemoryFileStore struct {
|
|
| 11 |
- sync.Mutex |
|
| 12 |
- |
|
| 13 |
- files map[string][]byte |
|
| 14 |
-} |
|
| 15 |
- |
|
| 16 |
-// NewMemoryFileStore creates a MemoryFileStore |
|
| 17 |
-func NewMemoryFileStore() *MemoryFileStore {
|
|
| 18 |
- return &MemoryFileStore{
|
|
| 19 |
- files: make(map[string][]byte), |
|
| 20 |
- } |
|
| 21 |
-} |
|
| 22 |
- |
|
| 23 |
-// Add writes data to a file with a given name |
|
| 24 |
-func (f *MemoryFileStore) Add(name string, data []byte) error {
|
|
| 25 |
- f.Lock() |
|
| 26 |
- defer f.Unlock() |
|
| 27 |
- |
|
| 28 |
- f.files[name] = data |
|
| 29 |
- return nil |
|
| 30 |
-} |
|
| 31 |
- |
|
| 32 |
-// Remove removes a file identified by name |
|
| 33 |
-func (f *MemoryFileStore) Remove(name string) error {
|
|
| 34 |
- f.Lock() |
|
| 35 |
- defer f.Unlock() |
|
| 36 |
- |
|
| 37 |
- if _, present := f.files[name]; !present {
|
|
| 38 |
- return os.ErrNotExist |
|
| 39 |
- } |
|
| 40 |
- delete(f.files, name) |
|
| 41 |
- |
|
| 42 |
- return nil |
|
| 43 |
-} |
|
| 44 |
- |
|
| 45 |
-// Get returns the data given a file name |
|
| 46 |
-func (f *MemoryFileStore) Get(name string) ([]byte, error) {
|
|
| 47 |
- f.Lock() |
|
| 48 |
- defer f.Unlock() |
|
| 49 |
- |
|
| 50 |
- fileData, present := f.files[name] |
|
| 51 |
- if !present {
|
|
| 52 |
- return nil, os.ErrNotExist |
|
| 53 |
- } |
|
| 54 |
- |
|
| 55 |
- return fileData, nil |
|
| 56 |
-} |
|
| 57 |
- |
|
| 58 |
-// ListFiles lists all the files inside of a store |
|
| 59 |
-func (f *MemoryFileStore) ListFiles() []string {
|
|
| 60 |
- var list []string |
|
| 61 |
- |
|
| 62 |
- for name := range f.files {
|
|
| 63 |
- list = append(list, name) |
|
| 64 |
- } |
|
| 65 |
- |
|
| 66 |
- return list |
|
| 67 |
-} |
| 68 | 1 |
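One behavioral difference hidden in this move: the deleted MemoryFileStore.Remove above returned os.ErrNotExist for a missing name, while the replacement storage.MemoryStore.Remove earlier in this diff is documented to return no error when the metadata doesn't exist. In other words:

    // Old: MemoryFileStore.Remove("missing") returned os.ErrNotExist.
    // New: always nil for a missing name.
    _ = storage.NewMemoryStore(nil).Remove("missing")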
deleted file mode 100644 |
| ... | ... |
@@ -1,42 +0,0 @@ |
| 1 |
-package trustmanager |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "errors" |
|
| 5 |
- |
|
| 6 |
- "github.com/docker/notary" |
|
| 7 |
-) |
|
| 8 |
- |
|
| 9 |
-const ( |
|
| 10 |
- visible = notary.PubCertPerms |
|
| 11 |
- private = notary.PrivKeyPerms |
|
| 12 |
-) |
|
| 13 |
- |
|
| 14 |
-var ( |
|
| 15 |
- // ErrPathOutsideStore indicates that the returned path would be |
|
| 16 |
- // outside the store |
|
| 17 |
- ErrPathOutsideStore = errors.New("path outside file store")
|
|
| 18 |
-) |
|
| 19 |
- |
|
| 20 |
-// Storage implements the bare bones primitives (no hierarchy) |
|
| 21 |
-type Storage interface {
|
|
| 22 |
- // Add writes a file to the specified location, returning an error if this |
|
| 23 |
- // is not possible (reasons may include permissions errors). The path is cleaned |
|
| 24 |
- // before being made absolute against the store's base dir. |
|
| 25 |
- Add(fileName string, data []byte) error |
|
| 26 |
- |
|
| 27 |
- // Remove deletes a file from the store relative to the store's base directory. |
|
| 28 |
- // The path is cleaned before being made absolute to ensure no path traversal |
|
| 29 |
- // outside the base directory is possible. |
|
| 30 |
- Remove(fileName string) error |
|
| 31 |
- |
|
| 32 |
- // Get returns the file content found at fileName relative to the base directory |
|
| 33 |
- // of the file store. The path is cleaned before being made absolute to ensure |
|
| 34 |
- // path traversal outside the store is not possible. If the file is not found |
|
| 35 |
- // an error to that effect is returned. |
|
| 36 |
- Get(fileName string) ([]byte, error) |
|
| 37 |
- |
|
| 38 |
- // ListFiles returns a list of paths relative to the base directory of the |
|
| 39 |
- // filestore. Any of these paths must be retrievable via the |
|
| 40 |
- // Storage.Get method. |
|
| 41 |
- ListFiles() []string |
|
| 42 |
-} |
| 43 | 1 |
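Relative to this old interface, the replacement earlier in this diff renames Add to Set and adds a Location method; code still written against the old shape can be bridged with a small adapter (setToAdd is illustrative):

    package trustmanager

    // setToAdd exposes the old Add name on top of the new Set-based Storage.
    type setToAdd struct{ Storage }

    func (s setToAdd) Add(name string, data []byte) error { return s.Set(name, data) }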
deleted file mode 100644 |
| ... | ... |
@@ -1,524 +0,0 @@ |
| 1 |
-package trustmanager |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "bytes" |
|
| 5 |
- "crypto/ecdsa" |
|
| 6 |
- "crypto/elliptic" |
|
| 7 |
- "crypto/rand" |
|
| 8 |
- "crypto/rsa" |
|
| 9 |
- "crypto/x509" |
|
| 10 |
- "crypto/x509/pkix" |
|
| 11 |
- "encoding/pem" |
|
| 12 |
- "errors" |
|
| 13 |
- "fmt" |
|
| 14 |
- "io" |
|
| 15 |
- "io/ioutil" |
|
| 16 |
- "math/big" |
|
| 17 |
- "time" |
|
| 18 |
- |
|
| 19 |
- "github.com/Sirupsen/logrus" |
|
| 20 |
- "github.com/agl/ed25519" |
|
| 21 |
- "github.com/docker/notary" |
|
| 22 |
- "github.com/docker/notary/tuf/data" |
|
| 23 |
-) |
|
| 24 |
- |
|
| 25 |
-// CertToPEM is a utility function returns a PEM encoded x509 Certificate |
|
| 26 |
-func CertToPEM(cert *x509.Certificate) []byte {
|
|
| 27 |
- pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
|
|
| 28 |
- |
|
| 29 |
- return pemCert |
|
| 30 |
-} |
|
| 31 |
- |
|
| 32 |
-// CertChainToPEM is a utility function returns a PEM encoded chain of x509 Certificates, in the order they are passed |
|
| 33 |
-func CertChainToPEM(certChain []*x509.Certificate) ([]byte, error) {
|
|
| 34 |
- var pemBytes bytes.Buffer |
|
| 35 |
- for _, cert := range certChain {
|
|
| 36 |
- if err := pem.Encode(&pemBytes, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
|
|
| 37 |
- return nil, err |
|
| 38 |
- } |
|
| 39 |
- } |
|
| 40 |
- return pemBytes.Bytes(), nil |
|
| 41 |
-} |
|
| 42 |
- |
|
| 43 |
-// LoadCertFromPEM returns the first certificate found in a bunch of bytes or error |
|
| 44 |
-// if nothing is found. Taken from https://golang.org/src/crypto/x509/cert_pool.go#L85. |
|
| 45 |
-func LoadCertFromPEM(pemBytes []byte) (*x509.Certificate, error) {
|
|
| 46 |
- for len(pemBytes) > 0 {
|
|
| 47 |
- var block *pem.Block |
|
| 48 |
- block, pemBytes = pem.Decode(pemBytes) |
|
| 49 |
- if block == nil {
|
|
| 50 |
- return nil, errors.New("no certificates found in PEM data")
|
|
| 51 |
- } |
|
| 52 |
- if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
|
|
| 53 |
- continue |
|
| 54 |
- } |
|
| 55 |
- |
|
| 56 |
- cert, err := x509.ParseCertificate(block.Bytes) |
|
| 57 |
- if err != nil {
|
|
| 58 |
- continue |
|
| 59 |
- } |
|
| 60 |
- |
|
| 61 |
- return cert, nil |
|
| 62 |
- } |
|
| 63 |
- |
|
| 64 |
- return nil, errors.New("no certificates found in PEM data")
|
|
| 65 |
-} |
|
| 66 |
- |
|
| 67 |
-// LoadCertFromFile loads the first certificate from the file provided. The |
|
| 68 |
-// data is expected to be PEM Encoded and contain one of more certificates |
|
| 69 |
-// with PEM type "CERTIFICATE" |
|
| 70 |
-func LoadCertFromFile(filename string) (*x509.Certificate, error) {
|
|
| 71 |
- certs, err := LoadCertBundleFromFile(filename) |
|
| 72 |
- if err != nil {
|
|
| 73 |
- return nil, err |
|
| 74 |
- } |
|
| 75 |
- return certs[0], nil |
|
| 76 |
-} |
|
| 77 |
- |
|
| 78 |
-// LoadCertBundleFromFile loads certificates from the []byte provided. The |
|
| 79 |
-// data is expected to be PEM Encoded and contain one of more certificates |
|
| 80 |
-// with PEM type "CERTIFICATE" |
|
| 81 |
-func LoadCertBundleFromFile(filename string) ([]*x509.Certificate, error) {
|
|
| 82 |
- b, err := ioutil.ReadFile(filename) |
|
| 83 |
- if err != nil {
|
|
| 84 |
- return nil, err |
|
| 85 |
- } |
|
| 86 |
- |
|
| 87 |
- return LoadCertBundleFromPEM(b) |
|
| 88 |
-} |
|
| 89 |
- |
|
| 90 |
-// LoadCertBundleFromPEM loads certificates from the []byte provided. The |
|
| 91 |
-// data is expected to be PEM Encoded and contain one of more certificates |
|
| 92 |
-// with PEM type "CERTIFICATE" |
|
| 93 |
-func LoadCertBundleFromPEM(pemBytes []byte) ([]*x509.Certificate, error) {
|
|
| 94 |
- certificates := []*x509.Certificate{}
|
|
| 95 |
- var block *pem.Block |
|
| 96 |
- block, pemBytes = pem.Decode(pemBytes) |
|
| 97 |
- for ; block != nil; block, pemBytes = pem.Decode(pemBytes) {
|
|
| 98 |
- if block.Type == "CERTIFICATE" {
|
|
| 99 |
- cert, err := x509.ParseCertificate(block.Bytes) |
|
| 100 |
- if err != nil {
|
|
| 101 |
- return nil, err |
|
| 102 |
- } |
|
| 103 |
- certificates = append(certificates, cert) |
|
| 104 |
- } else {
|
|
| 105 |
- return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
|
|
| 106 |
- } |
|
| 107 |
- } |
|
| 108 |
- |
|
| 109 |
- if len(certificates) == 0 {
|
|
| 110 |
- return nil, fmt.Errorf("no valid certificates found")
|
|
| 111 |
- } |
|
| 112 |
- |
|
| 113 |
- return certificates, nil |
|
| 114 |
-} |
|
| 115 |
- |
|
| 116 |
-// GetLeafCerts parses a list of x509 Certificates and returns all of them |
|
| 117 |
-// that aren't CA |
|
| 118 |
-func GetLeafCerts(certs []*x509.Certificate) []*x509.Certificate {
|
|
| 119 |
- var leafCerts []*x509.Certificate |
|
| 120 |
- for _, cert := range certs {
|
|
| 121 |
- if cert.IsCA {
|
|
| 122 |
- continue |
|
| 123 |
- } |
|
| 124 |
- leafCerts = append(leafCerts, cert) |
|
| 125 |
- } |
|
| 126 |
- return leafCerts |
|
| 127 |
-} |
|
| 128 |
- |
|
| 129 |
-// GetIntermediateCerts parses a list of x509 Certificates and returns all of the |
|
| 130 |
-// ones marked as a CA, to be used as intermediates |
|
| 131 |
-func GetIntermediateCerts(certs []*x509.Certificate) []*x509.Certificate {
|
|
| 132 |
- var intCerts []*x509.Certificate |
|
| 133 |
- for _, cert := range certs {
|
|
| 134 |
- if cert.IsCA {
|
|
| 135 |
- intCerts = append(intCerts, cert) |
|
| 136 |
- } |
|
| 137 |
- } |
|
| 138 |
- return intCerts |
|
| 139 |
-} |
|
| 140 |
- |
|
| 141 |
-// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It |
|
| 142 |
-// only supports RSA (PKCS#1) and attempts to decrypt using the passphrase, if encrypted. |
|
| 143 |
-func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) {
|
|
| 144 |
- block, _ := pem.Decode(pemBytes) |
|
| 145 |
- if block == nil {
|
|
| 146 |
- return nil, errors.New("no valid private key found")
|
|
| 147 |
- } |
|
| 148 |
- |
|
| 149 |
- var privKeyBytes []byte |
|
| 150 |
- var err error |
|
| 151 |
- if x509.IsEncryptedPEMBlock(block) {
|
|
| 152 |
- privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase)) |
|
| 153 |
- if err != nil {
|
|
| 154 |
- return nil, errors.New("could not decrypt private key")
|
|
| 155 |
- } |
|
| 156 |
- } else {
|
|
| 157 |
- privKeyBytes = block.Bytes |
|
| 158 |
- } |
|
| 159 |
- |
|
| 160 |
- switch block.Type {
|
|
| 161 |
- case "RSA PRIVATE KEY": |
|
| 162 |
- rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKeyBytes) |
|
| 163 |
- if err != nil {
|
|
| 164 |
- return nil, fmt.Errorf("could not parse DER encoded key: %v", err)
|
|
| 165 |
- } |
|
| 166 |
- |
|
| 167 |
- tufRSAPrivateKey, err := RSAToPrivateKey(rsaPrivKey) |
|
| 168 |
- if err != nil {
|
|
| 169 |
- return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err)
|
|
| 170 |
- } |
|
| 171 |
- |
|
| 172 |
- return tufRSAPrivateKey, nil |
|
| 173 |
- case "EC PRIVATE KEY": |
|
| 174 |
- ecdsaPrivKey, err := x509.ParseECPrivateKey(privKeyBytes) |
|
| 175 |
- if err != nil {
|
|
| 176 |
- return nil, fmt.Errorf("could not parse DER encoded private key: %v", err)
|
|
| 177 |
- } |
|
| 178 |
- |
|
| 179 |
- tufECDSAPrivateKey, err := ECDSAToPrivateKey(ecdsaPrivKey) |
|
| 180 |
- if err != nil {
|
|
| 181 |
- return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
|
|
| 182 |
- } |
|
| 183 |
- |
|
| 184 |
- return tufECDSAPrivateKey, nil |
|
| 185 |
- case "ED25519 PRIVATE KEY": |
|
| 186 |
- // We serialize ED25519 keys by concatenating the private key |
|
| 187 |
- // to the public key and encoding with PEM. See the |
|
| 188 |
- // ED25519ToPrivateKey function. |
|
| 189 |
- tufECDSAPrivateKey, err := ED25519ToPrivateKey(privKeyBytes) |
|
| 190 |
- if err != nil {
|
|
| 191 |
- return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
|
|
| 192 |
- } |
|
| 193 |
- |
|
| 194 |
- return tufECDSAPrivateKey, nil |
|
| 195 |
- |
|
| 196 |
- default: |
|
| 197 |
- return nil, fmt.Errorf("unsupported key type %q", block.Type)
|
|
| 198 |
- } |
|
| 199 |
-} |
|
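ParsePEMPrivateKey is the inverse of KeyToPEM and EncryptPrivateKey further down: it dispatches on the PEM block type ("RSA PRIVATE KEY", "EC PRIVATE KEY", or notary's own "ED25519 PRIVATE KEY") and decrypts first when the block is encrypted. A round-trip sketch, assuming the helpers keep these signatures after their move to tuf/utils:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"github.com/docker/notary/tuf/utils"
)

func main() {
	// Generate an ECDSA key and serialize it to PEM with a role header.
	privKey, err := utils.GenerateECDSAKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	pemBytes, err := utils.KeyToPEM(privKey, "root")
	if err != nil {
		log.Fatal(err)
	}

	// Parse it back; an empty passphrase is fine for an unencrypted block.
	parsed, err := utils.ParsePEMPrivateKey(pemBytes, "")
	if err != nil {
		log.Fatal(err)
	}
	// Key IDs are derived from the public half, so they survive the round trip.
	fmt.Println(parsed.ID() == privKey.ID()) // true
}
```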
| 200 |
- |
|
| 201 |
-// ParsePEMPublicKey returns a data.PublicKey from a PEM encoded public key or certificate. |
|
| 202 |
-func ParsePEMPublicKey(pubKeyBytes []byte) (data.PublicKey, error) {
|
|
| 203 |
- pemBlock, _ := pem.Decode(pubKeyBytes) |
|
| 204 |
- if pemBlock == nil {
|
|
| 205 |
- return nil, errors.New("no valid public key found")
|
|
| 206 |
- } |
|
| 207 |
- |
|
| 208 |
- switch pemBlock.Type {
|
|
| 209 |
- case "CERTIFICATE": |
|
| 210 |
- cert, err := x509.ParseCertificate(pemBlock.Bytes) |
|
| 211 |
- if err != nil {
|
|
| 212 |
- return nil, fmt.Errorf("could not parse provided certificate: %v", err)
|
|
| 213 |
- } |
|
| 214 |
- err = ValidateCertificate(cert) |
|
| 215 |
- if err != nil {
|
|
| 216 |
- return nil, fmt.Errorf("invalid certificate: %v", err)
|
|
| 217 |
- } |
|
| 218 |
- return CertToKey(cert), nil |
|
| 219 |
- default: |
|
| 220 |
- return nil, fmt.Errorf("unsupported PEM block type %q, expected certificate", pemBlock.Type)
|
|
| 221 |
- } |
|
| 222 |
-} |
|
| 223 |
- |
|
| 224 |
-// ValidateCertificate returns an error if the certificate is not valid for notary |
|
| 225 |
-// Currently this is only a time expiry check, and ensuring the public key has a large enough modulus if RSA |
|
| 226 |
-func ValidateCertificate(c *x509.Certificate) error {
|
|
| 227 |
- if (c.NotBefore).After(c.NotAfter) {
|
|
| 228 |
- return fmt.Errorf("certificate validity window is invalid")
|
|
| 229 |
- } |
|
| 230 |
- now := time.Now() |
|
| 231 |
- tomorrow := now.AddDate(0, 0, 1) |
|
| 232 |
- // Give one day leeway on creation "before" time, check "after" against today |
|
| 233 |
- if (tomorrow).Before(c.NotBefore) || now.After(c.NotAfter) {
|
|
| 234 |
- return fmt.Errorf("certificate is expired")
|
|
| 235 |
- } |
|
| 236 |
- // If we have an RSA key, make sure it's long enough |
|
| 237 |
- if c.PublicKeyAlgorithm == x509.RSA {
|
|
| 238 |
- rsaKey, ok := c.PublicKey.(*rsa.PublicKey) |
|
| 239 |
- if !ok {
|
|
| 240 |
- return fmt.Errorf("unable to parse RSA public key")
|
|
| 241 |
- } |
|
| 242 |
- if rsaKey.N.BitLen() < notary.MinRSABitSize {
|
|
| 243 |
- return fmt.Errorf("RSA bit length is too short")
|
|
| 244 |
- } |
|
| 245 |
- } |
|
| 246 |
- return nil |
|
| 247 |
-} |
|
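The validity checks here are deliberately shallow: a coherent NotBefore/NotAfter window (with one day of clock-skew leeway) and a minimum RSA modulus, with no chain building at this layer. Note that the relocated tuf/utils copy gains a second checkExpiry argument, as the call sites later in this diff show; a sketch against that newer form:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/notary/tuf/utils"
)

func main() {
	// NewCertificate (defined later in this file) builds an unsigned x509
	// template; ValidateCertificate only inspects fields, so that is enough here.
	start := time.Now()
	cert, err := utils.NewCertificate("docker.com/library/alpine", start, start.AddDate(10, 0, 0))
	if err != nil {
		panic(err)
	}
	// The second argument toggles the expiry check (per the call sites in this diff).
	if err := utils.ValidateCertificate(cert, true); err != nil {
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println("certificate fields look valid")
}
```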
| 248 |
- |
|
| 249 |
-// GenerateRSAKey generates an RSA private key and returns a TUF PrivateKey |
|
| 250 |
-func GenerateRSAKey(random io.Reader, bits int) (data.PrivateKey, error) {
|
|
| 251 |
- rsaPrivKey, err := rsa.GenerateKey(random, bits) |
|
| 252 |
- if err != nil {
|
|
| 253 |
- return nil, fmt.Errorf("could not generate private key: %v", err)
|
|
| 254 |
- } |
|
| 255 |
- |
|
| 256 |
- tufPrivKey, err := RSAToPrivateKey(rsaPrivKey) |
|
| 257 |
- if err != nil {
|
|
| 258 |
- return nil, err |
|
| 259 |
- } |
|
| 260 |
- |
|
| 261 |
- logrus.Debugf("generated RSA key with keyID: %s", tufPrivKey.ID())
|
|
| 262 |
- |
|
| 263 |
- return tufPrivKey, nil |
|
| 264 |
-} |
|
| 265 |
- |
|
| 266 |
-// RSAToPrivateKey converts an rsa.Private key to a TUF data.PrivateKey type |
|
| 267 |
-func RSAToPrivateKey(rsaPrivKey *rsa.PrivateKey) (data.PrivateKey, error) {
|
|
| 268 |
- // Get a DER-encoded representation of the PublicKey |
|
| 269 |
- rsaPubBytes, err := x509.MarshalPKIXPublicKey(&rsaPrivKey.PublicKey) |
|
| 270 |
- if err != nil {
|
|
| 271 |
- return nil, fmt.Errorf("failed to marshal public key: %v", err)
|
|
| 272 |
- } |
|
| 273 |
- |
|
| 274 |
- // Get a DER-encoded representation of the PrivateKey |
|
| 275 |
- rsaPrivBytes := x509.MarshalPKCS1PrivateKey(rsaPrivKey) |
|
| 276 |
- |
|
| 277 |
- pubKey := data.NewRSAPublicKey(rsaPubBytes) |
|
| 278 |
- return data.NewRSAPrivateKey(pubKey, rsaPrivBytes) |
|
| 279 |
-} |
|
| 280 |
- |
|
| 281 |
-// GenerateECDSAKey generates an ECDSA Private key and returns a TUF PrivateKey |
|
| 282 |
-func GenerateECDSAKey(random io.Reader) (data.PrivateKey, error) {
|
|
| 283 |
- ecdsaPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), random) |
|
| 284 |
- if err != nil {
|
|
| 285 |
- return nil, err |
|
| 286 |
- } |
|
| 287 |
- |
|
| 288 |
- tufPrivKey, err := ECDSAToPrivateKey(ecdsaPrivKey) |
|
| 289 |
- if err != nil {
|
|
| 290 |
- return nil, err |
|
| 291 |
- } |
|
| 292 |
- |
|
| 293 |
- logrus.Debugf("generated ECDSA key with keyID: %s", tufPrivKey.ID())
|
|
| 294 |
- |
|
| 295 |
- return tufPrivKey, nil |
|
| 296 |
-} |
|
| 297 |
- |
|
| 298 |
-// GenerateED25519Key generates an ED25519 private key and returns a TUF |
|
| 299 |
-// PrivateKey. The serialization format we use is just the public key bytes |
|
| 300 |
-// followed by the private key bytes |
|
| 301 |
-func GenerateED25519Key(random io.Reader) (data.PrivateKey, error) {
|
|
| 302 |
- pub, priv, err := ed25519.GenerateKey(random) |
|
| 303 |
- if err != nil {
|
|
| 304 |
- return nil, err |
|
| 305 |
- } |
|
| 306 |
- |
|
| 307 |
- var serialized [ed25519.PublicKeySize + ed25519.PrivateKeySize]byte |
|
| 308 |
- copy(serialized[:], pub[:]) |
|
| 309 |
- copy(serialized[ed25519.PublicKeySize:], priv[:]) |
|
| 310 |
- |
|
| 311 |
- tufPrivKey, err := ED25519ToPrivateKey(serialized[:]) |
|
| 312 |
- if err != nil {
|
|
| 313 |
- return nil, err |
|
| 314 |
- } |
|
| 315 |
- |
|
| 316 |
- logrus.Debugf("generated ED25519 key with keyID: %s", tufPrivKey.ID())
|
|
| 317 |
- |
|
| 318 |
- return tufPrivKey, nil |
|
| 319 |
-} |
|
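The serialization format is literally the 32-byte public key followed by the 64-byte private key (whose second half repeats the public key), giving PublicKeySize+PrivateKeySize bytes in total. The layout, illustrated with the standard library's crypto/ed25519 (notary vendors agl/ed25519, which uses the same sizes):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	// notary's serialization: public key bytes, then private key bytes.
	serialized := append(append([]byte{}, pub...), priv...)
	fmt.Println(len(serialized)) // 32 + 64 = 96

	// The first PublicKeySize bytes are enough to rebuild the public half,
	// which is exactly what ED25519ToPrivateKey does above.
	rebuiltPub := serialized[:ed25519.PublicKeySize]
	fmt.Println(string(pub) == string(rebuiltPub)) // true
}
```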
| 320 |
- |
|
| 321 |
-// ECDSAToPrivateKey converts an ecdsa.Private key to a TUF data.PrivateKey type |
|
| 322 |
-func ECDSAToPrivateKey(ecdsaPrivKey *ecdsa.PrivateKey) (data.PrivateKey, error) {
|
|
| 323 |
- // Get a DER-encoded representation of the PublicKey |
|
| 324 |
- ecdsaPubBytes, err := x509.MarshalPKIXPublicKey(&ecdsaPrivKey.PublicKey) |
|
| 325 |
- if err != nil {
|
|
| 326 |
- return nil, fmt.Errorf("failed to marshal public key: %v", err)
|
|
| 327 |
- } |
|
| 328 |
- |
|
| 329 |
- // Get a DER-encoded representation of the PrivateKey |
|
| 330 |
- ecdsaPrivKeyBytes, err := x509.MarshalECPrivateKey(ecdsaPrivKey) |
|
| 331 |
- if err != nil {
|
|
| 332 |
- return nil, fmt.Errorf("failed to marshal private key: %v", err)
|
|
| 333 |
- } |
|
| 334 |
- |
|
| 335 |
- pubKey := data.NewECDSAPublicKey(ecdsaPubBytes) |
|
| 336 |
- return data.NewECDSAPrivateKey(pubKey, ecdsaPrivKeyBytes) |
|
| 337 |
-} |
|
| 338 |
- |
|
| 339 |
-// ED25519ToPrivateKey converts a serialized ED25519 key to a TUF |
|
| 340 |
-// data.PrivateKey type |
|
| 341 |
-func ED25519ToPrivateKey(privKeyBytes []byte) (data.PrivateKey, error) {
|
|
| 342 |
- if len(privKeyBytes) != ed25519.PublicKeySize+ed25519.PrivateKeySize {
|
|
| 343 |
- return nil, errors.New("malformed ed25519 private key")
|
|
| 344 |
- } |
|
| 345 |
- |
|
| 346 |
- pubKey := data.NewED25519PublicKey(privKeyBytes[:ed25519.PublicKeySize]) |
|
| 347 |
- return data.NewED25519PrivateKey(*pubKey, privKeyBytes) |
|
| 348 |
-} |
|
| 349 |
- |
|
| 350 |
-func blockType(k data.PrivateKey) (string, error) {
|
|
| 351 |
- switch k.Algorithm() {
|
|
| 352 |
- case data.RSAKey, data.RSAx509Key: |
|
| 353 |
- return "RSA PRIVATE KEY", nil |
|
| 354 |
- case data.ECDSAKey, data.ECDSAx509Key: |
|
| 355 |
- return "EC PRIVATE KEY", nil |
|
| 356 |
- case data.ED25519Key: |
|
| 357 |
- return "ED25519 PRIVATE KEY", nil |
|
| 358 |
- default: |
|
| 359 |
- return "", fmt.Errorf("algorithm %s not supported", k.Algorithm())
|
|
| 360 |
- } |
|
| 361 |
-} |
|
| 362 |
- |
|
| 363 |
-// KeyToPEM returns a PEM encoded key from a Private Key |
|
| 364 |
-func KeyToPEM(privKey data.PrivateKey, role string) ([]byte, error) {
|
|
| 365 |
- bt, err := blockType(privKey) |
|
| 366 |
- if err != nil {
|
|
| 367 |
- return nil, err |
|
| 368 |
- } |
|
| 369 |
- |
|
| 370 |
- headers := map[string]string{}
|
|
| 371 |
- if role != "" {
|
|
| 372 |
- headers = map[string]string{
|
|
| 373 |
- "role": role, |
|
| 374 |
- } |
|
| 375 |
- } |
|
| 376 |
- |
|
| 377 |
- block := &pem.Block{
|
|
| 378 |
- Type: bt, |
|
| 379 |
- Headers: headers, |
|
| 380 |
- Bytes: privKey.Private(), |
|
| 381 |
- } |
|
| 382 |
- |
|
| 383 |
- return pem.EncodeToMemory(block), nil |
|
| 384 |
-} |
|
| 385 |
- |
|
| 386 |
-// EncryptPrivateKey returns an encrypted PEM key given a Privatekey |
|
| 387 |
-// and a passphrase |
|
| 388 |
-func EncryptPrivateKey(key data.PrivateKey, role, passphrase string) ([]byte, error) {
|
|
| 389 |
- bt, err := blockType(key) |
|
| 390 |
- if err != nil {
|
|
| 391 |
- return nil, err |
|
| 392 |
- } |
|
| 393 |
- |
|
| 394 |
- password := []byte(passphrase) |
|
| 395 |
- cipherType := x509.PEMCipherAES256 |
|
| 396 |
- |
|
| 397 |
- encryptedPEMBlock, err := x509.EncryptPEMBlock(rand.Reader, |
|
| 398 |
- bt, |
|
| 399 |
- key.Private(), |
|
| 400 |
- password, |
|
| 401 |
- cipherType) |
|
| 402 |
- if err != nil {
|
|
| 403 |
- return nil, err |
|
| 404 |
- } |
|
| 405 |
- |
|
| 406 |
- if encryptedPEMBlock.Headers == nil {
|
|
| 407 |
- return nil, fmt.Errorf("unable to encrypt key - invalid PEM file produced")
|
|
| 408 |
- } |
|
| 409 |
- encryptedPEMBlock.Headers["role"] = role |
|
| 410 |
- |
|
| 411 |
- return pem.EncodeToMemory(encryptedPEMBlock), nil |
|
| 412 |
-} |
|
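EncryptPrivateKey wraps x509.EncryptPEMBlock (AES-256) and then stamps the role into the PEM headers in cleartext, which is what lets ReadRoleFromPEM below, and the yubikey importer added later in this diff, recover the role without the passphrase. A sketch using the signatures exactly as defined above (the relocated tuf/utils copies may differ slightly):

```go
package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"github.com/docker/notary/tuf/utils"
)

func main() {
	privKey, err := utils.GenerateECDSAKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// Encrypt with a passphrase; the "role" header rides along in cleartext.
	pemBytes, err := utils.EncryptPrivateKey(privKey, "targets", "opaque-test-passphrase")
	if err != nil {
		log.Fatal(err)
	}

	// The role is readable without the passphrase...
	fmt.Println(utils.ReadRoleFromPEM(pemBytes)) // "targets"

	// ...but the key material is not.
	if _, err := utils.ParsePEMPrivateKey(pemBytes, "wrong passphrase"); err != nil {
		fmt.Println("expected failure:", err)
	}
}
```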
| 413 |
- |
|
| 414 |
-// ReadRoleFromPEM returns the value from the role PEM header, if it exists |
|
| 415 |
-func ReadRoleFromPEM(pemBytes []byte) string {
|
|
| 416 |
- pemBlock, _ := pem.Decode(pemBytes) |
|
| 417 |
- if pemBlock == nil || pemBlock.Headers == nil {
|
|
| 418 |
- return "" |
|
| 419 |
- } |
|
| 420 |
- role, ok := pemBlock.Headers["role"] |
|
| 421 |
- if !ok {
|
|
| 422 |
- return "" |
|
| 423 |
- } |
|
| 424 |
- return role |
|
| 425 |
-} |
|
| 426 |
- |
|
| 427 |
-// CertToKey transforms a single input certificate into its corresponding |
|
| 428 |
-// PublicKey |
|
| 429 |
-func CertToKey(cert *x509.Certificate) data.PublicKey {
|
|
| 430 |
- block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
|
|
| 431 |
- pemdata := pem.EncodeToMemory(&block) |
|
| 432 |
- |
|
| 433 |
- switch cert.PublicKeyAlgorithm {
|
|
| 434 |
- case x509.RSA: |
|
| 435 |
- return data.NewRSAx509PublicKey(pemdata) |
|
| 436 |
- case x509.ECDSA: |
|
| 437 |
- return data.NewECDSAx509PublicKey(pemdata) |
|
| 438 |
- default: |
|
| 439 |
- logrus.Debugf("Unknown key type parsed from certificate: %v", cert.PublicKeyAlgorithm)
|
|
| 440 |
- return nil |
|
| 441 |
- } |
|
| 442 |
-} |
|
| 443 |
- |
|
| 444 |
-// CertsToKeys transforms each of the input certificate chains into its corresponding |
|
| 445 |
-// PublicKey |
|
| 446 |
-func CertsToKeys(leafCerts map[string]*x509.Certificate, intCerts map[string][]*x509.Certificate) map[string]data.PublicKey {
|
|
| 447 |
- keys := make(map[string]data.PublicKey) |
|
| 448 |
- for id, leafCert := range leafCerts {
|
|
| 449 |
- if key, err := CertBundleToKey(leafCert, intCerts[id]); err == nil {
|
|
| 450 |
- keys[key.ID()] = key |
|
| 451 |
- } |
|
| 452 |
- } |
|
| 453 |
- return keys |
|
| 454 |
-} |
|
| 455 |
- |
|
| 456 |
-// CertBundleToKey creates a TUF key from a leaf certs and a list of |
|
| 457 |
-// intermediates |
|
| 458 |
-func CertBundleToKey(leafCert *x509.Certificate, intCerts []*x509.Certificate) (data.PublicKey, error) {
|
|
| 459 |
- certBundle := []*x509.Certificate{leafCert}
|
|
| 460 |
- certBundle = append(certBundle, intCerts...) |
|
| 461 |
- certChainPEM, err := CertChainToPEM(certBundle) |
|
| 462 |
- if err != nil {
|
|
| 463 |
- return nil, err |
|
| 464 |
- } |
|
| 465 |
- var newKey data.PublicKey |
|
| 466 |
- // Use the leaf cert's public key algorithm for typing |
|
| 467 |
- switch leafCert.PublicKeyAlgorithm {
|
|
| 468 |
- case x509.RSA: |
|
| 469 |
- newKey = data.NewRSAx509PublicKey(certChainPEM) |
|
| 470 |
- case x509.ECDSA: |
|
| 471 |
- newKey = data.NewECDSAx509PublicKey(certChainPEM) |
|
| 472 |
- default: |
|
| 473 |
- logrus.Debugf("Unknown key type parsed from certificate: %v", leafCert.PublicKeyAlgorithm)
|
|
| 474 |
- return nil, x509.ErrUnsupportedAlgorithm |
|
| 475 |
- } |
|
| 476 |
- return newKey, nil |
|
| 477 |
-} |
|
| 478 |
- |
|
| 479 |
-// NewCertificate returns an X509 Certificate following a template, given a GUN and validity interval. |
|
| 480 |
-func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
|
|
| 481 |
- serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) |
|
| 482 |
- |
|
| 483 |
- serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) |
|
| 484 |
- if err != nil {
|
|
| 485 |
- return nil, fmt.Errorf("failed to generate new certificate: %v", err)
|
|
| 486 |
- } |
|
| 487 |
- |
|
| 488 |
- return &x509.Certificate{
|
|
| 489 |
- SerialNumber: serialNumber, |
|
| 490 |
- Subject: pkix.Name{
|
|
| 491 |
- CommonName: gun, |
|
| 492 |
- }, |
|
| 493 |
- NotBefore: startTime, |
|
| 494 |
- NotAfter: endTime, |
|
| 495 |
- |
|
| 496 |
- KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, |
|
| 497 |
- ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
|
|
| 498 |
- BasicConstraintsValid: true, |
|
| 499 |
- }, nil |
|
| 500 |
-} |
|
| 501 |
- |
|
| 502 |
-// X509PublicKeyID returns a public key ID as a string, given a |
|
| 503 |
-// data.PublicKey that contains an X509 Certificate |
|
| 504 |
-func X509PublicKeyID(certPubKey data.PublicKey) (string, error) {
|
|
| 505 |
- // Note that this only loads the first certificate from the public key |
|
| 506 |
- cert, err := LoadCertFromPEM(certPubKey.Public()) |
|
| 507 |
- if err != nil {
|
|
| 508 |
- return "", err |
|
| 509 |
- } |
|
| 510 |
- pubKeyBytes, err := x509.MarshalPKIXPublicKey(cert.PublicKey) |
|
| 511 |
- if err != nil {
|
|
| 512 |
- return "", err |
|
| 513 |
- } |
|
| 514 |
- |
|
| 515 |
- var key data.PublicKey |
|
| 516 |
- switch certPubKey.Algorithm() {
|
|
| 517 |
- case data.ECDSAx509Key: |
|
| 518 |
- key = data.NewECDSAPublicKey(pubKeyBytes) |
|
| 519 |
- case data.RSAx509Key: |
|
| 520 |
- key = data.NewRSAPublicKey(pubKeyBytes) |
|
| 521 |
- } |
|
| 522 |
- |
|
| 523 |
- return key.ID(), nil |
|
| 524 |
-} |
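None of this block is actually gone: the hunks that follow re-import the same helpers from github.com/docker/notary/tuf/utils, so the deletion is a move out of trustmanager, presumably so the tuf and trustpinning packages can use them without depending on trustmanager. For callers the change is essentially an import swap:

```go
package vendorswap

import (
	"crypto/x509"

	// v0.3.0 callers imported "github.com/docker/notary/trustmanager" instead.
	"github.com/docker/notary/tuf/utils"
)

// loadBundle is unchanged apart from the package the helper lives in.
func loadBundle(pemBytes []byte) ([]*x509.Certificate, error) {
	return utils.LoadCertBundleFromPEM(pemBytes)
}
```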
| 525 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,57 @@ |
| 0 |
+// +build pkcs11 |
|
| 1 |
+ |
|
| 2 |
+package yubikey |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "encoding/pem" |
|
| 6 |
+ "errors" |
|
| 7 |
+ "github.com/docker/notary" |
|
| 8 |
+ "github.com/docker/notary/trustmanager" |
|
| 9 |
+ "github.com/docker/notary/tuf/utils" |
|
| 10 |
+) |
|
| 11 |
+ |
|
| 12 |
+// YubiImport is a wrapper around the YubiStore that allows us to import private |
|
| 13 |
+// keys to the yubikey |
|
| 14 |
+type YubiImport struct {
|
|
| 15 |
+ dest *YubiStore |
|
| 16 |
+ passRetriever notary.PassRetriever |
|
| 17 |
+} |
|
| 18 |
+ |
|
| 19 |
+// NewImporter returns a wrapper for the YubiStore provided that enables importing |
|
| 20 |
+// keys via the simple Set(string, []byte) interface |
|
| 21 |
+func NewImporter(ys *YubiStore, ret notary.PassRetriever) *YubiImport {
|
|
| 22 |
+ return &YubiImport{
|
|
| 23 |
+ dest: ys, |
|
| 24 |
+ passRetriever: ret, |
|
| 25 |
+ } |
|
| 26 |
+} |
|
| 27 |
+ |
|
| 28 |
+// Set determines if we are allowed to set the given key on the Yubikey and |
|
| 29 |
+// calls through to YubiStore.AddKey if it's valid |
|
| 30 |
+func (s *YubiImport) Set(name string, bytes []byte) error {
|
|
| 31 |
+ block, _ := pem.Decode(bytes) |
|
| 32 |
+ if block == nil {
|
|
| 33 |
+ return errors.New("invalid PEM data, could not parse")
|
|
| 34 |
+ } |
|
| 35 |
+ role, ok := block.Headers["role"] |
|
| 36 |
+ if !ok {
|
|
| 37 |
+ return errors.New("no role found for key")
|
|
| 38 |
+ } |
|
| 39 |
+ ki := trustmanager.KeyInfo{
|
|
| 40 |
+ // GUN is ignored by YubiStore |
|
| 41 |
+ Role: role, |
|
| 42 |
+ } |
|
| 43 |
+ privKey, err := utils.ParsePEMPrivateKey(bytes, "") |
|
| 44 |
+ if err != nil {
|
|
| 45 |
+ privKey, _, err = trustmanager.GetPasswdDecryptBytes( |
|
| 46 |
+ s.passRetriever, |
|
| 47 |
+ bytes, |
|
| 48 |
+ name, |
|
| 49 |
+ ki.Role, |
|
| 50 |
+ ) |
|
| 51 |
+ if err != nil {
|
|
| 52 |
+ return err |
|
| 53 |
+ } |
|
| 54 |
+ } |
|
| 55 |
+ return s.dest.AddKey(ki, privKey) |
|
| 56 |
+} |
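The importer gives the yubikey store the same Set(name, bytes) surface that file-based key stores expose, so generic import code can target either. A usage sketch: the retriever literal matches the notary.PassRetriever shape, the paths are hypothetical, and NewKeyFileStore's exact signature is assumed here:

```go
// +build pkcs11

package main

import (
	"io/ioutil"
	"log"

	"github.com/docker/notary/trustmanager"
	"github.com/docker/notary/trustmanager/yubikey"
)

func main() {
	// A fixed-passphrase retriever; the signature matches notary.PassRetriever.
	retriever := func(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
		return "opaque-test-passphrase", false, nil
	}

	// Any backup KeyStore works; a file-based store is typical.
	backup, err := trustmanager.NewKeyFileStore("/tmp/notary-keys", retriever) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	store, err := yubikey.NewYubiStore(backup, retriever)
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the store and push a PEM key (carrying a "role" header) to the token.
	importer := yubikey.NewImporter(store, retriever)
	pemBytes, err := ioutil.ReadFile("root_key.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	if err := importer.Set("root_key", pemBytes); err != nil {
		log.Fatal(err)
	}
}
```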
| ... | ... |
@@ -17,10 +17,11 @@ import ( |
| 17 | 17 |
"time" |
| 18 | 18 |
|
| 19 | 19 |
"github.com/Sirupsen/logrus" |
| 20 |
- "github.com/docker/notary/passphrase" |
|
| 20 |
+ "github.com/docker/notary" |
|
| 21 | 21 |
"github.com/docker/notary/trustmanager" |
| 22 | 22 |
"github.com/docker/notary/tuf/data" |
| 23 | 23 |
"github.com/docker/notary/tuf/signed" |
| 24 |
+ "github.com/docker/notary/tuf/utils" |
|
| 24 | 25 |
"github.com/miekg/pkcs11" |
| 25 | 26 |
) |
| 26 | 27 |
|
| ... | ... |
@@ -132,7 +133,7 @@ type yubiSlot struct {
|
| 132 | 132 |
// YubiPrivateKey represents a private key inside of a yubikey |
| 133 | 133 |
type YubiPrivateKey struct {
|
| 134 | 134 |
data.ECDSAPublicKey |
| 135 |
- passRetriever passphrase.Retriever |
|
| 135 |
+ passRetriever notary.PassRetriever |
|
| 136 | 136 |
slot []byte |
| 137 | 137 |
libLoader pkcs11LibLoader |
| 138 | 138 |
} |
| ... | ... |
@@ -143,9 +144,9 @@ type yubikeySigner struct {
|
| 143 | 143 |
} |
| 144 | 144 |
|
| 145 | 145 |
// NewYubiPrivateKey returns a YubiPrivateKey, which implements the data.PrivateKey |
| 146 |
-// interface except that the private material is inacessible |
|
| 146 |
+// interface except that the private material is inaccessible |
|
| 147 | 147 |
func NewYubiPrivateKey(slot []byte, pubKey data.ECDSAPublicKey, |
| 148 |
- passRetriever passphrase.Retriever) *YubiPrivateKey {
|
|
| 148 |
+ passRetriever notary.PassRetriever) *YubiPrivateKey {
|
|
| 149 | 149 |
|
| 150 | 150 |
return &YubiPrivateKey{
|
| 151 | 151 |
ECDSAPublicKey: pubKey, |
| ... | ... |
@@ -228,7 +229,7 @@ func addECDSAKey( |
| 228 | 228 |
session pkcs11.SessionHandle, |
| 229 | 229 |
privKey data.PrivateKey, |
| 230 | 230 |
pkcs11KeyID []byte, |
| 231 |
- passRetriever passphrase.Retriever, |
|
| 231 |
+ passRetriever notary.PassRetriever, |
|
| 232 | 232 |
role string, |
| 233 | 233 |
) error {
|
| 234 | 234 |
logrus.Debugf("Attempting to add key to yubikey with ID: %s", privKey.ID())
|
| ... | ... |
@@ -249,7 +250,7 @@ func addECDSAKey( |
| 249 | 249 |
|
| 250 | 250 |
// Hard-coded policy: the generated certificate expires in 10 years. |
| 251 | 251 |
startTime := time.Now() |
| 252 |
- template, err := trustmanager.NewCertificate(role, startTime, startTime.AddDate(10, 0, 0)) |
|
| 252 |
+ template, err := utils.NewCertificate(role, startTime, startTime.AddDate(10, 0, 0)) |
|
| 253 | 253 |
if err != nil {
|
| 254 | 254 |
return fmt.Errorf("failed to create the certificate template: %v", err)
|
| 255 | 255 |
} |
| ... | ... |
@@ -345,7 +346,7 @@ func getECDSAKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byt |
| 345 | 345 |
} |
| 346 | 346 |
|
| 347 | 347 |
// sign returns a signature for a given signature request |
| 348 |
-func sign(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever passphrase.Retriever, payload []byte) ([]byte, error) {
|
|
| 348 |
+func sign(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever notary.PassRetriever, payload []byte) ([]byte, error) {
|
|
| 349 | 349 |
err := login(ctx, session, passRetriever, pkcs11.CKU_USER, UserPin) |
| 350 | 350 |
if err != nil {
|
| 351 | 351 |
return nil, fmt.Errorf("error logging in: %v", err)
|
| ... | ... |
@@ -404,7 +405,7 @@ func sign(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, pass |
| 404 | 404 |
return sig[:], nil |
| 405 | 405 |
} |
| 406 | 406 |
|
| 407 |
-func yubiRemoveKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever passphrase.Retriever, keyID string) error {
|
|
| 407 |
+func yubiRemoveKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever notary.PassRetriever, keyID string) error {
|
|
| 408 | 408 |
err := login(ctx, session, passRetriever, pkcs11.CKU_SO, SOUserPin) |
| 409 | 409 |
if err != nil {
|
| 410 | 410 |
return err |
| ... | ... |
@@ -615,7 +616,7 @@ func getNextEmptySlot(ctx IPKCS11Ctx, session pkcs11.SessionHandle) ([]byte, err |
| 615 | 615 |
|
| 616 | 616 |
// YubiStore is a KeyStore for private keys inside a Yubikey |
| 617 | 617 |
type YubiStore struct {
|
| 618 |
- passRetriever passphrase.Retriever |
|
| 618 |
+ passRetriever notary.PassRetriever |
|
| 619 | 619 |
keys map[string]yubiSlot |
| 620 | 620 |
backupStore trustmanager.KeyStore |
| 621 | 621 |
libLoader pkcs11LibLoader |
| ... | ... |
@@ -623,7 +624,7 @@ type YubiStore struct {
|
| 623 | 623 |
|
| 624 | 624 |
// NewYubiStore returns a YubiStore, given a backup key store to write any |
| 625 | 625 |
// generated keys to (usually a KeyFileStore) |
| 626 |
-func NewYubiStore(backupStore trustmanager.KeyStore, passphraseRetriever passphrase.Retriever) ( |
|
| 626 |
+func NewYubiStore(backupStore trustmanager.KeyStore, passphraseRetriever notary.PassRetriever) ( |
|
| 627 | 627 |
*YubiStore, error) {
|
| 628 | 628 |
|
| 629 | 629 |
s := &YubiStore{
|
| ... | ... |
@@ -653,7 +654,7 @@ func (s *YubiStore) ListKeys() map[string]trustmanager.KeyInfo {
|
| 653 | 653 |
} |
| 654 | 654 |
ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader) |
| 655 | 655 |
if err != nil {
|
| 656 |
- logrus.Debugf("Failed to initialize PKCS11 environment: %s", err.Error())
|
|
| 656 |
+ logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
|
|
| 657 | 657 |
return nil |
| 658 | 658 |
} |
| 659 | 659 |
defer cleanup(ctx, session) |
| ... | ... |
@@ -697,7 +698,7 @@ func (s *YubiStore) addKey(keyID, role string, privKey data.PrivateKey) ( |
| 697 | 697 |
|
| 698 | 698 |
ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader) |
| 699 | 699 |
if err != nil {
|
| 700 |
- logrus.Debugf("Failed to initialize PKCS11 environment: %s", err.Error())
|
|
| 700 |
+ logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
|
|
| 701 | 701 |
return false, err |
| 702 | 702 |
} |
| 703 | 703 |
defer cleanup(ctx, session) |
| ... | ... |
@@ -735,7 +736,7 @@ func (s *YubiStore) addKey(keyID, role string, privKey data.PrivateKey) ( |
| 735 | 735 |
func (s *YubiStore) GetKey(keyID string) (data.PrivateKey, string, error) {
|
| 736 | 736 |
ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader) |
| 737 | 737 |
if err != nil {
|
| 738 |
- logrus.Debugf("Failed to initialize PKCS11 environment: %s", err.Error())
|
|
| 738 |
+ logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
|
|
| 739 | 739 |
if _, ok := err.(errHSMNotPresent); ok {
|
| 740 | 740 |
err = trustmanager.ErrKeyNotFound{KeyID: keyID}
|
| 741 | 741 |
} |
| ... | ... |
@@ -770,7 +771,7 @@ func (s *YubiStore) GetKey(keyID string) (data.PrivateKey, string, error) {
|
| 770 | 770 |
func (s *YubiStore) RemoveKey(keyID string) error {
|
| 771 | 771 |
ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader) |
| 772 | 772 |
if err != nil {
|
| 773 |
- logrus.Debugf("Failed to initialize PKCS11 environment: %s", err.Error())
|
|
| 773 |
+ logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
|
|
| 774 | 774 |
return nil |
| 775 | 775 |
} |
| 776 | 776 |
defer cleanup(ctx, session) |
| ... | ... |
@@ -789,12 +790,6 @@ func (s *YubiStore) RemoveKey(keyID string) error {
|
| 789 | 789 |
return err |
| 790 | 790 |
} |
| 791 | 791 |
|
| 792 |
-// ExportKey doesn't work, because you can't export data from a Yubikey |
|
| 793 |
-func (s *YubiStore) ExportKey(keyID string) ([]byte, error) {
|
|
| 794 |
- logrus.Debugf("Attempting to export: %s key inside of YubiStore", keyID)
|
|
| 795 |
- return nil, errors.New("Keys cannot be exported from a Yubikey.")
|
|
| 796 |
-} |
|
| 797 |
- |
|
| 798 | 792 |
// GetKeyInfo is not yet implemented |
| 799 | 793 |
func (s *YubiStore) GetKeyInfo(keyID string) (trustmanager.KeyInfo, error) {
|
| 800 | 794 |
return trustmanager.KeyInfo{}, fmt.Errorf("Not yet implemented")
|
| ... | ... |
@@ -874,7 +869,7 @@ func IsAccessible() bool {
|
| 874 | 874 |
return true |
| 875 | 875 |
} |
| 876 | 876 |
|
| 877 |
-func login(ctx IPKCS11Ctx, session pkcs11.SessionHandle, passRetriever passphrase.Retriever, userFlag uint, defaultPassw string) error {
|
|
| 877 |
+func login(ctx IPKCS11Ctx, session pkcs11.SessionHandle, passRetriever notary.PassRetriever, userFlag uint, defaultPassw string) error {
|
|
| 878 | 878 |
// try default password |
| 879 | 879 |
err := ctx.Login(session, userFlag, defaultPassw) |
| 880 | 880 |
if err == nil {
|
| ... | ... |
@@ -902,13 +897,12 @@ func login(ctx IPKCS11Ctx, session pkcs11.SessionHandle, passRetriever passphras |
| 902 | 902 |
return trustmanager.ErrAttemptsExceeded{}
|
| 903 | 903 |
} |
| 904 | 904 |
|
| 905 |
- // Try to convert PEM encoded bytes back to a PrivateKey using the passphrase |
|
| 905 |
+ // attempt to log in; loop on failure |
|
| 906 | 906 |
err = ctx.Login(session, userFlag, passwd) |
| 907 | 907 |
if err == nil {
|
| 908 | 908 |
return nil |
| 909 | 909 |
} |
| 910 | 910 |
} |
| 911 |
- return nil |
|
| 912 | 911 |
} |
| 913 | 912 |
|
| 914 | 913 |
func buildKeyMap(keys map[string]yubiSlot) map[string]trustmanager.KeyInfo {
|
| 915 | 914 |
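The remaining hunks in this file are a mechanical rename: passphrase.Retriever has moved to the top-level package as notary.PassRetriever, so low-level packages such as yubikey no longer need to import passphrase. Note also that the stray return nil after the retry loop in login is dropped. The retriever contract itself is unchanged; it is reproduced locally here for illustration:

```go
package main

import "fmt"

// The retriever contract, as notary.PassRetriever declares it: given the key
// name, the role/alias, whether a new passphrase is being created, and the
// attempt count, return the passphrase, a give-up flag, and an error.
type PassRetriever func(keyName, alias string, createNew bool, numAttempts int) (string, bool, error)

func main() {
	var r PassRetriever = func(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
		if numAttempts > 3 {
			return "", true, fmt.Errorf("too many attempts for %s", keyName)
		}
		return "opaque-test-passphrase", false, nil
	}
	pass, giveup, err := r("root_key", "root", false, 0)
	fmt.Println(pass, giveup, err)
}
```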
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,37 @@ |
| 0 |
+-----BEGIN CERTIFICATE----- |
|
| 1 |
+MIIGMzCCBBugAwIBAgIBATANBgkqhkiG9w0BAQsFADBfMQswCQYDVQQGEwJVUzEL |
|
| 2 |
+MAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv |
|
| 3 |
+Y2tlcjEaMBgGA1UEAwwRTm90YXJ5IFRlc3RpbmcgQ0EwHhcNMTUwNzE2MDQyNTAz |
|
| 4 |
+WhcNMjUwNzEzMDQyNTAzWjBfMRowGAYDVQQDDBFOb3RhcnkgVGVzdGluZyBDQTEL |
|
| 5 |
+MAkGA1UEBhMCVVMxFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv |
|
| 6 |
+Y2tlcjELMAkGA1UECAwCQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC |
|
| 7 |
+AQCwVVD4pK7z7pXPpJbaZ1Hg5eRXIcaYtbFPCnN0iqy9HsVEGnEn5BPNSEsuP+m0 |
|
| 8 |
+5N0qVV7DGb1SjiloLXD1qDDvhXWk+giS9ppqPHPLVPB4bvzsqwDYrtpbqkYvO0YK |
|
| 9 |
+0SL3kxPXUFdlkFfgu0xjlczm2PhWG3Jd8aAtspL/L+VfPA13JUaWxSLpui1In8rh |
|
| 10 |
+gAyQTK6Q4Of6GbJYTnAHb59UoLXSzB5AfqiUq6L7nEYYKoPflPbRAIWL/UBm0c+H |
|
| 11 |
+ocms706PYpmPS2RQv3iOGmnn9hEVp3P6jq7WAevbA4aYGx5EsbVtYABqJBbFWAuw |
|
| 12 |
+wTGRYmzn0Mj0eTMge9ztYB2/2sxdTe6uhmFgpUXngDqJI5O9N3zPfvlEImCky3HM |
|
| 13 |
+jJoL7g5smqX9o1P+ESLh0VZzhh7IDPzQTXpcPIS/6z0l22QGkK/1N1PaADaUHdLL |
|
| 14 |
+vSav3y2BaEmPvf2fkZj8yP5eYgi7Cw5ONhHLDYHFcl9Zm/ywmdxHJETz9nfgXnsW |
|
| 15 |
+HNxDqrkCVO46r/u6rSrUt6hr3oddJG8s8Jo06earw6XU3MzM+3giwkK0SSM3uRPq |
|
| 16 |
+4AscR1Tv+E31AuOAmjqYQoT29bMIxoSzeljj/YnedwjW45pWyc3JoHaibDwvW9Uo |
|
| 17 |
+GSZBVy4hrM/Fa7XCWv1WfHNW1gDwaLYwDnl5jFmRBvcfuQIDAQABo4H5MIH2MIGR |
|
| 18 |
+BgNVHSMEgYkwgYaAFHUM1U3E4WyL1nvFd+dPY8f4O2hZoWOkYTBfMQswCQYDVQQG |
|
| 19 |
+EwJVUzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNV |
|
| 20 |
+BAoMBkRvY2tlcjEaMBgGA1UEAwwRTm90YXJ5IFRlc3RpbmcgQ0GCCQDCeDLbemIT |
|
| 21 |
+SzASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEF |
|
| 22 |
+BQcDATAOBgNVHQ8BAf8EBAMCAUYwHQYDVR0OBBYEFHe48hcBcAp0bUVlTxXeRA4o |
|
| 23 |
+E16pMA0GCSqGSIb3DQEBCwUAA4ICAQAWUtAPdUFpwRq+N1SzGUejSikeMGyPZscZ |
|
| 24 |
+JBUCmhZoFufgXGbLO5OpcRLaV3Xda0t/5PtdGMSEzczeoZHWknDtw+79OBittPPj |
|
| 25 |
+Sh1oFDuPo35R7eP624lUCch/InZCphTaLx9oDLGcaK3ailQ9wjBdKdlBl8KNKIZp |
|
| 26 |
+a13aP5rnSm2Jva+tXy/yi3BSds3dGD8ITKZyI/6AFHxGvObrDIBpo4FF/zcWXVDj |
|
| 27 |
+paOmxplRtM4Hitm+sXGvfqJe4x5DuOXOnPrT3dHvRT6vSZUoKobxMqmRTOcrOIPa |
|
| 28 |
+EeMpOobshORuRntMDYvvgO3D6p6iciDW2Vp9N6rdMdfOWEQN8JVWvB7IxRHk9qKJ |
|
| 29 |
+vYOWVbczAt0qpMvXF3PXLjZbUM0knOdUKIEbqP4YUbgdzx6RtgiiY930Aj6tAtce |
|
| 30 |
+0fpgNlvjMRpSBuWTlAfNNjG/YhndMz9uI68TMfFpR3PcgVIv30krw/9VzoLi2Dpe |
|
| 31 |
+ow6DrGO6oi+DhN78P4jY/O9UczZK2roZL1Oi5P0RIxf23UZC7x1DlcN3nBr4sYSv |
|
| 32 |
+rBx4cFTMNpwU+nzsIi4djcFDKmJdEOyjMnkP2v0Lwe7yvK08pZdEu+0zbrq17kue |
|
| 33 |
+XpXLc7K68QB15yxzGylU5rRwzmC/YsAVyE4eoGu8PxWxrERvHby4B8YP0vAfOraL |
|
| 34 |
+lKmXlK4dTg== |
|
| 35 |
+-----END CERTIFICATE----- |
|
| 36 |
+ |
| ... | ... |
@@ -5,12 +5,11 @@ import ( |
| 5 | 5 |
"errors" |
| 6 | 6 |
"fmt" |
| 7 | 7 |
"strings" |
| 8 |
- "time" |
|
| 9 | 8 |
|
| 10 | 9 |
"github.com/Sirupsen/logrus" |
| 11 |
- "github.com/docker/notary/trustmanager" |
|
| 12 | 10 |
"github.com/docker/notary/tuf/data" |
| 13 | 11 |
"github.com/docker/notary/tuf/signed" |
| 12 |
+ "github.com/docker/notary/tuf/utils" |
|
| 14 | 13 |
) |
| 15 | 14 |
|
| 16 | 15 |
// ErrValidationFail is returned when there is no valid trusted certificates |
| ... | ... |
@@ -98,18 +97,25 @@ func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trus |
| 98 | 98 |
// Retrieve all the leaf and intermediate certificates in root for which the CN matches the GUN |
| 99 | 99 |
allLeafCerts, allIntCerts := parseAllCerts(signedRoot) |
| 100 | 100 |
certsFromRoot, err := validRootLeafCerts(allLeafCerts, gun, true) |
| 101 |
+ validIntCerts := validRootIntCerts(allIntCerts) |
|
| 101 | 102 |
|
| 102 | 103 |
if err != nil {
|
| 103 | 104 |
logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err)
|
| 104 | 105 |
return nil, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
|
| 105 | 106 |
} |
| 106 | 107 |
|
| 108 |
+ logrus.Debugf("found %d leaf certs, of which %d are valid leaf certs for %s", len(allLeafCerts), len(certsFromRoot), gun)
|
|
| 109 |
+ |
|
| 107 | 110 |
// If we have a previous root, let's try to use it to validate that this new root is valid. |
| 108 |
- if prevRoot != nil {
|
|
| 111 |
+ havePrevRoot := prevRoot != nil |
|
| 112 |
+ if havePrevRoot {
|
|
| 109 | 113 |
// Retrieve all the trusted certificates from our previous root |
| 110 | 114 |
// Note that we do not validate expiries here since our originally trusted root might have expired certs |
| 111 | 115 |
allTrustedLeafCerts, allTrustedIntCerts := parseAllCerts(prevRoot) |
| 112 | 116 |
trustedLeafCerts, err := validRootLeafCerts(allTrustedLeafCerts, gun, false) |
| 117 |
+ if err != nil {
|
|
| 118 |
+ return nil, &ErrValidationFail{Reason: "could not retrieve trusted certs from previous root role data"}
|
|
| 119 |
+ } |
|
| 113 | 120 |
|
| 114 | 121 |
// Use the certificates we found in the previous root for the GUN to verify its signatures |
| 115 | 122 |
// This could potentially be an empty set, in which case we will fail to verify |
| ... | ... |
@@ -121,45 +127,52 @@ func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun string, trus |
| 121 | 121 |
if !ok {
|
| 122 | 122 |
return nil, &ErrValidationFail{Reason: "could not retrieve previous root role data"}
|
| 123 | 123 |
} |
| 124 |
- |
|
| 125 | 124 |
err = signed.VerifySignatures( |
| 126 |
- root, data.BaseRole{Keys: trustmanager.CertsToKeys(trustedLeafCerts, allTrustedIntCerts), Threshold: prevRootRoleData.Threshold})
|
|
| 125 |
+ root, data.BaseRole{Keys: utils.CertsToKeys(trustedLeafCerts, allTrustedIntCerts), Threshold: prevRootRoleData.Threshold})
|
|
| 127 | 126 |
if err != nil {
|
| 128 | 127 |
logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
|
| 129 | 128 |
return nil, &ErrRootRotationFail{Reason: "failed to validate data with current trusted certificates"}
|
| 130 | 129 |
} |
| 131 |
- } else {
|
|
| 132 |
- logrus.Debugf("found no currently valid root certificates for %s, using trust_pinning config to bootstrap trust", gun)
|
|
| 133 |
- trustPinCheckFunc, err := NewTrustPinChecker(trustPinning, gun) |
|
| 134 |
- if err != nil {
|
|
| 135 |
- return nil, &ErrValidationFail{Reason: err.Error()}
|
|
| 130 |
+ // Clear the IsValid marks we could have received from VerifySignatures |
|
| 131 |
+ for i := range root.Signatures {
|
|
| 132 |
+ root.Signatures[i].IsValid = false |
|
| 136 | 133 |
} |
| 134 |
+ } |
|
| 137 | 135 |
|
| 138 |
- validPinnedCerts := map[string]*x509.Certificate{}
|
|
| 139 |
- for id, cert := range certsFromRoot {
|
|
| 140 |
- if ok := trustPinCheckFunc(cert, allIntCerts[id]); !ok {
|
|
| 141 |
- continue |
|
| 142 |
- } |
|
| 143 |
- validPinnedCerts[id] = cert |
|
| 144 |
- } |
|
| 145 |
- if len(validPinnedCerts) == 0 {
|
|
| 146 |
- return nil, &ErrValidationFail{Reason: "unable to match any certificates to trust_pinning config"}
|
|
| 136 |
+ // Regardless of having a previous root or not, confirm that the new root validates against the trust pinning |
|
| 137 |
+ logrus.Debugf("checking root against trust_pinning config", gun)
|
|
| 138 |
+ trustPinCheckFunc, err := NewTrustPinChecker(trustPinning, gun, !havePrevRoot) |
|
| 139 |
+ if err != nil {
|
|
| 140 |
+ return nil, &ErrValidationFail{Reason: err.Error()}
|
|
| 141 |
+ } |
|
| 142 |
+ |
|
| 143 |
+ validPinnedCerts := map[string]*x509.Certificate{}
|
|
| 144 |
+ for id, cert := range certsFromRoot {
|
|
| 145 |
+ logrus.Debugf("checking trust-pinning for cert: %s", id)
|
|
| 146 |
+ if ok := trustPinCheckFunc(cert, validIntCerts[id]); !ok {
|
|
| 147 |
+ logrus.Debugf("trust-pinning check failed for cert: %s", id)
|
|
| 148 |
+ continue |
|
| 147 | 149 |
} |
| 148 |
- certsFromRoot = validPinnedCerts |
|
| 150 |
+ validPinnedCerts[id] = cert |
|
| 149 | 151 |
} |
| 152 |
+ if len(validPinnedCerts) == 0 {
|
|
| 153 |
+ return nil, &ErrValidationFail{Reason: "unable to match any certificates to trust_pinning config"}
|
|
| 154 |
+ } |
|
| 155 |
+ certsFromRoot = validPinnedCerts |
|
| 150 | 156 |
|
| 151 | 157 |
// Validate the integrity of the new root (does it have valid signatures) |
| 152 | 158 |
// Note that certsFromRoot is guaranteed to be unchanged only if we had prior cert data for this GUN or enabled TOFUS |
| 153 | 159 |
// If we attempted to pin a certain certificate or CA, certsFromRoot could have been pruned accordingly |
| 154 | 160 |
err = signed.VerifySignatures(root, data.BaseRole{
|
| 155 |
- Keys: trustmanager.CertsToKeys(certsFromRoot, allIntCerts), Threshold: rootRole.Threshold}) |
|
| 161 |
+ Keys: utils.CertsToKeys(certsFromRoot, validIntCerts), Threshold: rootRole.Threshold}) |
|
| 156 | 162 |
if err != nil {
|
| 157 | 163 |
logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
|
| 158 | 164 |
return nil, &ErrValidationFail{Reason: "failed to validate integrity of roots"}
|
| 159 | 165 |
} |
| 160 | 166 |
|
| 161 |
- logrus.Debugf("Root validation succeeded for %s", gun)
|
|
| 162 |
- return signedRoot, nil |
|
| 167 |
+ logrus.Debugf("root validation succeeded for %s", gun)
|
|
| 168 |
+ // Call RootFromSigned to make sure we pick up on the IsValid markings from VerifySignatures |
|
| 169 |
+ return data.RootFromSigned(root) |
|
| 163 | 170 |
} |
| 164 | 171 |
|
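The net behavioral change in ValidateRoot: trust pinning is now checked on every validation rather than only on first bootstrap, intermediates are pre-filtered through validRootIntCerts, the IsValid signature marks from the previous-root check are cleared so only the final VerifySignatures pass counts, and the function returns data.RootFromSigned(root) so those marks survive. A sketch of the trust_pinning configuration the checker consumes (field names as used elsewhere in this diff; the cert ID and bundle path are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/docker/notary/trustpinning"
)

func main() {
	// Pin one GUN to explicit cert IDs, pin a whole prefix to a CA bundle,
	// and keep TOFU as the fallback for everything else.
	config := trustpinning.TrustPinConfig{
		Certs: map[string][]string{
			"docker.com/library/alpine": {"abc123..."}, // hypothetical cert ID
		},
		CA: map[string]string{
			"docker.com/": "fixtures/root-ca.crt", // hypothetical bundle path
		},
		DisableTOFU: false,
	}
	fmt.Printf("%+v\n", config)
}
```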
| 165 | 172 |
// validRootLeafCerts returns a list of possibly (if checkExpiry is true) non-expired, non-sha1 certificates |
| ... | ... |
@@ -177,17 +190,9 @@ func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string, c |
| 177 | 177 |
continue |
| 178 | 178 |
} |
| 179 | 179 |
// Make sure the certificate is not expired if checkExpiry is true |
| 180 |
- if checkExpiry && time.Now().After(cert.NotAfter) {
|
|
| 181 |
- logrus.Debugf("error leaf certificate is expired")
|
|
| 182 |
- continue |
|
| 183 |
- } |
|
| 184 |
- |
|
| 185 |
- // We don't allow root certificates that use SHA1 |
|
| 186 |
- if cert.SignatureAlgorithm == x509.SHA1WithRSA || |
|
| 187 |
- cert.SignatureAlgorithm == x509.DSAWithSHA1 || |
|
| 188 |
- cert.SignatureAlgorithm == x509.ECDSAWithSHA1 {
|
|
| 189 |
- |
|
| 190 |
- logrus.Debugf("error certificate uses deprecated hashing algorithm (SHA1)")
|
|
| 180 |
+ // and warn if it hasn't expired yet but is within 6 months of expiry |
|
| 181 |
+ if err := utils.ValidateCertificate(cert, checkExpiry); err != nil {
|
|
| 182 |
+ logrus.Debugf("%s is invalid: %s", id, err.Error())
|
|
| 191 | 183 |
continue |
| 192 | 184 |
} |
| 193 | 185 |
|
| ... | ... |
@@ -204,6 +209,24 @@ func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun string, c |
| 204 | 204 |
return validLeafCerts, nil |
| 205 | 205 |
} |
| 206 | 206 |
|
| 207 |
+// validRootIntCerts filters the passed in structure of intermediate certificates to only include non-expired, non-sha1 certificates |
|
| 208 |
+// Note that this "validity" alone does not imply any measure of trust. |
|
| 209 |
+func validRootIntCerts(allIntCerts map[string][]*x509.Certificate) map[string][]*x509.Certificate {
|
|
| 210 |
+ validIntCerts := make(map[string][]*x509.Certificate) |
|
| 211 |
+ |
|
| 212 |
+ // Go through every leaf cert ID, and build its valid intermediate certificate list |
|
| 213 |
+ for leafID, intCertList := range allIntCerts {
|
|
| 214 |
+ for _, intCert := range intCertList {
|
|
| 215 |
+ if err := utils.ValidateCertificate(intCert, true); err != nil {
|
|
| 216 |
+ continue |
|
| 217 |
+ } |
|
| 218 |
+ validIntCerts[leafID] = append(validIntCerts[leafID], intCert) |
|
| 219 |
+ } |
|
| 220 |
+ |
|
| 221 |
+ } |
|
| 222 |
+ return validIntCerts |
|
| 223 |
+} |
|
| 224 |
+ |
|
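validRootIntCerts mirrors validRootLeafCerts for the intermediate map: both maps stay keyed by the leaf key ID that parseAllCerts produces, and both now feed utils.CertsToKeys, so an expired or SHA-1 intermediate can no longer ride into the verification key set. The shapes involved, as a compilable sketch:

```go
package certshapes

import (
	"crypto/x509"

	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/utils"
)

// buildVerificationKeys shows how the two maps produced by parseAllCerts
// (and filtered by validRootLeafCerts/validRootIntCerts) are combined:
// each leaf cert plus its surviving intermediates becomes one TUF key.
func buildVerificationKeys(
	leaves map[string]*x509.Certificate,
	intermediates map[string][]*x509.Certificate,
) map[string]data.PublicKey {
	return utils.CertsToKeys(leaves, intermediates)
}
```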
| 207 | 225 |
// parseAllCerts returns two maps, one with all of the leafCertificates and one |
| 208 | 226 |
// with all the intermediate certificates found in signedRoot |
| 209 | 227 |
func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, map[string][]*x509.Certificate) {
|
| ... | ... |
@@ -233,14 +256,14 @@ func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, m |
| 233 | 233 |
|
| 234 | 234 |
// Decode all the x509 certificates that were bundled with this |
| 235 | 235 |
// Specific root key |
| 236 |
- decodedCerts, err := trustmanager.LoadCertBundleFromPEM(key.Public()) |
|
| 236 |
+ decodedCerts, err := utils.LoadCertBundleFromPEM(key.Public()) |
|
| 237 | 237 |
if err != nil {
|
| 238 | 238 |
logrus.Debugf("error while parsing root certificate with keyID: %s, %v", keyID, err)
|
| 239 | 239 |
continue |
| 240 | 240 |
} |
| 241 | 241 |
|
| 242 | 242 |
// Get all non-CA certificates in the decoded certificates |
| 243 |
- leafCertList := trustmanager.GetLeafCerts(decodedCerts) |
|
| 243 |
+ leafCertList := utils.GetLeafCerts(decodedCerts) |
|
| 244 | 244 |
|
| 245 | 245 |
// If we got no leaf certificates or we got more than one, fail |
| 246 | 246 |
if len(leafCertList) != 1 {
|
| ... | ... |
@@ -260,7 +283,7 @@ func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, m |
| 260 | 260 |
leafCerts[key.ID()] = leafCert |
| 261 | 261 |
|
| 262 | 262 |
// Get all the remainder certificates marked as a CA to be used as intermediates |
| 263 |
- intermediateCerts := trustmanager.GetIntermediateCerts(decodedCerts) |
|
| 263 |
+ intermediateCerts := utils.GetIntermediateCerts(decodedCerts) |
|
| 264 | 264 |
intCerts[key.ID()] = intermediateCerts |
| 265 | 265 |
} |
| 266 | 266 |
|
| 267 | 267 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,31 @@ |
| 0 |
+-----BEGIN CERTIFICATE----- |
|
| 1 |
+MIIFKzCCAxWgAwIBAgIQRyp9QqcJfd3ayqdjiz8xIDALBgkqhkiG9w0BAQswODEa |
|
| 2 |
+MBgGA1UEChMRZG9ja2VyLmNvbS9ub3RhcnkxGjAYBgNVBAMTEWRvY2tlci5jb20v |
|
| 3 |
+bm90YXJ5MB4XDTE1MDcxNzA2MzQyM1oXDTE3MDcxNjA2MzQyM1owODEaMBgGA1UE |
|
| 4 |
+ChMRZG9ja2VyLmNvbS9ub3RhcnkxGjAYBgNVBAMTEWRvY2tlci5jb20vbm90YXJ5 |
|
| 5 |
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAoQffrzsYnsH8vGf4Jh55 |
|
| 6 |
+Cj5wrjUGzD/sHkaFHptjJ6ToJGJv5yMAPxzyInu5sIoGLJapnYVBoAU0YgI9qlAc |
|
| 7 |
+YA6SxaSwgm6rpvmnl8Qn0qc6ger3inpGaUJylWHuPwWkvcimQAqHZx2dQtL7g6kp |
|
| 8 |
+rmKeTWpWoWLw3JoAUZUVhZMd6a22ZL/DvAw+Hrogbz4XeyahFb9IH402zPxN6vga |
|
| 9 |
+JEFTF0Ji1jtNg0Mo4pb9SHsMsiw+LZK7SffHVKPxvd21m/biNmwsgExA3U8OOG8p |
|
| 10 |
+uygfacys5c8+ZrX+ZFG/cvwKz0k6/QfJU40s6MhXw5C2WttdVmsG9/7rGFYjHoIJ |
|
| 11 |
+weDyxgWk7vxKzRJI/un7cagDIaQsKrJQcCHIGFRlpIR5TwX7vl3R7cRncrDRMVvc |
|
| 12 |
+VSEG2esxbw7jtzIp/ypnVRxcOny7IypyjKqVeqZ6HgxZtTBVrF1O/aHo2kvlwyRS |
|
| 13 |
+Aus4kvh6z3+jzTm9EzfXiPQzY9BEk5gOLxhW9rc6UhlS+pe5lkaN/Hyqy/lPuq89 |
|
| 14 |
+fMr2rr7lf5WFdFnze6WNYMAaW7dNA4NE0dyD53428ZLXxNVPL4WU66Gac6lynQ8l |
|
| 15 |
+r5tPsYIFXzh6FVaRKGQUtW1hz9ecO6Y27Rh2JsyiIxgUqk2ooxE69uN42t+dtqKC |
|
| 16 |
+1s8G/7VtY8GDALFLYTnzLvsCAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgCgMBMGA1Ud |
|
| 17 |
+JQQMMAoGCCsGAQUFBwMDMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4ICAQBM |
|
| 18 |
+Oll3G/XBz8idiNdNJDWUh+5w3ojmwanrTBdCdqEk1WenaR6DtcflJx6Z3f/mwV4o |
|
| 19 |
+b1skOAX1yX5RCahJHUMxMicz/Q38pOVelGPrWnc3TJB+VKjGyHXlQDVkZFb+4+ef |
|
| 20 |
+wtj7HngXhHFFDSgjm3EdMndvgDQ7SQb4skOnCNS9iyX7eXxhFBCZmZL+HALKBj2B |
|
| 21 |
+yhV4IcBDqmp504t14rx9/Jvty0dG7fY7I51gEQpm4S02JML5xvTm1xfboWIhZODI |
|
| 22 |
+swEAO+ekBoFHbS1Q9KMPjIAw3TrCHH8x8XZq5zsYtAC1yZHdCKa26aWdy56A9eHj |
|
| 23 |
+O1VxzwmbNyXRenVuBYP+0wr3HVKFG4JJ4ZZpNZzQW/pqEPghCTJIvIueK652ByUc |
|
| 24 |
+//sv+nXd5f19LeES9pf0l253NDaFZPb6aegKfquWh8qlQBmUQ2GzaTLbtmNd28M6 |
|
| 25 |
+W7iL7tkKZe1ZnBz9RKgtPrDjjWGZInjjcOU8EtT4SLq7kCVDmPs5MD8vaAm96JsE |
|
| 26 |
+jmLC3Uu/4k7HiDYX0i0mOWkFjZQMdVatcIF5FPSppwsSbW8QidnXt54UtwtFDEPz |
|
| 27 |
+lpjs7ybeQE71JXcMZnVIK4bjRXsEFPI98RpIlEdedbSUdYAncLNJRT7HZBMPGSwZ |
|
| 28 |
+0PNJuglnlr3srVzdW1dz2xQjdvLwxy6mNUF6rbQBWA== |
|
| 29 |
+-----END CERTIFICATE----- |
|
| 30 |
+ |
| ... | ... |
@@ -4,7 +4,6 @@ import ( |
| 4 | 4 |
"crypto/x509" |
| 5 | 5 |
"fmt" |
| 6 | 6 |
"github.com/Sirupsen/logrus" |
| 7 |
- "github.com/docker/notary/trustmanager" |
|
| 8 | 7 |
"github.com/docker/notary/tuf/utils" |
| 9 | 8 |
"strings" |
| 10 | 9 |
) |
| ... | ... |
@@ -28,25 +27,29 @@ type trustPinChecker struct {
|
| 28 | 28 |
type CertChecker func(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool |
| 29 | 29 |
|
| 30 | 30 |
// NewTrustPinChecker returns a new certChecker function from a TrustPinConfig for a GUN |
| 31 |
-func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun string) (CertChecker, error) {
|
|
| 31 |
+func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun string, firstBootstrap bool) (CertChecker, error) {
|
|
| 32 | 32 |
t := trustPinChecker{gun: gun, config: trustPinConfig}
|
| 33 | 33 |
// Determine the mode, and if it's even valid |
| 34 | 34 |
if pinnedCerts, ok := trustPinConfig.Certs[gun]; ok {
|
| 35 |
+ logrus.Debugf("trust-pinning using Cert IDs")
|
|
| 35 | 36 |
t.pinnedCertIDs = pinnedCerts |
| 36 | 37 |
return t.certsCheck, nil |
| 37 | 38 |
} |
| 38 | 39 |
|
| 39 | 40 |
if caFilepath, err := getPinnedCAFilepathByPrefix(gun, trustPinConfig); err == nil {
|
| 41 |
+ logrus.Debugf("trust-pinning using root CA bundle at: %s", caFilepath)
|
|
| 42 |
+ |
|
| 40 | 43 |
// Try to add the CA certs from its bundle file to our certificate store, |
| 41 | 44 |
// and use it to validate certs in the root.json later |
| 42 |
- caCerts, err := trustmanager.LoadCertBundleFromFile(caFilepath) |
|
| 45 |
+ caCerts, err := utils.LoadCertBundleFromFile(caFilepath) |
|
| 43 | 46 |
if err != nil {
|
| 44 | 47 |
return nil, fmt.Errorf("could not load root cert from CA path")
|
| 45 | 48 |
} |
| 46 | 49 |
// Now only consider certificates that are direct children from this CA cert chain |
| 47 | 50 |
caRootPool := x509.NewCertPool() |
| 48 | 51 |
for _, caCert := range caCerts {
|
| 49 |
- if err = trustmanager.ValidateCertificate(caCert); err != nil {
|
|
| 52 |
+ if err = utils.ValidateCertificate(caCert, true); err != nil {
|
|
| 53 |
+ logrus.Debugf("ignoring root CA certificate with CN %s in bundle: %s", caCert.Subject.CommonName, err)
|
|
| 50 | 54 |
continue |
| 51 | 55 |
} |
| 52 | 56 |
caRootPool.AddCert(caCert) |
| ... | ... |
@@ -59,16 +62,18 @@ func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun string) (CertChecker, |
| 59 | 59 |
return t.caCheck, nil |
| 60 | 60 |
} |
| 61 | 61 |
|
| 62 |
- if !trustPinConfig.DisableTOFU {
|
|
| 63 |
- return t.tofusCheck, nil |
|
| 62 |
+ // If TOFUs is disabled and we don't have any previous trusted root data for this GUN, we error out |
|
| 63 |
+ if trustPinConfig.DisableTOFU && firstBootstrap {
|
|
| 64 |
+ return nil, fmt.Errorf("invalid trust pinning specified")
|
|
| 65 |
+ |
|
| 64 | 66 |
} |
| 65 |
- return nil, fmt.Errorf("invalid trust pinning specified")
|
|
| 67 |
+ return t.tofusCheck, nil |
|
| 66 | 68 |
} |
| 67 | 69 |
|
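The new firstBootstrap flag ties this function to the ValidateRoot change above: DisableTOFU now only refuses a GUN the client has never seen, while TOFU remains the fallback once a previous root exists. A sketch of the mode selection (firstBootstrap mirrors ValidateRoot's !havePrevRoot):

```go
package main

import (
	"fmt"

	"github.com/docker/notary/trustpinning"
)

func main() {
	config := trustpinning.TrustPinConfig{DisableTOFU: true} // no Certs, no CA
	gun := "docker.com/library/alpine"

	// First contact with the GUN and TOFU disabled: hard error.
	if _, err := trustpinning.NewTrustPinChecker(config, gun, true); err != nil {
		fmt.Println("bootstrap refused:", err)
	}

	// With a previous root already trusted (firstBootstrap == false), the
	// TOFU checker is returned even though DisableTOFU is set.
	if check, err := trustpinning.NewTrustPinChecker(config, gun, false); err == nil {
		fmt.Printf("got checker: %T\n", check)
	}
}
```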
| 68 | 70 |
func (t trustPinChecker) certsCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool {
|
| 69 | 71 |
// reconstruct the leaf + intermediate cert chain, which is bundled as {leaf, intermediates...},
|
| 70 | 72 |
// in order to get the matching id in the root file |
| 71 |
- key, err := trustmanager.CertBundleToKey(leafCert, intCerts) |
|
| 73 |
+ key, err := utils.CertBundleToKey(leafCert, intCerts) |
|
| 72 | 74 |
if err != nil {
|
| 73 | 75 |
logrus.Debug("error creating cert bundle: ", err.Error())
|
| 74 | 76 |
return false |
| ... | ... |
@@ -84,9 +89,11 @@ func (t trustPinChecker) caCheck(leafCert *x509.Certificate, intCerts []*x509.Ce |
| 84 | 84 |
} |
| 85 | 85 |
// Attempt to find a valid certificate chain from the leaf cert to CA root |
| 86 | 86 |
// Use this certificate if such a valid chain exists (possibly using intermediates) |
| 87 |
- if _, err := leafCert.Verify(x509.VerifyOptions{Roots: t.pinnedCAPool, Intermediates: caIntPool}); err == nil {
|
|
| 87 |
+ var err error |
|
| 88 |
+ if _, err = leafCert.Verify(x509.VerifyOptions{Roots: t.pinnedCAPool, Intermediates: caIntPool}); err == nil {
|
|
| 88 | 89 |
return true |
| 89 | 90 |
} |
| 91 |
+ logrus.Debugf("unable to find a valid certificate chain from leaf cert to CA root: %s", err)
|
|
| 90 | 92 |
return false |
| 91 | 93 |
} |
| 92 | 94 |
|
| ... | ... |
@@ -1,36 +1,6 @@ |
| 1 |
-# GOTUF |
|
| 2 |
- |
|
| 3 |
-This is still a work in progress but will shortly be a fully compliant |
|
| 4 |
-Go implementation of [The Update Framework (TUF)](http://theupdateframework.com/). |
|
| 5 |
- |
|
| 6 |
-## Where's the CLI |
|
| 7 |
- |
|
| 8 |
-This repository provides a library only. The [Notary project](https://github.com/docker/notary) |
|
| 9 |
-from Docker should be considered the official CLI to be used with this implementation of TUF. |
|
| 10 |
- |
|
| 11 |
-## TODOs: |
|
| 12 |
- |
|
| 13 |
-- [X] Add Targets to existing repo |
|
| 14 |
-- [X] Sign metadata files |
|
| 15 |
-- [X] Refactor TufRepo to take care of signing ~~and verification~~ |
|
| 16 |
-- [ ] Ensure consistent capitalization in naming (TUF\_\_\_ vs Tuf\_\_\_) |
|
| 17 |
-- [X] Make caching of metadata files smarter - PR #5 |
|
| 18 |
-- [ ] ~~Add configuration for CLI commands. Order of configuration priority from most to least: flags, config file, defaults~~ Notary should be the official CLI |
|
| 19 |
-- [X] Reasses organization of data types. Possibly consolidate a few things into the data package but break up package into a few more distinct files |
|
| 20 |
-- [ ] Comprehensive test cases |
|
| 21 |
-- [ ] Delete files no longer in use |
|
| 22 |
-- [ ] Fix up errors. Some have to be instantiated, others don't, the inconsistency is annoying. |
|
| 23 |
-- [X] Bump version numbers in meta files (could probably be done better) |
|
| 24 |
- |
|
| 25 | 1 |
## Credits |
| 26 | 2 |
|
| 27 |
-This implementation was originally forked from [flynn/go-tuf](https://github.com/flynn/go-tuf), |
|
| 28 |
-however in attempting to add delegations I found I was making such |
|
| 29 |
-significant changes that I could not maintain backwards compatibility |
|
| 30 |
-without the code becoming overly convoluted. |
|
| 31 |
- |
|
| 32 |
-Some features such as pluggable verifiers have already been merged upstream to flynn/go-tuf |
|
| 33 |
-and we are in discussion with [titanous](https://github.com/titanous) about working to merge the 2 implementations. |
|
| 3 |
+This implementation was originally forked from [flynn/go-tuf](https://github.com/flynn/go-tuf). |
|
| 34 | 4 |
|
| 35 | 5 |
This implementation retains the same 3 Clause BSD license present on |
| 36 | 6 |
the original flynn implementation. |
| ... | ... |
@@ -18,7 +18,7 @@ var ErrBuildDone = fmt.Errorf( |
| 18 | 18 |
"the builder has finished building and cannot accept any more input or produce any more output") |
| 19 | 19 |
|
| 20 | 20 |
// ErrInvalidBuilderInput is returned when RepoBuilder.Load is called |
| 21 |
-// with the wrong type of metadata for thes tate that it's in |
|
| 21 |
+// with the wrong type of metadata for the state that it's in |
|
| 22 | 22 |
type ErrInvalidBuilderInput struct{ msg string }
|
| 23 | 23 |
|
| 24 | 24 |
func (e ErrInvalidBuilderInput) Error() string {
|
| ... | ... |
@@ -59,8 +59,9 @@ type RepoBuilder interface {
|
| 59 | 59 |
Load(roleName string, content []byte, minVersion int, allowExpired bool) error |
| 60 | 60 |
GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) |
| 61 | 61 |
GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) |
| 62 |
- Finish() (*Repo, error) |
|
| 62 |
+ Finish() (*Repo, *Repo, error) |
|
| 63 | 63 |
BootstrapNewBuilder() RepoBuilder |
| 64 |
+ BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder |
|
| 64 | 65 |
|
| 65 | 66 |
// informative functions |
| 66 | 67 |
IsLoaded(roleName string) bool |
| ... | ... |
@@ -80,8 +81,11 @@ func (f finishedBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, in |
| 80 | 80 |
func (f finishedBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) {
|
| 81 | 81 |
return nil, 0, ErrBuildDone |
| 82 | 82 |
} |
| 83 |
-func (f finishedBuilder) Finish() (*Repo, error) { return nil, ErrBuildDone }
|
|
| 84 |
-func (f finishedBuilder) BootstrapNewBuilder() RepoBuilder { return f }
|
|
| 83 |
+func (f finishedBuilder) Finish() (*Repo, *Repo, error) { return nil, nil, ErrBuildDone }
|
|
| 84 |
+func (f finishedBuilder) BootstrapNewBuilder() RepoBuilder { return f }
|
|
| 85 |
+func (f finishedBuilder) BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder {
|
|
| 86 |
+ return f |
|
| 87 |
+} |
|
| 85 | 88 |
func (f finishedBuilder) IsLoaded(roleName string) bool { return false }
|
| 86 | 89 |
func (f finishedBuilder) GetLoadedVersion(roleName string) int { return 0 }
|
| 87 | 90 |
func (f finishedBuilder) GetConsistentInfo(roleName string) ConsistentInfo {
|
| ... | ... |
@@ -90,12 +94,21 @@ func (f finishedBuilder) GetConsistentInfo(roleName string) ConsistentInfo {
|
| 90 | 90 |
|
| 91 | 91 |
// NewRepoBuilder is the only way to get a pre-built RepoBuilder |
| 92 | 92 |
func NewRepoBuilder(gun string, cs signed.CryptoService, trustpin trustpinning.TrustPinConfig) RepoBuilder {
|
| 93 |
- return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
|
|
| 94 |
- repo: NewRepo(cs), |
|
| 95 |
- gun: gun, |
|
| 96 |
- trustpin: trustpin, |
|
| 97 |
- loadedNotChecksummed: make(map[string][]byte), |
|
| 98 |
- }} |
|
| 93 |
+ return NewBuilderFromRepo(gun, NewRepo(cs), trustpin) |
|
| 94 |
+} |
|
| 95 |
+ |
|
| 96 |
+// NewBuilderFromRepo allows us to bootstrap a builder given existing repo data. |
|
| 97 |
+// YOU PROBABLY SHOULDN'T BE USING THIS OUTSIDE OF TESTING CODE!!! |
|
| 98 |
+func NewBuilderFromRepo(gun string, repo *Repo, trustpin trustpinning.TrustPinConfig) RepoBuilder {
|
|
| 99 |
+ return &repoBuilderWrapper{
|
|
| 100 |
+ RepoBuilder: &repoBuilder{
|
|
| 101 |
+ repo: repo, |
|
| 102 |
+ invalidRoles: NewRepo(nil), |
|
| 103 |
+ gun: gun, |
|
| 104 |
+ trustpin: trustpin, |
|
| 105 |
+ loadedNotChecksummed: make(map[string][]byte), |
|
| 106 |
+ }, |
|
| 107 |
+ } |
|
| 99 | 108 |
} |
| 100 | 109 |
|
| 101 | 110 |
// repoBuilderWrapper embeds a repoBuilder, but once Finish is called, swaps |
| ... | ... |
@@ -104,7 +117,7 @@ type repoBuilderWrapper struct {
|
| 104 | 104 |
RepoBuilder |
| 105 | 105 |
} |
| 106 | 106 |
|
| 107 |
-func (rbw *repoBuilderWrapper) Finish() (*Repo, error) {
|
|
| 107 |
+func (rbw *repoBuilderWrapper) Finish() (*Repo, *Repo, error) {
|
|
| 108 | 108 |
switch rbw.RepoBuilder.(type) {
|
| 109 | 109 |
case finishedBuilder: |
| 110 | 110 |
return rbw.RepoBuilder.Finish() |
| ... | ... |
@@ -117,7 +130,8 @@ func (rbw *repoBuilderWrapper) Finish() (*Repo, error) {
|
| 117 | 117 |
|
| 118 | 118 |
// repoBuilder actually builds a tuf.Repo |
| 119 | 119 |
type repoBuilder struct {
|
| 120 |
- repo *Repo |
|
| 120 |
+ repo *Repo |
|
| 121 |
+ invalidRoles *Repo |
|
| 121 | 122 |
|
| 122 | 123 |
// needed for root trust pinning verification |
| 123 | 124 |
gun string |
| ... | ... |
@@ -136,13 +150,14 @@ type repoBuilder struct {
|
| 136 | 136 |
nextRootChecksum *data.FileMeta |
| 137 | 137 |
} |
| 138 | 138 |
|
| 139 |
-func (rb *repoBuilder) Finish() (*Repo, error) {
|
|
| 140 |
- return rb.repo, nil |
|
| 139 |
+func (rb *repoBuilder) Finish() (*Repo, *Repo, error) {
|
|
| 140 |
+ return rb.repo, rb.invalidRoles, nil |
|
| 141 | 141 |
} |
| 142 | 142 |
|
| 143 | 143 |
func (rb *repoBuilder) BootstrapNewBuilder() RepoBuilder {
|
| 144 | 144 |
return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
|
| 145 | 145 |
repo: NewRepo(rb.repo.cryptoService), |
| 146 |
+ invalidRoles: NewRepo(nil), |
|
| 146 | 147 |
gun: rb.gun, |
| 147 | 148 |
loadedNotChecksummed: make(map[string][]byte), |
| 148 | 149 |
trustpin: rb.trustpin, |
| ... | ... |
@@ -152,6 +167,18 @@ func (rb *repoBuilder) BootstrapNewBuilder() RepoBuilder {
|
| 152 | 152 |
}} |
| 153 | 153 |
} |
| 154 | 154 |
|
| 155 |
+func (rb *repoBuilder) BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder {
|
|
| 156 |
+ return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
|
|
| 157 |
+ repo: NewRepo(rb.repo.cryptoService), |
|
| 158 |
+ gun: rb.gun, |
|
| 159 |
+ loadedNotChecksummed: make(map[string][]byte), |
|
| 160 |
+ trustpin: trustpin, |
|
| 161 |
+ |
|
| 162 |
+ prevRoot: rb.repo.Root, |
|
| 163 |
+ bootstrappedRootChecksum: rb.nextRootChecksum, |
|
| 164 |
+ }} |
|
| 165 |
+} |
|
| 166 |
+ |
|
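Finish now returns two repos: the validated one, and an invalidRoles repo collecting delegation roles that failed signature or expiry checks (rollbacks are deliberately not captured, as the loadDelegation hunk below notes). BootstrapNewBuilderWithNewTrustpin additionally lets a caller revalidate under a different pinning policy while keeping the root this builder already established. A sketch of the updated call shape:

```go
package builderexample

import (
	"fmt"
	"log"

	"github.com/docker/notary/trustpinning"
	"github.com/docker/notary/tuf"
)

func finishExample(builder tuf.RepoBuilder) {
	// Derive a sibling builder that will revalidate under a different
	// pinning policy while reusing the root this builder already trusts.
	rebuilder := builder.BootstrapNewBuilderWithNewTrustpin(trustpinning.TrustPinConfig{})
	_ = rebuilder

	// v0.3.0: repo, err := builder.Finish()
	// v0.4.2: a second *Repo carries the roles that failed validation.
	repo, invalid, err := builder.Finish()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("valid delegation roles: %d, invalid: %d\n",
		len(repo.Targets), len(invalid.Targets))
}
```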
| 155 | 167 |
// IsLoaded returns whether a particular role has already been loaded |
| 156 | 168 |
func (rb *repoBuilder) IsLoaded(roleName string) bool {
|
| 157 | 169 |
switch roleName {
|
| ... | ... |
@@ -338,7 +365,7 @@ func (rb *repoBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, in |
| 338 | 338 |
return nil, 0, ErrInvalidBuilderInput{msg: "timestamp has already been loaded"}
|
| 339 | 339 |
} |
| 340 | 340 |
|
| 341 |
- // SignTimetamp always serializes the loaded snapshot and signs in the data, so we must always |
|
| 341 |
+ // SignTimestamp always serializes the loaded snapshot and signs in the data, so we must always |
|
| 342 | 342 |
// have the snapshot loaded first |
| 343 | 343 |
if err := rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalSnapshotRole}); err != nil {
|
| 344 | 344 |
return nil, 0, err |
| ... | ... |
@@ -411,7 +438,6 @@ func (rb *repoBuilder) loadRoot(content []byte, minVersion int, allowExpired boo |
| 411 | 411 |
if err != nil { // this should never happen since the root has been validated
|
| 412 | 412 |
return err |
| 413 | 413 |
} |
| 414 |
- |
|
| 415 | 414 |
rb.repo.Root = signedRoot |
| 416 | 415 |
rb.repo.originalRootRole = rootRole |
| 417 | 416 |
return nil |
| ... | ... |
@@ -524,6 +550,7 @@ func (rb *repoBuilder) loadTargets(content []byte, minVersion int, allowExpired |
| 524 | 524 |
} |
| 525 | 525 |
} |
| 526 | 526 |
|
| 527 |
+ signedTargets.Signatures = signedObj.Signatures |
|
| 527 | 528 |
rb.repo.Targets[roleName] = signedTargets |
| 528 | 529 |
return nil |
| 529 | 530 |
} |
| ... | ... |
@@ -534,7 +561,8 @@ func (rb *repoBuilder) loadDelegation(roleName string, content []byte, minVersio |
| 534 | 534 |
return err |
| 535 | 535 |
} |
| 536 | 536 |
|
| 537 |
- signedObj, err := rb.bytesToSignedAndValidateSigs(delegationRole.BaseRole, content) |
|
| 537 |
+ // bytesToSigned checks checksum |
|
| 538 |
+ signedObj, err := rb.bytesToSigned(content, roleName) |
|
| 538 | 539 |
if err != nil {
|
| 539 | 540 |
return err |
| 540 | 541 |
} |
| ... | ... |
@@ -545,15 +573,24 @@ func (rb *repoBuilder) loadDelegation(roleName string, content []byte, minVersio |
| 545 | 545 |
} |
| 546 | 546 |
|
| 547 | 547 |
if err := signed.VerifyVersion(&(signedTargets.Signed.SignedCommon), minVersion); err != nil {
|
| 548 |
+ // don't capture in invalidRoles because the role we received is a rollback |
|
| 549 |
+ return err |
|
| 550 |
+ } |
|
| 551 |
+ |
|
| 552 |
+ // verify signature |
|
| 553 |
+ if err := signed.VerifySignatures(signedObj, delegationRole.BaseRole); err != nil {
|
|
| 554 |
+ rb.invalidRoles.Targets[roleName] = signedTargets |
|
| 548 | 555 |
return err |
| 549 | 556 |
} |
| 550 | 557 |
|
| 551 | 558 |
if !allowExpired { // check must go at the end because all other validation should pass
|
| 552 | 559 |
if err := signed.VerifyExpiry(&(signedTargets.Signed.SignedCommon), roleName); err != nil {
|
| 560 |
+ rb.invalidRoles.Targets[roleName] = signedTargets |
|
| 553 | 561 |
return err |
| 554 | 562 |
} |
| 555 | 563 |
} |
| 556 | 564 |
|
| 565 |
+ signedTargets.Signatures = signedObj.Signatures |
|
| 557 | 566 |
rb.repo.Targets[roleName] = signedTargets |
| 558 | 567 |
return nil |
| 559 | 568 |
} |
| 560 | 569 |
deleted file mode 100644 |
| ... | ... |
@@ -1,229 +0,0 @@ |
| 1 |
-package client |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "encoding/json" |
|
| 5 |
- |
|
| 6 |
- "github.com/Sirupsen/logrus" |
|
| 7 |
- "github.com/docker/notary" |
|
| 8 |
- tuf "github.com/docker/notary/tuf" |
|
| 9 |
- "github.com/docker/notary/tuf/data" |
|
| 10 |
- "github.com/docker/notary/tuf/store" |
|
| 11 |
-) |
|
| 12 |
- |
|
| 13 |
-// Client is a usability wrapper around a raw TUF repo |
|
| 14 |
-type Client struct {
|
|
| 15 |
- remote store.RemoteStore |
|
| 16 |
- cache store.MetadataStore |
|
| 17 |
- oldBuilder tuf.RepoBuilder |
|
| 18 |
- newBuilder tuf.RepoBuilder |
|
| 19 |
-} |
|
| 20 |
- |
|
| 21 |
-// NewClient initialized a Client with the given repo, remote source of content, and cache |
|
| 22 |
-func NewClient(oldBuilder, newBuilder tuf.RepoBuilder, remote store.RemoteStore, cache store.MetadataStore) *Client {
|
|
| 23 |
- return &Client{
|
|
| 24 |
- oldBuilder: oldBuilder, |
|
| 25 |
- newBuilder: newBuilder, |
|
| 26 |
- remote: remote, |
|
| 27 |
- cache: cache, |
|
| 28 |
- } |
|
| 29 |
-} |
|
| 30 |
- |
|
| 31 |
-// Update performs an update to the TUF repo as defined by the TUF spec |
|
| 32 |
-func (c *Client) Update() (*tuf.Repo, error) {
|
|
| 33 |
- // 1. Get timestamp |
|
| 34 |
- // a. If timestamp error (verification, expired, etc...) download new root and return to 1. |
|
| 35 |
- // 2. Check if local snapshot is up to date |
|
| 36 |
- // a. If out of date, get updated snapshot |
|
| 37 |
- // i. If snapshot error, download new root and return to 1. |
|
| 38 |
- // 3. Check if root correct against snapshot |
|
| 39 |
- // a. If incorrect, download new root and return to 1. |
|
| 40 |
- // 4. Iteratively download and search targets and delegations to find target meta |
|
| 41 |
- logrus.Debug("updating TUF client")
|
|
| 42 |
- err := c.update() |
|
| 43 |
- if err != nil {
|
|
| 44 |
- logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
|
|
| 45 |
- logrus.Debug("Resetting the TUF builder...")
|
|
| 46 |
- |
|
| 47 |
- c.newBuilder = c.newBuilder.BootstrapNewBuilder() |
|
| 48 |
- |
|
| 49 |
- if err := c.downloadRoot(); err != nil {
|
|
| 50 |
- logrus.Debug("Client Update (Root):", err)
|
|
| 51 |
- return nil, err |
|
| 52 |
- } |
|
| 53 |
- // If we error again, we now have the latest root and just want to fail |
|
| 54 |
- // out as there's no expectation the problem can be resolved automatically |
|
| 55 |
- logrus.Debug("retrying TUF client update")
|
|
| 56 |
- if err := c.update(); err != nil {
|
|
| 57 |
- return nil, err |
|
| 58 |
- } |
|
| 59 |
- } |
|
| 60 |
- return c.newBuilder.Finish() |
|
| 61 |
-} |
|
| 62 |
- |
|
| 63 |
-func (c *Client) update() error {
|
|
| 64 |
- if err := c.downloadTimestamp(); err != nil {
|
|
| 65 |
- logrus.Debugf("Client Update (Timestamp): %s", err.Error())
|
|
| 66 |
- return err |
|
| 67 |
- } |
|
| 68 |
- if err := c.downloadSnapshot(); err != nil {
|
|
| 69 |
- logrus.Debugf("Client Update (Snapshot): %s", err.Error())
|
|
| 70 |
- return err |
|
| 71 |
- } |
|
| 72 |
- // will always need top level targets at a minimum |
|
| 73 |
- if err := c.downloadTargets(); err != nil {
|
|
| 74 |
- logrus.Debugf("Client Update (Targets): %s", err.Error())
|
|
| 75 |
- return err |
|
| 76 |
- } |
|
| 77 |
- return nil |
|
| 78 |
-} |
|
| 79 |
- |
|
| 80 |
-// downloadRoot is responsible for downloading the root.json |
|
| 81 |
-func (c *Client) downloadRoot() error {
|
|
| 82 |
- role := data.CanonicalRootRole |
|
| 83 |
- consistentInfo := c.newBuilder.GetConsistentInfo(role) |
|
| 84 |
- |
|
| 85 |
- // We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle |
|
| 86 |
- // since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch |
|
| 87 |
- if !consistentInfo.ChecksumKnown() {
|
|
| 88 |
- logrus.Debugf("Loading root with no expected checksum")
|
|
| 89 |
- |
|
| 90 |
- // get the cached root, if it exists, just for version checking |
|
| 91 |
- cachedRoot, _ := c.cache.GetMeta(role, -1) |
|
| 92 |
- // prefer to download a new root |
|
| 93 |
- _, remoteErr := c.tryLoadRemote(consistentInfo, cachedRoot) |
|
| 94 |
- return remoteErr |
|
| 95 |
- } |
|
| 96 |
- |
|
| 97 |
- _, err := c.tryLoadCacheThenRemote(consistentInfo) |
|
| 98 |
- return err |
|
| 99 |
-} |
|
| 100 |
- |
|
| 101 |
-// downloadTimestamp is responsible for downloading the timestamp.json |
|
| 102 |
-// Timestamps are special in that we ALWAYS attempt to download and only |
|
| 103 |
-// use cache if the download fails (and the cache is still valid). |
|
| 104 |
-func (c *Client) downloadTimestamp() error {
|
|
| 105 |
- logrus.Debug("Loading timestamp...")
|
|
| 106 |
- role := data.CanonicalTimestampRole |
|
| 107 |
- consistentInfo := c.newBuilder.GetConsistentInfo(role) |
|
| 108 |
- |
|
| 109 |
- // get the cached timestamp, if it exists |
|
| 110 |
- cachedTS, cachedErr := c.cache.GetMeta(role, notary.MaxTimestampSize) |
|
| 111 |
- // always get the remote timestamp, since it supercedes the local one |
|
| 112 |
- _, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS) |
|
| 113 |
- |
|
| 114 |
- switch {
|
|
| 115 |
- case remoteErr == nil: |
|
| 116 |
- return nil |
|
| 117 |
- case cachedErr == nil: |
|
| 118 |
- logrus.Debug(remoteErr.Error()) |
|
| 119 |
- logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
|
|
| 120 |
- |
|
| 121 |
- err := c.newBuilder.Load(role, cachedTS, 1, false) |
|
| 122 |
- if err == nil {
|
|
| 123 |
- logrus.Debug("successfully verified cached timestamp")
|
|
| 124 |
- } |
|
| 125 |
- return err |
|
| 126 |
- default: |
|
| 127 |
- logrus.Debug("no cached or remote timestamp available")
|
|
| 128 |
- return remoteErr |
|
| 129 |
- } |
|
| 130 |
-} |
|
| 131 |
- |
|
| 132 |
-// downloadSnapshot is responsible for downloading the snapshot.json |
|
| 133 |
-func (c *Client) downloadSnapshot() error {
|
|
| 134 |
- logrus.Debug("Loading snapshot...")
|
|
| 135 |
- role := data.CanonicalSnapshotRole |
|
| 136 |
- consistentInfo := c.newBuilder.GetConsistentInfo(role) |
|
| 137 |
- |
|
| 138 |
- _, err := c.tryLoadCacheThenRemote(consistentInfo) |
|
| 139 |
- return err |
|
| 140 |
-} |
|
| 141 |
- |
|
| 142 |
-// downloadTargets downloads all targets and delegated targets for the repository. |
|
| 143 |
-// It uses a pre-order tree traversal as it's necessary to download parents first |
|
| 144 |
-// to obtain the keys to validate children. |
|
| 145 |
-func (c *Client) downloadTargets() error {
|
|
| 146 |
- toDownload := []data.DelegationRole{{
|
|
| 147 |
- BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole},
|
|
| 148 |
- Paths: []string{""},
|
|
| 149 |
- }} |
|
| 150 |
- for len(toDownload) > 0 {
|
|
| 151 |
- role := toDownload[0] |
|
| 152 |
- toDownload = toDownload[1:] |
|
| 153 |
- |
|
| 154 |
- consistentInfo := c.newBuilder.GetConsistentInfo(role.Name) |
|
| 155 |
- if !consistentInfo.ChecksumKnown() {
|
|
| 156 |
- logrus.Debugf("skipping %s because there is no checksum for it", role.Name)
|
|
| 157 |
- continue |
|
| 158 |
- } |
|
| 159 |
- |
|
| 160 |
- children, err := c.getTargetsFile(role, consistentInfo) |
|
| 161 |
- if err != nil {
|
|
| 162 |
- if _, ok := err.(data.ErrMissingMeta); ok && role.Name != data.CanonicalTargetsRole {
|
|
| 163 |
- // if the role meta hasn't been published, |
|
| 164 |
- // that's ok, continue |
|
| 165 |
- continue |
|
| 166 |
- } |
|
| 167 |
- logrus.Debugf("Error getting %s: %s", role.Name, err)
|
|
| 168 |
- return err |
|
| 169 |
- } |
|
| 170 |
- toDownload = append(children, toDownload...) |
|
| 171 |
- } |
|
| 172 |
- return nil |
|
| 173 |
-} |
|
| 174 |
- |
|
| 175 |
-func (c Client) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) {
|
|
| 176 |
- logrus.Debugf("Loading %s...", role.Name)
|
|
| 177 |
- tgs := &data.SignedTargets{}
|
|
| 178 |
- |
|
| 179 |
- raw, err := c.tryLoadCacheThenRemote(ci) |
|
| 180 |
- if err != nil {
|
|
| 181 |
- return nil, err |
|
| 182 |
- } |
|
| 183 |
- |
|
| 184 |
- // we know it unmarshals because if `tryLoadCacheThenRemote` didn't fail, then |
|
| 185 |
- // the raw has already been loaded into the builder |
|
| 186 |
- json.Unmarshal(raw, tgs) |
|
| 187 |
- return tgs.GetValidDelegations(role), nil |
|
| 188 |
-} |
|
| 189 |
- |
|
| 190 |
-func (c *Client) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) {
|
|
| 191 |
- cachedTS, err := c.cache.GetMeta(consistentInfo.RoleName, consistentInfo.Length()) |
|
| 192 |
- if err != nil {
|
|
| 193 |
- logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName)
|
|
| 194 |
- return c.tryLoadRemote(consistentInfo, nil) |
|
| 195 |
- } |
|
| 196 |
- |
|
| 197 |
- if err = c.newBuilder.Load(consistentInfo.RoleName, cachedTS, 1, false); err == nil {
|
|
| 198 |
- logrus.Debugf("successfully verified cached %s", consistentInfo.RoleName)
|
|
| 199 |
- return cachedTS, nil |
|
| 200 |
- } |
|
| 201 |
- |
|
| 202 |
- logrus.Debugf("cached %s is invalid (must download): %s", consistentInfo.RoleName, err)
|
|
| 203 |
- return c.tryLoadRemote(consistentInfo, cachedTS) |
|
| 204 |
-} |
|
| 205 |
- |
|
| 206 |
-func (c *Client) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) {
|
|
| 207 |
- consistentName := consistentInfo.ConsistentName() |
|
| 208 |
- raw, err := c.remote.GetMeta(consistentName, consistentInfo.Length()) |
|
| 209 |
- if err != nil {
|
|
| 210 |
- logrus.Debugf("error downloading %s: %s", consistentName, err)
|
|
| 211 |
- return old, err |
|
| 212 |
- } |
|
| 213 |
- |
|
| 214 |
- // try to load the old data into the old builder - only use it to validate |
|
| 215 |
- // versions if it loads successfully. If it errors, then the loaded version |
|
| 216 |
- // will be 1 |
|
| 217 |
- c.oldBuilder.Load(consistentInfo.RoleName, old, 1, true) |
|
| 218 |
- minVersion := c.oldBuilder.GetLoadedVersion(consistentInfo.RoleName) |
|
| 219 |
- |
|
| 220 |
- if err := c.newBuilder.Load(consistentInfo.RoleName, raw, minVersion, false); err != nil {
|
|
| 221 |
- logrus.Debugf("downloaded %s is invalid: %s", consistentName, err)
|
|
| 222 |
- return raw, err |
|
| 223 |
- } |
|
| 224 |
- logrus.Debugf("successfully verified downloaded %s", consistentName)
|
|
| 225 |
- if err := c.cache.SetMeta(consistentInfo.RoleName, raw); err != nil {
|
|
| 226 |
- logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err)
|
|
| 227 |
- } |
|
| 228 |
- return raw, nil |
|
| 229 |
-} |
| 230 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,14 +0,0 @@ |
| 1 |
-package client |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "fmt" |
|
| 5 |
-) |
|
| 6 |
- |
|
| 7 |
-// ErrCorruptedCache - local data is incorrect |
|
| 8 |
-type ErrCorruptedCache struct {
|
|
| 9 |
- file string |
|
| 10 |
-} |
|
| 11 |
- |
|
| 12 |
-func (e ErrCorruptedCache) Error() string {
|
|
| 13 |
- return fmt.Sprintf("cache is corrupted: %s", e.file)
|
|
| 14 |
-} |
| ... | ... |
@@ -42,3 +42,12 @@ func (e ErrMismatchedChecksum) Error() string {
|
| 42 | 42 |
return fmt.Sprintf("%s checksum for %s did not match: expected %s", e.alg, e.name,
|
| 43 | 43 |
e.expected) |
| 44 | 44 |
} |
| 45 |
+ |
|
| 46 |
+// ErrCertExpired is the error to be returned when a certificate has expired |
|
| 47 |
+type ErrCertExpired struct {
|
|
| 48 |
+ CN string |
|
| 49 |
+} |
|
| 50 |
+ |
|
| 51 |
+func (e ErrCertExpired) Error() string {
|
|
| 52 |
+ return fmt.Sprintf("certificate with CN %s is expired", e.CN)
|
|
| 53 |
+} |
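This error type is matched by type assertion elsewhere in this diff so that certificate expiry can be tolerated while other certificate problems remain fatal; a hedged sketch, where `checkCert` is a hypothetical validator:

    if err := checkCert(pubKey); err != nil {
        if _, ok := err.(ErrCertExpired); !ok {
            return err // only expiry is tolerated
        }
        logrus.Warn(err) // expired: warn but continue
    }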
| ... | ... |
@@ -86,6 +86,31 @@ func IsDelegation(role string) bool {
|
| 86 | 86 |
isClean |
| 87 | 87 |
} |
| 88 | 88 |
|
| 89 |
+// IsBaseRole checks if the role is a base role |
|
| 90 |
+func IsBaseRole(role string) bool {
|
|
| 91 |
+ for _, baseRole := range BaseRoles {
|
|
| 92 |
+ if role == baseRole {
|
|
| 93 |
+ return true |
|
| 94 |
+ } |
|
| 95 |
+ } |
|
| 96 |
+ return false |
|
| 97 |
+} |
|
| 98 |
+ |
|
| 99 |
+// IsWildDelegation determines if a role represents a valid wildcard delegation |
|
| 100 |
+// path, i.e. targets/*, targets/foo/*. |
|
| 101 |
+// The wildcard may only appear as the final part of the delegation and must |
|
| 102 |
+// be a whole segment, i.e. targets/foo* is not a valid wildcard delegation. |
|
| 103 |
+func IsWildDelegation(role string) bool {
|
|
| 104 |
+ if path.Clean(role) != role {
|
|
| 105 |
+ return false |
|
| 106 |
+ } |
|
| 107 |
+ base := path.Dir(role) |
|
| 108 |
+ if !(IsDelegation(base) || base == CanonicalTargetsRole) {
|
|
| 109 |
+ return false |
|
| 110 |
+ } |
|
| 111 |
+ return role[len(role)-2:] == "/*" |
|
| 112 |
+} |
|
| 113 |
+ |
|
| 89 | 114 |
// BaseRole is an internal representation of a root/targets/snapshot/timestamp role, with its public keys included |
| 90 | 115 |
type BaseRole struct {
|
| 91 | 116 |
Keys map[string]PublicKey |
| ... | ... |
@@ -107,7 +107,10 @@ func (t *SignedTargets) BuildDelegationRole(roleName string) (DelegationRole, er |
| 107 | 107 |
pubKey, ok := t.Signed.Delegations.Keys[keyID] |
| 108 | 108 |
if !ok {
|
| 109 | 109 |
// Couldn't retrieve all keys, so stop walking and return invalid role |
| 110 |
- return DelegationRole{}, ErrInvalidRole{Role: roleName, Reason: "delegation does not exist with all specified keys"}
|
|
| 110 |
+ return DelegationRole{}, ErrInvalidRole{
|
|
| 111 |
+ Role: roleName, |
|
| 112 |
+ Reason: "role lists unknown key " + keyID + " as a signing key", |
|
| 113 |
+ } |
|
| 111 | 114 |
} |
| 112 | 115 |
pubKeys[keyID] = pubKey |
| 113 | 116 |
} |
| ... | ... |
@@ -111,6 +111,7 @@ type Signature struct {
|
| 111 | 111 |
KeyID string `json:"keyid"` |
| 112 | 112 |
Method SigAlgorithm `json:"method"` |
| 113 | 113 |
Signature []byte `json:"sig"` |
| 114 |
+ IsValid bool `json:"-"` |
|
| 114 | 115 |
} |
| 115 | 116 |
|
| 116 | 117 |
// Files is the map of paths to file meta container in targets and delegations |
| ... | ... |
@@ -161,6 +162,40 @@ func CheckHashes(payload []byte, name string, hashes Hashes) error {
|
| 161 | 161 |
return nil |
| 162 | 162 |
} |
| 163 | 163 |
|
| 164 |
+// CompareMultiHashes verifies that the two Hashes passed in can represent the same data. |
|
| 165 |
+// This means that both maps must share at least one hash algorithm, and the digests for every shared algorithm must match. |
|
| 166 |
+// Note that we check the intersection of map keys, which adds support for non-default hash algorithms in notary |
|
| 167 |
+func CompareMultiHashes(hashes1, hashes2 Hashes) error {
|
|
| 168 |
+ // First check if the two hash structures are valid |
|
| 169 |
+ if err := CheckValidHashStructures(hashes1); err != nil {
|
|
| 170 |
+ return err |
|
| 171 |
+ } |
|
| 172 |
+ if err := CheckValidHashStructures(hashes2); err != nil {
|
|
| 173 |
+ return err |
|
| 174 |
+ } |
|
| 175 |
+ // Check if they have at least one matching hash, and no conflicts |
|
| 176 |
+ cnt := 0 |
|
| 177 |
+ for hashAlg, hash1 := range hashes1 {
|
|
| 178 |
+ |
|
| 179 |
+ hash2, ok := hashes2[hashAlg] |
|
| 180 |
+ if !ok {
|
|
| 181 |
+ continue |
|
| 182 |
+ } |
|
| 183 |
+ |
|
| 184 |
+ if subtle.ConstantTimeCompare(hash1[:], hash2[:]) == 0 {
|
|
| 185 |
+ return fmt.Errorf("mismatched %s checksum", hashAlg)
|
|
| 186 |
+ } |
|
| 187 |
+ // If we reached here, we had a match |
|
| 188 |
+ cnt++ |
|
| 189 |
+ } |
|
| 190 |
+ |
|
| 191 |
+ if cnt == 0 {
|
|
| 192 |
+ return fmt.Errorf("at least one matching hash needed")
|
|
| 193 |
+ } |
|
| 194 |
+ |
|
| 195 |
+ return nil |
|
| 196 |
+} |
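A small sketch of the intersection behavior; `digest256` and `digest512` stand in for correctly sized digests:

    // Only sha256 is shared; the comparison succeeds as long as both
    // structures are valid and every shared algorithm's digests agree.
    h1 := Hashes{"sha256": digest256}
    h2 := Hashes{"sha256": digest256, "sha512": digest512}
    err := CompareMultiHashes(h1, h2) // nil on a match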
|
| 197 |
+ |
|
| 164 | 198 |
// CheckValidHashStructures returns an error, or nil, depending on whether |
| 165 | 199 |
// the content of the hashes is valid or not. |
| 166 | 200 |
func CheckValidHashStructures(hashes Hashes) error {
|
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
|
| 7 | 7 |
"github.com/docker/notary/trustmanager" |
| 8 | 8 |
"github.com/docker/notary/tuf/data" |
| 9 |
+ "github.com/docker/notary/tuf/utils" |
|
| 9 | 10 |
) |
| 10 | 11 |
|
| 11 | 12 |
type edCryptoKey struct {
|
| ... | ... |
@@ -72,7 +73,7 @@ func (e *Ed25519) Create(role, gun, algorithm string) (data.PublicKey, error) {
|
| 72 | 72 |
return nil, errors.New("only ED25519 supported by this cryptoservice")
|
| 73 | 73 |
} |
| 74 | 74 |
|
| 75 |
- private, err := trustmanager.GenerateED25519Key(rand.Reader) |
|
| 75 |
+ private, err := utils.GenerateED25519Key(rand.Reader) |
|
| 76 | 76 |
if err != nil {
|
| 77 | 77 |
return nil, err |
| 78 | 78 |
} |
| ... | ... |
@@ -95,7 +96,10 @@ func (e *Ed25519) PublicKeys(keyIDs ...string) (map[string]data.PublicKey, error |
| 95 | 95 |
|
| 96 | 96 |
// GetKey returns a single public key based on the ID |
| 97 | 97 |
func (e *Ed25519) GetKey(keyID string) data.PublicKey {
|
| 98 |
- return data.PublicKeyFromPrivate(e.keys[keyID].privKey) |
|
| 98 |
+ if privKey, _, err := e.GetPrivateKey(keyID); err == nil {
|
|
| 99 |
+ return data.PublicKeyFromPrivate(privKey) |
|
| 100 |
+ } |
|
| 101 |
+ return nil |
|
| 99 | 102 |
} |
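A sketch of the behavioral change, with `ed` a hypothetical *Ed25519: an unknown ID now comes back as a nil public key instead of dereferencing a zero-value map entry, which could previously panic.

    if pub := ed.GetKey("no-such-key-id"); pub == nil {
        // handle the missing key explicitly
    }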
| 100 | 103 |
|
| 101 | 104 |
// GetPrivateKey returns a single private key and role if present, based on the ID |
| ... | ... |
@@ -14,12 +14,17 @@ type ErrInsufficientSignatures struct {
|
| 14 | 14 |
} |
| 15 | 15 |
|
| 16 | 16 |
func (e ErrInsufficientSignatures) Error() string {
|
| 17 |
- candidates := strings.Join(e.MissingKeyIDs, ", ") |
|
| 17 |
+ candidates := "" |
|
| 18 |
+ if len(e.MissingKeyIDs) > 0 {
|
|
| 19 |
+ candidates = fmt.Sprintf(" (%s)", strings.Join(e.MissingKeyIDs, ", "))
|
|
| 20 |
+ } |
|
| 21 |
+ |
|
| 18 | 22 |
if e.FoundKeys == 0 {
|
| 19 |
- return fmt.Sprintf("signing keys not available, need %d keys out of: %s", e.NeededKeys, candidates)
|
|
| 23 |
+ return fmt.Sprintf("signing keys not available: need %d keys from %d possible keys%s",
|
|
| 24 |
+ e.NeededKeys, len(e.MissingKeyIDs), candidates) |
|
| 20 | 25 |
} |
| 21 |
- return fmt.Sprintf("not enough signing keys: got %d of %d needed keys, other candidates: %s",
|
|
| 22 |
- e.FoundKeys, e.NeededKeys, candidates) |
|
| 26 |
+ return fmt.Sprintf("not enough signing keys: found %d of %d needed keys - %d other possible keys%s",
|
|
| 27 |
+ e.FoundKeys, e.NeededKeys, len(e.MissingKeyIDs), candidates) |
|
| 23 | 28 |
} |
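For illustration, the two message shapes this produces (key ID shortened, values assumed):

    e := ErrInsufficientSignatures{FoundKeys: 1, NeededKeys: 2, MissingKeyIDs: []string{"abc123"}}
    fmt.Println(e.Error())
    // not enough signing keys: found 1 of 2 needed keys - 1 other possible keys (abc123)
    // and with FoundKeys == 0:
    // signing keys not available: need 2 keys from 1 possible keys (abc123)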
| 24 | 29 |
|
| 25 | 30 |
// ErrExpired indicates a piece of metadata has expired |
| ... | ... |
@@ -100,7 +100,7 @@ func Sign(service CryptoService, s *data.Signed, signingKeys []data.PublicKey, |
| 100 | 100 |
// key is no longer a valid signing key |
| 101 | 101 |
continue |
| 102 | 102 |
} |
| 103 |
- if err := VerifySignature(*s.Signed, sig, k); err != nil {
|
|
| 103 |
+ if err := VerifySignature(*s.Signed, &sig, k); err != nil {
|
|
| 104 | 104 |
// signature is no longer valid |
| 105 | 105 |
continue |
| 106 | 106 |
} |
| ... | ... |
@@ -66,7 +66,8 @@ func VerifySignatures(s *data.Signed, roleData data.BaseRole) error {
|
| 66 | 66 |
} |
| 67 | 67 |
|
| 68 | 68 |
valid := make(map[string]struct{})
|
| 69 |
- for _, sig := range s.Signatures {
|
|
| 69 |
+ for i := range s.Signatures {
|
|
| 70 |
+ sig := &(s.Signatures[i]) |
|
| 70 | 71 |
logrus.Debug("verifying signature for key ID: ", sig.KeyID)
|
| 71 | 72 |
key, ok := roleData.Keys[sig.KeyID] |
| 72 | 73 |
if !ok {
|
| ... | ... |
@@ -82,17 +83,20 @@ func VerifySignatures(s *data.Signed, roleData data.BaseRole) error {
|
| 82 | 82 |
continue |
| 83 | 83 |
} |
| 84 | 84 |
valid[sig.KeyID] = struct{}{}
|
| 85 |
- |
|
| 86 | 85 |
} |
| 87 | 86 |
if len(valid) < roleData.Threshold {
|
| 88 |
- return ErrRoleThreshold{}
|
|
| 87 |
+ return ErrRoleThreshold{
|
|
| 88 |
+ Msg: fmt.Sprintf("valid signatures did not meet threshold for %s", roleData.Name),
|
|
| 89 |
+ } |
|
| 89 | 90 |
} |
| 90 | 91 |
|
| 91 | 92 |
return nil |
| 92 | 93 |
} |
| 93 | 94 |
|
| 94 | 95 |
// VerifySignature checks a single signature and public key against a payload |
| 95 |
-func VerifySignature(msg []byte, sig data.Signature, pk data.PublicKey) error {
|
|
| 96 |
+// If the signature is verified, the signature's IsValid field will |
|
| 97 |
+// be set to true |
|
| 98 |
+func VerifySignature(msg []byte, sig *data.Signature, pk data.PublicKey) error {
|
|
| 96 | 99 |
// method lookup is consistent due to Unmarshal JSON doing lower case for us. |
| 97 | 100 |
method := sig.Method |
| 98 | 101 |
verifier, ok := Verifiers[method] |
| ... | ... |
@@ -103,5 +107,6 @@ func VerifySignature(msg []byte, sig data.Signature, pk data.PublicKey) error {
|
| 103 | 103 |
if err := verifier.Verify(pk, sig.Signature, msg); err != nil {
|
| 104 | 104 |
return fmt.Errorf("signature was invalid\n")
|
| 105 | 105 |
} |
| 106 |
+ sig.IsValid = true |
|
| 106 | 107 |
return nil |
| 107 | 108 |
} |
| 108 | 109 |
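A hedged sketch of how the pointer receiver and the IsValid marker combine; `s` is a *data.Signed and `pubKey` one of the role's keys:

    sig := &s.Signatures[0] // pointer into the slice so the mutation sticks
    if err := VerifySignature(*s.Signed, sig, pubKey); err == nil {
        // sig.IsValid is now true and can be consulted later
        // without re-running the cryptographic verification
    }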
deleted file mode 100644 |
| ... | ... |
@@ -1,13 +0,0 @@ |
| 1 |
-package store |
|
| 2 |
- |
|
| 3 |
-import "fmt" |
|
| 4 |
- |
|
| 5 |
-// ErrMetaNotFound indicates we did not find a particular piece |
|
| 6 |
-// of metadata in the store |
|
| 7 |
-type ErrMetaNotFound struct {
|
|
| 8 |
- Resource string |
|
| 9 |
-} |
|
| 10 |
- |
|
| 11 |
-func (err ErrMetaNotFound) Error() string {
|
|
| 12 |
- return fmt.Sprintf("%s trust data unavailable. Has a notary repository been initialized?", err.Resource)
|
|
| 13 |
-} |
| 14 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,102 +0,0 @@ |
| 1 |
-package store |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "fmt" |
|
| 5 |
- "github.com/docker/notary" |
|
| 6 |
- "io/ioutil" |
|
| 7 |
- "os" |
|
| 8 |
- "path" |
|
| 9 |
- "path/filepath" |
|
| 10 |
-) |
|
| 11 |
- |
|
| 12 |
-// NewFilesystemStore creates a new store in a directory tree |
|
| 13 |
-func NewFilesystemStore(baseDir, metaSubDir, metaExtension string) (*FilesystemStore, error) {
|
|
| 14 |
- metaDir := path.Join(baseDir, metaSubDir) |
|
| 15 |
- |
|
| 16 |
- // Make sure we can create the necessary dirs and they are writable |
|
| 17 |
- err := os.MkdirAll(metaDir, 0700) |
|
| 18 |
- if err != nil {
|
|
| 19 |
- return nil, err |
|
| 20 |
- } |
|
| 21 |
- |
|
| 22 |
- return &FilesystemStore{
|
|
| 23 |
- baseDir: baseDir, |
|
| 24 |
- metaDir: metaDir, |
|
| 25 |
- metaExtension: metaExtension, |
|
| 26 |
- }, nil |
|
| 27 |
-} |
|
| 28 |
- |
|
| 29 |
-// FilesystemStore is a store in a locally accessible directory |
|
| 30 |
-type FilesystemStore struct {
|
|
| 31 |
- baseDir string |
|
| 32 |
- metaDir string |
|
| 33 |
- metaExtension string |
|
| 34 |
-} |
|
| 35 |
- |
|
| 36 |
-func (f *FilesystemStore) getPath(name string) string {
|
|
| 37 |
- fileName := fmt.Sprintf("%s.%s", name, f.metaExtension)
|
|
| 38 |
- return filepath.Join(f.metaDir, fileName) |
|
| 39 |
-} |
|
| 40 |
- |
|
| 41 |
-// GetMeta returns the meta for the given name (a role) up to size bytes |
|
| 42 |
-// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a |
|
| 43 |
-// predefined threshold "notary.MaxDownloadSize". |
|
| 44 |
-func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) {
|
|
| 45 |
- meta, err := ioutil.ReadFile(f.getPath(name)) |
|
| 46 |
- if err != nil {
|
|
| 47 |
- if os.IsNotExist(err) {
|
|
| 48 |
- err = ErrMetaNotFound{Resource: name}
|
|
| 49 |
- } |
|
| 50 |
- return nil, err |
|
| 51 |
- } |
|
| 52 |
- if size == NoSizeLimit {
|
|
| 53 |
- size = notary.MaxDownloadSize |
|
| 54 |
- } |
|
| 55 |
- // Only return up to size bytes |
|
| 56 |
- if int64(len(meta)) < size {
|
|
| 57 |
- return meta, nil |
|
| 58 |
- } |
|
| 59 |
- return meta[:size], nil |
|
| 60 |
-} |
|
| 61 |
- |
|
| 62 |
-// SetMultiMeta sets the metadata for multiple roles in one operation |
|
| 63 |
-func (f *FilesystemStore) SetMultiMeta(metas map[string][]byte) error {
|
|
| 64 |
- for role, blob := range metas {
|
|
| 65 |
- err := f.SetMeta(role, blob) |
|
| 66 |
- if err != nil {
|
|
| 67 |
- return err |
|
| 68 |
- } |
|
| 69 |
- } |
|
| 70 |
- return nil |
|
| 71 |
-} |
|
| 72 |
- |
|
| 73 |
-// SetMeta sets the meta for a single role |
|
| 74 |
-func (f *FilesystemStore) SetMeta(name string, meta []byte) error {
|
|
| 75 |
- fp := f.getPath(name) |
|
| 76 |
- |
|
| 77 |
- // Ensures the parent directories of the file we are about to write exist |
|
| 78 |
- err := os.MkdirAll(filepath.Dir(fp), 0700) |
|
| 79 |
- if err != nil {
|
|
| 80 |
- return err |
|
| 81 |
- } |
|
| 82 |
- |
|
| 83 |
- // if something already exists, just delete it and re-write it |
|
| 84 |
- os.RemoveAll(fp) |
|
| 85 |
- |
|
| 86 |
- // Write the file to disk |
|
| 87 |
- if err = ioutil.WriteFile(fp, meta, 0600); err != nil {
|
|
| 88 |
- return err |
|
| 89 |
- } |
|
| 90 |
- return nil |
|
| 91 |
-} |
|
| 92 |
- |
|
| 93 |
-// RemoveAll clears the existing filestore by removing its base directory |
|
| 94 |
-func (f *FilesystemStore) RemoveAll() error {
|
|
| 95 |
- return os.RemoveAll(f.baseDir) |
|
| 96 |
-} |
|
| 97 |
- |
|
| 98 |
-// RemoveMeta removes the metadata for a single role - if the metadata doesn't |
|
| 99 |
-// exist, no error is returned |
|
| 100 |
-func (f *FilesystemStore) RemoveMeta(name string) error {
|
|
| 101 |
- return os.RemoveAll(f.getPath(name)) // RemoveAll succeeds if path doesn't exist |
|
| 102 |
-} |
| 103 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,297 +0,0 @@ |
| 1 |
-// A Store that can fetch and set metadata on a remote server. |
|
| 2 |
-// Some API constraints: |
|
| 3 |
-// - Response bodies for error codes should be unmarshallable as: |
|
| 4 |
-// {"errors": [{..., "detail": <serialized validation error>}]}
|
|
| 5 |
-// else validation error details, etc. will be unparsable. The errors |
|
| 6 |
-// should have a github.com/docker/notary/tuf/validation/SerializableError |
|
| 7 |
-// in the Details field. |
|
| 8 |
-// If writing your own server, please have a look at |
|
| 9 |
-// github.com/docker/distribution/registry/api/errcode |
|
| 10 |
- |
|
| 11 |
-package store |
|
| 12 |
- |
|
| 13 |
-import ( |
|
| 14 |
- "bytes" |
|
| 15 |
- "encoding/json" |
|
| 16 |
- "errors" |
|
| 17 |
- "fmt" |
|
| 18 |
- "io" |
|
| 19 |
- "io/ioutil" |
|
| 20 |
- "mime/multipart" |
|
| 21 |
- "net/http" |
|
| 22 |
- "net/url" |
|
| 23 |
- "path" |
|
| 24 |
- |
|
| 25 |
- "github.com/Sirupsen/logrus" |
|
| 26 |
- "github.com/docker/notary" |
|
| 27 |
- "github.com/docker/notary/tuf/validation" |
|
| 28 |
-) |
|
| 29 |
- |
|
| 30 |
-// ErrServerUnavailable indicates an error from the server. code allows us to |
|
| 31 |
-// populate the http error we received |
|
| 32 |
-type ErrServerUnavailable struct {
|
|
| 33 |
- code int |
|
| 34 |
-} |
|
| 35 |
- |
|
| 36 |
-func (err ErrServerUnavailable) Error() string {
|
|
| 37 |
- if err.code == 401 {
|
|
| 38 |
- return fmt.Sprintf("you are not authorized to perform this operation: server returned 401.")
|
|
| 39 |
- } |
|
| 40 |
- return fmt.Sprintf("unable to reach trust server at this time: %d.", err.code)
|
|
| 41 |
-} |
|
| 42 |
- |
|
| 43 |
-// ErrMaliciousServer indicates the server returned a response that is highly suspected |
|
| 44 |
-// of being malicious. i.e. it attempted to send us more data than the known size of a |
|
| 45 |
-// particular role metadata. |
|
| 46 |
-type ErrMaliciousServer struct{}
|
|
| 47 |
- |
|
| 48 |
-func (err ErrMaliciousServer) Error() string {
|
|
| 49 |
- return "trust server returned a bad response." |
|
| 50 |
-} |
|
| 51 |
- |
|
| 52 |
-// ErrInvalidOperation indicates that the server returned a 400 response and |
|
| 53 |
-// propagate any body we received. |
|
| 54 |
-type ErrInvalidOperation struct {
|
|
| 55 |
- msg string |
|
| 56 |
-} |
|
| 57 |
- |
|
| 58 |
-func (err ErrInvalidOperation) Error() string {
|
|
| 59 |
- if err.msg != "" {
|
|
| 60 |
- return fmt.Sprintf("trust server rejected operation: %s", err.msg)
|
|
| 61 |
- } |
|
| 62 |
- return "trust server rejected operation." |
|
| 63 |
-} |
|
| 64 |
- |
|
| 65 |
-// HTTPStore manages pulling and pushing metadata from and to a remote |
|
| 66 |
-// service over HTTP. It assumes the URL structure of the remote service |
|
| 67 |
-// maps identically to the structure of the TUF repo: |
|
| 68 |
-// <baseURL>/<metaPrefix>/(root|targets|snapshot|timestamp).json |
|
| 69 |
-// <baseURL>/<targetsPrefix>/foo.sh |
|
| 70 |
-// |
|
| 71 |
-// If consistent snapshots are disabled, it is advised that caching is not |
|
| 72 |
-// enabled. Simple set a cachePath (and ensure it's writeable) to enable |
|
| 73 |
-// caching. |
|
| 74 |
-type HTTPStore struct {
|
|
| 75 |
- baseURL url.URL |
|
| 76 |
- metaPrefix string |
|
| 77 |
- metaExtension string |
|
| 78 |
- keyExtension string |
|
| 79 |
- roundTrip http.RoundTripper |
|
| 80 |
-} |
|
| 81 |
- |
|
| 82 |
-// NewHTTPStore initializes a new store against a URL and a number of configuration options |
|
| 83 |
-func NewHTTPStore(baseURL, metaPrefix, metaExtension, keyExtension string, roundTrip http.RoundTripper) (RemoteStore, error) {
|
|
| 84 |
- base, err := url.Parse(baseURL) |
|
| 85 |
- if err != nil {
|
|
| 86 |
- return nil, err |
|
| 87 |
- } |
|
| 88 |
- if !base.IsAbs() {
|
|
| 89 |
- return nil, errors.New("HTTPStore requires an absolute baseURL")
|
|
| 90 |
- } |
|
| 91 |
- if roundTrip == nil {
|
|
| 92 |
- return &OfflineStore{}, nil
|
|
| 93 |
- } |
|
| 94 |
- return &HTTPStore{
|
|
| 95 |
- baseURL: *base, |
|
| 96 |
- metaPrefix: metaPrefix, |
|
| 97 |
- metaExtension: metaExtension, |
|
| 98 |
- keyExtension: keyExtension, |
|
| 99 |
- roundTrip: roundTrip, |
|
| 100 |
- }, nil |
|
| 101 |
-} |
|
| 102 |
- |
|
| 103 |
-func tryUnmarshalError(resp *http.Response, defaultError error) error {
|
|
| 104 |
- bodyBytes, err := ioutil.ReadAll(resp.Body) |
|
| 105 |
- if err != nil {
|
|
| 106 |
- return defaultError |
|
| 107 |
- } |
|
| 108 |
- var parsedErrors struct {
|
|
| 109 |
- Errors []struct {
|
|
| 110 |
- Detail validation.SerializableError `json:"detail"` |
|
| 111 |
- } `json:"errors"` |
|
| 112 |
- } |
|
| 113 |
- if err := json.Unmarshal(bodyBytes, &parsedErrors); err != nil {
|
|
| 114 |
- return defaultError |
|
| 115 |
- } |
|
| 116 |
- if len(parsedErrors.Errors) != 1 {
|
|
| 117 |
- return defaultError |
|
| 118 |
- } |
|
| 119 |
- err = parsedErrors.Errors[0].Detail.Error |
|
| 120 |
- if err == nil {
|
|
| 121 |
- return defaultError |
|
| 122 |
- } |
|
| 123 |
- return err |
|
| 124 |
-} |
|
| 125 |
- |
|
| 126 |
-func translateStatusToError(resp *http.Response, resource string) error {
|
|
| 127 |
- switch resp.StatusCode {
|
|
| 128 |
- case http.StatusOK: |
|
| 129 |
- return nil |
|
| 130 |
- case http.StatusNotFound: |
|
| 131 |
- return ErrMetaNotFound{Resource: resource}
|
|
| 132 |
- case http.StatusBadRequest: |
|
| 133 |
- return tryUnmarshalError(resp, ErrInvalidOperation{})
|
|
| 134 |
- default: |
|
| 135 |
- return ErrServerUnavailable{code: resp.StatusCode}
|
|
| 136 |
- } |
|
| 137 |
-} |
|
| 138 |
- |
|
| 139 |
-// GetMeta downloads the named meta file with the given size. A short body |
|
| 140 |
-// is acceptable because in the case of timestamp.json, the size is a cap, |
|
| 141 |
-// not an exact length. |
|
| 142 |
-// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a |
|
| 143 |
-// predefined threshold "notary.MaxDownloadSize". |
|
| 144 |
-func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) {
|
|
| 145 |
- url, err := s.buildMetaURL(name) |
|
| 146 |
- if err != nil {
|
|
| 147 |
- return nil, err |
|
| 148 |
- } |
|
| 149 |
- req, err := http.NewRequest("GET", url.String(), nil)
|
|
| 150 |
- if err != nil {
|
|
| 151 |
- return nil, err |
|
| 152 |
- } |
|
| 153 |
- resp, err := s.roundTrip.RoundTrip(req) |
|
| 154 |
- if err != nil {
|
|
| 155 |
- return nil, err |
|
| 156 |
- } |
|
| 157 |
- defer resp.Body.Close() |
|
| 158 |
- if err := translateStatusToError(resp, name); err != nil {
|
|
| 159 |
- logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
|
|
| 160 |
- return nil, err |
|
| 161 |
- } |
|
| 162 |
- if size == NoSizeLimit {
|
|
| 163 |
- size = notary.MaxDownloadSize |
|
| 164 |
- } |
|
| 165 |
- if resp.ContentLength > size {
|
|
| 166 |
- return nil, ErrMaliciousServer{}
|
|
| 167 |
- } |
|
| 168 |
- logrus.Debugf("%d when retrieving metadata for %s", resp.StatusCode, name)
|
|
| 169 |
- b := io.LimitReader(resp.Body, size) |
|
| 170 |
- body, err := ioutil.ReadAll(b) |
|
| 171 |
- if err != nil {
|
|
| 172 |
- return nil, err |
|
| 173 |
- } |
|
| 174 |
- return body, nil |
|
| 175 |
-} |
|
| 176 |
- |
|
| 177 |
-// SetMeta uploads a piece of TUF metadata to the server |
|
| 178 |
-func (s HTTPStore) SetMeta(name string, blob []byte) error {
|
|
| 179 |
- url, err := s.buildMetaURL("")
|
|
| 180 |
- if err != nil {
|
|
| 181 |
- return err |
|
| 182 |
- } |
|
| 183 |
- req, err := http.NewRequest("POST", url.String(), bytes.NewReader(blob))
|
|
| 184 |
- if err != nil {
|
|
| 185 |
- return err |
|
| 186 |
- } |
|
| 187 |
- resp, err := s.roundTrip.RoundTrip(req) |
|
| 188 |
- if err != nil {
|
|
| 189 |
- return err |
|
| 190 |
- } |
|
| 191 |
- defer resp.Body.Close() |
|
| 192 |
- return translateStatusToError(resp, "POST "+name) |
|
| 193 |
-} |
|
| 194 |
- |
|
| 195 |
-// RemoveMeta always fails, because we should never be able to delete metadata |
|
| 196 |
-// remotely |
|
| 197 |
-func (s HTTPStore) RemoveMeta(name string) error {
|
|
| 198 |
- return ErrInvalidOperation{msg: "cannot delete metadata"}
|
|
| 199 |
-} |
|
| 200 |
- |
|
| 201 |
-// NewMultiPartMetaRequest builds a request with the provided metadata updates |
|
| 202 |
-// in multipart form |
|
| 203 |
-func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request, error) {
|
|
| 204 |
- body := &bytes.Buffer{}
|
|
| 205 |
- writer := multipart.NewWriter(body) |
|
| 206 |
- for role, blob := range metas {
|
|
| 207 |
- part, err := writer.CreateFormFile("files", role)
|
|
| 208 |
- _, err = io.Copy(part, bytes.NewBuffer(blob)) |
|
| 209 |
- if err != nil {
|
|
| 210 |
- return nil, err |
|
| 211 |
- } |
|
| 212 |
- } |
|
| 213 |
- err := writer.Close() |
|
| 214 |
- if err != nil {
|
|
| 215 |
- return nil, err |
|
| 216 |
- } |
|
| 217 |
- req, err := http.NewRequest("POST", url, body)
|
|
| 218 |
- if err != nil {
|
|
| 219 |
- return nil, err |
|
| 220 |
- } |
|
| 221 |
- req.Header.Set("Content-Type", writer.FormDataContentType())
|
|
| 222 |
- return req, nil |
|
| 223 |
-} |
|
| 224 |
- |
|
| 225 |
-// SetMultiMeta does a single batch upload of multiple pieces of TUF metadata. |
|
| 226 |
-// This should be preferred for updating a remote server as it enable the server |
|
| 227 |
-// to remain consistent, either accepting or rejecting the complete update. |
|
| 228 |
-func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error {
|
|
| 229 |
- url, err := s.buildMetaURL("")
|
|
| 230 |
- if err != nil {
|
|
| 231 |
- return err |
|
| 232 |
- } |
|
| 233 |
- req, err := NewMultiPartMetaRequest(url.String(), metas) |
|
| 234 |
- if err != nil {
|
|
| 235 |
- return err |
|
| 236 |
- } |
|
| 237 |
- resp, err := s.roundTrip.RoundTrip(req) |
|
| 238 |
- if err != nil {
|
|
| 239 |
- return err |
|
| 240 |
- } |
|
| 241 |
- defer resp.Body.Close() |
|
| 242 |
- // if this 404's something is pretty wrong |
|
| 243 |
- return translateStatusToError(resp, "POST metadata endpoint") |
|
| 244 |
-} |
|
| 245 |
- |
|
| 246 |
-// RemoveAll in the interface is not supported, admins should use the DeleteHandler endpoint directly to delete remote data for a GUN |
|
| 247 |
-func (s HTTPStore) RemoveAll() error {
|
|
| 248 |
- return errors.New("remove all functionality not supported for HTTPStore")
|
|
| 249 |
-} |
|
| 250 |
- |
|
| 251 |
-func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
|
|
| 252 |
- var filename string |
|
| 253 |
- if name != "" {
|
|
| 254 |
- filename = fmt.Sprintf("%s.%s", name, s.metaExtension)
|
|
| 255 |
- } |
|
| 256 |
- uri := path.Join(s.metaPrefix, filename) |
|
| 257 |
- return s.buildURL(uri) |
|
| 258 |
-} |
|
| 259 |
- |
|
| 260 |
-func (s HTTPStore) buildKeyURL(name string) (*url.URL, error) {
|
|
| 261 |
- filename := fmt.Sprintf("%s.%s", name, s.keyExtension)
|
|
| 262 |
- uri := path.Join(s.metaPrefix, filename) |
|
| 263 |
- return s.buildURL(uri) |
|
| 264 |
-} |
|
| 265 |
- |
|
| 266 |
-func (s HTTPStore) buildURL(uri string) (*url.URL, error) {
|
|
| 267 |
- sub, err := url.Parse(uri) |
|
| 268 |
- if err != nil {
|
|
| 269 |
- return nil, err |
|
| 270 |
- } |
|
| 271 |
- return s.baseURL.ResolveReference(sub), nil |
|
| 272 |
-} |
|
| 273 |
- |
|
| 274 |
-// GetKey retrieves a public key from the remote server |
|
| 275 |
-func (s HTTPStore) GetKey(role string) ([]byte, error) {
|
|
| 276 |
- url, err := s.buildKeyURL(role) |
|
| 277 |
- if err != nil {
|
|
| 278 |
- return nil, err |
|
| 279 |
- } |
|
| 280 |
- req, err := http.NewRequest("GET", url.String(), nil)
|
|
| 281 |
- if err != nil {
|
|
| 282 |
- return nil, err |
|
| 283 |
- } |
|
| 284 |
- resp, err := s.roundTrip.RoundTrip(req) |
|
| 285 |
- if err != nil {
|
|
| 286 |
- return nil, err |
|
| 287 |
- } |
|
| 288 |
- defer resp.Body.Close() |
|
| 289 |
- if err := translateStatusToError(resp, role+" key"); err != nil {
|
|
| 290 |
- return nil, err |
|
| 291 |
- } |
|
| 292 |
- body, err := ioutil.ReadAll(resp.Body) |
|
| 293 |
- if err != nil {
|
|
| 294 |
- return nil, err |
|
| 295 |
- } |
|
| 296 |
- return body, nil |
|
| 297 |
-} |
| 298 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,31 +0,0 @@ |
| 1 |
-package store |
|
| 2 |
- |
|
| 3 |
-// NoSizeLimit is represented as -1 for arguments to GetMeta |
|
| 4 |
-const NoSizeLimit int64 = -1 |
|
| 5 |
- |
|
| 6 |
-// MetadataStore must be implemented by anything that intends to interact |
|
| 7 |
-// with a store of TUF files |
|
| 8 |
-type MetadataStore interface {
|
|
| 9 |
- GetMeta(name string, size int64) ([]byte, error) |
|
| 10 |
- SetMeta(name string, blob []byte) error |
|
| 11 |
- SetMultiMeta(map[string][]byte) error |
|
| 12 |
- RemoveAll() error |
|
| 13 |
- RemoveMeta(name string) error |
|
| 14 |
-} |
|
| 15 |
- |
|
| 16 |
-// PublicKeyStore must be implemented by a key service |
|
| 17 |
-type PublicKeyStore interface {
|
|
| 18 |
- GetKey(role string) ([]byte, error) |
|
| 19 |
-} |
|
| 20 |
- |
|
| 21 |
-// LocalStore represents a local TUF sture |
|
| 22 |
-type LocalStore interface {
|
|
| 23 |
- MetadataStore |
|
| 24 |
-} |
|
| 25 |
- |
|
| 26 |
-// RemoteStore is similar to LocalStore with the added expectation that it should |
|
| 27 |
-// provide a way to download targets once located |
|
| 28 |
-type RemoteStore interface {
|
|
| 29 |
- MetadataStore |
|
| 30 |
- PublicKeyStore |
|
| 31 |
-} |
| 32 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,107 +0,0 @@ |
| 1 |
-package store |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "crypto/sha256" |
|
| 5 |
- "fmt" |
|
| 6 |
- |
|
| 7 |
- "github.com/docker/notary" |
|
| 8 |
- "github.com/docker/notary/tuf/data" |
|
| 9 |
- "github.com/docker/notary/tuf/utils" |
|
| 10 |
-) |
|
| 11 |
- |
|
| 12 |
-// NewMemoryStore returns a MetadataStore that operates entirely in memory. |
|
| 13 |
-// Very useful for testing |
|
| 14 |
-func NewMemoryStore(meta map[string][]byte) *MemoryStore {
|
|
| 15 |
- var consistent = make(map[string][]byte) |
|
| 16 |
- if meta == nil {
|
|
| 17 |
- meta = make(map[string][]byte) |
|
| 18 |
- } else {
|
|
| 19 |
- // add all seed meta to consistent |
|
| 20 |
- for name, data := range meta {
|
|
| 21 |
- checksum := sha256.Sum256(data) |
|
| 22 |
- path := utils.ConsistentName(name, checksum[:]) |
|
| 23 |
- consistent[path] = data |
|
| 24 |
- } |
|
| 25 |
- } |
|
| 26 |
- return &MemoryStore{
|
|
| 27 |
- meta: meta, |
|
| 28 |
- consistent: consistent, |
|
| 29 |
- keys: make(map[string][]data.PrivateKey), |
|
| 30 |
- } |
|
| 31 |
-} |
|
| 32 |
- |
|
| 33 |
-// MemoryStore implements a mock RemoteStore entirely in memory. |
|
| 34 |
-// For testing purposes only. |
|
| 35 |
-type MemoryStore struct {
|
|
| 36 |
- meta map[string][]byte |
|
| 37 |
- consistent map[string][]byte |
|
| 38 |
- keys map[string][]data.PrivateKey |
|
| 39 |
-} |
|
| 40 |
- |
|
| 41 |
-// GetMeta returns up to size bytes of data references by name. |
|
| 42 |
-// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a |
|
| 43 |
-// predefined threshold "notary.MaxDownloadSize", as we will always know the |
|
| 44 |
-// size for everything but a timestamp and sometimes a root, |
|
| 45 |
-// neither of which should be exceptionally large |
|
| 46 |
-func (m *MemoryStore) GetMeta(name string, size int64) ([]byte, error) {
|
|
| 47 |
- d, ok := m.meta[name] |
|
| 48 |
- if ok {
|
|
| 49 |
- if size == NoSizeLimit {
|
|
| 50 |
- size = notary.MaxDownloadSize |
|
| 51 |
- } |
|
| 52 |
- if int64(len(d)) < size {
|
|
| 53 |
- return d, nil |
|
| 54 |
- } |
|
| 55 |
- return d[:size], nil |
|
| 56 |
- } |
|
| 57 |
- d, ok = m.consistent[name] |
|
| 58 |
- if ok {
|
|
| 59 |
- if int64(len(d)) < size {
|
|
| 60 |
- return d, nil |
|
| 61 |
- } |
|
| 62 |
- return d[:size], nil |
|
| 63 |
- } |
|
| 64 |
- return nil, ErrMetaNotFound{Resource: name}
|
|
| 65 |
-} |
|
| 66 |
- |
|
| 67 |
-// SetMeta sets the metadata value for the given name |
|
| 68 |
-func (m *MemoryStore) SetMeta(name string, meta []byte) error {
|
|
| 69 |
- m.meta[name] = meta |
|
| 70 |
- |
|
| 71 |
- checksum := sha256.Sum256(meta) |
|
| 72 |
- path := utils.ConsistentName(name, checksum[:]) |
|
| 73 |
- m.consistent[path] = meta |
|
| 74 |
- return nil |
|
| 75 |
-} |
|
| 76 |
- |
|
| 77 |
-// SetMultiMeta sets multiple pieces of metadata for multiple names |
|
| 78 |
-// in a single operation. |
|
| 79 |
-func (m *MemoryStore) SetMultiMeta(metas map[string][]byte) error {
|
|
| 80 |
- for role, blob := range metas {
|
|
| 81 |
- m.SetMeta(role, blob) |
|
| 82 |
- } |
|
| 83 |
- return nil |
|
| 84 |
-} |
|
| 85 |
- |
|
| 86 |
-// RemoveMeta removes the metadata for a single role - if the metadata doesn't |
|
| 87 |
-// exist, no error is returned |
|
| 88 |
-func (m *MemoryStore) RemoveMeta(name string) error {
|
|
| 89 |
- if meta, ok := m.meta[name]; ok {
|
|
| 90 |
- checksum := sha256.Sum256(meta) |
|
| 91 |
- path := utils.ConsistentName(name, checksum[:]) |
|
| 92 |
- delete(m.meta, name) |
|
| 93 |
- delete(m.consistent, path) |
|
| 94 |
- } |
|
| 95 |
- return nil |
|
| 96 |
-} |
|
| 97 |
- |
|
| 98 |
-// GetKey returns the public key for the given role |
|
| 99 |
-func (m *MemoryStore) GetKey(role string) ([]byte, error) {
|
|
| 100 |
- return nil, fmt.Errorf("GetKey is not implemented for the MemoryStore")
|
|
| 101 |
-} |
|
| 102 |
- |
|
| 103 |
-// RemoveAll clears the existing memory store by setting this store as new empty one |
|
| 104 |
-func (m *MemoryStore) RemoveAll() error {
|
|
| 105 |
- *m = *NewMemoryStore(nil) |
|
| 106 |
- return nil |
|
| 107 |
-} |
| 108 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,53 +0,0 @@ |
| 1 |
-package store |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "io" |
|
| 5 |
-) |
|
| 6 |
- |
|
| 7 |
-// ErrOffline is used to indicate we are operating offline |
|
| 8 |
-type ErrOffline struct{}
|
|
| 9 |
- |
|
| 10 |
-func (e ErrOffline) Error() string {
|
|
| 11 |
- return "client is offline" |
|
| 12 |
-} |
|
| 13 |
- |
|
| 14 |
-var err = ErrOffline{}
|
|
| 15 |
- |
|
| 16 |
-// OfflineStore is to be used as a placeholder for a nil store. It simply |
|
| 17 |
-// returns ErrOffline for every operation |
|
| 18 |
-type OfflineStore struct{}
|
|
| 19 |
- |
|
| 20 |
-// GetMeta returns ErrOffline |
|
| 21 |
-func (es OfflineStore) GetMeta(name string, size int64) ([]byte, error) {
|
|
| 22 |
- return nil, err |
|
| 23 |
-} |
|
| 24 |
- |
|
| 25 |
-// SetMeta returns ErrOffline |
|
| 26 |
-func (es OfflineStore) SetMeta(name string, blob []byte) error {
|
|
| 27 |
- return err |
|
| 28 |
-} |
|
| 29 |
- |
|
| 30 |
-// SetMultiMeta returns ErrOffline |
|
| 31 |
-func (es OfflineStore) SetMultiMeta(map[string][]byte) error {
|
|
| 32 |
- return err |
|
| 33 |
-} |
|
| 34 |
- |
|
| 35 |
-// RemoveMeta returns ErrOffline |
|
| 36 |
-func (es OfflineStore) RemoveMeta(name string) error {
|
|
| 37 |
- return err |
|
| 38 |
-} |
|
| 39 |
- |
|
| 40 |
-// GetKey returns ErrOffline |
|
| 41 |
-func (es OfflineStore) GetKey(role string) ([]byte, error) {
|
|
| 42 |
- return nil, err |
|
| 43 |
-} |
|
| 44 |
- |
|
| 45 |
-// GetTarget returns ErrOffline |
|
| 46 |
-func (es OfflineStore) GetTarget(path string) (io.ReadCloser, error) {
|
|
| 47 |
- return nil, err |
|
| 48 |
-} |
|
| 49 |
- |
|
| 50 |
-// RemoveAll return ErrOffline |
|
| 51 |
-func (es OfflineStore) RemoveAll() error {
|
|
| 52 |
- return err |
|
| 53 |
-} |
| ... | ... |
@@ -77,11 +77,10 @@ type Repo struct {
|
| 77 | 77 |
// If the Repo will only be used for reading, the CryptoService |
| 78 | 78 |
// can be nil. |
| 79 | 79 |
func NewRepo(cryptoService signed.CryptoService) *Repo {
|
| 80 |
- repo := &Repo{
|
|
| 80 |
+ return &Repo{
|
|
| 81 | 81 |
Targets: make(map[string]*data.SignedTargets), |
| 82 | 82 |
cryptoService: cryptoService, |
| 83 | 83 |
} |
| 84 |
- return repo |
|
| 85 | 84 |
} |
| 86 | 85 |
|
| 87 | 86 |
// AddBaseKeys is used to add keys to the role in root.json |
| ... | ... |
@@ -245,6 +244,21 @@ func (tr *Repo) GetDelegationRole(name string) (data.DelegationRole, error) {
|
| 245 | 245 |
if err != nil {
|
| 246 | 246 |
return err |
| 247 | 247 |
} |
| 248 |
+ // Check all public key certificates in the role for expiry |
|
| 249 |
+ // Currently we do not reject expired delegation keys, but we warn if they will expire soon or have already expired |
|
| 250 |
+ for keyID, pubKey := range delgRole.Keys {
|
|
| 251 |
+ certFromKey, err := utils.LoadCertFromPEM(pubKey.Public()) |
|
| 252 |
+ if err != nil {
|
|
| 253 |
+ continue |
|
| 254 |
+ } |
|
| 255 |
+ if err := utils.ValidateCertificate(certFromKey, true); err != nil {
|
|
| 256 |
+ if _, ok := err.(data.ErrCertExpired); !ok {
|
|
| 257 |
+ // do not allow other invalid cert errors |
|
| 258 |
+ return err |
|
| 259 |
+ } |
|
| 260 |
+ logrus.Warnf("error with delegation %s key ID %d: %s", delgRole.Name, keyID, err)
|
|
| 261 |
+ } |
|
| 262 |
+ } |
|
| 248 | 263 |
foundRole = &delgRole |
| 249 | 264 |
return StopWalk{}
|
| 250 | 265 |
} |
| ... | ... |
@@ -325,17 +339,16 @@ func delegationUpdateVisitor(roleName string, addKeys data.KeyList, removeKeys, |
| 325 | 325 |
break |
| 326 | 326 |
} |
| 327 | 327 |
} |
| 328 |
- // We didn't find the role earlier, so create it only if we have keys to add |
|
| 328 |
+ // We didn't find the role earlier, so create it. |
|
| 329 |
+ if addKeys == nil {
|
|
| 330 |
+ addKeys = data.KeyList{} // initialize to empty list if necessary so calling .IDs() below won't panic
|
|
| 331 |
+ } |
|
| 329 | 332 |
if delgRole == nil {
|
| 330 |
- if len(addKeys) > 0 {
|
|
| 331 |
- delgRole, err = data.NewRole(roleName, newThreshold, addKeys.IDs(), addPaths) |
|
| 332 |
- if err != nil {
|
|
| 333 |
- return err |
|
| 334 |
- } |
|
| 335 |
- } else {
|
|
| 336 |
- // If we can't find the role and didn't specify keys to add, this is an error |
|
| 337 |
- return data.ErrInvalidRole{Role: roleName, Reason: "cannot create new delegation without keys"}
|
|
| 333 |
+ delgRole, err = data.NewRole(roleName, newThreshold, addKeys.IDs(), addPaths) |
|
| 334 |
+ if err != nil {
|
|
| 335 |
+ return err |
|
| 338 | 336 |
} |
| 337 |
+ |
|
| 339 | 338 |
} |
| 340 | 339 |
// Add the key IDs to the role and the keys themselves to the parent |
| 341 | 340 |
for _, k := range addKeys {
|
| ... | ... |
@@ -345,7 +358,7 @@ func delegationUpdateVisitor(roleName string, addKeys data.KeyList, removeKeys, |
| 345 | 345 |
} |
| 346 | 346 |
// Make sure we have a valid role still |
| 347 | 347 |
if len(delgRole.KeyIDs) < delgRole.Threshold {
|
| 348 |
- return data.ErrInvalidRole{Role: roleName, Reason: "insufficient keys to meet threshold"}
|
|
| 348 |
+ logrus.Warnf("role %s has fewer keys than its threshold of %d; it will not be usable until keys are added to it", delgRole.Name, delgRole.Threshold)
|
|
| 349 | 349 |
} |
| 350 | 350 |
// NOTE: this closure CANNOT error after this point, as we've committed to editing the SignedTargets metadata in the repo object. |
| 351 | 351 |
// Any errors related to updating this delegation must occur before this point. |
| ... | ... |
@@ -392,11 +405,77 @@ func (tr *Repo) UpdateDelegationKeys(roleName string, addKeys data.KeyList, remo |
| 392 | 392 |
// Walk to the parent of this delegation, since that is where its role metadata exists |
| 393 | 393 |
// We do not have to verify that the walker reached its desired role in this scenario |
| 394 | 394 |
// since we've already done another walk to the parent role in VerifyCanSign, and potentially made a targets file |
| 395 |
- err := tr.WalkTargets("", parent, delegationUpdateVisitor(roleName, addKeys, removeKeys, []string{}, []string{}, false, newThreshold))
|
|
| 396 |
- if err != nil {
|
|
| 397 |
- return err |
|
| 395 |
+ return tr.WalkTargets("", parent, delegationUpdateVisitor(roleName, addKeys, removeKeys, []string{}, []string{}, false, newThreshold))
|
|
| 396 |
+} |
|
| 397 |
+ |
|
| 398 |
+// PurgeDelegationKeys removes the provided canonical key IDs from all delegations |
|
| 399 |
+// present in the subtree rooted at role. The role argument must be provided in a wildcard |
|
| 400 |
+// format, i.e. targets/* would remove the key from all delegations in the repo |
|
| 401 |
+func (tr *Repo) PurgeDelegationKeys(role string, removeKeys []string) error {
|
|
| 402 |
+ if !data.IsWildDelegation(role) {
|
|
| 403 |
+ return data.ErrInvalidRole{
|
|
| 404 |
+ Role: role, |
|
| 405 |
+ Reason: "only wildcard roles can be used in a purge", |
|
| 406 |
+ } |
|
| 398 | 407 |
} |
| 399 |
- return nil |
|
| 408 |
+ |
|
| 409 |
+ removeIDs := make(map[string]struct{})
|
|
| 410 |
+ for _, id := range removeKeys {
|
|
| 411 |
+ removeIDs[id] = struct{}{}
|
|
| 412 |
+ } |
|
| 413 |
+ |
|
| 414 |
+ start := path.Dir(role) |
|
| 415 |
+ tufIDToCanon := make(map[string]string) |
|
| 416 |
+ |
|
| 417 |
+ purgeKeys := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
|
|
| 418 |
+ var ( |
|
| 419 |
+ deleteCandidates []string |
|
| 420 |
+ err error |
|
| 421 |
+ ) |
|
| 422 |
+ for id, key := range tgt.Signed.Delegations.Keys {
|
|
| 423 |
+ var ( |
|
| 424 |
+ canonID string |
|
| 425 |
+ ok bool |
|
| 426 |
+ ) |
|
| 427 |
+ if canonID, ok = tufIDToCanon[id]; !ok {
|
|
| 428 |
+ canonID, err = utils.CanonicalKeyID(key) |
|
| 429 |
+ if err != nil {
|
|
| 430 |
+ return err |
|
| 431 |
+ } |
|
| 432 |
+ tufIDToCanon[id] = canonID |
|
| 433 |
+ } |
|
| 434 |
+ if _, ok := removeIDs[canonID]; ok {
|
|
| 435 |
+ deleteCandidates = append(deleteCandidates, id) |
|
| 436 |
+ } |
|
| 437 |
+ } |
|
| 438 |
+ if len(deleteCandidates) == 0 {
|
|
| 439 |
+ // none of the interesting keys were present. We're done with this role |
|
| 440 |
+ return nil |
|
| 441 |
+ } |
|
| 442 |
+ // now we know there are changes, check if we'll be able to sign them in |
|
| 443 |
+ if err := tr.VerifyCanSign(validRole.Name); err != nil {
|
|
| 444 |
+ logrus.Warnf( |
|
| 445 |
+ "role %s contains keys being purged but you do not have the necessary keys present to sign it; keys will not be purged from %s or its immediate children", |
|
| 446 |
+ validRole.Name, |
|
| 447 |
+ validRole.Name, |
|
| 448 |
+ ) |
|
| 449 |
+ return nil |
|
| 450 |
+ } |
|
| 451 |
+ // we know we can sign in the changes, delete the keys |
|
| 452 |
+ for _, id := range deleteCandidates {
|
|
| 453 |
+ delete(tgt.Signed.Delegations.Keys, id) |
|
| 454 |
+ } |
|
| 455 |
+ // delete candidate keys from all roles. |
|
| 456 |
+ for _, role := range tgt.Signed.Delegations.Roles {
|
|
| 457 |
+ role.RemoveKeys(deleteCandidates) |
|
| 458 |
+ if len(role.KeyIDs) < role.Threshold {
|
|
| 459 |
+ logrus.Warnf("role %s has fewer keys than its threshold of %d; it will not be usable until keys are added to it", role.Name, role.Threshold)
|
|
| 460 |
+ } |
|
| 461 |
+ } |
|
| 462 |
+ tgt.Dirty = true |
|
| 463 |
+ return nil |
|
| 464 |
+ } |
|
| 465 |
+ return tr.WalkTargets("", start, purgeKeys)
|
|
| 400 | 466 |
} |
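A sketch of the intended call, assuming `repo` is a *Repo and `canonID` is the canonical key ID being purged; the wildcard form is mandatory:

    // Removes canonID from every delegation under targets/releases,
    // skipping (with a warning) any role we lack the keys to re-sign.
    err := repo.PurgeDelegationKeys("targets/releases/*", []string{canonID})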
| 401 | 467 |
|
| 402 | 468 |
// UpdateDelegationPaths updates the appropriate delegation's paths. |
| ... | ... |
@@ -655,7 +734,7 @@ func (tr *Repo) WalkTargets(targetPath, rolePath string, visitTargets walkVisito |
| 655 | 655 |
} |
| 656 | 656 |
|
| 657 | 657 |
// Determine whether to visit this role or not: |
| 658 |
- // If the paths validate against the specified targetPath and the rolePath is empty or is in the subtree |
|
| 658 |
+ // If the paths validate against the specified targetPath and the rolePath is empty or is in the subtree. |
|
| 659 | 659 |
// Also check if we are choosing to skip visiting this role on this walk (see ListTargets and GetTargetByName priority) |
| 660 | 660 |
if isValidPath(targetPath, role) && isAncestorRole(role.Name, rolePath) && !utils.StrSliceContains(skipRoles, role.Name) {
|
| 661 | 661 |
// If we had matching path or role name, visit this target and determine whether or not to keep walking |
| ... | ... |
@@ -948,7 +1027,7 @@ func (tr *Repo) SignTargets(role string, expires time.Time) (*data.Signed, error |
| 948 | 948 |
if _, ok := tr.Targets[role]; !ok {
|
| 949 | 949 |
return nil, data.ErrInvalidRole{
|
| 950 | 950 |
Role: role, |
| 951 |
- Reason: "SignTargets called with non-existant targets role", |
|
| 951 |
+ Reason: "SignTargets called with non-existent targets role", |
|
| 952 | 952 |
} |
| 953 | 953 |
} |
| 954 | 954 |
tr.Targets[role].Signed.Expires = expires |
| 955 | 955 |
deleted file mode 100644 |
| ... | ... |
@@ -1,109 +0,0 @@ |
| 1 |
-package utils |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "crypto/hmac" |
|
| 5 |
- "encoding/hex" |
|
| 6 |
- "errors" |
|
| 7 |
- "fmt" |
|
| 8 |
- gopath "path" |
|
| 9 |
- "path/filepath" |
|
| 10 |
- |
|
| 11 |
- "github.com/docker/notary/trustmanager" |
|
| 12 |
- "github.com/docker/notary/tuf/data" |
|
| 13 |
-) |
|
| 14 |
- |
|
| 15 |
-// ErrWrongLength indicates the length was different to that expected |
|
| 16 |
-var ErrWrongLength = errors.New("wrong length")
|
|
| 17 |
- |
|
| 18 |
-// ErrWrongHash indicates the hash was different to that expected |
|
| 19 |
-type ErrWrongHash struct {
|
|
| 20 |
- Type string |
|
| 21 |
- Expected []byte |
|
| 22 |
- Actual []byte |
|
| 23 |
-} |
|
| 24 |
- |
|
| 25 |
-// Error implements error interface |
|
| 26 |
-func (e ErrWrongHash) Error() string {
|
|
| 27 |
- return fmt.Sprintf("wrong %s hash, expected %#x got %#x", e.Type, e.Expected, e.Actual)
|
|
| 28 |
-} |
|
| 29 |
- |
|
| 30 |
-// ErrNoCommonHash indicates the metadata did not provide any hashes this |
|
| 31 |
-// client recognizes |
|
| 32 |
-type ErrNoCommonHash struct {
|
|
| 33 |
- Expected data.Hashes |
|
| 34 |
- Actual data.Hashes |
|
| 35 |
-} |
|
| 36 |
- |
|
| 37 |
-// Error implements error interface |
|
| 38 |
-func (e ErrNoCommonHash) Error() string {
|
|
| 39 |
- types := func(a data.Hashes) []string {
|
|
| 40 |
- t := make([]string, 0, len(a)) |
|
| 41 |
- for typ := range a {
|
|
| 42 |
- t = append(t, typ) |
|
| 43 |
- } |
|
| 44 |
- return t |
|
| 45 |
- } |
|
| 46 |
- return fmt.Sprintf("no common hash function, expected one of %s, got %s", types(e.Expected), types(e.Actual))
|
|
| 47 |
-} |
|
| 48 |
- |
|
| 49 |
-// ErrUnknownHashAlgorithm - client was ashed to use a hash algorithm |
|
| 50 |
-// it is not familiar with |
|
| 51 |
-type ErrUnknownHashAlgorithm struct {
|
|
| 52 |
- Name string |
|
| 53 |
-} |
|
| 54 |
- |
|
| 55 |
-// Error implements error interface |
|
| 56 |
-func (e ErrUnknownHashAlgorithm) Error() string {
|
|
| 57 |
- return fmt.Sprintf("unknown hash algorithm: %s", e.Name)
|
|
| 58 |
-} |
|
| 59 |
- |
|
| 60 |
-// PassphraseFunc type for func that request a passphrase |
|
| 61 |
-type PassphraseFunc func(role string, confirm bool) ([]byte, error) |
|
| 62 |
- |
|
| 63 |
-// FileMetaEqual checks whether 2 FileMeta objects are consistent with each other |
|
| 64 |
-func FileMetaEqual(actual data.FileMeta, expected data.FileMeta) error {
|
|
| 65 |
- if actual.Length != expected.Length {
|
|
| 66 |
- return ErrWrongLength |
|
| 67 |
- } |
|
| 68 |
- hashChecked := false |
|
| 69 |
- for typ, hash := range expected.Hashes {
|
|
| 70 |
- if h, ok := actual.Hashes[typ]; ok {
|
|
| 71 |
- hashChecked = true |
|
| 72 |
- if !hmac.Equal(h, hash) {
|
|
| 73 |
- return ErrWrongHash{typ, hash, h}
|
|
| 74 |
- } |
|
| 75 |
- } |
|
| 76 |
- } |
|
| 77 |
- if !hashChecked {
|
|
| 78 |
- return ErrNoCommonHash{expected.Hashes, actual.Hashes}
|
|
| 79 |
- } |
|
| 80 |
- return nil |
|
| 81 |
-} |
|
| 82 |
- |
|
| 83 |
-// NormalizeTarget adds a slash, if required, to the front of a target path |
|
| 84 |
-func NormalizeTarget(path string) string {
|
|
| 85 |
- return gopath.Join("/", path)
|
|
| 86 |
-} |
|
| 87 |
- |
|
| 88 |
-// HashedPaths prefixes the filename with the known hashes for the file, |
|
| 89 |
-// returning a list of possible consistent paths. |
|
| 90 |
-func HashedPaths(path string, hashes data.Hashes) []string {
|
|
| 91 |
- paths := make([]string, 0, len(hashes)) |
|
| 92 |
- for _, hash := range hashes {
|
|
| 93 |
- hashedPath := filepath.Join(filepath.Dir(path), hex.EncodeToString(hash)+"."+filepath.Base(path)) |
|
| 94 |
- paths = append(paths, hashedPath) |
|
| 95 |
- } |
|
| 96 |
- return paths |
|
| 97 |
-} |
|
| 98 |
- |
|
| 99 |
-// CanonicalKeyID returns the ID of the public bytes version of a TUF key. |
|
| 100 |
-// On regular RSA/ECDSA TUF keys, this is just the key ID. On X509 RSA/ECDSA |
|
| 101 |
-// TUF keys, this is the key ID of the public key part of the key in the leaf cert |
|
| 102 |
-func CanonicalKeyID(k data.PublicKey) (string, error) {
|
|
| 103 |
- switch k.Algorithm() {
|
|
| 104 |
- case data.ECDSAx509Key, data.RSAx509Key: |
|
| 105 |
- return trustmanager.X509PublicKeyID(k) |
|
| 106 |
- default: |
|
| 107 |
- return k.ID(), nil |
|
| 108 |
- } |
|
| 109 |
-} |
| 110 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,551 @@ |
| 0 |
+package utils |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "crypto/ecdsa" |
|
| 5 |
+ "crypto/elliptic" |
|
| 6 |
+ "crypto/rand" |
|
| 7 |
+ "crypto/rsa" |
|
| 8 |
+ "crypto/x509" |
|
| 9 |
+ "crypto/x509/pkix" |
|
| 10 |
+ "encoding/pem" |
|
| 11 |
+ "errors" |
|
| 12 |
+ "fmt" |
|
| 13 |
+ "io" |
|
| 14 |
+ "io/ioutil" |
|
| 15 |
+ "math/big" |
|
| 16 |
+ "time" |
|
| 17 |
+ |
|
| 18 |
+ "github.com/Sirupsen/logrus" |
|
| 19 |
+ "github.com/agl/ed25519" |
|
| 20 |
+ "github.com/docker/notary" |
|
| 21 |
+ "github.com/docker/notary/tuf/data" |
|
| 22 |
+) |
|
| 23 |
+ |
|
| 24 |
+// CanonicalKeyID returns the ID of the public bytes version of a TUF key. |
|
| 25 |
+// On regular RSA/ECDSA TUF keys, this is just the key ID. On X509 RSA/ECDSA |
|
| 26 |
+// TUF keys, this is the key ID of the public key part of the key in the leaf cert |
|
| 27 |
+func CanonicalKeyID(k data.PublicKey) (string, error) {
|
|
| 28 |
+ switch k.Algorithm() {
|
|
| 29 |
+ case data.ECDSAx509Key, data.RSAx509Key: |
|
| 30 |
+ return X509PublicKeyID(k) |
|
| 31 |
+ default: |
|
| 32 |
+ return k.ID(), nil |
|
| 33 |
+ } |
|
| 34 |
+} |
|
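
A rough usage sketch of CanonicalKeyID (assuming this new file is importable as github.com/docker/notary/tuf/utils; "root.crt" is a hypothetical certificate file, and ParsePEMPublicKey is defined further down in this same file):

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/docker/notary/tuf/utils" // assumed import path for this file
)

func main() {
	// "root.crt" is a hypothetical PEM certificate backing an x509 TUF key.
	pemBytes, err := ioutil.ReadFile("root.crt")
	if err != nil {
		log.Fatal(err)
	}
	pubKey, err := utils.ParsePEMPublicKey(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	canonicalID, err := utils.CanonicalKeyID(pubKey)
	if err != nil {
		log.Fatal(err)
	}
	// For x509-wrapped keys the two IDs differ: ID() is derived from the whole
	// certificate, CanonicalKeyID from the public key inside the leaf cert.
	fmt.Println("cert key ID:     ", pubKey.ID())
	fmt.Println("canonical key ID:", canonicalID)
}
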
| 35 |
+ |
|
| 36 |
+// LoadCertFromPEM returns the first certificate found in a bunch of bytes, or an error |
|
| 37 |
+// if nothing is found. Taken from https://golang.org/src/crypto/x509/cert_pool.go#L85. |
|
| 38 |
+func LoadCertFromPEM(pemBytes []byte) (*x509.Certificate, error) {
|
|
| 39 |
+ for len(pemBytes) > 0 {
|
|
| 40 |
+ var block *pem.Block |
|
| 41 |
+ block, pemBytes = pem.Decode(pemBytes) |
|
| 42 |
+ if block == nil {
|
|
| 43 |
+ return nil, errors.New("no certificates found in PEM data")
|
|
| 44 |
+ } |
|
| 45 |
+ if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
|
|
| 46 |
+ continue |
|
| 47 |
+ } |
|
| 48 |
+ |
|
| 49 |
+ cert, err := x509.ParseCertificate(block.Bytes) |
|
| 50 |
+ if err != nil {
|
|
| 51 |
+ continue |
|
| 52 |
+ } |
|
| 53 |
+ |
|
| 54 |
+ return cert, nil |
|
| 55 |
+ } |
|
| 56 |
+ |
|
| 57 |
+ return nil, errors.New("no certificates found in PEM data")
|
|
| 58 |
+} |
|
| 59 |
+ |
|
| 60 |
+// X509PublicKeyID returns a public key ID as a string, given a |
|
| 61 |
+// data.PublicKey that contains an X509 Certificate |
|
| 62 |
+func X509PublicKeyID(certPubKey data.PublicKey) (string, error) {
|
|
| 63 |
+ // Note that this only loads the first certificate from the public key |
|
| 64 |
+ cert, err := LoadCertFromPEM(certPubKey.Public()) |
|
| 65 |
+ if err != nil {
|
|
| 66 |
+ return "", err |
|
| 67 |
+ } |
|
| 68 |
+ pubKeyBytes, err := x509.MarshalPKIXPublicKey(cert.PublicKey) |
|
| 69 |
+ if err != nil {
|
|
| 70 |
+ return "", err |
|
| 71 |
+ } |
|
| 72 |
+ |
|
| 73 |
+ var key data.PublicKey |
|
| 74 |
+ switch certPubKey.Algorithm() {
|
|
| 75 |
+ case data.ECDSAx509Key: |
|
| 76 |
+ key = data.NewECDSAPublicKey(pubKeyBytes) |
|
| 77 |
+ case data.RSAx509Key: |
|
| 78 |
+ key = data.NewRSAPublicKey(pubKeyBytes) |
|
| 79 |
+ } |
|
| 80 |
+ |
|
| 81 |
+ return key.ID(), nil |
|
| 82 |
+} |
|
| 83 |
+ |
|
| 84 |
+// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It |
|
| 85 |
+// supports RSA (PKCS#1), EC, and ED25519 keys, and attempts to decrypt using the passphrase, if encrypted. |
|
| 86 |
+func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) {
|
|
| 87 |
+ block, _ := pem.Decode(pemBytes) |
|
| 88 |
+ if block == nil {
|
|
| 89 |
+ return nil, errors.New("no valid private key found")
|
|
| 90 |
+ } |
|
| 91 |
+ |
|
| 92 |
+ var privKeyBytes []byte |
|
| 93 |
+ var err error |
|
| 94 |
+ if x509.IsEncryptedPEMBlock(block) {
|
|
| 95 |
+ privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase)) |
|
| 96 |
+ if err != nil {
|
|
| 97 |
+ return nil, errors.New("could not decrypt private key")
|
|
| 98 |
+ } |
|
| 99 |
+ } else {
|
|
| 100 |
+ privKeyBytes = block.Bytes |
|
| 101 |
+ } |
|
| 102 |
+ |
|
| 103 |
+ switch block.Type {
|
|
| 104 |
+ case "RSA PRIVATE KEY": |
|
| 105 |
+ rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKeyBytes) |
|
| 106 |
+ if err != nil {
|
|
| 107 |
+ return nil, fmt.Errorf("could not parse DER encoded key: %v", err)
|
|
| 108 |
+ } |
|
| 109 |
+ |
|
| 110 |
+ tufRSAPrivateKey, err := RSAToPrivateKey(rsaPrivKey) |
|
| 111 |
+ if err != nil {
|
|
| 112 |
+ return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err)
|
|
| 113 |
+ } |
|
| 114 |
+ |
|
| 115 |
+ return tufRSAPrivateKey, nil |
|
| 116 |
+ case "EC PRIVATE KEY": |
|
| 117 |
+ ecdsaPrivKey, err := x509.ParseECPrivateKey(privKeyBytes) |
|
| 118 |
+ if err != nil {
|
|
| 119 |
+ return nil, fmt.Errorf("could not parse DER encoded private key: %v", err)
|
|
| 120 |
+ } |
|
| 121 |
+ |
|
| 122 |
+ tufECDSAPrivateKey, err := ECDSAToPrivateKey(ecdsaPrivKey) |
|
| 123 |
+ if err != nil {
|
|
| 124 |
+ return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
|
|
| 125 |
+ } |
|
| 126 |
+ |
|
| 127 |
+ return tufECDSAPrivateKey, nil |
|
| 128 |
+ case "ED25519 PRIVATE KEY": |
|
| 129 |
+ // We serialize ED25519 keys by concatenating the private key |
|
| 130 |
+ // to the public key and encoding with PEM. See the |
|
| 131 |
+ // ED25519ToPrivateKey function. |
|
| 132 |
+ tufECDSAPrivateKey, err := ED25519ToPrivateKey(privKeyBytes) |
|
| 133 |
+ if err != nil {
|
|
| 134 |
+ return nil, fmt.Errorf("could not convert ed25519 key to data.PrivateKey: %v", err)
|
|
| 135 |
+ } |
|
| 136 |
+ |
|
| 137 |
+ return tufECDSAPrivateKey, nil |
|
| 138 |
+ |
|
| 139 |
+ default: |
|
| 140 |
+ return nil, fmt.Errorf("unsupported key type %q", block.Type)
|
|
| 141 |
+ } |
|
| 142 |
+} |
|
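
A minimal sketch of loading a (possibly encrypted) PEM private key with the function above; the import path, key file name, and passphrase are assumptions for illustration:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/docker/notary/tuf/utils" // assumed import path for this file
)

func main() {
	// "snapshot_key.pem" is a hypothetical key file; the passphrase is only
	// used when the PEM block is actually encrypted.
	pemBytes, err := ioutil.ReadFile("snapshot_key.pem")
	if err != nil {
		log.Fatal(err)
	}
	privKey, err := utils.ParsePEMPrivateKey(pemBytes, "hypothetical-passphrase")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("loaded %s key with ID %s\n", privKey.Algorithm(), privKey.ID())
}
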
| 143 |
+ |
|
| 144 |
+// CertToPEM is a utility function that returns a PEM encoded x509 Certificate |
|
| 145 |
+func CertToPEM(cert *x509.Certificate) []byte {
|
|
| 146 |
+ pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
|
|
| 147 |
+ |
|
| 148 |
+ return pemCert |
|
| 149 |
+} |
|
| 150 |
+ |
|
| 151 |
+// CertChainToPEM is a utility function that returns a PEM encoded chain of x509 Certificates, in the order they are passed |
|
| 152 |
+func CertChainToPEM(certChain []*x509.Certificate) ([]byte, error) {
|
|
| 153 |
+ var pemBytes bytes.Buffer |
|
| 154 |
+ for _, cert := range certChain {
|
|
| 155 |
+ if err := pem.Encode(&pemBytes, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
|
|
| 156 |
+ return nil, err |
|
| 157 |
+ } |
|
| 158 |
+ } |
|
| 159 |
+ return pemBytes.Bytes(), nil |
|
| 160 |
+} |
|
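
Because pem.Encode runs once per certificate, the output preserves the input order (leaf first, if that is how the chain was built). A hedged sketch with an assumed import path and a hypothetical bundle file; LoadCertBundleFromFile is defined further down in this file:

package main

import (
	"fmt"
	"log"

	"github.com/docker/notary/tuf/utils" // assumed import path for this file
)

func main() {
	// "chain.crt" is a hypothetical PEM bundle: leaf first, then intermediates.
	certs, err := utils.LoadCertBundleFromFile("chain.crt")
	if err != nil {
		log.Fatal(err)
	}
	// Re-encode the chain; the certificate order is preserved.
	pemChain, err := utils.CertChainToPEM(certs)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("re-encoded %d certificates into %d PEM bytes\n", len(certs), len(pemChain))
}
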
| 161 |
+ |
|
| 162 |
+// LoadCertFromFile loads the first certificate from the file provided. The |
|
| 163 |
+// data is expected to be PEM Encoded and contain one or more certificates |
|
| 164 |
+// with PEM type "CERTIFICATE" |
|
| 165 |
+func LoadCertFromFile(filename string) (*x509.Certificate, error) {
|
|
| 166 |
+ certs, err := LoadCertBundleFromFile(filename) |
|
| 167 |
+ if err != nil {
|
|
| 168 |
+ return nil, err |
|
| 169 |
+ } |
|
| 170 |
+ return certs[0], nil |
|
| 171 |
+} |
|
| 172 |
+ |
|
| 173 |
+// LoadCertBundleFromFile loads certificates from the []byte provided. The |
|
| 174 |
+// data is expected to be PEM Encoded and contain one of more certificates |
|
| 175 |
+// with PEM type "CERTIFICATE" |
|
| 176 |
+func LoadCertBundleFromFile(filename string) ([]*x509.Certificate, error) {
|
|
| 177 |
+ b, err := ioutil.ReadFile(filename) |
|
| 178 |
+ if err != nil {
|
|
| 179 |
+ return nil, err |
|
| 180 |
+ } |
|
| 181 |
+ |
|
| 182 |
+ return LoadCertBundleFromPEM(b) |
|
| 183 |
+} |
|
| 184 |
+ |
|
| 185 |
+// LoadCertBundleFromPEM loads certificates from the []byte provided. The |
|
| 186 |
+// data is expected to be PEM Encoded and contain one or more certificates |
|
| 187 |
+// with PEM type "CERTIFICATE" |
|
| 188 |
+func LoadCertBundleFromPEM(pemBytes []byte) ([]*x509.Certificate, error) {
|
|
| 189 |
+ certificates := []*x509.Certificate{}
|
|
| 190 |
+ var block *pem.Block |
|
| 191 |
+ block, pemBytes = pem.Decode(pemBytes) |
|
| 192 |
+ for ; block != nil; block, pemBytes = pem.Decode(pemBytes) {
|
|
| 193 |
+ if block.Type == "CERTIFICATE" {
|
|
| 194 |
+ cert, err := x509.ParseCertificate(block.Bytes) |
|
| 195 |
+ if err != nil {
|
|
| 196 |
+ return nil, err |
|
| 197 |
+ } |
|
| 198 |
+ certificates = append(certificates, cert) |
|
| 199 |
+ } else {
|
|
| 200 |
+ return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
|
|
| 201 |
+ } |
|
| 202 |
+ } |
|
| 203 |
+ |
|
| 204 |
+ if len(certificates) == 0 {
|
|
| 205 |
+ return nil, fmt.Errorf("no valid certificates found")
|
|
| 206 |
+ } |
|
| 207 |
+ |
|
| 208 |
+ return certificates, nil |
|
| 209 |
+} |
|
| 210 |
+ |
|
| 211 |
+// GetLeafCerts parses a list of x509 Certificates and returns all of them |
|
| 212 |
+// that aren't CAs |
|
| 213 |
+func GetLeafCerts(certs []*x509.Certificate) []*x509.Certificate {
|
|
| 214 |
+ var leafCerts []*x509.Certificate |
|
| 215 |
+ for _, cert := range certs {
|
|
| 216 |
+ if cert.IsCA {
|
|
| 217 |
+ continue |
|
| 218 |
+ } |
|
| 219 |
+ leafCerts = append(leafCerts, cert) |
|
| 220 |
+ } |
|
| 221 |
+ return leafCerts |
|
| 222 |
+} |
|
| 223 |
+ |
|
| 224 |
+// GetIntermediateCerts parses a list of x509 Certificates and returns all of the |
|
| 225 |
+// ones marked as a CA, to be used as intermediates |
|
| 226 |
+func GetIntermediateCerts(certs []*x509.Certificate) []*x509.Certificate {
|
|
| 227 |
+ var intCerts []*x509.Certificate |
|
| 228 |
+ for _, cert := range certs {
|
|
| 229 |
+ if cert.IsCA {
|
|
| 230 |
+ intCerts = append(intCerts, cert) |
|
| 231 |
+ } |
|
| 232 |
+ } |
|
| 233 |
+ return intCerts |
|
| 234 |
+} |
|
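
The two helpers above simply partition a bundle on the IsCA flag. A small sketch under the same import-path assumption, with a hypothetical bundle file:

package main

import (
	"fmt"
	"log"

	"github.com/docker/notary/tuf/utils" // assumed import path for this file
)

func main() {
	// "bundle.crt" is a hypothetical PEM file holding a leaf plus CA certs.
	certs, err := utils.LoadCertBundleFromFile("bundle.crt")
	if err != nil {
		log.Fatal(err)
	}
	leaves := utils.GetLeafCerts(certs)
	intermediates := utils.GetIntermediateCerts(certs)
	fmt.Printf("%d leaf cert(s), %d intermediate(s)\n", len(leaves), len(intermediates))
}
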
| 235 |
+ |
|
| 236 |
+// ParsePEMPublicKey returns a data.PublicKey from a PEM encoded public key or certificate; currently only certificates are supported. |
|
| 237 |
+func ParsePEMPublicKey(pubKeyBytes []byte) (data.PublicKey, error) {
|
|
| 238 |
+ pemBlock, _ := pem.Decode(pubKeyBytes) |
|
| 239 |
+ if pemBlock == nil {
|
|
| 240 |
+ return nil, errors.New("no valid public key found")
|
|
| 241 |
+ } |
|
| 242 |
+ |
|
| 243 |
+ switch pemBlock.Type {
|
|
| 244 |
+ case "CERTIFICATE": |
|
| 245 |
+ cert, err := x509.ParseCertificate(pemBlock.Bytes) |
|
| 246 |
+ if err != nil {
|
|
| 247 |
+ return nil, fmt.Errorf("could not parse provided certificate: %v", err)
|
|
| 248 |
+ } |
|
| 249 |
+ err = ValidateCertificate(cert, true) |
|
| 250 |
+ if err != nil {
|
|
| 251 |
+ return nil, fmt.Errorf("invalid certificate: %v", err)
|
|
| 252 |
+ } |
|
| 253 |
+ return CertToKey(cert), nil |
|
| 254 |
+ default: |
|
| 255 |
+ return nil, fmt.Errorf("unsupported PEM block type %q, expected certificate", pemBlock.Type)
|
|
| 256 |
+ } |
|
| 257 |
+} |
|
| 258 |
+ |
|
| 259 |
+// ValidateCertificate returns an error if the certificate is not valid for notary |
|
| 260 |
+// Currently this ensures only that the public key has a large enough modulus if RSA, |
|
| 261 |
+// that it uses a non-SHA1 signature algorithm, and, optionally, that the validity window is current |
|
| 262 |
+func ValidateCertificate(c *x509.Certificate, checkExpiry bool) error {
|
|
| 263 |
+ if (c.NotBefore).After(c.NotAfter) {
|
|
| 264 |
+ return fmt.Errorf("certificate validity window is invalid")
|
|
| 265 |
+ } |
|
| 266 |
+ // Can't have SHA1 sig algorithm |
|
| 267 |
+ if c.SignatureAlgorithm == x509.SHA1WithRSA || c.SignatureAlgorithm == x509.DSAWithSHA1 || c.SignatureAlgorithm == x509.ECDSAWithSHA1 {
|
|
| 268 |
+ return fmt.Errorf("certificate with CN %s uses invalid SHA1 signature algorithm", c.Subject.CommonName)
|
|
| 269 |
+ } |
|
| 270 |
+ // If we have an RSA key, make sure it's long enough |
|
| 271 |
+ if c.PublicKeyAlgorithm == x509.RSA {
|
|
| 272 |
+ rsaKey, ok := c.PublicKey.(*rsa.PublicKey) |
|
| 273 |
+ if !ok {
|
|
| 274 |
+ return fmt.Errorf("unable to parse RSA public key")
|
|
| 275 |
+ } |
|
| 276 |
+ if rsaKey.N.BitLen() < notary.MinRSABitSize {
|
|
| 277 |
+ return fmt.Errorf("RSA bit length is too short")
|
|
| 278 |
+ } |
|
| 279 |
+ } |
|
| 280 |
+ if checkExpiry {
|
|
| 281 |
+ now := time.Now() |
|
| 282 |
+ tomorrow := now.AddDate(0, 0, 1) |
|
| 283 |
+ // Give one day leeway on creation "before" time, check "after" against today |
|
| 284 |
+ if (tomorrow).Before(c.NotBefore) || now.After(c.NotAfter) {
|
|
| 285 |
+ return data.ErrCertExpired{CN: c.Subject.CommonName}
|
|
| 286 |
+ } |
|
| 287 |
+ // If this certificate is expiring within 6 months, put out a warning |
|
| 288 |
+ if (c.NotAfter).Before(time.Now().AddDate(0, 6, 0)) {
|
|
| 289 |
+ logrus.Warnf("certificate with CN %s is near expiry", c.Subject.CommonName)
|
|
| 290 |
+ } |
|
| 291 |
+ } |
|
| 292 |
+ return nil |
|
| 293 |
+} |
|
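
A sketch of running these checks against a certificate on disk (assumed import path; the file name is hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/docker/notary/tuf/utils" // assumed import path for this file
)

func main() {
	// "root.crt" is a hypothetical PEM certificate file.
	cert, err := utils.LoadCertFromFile("root.crt")
	if err != nil {
		log.Fatal(err)
	}
	// checkExpiry=false skips the NotBefore/NotAfter window check, which can
	// be useful when inspecting old fixtures; pass true to enforce it too.
	if err := utils.ValidateCertificate(cert, false); err != nil {
		log.Fatalf("certificate rejected: %v", err)
	}
	fmt.Println("certificate passes the notary checks")
}
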
| 294 |
+ |
|
| 295 |
+// GenerateRSAKey generates an RSA private key and returns a TUF PrivateKey |
|
| 296 |
+func GenerateRSAKey(random io.Reader, bits int) (data.PrivateKey, error) {
|
|
| 297 |
+ rsaPrivKey, err := rsa.GenerateKey(random, bits) |
|
| 298 |
+ if err != nil {
|
|
| 299 |
+ return nil, fmt.Errorf("could not generate private key: %v", err)
|
|
| 300 |
+ } |
|
| 301 |
+ |
|
| 302 |
+ tufPrivKey, err := RSAToPrivateKey(rsaPrivKey) |
|
| 303 |
+ if err != nil {
|
|
| 304 |
+ return nil, err |
|
| 305 |
+ } |
|
| 306 |
+ |
|
| 307 |
+ logrus.Debugf("generated RSA key with keyID: %s", tufPrivKey.ID())
|
|
| 308 |
+ |
|
| 309 |
+ return tufPrivKey, nil |
|
| 310 |
+} |
|
| 311 |
+ |
|
| 312 |
+// RSAToPrivateKey converts an rsa.PrivateKey to a TUF data.PrivateKey type |
|
| 313 |
+func RSAToPrivateKey(rsaPrivKey *rsa.PrivateKey) (data.PrivateKey, error) {
|
|
| 314 |
+ // Get a DER-encoded representation of the PublicKey |
|
| 315 |
+ rsaPubBytes, err := x509.MarshalPKIXPublicKey(&rsaPrivKey.PublicKey) |
|
| 316 |
+ if err != nil {
|
|
| 317 |
+ return nil, fmt.Errorf("failed to marshal public key: %v", err)
|
|
| 318 |
+ } |
|
| 319 |
+ |
|
| 320 |
+ // Get a DER-encoded representation of the PrivateKey |
|
| 321 |
+ rsaPrivBytes := x509.MarshalPKCS1PrivateKey(rsaPrivKey) |
|
| 322 |
+ |
|
| 323 |
+ pubKey := data.NewRSAPublicKey(rsaPubBytes) |
|
| 324 |
+ return data.NewRSAPrivateKey(pubKey, rsaPrivBytes) |
|
| 325 |
+} |
|
| 326 |
+ |
|
| 327 |
+// GenerateECDSAKey generates an ECDSA Private key and returns a TUF PrivateKey |
|
| 328 |
+func GenerateECDSAKey(random io.Reader) (data.PrivateKey, error) {
|
|
| 329 |
+ ecdsaPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), random) |
|
| 330 |
+ if err != nil {
|
|
| 331 |
+ return nil, err |
|
| 332 |
+ } |
|
| 333 |
+ |
|
| 334 |
+ tufPrivKey, err := ECDSAToPrivateKey(ecdsaPrivKey) |
|
| 335 |
+ if err != nil {
|
|
| 336 |
+ return nil, err |
|
| 337 |
+ } |
|
| 338 |
+ |
|
| 339 |
+ logrus.Debugf("generated ECDSA key with keyID: %s", tufPrivKey.ID())
|
|
| 340 |
+ |
|
| 341 |
+ return tufPrivKey, nil |
|
| 342 |
+} |
|
| 343 |
+ |
|
| 344 |
+// GenerateED25519Key generates an ED25519 private key and returns a TUF |
|
| 345 |
+// PrivateKey. The serialization format we use is just the public key bytes |
|
| 346 |
+// followed by the private key bytes |
|
| 347 |
+func GenerateED25519Key(random io.Reader) (data.PrivateKey, error) {
|
|
| 348 |
+ pub, priv, err := ed25519.GenerateKey(random) |
|
| 349 |
+ if err != nil {
|
|
| 350 |
+ return nil, err |
|
| 351 |
+ } |
|
| 352 |
+ |
|
| 353 |
+ var serialized [ed25519.PublicKeySize + ed25519.PrivateKeySize]byte |
|
| 354 |
+ copy(serialized[:], pub[:]) |
|
| 355 |
+ copy(serialized[ed25519.PublicKeySize:], priv[:]) |
|
| 356 |
+ |
|
| 357 |
+ tufPrivKey, err := ED25519ToPrivateKey(serialized[:]) |
|
| 358 |
+ if err != nil {
|
|
| 359 |
+ return nil, err |
|
| 360 |
+ } |
|
| 361 |
+ |
|
| 362 |
+ logrus.Debugf("generated ED25519 key with keyID: %s", tufPrivKey.ID())
|
|
| 363 |
+ |
|
| 364 |
+ return tufPrivKey, nil |
|
| 365 |
+} |
|
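
The serialization format described above (public key bytes followed by private key bytes) means the raw material is 32 + 64 = 96 bytes, and ED25519ToPrivateKey can rebuild the same key from it. A sketch, relying on the Private() accessor that data.PrivateKey already exposes (KeyToPEM below uses it too) and an assumed import path:

package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"github.com/agl/ed25519"
	"github.com/docker/notary/tuf/utils" // assumed import path for this file
)

func main() {
	key, err := utils.GenerateED25519Key(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	raw := key.Private() // pub || priv
	fmt.Println(len(raw) == ed25519.PublicKeySize+ed25519.PrivateKeySize) // true
	rebuilt, err := utils.ED25519ToPrivateKey(raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rebuilt.ID() == key.ID()) // true
}
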
| 366 |
+ |
|
| 367 |
+// ECDSAToPrivateKey converts an ecdsa.PrivateKey to a TUF data.PrivateKey type |
|
| 368 |
+func ECDSAToPrivateKey(ecdsaPrivKey *ecdsa.PrivateKey) (data.PrivateKey, error) {
|
|
| 369 |
+ // Get a DER-encoded representation of the PublicKey |
|
| 370 |
+ ecdsaPubBytes, err := x509.MarshalPKIXPublicKey(&ecdsaPrivKey.PublicKey) |
|
| 371 |
+ if err != nil {
|
|
| 372 |
+ return nil, fmt.Errorf("failed to marshal public key: %v", err)
|
|
| 373 |
+ } |
|
| 374 |
+ |
|
| 375 |
+ // Get a DER-encoded representation of the PrivateKey |
|
| 376 |
+ ecdsaPrivKeyBytes, err := x509.MarshalECPrivateKey(ecdsaPrivKey) |
|
| 377 |
+ if err != nil {
|
|
| 378 |
+ return nil, fmt.Errorf("failed to marshal private key: %v", err)
|
|
| 379 |
+ } |
|
| 380 |
+ |
|
| 381 |
+ pubKey := data.NewECDSAPublicKey(ecdsaPubBytes) |
|
| 382 |
+ return data.NewECDSAPrivateKey(pubKey, ecdsaPrivKeyBytes) |
|
| 383 |
+} |
|
| 384 |
+ |
|
| 385 |
+// ED25519ToPrivateKey converts a serialized ED25519 key to a TUF |
|
| 386 |
+// data.PrivateKey type |
|
| 387 |
+func ED25519ToPrivateKey(privKeyBytes []byte) (data.PrivateKey, error) {
|
|
| 388 |
+ if len(privKeyBytes) != ed25519.PublicKeySize+ed25519.PrivateKeySize {
|
|
| 389 |
+ return nil, errors.New("malformed ed25519 private key")
|
|
| 390 |
+ } |
|
| 391 |
+ |
|
| 392 |
+ pubKey := data.NewED25519PublicKey(privKeyBytes[:ed25519.PublicKeySize]) |
|
| 393 |
+ return data.NewED25519PrivateKey(*pubKey, privKeyBytes) |
|
| 394 |
+} |
|
| 395 |
+ |
|
| 396 |
+func blockType(k data.PrivateKey) (string, error) {
|
|
| 397 |
+ switch k.Algorithm() {
|
|
| 398 |
+ case data.RSAKey, data.RSAx509Key: |
|
| 399 |
+ return "RSA PRIVATE KEY", nil |
|
| 400 |
+ case data.ECDSAKey, data.ECDSAx509Key: |
|
| 401 |
+ return "EC PRIVATE KEY", nil |
|
| 402 |
+ case data.ED25519Key: |
|
| 403 |
+ return "ED25519 PRIVATE KEY", nil |
|
| 404 |
+ default: |
|
| 405 |
+ return "", fmt.Errorf("algorithm %s not supported", k.Algorithm())
|
|
| 406 |
+ } |
|
| 407 |
+} |
|
| 408 |
+ |
|
| 409 |
+// KeyToPEM returns a PEM encoded key from a Private Key |
|
| 410 |
+func KeyToPEM(privKey data.PrivateKey, role string) ([]byte, error) {
|
|
| 411 |
+ bt, err := blockType(privKey) |
|
| 412 |
+ if err != nil {
|
|
| 413 |
+ return nil, err |
|
| 414 |
+ } |
|
| 415 |
+ |
|
| 416 |
+ headers := map[string]string{}
|
|
| 417 |
+ if role != "" {
|
|
| 418 |
+ headers = map[string]string{
|
|
| 419 |
+ "role": role, |
|
| 420 |
+ } |
|
| 421 |
+ } |
|
| 422 |
+ |
|
| 423 |
+ block := &pem.Block{
|
|
| 424 |
+ Type: bt, |
|
| 425 |
+ Headers: headers, |
|
| 426 |
+ Bytes: privKey.Private(), |
|
| 427 |
+ } |
|
| 428 |
+ |
|
| 429 |
+ return pem.EncodeToMemory(block), nil |
|
| 430 |
+} |
|
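
A sketch of the role header round trip (assumed import path; ReadRoleFromPEM is defined a little further down in this file):

package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"github.com/docker/notary/tuf/utils" // assumed import path for this file
)

func main() {
	key, err := utils.GenerateECDSAKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// Unencrypted PEM, with the role recorded as a PEM header.
	pemBytes, err := utils.KeyToPEM(key, "targets")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(utils.ReadRoleFromPEM(pemBytes)) // "targets"
}
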
| 431 |
+ |
|
| 432 |
+// EncryptPrivateKey returns an encrypted PEM key given a PrivateKey |
|
| 433 |
+// and a passphrase |
|
| 434 |
+func EncryptPrivateKey(key data.PrivateKey, role, gun, passphrase string) ([]byte, error) {
|
|
| 435 |
+ bt, err := blockType(key) |
|
| 436 |
+ if err != nil {
|
|
| 437 |
+ return nil, err |
|
| 438 |
+ } |
|
| 439 |
+ |
|
| 440 |
+ password := []byte(passphrase) |
|
| 441 |
+ cipherType := x509.PEMCipherAES256 |
|
| 442 |
+ |
|
| 443 |
+ encryptedPEMBlock, err := x509.EncryptPEMBlock(rand.Reader, |
|
| 444 |
+ bt, |
|
| 445 |
+ key.Private(), |
|
| 446 |
+ password, |
|
| 447 |
+ cipherType) |
|
| 448 |
+ if err != nil {
|
|
| 449 |
+ return nil, err |
|
| 450 |
+ } |
|
| 451 |
+ |
|
| 452 |
+ if encryptedPEMBlock.Headers == nil {
|
|
| 453 |
+ return nil, fmt.Errorf("unable to encrypt key - invalid PEM file produced")
|
|
| 454 |
+ } |
|
| 455 |
+ encryptedPEMBlock.Headers["role"] = role |
|
| 456 |
+ |
|
| 457 |
+ if gun != "" {
|
|
| 458 |
+ encryptedPEMBlock.Headers["gun"] = gun |
|
| 459 |
+ } |
|
| 460 |
+ |
|
| 461 |
+ return pem.EncodeToMemory(encryptedPEMBlock), nil |
|
| 462 |
+} |
|
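
A sketch of an encrypt/decrypt round trip through EncryptPrivateKey and ParsePEMPrivateKey (assumed import path; the GUN and passphrase are hypothetical):

package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"github.com/docker/notary/tuf/utils" // assumed import path for this file
)

func main() {
	key, err := utils.GenerateECDSAKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// AES-256-encrypted PEM; role and gun end up in the PEM headers.
	encPEM, err := utils.EncryptPrivateKey(key, "snapshot", "docker.io/library/example", "s3cret")
	if err != nil {
		log.Fatal(err)
	}
	// ParsePEMPrivateKey decrypts with the same passphrase and rebuilds the key.
	decrypted, err := utils.ParsePEMPrivateKey(encPEM, "s3cret")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decrypted.ID() == key.ID()) // true
}
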
| 463 |
+ |
|
| 464 |
+// ReadRoleFromPEM returns the value from the role PEM header, if it exists |
|
| 465 |
+func ReadRoleFromPEM(pemBytes []byte) string {
|
|
| 466 |
+ pemBlock, _ := pem.Decode(pemBytes) |
|
| 467 |
+ if pemBlock == nil || pemBlock.Headers == nil {
|
|
| 468 |
+ return "" |
|
| 469 |
+ } |
|
| 470 |
+ role, ok := pemBlock.Headers["role"] |
|
| 471 |
+ if !ok {
|
|
| 472 |
+ return "" |
|
| 473 |
+ } |
|
| 474 |
+ return role |
|
| 475 |
+} |
|
| 476 |
+ |
|
| 477 |
+// CertToKey transforms a single input certificate into its corresponding |
|
| 478 |
+// PublicKey |
|
| 479 |
+func CertToKey(cert *x509.Certificate) data.PublicKey {
|
|
| 480 |
+ block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
|
|
| 481 |
+ pemdata := pem.EncodeToMemory(&block) |
|
| 482 |
+ |
|
| 483 |
+ switch cert.PublicKeyAlgorithm {
|
|
| 484 |
+ case x509.RSA: |
|
| 485 |
+ return data.NewRSAx509PublicKey(pemdata) |
|
| 486 |
+ case x509.ECDSA: |
|
| 487 |
+ return data.NewECDSAx509PublicKey(pemdata) |
|
| 488 |
+ default: |
|
| 489 |
+ logrus.Debugf("Unknown key type parsed from certificate: %v", cert.PublicKeyAlgorithm)
|
|
| 490 |
+ return nil |
|
| 491 |
+ } |
|
| 492 |
+} |
|
| 493 |
+ |
|
| 494 |
+// CertsToKeys transforms each of the input certificate chains into its corresponding |
|
| 495 |
+// PublicKey |
|
| 496 |
+func CertsToKeys(leafCerts map[string]*x509.Certificate, intCerts map[string][]*x509.Certificate) map[string]data.PublicKey {
|
|
| 497 |
+ keys := make(map[string]data.PublicKey) |
|
| 498 |
+ for id, leafCert := range leafCerts {
|
|
| 499 |
+ if key, err := CertBundleToKey(leafCert, intCerts[id]); err == nil {
|
|
| 500 |
+ keys[key.ID()] = key |
|
| 501 |
+ } |
|
| 502 |
+ } |
|
| 503 |
+ return keys |
|
| 504 |
+} |
|
| 505 |
+ |
|
| 506 |
+// CertBundleToKey creates a TUF key from a leaf cert and a list of |
|
| 507 |
+// intermediates |
|
| 508 |
+func CertBundleToKey(leafCert *x509.Certificate, intCerts []*x509.Certificate) (data.PublicKey, error) {
|
|
| 509 |
+ certBundle := []*x509.Certificate{leafCert}
|
|
| 510 |
+ certBundle = append(certBundle, intCerts...) |
|
| 511 |
+ certChainPEM, err := CertChainToPEM(certBundle) |
|
| 512 |
+ if err != nil {
|
|
| 513 |
+ return nil, err |
|
| 514 |
+ } |
|
| 515 |
+ var newKey data.PublicKey |
|
| 516 |
+ // Use the leaf cert's public key algorithm for typing |
|
| 517 |
+ switch leafCert.PublicKeyAlgorithm {
|
|
| 518 |
+ case x509.RSA: |
|
| 519 |
+ newKey = data.NewRSAx509PublicKey(certChainPEM) |
|
| 520 |
+ case x509.ECDSA: |
|
| 521 |
+ newKey = data.NewECDSAx509PublicKey(certChainPEM) |
|
| 522 |
+ default: |
|
| 523 |
+ logrus.Debugf("Unknown key type parsed from certificate: %v", leafCert.PublicKeyAlgorithm)
|
|
| 524 |
+ return nil, x509.ErrUnsupportedAlgorithm |
|
| 525 |
+ } |
|
| 526 |
+ return newKey, nil |
|
| 527 |
+} |
|
| 528 |
+ |
|
| 529 |
+// NewCertificate returns an X509 Certificate following a template, given a GUN and validity interval. |
|
| 530 |
+func NewCertificate(gun string, startTime, endTime time.Time) (*x509.Certificate, error) {
|
|
| 531 |
+ serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) |
|
| 532 |
+ |
|
| 533 |
+ serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) |
|
| 534 |
+ if err != nil {
|
|
| 535 |
+ return nil, fmt.Errorf("failed to generate new certificate: %v", err)
|
|
| 536 |
+ } |
|
| 537 |
+ |
|
| 538 |
+ return &x509.Certificate{
|
|
| 539 |
+ SerialNumber: serialNumber, |
|
| 540 |
+ Subject: pkix.Name{
|
|
| 541 |
+ CommonName: gun, |
|
| 542 |
+ }, |
|
| 543 |
+ NotBefore: startTime, |
|
| 544 |
+ NotAfter: endTime, |
|
| 545 |
+ |
|
| 546 |
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, |
|
| 547 |
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
|
|
| 548 |
+ BasicConstraintsValid: true, |
|
| 549 |
+ }, nil |
|
| 550 |
+} |
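
NewCertificate only builds a template; it still has to be signed. A sketch that self-signs the template with a throwaway ECDSA key and wraps the result as a TUF key (assumed import path; the GUN is hypothetical):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"fmt"
	"log"
	"time"

	"github.com/docker/notary/tuf/utils" // assumed import path for this file
)

func main() {
	start := time.Now()
	template, err := utils.NewCertificate("docker.io/library/example", start, start.AddDate(10, 0, 0))
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// Self-signed: the template acts as its own parent.
	der, err := x509.CreateCertificate(rand.Reader, template, template, &signer.PublicKey, signer)
	if err != nil {
		log.Fatal(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		log.Fatal(err)
	}
	tufKey := utils.CertToKey(cert)
	fmt.Println(tufKey.Algorithm(), tufKey.ID())
}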