Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>

@@ -3,11 +3,13 @@ package build // import "github.com/docker/docker/api/server/backend/build"
 import (
 	"context"
 	"fmt"
+	"strings"

 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/builder"
+	buildkit "github.com/docker/docker/builder/builder-next"
 	"github.com/docker/docker/builder/fscache"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/stringid"
@@ -30,24 +32,39 @@ type Backend struct {
 	builder        Builder
 	fsCache        *fscache.FSCache
 	imageComponent ImageComponent
+	buildkit       *buildkit.Builder
 }

 // NewBackend creates a new build backend from components
-func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache) (*Backend, error) {
-	return &Backend{imageComponent: components, builder: builder, fsCache: fsCache}, nil
+func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache, buildkit *buildkit.Builder) (*Backend, error) {
+	return &Backend{imageComponent: components, builder: builder, fsCache: fsCache, buildkit: buildkit}, nil
 }

 // Build builds an image from a Source
 func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) {
 	options := config.Options
+	useBuildKit := false
+	if strings.HasPrefix(options.SessionID, "buildkit:") {
+		useBuildKit = true
+		options.SessionID = strings.TrimPrefix(options.SessionID, "buildkit:")
+	}
+
 	tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags)
 	if err != nil {
 		return "", err
 	}

-	build, err := b.builder.Build(ctx, config)
-	if err != nil {
-		return "", err
+	var build *builder.Result
+	if useBuildKit {
+		build, err = b.buildkit.Build(ctx, config)
+		if err != nil {
+			return "", err
+		}
+	} else {
+		build, err = b.builder.Build(ctx, config)
+		if err != nil {
+			return "", err
+		}
 	}

 	var imageID = build.ImageID
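Note on the dispatch above: there is no dedicated API field for selecting a builder yet, so the choice rides on the session ID. A minimal client-side sketch of the convention (the helper name is illustrative, not part of this change):

package main

import (
	"fmt"
	"strings"
)

// newBuildKitSessionID is a hypothetical helper showing how a caller
// would opt into BuildKit under this scheme.
func newBuildKitSessionID(sessionID string) string {
	return "buildkit:" + sessionID
}

func main() {
	id := newBuildKitSessionID("abc123")
	// The backend reverses this exactly as Build() does above.
	useBuildKit := strings.HasPrefix(id, "buildkit:")
	fmt.Println(useBuildKit, strings.TrimPrefix(id, "buildkit:")) // true abc123
}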
new file mode 100644
@@ -0,0 +1,416 @@
+package buildkit
+
+import (
+	"context"
+	"encoding/json"
+	"io"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/content/local"
+	"github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/builder"
+	"github.com/docker/docker/builder/builder-next/containerimage"
+	containerimageexp "github.com/docker/docker/builder/builder-next/exporter"
+	"github.com/docker/docker/builder/builder-next/snapshot"
+	mobyworker "github.com/docker/docker/builder/builder-next/worker"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/images"
+	"github.com/docker/docker/pkg/jsonmessage"
+	controlapi "github.com/moby/buildkit/api/services/control"
+	"github.com/moby/buildkit/cache"
+	"github.com/moby/buildkit/cache/metadata"
+	"github.com/moby/buildkit/control"
+	"github.com/moby/buildkit/executor/runcexecutor"
+	"github.com/moby/buildkit/exporter"
+	"github.com/moby/buildkit/frontend"
+	"github.com/moby/buildkit/frontend/dockerfile"
+	"github.com/moby/buildkit/identity"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/snapshot/blobmapping"
+	"github.com/moby/buildkit/solver-next/boltdbcachestorage"
+	"github.com/moby/buildkit/worker"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	netcontext "golang.org/x/net/context"
+	"golang.org/x/sync/errgroup"
+	grpcmetadata "google.golang.org/grpc/metadata"
+)
+
+// Builder defines interface for running a build
+// type Builder interface {
+// 	Build(context.Context, backend.BuildConfig) (*builder.Result, error)
+// }
+
+// Result is the output produced by a Builder
+// type Result struct {
+// 	ImageID string
+// 	// FromImage Image
+// }
+
+type Opt struct {
+	SessionManager *session.Manager
+	Root           string
+	Dist           images.DistributionServices
+}
+
+type Builder struct {
+	controller *control.Controller
+	results    *results
+}
+
+func New(opt Opt) (*Builder, error) {
+	results := newResultsGetter()
+
+	c, err := newController(opt, results.ch)
+	if err != nil {
+		return nil, err
+	}
+	b := &Builder{
+		controller: c,
+		results:    results,
+	}
+	return b, nil
+}
+
+func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.Result, error) {
+	id := identity.NewID()
+
+	attrs := map[string]string{
+		"ref": id,
+	}
+
+	frontendAttrs := map[string]string{}
+
+	if opt.Options.Target != "" {
+		frontendAttrs["target"] = opt.Options.Target
+	}
+
+	if opt.Options.Dockerfile != "" && opt.Options.Dockerfile != "." {
+		frontendAttrs["filename"] = opt.Options.Dockerfile
+	}
+
+	if opt.Options.RemoteContext != "" {
+		frontendAttrs["context"] = opt.Options.RemoteContext
+	}
+
+	logrus.Debugf("frontend: %+v", frontendAttrs)
+
+	for k, v := range opt.Options.BuildArgs {
+		if v == nil {
+			continue
+		}
+		frontendAttrs["build-arg:"+k] = *v
+	}
+
+	req := &controlapi.SolveRequest{
+		Ref:           id,
+		Exporter:      "image",
+		ExporterAttrs: attrs,
+		Frontend:      "dockerfile.v0",
+		FrontendAttrs: frontendAttrs,
+		Session:       opt.Options.SessionID,
+	}
+
+	eg, ctx := errgroup.WithContext(ctx)
+
+	eg.Go(func() error {
+		_, err := b.controller.Solve(ctx, req)
+		return err
+	})
+
+	ch := make(chan *controlapi.StatusResponse)
+
+	eg.Go(func() error {
+		defer close(ch)
+		return b.controller.Status(&controlapi.StatusRequest{
+			Ref: id,
+		}, &statusProxy{ctx: ctx, ch: ch})
+	})
+
+	eg.Go(func() error {
+		for sr := range ch {
+			dt, err := sr.Marshal()
+			if err != nil {
+				return err
+			}
+
+			auxJSONBytes, err := json.Marshal(dt)
+			if err != nil {
+				return err
+			}
+			auxJSON := new(json.RawMessage)
+			*auxJSON = auxJSONBytes
+			msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{ID: "buildkit-trace", Aux: auxJSON})
+			if err != nil {
+				return err
+			}
+			msgJSON = append(msgJSON, []byte("\r\n")...)
+			n, err := opt.ProgressWriter.Output.Write(msgJSON)
+			if err != nil {
+				return err
+			}
+			if n != len(msgJSON) {
+				return io.ErrShortWrite
+			}
+		}
+		return nil
+	})
+
+	out := &builder.Result{}
+	eg.Go(func() error {
+		res, err := b.results.wait(ctx, id)
+		if err != nil {
+			return err
+		}
+		out.ImageID = string(res.ID)
+		return nil
+	})
+
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
+
+func newController(opt Opt, reporter chan containerimageexp.Result) (*control.Controller, error) {
+	if err := os.MkdirAll(opt.Root, 0700); err != nil {
+		return nil, err
+	}
+
+	dist := opt.Dist
+	root := opt.Root
+
+	var driver graphdriver.Driver
+	if ls, ok := dist.LayerStore.(interface {
+		Driver() graphdriver.Driver
+	}); ok {
+		driver = ls.Driver()
+	} else {
+		return nil, errors.Errorf("could not access graphdriver")
+	}
+
+	sbase, err := snapshot.NewSnapshotter(snapshot.Opt{
+		GraphDriver: driver,
+		LayerStore:  dist.LayerStore,
+		Root:        root,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	store, err := local.NewStore(filepath.Join(root, "content"))
+	if err != nil {
+		return nil, err
+	}
+	store = &contentStoreNoLabels{store}
+
+	md, err := metadata.NewStore(filepath.Join(root, "metadata.db"))
+	if err != nil {
+		return nil, err
+	}
+
+	snapshotter := blobmapping.NewSnapshotter(blobmapping.Opt{
+		Content:       store,
+		Snapshotter:   sbase,
+		MetadataStore: md,
+	})
+
+	cm, err := cache.NewManager(cache.ManagerOpt{
+		Snapshotter:   snapshotter,
+		MetadataStore: md,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	src, err := containerimage.NewSource(containerimage.SourceOpt{
+		SessionManager:  opt.SessionManager,
+		CacheAccessor:   cm,
+		ContentStore:    store,
+		DownloadManager: dist.DownloadManager,
+		MetadataStore:   dist.V2MetadataService,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	exec, err := runcexecutor.New(runcexecutor.Opt{
+		Root:              filepath.Join(root, "executor"),
+		CommandCandidates: []string{"docker-runc", "runc"},
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	differ, ok := sbase.(containerimageexp.Differ)
+	if !ok {
+		return nil, errors.Errorf("snapshotter doesn't support differ")
+	}
+
+	exp, err := containerimageexp.New(containerimageexp.Opt{
+		ImageStore:     dist.ImageStore,
+		ReferenceStore: dist.ReferenceStore,
+		Differ:         differ,
+		Reporter:       reporter,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	cacheStorage, err := boltdbcachestorage.NewStore(filepath.Join(opt.Root, "cache.db"))
+	if err != nil {
+		return nil, err
+	}
+
+	frontends := map[string]frontend.Frontend{}
+	frontends["dockerfile.v0"] = dockerfile.NewDockerfileFrontend()
+	// frontends["gateway.v0"] = gateway.NewGatewayFrontend()
+
+	// mdb := ctdmetadata.NewDB(db, c, map[string]ctdsnapshot.Snapshotter{
+	// 	"moby": s,
+	// })
+	// if err := mdb.Init(context.TODO()); err != nil {
+	// 	return opt, err
+	// }
+	//
+	// throttledGC := throttle.Throttle(time.Second, func() {
+	// 	if _, err := mdb.GarbageCollect(context.TODO()); err != nil {
+	// 		logrus.Errorf("GC error: %+v", err)
+	// 	}
+	// })
+	//
+	// gc := func(ctx context.Context) error {
+	// 	throttledGC()
+	// 	return nil
+	// }
+
+	wopt := mobyworker.WorkerOpt{
+		ID:             "moby",
+		SessionManager: opt.SessionManager,
+		MetadataStore:  md,
+		ContentStore:   store,
+		CacheManager:   cm,
+		Snapshotter:    snapshotter,
+		Executor:       exec,
+		ImageSource:    src,
+		Exporters: map[string]exporter.Exporter{
+			"image": exp,
+		},
+	}
+
+	wc := &worker.Controller{}
+	w, err := mobyworker.NewWorker(wopt)
+	if err != nil {
+		return nil, err
+	}
+	wc.Add(w)
+
+	return control.NewController(control.Opt{
+		SessionManager:   opt.SessionManager,
+		WorkerController: wc,
+		Frontends:        frontends,
+		CacheKeyStorage:  cacheStorage,
+		// CacheExporter: ce,
+		// CacheImporter: ci,
+	})
+}
+
+type statusProxy struct {
+	ctx context.Context
+	ch  chan *controlapi.StatusResponse
+}
+
+func (sp *statusProxy) SetHeader(_ grpcmetadata.MD) error {
+	return nil
+}
+
+func (sp *statusProxy) SendHeader(_ grpcmetadata.MD) error {
+	return nil
+}
+
+func (sp *statusProxy) SetTrailer(_ grpcmetadata.MD) {
+}
+
+func (sp *statusProxy) Send(resp *controlapi.StatusResponse) error {
+	return sp.SendMsg(resp)
+}
+
+func (sp *statusProxy) Context() netcontext.Context {
+	return sp.ctx
+}
+func (sp *statusProxy) SendMsg(m interface{}) error {
+	if sr, ok := m.(*controlapi.StatusResponse); ok {
+		sp.ch <- sr
+	}
+	return nil
+}
+func (sp *statusProxy) RecvMsg(m interface{}) error {
+	return io.EOF
+}
+
+type results struct {
+	ch   chan containerimageexp.Result
+	res  map[string]containerimageexp.Result
+	mu   sync.Mutex
+	cond *sync.Cond
+}
+
+func newResultsGetter() *results {
+	r := &results{
+		ch:  make(chan containerimageexp.Result),
+		res: map[string]containerimageexp.Result{},
+	}
+	r.cond = sync.NewCond(&r.mu)
+
+	go func() {
+		for res := range r.ch {
+			r.mu.Lock()
+			r.res[res.Ref] = res
+			r.cond.Broadcast()
+			r.mu.Unlock()
+		}
+	}()
+	return r
+}
+
+func (r *results) wait(ctx context.Context, ref string) (*containerimageexp.Result, error) {
+	done := make(chan struct{})
+	defer close(done)
+	go func() {
+		select {
+		case <-ctx.Done():
+			r.mu.Lock()
+			r.cond.Broadcast()
+			r.mu.Unlock()
+		case <-done:
+		}
+	}()
+
+	r.mu.Lock()
+	for {
+		select {
+		case <-ctx.Done():
+			r.mu.Unlock()
+			return nil, ctx.Err()
+		default:
+		}
+		res, ok := r.res[ref]
+		if ok {
+			r.mu.Unlock()
+			return &res, nil
+		}
+		r.cond.Wait()
+	}
+}
+
+type contentStoreNoLabels struct {
+	content.Store
+}
+
+func (c *contentStoreNoLabels) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
+	return content.Info{}, nil
+}
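The results/newResultsGetter pair above is a broadcast-on-arrival rendezvous: the exporter reports finished builds on a channel, and wait blocks on a sync.Cond until the ref appears or the context is cancelled (a helper goroutine broadcasts on cancellation, because cond.Wait cannot observe a context directly). A self-contained sketch of the same pattern, with illustrative names:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// waiter mirrors the results type above: values arrive on ch,
// waiters are woken by a condition-variable broadcast.
type waiter struct {
	ch   chan string
	seen map[string]bool
	mu   sync.Mutex
	cond *sync.Cond
}

func newWaiter() *waiter {
	w := &waiter{ch: make(chan string), seen: map[string]bool{}}
	w.cond = sync.NewCond(&w.mu)
	go func() {
		for ref := range w.ch {
			w.mu.Lock()
			w.seen[ref] = true
			w.cond.Broadcast()
			w.mu.Unlock()
		}
	}()
	return w
}

func (w *waiter) wait(ctx context.Context, ref string) error {
	done := make(chan struct{})
	defer close(done)
	// A cancelled context cannot interrupt cond.Wait directly,
	// so a helper goroutine broadcasts on cancellation.
	go func() {
		select {
		case <-ctx.Done():
			w.mu.Lock()
			w.cond.Broadcast()
			w.mu.Unlock()
		case <-done:
		}
	}()
	w.mu.Lock()
	defer w.mu.Unlock()
	for !w.seen[ref] {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		w.cond.Wait()
	}
	return nil
}

func main() {
	w := newWaiter()
	go func() { time.Sleep(10 * time.Millisecond); w.ch <- "build-1" }()
	fmt.Println(w.wait(context.Background(), "build-1")) // <nil>
}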
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"runtime"
 	"sync"
 	"time"
@@ -22,7 +23,6 @@ import (
 	"github.com/docker/docker/distribution/xfer"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/layer"
-	"github.com/docker/docker/pkg/ioutils"
 	pkgprogress "github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/reference"
 	"github.com/moby/buildkit/cache"
@@ -36,8 +36,8 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	netcontext "golang.org/x/net/context"
+	"golang.org/x/time/rate"
 )

 type SourceOpt struct {
@@ -92,23 +92,22 @@ func (is *imageSource) getCredentialsFromSession(ctx context.Context) func(strin
 }

 func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) {
-	// type t struct {
-	// 	dgst digest.Digest
-	// 	dt   []byte
-	// }
-	// res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
-	// 	dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx), is.ContentStore)
-	// 	if err != nil {
-	// 		return nil, err
-	// 	}
-	// 	return &t{dgst: dgst, dt: dt}, nil
-	// })
-	// if err != nil {
-	// 	return "", nil, err
-	// }
-	// typed := res.(*t)
-	// return typed.dgst, typed.dt, nil
-	return "", nil, errors.Errorf("not-implemented")
+	type t struct {
+		dgst digest.Digest
+		dt   []byte
+	}
+	res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
+		dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx), is.ContentStore, "")
+		if err != nil {
+			return nil, err
+		}
+		return &t{dgst: dgst, dt: dt}, nil
+	})
+	if err != nil {
+		return "", nil, err
+	}
+	typed := res.(*t)
+	return typed.dgst, typed.dt, nil
 }

 func (is *imageSource) Resolve(ctx context.Context, id source.Identifier) (source.SourceInstance, error) {
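is.g.Do deduplicates concurrent ResolveImageConfig calls for the same ref, so parallel build vertexes don't each hit the registry. The sketch below shows the same pattern with golang.org/x/sync/singleflight, used as a stand-in for the group type in this file (the sleep just makes the calls overlap):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var calls int64

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Overlapping callers with the same key share one resolution.
			v, _, _ := g.Do("docker.io/library/alpine:latest", func() (interface{}, error) {
				atomic.AddInt64(&calls, 1)
				time.Sleep(50 * time.Millisecond) // simulate a registry round-trip
				return "resolved-config", nil
			})
			_ = v
		}()
	}
	wg.Wait()
	fmt.Println("registry hits:", atomic.LoadInt64(&calls)) // almost always 1
}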
@@ -189,7 +188,17 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {

 	pctx, stopProgress := context.WithCancel(ctx)

-	go showProgress(pctx, ongoing, p.is.ContentStore)
+	pw, _, ctx := progress.FromContext(ctx)
+	defer pw.Close()
+
+	progressDone := make(chan struct{})
+	go func() {
+		showProgress(pctx, ongoing, p.is.ContentStore, pw)
+		close(progressDone)
+	}()
+	defer func() {
+		<-progressDone
+	}()

 	fetcher, err := p.resolver.Fetcher(ctx, p.ref)
 	if err != nil {
@@ -213,14 +222,33 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
 			return nil, nil
 		}),
 	}
+	// var schema1Converter *schema1.Converter
+	// if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
+	// 	schema1Converter = schema1.NewConverter(p.is.ContentStore, fetcher)
+	// 	handlers = append(handlers, schema1Converter)
+	// } else {
+	// 	handlers = append(handlers,
+	// 		remotes.FetchHandler(p.is.ContentStore, fetcher),
+	//
+	// 		images.ChildrenHandler(p.is.ContentStore),
+	// 	)
+	// }
+	//
 	var schema1Converter *schema1.Converter
 	if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
 		schema1Converter = schema1.NewConverter(p.is.ContentStore, fetcher)
 		handlers = append(handlers, schema1Converter)
 	} else {
+		// Get all the children for a descriptor
+		childrenHandler := images.ChildrenHandler(p.is.ContentStore)
+		// Set any children labels for that content
+		childrenHandler = images.SetChildrenLabels(p.is.ContentStore, childrenHandler)
+		// Filter the children by the platform
+		childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Default())
+
 		handlers = append(handlers,
 			remotes.FetchHandler(p.is.ContentStore, fetcher),
-			images.ChildrenHandler(p.is.ContentStore, platforms.Default()),
+			childrenHandler,
 		)
 	}
@@ -228,7 +256,7 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
 		stopProgress()
 		return nil, err
 	}
-	stopProgress()
+	defer stopProgress()

 	mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platforms.Default())
 	if err != nil {
@@ -255,16 +283,41 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
 	}

 	pchan := make(chan pkgprogress.Progress, 10)
+	defer close(pchan)

 	go func() {
+		m := map[string]struct {
+			st      time.Time
+			limiter *rate.Limiter
+		}{}
 		for p := range pchan {
-			logrus.Debugf("progress %+v", p)
+			if p.Action == "Extracting" {
+				st, ok := m[p.ID]
+				if !ok {
+					st.st = time.Now()
+					st.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
+					m[p.ID] = st
+				}
+				var end *time.Time
+				if p.LastUpdate || st.limiter.Allow() {
+					if p.LastUpdate {
+						tm := time.Now()
+						end = &tm
+					}
+					pw.Write("extracting "+p.ID, progress.Status{
+						Action:    "extract",
+						Started:   &st.st,
+						Completed: end,
+					})
+				}
+			}
 		}
 	}()

 	layers := make([]xfer.DownloadDescriptor, 0, len(mfst.Layers))

 	for i, desc := range mfst.Layers {
+		ongoing.add(desc)
 		layers = append(layers, &layerDescriptor{
 			desc:   desc,
 			diffID: layer.DiffID(img.RootFS.DiffIDs[i]),
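The extraction branch above throttles per-layer status writes with a token bucket (at most one update per 100ms), and p.LastUpdate bypasses the limiter so the final state is never dropped. A standalone illustration of that throttle:

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// One event per 100ms, burst of 1 — the same settings as above.
	limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1)

	emitted := 0
	for i := 0; i < 50; i++ {
		last := i == 49
		// Final events bypass the limiter so completion is never dropped.
		if last || limiter.Allow() {
			emitted++
		}
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Println("emitted:", emitted) // roughly 6 of 50, plus the final event
}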
@@ -274,11 +327,19 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
 		})
 	}

+	defer func() {
+		<-progressDone
+		for _, desc := range mfst.Layers {
+			p.is.ContentStore.Delete(context.TODO(), desc.Digest)
+		}
+	}()
+
 	r := image.NewRootFS()
 	rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, pkgprogress.ChanOutput(pchan))
 	if err != nil {
 		return nil, err
 	}
+	stopProgress()

 	ref, err := p.is.CacheAccessor.Get(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
 	release()
@@ -317,8 +378,9 @@ func (ld *layerDescriptor) Download(ctx netcontext.Context, progressOutput pkgpr
 	}
 	defer rc.Close()

-	// TODO: progress
-	if err := content.WriteBlob(ctx, ld.is.ContentStore, ld.desc.Digest.String(), rc, ld.desc.Size, ld.desc.Digest); err != nil {
+	refKey := remotes.MakeRefKey(ctx, ld.desc)
+
+	if err := content.WriteBlob(ctx, ld.is.ContentStore, refKey, rc, ld.desc.Size, ld.desc.Digest); err != nil {
 		return nil, 0, err
 	}
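Switching the ingest ref from ld.desc.Digest.String() to remotes.MakeRefKey matters because the ref names the in-progress ingest (letting an interrupted download be resumed), while the expected digest still verifies the finished blob. A sketch of that WriteBlob contract against a local store, using the call signature the vendored containerd exposes in this diff; the literal "layer-" prefix only mimics MakeRefKey output and is an assumption:

package main

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"os"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/local"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	dir, _ := ioutil.TempDir("", "content-example")
	defer os.RemoveAll(dir)

	cs, err := local.NewStore(dir)
	if err != nil {
		panic(err)
	}

	blob := []byte("layer data")
	dgst := digest.FromBytes(blob)

	// The ref names the ingest so a partial write can resume later;
	// the expected digest verifies the completed blob.
	ref := "layer-" + dgst.String()
	if err := content.WriteBlob(context.Background(), cs, ref, bytes.NewReader(blob), int64(len(blob)), dgst); err != nil {
		panic(err)
	}
	fmt.Println("ingested", dgst)
}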
@@ -327,9 +389,7 @@ func (ld *layerDescriptor) Download(ctx netcontext.Context, progressOutput pkgpr
 		return nil, 0, err
 	}

-	return ioutils.NewReadCloserWrapper(content.NewReader(ra), func() error {
-		return ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest)
-	}), ld.desc.Size, nil
+	return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
 }

 func (ld *layerDescriptor) Close() {
@@ -341,7 +401,7 @@ func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
 	ld.is.MetadataStore.Add(diffID, metadata.V2Metadata{Digest: ld.desc.Digest, SourceRepository: ld.ref.Locator})
 }

-func showProgress(ctx context.Context, ongoing *jobs, cs content.Store) {
+func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progress.Writer) {
 	var (
 		ticker   = time.NewTicker(100 * time.Millisecond)
 		statuses = map[string]statusInfo{}
@@ -349,9 +409,6 @@ func showProgress(ctx context.Context, ongoing *jobs, cs content.Store) {
 	)
 	defer ticker.Stop()

-	pw, _, ctx := progress.FromContext(ctx)
-	defer pw.Close()
-
 	for {
 		select {
 		case <-ticker.C:
@@ -371,7 +428,7 @@ func showProgress(ctx context.Context, ongoing *jobs, cs content.Store) {
 			actives := make(map[string]statusInfo)

 			if !done {
-				active, err := cs.ListStatuses(ctx, "")
+				active, err := cs.ListStatuses(ctx)
 				if err != nil {
 					// log.G(ctx).WithError(err).Error("active check failed")
 					continue
@@ -407,9 +464,9 @@ func showProgress(ctx context.Context, ongoing *jobs, cs content.Store) {
 			info, err := cs.Info(context.TODO(), j.Digest)
 			if err != nil {
 				if errdefs.IsNotFound(err) {
-					pw.Write(j.Digest.String(), progress.Status{
-						Action: "waiting",
-					})
+					// pw.Write(j.Digest.String(), progress.Status{
+					// 	Action: "waiting",
+					// })
 					continue
 				}
 			} else {
@@ -23,10 +23,17 @@ type Differ interface {
 	EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
 }

+// TODO: this needs to be handled differently (return from solve)
+type Result struct {
+	Ref string
+	ID  image.ID
+}
+
 type Opt struct {
 	ImageStore     image.Store
 	ReferenceStore reference.Store
 	Differ         Differ
+	Reporter       chan Result
 }

 type imageExporter struct {
@@ -50,6 +57,8 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 			i.targetName = ref
 		case exporterImageConfig:
 			i.config = []byte(v)
+		case "ref":
+			i.ref = v
 		default:
 			logrus.Warnf("image exporter: unknown option %s", k)
 		}
@@ -61,6 +70,7 @@ type imageExporterInstance struct {
 	*imageExporter
 	targetName distref.Named
 	config     []byte
+	ref        string
 }

 func (e *imageExporterInstance) Name() string {
@@ -131,5 +141,9 @@ func (e *imageExporterInstance) Export(ctx context.Context, ref cache.ImmutableR
 		}
 	}

+	if e.opt.Reporter != nil {
+		e.opt.Reporter <- Result{ID: id, Ref: e.ref}
+	}
+
 	return nil
 }
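The Reporter send above is unbuffered, so Export blocks until someone receives; that is only safe because newResultsGetter (builder.go) drains the channel in a dedicated goroutine. A toy version of the handshake:

package main

import "fmt"

type result struct {
	Ref string
	ID  string
}

func main() {
	reporter := make(chan result) // unbuffered, like Opt.Reporter

	// Consumer: drains continuously, as newResultsGetter does.
	done := make(chan result)
	go func() {
		for r := range reporter {
			done <- r
		}
	}()

	// Exporter side: this send blocks until the consumer receives.
	reporter <- result{Ref: "build-ref", ID: "sha256:abc"}
	fmt.Println(<-done)
}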
@@ -65,12 +65,12 @@ func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History)
 	}
 	m["history"] = dt

-	now := time.Now()
-	dt, err = json.Marshal(&now)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to marshal creation time")
-	}
-	m["created"] = dt
+	// now := time.Now()
+	// dt, err = json.Marshal(&now)
+	// if err != nil {
+	// 	return nil, errors.Wrap(err, "failed to marshal creation time")
+	// }
+	// m["created"] = dt

 	dt, err = json.Marshal(m)
 	return dt, errors.Wrap(err, "failed to marshal config after patch")
@@ -104,9 +104,9 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
 	if len(diffs) > historyLayers {
 		// some history items are missing. add them based on the ref metadata
 		for _, msg := range getRefDesciptions(ref, len(diffs)-historyLayers) {
-			tm := time.Now().UTC()
+			// tm := time.Now().UTC()
 			history = append(history, ocispec.History{
-				Created:   &tm,
+				// Created: &tm,
 				CreatedBy: msg,
 				Comment:   "buildkit.exporter.image.v0",
 			})
@@ -15,7 +15,6 @@ import (
 	"github.com/moby/buildkit/snapshot"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )

 var keyParent = []byte("parent")
@@ -121,8 +120,24 @@ func (s *snapshotter) getLayer(key string) (layer.Layer, error) {
 	if !ok {
 		id, ok := s.chainID(key)
 		if !ok {
-			s.mu.Unlock()
-			return nil, nil
+			if err := s.db.View(func(tx *bolt.Tx) error {
+				b := tx.Bucket([]byte(key))
+				if b == nil {
+					return nil
+				}
+				v := b.Get(keyChainID)
+				if v != nil {
+					id = layer.ChainID(v)
+				}
+				return nil
+			}); err != nil {
+				s.mu.Unlock()
+				return nil, err
+			}
+			if id == "" {
+				s.mu.Unlock()
+				return nil, nil
+			}
 		}
 		var err error
 		l, err = s.opt.LayerStore.Get(id)
@@ -132,7 +147,7 @@ func (s *snapshotter) getLayer(key string) (layer.Layer, error) {
 		}
 		s.refs[string(id)] = l
 		if err := s.db.Update(func(tx *bolt.Tx) error {
-			_, err := tx.CreateBucketIfNotExists([]byte(id))
+			_, err := tx.CreateBucketIfNotExists([]byte(key))
 			return err
 		}); err != nil {
 			s.mu.Unlock()
@@ -282,7 +297,8 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error {
 		return nil
 	}

-	return s.opt.GraphDriver.Remove(key)
+	id, _ := s.getGraphDriverID(key)
+	return s.opt.GraphDriver.Remove(id)
 }

 func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
@@ -298,7 +314,7 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap
 	}); err != nil {
 		return err
 	}
-	logrus.Debugf("committed %s as %s", name, key)
+	// logrus.Debugf("committed %s as %s", name, key)
 	return nil
 }
@@ -307,61 +323,62 @@ func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snap
 }

 func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error {
-	allKeys := map[string]struct{}{}
-	commitedIDs := map[string]string{}
-	chainIDs := map[string]layer.ChainID{}
-
-	if err := s.db.View(func(tx *bolt.Tx) error {
-		tx.ForEach(func(name []byte, b *bolt.Bucket) error {
-			allKeys[string(name)] = struct{}{}
-			v := b.Get(keyCommitted)
-			if v != nil {
-				commitedIDs[string(v)] = string(name)
-			}
-
-			v = b.Get(keyChainID)
-			if v != nil {
-				chainIDs[string(name)] = layer.ChainID(v)
-			}
-			return nil
-		})
-		return nil
-	}); err != nil {
-		return err
-	}
-
-	for k := range allKeys {
-		if _, ok := commitedIDs[k]; ok {
-			continue
-		}
-		if chainID, ok := chainIDs[k]; ok {
-			s.mu.Lock()
-			if _, ok := s.refs[k]; !ok {
-				l, err := s.opt.LayerStore.Get(chainID)
-				if err != nil {
-					s.mu.Unlock()
-					return err
-				}
-				s.refs[k] = l
-			}
-			s.mu.Unlock()
-		}
-
-		if _, err := s.getLayer(k); err != nil {
-			s.Remove(ctx, k)
-			continue
-		}
-		info, err := s.Stat(ctx, k)
-		if err != nil {
-			s.Remove(ctx, k)
-			continue
-		}
-		if err := fn(ctx, info); err != nil {
-			return err
-		}
-	}
-
-	return nil
+	// allKeys := map[string]struct{}{}
+	// commitedIDs := map[string]string{}
+	// chainIDs := map[string]layer.ChainID{}
+	//
+	// if err := s.db.View(func(tx *bolt.Tx) error {
+	// 	tx.ForEach(func(name []byte, b *bolt.Bucket) error {
+	// 		allKeys[string(name)] = struct{}{}
+	// 		v := b.Get(keyCommitted)
+	// 		if v != nil {
+	// 			commitedIDs[string(v)] = string(name)
+	// 		}
+	//
+	// 		v = b.Get(keyChainID)
+	// 		if v != nil {
+	// 			logrus.Debugf("loaded layer %s %s", name, v)
+	// 			chainIDs[string(name)] = layer.ChainID(v)
+	// 		}
+	// 		return nil
+	// 	})
+	// 	return nil
+	// }); err != nil {
+	// 	return err
+	// }
+	//
+	// for k := range allKeys {
+	// 	if chainID, ok := chainIDs[k]; ok {
+	// 		s.mu.Lock()
+	// 		if _, ok := s.refs[k]; !ok {
+	// 			l, err := s.opt.LayerStore.Get(chainID)
+	// 			if err != nil {
+	// 				s.mu.Unlock()
+	// 				return err
+	// 			}
+	// 			s.refs[k] = l
+	// 		}
+	// 		s.mu.Unlock()
+	// 	}
+	// 	if _, ok := commitedIDs[k]; ok {
+	// 		continue
+	// 	}
+	//
+	// 	if _, err := s.getLayer(k); err != nil {
+	// 		s.Remove(ctx, k)
+	// 		continue
+	// 	}
+	// 	info, err := s.Stat(ctx, k)
+	// 	if err != nil {
+	// 		s.Remove(ctx, k)
+	// 		continue
+	// 	}
+	// 	if err := fn(ctx, info); err != nil {
+	// 		return err
+	// 	}
+	// }
+
+	return errors.Errorf("not-implemented")
 }

 func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
new file mode 100644
@@ -0,0 +1,301 @@
+package worker
+
+import (
+	"context"
+	"io"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/rootfs"
+	"github.com/moby/buildkit/cache"
+	"github.com/moby/buildkit/cache/metadata"
+	"github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/executor"
+	"github.com/moby/buildkit/exporter"
+	"github.com/moby/buildkit/frontend"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/snapshot"
+	"github.com/moby/buildkit/solver-next"
+	"github.com/moby/buildkit/solver-next/llbsolver/ops"
+	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/source"
+	"github.com/moby/buildkit/source/git"
+	"github.com/moby/buildkit/source/http"
+	"github.com/moby/buildkit/source/local"
+	"github.com/moby/buildkit/util/progress"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// TODO: this file should be removed. containerd defines ContainerdWorker, oci defines OCIWorker. There is no base worker.
+
+// WorkerOpt is specific to a worker.
+// See also CommonOpt.
+type WorkerOpt struct {
+	ID             string
+	Labels         map[string]string
+	SessionManager *session.Manager
+	MetadataStore  *metadata.Store
+	Executor       executor.Executor
+	Snapshotter    snapshot.Snapshotter
+	ContentStore   content.Store
+	CacheManager   cache.Manager
+	ImageSource    source.Source
+	Exporters      map[string]exporter.Exporter
+	// ImageStore     images.Store // optional
+}
+
+// Worker is a local worker instance with dedicated snapshotter, cache, and so on.
+// TODO: s/Worker/OpWorker/g ?
+type Worker struct {
+	WorkerOpt
+	SourceManager *source.Manager
+	// Exporters     map[string]exporter.Exporter
+	// ImageSource   source.Source
+}
+
+// NewWorker instantiates a local worker
+func NewWorker(opt WorkerOpt) (*Worker, error) {
+	sm, err := source.NewManager()
+	if err != nil {
+		return nil, err
+	}
+
+	cm := opt.CacheManager
+	sm.Register(opt.ImageSource)
+
+	gs, err := git.NewSource(git.Opt{
+		CacheAccessor: cm,
+		MetadataStore: opt.MetadataStore,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	sm.Register(gs)
+
+	hs, err := http.NewSource(http.Opt{
+		CacheAccessor: cm,
+		MetadataStore: opt.MetadataStore,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	sm.Register(hs)
+
+	ss, err := local.NewSource(local.Opt{
+		SessionManager: opt.SessionManager,
+		CacheAccessor:  cm,
+		MetadataStore:  opt.MetadataStore,
+	})
+	if err != nil {
+		return nil, err
+	}
+	sm.Register(ss)
+
+	return &Worker{
+		WorkerOpt:     opt,
+		SourceManager: sm,
+	}, nil
+}
+
+func (w *Worker) ID() string {
+	return w.WorkerOpt.ID
+}
+
+func (w *Worker) Labels() map[string]string {
+	return w.WorkerOpt.Labels
+}
+
+func (w *Worker) LoadRef(id string) (cache.ImmutableRef, error) {
+	return w.CacheManager.Get(context.TODO(), id)
+}
+
+func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge) (solver.Op, error) {
+	switch op := v.Sys().(type) {
+	case *pb.Op_Source:
+		return ops.NewSourceOp(v, op, w.SourceManager, w)
+	case *pb.Op_Exec:
+		return ops.NewExecOp(v, op, w.CacheManager, w.Executor, w)
+	case *pb.Op_Build:
+		return ops.NewBuildOp(v, op, s, w)
+	default:
+		return nil, errors.Errorf("could not resolve %v", v)
+	}
+}
+
+func (w *Worker) ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) {
+	// ImageSource is typically source/containerimage
+	resolveImageConfig, ok := w.ImageSource.(resolveImageConfig)
+	if !ok {
+		return "", nil, errors.Errorf("worker %q does not implement ResolveImageConfig", w.ID())
+	}
+	return resolveImageConfig.ResolveImageConfig(ctx, ref)
+}
+
+type resolveImageConfig interface {
+	ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error)
+}
+
+func (w *Worker) Exec(ctx context.Context, meta executor.Meta, rootFS cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
+	active, err := w.CacheManager.New(ctx, rootFS)
+	if err != nil {
+		return err
+	}
+	defer active.Release(context.TODO())
+	return w.Executor.Exec(ctx, meta, active, nil, stdin, stdout, stderr)
+}
+
+func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
+	return w.CacheManager.DiskUsage(ctx, opt)
+}
+
+func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo) error {
+	return w.CacheManager.Prune(ctx, ch)
+}
+
+func (w *Worker) Exporter(name string) (exporter.Exporter, error) {
+	exp, ok := w.Exporters[name]
+	if !ok {
+		return nil, errors.Errorf("exporter %q could not be found", name)
+	}
+	return exp, nil
+}
+
+func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef) (*solver.Remote, error) {
+	// diffPairs, err := blobs.GetDiffPairs(ctx, w.ContentStore, w.Snapshotter, w.Differ, ref)
+	// if err != nil {
+	// 	return nil, errors.Wrap(err, "failed calculating diff pairs for exported snapshot")
+	// }
+	// if len(diffPairs) == 0 {
+	// 	return nil, nil
+	// }
+	//
+	// descs := make([]ocispec.Descriptor, len(diffPairs))
+	//
+	// for i, dp := range diffPairs {
+	// 	info, err := w.ContentStore.Info(ctx, dp.Blobsum)
+	// 	if err != nil {
+	// 		return nil, err
+	// 	}
+	// 	descs[i] = ocispec.Descriptor{
+	// 		Digest:    dp.Blobsum,
+	// 		Size:      info.Size,
+	// 		MediaType: schema2.MediaTypeLayer,
+	// 		Annotations: map[string]string{
+	// 			"containerd.io/uncompressed": dp.DiffID.String(),
+	// 		},
+	// 	}
+	// }
+	//
+	// return &solver.Remote{
+	// 	Descriptors: descs,
+	// 	Provider:    w.ContentStore,
+	// }, nil
+	return nil, errors.Errorf("getremote not implemented")
+}
+
+func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
+	// eg, gctx := errgroup.WithContext(ctx)
+	// for _, desc := range remote.Descriptors {
+	// 	func(desc ocispec.Descriptor) {
+	// 		eg.Go(func() error {
+	// 			done := oneOffProgress(ctx, fmt.Sprintf("pulling %s", desc.Digest))
+	// 			return done(contentutil.Copy(gctx, w.ContentStore, remote.Provider, desc))
+	// 		})
+	// 	}(desc)
+	// }
+	//
+	// if err := eg.Wait(); err != nil {
+	// 	return nil, err
+	// }
+	//
+	// csh, release := snapshot.NewCompatibilitySnapshotter(w.Snapshotter)
+	// defer release()
+	//
+	// unpackProgressDone := oneOffProgress(ctx, "unpacking")
+	// chainID, err := w.unpack(ctx, remote.Descriptors, csh)
+	// if err != nil {
+	// 	return nil, unpackProgressDone(err)
+	// }
+	// unpackProgressDone(nil)
+	//
+	// return w.CacheManager.Get(ctx, chainID, cache.WithDescription(fmt.Sprintf("imported %s", remote.Descriptors[len(remote.Descriptors)-1].Digest)))
+	return nil, errors.Errorf("fromremote not implemented")
+}
+
+// utility function. could be moved to the constructor logic?
+// func Labels(executor, snapshotter string) map[string]string {
+// 	hostname, err := os.Hostname()
+// 	if err != nil {
+// 		hostname = "unknown"
+// 	}
+// 	labels := map[string]string{
+// 		worker.LabelOS:          runtime.GOOS,
+// 		worker.LabelArch:        runtime.GOARCH,
+// 		worker.LabelExecutor:    executor,
+// 		worker.LabelSnapshotter: snapshotter,
+// 		worker.LabelHostname:    hostname,
+// 	}
+// 	return labels
+// }
+//
+// // ID reads the worker id from the `workerid` file.
+// // If the file does not exist, it creates a random one.
+// func ID(root string) (string, error) {
+// 	f := filepath.Join(root, "workerid")
+// 	b, err := ioutil.ReadFile(f)
+// 	if err != nil {
+// 		if os.IsNotExist(err) {
+// 			id := identity.NewID()
+// 			err := ioutil.WriteFile(f, []byte(id), 0400)
+// 			return id, err
+// 		} else {
+// 			return "", err
+// 		}
+// 	}
+// 	return string(b), nil
+// }
+
+func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer, error) {
+	layers := make([]rootfs.Layer, len(descs))
+	for i, desc := range descs {
+		diffIDStr := desc.Annotations["containerd.io/uncompressed"]
+		if diffIDStr == "" {
+			return nil, errors.Errorf("%s missing uncompressed digest", desc.Digest)
+		}
+		diffID, err := digest.Parse(diffIDStr)
+		if err != nil {
+			return nil, err
+		}
+		layers[i].Diff = ocispec.Descriptor{
+			MediaType: ocispec.MediaTypeImageLayer,
+			Digest:    diffID,
+		}
+		layers[i].Blob = ocispec.Descriptor{
+			MediaType: desc.MediaType,
+			Digest:    desc.Digest,
+			Size:      desc.Size,
+		}
+	}
+	return layers, nil
+}
+
+func oneOffProgress(ctx context.Context, id string) func(err error) error {
+	pw, _, _ := progress.FromContext(ctx)
+	now := time.Now()
+	st := progress.Status{
+		Started: &now,
+	}
+	pw.Write(id, st)
+	return func(err error) error {
+		// TODO: set error on status
+		now := time.Now()
+		st.Completed = &now
+		pw.Write(id, st)
+		pw.Close()
+		return err
+	}
+}
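oneOffProgress above brackets a unit of work with a started/completed status pair; the returned closure is fed the work's error so completion is stamped either way (see the commented-out FromRemote for the intended call shape). A self-contained sketch of the same pattern with a stand-in writer:

package main

import (
	"fmt"
	"time"
)

// status mirrors the progress.Status fields used by oneOffProgress above.
type status struct {
	Started   *time.Time
	Completed *time.Time
}

type writer interface {
	Write(id string, st status)
	Close()
}

type printWriter struct{}

func (printWriter) Write(id string, st status) {
	state := "started"
	if st.Completed != nil {
		state = "done"
	}
	fmt.Println(id, state)
}
func (printWriter) Close() {}

// oneOff follows the same started/completed bracketing as oneOffProgress.
func oneOff(pw writer, id string) func(error) error {
	now := time.Now()
	st := status{Started: &now}
	pw.Write(id, st)
	return func(err error) error {
		end := time.Now()
		st.Completed = &end
		pw.Write(id, st) // stamped complete whether err is nil or not
		pw.Close()
		return err
	}
}

func main() {
	done := oneOff(printWriter{}, "unpacking")
	// ... do the unit of work ...
	fmt.Println(done(nil)) // <nil>
}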
@@ -27,6 +27,7 @@ import (
 	swarmrouter "github.com/docker/docker/api/server/router/swarm"
 	systemrouter "github.com/docker/docker/api/server/router/system"
 	"github.com/docker/docker/api/server/router/volume"
+	buildkit "github.com/docker/docker/builder/builder-next"
 	"github.com/docker/docker/builder/dockerfile"
 	"github.com/docker/docker/builder/fscache"
 	"github.com/docker/docker/cli/debug"
@@ -270,7 +271,16 @@ func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptio
 		return opts, err
 	}

-	bb, err := buildbackend.NewBackend(daemon.ImageService(), manager, buildCache)
+	buildkit, err := buildkit.New(buildkit.Opt{
+		SessionManager: sm,
+		Root:           filepath.Join(config.Root, "buildkit"),
+		Dist:           daemon.DistributionServices(),
+	})
+	if err != nil {
+		return opts, err
+	}
+
+	bb, err := buildbackend.NewBackend(daemon.ImageService(), manager, buildCache, buildkit)
 	if err != nil {
 		return opts, errors.Wrap(err, "failed to create buildmanager")
 	}