Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
@@ -11,6 +11,7 @@ import (
 	"github.com/containerd/containerd/snapshots"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/idtools"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/snapshot"
 	digest "github.com/opencontainers/go-digest"
@@ -73,6 +74,10 @@ func NewSnapshotter(opt Opt) (snapshot.SnapshotterBase, error) {
 	return s, nil
 }
 
+func (s *snapshotter) IdentityMapping() *idtools.IdentityMapping {
+	return nil
+}
+
 func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error {
 	origParent := parent
 	if parent != "" {
@@ -469,3 +474,7 @@ func (m *mountable) Release() error {
 	m.mounts = nil
 	return m.release()
 }
+
+func (m *mountable) IdentityMapping() *idtools.IdentityMapping {
+	return nil
+}
@@ -25,7 +25,6 @@ import (
 	"github.com/moby/buildkit/control"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/session"
-	"github.com/moby/buildkit/solver/llbsolver"
 	"github.com/moby/buildkit/util/entitlements"
 	"github.com/moby/buildkit/util/resolver"
 	"github.com/moby/buildkit/util/tracing"
@@ -64,10 +63,6 @@ var cacheFields = map[string]bool{
 	"immutable": false,
 }
 
-func init() {
-	llbsolver.AllowNetworkHostUnstable = true
-}
-
 // Opt is option struct required for creating the builder
 type Opt struct {
 	SessionManager *session.Manager
@@ -30,6 +30,7 @@ import (
 	"github.com/moby/buildkit/snapshot/blobmapping"
 	"github.com/moby/buildkit/solver/bboltcachestorage"
 	"github.com/moby/buildkit/util/binfmt_misc"
+	"github.com/moby/buildkit/util/entitlements"
 	"github.com/moby/buildkit/worker"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
@@ -189,6 +190,10 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
 			"inline": inlineremotecache.ResolveCacheExporterFunc(),
 		},
+		Entitlements: []string{
+			string(entitlements.EntitlementNetworkHost),
+			// string(entitlements.EntitlementSecurityInsecure),
+		},
 	})
 }
 
@@ -25,6 +25,7 @@ import (
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/exporter"
 	localexporter "github.com/moby/buildkit/exporter/local"
+	tarexporter "github.com/moby/buildkit/exporter/tar"
 	"github.com/moby/buildkit/frontend"
 	gw "github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/moby/buildkit/session"
@@ -213,6 +214,10 @@ func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter,
 		return localexporter.New(localexporter.Opt{
 			SessionManager: sm,
 		})
+	case client.ExporterTar:
+		return tarexporter.New(tarexporter.Opt{
+			SessionManager: sm,
+		})
 	default:
 		return nil, errors.Errorf("exporter %q could not be found", name)
 	}
@@ -27,8 +27,8 @@ github.com/imdario/mergo v0.3.6
 golang.org/x/sync 1d60e4601c6fd243af51cc01ddf169918a5407ca
 
 # buildkit
-github.com/moby/buildkit e9aca5bef87e19173b99d8668db0338dcaaa5f33
-github.com/tonistiigi/fsutil 1bdbf124ad494a771e99e0cdcd16326375f8b2c9
+github.com/moby/buildkit b3028967ae6259c9a31c1a1deeccd30fe3469cce
+github.com/tonistiigi/fsutil 3bbb99cdbd76619ab717299830c60f6f2a533a6b
 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
 github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
@@ -120,7 +120,7 @@ github.com/googleapis/gax-go v2.0.0
 google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
 
 # containerd
-github.com/containerd/containerd a15b6e2097c48b632dbdc63254bad4c62b69e709
+github.com/containerd/containerd ceba56893a76f22cf0126c46d835c80fb3833408
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
 github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1
@@ -20,8 +20,8 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"net/url"
 	"os"
-	"path/filepath"
 	"sync"
 
 	"github.com/containerd/containerd/defaults"
@@ -222,46 +222,76 @@ type DirectIO struct {
 	cio
 }
 
-var _ IO = &DirectIO{}
+var (
+	_ IO = &DirectIO{}
+	_ IO = &logURI{}
+)
+
+// LogURI provides the raw logging URI
+func LogURI(uri *url.URL) Creator {
+	return func(_ string) (IO, error) {
+		return &logURI{
+			config: Config{
+				Stdout: uri.String(),
+				Stderr: uri.String(),
+			},
+		}, nil
+	}
+}
+
+// BinaryIO forwards container STDOUT|STDERR directly to a logging binary
+func BinaryIO(binary string, args map[string]string) Creator {
+	return func(_ string) (IO, error) {
+		uri := &url.URL{
+			Scheme: "binary",
+			Host:   binary,
+		}
+		for k, v := range args {
+			uri.Query().Set(k, v)
+		}
+		return &logURI{
+			config: Config{
+				Stdout: uri.String(),
+				Stderr: uri.String(),
+			},
+		}, nil
+	}
+}
 
 // LogFile creates a file on disk that logs the task's STDOUT,STDERR.
 // If the log file already exists, the logs will be appended to the file.
 func LogFile(path string) Creator {
 	return func(_ string) (IO, error) {
-		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
-			return nil, err
-		}
-		f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
-		if err != nil {
-			return nil, err
+		uri := &url.URL{
+			Scheme: "file",
+			Host:   path,
 		}
-		f.Close()
-		return &logIO{
+		return &logURI{
 			config: Config{
-				Stdout: path,
-				Stderr: path,
+				Stdout: uri.String(),
+				Stderr: uri.String(),
 			},
 		}, nil
 	}
 }
 
-type logIO struct {
+type logURI struct {
 	config Config
 }
 
-func (l *logIO) Config() Config {
+func (l *logURI) Config() Config {
 	return l.config
 }
 
-func (l *logIO) Cancel() {
+func (l *logURI) Cancel() {
 
 }
 
-func (l *logIO) Wait() {
+func (l *logURI) Wait() {
 
 }
 
-func (l *logIO) Close() error {
+func (l *logURI) Close() error {
 	return nil
 }
 
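Not part of the diff — a minimal sketch of how the reworked creators are consumed. A `cio.Creator` is handed to task creation, so `LogFile` (and the new `LogURI`/`BinaryIO`) plug in at the same call site as before; the log path below is a placeholder.

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
)

// fileLoggedTask appends the task's STDOUT and STDERR to a single log file.
// LogFile keeps its previous signature; only its implementation now routes
// through the "file" STDIO URI scheme.
func fileLoggedTask(ctx context.Context, container containerd.Container) (containerd.Task, error) {
	return container.NewTask(ctx, cio.LogFile("/var/log/demo-task.log"))
}
```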
@@ -300,6 +300,10 @@ type RemoteContext struct {
 
 	// MaxConcurrentDownloads is the max concurrent content downloads for each pull.
 	MaxConcurrentDownloads int
+
+	// AppendDistributionSourceLabel allows fetcher to add distribute source
+	// label for each blob content, which doesn't work for legacy schema1.
+	AppendDistributionSourceLabel bool
 }
 
 func defaultRemoteContext() *RemoteContext {
@@ -194,3 +194,12 @@ func WithMaxConcurrentDownloads(max int) RemoteOpt {
 		return nil
 	}
 }
+
+// WithAppendDistributionSourceLabel allows fetcher to add distribute source
+// label for each blob content, which doesn't work for legacy schema1.
+func WithAppendDistributionSourceLabel() RemoteOpt {
+	return func(_ *Client, c *RemoteContext) error {
+		c.AppendDistributionSourceLabel = true
+		return nil
+	}
+}
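Not part of the diff — a sketch of the new option in use. `WithAppendDistributionSourceLabel()` is passed to `Pull` alongside the existing `RemoteOpt`s; the image reference is a placeholder.

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
)

// pullWithSourceLabels pulls an image and records, on every blob, which
// registry and repository it was fetched from.
func pullWithSourceLabels(ctx context.Context, client *containerd.Client) (containerd.Image, error) {
	return client.Pull(ctx, "docker.io/library/redis:5.0",
		containerd.WithPullUnpack,
		containerd.WithAppendDistributionSourceLabel(),
	)
}
```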
@@ -29,7 +29,8 @@ import (
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
-const nvidiaCLI = "nvidia-container-cli"
+// NvidiaCLI is the path to the Nvidia helper binary
+const NvidiaCLI = "nvidia-container-cli"
 
 // Capability specifies capabilities for the gpu inside the container
 // Detailed explanation of options can be found:
@@ -51,13 +52,16 @@ const (
 	Display Capability = "display"
 )
 
-var allCaps = []Capability{
-	Compute,
-	Compat32,
-	Graphics,
-	Utility,
-	Video,
-	Display,
+// AllCaps returns the complete list of supported Nvidia capabilties.
+func AllCaps() []Capability {
+	return []Capability{
+		Compute,
+		Compat32,
+		Graphics,
+		Utility,
+		Video,
+		Display,
+	}
 }
 
 // WithGPUs adds NVIDIA gpu support to a container
@@ -76,7 +80,7 @@ func WithGPUs(opts ...Opts) oci.SpecOpts {
 			}
 			c.OCIHookPath = path
 		}
-		nvidiaPath, err := exec.LookPath(nvidiaCLI)
+		nvidiaPath, err := exec.LookPath(NvidiaCLI)
 		if err != nil {
 			return err
 		}
@@ -166,7 +170,7 @@ func WithAllDevices(c *config) error {
 
 // WithAllCapabilities adds all capabilities to the container for the gpus
 func WithAllCapabilities(c *config) error {
-	c.Capabilities = allCaps
+	c.Capabilities = AllCaps()
 	return nil
 }
 
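Not part of the diff — a sketch of how the exported `AllCaps` surface is reached in practice: `WithAllCapabilities` (and `WithAllDevices`, shown in the hunk above) are passed to `WithGPUs` when building the OCI spec. Container and snapshot IDs are placeholders.

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/contrib/nvidia"
	"github.com/containerd/containerd/oci"
)

// gpuContainer builds a container whose spec exposes all GPU devices and the
// full capability list returned by nvidia.AllCaps().
func gpuContainer(ctx context.Context, client *containerd.Client, image containerd.Image) (containerd.Container, error) {
	return client.NewContainer(ctx, "gpu-demo",
		containerd.WithNewSnapshot("gpu-demo-snapshot", image),
		containerd.WithNewSpec(
			oci.WithImageConfig(image),
			nvidia.WithGPUs(nvidia.WithAllDevices, nvidia.WithAllCapabilities),
		),
	)
}
```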
@@ -161,6 +161,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 			"ioctl",
 			"io_destroy",
 			"io_getevents",
+			"io_pgetevents",
 			"ioprio_get",
 			"ioprio_set",
 			"io_setup",
@@ -319,6 +320,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
 			"stat64",
 			"statfs",
 			"statfs64",
+			"statx",
 			"symlink",
 			"symlinkat",
 			"sync",
@@ -25,14 +25,16 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/images/archive"
+	"github.com/containerd/containerd/platforms"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 type importOpts struct {
-	indexName string
-	imageRefT func(string) string
-	dgstRefT  func(digest.Digest) string
+	indexName    string
+	imageRefT    func(string) string
+	dgstRefT     func(digest.Digest) string
+	allPlatforms bool
 }
 
 // ImportOpt allows the caller to specify import specific options
@@ -64,6 +66,14 @@ func WithIndexName(name string) ImportOpt {
 	}
 }
 
+// WithAllPlatforms is used to import content for all platforms.
+func WithAllPlatforms(allPlatforms bool) ImportOpt {
+	return func(c *importOpts) error {
+		c.allPlatforms = allPlatforms
+		return nil
+	}
+}
+
 // Import imports an image from a Tar stream using reader.
 // Caller needs to specify importer. Future version may use oci.v1 as the default.
 // Note that unreferrenced blobs may be imported to the content store as well.
@@ -98,6 +108,10 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 			Target: index,
 		})
 	}
+	var platformMatcher = platforms.All
+	if !iopts.allPlatforms {
+		platformMatcher = platforms.Default()
+	}
 
 	var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
 		// Only save images at top level
@@ -141,6 +155,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 		return idx.Manifests, nil
 	}
 
+	handler = images.FilterPlatforms(handler, platformMatcher)
 	handler = images.SetChildrenLabels(cs, handler)
 	if err := images.Walk(ctx, handler, index); err != nil {
 		return nil, err
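Not part of the diff — a sketch of importing a multi-platform archive with the new option; the archive path is supplied by the caller.

```go
package example

import (
	"context"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images"
)

// importAllPlatforms loads every platform's manifests from a saved image
// archive instead of only the default platform's.
func importAllPlatforms(ctx context.Context, client *containerd.Client, path string) ([]images.Image, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return client.Import(ctx, f, containerd.WithAllPlatforms(true))
}
```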
@@ -33,7 +33,7 @@ import (
 	"github.com/containerd/containerd/namespaces"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/continuity/fs"
-	"github.com/opencontainers/image-spec/specs-go/v1"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/opencontainers/runc/libcontainer/user"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
@@ -741,9 +741,11 @@ func WithCapabilities(caps []string) SpecOpts {
 }
 
 // WithAllCapabilities sets all linux capabilities for the process
-var WithAllCapabilities = WithCapabilities(getAllCapabilities())
+var WithAllCapabilities = WithCapabilities(GetAllCapabilities())
 
-func getAllCapabilities() []string {
+// GetAllCapabilities returns all caps up to CAP_LAST_CAP
+// or CAP_BLOCK_SUSPEND on RHEL6
+func GetAllCapabilities() []string {
 	last := capability.CAP_LAST_CAP
 	// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
 	if last == capability.Cap(63) {
@@ -759,6 +761,61 @@ func getAllCapabilities() []string {
 	return caps
 }
 
+func capsContain(caps []string, s string) bool {
+	for _, c := range caps {
+		if c == s {
+			return true
+		}
+	}
+	return false
+}
+
+func removeCap(caps *[]string, s string) {
+	for i, c := range *caps {
+		if c == s {
+			*caps = append((*caps)[:i], (*caps)[i+1:]...)
+		}
+	}
+}
+
+// WithAddedCapabilities adds the provided capabilities
+func WithAddedCapabilities(caps []string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setCapabilities(s)
+		for _, c := range caps {
+			for _, cl := range []*[]string{
+				&s.Process.Capabilities.Bounding,
+				&s.Process.Capabilities.Effective,
+				&s.Process.Capabilities.Permitted,
+				&s.Process.Capabilities.Inheritable,
+			} {
+				if !capsContain(*cl, c) {
+					*cl = append(*cl, c)
+				}
+			}
+		}
+		return nil
+	}
+}
+
+// WithDroppedCapabilities removes the provided capabilities
+func WithDroppedCapabilities(caps []string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		setCapabilities(s)
+		for _, c := range caps {
+			for _, cl := range []*[]string{
+				&s.Process.Capabilities.Bounding,
+				&s.Process.Capabilities.Effective,
+				&s.Process.Capabilities.Permitted,
+				&s.Process.Capabilities.Inheritable,
+			} {
+				removeCap(cl, c)
+			}
+		}
+		return nil
+	}
+}
+
 // WithAmbientCapabilities set the Linux ambient capabilities for the process
 // Ambient capabilities should only be set for non-root users or the caller should
 // understand how these capabilities are used and set
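Not part of the diff — a sketch of composing the new capability SpecOpts with existing ones; the capability names are ordinary examples, not mandated by the change.

```go
package example

import (
	"github.com/containerd/containerd"
	"github.com/containerd/containerd/oci"
)

// adjustedCaps grants CAP_NET_ADMIN on top of the image defaults and removes
// CAP_SYS_ADMIN from every capability set.
func adjustedCaps(image containerd.Image) containerd.NewContainerOpts {
	return containerd.WithNewSpec(
		oci.WithImageConfig(image),
		oci.WithAddedCapabilities([]string{"CAP_NET_ADMIN"}),
		oci.WithDroppedCapabilities([]string{"CAP_SYS_ADMIN"}),
	)
}
```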
@@ -112,8 +112,9 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
 	childrenHandler := images.ChildrenHandler(store)
 	// Set any children labels for that content
 	childrenHandler = images.SetChildrenLabels(store, childrenHandler)
-	// Filter children by platforms
-	childrenHandler = images.FilterPlatforms(childrenHandler, rCtx.PlatformMatcher)
+	// Filter manifests by platforms but allow to handle manifest
+	// and configuration for not-target platforms
+	childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher)
 	// Sort and limit manifests if a finite number is needed
 	if limit > 0 {
 		childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit)
@@ -130,11 +131,23 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
 		},
 	)
 
-	handler = images.Handlers(append(rCtx.BaseHandlers,
+	handlers := append(rCtx.BaseHandlers,
 		remotes.FetchHandler(store, fetcher),
 		convertibleHandler,
 		childrenHandler,
-	)...)
+	)
+
+	// append distribution source label to blob data
+	if rCtx.AppendDistributionSourceLabel {
+		appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref)
+		if err != nil {
+			return images.Image{}, err
+		}
+
+		handlers = append(handlers, appendDistSrcLabelHandler)
+	}
+
+	handler = images.Handlers(handlers...)
 
 	converterFunc = func(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
 		return docker.ConvertManifest(ctx, store, desc)
@@ -148,6 +161,7 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
 	if rCtx.MaxConcurrentDownloads > 0 {
 		limiter = semaphore.NewWeighted(int64(rCtx.MaxConcurrentDownloads))
 	}
+
 	if err := images.Dispatch(ctx, handler, limiter, desc); err != nil {
 		return images.Image{}, err
 	}

new file mode 100644
@@ -0,0 +1,112 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package docker
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/labels"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/reference"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var (
+	// labelDistributionSource describes the source blob comes from.
+	labelDistributionSource = "containerd.io/distribution.source"
+)
+
+// AppendDistributionSourceLabel updates the label of blob with distribution source.
+func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) {
+	refspec, err := reference.Parse(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	u, err := url.Parse("dummy://" + refspec.Locator)
+	if err != nil {
+		return nil, err
+	}
+
+	source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/")
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		info, err := manager.Info(ctx, desc.Digest)
+		if err != nil {
+			return nil, err
+		}
+
+		key := distributionSourceLabelKey(source)
+
+		originLabel := ""
+		if info.Labels != nil {
+			originLabel = info.Labels[key]
+		}
+		value := appendDistributionSourceLabel(originLabel, repo)
+
+		// The repo name has been limited under 256 and the distribution
+		// label might hit the limitation of label size, when blob data
+		// is used as the very, very common layer.
+		if err := labels.Validate(key, value); err != nil {
+			log.G(ctx).Warnf("skip to append distribution label: %s", err)
+			return nil, nil
+		}
+
+		info = content.Info{
+			Digest: desc.Digest,
+			Labels: map[string]string{
+				key: value,
+			},
+		}
+		_, err = manager.Update(ctx, info, fmt.Sprintf("labels.%s", key))
+		return nil, err
+	}, nil
+}
+
+func appendDistributionSourceLabel(originLabel, repo string) string {
+	repos := []string{}
+	if originLabel != "" {
+		repos = strings.Split(originLabel, ",")
+	}
+	repos = append(repos, repo)
+
+	// use emtpy string to present duplicate items
+	for i := 1; i < len(repos); i++ {
+		tmp, j := repos[i], i-1
+		for ; j >= 0 && repos[j] >= tmp; j-- {
+			if repos[j] == tmp {
+				tmp = ""
+			}
+			repos[j+1] = repos[j]
+		}
+		repos[j+1] = tmp
+	}
+
+	i := 0
+	for ; i < len(repos) && repos[i] == ""; i++ {
+	}
+
+	return strings.Join(repos[i:], ",")
+}
+
+func distributionSourceLabelKey(source string) string {
+	return fmt.Sprintf("%s.%s", labelDistributionSource, source)
+}
@@ -206,3 +206,38 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, pr
 
 	return nil
 }
+
+// FilterManifestByPlatformHandler allows Handler to handle non-target
+// platform's manifest and configuration data.
+func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		children, err := f(ctx, desc)
+		if err != nil {
+			return nil, err
+		}
+
+		// no platform information
+		if desc.Platform == nil || m == nil {
+			return children, nil
+		}
+
+		var descs []ocispec.Descriptor
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+			if m.Match(*desc.Platform) {
+				descs = children
+			} else {
+				for _, child := range children {
+					if child.MediaType == images.MediaTypeDockerSchema2Config ||
+						child.MediaType == ocispec.MediaTypeImageConfig {
+
+						descs = append(descs, child)
+					}
+				}
+			}
+		default:
+			descs = children
+		}
+		return descs, nil
+	}
+}
| ... | ... |
@@ -72,7 +72,7 @@ type Process interface {
|
| 72 | 72 |
// platform implementations |
| 73 | 73 |
type Platform interface {
|
| 74 | 74 |
CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, |
| 75 |
- wg, cwg *sync.WaitGroup) (console.Console, error) |
|
| 75 |
+ wg *sync.WaitGroup) (console.Console, error) |
|
| 76 | 76 |
ShutdownConsole(ctx context.Context, console console.Console) error |
| 77 | 77 |
Close() error |
| 78 | 78 |
} |
| ... | ... |
@@ -20,6 +20,7 @@ package linux |
| 20 | 20 |
|
| 21 | 21 |
import ( |
| 22 | 22 |
"context" |
| 23 |
+ "crypto/sha256" |
|
| 23 | 24 |
"fmt" |
| 24 | 25 |
"io/ioutil" |
| 25 | 26 |
"os" |
| ... | ... |
@@ -103,7 +104,7 @@ func ShimLocal(c *Config, exchange *exchange.Exchange) ShimOpt {
|
| 103 | 103 |
// ShimConnect is a ShimOpt for connecting to an existing remote shim |
| 104 | 104 |
func ShimConnect(c *Config, onClose func()) ShimOpt {
|
| 105 | 105 |
return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) {
|
| 106 |
- return b.shimConfig(ns, c, ropts), client.WithConnect(b.shimAddress(ns), onClose) |
|
| 106 |
+ return b.shimConfig(ns, c, ropts), client.WithConnect(b.decideShimAddress(ns), onClose) |
|
| 107 | 107 |
} |
| 108 | 108 |
} |
| 109 | 109 |
|
| ... | ... |
@@ -127,10 +128,32 @@ func (b *bundle) Delete() error {
|
| 127 | 127 |
return errors.Wrapf(err, "Failed to remove both bundle and workdir locations: %v", err2) |
| 128 | 128 |
} |
| 129 | 129 |
|
| 130 |
-func (b *bundle) shimAddress(namespace string) string {
|
|
| 130 |
+func (b *bundle) legacyShimAddress(namespace string) string {
|
|
| 131 | 131 |
return filepath.Join(string(filepath.Separator), "containerd-shim", namespace, b.id, "shim.sock") |
| 132 | 132 |
} |
| 133 | 133 |
|
| 134 |
+func (b *bundle) shimAddress(namespace string) string {
|
|
| 135 |
+ d := sha256.Sum256([]byte(filepath.Join(namespace, b.id))) |
|
| 136 |
+ return filepath.Join(string(filepath.Separator), "containerd-shim", fmt.Sprintf("%x.sock", d))
|
|
| 137 |
+} |
|
| 138 |
+ |
|
| 139 |
+func (b *bundle) loadAddress() (string, error) {
|
|
| 140 |
+ addressPath := filepath.Join(b.path, "address") |
|
| 141 |
+ data, err := ioutil.ReadFile(addressPath) |
|
| 142 |
+ if err != nil {
|
|
| 143 |
+ return "", err |
|
| 144 |
+ } |
|
| 145 |
+ return string(data), nil |
|
| 146 |
+} |
|
| 147 |
+ |
|
| 148 |
+func (b *bundle) decideShimAddress(namespace string) string {
|
|
| 149 |
+ address, err := b.loadAddress() |
|
| 150 |
+ if err != nil {
|
|
| 151 |
+ return b.legacyShimAddress(namespace) |
|
| 152 |
+ } |
|
| 153 |
+ return address |
|
| 154 |
+} |
|
| 155 |
+ |
|
| 134 | 156 |
func (b *bundle) shimConfig(namespace string, c *Config, runcOptions *runctypes.RuncOptions) shim.Config {
|
| 135 | 157 |
var ( |
| 136 | 158 |
criuPath string |
| ... | ... |
@@ -46,7 +46,7 @@ type execProcess struct {
|
| 46 | 46 |
mu sync.Mutex |
| 47 | 47 |
id string |
| 48 | 48 |
console console.Console |
| 49 |
- io runc.IO |
|
| 49 |
+ io *processIO |
|
| 50 | 50 |
status int |
| 51 | 51 |
exited time.Time |
| 52 | 52 |
pid *safePid |
| ... | ... |
@@ -172,29 +172,30 @@ func (e *execProcess) start(ctx context.Context) (err error) {
|
| 172 | 172 |
// access e.pid until it is updated. |
| 173 | 173 |
e.pid.Lock() |
| 174 | 174 |
defer e.pid.Unlock() |
| 175 |
+ |
|
| 175 | 176 |
var ( |
| 176 | 177 |
socket *runc.Socket |
| 177 |
- pidfile = filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id))
|
|
| 178 |
+ pio *processIO |
|
| 179 |
+ pidFile = newExecPidFile(e.path, e.id) |
|
| 178 | 180 |
) |
| 179 | 181 |
if e.stdio.Terminal {
|
| 180 | 182 |
if socket, err = runc.NewTempConsoleSocket(); err != nil {
|
| 181 | 183 |
return errors.Wrap(err, "failed to create runc console socket") |
| 182 | 184 |
} |
| 183 | 185 |
defer socket.Close() |
| 184 |
- } else if e.stdio.IsNull() {
|
|
| 185 |
- if e.io, err = runc.NewNullIO(); err != nil {
|
|
| 186 |
- return errors.Wrap(err, "creating new NULL IO") |
|
| 187 |
- } |
|
| 188 | 186 |
} else {
|
| 189 |
- if e.io, err = runc.NewPipeIO(e.parent.IoUID, e.parent.IoGID, withConditionalIO(e.stdio)); err != nil {
|
|
| 190 |
- return errors.Wrap(err, "failed to create runc io pipes") |
|
| 187 |
+ if pio, err = createIO(ctx, e.id, e.parent.IoUID, e.parent.IoGID, e.stdio); err != nil {
|
|
| 188 |
+ return errors.Wrap(err, "failed to create init process I/O") |
|
| 191 | 189 |
} |
| 190 |
+ e.io = pio |
|
| 192 | 191 |
} |
| 193 | 192 |
opts := &runc.ExecOpts{
|
| 194 |
- PidFile: pidfile, |
|
| 195 |
- IO: e.io, |
|
| 193 |
+ PidFile: pidFile.Path(), |
|
| 196 | 194 |
Detach: true, |
| 197 | 195 |
} |
| 196 |
+ if pio != nil {
|
|
| 197 |
+ opts.IO = pio.IO() |
|
| 198 |
+ } |
|
| 198 | 199 |
if socket != nil {
|
| 199 | 200 |
opts.ConsoleSocket = socket |
| 200 | 201 |
} |
| ... | ... |
@@ -203,14 +204,10 @@ func (e *execProcess) start(ctx context.Context) (err error) {
|
| 203 | 203 |
return e.parent.runtimeError(err, "OCI runtime exec failed") |
| 204 | 204 |
} |
| 205 | 205 |
if e.stdio.Stdin != "" {
|
| 206 |
- sc, err := fifo.OpenFifo(context.Background(), e.stdio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) |
|
| 207 |
- if err != nil {
|
|
| 208 |
- return errors.Wrapf(err, "failed to open stdin fifo %s", e.stdio.Stdin) |
|
| 206 |
+ if err := e.openStdin(e.stdio.Stdin); err != nil {
|
|
| 207 |
+ return err |
|
| 209 | 208 |
} |
| 210 |
- e.closers = append(e.closers, sc) |
|
| 211 |
- e.stdin = sc |
|
| 212 | 209 |
} |
| 213 |
- var copyWaitGroup sync.WaitGroup |
|
| 214 | 210 |
ctx, cancel := context.WithTimeout(ctx, 30*time.Second) |
| 215 | 211 |
defer cancel() |
| 216 | 212 |
if socket != nil {
|
| ... | ... |
@@ -218,16 +215,15 @@ func (e *execProcess) start(ctx context.Context) (err error) {
|
| 218 | 218 |
if err != nil {
|
| 219 | 219 |
return errors.Wrap(err, "failed to retrieve console master") |
| 220 | 220 |
} |
| 221 |
- if e.console, err = e.parent.Platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil {
|
|
| 221 |
+ if e.console, err = e.parent.Platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg); err != nil {
|
|
| 222 | 222 |
return errors.Wrap(err, "failed to start console copy") |
| 223 | 223 |
} |
| 224 |
- } else if !e.stdio.IsNull() {
|
|
| 225 |
- if err := copyPipes(ctx, e.io, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil {
|
|
| 224 |
+ } else {
|
|
| 225 |
+ if err := pio.Copy(ctx, &e.wg); err != nil {
|
|
| 226 | 226 |
return errors.Wrap(err, "failed to start io pipe copy") |
| 227 | 227 |
} |
| 228 | 228 |
} |
| 229 |
- copyWaitGroup.Wait() |
|
| 230 |
- pid, err := runc.ReadPidFile(opts.PidFile) |
|
| 229 |
+ pid, err := pidFile.Read() |
|
| 231 | 230 |
if err != nil {
|
| 232 | 231 |
return errors.Wrap(err, "failed to retrieve OCI runtime exec pid") |
| 233 | 232 |
} |
| ... | ... |
@@ -235,6 +231,16 @@ func (e *execProcess) start(ctx context.Context) (err error) {
|
| 235 | 235 |
return nil |
| 236 | 236 |
} |
| 237 | 237 |
|
| 238 |
+func (e *execProcess) openStdin(path string) error {
|
|
| 239 |
+ sc, err := fifo.OpenFifo(context.Background(), path, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) |
|
| 240 |
+ if err != nil {
|
|
| 241 |
+ return errors.Wrapf(err, "failed to open stdin fifo %s", path) |
|
| 242 |
+ } |
|
| 243 |
+ e.stdin = sc |
|
| 244 |
+ e.closers = append(e.closers, sc) |
|
| 245 |
+ return nil |
|
| 246 |
+} |
|
| 247 |
+ |
|
| 238 | 248 |
func (e *execProcess) Status(ctx context.Context) (string, error) {
|
| 239 | 249 |
s, err := e.parent.Status(ctx) |
| 240 | 250 |
if err != nil {
|
| ... | ... |
@@ -41,9 +41,6 @@ import ( |
| 41 | 41 |
"github.com/pkg/errors" |
| 42 | 42 |
) |
| 43 | 43 |
|
| 44 |
-// InitPidFile name of the file that contains the init pid |
|
| 45 |
-const InitPidFile = "init.pid" |
|
| 46 |
- |
|
| 47 | 44 |
// Init represents an initial process for a container |
| 48 | 45 |
type Init struct {
|
| 49 | 46 |
wg sync.WaitGroup |
| ... | ... |
@@ -63,7 +60,7 @@ type Init struct {
|
| 63 | 63 |
Bundle string |
| 64 | 64 |
console console.Console |
| 65 | 65 |
Platform proc.Platform |
| 66 |
- io runc.IO |
|
| 66 |
+ io *processIO |
|
| 67 | 67 |
runtime *runc.Runc |
| 68 | 68 |
status int |
| 69 | 69 |
exited time.Time |
| ... | ... |
@@ -111,49 +108,33 @@ func New(id string, runtime *runc.Runc, stdio proc.Stdio) *Init {
|
| 111 | 111 |
// Create the process with the provided config |
| 112 | 112 |
func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
|
| 113 | 113 |
var ( |
| 114 |
- err error |
|
| 115 |
- socket *runc.Socket |
|
| 114 |
+ err error |
|
| 115 |
+ socket *runc.Socket |
|
| 116 |
+ pio *processIO |
|
| 117 |
+ pidFile = newPidFile(p.Bundle) |
|
| 116 | 118 |
) |
| 117 | 119 |
if r.Terminal {
|
| 118 | 120 |
if socket, err = runc.NewTempConsoleSocket(); err != nil {
|
| 119 | 121 |
return errors.Wrap(err, "failed to create OCI runtime console socket") |
| 120 | 122 |
} |
| 121 | 123 |
defer socket.Close() |
| 122 |
- } else if hasNoIO(r) {
|
|
| 123 |
- if p.io, err = runc.NewNullIO(); err != nil {
|
|
| 124 |
- return errors.Wrap(err, "creating new NULL IO") |
|
| 125 |
- } |
|
| 126 | 124 |
} else {
|
| 127 |
- if p.io, err = runc.NewPipeIO(p.IoUID, p.IoGID, withConditionalIO(p.stdio)); err != nil {
|
|
| 128 |
- return errors.Wrap(err, "failed to create OCI runtime io pipes") |
|
| 125 |
+ if pio, err = createIO(ctx, p.id, p.IoUID, p.IoGID, p.stdio); err != nil {
|
|
| 126 |
+ return errors.Wrap(err, "failed to create init process I/O") |
|
| 129 | 127 |
} |
| 128 |
+ p.io = pio |
|
| 130 | 129 |
} |
| 131 |
- pidFile := filepath.Join(p.Bundle, InitPidFile) |
|
| 132 | 130 |
if r.Checkpoint != "" {
|
| 133 |
- opts := &runc.RestoreOpts{
|
|
| 134 |
- CheckpointOpts: runc.CheckpointOpts{
|
|
| 135 |
- ImagePath: r.Checkpoint, |
|
| 136 |
- WorkDir: p.CriuWorkPath, |
|
| 137 |
- ParentPath: r.ParentCheckpoint, |
|
| 138 |
- }, |
|
| 139 |
- PidFile: pidFile, |
|
| 140 |
- IO: p.io, |
|
| 141 |
- NoPivot: p.NoPivotRoot, |
|
| 142 |
- Detach: true, |
|
| 143 |
- NoSubreaper: true, |
|
| 144 |
- } |
|
| 145 |
- p.initState = &createdCheckpointState{
|
|
| 146 |
- p: p, |
|
| 147 |
- opts: opts, |
|
| 148 |
- } |
|
| 149 |
- return nil |
|
| 131 |
+ return p.createCheckpointedState(r, pidFile) |
|
| 150 | 132 |
} |
| 151 | 133 |
opts := &runc.CreateOpts{
|
| 152 |
- PidFile: pidFile, |
|
| 153 |
- IO: p.io, |
|
| 134 |
+ PidFile: pidFile.Path(), |
|
| 154 | 135 |
NoPivot: p.NoPivotRoot, |
| 155 | 136 |
NoNewKeyring: p.NoNewKeyring, |
| 156 | 137 |
} |
| 138 |
+ if p.io != nil {
|
|
| 139 |
+ opts.IO = p.io.IO() |
|
| 140 |
+ } |
|
| 157 | 141 |
if socket != nil {
|
| 158 | 142 |
opts.ConsoleSocket = socket |
| 159 | 143 |
} |
| ... | ... |
@@ -161,14 +142,10 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
|
| 161 | 161 |
return p.runtimeError(err, "OCI runtime create failed") |
| 162 | 162 |
} |
| 163 | 163 |
if r.Stdin != "" {
|
| 164 |
- sc, err := fifo.OpenFifo(context.Background(), r.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) |
|
| 165 |
- if err != nil {
|
|
| 166 |
- return errors.Wrapf(err, "failed to open stdin fifo %s", r.Stdin) |
|
| 164 |
+ if err := p.openStdin(r.Stdin); err != nil {
|
|
| 165 |
+ return err |
|
| 167 | 166 |
} |
| 168 |
- p.stdin = sc |
|
| 169 |
- p.closers = append(p.closers, sc) |
|
| 170 | 167 |
} |
| 171 |
- var copyWaitGroup sync.WaitGroup |
|
| 172 | 168 |
ctx, cancel := context.WithTimeout(ctx, 30*time.Second) |
| 173 | 169 |
defer cancel() |
| 174 | 170 |
if socket != nil {
|
| ... | ... |
@@ -176,19 +153,17 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
|
| 176 | 176 |
if err != nil {
|
| 177 | 177 |
return errors.Wrap(err, "failed to retrieve console master") |
| 178 | 178 |
} |
| 179 |
- console, err = p.Platform.CopyConsole(ctx, console, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup) |
|
| 179 |
+ console, err = p.Platform.CopyConsole(ctx, console, r.Stdin, r.Stdout, r.Stderr, &p.wg) |
|
| 180 | 180 |
if err != nil {
|
| 181 | 181 |
return errors.Wrap(err, "failed to start console copy") |
| 182 | 182 |
} |
| 183 | 183 |
p.console = console |
| 184 |
- } else if !hasNoIO(r) {
|
|
| 185 |
- if err := copyPipes(ctx, p.io, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup); err != nil {
|
|
| 184 |
+ } else {
|
|
| 185 |
+ if err := pio.Copy(ctx, &p.wg); err != nil {
|
|
| 186 | 186 |
return errors.Wrap(err, "failed to start io pipe copy") |
| 187 | 187 |
} |
| 188 | 188 |
} |
| 189 |
- |
|
| 190 |
- copyWaitGroup.Wait() |
|
| 191 |
- pid, err := runc.ReadPidFile(pidFile) |
|
| 189 |
+ pid, err := pidFile.Read() |
|
| 192 | 190 |
if err != nil {
|
| 193 | 191 |
return errors.Wrap(err, "failed to retrieve OCI runtime container pid") |
| 194 | 192 |
} |
| ... | ... |
@@ -196,6 +171,36 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) error {
|
| 196 | 196 |
return nil |
| 197 | 197 |
} |
| 198 | 198 |
|
| 199 |
+func (p *Init) openStdin(path string) error {
|
|
| 200 |
+ sc, err := fifo.OpenFifo(context.Background(), path, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) |
|
| 201 |
+ if err != nil {
|
|
| 202 |
+ return errors.Wrapf(err, "failed to open stdin fifo %s", path) |
|
| 203 |
+ } |
|
| 204 |
+ p.stdin = sc |
|
| 205 |
+ p.closers = append(p.closers, sc) |
|
| 206 |
+ return nil |
|
| 207 |
+} |
|
| 208 |
+ |
|
| 209 |
+func (p *Init) createCheckpointedState(r *CreateConfig, pidFile *pidFile) error {
|
|
| 210 |
+ opts := &runc.RestoreOpts{
|
|
| 211 |
+ CheckpointOpts: runc.CheckpointOpts{
|
|
| 212 |
+ ImagePath: r.Checkpoint, |
|
| 213 |
+ WorkDir: p.CriuWorkPath, |
|
| 214 |
+ ParentPath: r.ParentCheckpoint, |
|
| 215 |
+ }, |
|
| 216 |
+ PidFile: pidFile.Path(), |
|
| 217 |
+ IO: p.io.IO(), |
|
| 218 |
+ NoPivot: p.NoPivotRoot, |
|
| 219 |
+ Detach: true, |
|
| 220 |
+ NoSubreaper: true, |
|
| 221 |
+ } |
|
| 222 |
+ p.initState = &createdCheckpointState{
|
|
| 223 |
+ p: p, |
|
| 224 |
+ opts: opts, |
|
| 225 |
+ } |
|
| 226 |
+ return nil |
|
| 227 |
+} |
|
| 228 |
+ |
|
| 199 | 229 |
// Wait for the process to exit |
| 200 | 230 |
func (p *Init) Wait() {
|
| 201 | 231 |
<-p.waitBlock |
| ... | ... |
@@ -20,12 +20,9 @@ package proc |
| 20 | 20 |
|
| 21 | 21 |
import ( |
| 22 | 22 |
"context" |
| 23 |
- "sync" |
|
| 24 |
- "syscall" |
|
| 25 | 23 |
|
| 26 | 24 |
"github.com/containerd/console" |
| 27 | 25 |
"github.com/containerd/containerd/runtime/proc" |
| 28 |
- "github.com/containerd/fifo" |
|
| 29 | 26 |
runc "github.com/containerd/go-runc" |
| 30 | 27 |
google_protobuf "github.com/gogo/protobuf/types" |
| 31 | 28 |
"github.com/pkg/errors" |
| ... | ... |
@@ -172,31 +169,25 @@ func (s *createdCheckpointState) Start(ctx context.Context) error {
|
| 172 | 172 |
return p.runtimeError(err, "OCI runtime restore failed") |
| 173 | 173 |
} |
| 174 | 174 |
if sio.Stdin != "" {
|
| 175 |
- sc, err := fifo.OpenFifo(context.Background(), sio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) |
|
| 176 |
- if err != nil {
|
|
| 175 |
+ if err := p.openStdin(sio.Stdin); err != nil {
|
|
| 177 | 176 |
return errors.Wrapf(err, "failed to open stdin fifo %s", sio.Stdin) |
| 178 | 177 |
} |
| 179 |
- p.stdin = sc |
|
| 180 |
- p.closers = append(p.closers, sc) |
|
| 181 | 178 |
} |
| 182 |
- var copyWaitGroup sync.WaitGroup |
|
| 183 | 179 |
if socket != nil {
|
| 184 | 180 |
console, err := socket.ReceiveMaster() |
| 185 | 181 |
if err != nil {
|
| 186 | 182 |
return errors.Wrap(err, "failed to retrieve console master") |
| 187 | 183 |
} |
| 188 |
- console, err = p.Platform.CopyConsole(ctx, console, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg, ©WaitGroup) |
|
| 184 |
+ console, err = p.Platform.CopyConsole(ctx, console, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg) |
|
| 189 | 185 |
if err != nil {
|
| 190 | 186 |
return errors.Wrap(err, "failed to start console copy") |
| 191 | 187 |
} |
| 192 | 188 |
p.console = console |
| 193 |
- } else if !sio.IsNull() {
|
|
| 194 |
- if err := copyPipes(ctx, p.io, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg, ©WaitGroup); err != nil {
|
|
| 189 |
+ } else {
|
|
| 190 |
+ if err := p.io.Copy(ctx, &p.wg); err != nil {
|
|
| 195 | 191 |
return errors.Wrap(err, "failed to start io pipe copy") |
| 196 | 192 |
} |
| 197 | 193 |
} |
| 198 |
- |
|
| 199 |
- copyWaitGroup.Wait() |
|
| 200 | 194 |
pid, err := runc.ReadPidFile(s.opts.PidFile) |
| 201 | 195 |
if err != nil {
|
| 202 | 196 |
return errors.Wrap(err, "failed to retrieve OCI runtime container pid") |
| ... | ... |
@@ -22,12 +22,18 @@ import ( |
| 22 | 22 |
"context" |
| 23 | 23 |
"fmt" |
| 24 | 24 |
"io" |
| 25 |
+ "net/url" |
|
| 25 | 26 |
"os" |
| 27 |
+ "os/exec" |
|
| 28 |
+ "path/filepath" |
|
| 26 | 29 |
"sync" |
| 27 | 30 |
"syscall" |
| 28 | 31 |
|
| 32 |
+ "github.com/containerd/containerd/namespaces" |
|
| 33 |
+ "github.com/containerd/containerd/runtime/proc" |
|
| 29 | 34 |
"github.com/containerd/fifo" |
| 30 | 35 |
runc "github.com/containerd/go-runc" |
| 36 |
+ "github.com/pkg/errors" |
|
| 31 | 37 |
) |
| 32 | 38 |
|
| 33 | 39 |
var bufPool = sync.Pool{
|
| ... | ... |
@@ -37,6 +43,84 @@ var bufPool = sync.Pool{
|
| 37 | 37 |
}, |
| 38 | 38 |
} |
| 39 | 39 |
|
| 40 |
+type processIO struct {
|
|
| 41 |
+ io runc.IO |
|
| 42 |
+ |
|
| 43 |
+ uri *url.URL |
|
| 44 |
+ copy bool |
|
| 45 |
+ stdio proc.Stdio |
|
| 46 |
+} |
|
| 47 |
+ |
|
| 48 |
+func (p *processIO) Close() error {
|
|
| 49 |
+ if p.io != nil {
|
|
| 50 |
+ return p.io.Close() |
|
| 51 |
+ } |
|
| 52 |
+ return nil |
|
| 53 |
+} |
|
| 54 |
+ |
|
| 55 |
+func (p *processIO) IO() runc.IO {
|
|
| 56 |
+ return p.io |
|
| 57 |
+} |
|
| 58 |
+ |
|
| 59 |
+func (p *processIO) Copy(ctx context.Context, wg *sync.WaitGroup) error {
|
|
| 60 |
+ if !p.copy {
|
|
| 61 |
+ return nil |
|
| 62 |
+ } |
|
| 63 |
+ var cwg sync.WaitGroup |
|
| 64 |
+ if err := copyPipes(ctx, p.IO(), p.stdio.Stdin, p.stdio.Stdout, p.stdio.Stderr, wg, &cwg); err != nil {
|
|
| 65 |
+ return errors.Wrap(err, "unable to copy pipes") |
|
| 66 |
+ } |
|
| 67 |
+ cwg.Wait() |
|
| 68 |
+ return nil |
|
| 69 |
+} |
|
| 70 |
+ |
|
| 71 |
+func createIO(ctx context.Context, id string, ioUID, ioGID int, stdio proc.Stdio) (*processIO, error) {
|
|
| 72 |
+ pio := &processIO{
|
|
| 73 |
+ stdio: stdio, |
|
| 74 |
+ } |
|
| 75 |
+ if stdio.IsNull() {
|
|
| 76 |
+ i, err := runc.NewNullIO() |
|
| 77 |
+ if err != nil {
|
|
| 78 |
+ return nil, err |
|
| 79 |
+ } |
|
| 80 |
+ pio.io = i |
|
| 81 |
+ return pio, nil |
|
| 82 |
+ } |
|
| 83 |
+ u, err := url.Parse(stdio.Stdout) |
|
| 84 |
+ if err != nil {
|
|
| 85 |
+ return nil, errors.Wrap(err, "unable to parse stdout uri") |
|
| 86 |
+ } |
|
| 87 |
+ if u.Scheme == "" {
|
|
| 88 |
+ u.Scheme = "fifo" |
|
| 89 |
+ } |
|
| 90 |
+ pio.uri = u |
|
| 91 |
+ switch u.Scheme {
|
|
| 92 |
+ case "fifo": |
|
| 93 |
+ pio.copy = true |
|
| 94 |
+ pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio)) |
|
| 95 |
+ case "binary": |
|
| 96 |
+ pio.io, err = newBinaryIO(ctx, id, u) |
|
| 97 |
+ case "file": |
|
| 98 |
+ if err := os.MkdirAll(filepath.Dir(u.Host), 0755); err != nil {
|
|
| 99 |
+ return nil, err |
|
| 100 |
+ } |
|
| 101 |
+ var f *os.File |
|
| 102 |
+ f, err = os.OpenFile(u.Host, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) |
|
| 103 |
+ if err != nil {
|
|
| 104 |
+ return nil, err |
|
| 105 |
+ } |
|
| 106 |
+ f.Close() |
|
| 107 |
+ pio.copy = true |
|
| 108 |
+ pio.io, err = runc.NewPipeIO(ioUID, ioGID, withConditionalIO(stdio)) |
|
| 109 |
+ default: |
|
| 110 |
+ return nil, errors.Errorf("unknown STDIO scheme %s", u.Scheme)
|
|
| 111 |
+ } |
|
| 112 |
+ if err != nil {
|
|
| 113 |
+ return nil, err |
|
| 114 |
+ } |
|
| 115 |
+ return pio, nil |
|
| 116 |
+} |
|
| 117 |
+ |
|
| 40 | 118 |
func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error {
|
| 41 | 119 |
var sameFile io.WriteCloser |
| 42 | 120 |
for _, i := range []struct {
|
| ... | ... |
@@ -143,3 +227,134 @@ func isFifo(path string) (bool, error) {
|
| 143 | 143 |
} |
| 144 | 144 |
return false, nil |
| 145 | 145 |
} |
| 146 |
+ |
|
| 147 |
+func newBinaryIO(ctx context.Context, id string, uri *url.URL) (runc.IO, error) {
|
|
| 148 |
+ ns, err := namespaces.NamespaceRequired(ctx) |
|
| 149 |
+ if err != nil {
|
|
| 150 |
+ return nil, err |
|
| 151 |
+ } |
|
| 152 |
+ var args []string |
|
| 153 |
+ for k, vs := range uri.Query() {
|
|
| 154 |
+ args = append(args, k) |
|
| 155 |
+ if len(vs) > 0 {
|
|
| 156 |
+ args = append(args, vs[0]) |
|
| 157 |
+ } |
|
| 158 |
+ } |
|
| 159 |
+ ctx, cancel := context.WithCancel(ctx) |
|
| 160 |
+ cmd := exec.CommandContext(ctx, uri.Host, args...) |
|
| 161 |
+ cmd.Env = append(cmd.Env, |
|
| 162 |
+ "CONTAINER_ID="+id, |
|
| 163 |
+ "CONTAINER_NAMESPACE="+ns, |
|
| 164 |
+ ) |
|
| 165 |
+ out, err := newPipe() |
|
| 166 |
+ if err != nil {
|
|
| 167 |
+ return nil, err |
|
| 168 |
+ } |
|
| 169 |
+ serr, err := newPipe() |
|
| 170 |
+ if err != nil {
|
|
| 171 |
+ return nil, err |
|
| 172 |
+ } |
|
| 173 |
+ r, w, err := os.Pipe() |
|
| 174 |
+ if err != nil {
|
|
| 175 |
+ return nil, err |
|
| 176 |
+ } |
|
| 177 |
+ cmd.ExtraFiles = append(cmd.ExtraFiles, out.r, serr.r, w) |
|
| 178 |
+ // don't need to register this with the reaper or wait when |
|
| 179 |
+ // running inside a shim |
|
| 180 |
+ if err := cmd.Start(); err != nil {
|
|
| 181 |
+ return nil, err |
|
| 182 |
+ } |
|
| 183 |
+ // close our side of the pipe after start |
|
| 184 |
+ w.Close() |
|
| 185 |
+ // wait for the logging binary to be ready |
|
| 186 |
+ b := make([]byte, 1) |
|
| 187 |
+ if _, err := r.Read(b); err != nil && err != io.EOF {
|
|
| 188 |
+ return nil, err |
|
| 189 |
+ } |
|
| 190 |
+ return &binaryIO{
|
|
| 191 |
+ cmd: cmd, |
|
| 192 |
+ cancel: cancel, |
|
| 193 |
+ out: out, |
|
| 194 |
+ err: serr, |
|
| 195 |
+ }, nil |
|
| 196 |
+} |
|
| 197 |
+ |
|
| 198 |
+type binaryIO struct {
|
|
| 199 |
+ cmd *exec.Cmd |
|
| 200 |
+ cancel func() |
|
| 201 |
+ out, err *pipe |
|
| 202 |
+} |
|
| 203 |
+ |
|
| 204 |
+func (b *binaryIO) CloseAfterStart() (err error) {
|
|
| 205 |
+ for _, v := range []*pipe{
|
|
| 206 |
+ b.out, |
|
| 207 |
+ b.err, |
|
| 208 |
+ } {
|
|
| 209 |
+ if v != nil {
|
|
| 210 |
+ if cerr := v.r.Close(); err == nil {
|
|
| 211 |
+ err = cerr |
|
| 212 |
+ } |
|
| 213 |
+ } |
|
| 214 |
+ } |
|
| 215 |
+ return err |
|
| 216 |
+} |
|
| 217 |
+ |
|
| 218 |
+func (b *binaryIO) Close() (err error) {
|
|
| 219 |
+ b.cancel() |
|
| 220 |
+ for _, v := range []*pipe{
|
|
| 221 |
+ b.out, |
|
| 222 |
+ b.err, |
|
| 223 |
+ } {
|
|
| 224 |
+ if v != nil {
|
|
| 225 |
+ if cerr := v.Close(); err == nil {
|
|
| 226 |
+ err = cerr |
|
| 227 |
+ } |
|
| 228 |
+ } |
|
| 229 |
+ } |
|
| 230 |
+ return err |
|
| 231 |
+} |
|
| 232 |
+ |
|
| 233 |
+func (b *binaryIO) Stdin() io.WriteCloser {
|
|
| 234 |
+ return nil |
|
| 235 |
+} |
|
| 236 |
+ |
|
| 237 |
+func (b *binaryIO) Stdout() io.ReadCloser {
|
|
| 238 |
+ return nil |
|
| 239 |
+} |
|
| 240 |
+ |
|
| 241 |
+func (b *binaryIO) Stderr() io.ReadCloser {
|
|
| 242 |
+ return nil |
|
| 243 |
+} |
|
| 244 |
+ |
|
| 245 |
+func (b *binaryIO) Set(cmd *exec.Cmd) {
|
|
| 246 |
+ if b.out != nil {
|
|
| 247 |
+ cmd.Stdout = b.out.w |
|
| 248 |
+ } |
|
| 249 |
+ if b.err != nil {
|
|
| 250 |
+ cmd.Stderr = b.err.w |
|
| 251 |
+ } |
|
| 252 |
+} |
|
| 253 |
+ |
|
| 254 |
+func newPipe() (*pipe, error) {
|
|
| 255 |
+ r, w, err := os.Pipe() |
|
| 256 |
+ if err != nil {
|
|
| 257 |
+ return nil, err |
|
| 258 |
+ } |
|
| 259 |
+ return &pipe{
|
|
| 260 |
+ r: r, |
|
| 261 |
+ w: w, |
|
| 262 |
+ }, nil |
|
| 263 |
+} |
|
| 264 |
+ |
|
| 265 |
+type pipe struct {
|
|
| 266 |
+ r *os.File |
|
| 267 |
+ w *os.File |
|
| 268 |
+} |
|
| 269 |
+ |
|
| 270 |
+func (p *pipe) Close() error {
|
|
| 271 |
+ err := p.w.Close() |
|
| 272 |
+ if rerr := p.r.Close(); err == nil {
|
|
| 273 |
+ err = rerr |
|
| 274 |
+ } |
|
| 275 |
+ return err |
|
| 276 |
+} |
| ... | ... |
@@ -20,8 +20,10 @@ package proc |
| 20 | 20 |
|
| 21 | 21 |
import ( |
| 22 | 22 |
"encoding/json" |
| 23 |
+ "fmt" |
|
| 23 | 24 |
"io" |
| 24 | 25 |
"os" |
| 26 |
+ "path/filepath" |
|
| 25 | 27 |
"strings" |
| 26 | 28 |
"sync" |
| 27 | 29 |
"time" |
| ... | ... |
@@ -114,6 +116,29 @@ func checkKillError(err error) error {
|
| 114 | 114 |
return errors.Wrapf(err, "unknown error after kill") |
| 115 | 115 |
} |
| 116 | 116 |
|
| 117 |
-func hasNoIO(r *CreateConfig) bool {
|
|
| 118 |
- return r.Stdin == "" && r.Stdout == "" && r.Stderr == "" |
|
| 117 |
+// InitPidFile name of the file that contains the init pid |
|
| 118 |
+const InitPidFile = "init.pid" |
|
| 119 |
+ |
|
| 120 |
+func newPidFile(bundle string) *pidFile {
|
|
| 121 |
+ return &pidFile{
|
|
| 122 |
+ path: filepath.Join(bundle, InitPidFile), |
|
| 123 |
+ } |
|
| 124 |
+} |
|
| 125 |
+ |
|
| 126 |
+func newExecPidFile(bundle, id string) *pidFile {
|
|
| 127 |
+ return &pidFile{
|
|
| 128 |
+ path: filepath.Join(bundle, fmt.Sprintf("%s.pid", id)),
|
|
| 129 |
+ } |
|
| 130 |
+} |
|
| 131 |
+ |
|
| 132 |
+type pidFile struct {
|
|
| 133 |
+ path string |
|
| 134 |
+} |
|
| 135 |
+ |
|
| 136 |
+func (p *pidFile) Path() string {
|
|
| 137 |
+ return p.path |
|
| 138 |
+} |
|
| 139 |
+ |
|
| 140 |
+func (p *pidFile) Read() (int, error) {
|
|
| 141 |
+ return runc.ReadPidFile(p.path) |
|
| 119 | 142 |
} |
| ... | ... |
@@ -20,10 +20,12 @@ package client |
| 20 | 20 |
|
| 21 | 21 |
import ( |
| 22 | 22 |
"context" |
| 23 |
+ "fmt" |
|
| 23 | 24 |
"io" |
| 24 | 25 |
"net" |
| 25 | 26 |
"os" |
| 26 | 27 |
"os/exec" |
| 28 |
+ "path/filepath" |
|
| 27 | 29 |
"strings" |
| 28 | 30 |
"sync" |
| 29 | 31 |
"syscall" |
| ... | ... |
@@ -107,6 +109,10 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa |
| 107 | 107 |
"address": address, |
| 108 | 108 |
"debug": debug, |
| 109 | 109 |
}).Infof("shim %s started", binary)
|
| 110 |
+ |
|
| 111 |
+ if err := writeAddress(filepath.Join(config.Path, "address"), address); err != nil {
|
|
| 112 |
+ return nil, nil, err |
|
| 113 |
+ } |
|
| 110 | 114 |
// set shim in cgroup if it is provided |
| 111 | 115 |
if cgroup != "" {
|
| 112 | 116 |
if err := setCgroup(cgroup, cmd); err != nil {
|
| ... | ... |
@@ -166,6 +172,25 @@ func newCommand(binary, daemonAddress string, debug bool, config shim.Config, so |
| 166 | 166 |
return cmd, nil |
| 167 | 167 |
} |
| 168 | 168 |
|
| 169 |
+// writeAddress writes a address file atomically |
|
| 170 |
+func writeAddress(path, address string) error {
|
|
| 171 |
+ path, err := filepath.Abs(path) |
|
| 172 |
+ if err != nil {
|
|
| 173 |
+ return err |
|
| 174 |
+ } |
|
| 175 |
+ tempPath := filepath.Join(filepath.Dir(path), fmt.Sprintf(".%s", filepath.Base(path)))
|
|
| 176 |
+ f, err := os.OpenFile(tempPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666) |
|
| 177 |
+ if err != nil {
|
|
| 178 |
+ return err |
|
| 179 |
+ } |
|
| 180 |
+ _, err = f.WriteString(address) |
|
| 181 |
+ f.Close() |
|
| 182 |
+ if err != nil {
|
|
| 183 |
+ return err |
|
| 184 |
+ } |
|
| 185 |
+ return os.Rename(tempPath, path) |
|
| 186 |
+} |
|
| 187 |
+ |
|
| 169 | 188 |
func newSocket(address string) (*net.UnixListener, error) {
|
| 170 | 189 |
if len(address) > 106 {
|
| 171 | 190 |
return nil, errors.Errorf("%q: unix socket path too long (> 106)", address)
|
| ... | ... |
@@ -31,7 +31,7 @@ type linuxPlatform struct {
|
| 31 | 31 |
epoller *console.Epoller |
| 32 | 32 |
} |
| 33 | 33 |
|
| 34 |
-func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) {
|
|
| 34 |
+func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg *sync.WaitGroup) (console.Console, error) {
|
|
| 35 | 35 |
if p.epoller == nil {
|
| 36 | 36 |
return nil, errors.New("uninitialized epoller")
|
| 37 | 37 |
} |
| ... | ... |
@@ -40,6 +40,7 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console |
| 40 | 40 |
if err != nil {
|
| 41 | 41 |
return nil, err |
| 42 | 42 |
} |
| 43 |
+ var cwg sync.WaitGroup |
|
| 43 | 44 |
|
| 44 | 45 |
if stdin != "" {
|
| 45 | 46 |
in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0) |
| ... | ... |
@@ -77,6 +78,7 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console |
| 77 | 77 |
outw.Close() |
| 78 | 78 |
wg.Done() |
| 79 | 79 |
}() |
| 80 |
+ cwg.Wait() |
|
| 80 | 81 |
return epollConsole, nil |
| 81 | 82 |
} |
| 82 | 83 |
|
| ... | ... |
@@ -31,7 +31,8 @@ import ( |
| 31 | 31 |
type unixPlatform struct {
|
| 32 | 32 |
} |
| 33 | 33 |
|
| 34 |
-func (p *unixPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) {
|
|
| 34 |
+func (p *unixPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg *sync.WaitGroup) (console.Console, error) {
|
|
| 35 |
+ var cwg sync.WaitGroup |
|
| 35 | 36 |
if stdin != "" {
|
| 36 | 37 |
in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0) |
| 37 | 38 |
if err != nil {
|
| ... | ... |
@@ -67,6 +68,7 @@ func (p *unixPlatform) CopyConsole(ctx context.Context, console console.Console, |
| 67 | 67 |
outw.Close() |
| 68 | 68 |
wg.Done() |
| 69 | 69 |
}() |
| 70 |
+ cwg.Wait() |
|
| 70 | 71 |
return console, nil |
| 71 | 72 |
} |
| 72 | 73 |
|
| ... | ... |
@@ -173,6 +173,68 @@ The Runtime v2 supports an async event model. In order for the an upstream calle |
| 173 | 173 |
| `runtime.TaskExitEventTopic` | MUST (follow `TaskExecStartedEventTopic`) | When an exec (other than the init exec) exits expected or unexpected | |
| 174 | 174 |
| `runtime.TaskDeleteEventTopic` | SHOULD (follow `TaskExitEventTopic` or `TaskExecAddedEventTopic` if never started) | When an exec is removed from a shim | |
| 175 | 175 |
|
| 176 |
+#### Logging |
|
| 177 |
+ |
|
| 178 |
+Shims may support pluggable logging via STDIO URIs. |
|
| 179 |
+Currently supported schemes for logging are: 
|
| 180 |
+ |
|
| 181 |
+* fifo - Linux |
|
| 182 |
+* binary - Linux & Windows |
|
| 183 |
+* file - Linux & Windows |
|
| 184 |
+* npipe - Windows |
|
| 185 |
+ |
|
| 186 |
+Binary logging has the ability to forward a container's STDIO to an external binary for consumption. 
|
| 187 |
+A sample logging driver that forwards the container's STDOUT and STDERR to `journald` is: |
|
| 188 |
+ |
|
| 189 |
+```go |
|
| 190 |
+package main |
|
| 191 |
+ |
|
| 192 |
+import ( |
|
| 193 |
+ "bufio" |
|
| 194 |
+ "context" |
|
| 195 |
+ "fmt" |
|
| 196 |
+ "io" |
|
| 197 |
+ "sync" |
|
| 198 |
+ |
|
| 199 |
+ "github.com/containerd/containerd/runtime/v2/logging" |
|
| 200 |
+ "github.com/coreos/go-systemd/journal" |
|
| 201 |
+) |
|
| 202 |
+ |
|
| 203 |
+func main() {
|
|
| 204 |
+ logging.Run(log) |
|
| 205 |
+} |
|
| 206 |
+ |
|
| 207 |
+func log(ctx context.Context, config *logging.Config, ready func() error) error {
|
|
| 208 |
+ // construct any log metadata for the container |
|
| 209 |
+ vars := map[string]string{
|
|
| 210 |
+ "SYSLOG_IDENTIFIER": fmt.Sprintf("%s:%s", config.Namespace, config.ID),
|
|
| 211 |
+ } |
|
| 212 |
+ var wg sync.WaitGroup |
|
| 213 |
+ wg.Add(2) |
|
| 214 |
+ // forward both stdout and stderr to the journal |
|
| 215 |
+ go copy(&wg, config.Stdout, journal.PriInfo, vars) |
|
| 216 |
+ go copy(&wg, config.Stderr, journal.PriErr, vars) |
|
| 217 |
+ |
|
| 218 |
+ // signal that we are ready and setup for the container to be started |
|
| 219 |
+ if err := ready(); err != nil {
|
|
| 220 |
+ return err |
|
| 221 |
+ } |
|
| 222 |
+ wg.Wait() |
|
| 223 |
+ return nil |
|
| 224 |
+} |
|
| 225 |
+ |
|
| 226 |
+func copy(wg *sync.WaitGroup, r io.Reader, pri journal.Priority, vars map[string]string) {
|
|
| 227 |
+ defer wg.Done() |
|
| 228 |
+ s := bufio.NewScanner(r) |
|
| 229 |
+ for s.Scan() {
|
|
| 230 |
+ if s.Err() != nil {
|
|
| 231 |
+ return |
|
| 232 |
+ } |
|
| 233 |
+ journal.Send(s.Text(), pri, vars) |
|
| 234 |
+ } |
|
| 235 |
+} |
|
| 236 |
+``` |
|
| 237 |
+ |
|
| 176 | 238 |
### Other |
| 177 | 239 |
|
| 178 | 240 |
#### Unsupported rpcs |
| 179 | 241 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,463 @@ |
| 0 |
+/* |
|
| 1 |
+ Copyright The containerd Authors. |
|
| 2 |
+ |
|
| 3 |
+ Licensed under the Apache License, Version 2.0 (the "License"); |
|
| 4 |
+ you may not use this file except in compliance with the License. |
|
| 5 |
+ You may obtain a copy of the License at |
|
| 6 |
+ |
|
| 7 |
+ http://www.apache.org/licenses/LICENSE-2.0 |
|
| 8 |
+ |
|
| 9 |
+ Unless required by applicable law or agreed to in writing, software |
|
| 10 |
+ distributed under the License is distributed on an "AS IS" BASIS, |
|
| 11 |
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
| 12 |
+ See the License for the specific language governing permissions and |
|
| 13 |
+ limitations under the License. |
|
| 14 |
+*/ |
|
| 15 |
+ |
|
| 16 |
+package contentserver |
|
| 17 |
+ |
|
| 18 |
+import ( |
|
| 19 |
+ "context" |
|
| 20 |
+ "io" |
|
| 21 |
+ "sync" |
|
| 22 |
+ |
|
| 23 |
+ api "github.com/containerd/containerd/api/services/content/v1" |
|
| 24 |
+ "github.com/containerd/containerd/content" |
|
| 25 |
+ "github.com/containerd/containerd/errdefs" |
|
| 26 |
+ "github.com/containerd/containerd/log" |
|
| 27 |
+ ptypes "github.com/gogo/protobuf/types" |
|
| 28 |
+ digest "github.com/opencontainers/go-digest" |
|
| 29 |
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1" |
|
| 30 |
+ "github.com/pkg/errors" |
|
| 31 |
+ "github.com/sirupsen/logrus" |
|
| 32 |
+ "google.golang.org/grpc" |
|
| 33 |
+ "google.golang.org/grpc/codes" |
|
| 34 |
+ "google.golang.org/grpc/status" |
|
| 35 |
+) |
|
| 36 |
+ |
|
| 37 |
+type service struct {
|
|
| 38 |
+ store content.Store |
|
| 39 |
+} |
|
| 40 |
+ |
|
| 41 |
+var bufPool = sync.Pool{
|
|
| 42 |
+ New: func() interface{} {
|
|
| 43 |
+ buffer := make([]byte, 1<<20) |
|
| 44 |
+ return &buffer |
|
| 45 |
+ }, |
|
| 46 |
+} |
|
| 47 |
+ |
|
| 48 |
+// New returns the content GRPC server |
|
| 49 |
+func New(cs content.Store) api.ContentServer {
|
|
| 50 |
+ return &service{store: cs}
|
|
| 51 |
+} |
|
| 52 |
+ |
|
| 53 |
+func (s *service) Register(server *grpc.Server) error {
|
|
| 54 |
+ api.RegisterContentServer(server, s) |
|
| 55 |
+ return nil |
|
| 56 |
+} |
|
| 57 |
+ |
|
| 58 |
+func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) {
|
|
| 59 |
+ if err := req.Digest.Validate(); err != nil {
|
|
| 60 |
+ return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest) |
|
| 61 |
+ } |
|
| 62 |
+ |
|
| 63 |
+ bi, err := s.store.Info(ctx, req.Digest) |
|
| 64 |
+ if err != nil {
|
|
| 65 |
+ return nil, errdefs.ToGRPC(err) |
|
| 66 |
+ } |
|
| 67 |
+ |
|
| 68 |
+ return &api.InfoResponse{
|
|
| 69 |
+ Info: infoToGRPC(bi), |
|
| 70 |
+ }, nil |
|
| 71 |
+} |
|
| 72 |
+ |
|
| 73 |
+func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) {
|
|
| 74 |
+ if err := req.Info.Digest.Validate(); err != nil {
|
|
| 75 |
+ return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest) |
|
| 76 |
+ } |
|
| 77 |
+ |
|
| 78 |
+ info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...) |
|
| 79 |
+ if err != nil {
|
|
| 80 |
+ return nil, errdefs.ToGRPC(err) |
|
| 81 |
+ } |
|
| 82 |
+ |
|
| 83 |
+ return &api.UpdateResponse{
|
|
| 84 |
+ Info: infoToGRPC(info), |
|
| 85 |
+ }, nil |
|
| 86 |
+} |
|
| 87 |
+ |
|
| 88 |
+func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
|
|
| 89 |
+ var ( |
|
| 90 |
+ buffer []api.Info |
|
| 91 |
+ sendBlock = func(block []api.Info) error {
|
|
| 92 |
+ // send last block |
|
| 93 |
+ return session.Send(&api.ListContentResponse{
|
|
| 94 |
+ Info: block, |
|
| 95 |
+ }) |
|
| 96 |
+ } |
|
| 97 |
+ ) |
|
| 98 |
+ |
|
| 99 |
+ if err := s.store.Walk(session.Context(), func(info content.Info) error {
|
|
| 100 |
+ buffer = append(buffer, api.Info{
|
|
| 101 |
+ Digest: info.Digest, |
|
| 102 |
+ Size_: info.Size, |
|
| 103 |
+ CreatedAt: info.CreatedAt, |
|
| 104 |
+ Labels: info.Labels, |
|
| 105 |
+ }) |
|
| 106 |
+ |
|
| 107 |
+ if len(buffer) >= 100 {
|
|
| 108 |
+ if err := sendBlock(buffer); err != nil {
|
|
| 109 |
+ return err |
|
| 110 |
+ } |
|
| 111 |
+ |
|
| 112 |
+ buffer = buffer[:0] |
|
| 113 |
+ } |
|
| 114 |
+ |
|
| 115 |
+ return nil |
|
| 116 |
+ }, req.Filters...); err != nil {
|
|
| 117 |
+ return err |
|
| 118 |
+ } |
|
| 119 |
+ |
|
| 120 |
+ if len(buffer) > 0 {
|
|
| 121 |
+ // send last block |
|
| 122 |
+ if err := sendBlock(buffer); err != nil {
|
|
| 123 |
+ return err |
|
| 124 |
+ } |
|
| 125 |
+ } |
|
| 126 |
+ |
|
| 127 |
+ return nil |
|
| 128 |
+} |
|
| 129 |
+ |
|
| 130 |
+func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) {
|
|
| 131 |
+ log.G(ctx).WithField("digest", req.Digest).Debugf("delete content")
|
|
| 132 |
+ if err := req.Digest.Validate(); err != nil {
|
|
| 133 |
+ return nil, status.Errorf(codes.InvalidArgument, err.Error()) |
|
| 134 |
+ } |
|
| 135 |
+ |
|
| 136 |
+ if err := s.store.Delete(ctx, req.Digest); err != nil {
|
|
| 137 |
+ return nil, errdefs.ToGRPC(err) |
|
| 138 |
+ } |
|
| 139 |
+ |
|
| 140 |
+ return &ptypes.Empty{}, nil
|
|
| 141 |
+} |
|
| 142 |
+ |
|
| 143 |
+func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error {
|
|
| 144 |
+ if err := req.Digest.Validate(); err != nil {
|
|
| 145 |
+ return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err) |
|
| 146 |
+ } |
|
| 147 |
+ |
|
| 148 |
+ oi, err := s.store.Info(session.Context(), req.Digest) |
|
| 149 |
+ if err != nil {
|
|
| 150 |
+ return errdefs.ToGRPC(err) |
|
| 151 |
+ } |
|
| 152 |
+ |
|
| 153 |
+ ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: req.Digest})
|
|
| 154 |
+ if err != nil {
|
|
| 155 |
+ return errdefs.ToGRPC(err) |
|
| 156 |
+ } |
|
| 157 |
+ defer ra.Close() |
|
| 158 |
+ |
|
| 159 |
+ var ( |
|
| 160 |
+ offset = req.Offset |
|
| 161 |
+ // size is read size, not the expected size of the blob (oi.Size), which the caller might not be aware of. |
|
| 162 |
+ // offset+size can be larger than oi.Size. |
|
| 163 |
+ size = req.Size_ |
|
| 164 |
+ |
|
| 165 |
+ // TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably |
|
| 166 |
+ // little inefficient for work over a fast network. We can tune this later. |
|
| 167 |
+ p = bufPool.Get().(*[]byte) |
|
| 168 |
+ ) |
|
| 169 |
+ defer bufPool.Put(p) |
|
| 170 |
+ |
|
| 171 |
+ if offset < 0 {
|
|
| 172 |
+ offset = 0 |
|
| 173 |
+ } |
|
| 174 |
+ |
|
| 175 |
+ if offset > oi.Size {
|
|
| 176 |
+ return status.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size) |
|
| 177 |
+ } |
|
| 178 |
+ |
|
| 179 |
+ if size <= 0 || offset+size > oi.Size {
|
|
| 180 |
+ size = oi.Size - offset |
|
| 181 |
+ } |
|
| 182 |
+ |
|
| 183 |
+ _, err = io.CopyBuffer( |
|
| 184 |
+ &readResponseWriter{session: session},
|
|
| 185 |
+ io.NewSectionReader(ra, offset, size), *p) |
|
| 186 |
+ return errdefs.ToGRPC(err) |
|
| 187 |
+} |
|
| 188 |
+ |
|
| 189 |
+// readResponseWriter is a writer that places the output into ReadContentRequest messages. |
|
| 190 |
+// |
|
| 191 |
+// This allows io.CopyBuffer to do the heavy lifting of chunking the responses |
|
| 192 |
+// into the buffer size. |
|
| 193 |
+type readResponseWriter struct {
|
|
| 194 |
+ offset int64 |
|
| 195 |
+ session api.Content_ReadServer |
|
| 196 |
+} |
|
| 197 |
+ |
|
| 198 |
+func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
|
|
| 199 |
+ if err := rw.session.Send(&api.ReadContentResponse{
|
|
| 200 |
+ Offset: rw.offset, |
|
| 201 |
+ Data: p, |
|
| 202 |
+ }); err != nil {
|
|
| 203 |
+ return 0, err |
|
| 204 |
+ } |
|
| 205 |
+ |
|
| 206 |
+ rw.offset += int64(len(p)) |
|
| 207 |
+ return len(p), nil |
|
| 208 |
+} |
|
| 209 |
+ |
|
| 210 |
+func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
|
|
| 211 |
+ status, err := s.store.Status(ctx, req.Ref) |
|
| 212 |
+ if err != nil {
|
|
| 213 |
+ return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref) |
|
| 214 |
+ } |
|
| 215 |
+ |
|
| 216 |
+ var resp api.StatusResponse |
|
| 217 |
+ resp.Status = &api.Status{
|
|
| 218 |
+ StartedAt: status.StartedAt, |
|
| 219 |
+ UpdatedAt: status.UpdatedAt, |
|
| 220 |
+ Ref: status.Ref, |
|
| 221 |
+ Offset: status.Offset, |
|
| 222 |
+ Total: status.Total, |
|
| 223 |
+ Expected: status.Expected, |
|
| 224 |
+ } |
|
| 225 |
+ |
|
| 226 |
+ return &resp, nil |
|
| 227 |
+} |
|
| 228 |
+ |
|
| 229 |
+func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) {
|
|
| 230 |
+ statuses, err := s.store.ListStatuses(ctx, req.Filters...) |
|
| 231 |
+ if err != nil {
|
|
| 232 |
+ return nil, errdefs.ToGRPC(err) |
|
| 233 |
+ } |
|
| 234 |
+ |
|
| 235 |
+ var resp api.ListStatusesResponse |
|
| 236 |
+ for _, status := range statuses {
|
|
| 237 |
+ resp.Statuses = append(resp.Statuses, api.Status{
|
|
| 238 |
+ StartedAt: status.StartedAt, |
|
| 239 |
+ UpdatedAt: status.UpdatedAt, |
|
| 240 |
+ Ref: status.Ref, |
|
| 241 |
+ Offset: status.Offset, |
|
| 242 |
+ Total: status.Total, |
|
| 243 |
+ Expected: status.Expected, |
|
| 244 |
+ }) |
|
| 245 |
+ } |
|
| 246 |
+ |
|
| 247 |
+ return &resp, nil |
|
| 248 |
+} |
|
| 249 |
+ |
|
| 250 |
+func (s *service) Write(session api.Content_WriteServer) (err error) {
|
|
| 251 |
+ var ( |
|
| 252 |
+ ctx = session.Context() |
|
| 253 |
+ msg api.WriteContentResponse |
|
| 254 |
+ req *api.WriteContentRequest |
|
| 255 |
+ ref string |
|
| 256 |
+ total int64 |
|
| 257 |
+ expected digest.Digest |
|
| 258 |
+ ) |
|
| 259 |
+ |
|
| 260 |
+ defer func(msg *api.WriteContentResponse) {
|
|
| 261 |
+ // pump through the last message if no error was encountered |
|
| 262 |
+ if err != nil {
|
|
| 263 |
+ if s, ok := status.FromError(err); ok && s.Code() != codes.AlreadyExists {
|
|
| 264 |
+ // TODO(stevvooe): Really need a log line here to track which |
|
| 265 |
+ // errors are actually causing failure on the server side. May want |
|
| 266 |
+ // to configure the service with an interceptor to make this work |
|
| 267 |
+ // identically across all GRPC methods. |
|
| 268 |
+ // |
|
| 269 |
+ // This is pretty noisy, so we can remove it but leave it for now. |
|
| 270 |
+ log.G(ctx).WithError(err).Error("(*service).Write failed")
|
|
| 271 |
+ } |
|
| 272 |
+ |
|
| 273 |
+ return |
|
| 274 |
+ } |
|
| 275 |
+ |
|
| 276 |
+ err = session.Send(msg) |
|
| 277 |
+ }(&msg) |
|
| 278 |
+ |
|
| 279 |
+ // handle the very first request! |
|
| 280 |
+ req, err = session.Recv() |
|
| 281 |
+ if err != nil {
|
|
| 282 |
+ return err |
|
| 283 |
+ } |
|
| 284 |
+ |
|
| 285 |
+ ref = req.Ref |
|
| 286 |
+ |
|
| 287 |
+ if ref == "" {
|
|
| 288 |
+ return status.Errorf(codes.InvalidArgument, "first message must have a reference") |
|
| 289 |
+ } |
|
| 290 |
+ |
|
| 291 |
+ fields := logrus.Fields{
|
|
| 292 |
+ "ref": ref, |
|
| 293 |
+ } |
|
| 294 |
+ total = req.Total |
|
| 295 |
+ expected = req.Expected |
|
| 296 |
+ if total > 0 {
|
|
| 297 |
+ fields["total"] = total |
|
| 298 |
+ } |
|
| 299 |
+ |
|
| 300 |
+ if expected != "" {
|
|
| 301 |
+ fields["expected"] = expected |
|
| 302 |
+ } |
|
| 303 |
+ |
|
| 304 |
+ ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields)) |
|
| 305 |
+ |
|
| 306 |
+ log.G(ctx).Debug("(*service).Write started")
|
|
| 307 |
+ // this action locks the writer for the session. |
|
| 308 |
+ wr, err := s.store.Writer(ctx, |
|
| 309 |
+ content.WithRef(ref), |
|
| 310 |
+ content.WithDescriptor(ocispec.Descriptor{Size: total, Digest: expected}))
|
|
| 311 |
+ if err != nil {
|
|
| 312 |
+ return errdefs.ToGRPC(err) |
|
| 313 |
+ } |
|
| 314 |
+ defer wr.Close() |
|
| 315 |
+ |
|
| 316 |
+ for {
|
|
| 317 |
+ msg.Action = req.Action |
|
| 318 |
+ ws, err := wr.Status() |
|
| 319 |
+ if err != nil {
|
|
| 320 |
+ return errdefs.ToGRPC(err) |
|
| 321 |
+ } |
|
| 322 |
+ |
|
| 323 |
+ msg.Offset = ws.Offset // always set the offset. |
|
| 324 |
+ |
|
| 325 |
+ // NOTE(stevvooe): In general, there are two cases underwhich a remote |
|
| 326 |
+ // writer is used. |
|
| 327 |
+ // |
|
| 328 |
+ // For pull, we almost always have this before fetching large content, |
|
| 329 |
+ // through descriptors. We allow predeclaration of the expected size |
|
| 330 |
+ // and digest. |
|
| 331 |
+ // |
|
| 332 |
+ // For push, it is more complex. If we want to cut through content into |
|
| 333 |
+ // storage, we may have no expectation until we are done processing the |
|
| 334 |
+ // content. The case here is the following: |
|
| 335 |
+ // |
|
| 336 |
+ // 1. Start writing content. |
|
| 337 |
+ // 2. Compress inline. |
|
| 338 |
+ // 3. Validate digest and size (maybe). |
|
| 339 |
+ // |
|
| 340 |
+ // Supporting these two paths is quite awkward but it lets both API |
|
| 341 |
+ // users use the same writer style for each with a minimum of overhead. |
|
| 342 |
+ if req.Expected != "" {
|
|
| 343 |
+ if expected != "" && expected != req.Expected {
|
|
| 344 |
+ log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", req.Expected, expected)
|
|
| 345 |
+ } |
|
| 346 |
+ expected = req.Expected |
|
| 347 |
+ |
|
| 348 |
+ if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
|
|
| 349 |
+ if err := wr.Close(); err != nil {
|
|
| 350 |
+ log.G(ctx).WithError(err).Error("failed to close writer")
|
|
| 351 |
+ } |
|
| 352 |
+ if err := s.store.Abort(session.Context(), ref); err != nil {
|
|
| 353 |
+ log.G(ctx).WithError(err).Error("failed to abort write")
|
|
| 354 |
+ } |
|
| 355 |
+ |
|
| 356 |
+ return status.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected) |
|
| 357 |
+ } |
|
| 358 |
+ } |
|
| 359 |
+ |
|
| 360 |
+ if req.Total > 0 {
|
|
| 361 |
+ // Update the expected total. Typically, this could be seen at |
|
| 362 |
+ // negotiation time or on a commit message. |
|
| 363 |
+ if total > 0 && req.Total != total {
|
|
| 364 |
+ log.G(ctx).Debugf("commit size differs from writer size: %v != %v", req.Total, total)
|
|
| 365 |
+ } |
|
| 366 |
+ total = req.Total |
|
| 367 |
+ } |
|
| 368 |
+ |
|
| 369 |
+ switch req.Action {
|
|
| 370 |
+ case api.WriteActionStat: |
|
| 371 |
+ msg.Digest = wr.Digest() |
|
| 372 |
+ msg.StartedAt = ws.StartedAt |
|
| 373 |
+ msg.UpdatedAt = ws.UpdatedAt |
|
| 374 |
+ msg.Total = total |
|
| 375 |
+ case api.WriteActionWrite, api.WriteActionCommit: |
|
| 376 |
+ if req.Offset > 0 {
|
|
| 377 |
+ // validate the offset if provided |
|
| 378 |
+ if req.Offset != ws.Offset {
|
|
| 379 |
+ return status.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset) |
|
| 380 |
+ } |
|
| 381 |
+ } |
|
| 382 |
+ |
|
| 383 |
+ if req.Offset == 0 && ws.Offset > 0 {
|
|
| 384 |
+ if err := wr.Truncate(req.Offset); err != nil {
|
|
| 385 |
+ return errors.Wrapf(err, "truncate failed") |
|
| 386 |
+ } |
|
| 387 |
+ msg.Offset = req.Offset |
|
| 388 |
+ } |
|
| 389 |
+ |
|
| 390 |
+ // issue the write if we actually have data. |
|
| 391 |
+ if len(req.Data) > 0 {
|
|
| 392 |
+ // While this looks like we could use io.WriterAt here, because we |
|
| 393 |
+ // maintain the offset as append only, we just issue the write. |
|
| 394 |
+ n, err := wr.Write(req.Data) |
|
| 395 |
+ if err != nil {
|
|
| 396 |
+ return errdefs.ToGRPC(err) |
|
| 397 |
+ } |
|
| 398 |
+ |
|
| 399 |
+ if n != len(req.Data) {
|
|
| 400 |
+ // TODO(stevvooe): Perhaps, we can recover this by including it |
|
| 401 |
+ // in the offset on the write return. |
|
| 402 |
+ return status.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data)) |
|
| 403 |
+ } |
|
| 404 |
+ |
|
| 405 |
+ msg.Offset += int64(n) |
|
| 406 |
+ } |
|
| 407 |
+ |
|
| 408 |
+ if req.Action == api.WriteActionCommit {
|
|
| 409 |
+ var opts []content.Opt |
|
| 410 |
+ if req.Labels != nil {
|
|
| 411 |
+ opts = append(opts, content.WithLabels(req.Labels)) |
|
| 412 |
+ } |
|
| 413 |
+ if err := wr.Commit(ctx, total, expected, opts...); err != nil {
|
|
| 414 |
+ return errdefs.ToGRPC(err) |
|
| 415 |
+ } |
|
| 416 |
+ } |
|
| 417 |
+ |
|
| 418 |
+ msg.Digest = wr.Digest() |
|
| 419 |
+ } |
|
| 420 |
+ |
|
| 421 |
+ if err := session.Send(&msg); err != nil {
|
|
| 422 |
+ return err |
|
| 423 |
+ } |
|
| 424 |
+ |
|
| 425 |
+ req, err = session.Recv() |
|
| 426 |
+ if err != nil {
|
|
| 427 |
+ if err == io.EOF {
|
|
| 428 |
+ return nil |
|
| 429 |
+ } |
|
| 430 |
+ |
|
| 431 |
+ return err |
|
| 432 |
+ } |
|
| 433 |
+ } |
|
| 434 |
+} |
|
| 435 |
+ |
|
| 436 |
+func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*ptypes.Empty, error) {
|
|
| 437 |
+ if err := s.store.Abort(ctx, req.Ref); err != nil {
|
|
| 438 |
+ return nil, errdefs.ToGRPC(err) |
|
| 439 |
+ } |
|
| 440 |
+ |
|
| 441 |
+ return &ptypes.Empty{}, nil
|
|
| 442 |
+} |
|
| 443 |
+ |
|
| 444 |
+func infoToGRPC(info content.Info) api.Info {
|
|
| 445 |
+ return api.Info{
|
|
| 446 |
+ Digest: info.Digest, |
|
| 447 |
+ Size_: info.Size, |
|
| 448 |
+ CreatedAt: info.CreatedAt, |
|
| 449 |
+ UpdatedAt: info.UpdatedAt, |
|
| 450 |
+ Labels: info.Labels, |
|
| 451 |
+ } |
|
| 452 |
+} |
|
| 453 |
+ |
|
| 454 |
+func infoFromGRPC(info api.Info) content.Info {
|
|
| 455 |
+ return content.Info{
|
|
| 456 |
+ Digest: info.Digest, |
|
| 457 |
+ Size: info.Size_, |
|
| 458 |
+ CreatedAt: info.CreatedAt, |
|
| 459 |
+ UpdatedAt: info.UpdatedAt, |
|
| 460 |
+ Labels: info.Labels, |
|
| 461 |
+ } |
|
| 462 |
+} |
| 0 | 463 |
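
For orientation, a minimal sketch of how the new `contentserver.New` constructor could be wired up outside of the plugin system; the import path is inferred from the package name and the store root and socket path are placeholders:

```go
package main

import (
	"log"
	"net"

	api "github.com/containerd/containerd/api/services/content/v1"
	"github.com/containerd/containerd/content/local"
	// Import path assumed from the package name above.
	"github.com/containerd/containerd/services/content/contentserver"
	"google.golang.org/grpc"
)

func main() {
	// Back the content GRPC service with a plain local content store and
	// serve it on a unix socket, without going through plugin registration.
	store, err := local.NewStore("/tmp/content")
	if err != nil {
		log.Fatal(err)
	}

	srv := grpc.NewServer()
	api.RegisterContentServer(srv, contentserver.New(store))

	l, err := net.Listen("unix", "/tmp/content.sock")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(l))
}
```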
deleted file mode 100644 |
| ... | ... |
@@ -1,492 +0,0 @@ |
| 1 |
-/* |
|
| 2 |
- Copyright The containerd Authors. |
|
| 3 |
- |
|
| 4 |
- Licensed under the Apache License, Version 2.0 (the "License"); |
|
| 5 |
- you may not use this file except in compliance with the License. |
|
| 6 |
- You may obtain a copy of the License at |
|
| 7 |
- |
|
| 8 |
- http://www.apache.org/licenses/LICENSE-2.0 |
|
| 9 |
- |
|
| 10 |
- Unless required by applicable law or agreed to in writing, software |
|
| 11 |
- distributed under the License is distributed on an "AS IS" BASIS, |
|
| 12 |
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
| 13 |
- See the License for the specific language governing permissions and |
|
| 14 |
- limitations under the License. |
|
| 15 |
-*/ |
|
| 16 |
- |
|
| 17 |
-package content |
|
| 18 |
- |
|
| 19 |
-import ( |
|
| 20 |
- "context" |
|
| 21 |
- "io" |
|
| 22 |
- "sync" |
|
| 23 |
- |
|
| 24 |
- api "github.com/containerd/containerd/api/services/content/v1" |
|
| 25 |
- "github.com/containerd/containerd/content" |
|
| 26 |
- "github.com/containerd/containerd/errdefs" |
|
| 27 |
- "github.com/containerd/containerd/log" |
|
| 28 |
- "github.com/containerd/containerd/plugin" |
|
| 29 |
- "github.com/containerd/containerd/services" |
|
| 30 |
- ptypes "github.com/gogo/protobuf/types" |
|
| 31 |
- digest "github.com/opencontainers/go-digest" |
|
| 32 |
- ocispec "github.com/opencontainers/image-spec/specs-go/v1" |
|
| 33 |
- "github.com/pkg/errors" |
|
| 34 |
- "github.com/sirupsen/logrus" |
|
| 35 |
- "google.golang.org/grpc" |
|
| 36 |
- "google.golang.org/grpc/codes" |
|
| 37 |
- "google.golang.org/grpc/status" |
|
| 38 |
-) |
|
| 39 |
- |
|
| 40 |
-type service struct {
|
|
| 41 |
- store content.Store |
|
| 42 |
-} |
|
| 43 |
- |
|
| 44 |
-var bufPool = sync.Pool{
|
|
| 45 |
- New: func() interface{} {
|
|
| 46 |
- buffer := make([]byte, 1<<20) |
|
| 47 |
- return &buffer |
|
| 48 |
- }, |
|
| 49 |
-} |
|
| 50 |
- |
|
| 51 |
-var _ api.ContentServer = &service{}
|
|
| 52 |
- |
|
| 53 |
-func init() {
|
|
| 54 |
- plugin.Register(&plugin.Registration{
|
|
| 55 |
- Type: plugin.GRPCPlugin, |
|
| 56 |
- ID: "content", |
|
| 57 |
- Requires: []plugin.Type{
|
|
| 58 |
- plugin.ServicePlugin, |
|
| 59 |
- }, |
|
| 60 |
- InitFn: func(ic *plugin.InitContext) (interface{}, error) {
|
|
| 61 |
- plugins, err := ic.GetByType(plugin.ServicePlugin) |
|
| 62 |
- if err != nil {
|
|
| 63 |
- return nil, err |
|
| 64 |
- } |
|
| 65 |
- p, ok := plugins[services.ContentService] |
|
| 66 |
- if !ok {
|
|
| 67 |
- return nil, errors.New("content store service not found")
|
|
| 68 |
- } |
|
| 69 |
- cs, err := p.Instance() |
|
| 70 |
- if err != nil {
|
|
| 71 |
- return nil, err |
|
| 72 |
- } |
|
| 73 |
- return NewService(cs.(content.Store)), nil |
|
| 74 |
- }, |
|
| 75 |
- }) |
|
| 76 |
-} |
|
| 77 |
- |
|
| 78 |
-// NewService returns the content GRPC server |
|
| 79 |
-func NewService(cs content.Store) api.ContentServer {
|
|
| 80 |
- return &service{store: cs}
|
|
| 81 |
-} |
|
| 82 |
- |
|
| 83 |
-func (s *service) Register(server *grpc.Server) error {
|
|
| 84 |
- api.RegisterContentServer(server, s) |
|
| 85 |
- return nil |
|
| 86 |
-} |
|
| 87 |
- |
|
| 88 |
-func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) {
|
|
| 89 |
- if err := req.Digest.Validate(); err != nil {
|
|
| 90 |
- return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest) |
|
| 91 |
- } |
|
| 92 |
- |
|
| 93 |
- bi, err := s.store.Info(ctx, req.Digest) |
|
| 94 |
- if err != nil {
|
|
| 95 |
- return nil, errdefs.ToGRPC(err) |
|
| 96 |
- } |
|
| 97 |
- |
|
| 98 |
- return &api.InfoResponse{
|
|
| 99 |
- Info: infoToGRPC(bi), |
|
| 100 |
- }, nil |
|
| 101 |
-} |
|
| 102 |
- |
|
| 103 |
-func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) {
|
|
| 104 |
- if err := req.Info.Digest.Validate(); err != nil {
|
|
| 105 |
- return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest) |
|
| 106 |
- } |
|
| 107 |
- |
|
| 108 |
- info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...) |
|
| 109 |
- if err != nil {
|
|
| 110 |
- return nil, errdefs.ToGRPC(err) |
|
| 111 |
- } |
|
| 112 |
- |
|
| 113 |
- return &api.UpdateResponse{
|
|
| 114 |
- Info: infoToGRPC(info), |
|
| 115 |
- }, nil |
|
| 116 |
-} |
|
| 117 |
- |
|
| 118 |
-func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
|
|
| 119 |
- var ( |
|
| 120 |
- buffer []api.Info |
|
| 121 |
- sendBlock = func(block []api.Info) error {
|
|
| 122 |
- // send last block |
|
| 123 |
- return session.Send(&api.ListContentResponse{
|
|
| 124 |
- Info: block, |
|
| 125 |
- }) |
|
| 126 |
- } |
|
| 127 |
- ) |
|
| 128 |
- |
|
| 129 |
- if err := s.store.Walk(session.Context(), func(info content.Info) error {
|
|
| 130 |
- buffer = append(buffer, api.Info{
|
|
| 131 |
- Digest: info.Digest, |
|
| 132 |
- Size_: info.Size, |
|
| 133 |
- CreatedAt: info.CreatedAt, |
|
| 134 |
- Labels: info.Labels, |
|
| 135 |
- }) |
|
| 136 |
- |
|
| 137 |
- if len(buffer) >= 100 {
|
|
| 138 |
- if err := sendBlock(buffer); err != nil {
|
|
| 139 |
- return err |
|
| 140 |
- } |
|
| 141 |
- |
|
| 142 |
- buffer = buffer[:0] |
|
| 143 |
- } |
|
| 144 |
- |
|
| 145 |
- return nil |
|
| 146 |
- }, req.Filters...); err != nil {
|
|
| 147 |
- return err |
|
| 148 |
- } |
|
| 149 |
- |
|
| 150 |
- if len(buffer) > 0 {
|
|
| 151 |
- // send last block |
|
| 152 |
- if err := sendBlock(buffer); err != nil {
|
|
| 153 |
- return err |
|
| 154 |
- } |
|
| 155 |
- } |
|
| 156 |
- |
|
| 157 |
- return nil |
|
| 158 |
-} |
|
| 159 |
- |
|
| 160 |
-func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) {
|
|
| 161 |
- log.G(ctx).WithField("digest", req.Digest).Debugf("delete content")
|
|
| 162 |
- if err := req.Digest.Validate(); err != nil {
|
|
| 163 |
- return nil, status.Errorf(codes.InvalidArgument, err.Error()) |
|
| 164 |
- } |
|
| 165 |
- |
|
| 166 |
- if err := s.store.Delete(ctx, req.Digest); err != nil {
|
|
| 167 |
- return nil, errdefs.ToGRPC(err) |
|
| 168 |
- } |
|
| 169 |
- |
|
| 170 |
- return &ptypes.Empty{}, nil
|
|
| 171 |
-} |
|
| 172 |
- |
|
| 173 |
-func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error {
|
|
| 174 |
- if err := req.Digest.Validate(); err != nil {
|
|
| 175 |
- return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err) |
|
| 176 |
- } |
|
| 177 |
- |
|
| 178 |
- oi, err := s.store.Info(session.Context(), req.Digest) |
|
| 179 |
- if err != nil {
|
|
| 180 |
- return errdefs.ToGRPC(err) |
|
| 181 |
- } |
|
| 182 |
- |
|
| 183 |
- ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: req.Digest})
|
|
| 184 |
- if err != nil {
|
|
| 185 |
- return errdefs.ToGRPC(err) |
|
| 186 |
- } |
|
| 187 |
- defer ra.Close() |
|
| 188 |
- |
|
| 189 |
- var ( |
|
| 190 |
- offset = req.Offset |
|
| 191 |
- // size is read size, not the expected size of the blob (oi.Size), which the caller might not be aware of. |
|
| 192 |
- // offset+size can be larger than oi.Size. |
|
| 193 |
- size = req.Size_ |
|
| 194 |
- |
|
| 195 |
- // TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably |
|
| 196 |
- // little inefficient for work over a fast network. We can tune this later. |
|
| 197 |
- p = bufPool.Get().(*[]byte) |
|
| 198 |
- ) |
|
| 199 |
- defer bufPool.Put(p) |
|
| 200 |
- |
|
| 201 |
- if offset < 0 {
|
|
| 202 |
- offset = 0 |
|
| 203 |
- } |
|
| 204 |
- |
|
| 205 |
- if offset > oi.Size {
|
|
| 206 |
- return status.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size) |
|
| 207 |
- } |
|
| 208 |
- |
|
| 209 |
- if size <= 0 || offset+size > oi.Size {
|
|
| 210 |
- size = oi.Size - offset |
|
| 211 |
- } |
|
| 212 |
- |
|
| 213 |
- _, err = io.CopyBuffer( |
|
| 214 |
- &readResponseWriter{session: session},
|
|
| 215 |
- io.NewSectionReader(ra, offset, size), *p) |
|
| 216 |
- return errdefs.ToGRPC(err) |
|
| 217 |
-} |
|
| 218 |
- |
|
| 219 |
-// readResponseWriter is a writer that places the output into ReadContentRequest messages. |
|
| 220 |
-// |
|
| 221 |
-// This allows io.CopyBuffer to do the heavy lifting of chunking the responses |
|
| 222 |
-// into the buffer size. |
|
| 223 |
-type readResponseWriter struct {
|
|
| 224 |
- offset int64 |
|
| 225 |
- session api.Content_ReadServer |
|
| 226 |
-} |
|
| 227 |
- |
|
| 228 |
-func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
|
|
| 229 |
- if err := rw.session.Send(&api.ReadContentResponse{
|
|
| 230 |
- Offset: rw.offset, |
|
| 231 |
- Data: p, |
|
| 232 |
- }); err != nil {
|
|
| 233 |
- return 0, err |
|
| 234 |
- } |
|
| 235 |
- |
|
| 236 |
- rw.offset += int64(len(p)) |
|
| 237 |
- return len(p), nil |
|
| 238 |
-} |
|
| 239 |
- |
|
| 240 |
-func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
|
|
| 241 |
- status, err := s.store.Status(ctx, req.Ref) |
|
| 242 |
- if err != nil {
|
|
| 243 |
- return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref) |
|
| 244 |
- } |
|
| 245 |
- |
|
| 246 |
- var resp api.StatusResponse |
|
| 247 |
- resp.Status = &api.Status{
|
|
| 248 |
- StartedAt: status.StartedAt, |
|
| 249 |
- UpdatedAt: status.UpdatedAt, |
|
| 250 |
- Ref: status.Ref, |
|
| 251 |
- Offset: status.Offset, |
|
| 252 |
- Total: status.Total, |
|
| 253 |
- Expected: status.Expected, |
|
| 254 |
- } |
|
| 255 |
- |
|
| 256 |
- return &resp, nil |
|
| 257 |
-} |
|
| 258 |
- |
|
| 259 |
-func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) {
|
|
| 260 |
- statuses, err := s.store.ListStatuses(ctx, req.Filters...) |
|
| 261 |
- if err != nil {
|
|
| 262 |
- return nil, errdefs.ToGRPC(err) |
|
| 263 |
- } |
|
| 264 |
- |
|
| 265 |
- var resp api.ListStatusesResponse |
|
| 266 |
- for _, status := range statuses {
|
|
| 267 |
- resp.Statuses = append(resp.Statuses, api.Status{
|
|
| 268 |
- StartedAt: status.StartedAt, |
|
| 269 |
- UpdatedAt: status.UpdatedAt, |
|
| 270 |
- Ref: status.Ref, |
|
| 271 |
- Offset: status.Offset, |
|
| 272 |
- Total: status.Total, |
|
| 273 |
- Expected: status.Expected, |
|
| 274 |
- }) |
|
| 275 |
- } |
|
| 276 |
- |
|
| 277 |
- return &resp, nil |
|
| 278 |
-} |
|
| 279 |
- |
|
| 280 |
-func (s *service) Write(session api.Content_WriteServer) (err error) {
|
|
| 281 |
- var ( |
|
| 282 |
- ctx = session.Context() |
|
| 283 |
- msg api.WriteContentResponse |
|
| 284 |
- req *api.WriteContentRequest |
|
| 285 |
- ref string |
|
| 286 |
- total int64 |
|
| 287 |
- expected digest.Digest |
|
| 288 |
- ) |
|
| 289 |
- |
|
| 290 |
- defer func(msg *api.WriteContentResponse) {
|
|
| 291 |
- // pump through the last message if no error was encountered |
|
| 292 |
- if err != nil {
|
|
| 293 |
- if s, ok := status.FromError(err); ok && s.Code() != codes.AlreadyExists {
|
|
| 294 |
- // TODO(stevvooe): Really need a log line here to track which |
|
| 295 |
- // errors are actually causing failure on the server side. May want |
|
| 296 |
- // to configure the service with an interceptor to make this work |
|
| 297 |
- // identically across all GRPC methods. |
|
| 298 |
- // |
|
| 299 |
- // This is pretty noisy, so we can remove it but leave it for now. |
|
| 300 |
- log.G(ctx).WithError(err).Error("(*service).Write failed")
|
|
| 301 |
- } |
|
| 302 |
- |
|
| 303 |
- return |
|
| 304 |
- } |
|
| 305 |
- |
|
| 306 |
- err = session.Send(msg) |
|
| 307 |
- }(&msg) |
|
| 308 |
- |
|
| 309 |
- // handle the very first request! |
|
| 310 |
- req, err = session.Recv() |
|
| 311 |
- if err != nil {
|
|
| 312 |
- return err |
|
| 313 |
- } |
|
| 314 |
- |
|
| 315 |
- ref = req.Ref |
|
| 316 |
- |
|
| 317 |
- if ref == "" {
|
|
| 318 |
- return status.Errorf(codes.InvalidArgument, "first message must have a reference") |
|
| 319 |
- } |
|
| 320 |
- |
|
| 321 |
- fields := logrus.Fields{
|
|
| 322 |
- "ref": ref, |
|
| 323 |
- } |
|
| 324 |
- total = req.Total |
|
| 325 |
- expected = req.Expected |
|
| 326 |
- if total > 0 {
|
|
| 327 |
- fields["total"] = total |
|
| 328 |
- } |
|
| 329 |
- |
|
| 330 |
- if expected != "" {
|
|
| 331 |
- fields["expected"] = expected |
|
| 332 |
- } |
|
| 333 |
- |
|
| 334 |
- ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields)) |
|
| 335 |
- |
|
| 336 |
- log.G(ctx).Debug("(*service).Write started")
|
|
| 337 |
- // this action locks the writer for the session. |
|
| 338 |
- wr, err := s.store.Writer(ctx, |
|
| 339 |
- content.WithRef(ref), |
|
| 340 |
- content.WithDescriptor(ocispec.Descriptor{Size: total, Digest: expected}))
|
|
| 341 |
- if err != nil {
|
|
| 342 |
- return errdefs.ToGRPC(err) |
|
| 343 |
- } |
|
| 344 |
- defer wr.Close() |
|
| 345 |
- |
|
| 346 |
- for {
|
|
| 347 |
- msg.Action = req.Action |
|
| 348 |
- ws, err := wr.Status() |
|
| 349 |
- if err != nil {
|
|
| 350 |
- return errdefs.ToGRPC(err) |
|
| 351 |
- } |
|
| 352 |
- |
|
| 353 |
- msg.Offset = ws.Offset // always set the offset. |
|
| 354 |
- |
|
| 355 |
- // NOTE(stevvooe): In general, there are two cases underwhich a remote |
|
| 356 |
- // writer is used. |
|
| 357 |
- // |
|
| 358 |
- // For pull, we almost always have this before fetching large content, |
|
| 359 |
- // through descriptors. We allow predeclaration of the expected size |
|
| 360 |
- // and digest. |
|
| 361 |
- // |
|
| 362 |
- // For push, it is more complex. If we want to cut through content into |
|
| 363 |
- // storage, we may have no expectation until we are done processing the |
|
| 364 |
- // content. The case here is the following: |
|
| 365 |
- // |
|
| 366 |
- // 1. Start writing content. |
|
| 367 |
- // 2. Compress inline. |
|
| 368 |
- // 3. Validate digest and size (maybe). |
|
| 369 |
- // |
|
| 370 |
- // Supporting these two paths is quite awkward but it lets both API |
|
| 371 |
- // users use the same writer style for each with a minimum of overhead. |
|
| 372 |
- if req.Expected != "" {
|
|
| 373 |
- if expected != "" && expected != req.Expected {
|
|
| 374 |
- log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", req.Expected, expected)
|
|
| 375 |
- } |
|
| 376 |
- expected = req.Expected |
|
| 377 |
- |
|
| 378 |
- if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
|
|
| 379 |
- if err := wr.Close(); err != nil {
|
|
| 380 |
- log.G(ctx).WithError(err).Error("failed to close writer")
|
|
| 381 |
- } |
|
| 382 |
- if err := s.store.Abort(session.Context(), ref); err != nil {
|
|
| 383 |
- log.G(ctx).WithError(err).Error("failed to abort write")
|
|
| 384 |
- } |
|
| 385 |
- |
|
| 386 |
- return status.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected) |
|
| 387 |
- } |
|
| 388 |
- } |
|
| 389 |
- |
|
| 390 |
- if req.Total > 0 {
|
|
| 391 |
- // Update the expected total. Typically, this could be seen at |
|
| 392 |
- // negotiation time or on a commit message. |
|
| 393 |
- if total > 0 && req.Total != total {
|
|
| 394 |
- log.G(ctx).Debugf("commit size differs from writer size: %v != %v", req.Total, total)
|
|
| 395 |
- } |
|
| 396 |
- total = req.Total |
|
| 397 |
- } |
|
| 398 |
- |
|
| 399 |
- switch req.Action {
|
|
| 400 |
- case api.WriteActionStat: |
|
| 401 |
- msg.Digest = wr.Digest() |
|
| 402 |
- msg.StartedAt = ws.StartedAt |
|
| 403 |
- msg.UpdatedAt = ws.UpdatedAt |
|
| 404 |
- msg.Total = total |
|
| 405 |
- case api.WriteActionWrite, api.WriteActionCommit: |
|
| 406 |
- if req.Offset > 0 {
|
|
| 407 |
- // validate the offset if provided |
|
| 408 |
- if req.Offset != ws.Offset {
|
|
| 409 |
- return status.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset) |
|
| 410 |
- } |
|
| 411 |
- } |
|
| 412 |
- |
|
| 413 |
- if req.Offset == 0 && ws.Offset > 0 {
|
|
| 414 |
- if err := wr.Truncate(req.Offset); err != nil {
|
|
| 415 |
- return errors.Wrapf(err, "truncate failed") |
|
| 416 |
- } |
|
| 417 |
- msg.Offset = req.Offset |
|
| 418 |
- } |
|
| 419 |
- |
|
| 420 |
- // issue the write if we actually have data. |
|
| 421 |
- if len(req.Data) > 0 {
|
|
| 422 |
- // While this looks like we could use io.WriterAt here, because we |
|
| 423 |
- // maintain the offset as append only, we just issue the write. |
|
| 424 |
- n, err := wr.Write(req.Data) |
|
| 425 |
- if err != nil {
|
|
| 426 |
- return errdefs.ToGRPC(err) |
|
| 427 |
- } |
|
| 428 |
- |
|
| 429 |
- if n != len(req.Data) {
|
|
| 430 |
- // TODO(stevvooe): Perhaps, we can recover this by including it |
|
| 431 |
- // in the offset on the write return. |
|
| 432 |
- return status.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data)) |
|
| 433 |
- } |
|
| 434 |
- |
|
| 435 |
- msg.Offset += int64(n) |
|
| 436 |
- } |
|
| 437 |
- |
|
| 438 |
- if req.Action == api.WriteActionCommit {
|
|
| 439 |
- var opts []content.Opt |
|
| 440 |
- if req.Labels != nil {
|
|
| 441 |
- opts = append(opts, content.WithLabels(req.Labels)) |
|
| 442 |
- } |
|
| 443 |
- if err := wr.Commit(ctx, total, expected, opts...); err != nil {
|
|
| 444 |
- return errdefs.ToGRPC(err) |
|
| 445 |
- } |
|
| 446 |
- } |
|
| 447 |
- |
|
| 448 |
- msg.Digest = wr.Digest() |
|
| 449 |
- } |
|
| 450 |
- |
|
| 451 |
- if err := session.Send(&msg); err != nil {
|
|
| 452 |
- return err |
|
| 453 |
- } |
|
| 454 |
- |
|
| 455 |
- req, err = session.Recv() |
|
| 456 |
- if err != nil {
|
|
| 457 |
- if err == io.EOF {
|
|
| 458 |
- return nil |
|
| 459 |
- } |
|
| 460 |
- |
|
| 461 |
- return err |
|
| 462 |
- } |
|
| 463 |
- } |
|
| 464 |
-} |
|
| 465 |
- |
|
| 466 |
-func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*ptypes.Empty, error) {
|
|
| 467 |
- if err := s.store.Abort(ctx, req.Ref); err != nil {
|
|
| 468 |
- return nil, errdefs.ToGRPC(err) |
|
| 469 |
- } |
|
| 470 |
- |
|
| 471 |
- return &ptypes.Empty{}, nil
|
|
| 472 |
-} |
|
| 473 |
- |
|
| 474 |
-func infoToGRPC(info content.Info) api.Info {
|
|
| 475 |
- return api.Info{
|
|
| 476 |
- Digest: info.Digest, |
|
| 477 |
- Size_: info.Size, |
|
| 478 |
- CreatedAt: info.CreatedAt, |
|
| 479 |
- UpdatedAt: info.UpdatedAt, |
|
| 480 |
- Labels: info.Labels, |
|
| 481 |
- } |
|
| 482 |
-} |
|
| 483 |
- |
|
| 484 |
-func infoFromGRPC(info api.Info) content.Info {
|
|
| 485 |
- return content.Info{
|
|
| 486 |
- Digest: info.Digest, |
|
| 487 |
- Size: info.Size_, |
|
| 488 |
- CreatedAt: info.CreatedAt, |
|
| 489 |
- UpdatedAt: info.UpdatedAt, |
|
| 490 |
- Labels: info.Labels, |
|
| 491 |
- } |
|
| 492 |
-} |
| 493 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,71 +0,0 @@ |
| 1 |
-/* |
|
| 2 |
- Copyright The containerd Authors. |
|
| 3 |
- |
|
| 4 |
- Licensed under the Apache License, Version 2.0 (the "License"); |
|
| 5 |
- you may not use this file except in compliance with the License. |
|
| 6 |
- You may obtain a copy of the License at |
|
| 7 |
- |
|
| 8 |
- http://www.apache.org/licenses/LICENSE-2.0 |
|
| 9 |
- |
|
| 10 |
- Unless required by applicable law or agreed to in writing, software |
|
| 11 |
- distributed under the License is distributed on an "AS IS" BASIS, |
|
| 12 |
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
| 13 |
- See the License for the specific language governing permissions and |
|
| 14 |
- limitations under the License. |
|
| 15 |
-*/ |
|
| 16 |
- |
|
| 17 |
-package content |
|
| 18 |
- |
|
| 19 |
-import ( |
|
| 20 |
- "context" |
|
| 21 |
- |
|
| 22 |
- eventstypes "github.com/containerd/containerd/api/events" |
|
| 23 |
- "github.com/containerd/containerd/content" |
|
| 24 |
- "github.com/containerd/containerd/events" |
|
| 25 |
- "github.com/containerd/containerd/metadata" |
|
| 26 |
- "github.com/containerd/containerd/plugin" |
|
| 27 |
- "github.com/containerd/containerd/services" |
|
| 28 |
- digest "github.com/opencontainers/go-digest" |
|
| 29 |
-) |
|
| 30 |
- |
|
| 31 |
-// store wraps content.Store with proper event published. |
|
| 32 |
-type store struct {
|
|
| 33 |
- content.Store |
|
| 34 |
- publisher events.Publisher |
|
| 35 |
-} |
|
| 36 |
- |
|
| 37 |
-func init() {
|
|
| 38 |
- plugin.Register(&plugin.Registration{
|
|
| 39 |
- Type: plugin.ServicePlugin, |
|
| 40 |
- ID: services.ContentService, |
|
| 41 |
- Requires: []plugin.Type{
|
|
| 42 |
- plugin.MetadataPlugin, |
|
| 43 |
- }, |
|
| 44 |
- InitFn: func(ic *plugin.InitContext) (interface{}, error) {
|
|
| 45 |
- m, err := ic.Get(plugin.MetadataPlugin) |
|
| 46 |
- if err != nil {
|
|
| 47 |
- return nil, err |
|
| 48 |
- } |
|
| 49 |
- |
|
| 50 |
- s, err := newContentStore(m.(*metadata.DB).ContentStore(), ic.Events) |
|
| 51 |
- return s, err |
|
| 52 |
- }, |
|
| 53 |
- }) |
|
| 54 |
-} |
|
| 55 |
- |
|
| 56 |
-func newContentStore(cs content.Store, publisher events.Publisher) (content.Store, error) {
|
|
| 57 |
- return &store{
|
|
| 58 |
- Store: cs, |
|
| 59 |
- publisher: publisher, |
|
| 60 |
- }, nil |
|
| 61 |
-} |
|
| 62 |
- |
|
| 63 |
-func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
|
|
| 64 |
- if err := s.Store.Delete(ctx, dgst); err != nil {
|
|
| 65 |
- return err |
|
| 66 |
- } |
|
| 67 |
- // TODO: Consider whether we should return error here. |
|
| 68 |
- return s.publisher.Publish(ctx, "/content/delete", &eventstypes.ContentDelete{
|
|
| 69 |
- Digest: dgst, |
|
| 70 |
- }) |
|
| 71 |
-} |
| 72 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,36 +0,0 @@ |
| 1 |
-/* |
|
| 2 |
- Copyright The containerd Authors. |
|
| 3 |
- |
|
| 4 |
- Licensed under the Apache License, Version 2.0 (the "License"); |
|
| 5 |
- you may not use this file except in compliance with the License. |
|
| 6 |
- You may obtain a copy of the License at |
|
| 7 |
- |
|
| 8 |
- http://www.apache.org/licenses/LICENSE-2.0 |
|
| 9 |
- |
|
| 10 |
- Unless required by applicable law or agreed to in writing, software |
|
| 11 |
- distributed under the License is distributed on an "AS IS" BASIS, |
|
| 12 |
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
| 13 |
- See the License for the specific language governing permissions and |
|
| 14 |
- limitations under the License. |
|
| 15 |
-*/ |
|
| 16 |
- |
|
| 17 |
-package services |
|
| 18 |
- |
|
| 19 |
-const ( |
|
| 20 |
- // ContentService is id of content service. |
|
| 21 |
- ContentService = "content-service" |
|
| 22 |
- // SnapshotsService is id of snapshots service. |
|
| 23 |
- SnapshotsService = "snapshots-service" |
|
| 24 |
- // ImagesService is id of images service. |
|
| 25 |
- ImagesService = "images-service" |
|
| 26 |
- // ContainersService is id of containers service. |
|
| 27 |
- ContainersService = "containers-service" |
|
| 28 |
- // TasksService is id of tasks service. |
|
| 29 |
- TasksService = "tasks-service" |
|
| 30 |
- // NamespacesService is id of namespaces service. |
|
| 31 |
- NamespacesService = "namespaces-service" |
|
| 32 |
- // LeasesService is id of leases service. |
|
| 33 |
- LeasesService = "leases-service" |
|
| 34 |
- // DiffService is id of diff service. |
|
| 35 |
- DiffService = "diff-service" |
|
| 36 |
-) |
| ... | ... |
@@ -184,6 +184,14 @@ The local client will copy the files directly to the client. This is useful if B |
| 184 | 184 |
buildctl build ... --output type=local,dest=path/to/output-dir |
| 185 | 185 |
``` |
| 186 | 186 |
|
| 187 |
+The tar exporter is similar to the local exporter but transfers the files through a tarball. 
|
| 188 |
+ |
|
| 189 |
+``` |
|
| 190 |
+buildctl build ... --output type=tar,dest=out.tar |
|
| 191 |
+buildctl build ... --output type=tar > out.tar |
|
| 192 |
+``` |
|
| 193 |
+ |
|
| 194 |
+ |
|
| 187 | 195 |
##### Exporting built image to Docker |
| 188 | 196 |
|
| 189 | 197 |
``` |
| ... | ... |
@@ -10,6 +10,7 @@ import ( |
| 10 | 10 |
"path/filepath" |
| 11 | 11 |
"sync" |
| 12 | 12 |
|
| 13 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 13 | 14 |
"github.com/docker/docker/pkg/locker" |
| 14 | 15 |
iradix "github.com/hashicorp/go-immutable-radix" |
| 15 | 16 |
"github.com/hashicorp/golang-lru/simplelru" |
| ... | ... |
@@ -51,8 +52,8 @@ func ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, path string, |
| 51 | 51 |
return getDefaultManager().ChecksumWildcard(ctx, ref, path, followLinks) |
| 52 | 52 |
} |
| 53 | 53 |
|
| 54 |
-func GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) {
|
|
| 55 |
- return getDefaultManager().GetCacheContext(ctx, md) |
|
| 54 |
+func GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) {
|
|
| 55 |
+ return getDefaultManager().GetCacheContext(ctx, md, idmap) |
|
| 56 | 56 |
} |
| 57 | 57 |
|
| 58 | 58 |
func SetCacheContext(ctx context.Context, md *metadata.StorageItem, cc CacheContext) error {
|
| ... | ... |
@@ -81,7 +82,7 @@ type cacheManager struct {
|
| 81 | 81 |
} |
| 82 | 82 |
|
| 83 | 83 |
func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool) (digest.Digest, error) {
|
| 84 |
- cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata())) |
|
| 84 |
+ cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping()) |
|
| 85 | 85 |
if err != nil {
|
| 86 | 86 |
return "", nil |
| 87 | 87 |
} |
| ... | ... |
@@ -89,14 +90,14 @@ func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p |
| 89 | 89 |
} |
| 90 | 90 |
|
| 91 | 91 |
func (cm *cacheManager) ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool) (digest.Digest, error) {
|
| 92 |
- cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata())) |
|
| 92 |
+ cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping()) |
|
| 93 | 93 |
if err != nil {
|
| 94 | 94 |
return "", nil |
| 95 | 95 |
} |
| 96 | 96 |
return cc.ChecksumWildcard(ctx, ref, p, followLinks) |
| 97 | 97 |
} |
| 98 | 98 |
|
| 99 |
-func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) {
|
|
| 99 |
+func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) {
|
|
| 100 | 100 |
cm.locker.Lock(md.ID()) |
| 101 | 101 |
cm.lruMu.Lock() |
| 102 | 102 |
v, ok := cm.lru.Get(md.ID()) |
| ... | ... |
@@ -106,7 +107,7 @@ func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.Storag |
| 106 | 106 |
v.(*cacheContext).linkMap = map[string][][]byte{}
|
| 107 | 107 |
return v.(*cacheContext), nil |
| 108 | 108 |
} |
| 109 |
- cc, err := newCacheContext(md) |
|
| 109 |
+ cc, err := newCacheContext(md, idmap) |
|
| 110 | 110 |
if err != nil {
|
| 111 | 111 |
cm.locker.Unlock(md.ID()) |
| 112 | 112 |
return nil, err |
| ... | ... |
@@ -152,6 +153,7 @@ type cacheContext struct {
|
| 152 | 152 |
node *iradix.Node |
| 153 | 153 |
dirtyMap map[string]struct{}
|
| 154 | 154 |
linkMap map[string][][]byte |
| 155 |
+ idmap *idtools.IdentityMapping |
|
| 155 | 156 |
} |
| 156 | 157 |
|
| 157 | 158 |
type mount struct {
|
| ... | ... |
@@ -191,12 +193,13 @@ func (m *mount) clean() error {
|
| 191 | 191 |
return nil |
| 192 | 192 |
} |
| 193 | 193 |
|
| 194 |
-func newCacheContext(md *metadata.StorageItem) (*cacheContext, error) {
|
|
| 194 |
+func newCacheContext(md *metadata.StorageItem, idmap *idtools.IdentityMapping) (*cacheContext, error) {
|
|
| 195 | 195 |
cc := &cacheContext{
|
| 196 | 196 |
md: md, |
| 197 | 197 |
tree: iradix.New(), |
| 198 | 198 |
dirtyMap: map[string]struct{}{},
|
| 199 | 199 |
linkMap: map[string][][]byte{},
|
| 200 |
+ idmap: idmap, |
|
| 200 | 201 |
} |
| 201 | 202 |
if err := cc.load(); err != nil {
|
| 202 | 203 |
return nil, err |
| ... | ... |
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
|
| 9 | 9 |
"github.com/containerd/containerd/filters" |
| 10 | 10 |
"github.com/containerd/containerd/snapshots" |
| 11 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 11 | 12 |
"github.com/moby/buildkit/cache/metadata" |
| 12 | 13 |
"github.com/moby/buildkit/client" |
| 13 | 14 |
"github.com/moby/buildkit/identity" |
| ... | ... |
@@ -34,6 +35,7 @@ type Accessor interface {
|
| 34 | 34 |
GetFromSnapshotter(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) |
| 35 | 35 |
New(ctx context.Context, s ImmutableRef, opts ...RefOption) (MutableRef, error) |
| 36 | 36 |
GetMutable(ctx context.Context, id string) (MutableRef, error) // Rebase? |
| 37 |
+ IdentityMapping() *idtools.IdentityMapping |
|
| 37 | 38 |
} |
| 38 | 39 |
|
| 39 | 40 |
type Controller interface {
|
| ... | ... |
@@ -96,6 +98,11 @@ func (cm *cacheManager) init(ctx context.Context) error {
|
| 96 | 96 |
return nil |
| 97 | 97 |
} |
| 98 | 98 |
|
| 99 |
+// IdentityMapping returns the userns remapping used for refs |
|
| 100 |
+func (cm *cacheManager) IdentityMapping() *idtools.IdentityMapping {
|
|
| 101 |
+ return cm.Snapshotter.IdentityMapping() |
|
| 102 |
+} |
|
| 103 |
+ |
|
| 99 | 104 |
// Close closes the manager and releases the metadata database lock. No other |
| 100 | 105 |
// method should be called after Close. |
| 101 | 106 |
func (cm *cacheManager) Close() error {
|
| ... | ... |
@@ -5,6 +5,7 @@ import ( |
| 5 | 5 |
"sync" |
| 6 | 6 |
|
| 7 | 7 |
"github.com/containerd/containerd/mount" |
| 8 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 8 | 9 |
"github.com/moby/buildkit/cache/metadata" |
| 9 | 10 |
"github.com/moby/buildkit/identity" |
| 10 | 11 |
"github.com/moby/buildkit/snapshot" |
| ... | ... |
@@ -20,6 +21,7 @@ type Ref interface {
|
| 20 | 20 |
Release(context.Context) error |
| 21 | 21 |
Size(ctx context.Context) (int64, error) |
| 22 | 22 |
Metadata() *metadata.StorageItem |
| 23 |
+ IdentityMapping() *idtools.IdentityMapping |
|
| 23 | 24 |
} |
| 24 | 25 |
|
| 25 | 26 |
type ImmutableRef interface {
|
| ... | ... |
@@ -83,6 +85,10 @@ func (cr *cacheRecord) isDead() bool {
|
| 83 | 83 |
return cr.dead || (cr.equalImmutable != nil && cr.equalImmutable.dead) || (cr.equalMutable != nil && cr.equalMutable.dead) |
| 84 | 84 |
} |
| 85 | 85 |
|
| 86 |
+func (cr *cacheRecord) IdentityMapping() *idtools.IdentityMapping {
|
|
| 87 |
+ return cr.cm.IdentityMapping() |
|
| 88 |
+} |
|
| 89 |
+ |
|
| 86 | 90 |
func (cr *cacheRecord) Size(ctx context.Context) (int64, error) {
|
| 87 | 91 |
// this expects that usage() is implemented lazily |
| 88 | 92 |
s, err := cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (interface{}, error) {
|
| ... | ... |
@@ -5,9 +5,12 @@ import ( |
| 5 | 5 |
"crypto/tls" |
| 6 | 6 |
"crypto/x509" |
| 7 | 7 |
"io/ioutil" |
| 8 |
+ "net" |
|
| 9 |
+ "time" |
|
| 8 | 10 |
|
| 9 | 11 |
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" |
| 10 | 12 |
controlapi "github.com/moby/buildkit/api/services/control" |
| 13 |
+ "github.com/moby/buildkit/client/connhelper" |
|
| 11 | 14 |
"github.com/moby/buildkit/util/appdefaults" |
| 12 | 15 |
opentracing "github.com/opentracing/opentracing-go" |
| 13 | 16 |
"github.com/pkg/errors" |
| ... | ... |
@@ -23,9 +26,8 @@ type ClientOpt interface{}
|
| 23 | 23 |
|
| 24 | 24 |
// New returns a new buildkit client. Address can be empty for the system-default address. |
| 25 | 25 |
func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) {
|
| 26 |
- gopts := []grpc.DialOption{
|
|
| 27 |
- grpc.WithDialer(dialer), |
|
| 28 |
- } |
|
| 26 |
+ gopts := []grpc.DialOption{}
|
|
| 27 |
+ needDialer := true |
|
| 29 | 28 |
needWithInsecure := true |
| 30 | 29 |
for _, o := range opts {
|
| 31 | 30 |
if _, ok := o.(*withFailFast); ok {
|
| ... | ... |
@@ -44,6 +46,19 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error |
| 44 | 44 |
grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())), |
| 45 | 45 |
grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer))) |
| 46 | 46 |
} |
| 47 |
+ if wd, ok := o.(*withDialer); ok {
|
|
| 48 |
+ gopts = append(gopts, grpc.WithDialer(wd.dialer)) |
|
| 49 |
+ needDialer = false |
|
| 50 |
+ } |
|
| 51 |
+ } |
|
| 52 |
+ if needDialer {
|
|
| 53 |
+ dialFn, err := resolveDialer(address) |
|
| 54 |
+ if err != nil {
|
|
| 55 |
+ return nil, err |
|
| 56 |
+ } |
|
| 57 |
+ // TODO(AkihiroSuda): use WithContextDialer (requires grpc 1.19) |
|
| 58 |
+ // https://github.com/grpc/grpc-go/commit/40cb5618f475e7b9d61aa7920ae4b04ef9bbaf89 |
|
| 59 |
+ gopts = append(gopts, grpc.WithDialer(dialFn)) |
|
| 47 | 60 |
} |
| 48 | 61 |
if needWithInsecure {
|
| 49 | 62 |
gopts = append(gopts, grpc.WithInsecure()) |
| ... | ... |
@@ -75,6 +90,14 @@ func WithFailFast() ClientOpt {
|
| 75 | 75 |
return &withFailFast{}
|
| 76 | 76 |
} |
| 77 | 77 |
|
| 78 |
+type withDialer struct {
|
|
| 79 |
+ dialer func(string, time.Duration) (net.Conn, error) |
|
| 80 |
+} |
|
| 81 |
+ |
|
| 82 |
+func WithDialer(df func(string, time.Duration) (net.Conn, error)) ClientOpt {
|
|
| 83 |
+ return &withDialer{dialer: df}
|
|
| 84 |
+} |
|
| 85 |
+ |
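
A hedged sketch of the new option from a caller's perspective; the tunnelling logic in `dialBackend` is a stand-in for whatever transport the caller actually needs:

```go
package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/moby/buildkit/client"
)

// dialBackend is a hypothetical custom dialer, e.g. one that tunnels the
// connection over SSH or a container exec. Here it simply dials the default
// local socket so the sketch stays self-contained.
func dialBackend(addr string, timeout time.Duration) (net.Conn, error) {
	return net.DialTimeout("unix", "/run/buildkit/buildkitd.sock", timeout)
}

func main() {
	// Passing WithDialer skips the connection-helper lookup entirely; the
	// address argument is only forwarded to the dial function.
	c, err := client.New(context.Background(), "", client.WithDialer(dialBackend))
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
}
```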
|
| 78 | 86 |
type withCredentials struct {
|
| 79 | 87 |
ServerName string |
| 80 | 88 |
CACert string |
| ... | ... |
@@ -128,3 +151,19 @@ func WithTracer(t opentracing.Tracer) ClientOpt {
|
| 128 | 128 |
type withTracer struct {
|
| 129 | 129 |
tracer opentracing.Tracer |
| 130 | 130 |
} |
| 131 |
+ |
|
| 132 |
+func resolveDialer(address string) (func(string, time.Duration) (net.Conn, error), error) {
|
|
| 133 |
+ ch, err := connhelper.GetConnectionHelper(address) |
|
| 134 |
+ if err != nil {
|
|
| 135 |
+ return nil, err |
|
| 136 |
+ } |
|
| 137 |
+ if ch != nil {
|
|
| 138 |
+ f := func(a string, _ time.Duration) (net.Conn, error) {
|
|
| 139 |
+ ctx := context.Background() |
|
| 140 |
+ return ch.ContextDialer(ctx, a) |
|
| 141 |
+ } |
|
| 142 |
+ return f, nil |
|
| 143 |
+ } |
|
| 144 |
+ // basic dialer |
|
| 145 |
+ return dialer, nil |
|
| 146 |
+} |
| 131 | 147 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,37 @@ |
| 0 |
+// Package connhelper provides helpers for connecting to a remote daemon host with custom logic. |
|
| 1 |
+package connhelper |
|
| 2 |
+ |
|
| 3 |
+import ( |
|
| 4 |
+ "context" |
|
| 5 |
+ "net" |
|
| 6 |
+ "net/url" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+var helpers = map[string]func(*url.URL) (*ConnectionHelper, error){}
|
|
| 10 |
+ |
|
| 11 |
+// ConnectionHelper allows to connect to a remote host with custom stream provider binary. |
|
| 12 |
+type ConnectionHelper struct {
|
|
| 13 |
+ // ContextDialer can be passed to grpc.WithContextDialer |
|
| 14 |
+ ContextDialer func(ctx context.Context, addr string) (net.Conn, error) |
|
| 15 |
+} |
|
| 16 |
+ |
|
| 17 |
+// GetConnectionHelper returns BuildKit-specific connection helper for the given URL. |
|
| 18 |
+// GetConnectionHelper returns nil without error when no helper is registered for the scheme. |
|
| 19 |
+func GetConnectionHelper(daemonURL string) (*ConnectionHelper, error) {
|
|
| 20 |
+ u, err := url.Parse(daemonURL) |
|
| 21 |
+ if err != nil {
|
|
| 22 |
+ return nil, err |
|
| 23 |
+ } |
|
| 24 |
+ |
|
| 25 |
+ fn, ok := helpers[u.Scheme] |
|
| 26 |
+ if !ok {
|
|
| 27 |
+ return nil, nil |
|
| 28 |
+ } |
|
| 29 |
+ |
|
| 30 |
+ return fn(u) |
|
| 31 |
+} |
|
| 32 |
+ |
|
| 33 |
+// Register registers new connectionhelper for scheme |
|
| 34 |
+func Register(scheme string, fn func(*url.URL) (*ConnectionHelper, error)) {
|
|
| 35 |
+ helpers[scheme] = fn |
|
| 36 |
+} |
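
To show how the registry is meant to be used, here is a sketch of a helper package registering itself for a hypothetical `ssh://` scheme; the scheme and the dialing logic are illustrative assumptions, not part of this change:

```go
// Package sshhelper is a hypothetical connection helper.
package sshhelper

import (
	"context"
	"net"
	"net/url"

	"github.com/moby/buildkit/client/connhelper"
)

func init() {
	// Register a helper for "ssh://" addresses. client.New resolves the
	// scheme through GetConnectionHelper and uses ContextDialer for every
	// connection attempt.
	connhelper.Register("ssh", func(u *url.URL) (*connhelper.ConnectionHelper, error) {
		return &connhelper.ConnectionHelper{
			ContextDialer: func(ctx context.Context, addr string) (net.Conn, error) {
				// Stand-in for a real tunnelled stream (e.g. an SSH channel).
				var d net.Dialer
				return d.DialContext(ctx, "unix", "/run/buildkit/buildkitd.sock")
			},
		}, nil
	})
}
```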
| ... | ... |
@@ -20,6 +20,7 @@ type Meta struct {
|
| 20 | 20 |
ProxyEnv *ProxyEnv |
| 21 | 21 |
ExtraHosts []HostIP |
| 22 | 22 |
Network pb.NetMode |
| 23 |
+ Security pb.SecurityMode |
|
| 23 | 24 |
} |
| 24 | 25 |
|
| 25 | 26 |
func NewExecOp(root Output, meta Meta, readOnly bool, c Constraints) *ExecOp {
|
| ... | ... |
@@ -52,7 +53,7 @@ type mount struct {
|
| 52 | 52 |
cacheID string |
| 53 | 53 |
tmpfs bool |
| 54 | 54 |
cacheSharing CacheMountSharingMode |
| 55 |
- // hasOutput bool |
|
| 55 |
+ noOutput bool |
|
| 56 | 56 |
} |
| 57 | 57 |
|
| 58 | 58 |
type ExecOp struct {
|
| ... | ... |
@@ -79,6 +80,8 @@ func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Outp |
| 79 | 79 |
m.output = source |
| 80 | 80 |
} else if m.tmpfs {
|
| 81 | 81 |
m.output = &output{vertex: e, err: errors.Errorf("tmpfs mount for %s can't be used as a parent", target)}
|
| 82 |
+ } else if m.noOutput {
|
|
| 83 |
+ m.output = &output{vertex: e, err: errors.Errorf("mount marked no-output and %s can't be used as a parent", target)}
|
|
| 82 | 84 |
} else {
|
| 83 | 85 |
o := &output{vertex: e, getIndex: e.getMountIndexFn(m)}
|
| 84 | 86 |
if p := e.constraints.Platform; p != nil {
|
| ... | ... |
@@ -166,13 +169,18 @@ func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, |
| 166 | 166 |
} |
| 167 | 167 |
|
| 168 | 168 |
peo := &pb.ExecOp{
|
| 169 |
- Meta: meta, |
|
| 170 |
- Network: e.meta.Network, |
|
| 169 |
+ Meta: meta, |
|
| 170 |
+ Network: e.meta.Network, |
|
| 171 |
+ Security: e.meta.Security, |
|
| 171 | 172 |
} |
| 172 | 173 |
if e.meta.Network != NetModeSandbox {
|
| 173 | 174 |
addCap(&e.constraints, pb.CapExecMetaNetwork) |
| 174 | 175 |
} |
| 175 | 176 |
|
| 177 |
+ if e.meta.Security != SecurityModeInsecure {
|
|
| 178 |
+ addCap(&e.constraints, pb.CapExecMetaSecurity) |
|
| 179 |
+ } |
|
| 180 |
+ |
|
| 176 | 181 |
if p := e.meta.ProxyEnv; p != nil {
|
| 177 | 182 |
peo.Meta.ProxyEnv = &pb.ProxyEnv{
|
| 178 | 183 |
HttpProxy: p.HttpProxy, |
| ... | ... |
@@ -242,7 +250,7 @@ func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, |
| 242 | 242 |
} |
| 243 | 243 |
|
| 244 | 244 |
outputIndex := pb.OutputIndex(-1) |
| 245 |
- if !m.readonly && m.cacheID == "" && !m.tmpfs {
|
|
| 245 |
+ if !m.noOutput && !m.readonly && m.cacheID == "" && !m.tmpfs {
|
|
| 246 | 246 |
outputIndex = pb.OutputIndex(outIndex) |
| 247 | 247 |
outIndex++ |
| 248 | 248 |
} |
| ... | ... |
@@ -338,7 +346,7 @@ func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) {
|
| 338 | 338 |
|
| 339 | 339 |
i := 0 |
| 340 | 340 |
for _, m2 := range e.mounts {
|
| 341 |
- if m2.readonly || m2.cacheID != "" {
|
|
| 341 |
+ if m2.noOutput || m2.readonly || m2.cacheID != "" {
|
|
| 342 | 342 |
continue |
| 343 | 343 |
} |
| 344 | 344 |
if m == m2 {
|
| ... | ... |
@@ -379,6 +387,10 @@ func SourcePath(src string) MountOption {
|
| 379 | 379 |
} |
| 380 | 380 |
} |
| 381 | 381 |
|
| 382 |
+func ForceNoOutput(m *mount) {
|
|
| 383 |
+ m.noOutput = true |
|
| 384 |
+} |
|
| 385 |
+ |
|
| 382 | 386 |
func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption {
|
| 383 | 387 |
return func(m *mount) {
|
| 384 | 388 |
m.cacheID = id |
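A minimal client-side sketch (not part of the diff) of the new ForceNoOutput option added in this hunk; the Dockerfile frontend change later in this patch applies it to read-write bind mounts. The state names and image are placeholders, and the usual github.com/moby/buildkit/client/llb import is assumed.

    func exampleNoOutputMount() llb.State {
        src := llb.Local("context") // hypothetical local source
        run := llb.Image("docker.io/library/golang:1.12").
            Run(llb.Shlex("go test ./..."))
        // The bind mount stays writable but is assigned no output index,
        // so it cannot be used as a parent of later states.
        run.AddMount("/src", src, llb.ForceNoOutput)
        return run.Root()
    }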
| ... | ... |
@@ -408,6 +420,12 @@ func Network(n pb.NetMode) RunOption {
|
| 408 | 408 |
}) |
| 409 | 409 |
} |
| 410 | 410 |
|
| 411 |
+func Security(s pb.SecurityMode) RunOption {
|
|
| 412 |
+ return runOptionFunc(func(ei *ExecInfo) {
|
|
| 413 |
+ ei.State = security(s)(ei.State) |
|
| 414 |
+ }) |
|
| 415 |
+} |
|
| 416 |
+ |
|
| 411 | 417 |
func Shlex(str string) RunOption {
|
| 412 | 418 |
return Shlexf(str) |
| 413 | 419 |
} |
| ... | ... |
@@ -623,3 +641,8 @@ const ( |
| 623 | 623 |
NetModeHost = pb.NetMode_HOST |
| 624 | 624 |
NetModeNone = pb.NetMode_NONE |
| 625 | 625 |
) |
| 626 |
+ |
|
| 627 |
+const ( |
|
| 628 |
+ SecurityModeInsecure = pb.SecurityMode_INSECURE |
|
| 629 |
+ SecurityModeSandbox = pb.SecurityMode_SANDBOX |
|
| 630 |
+) |
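Another client-side sketch (not part of the diff): requesting the new insecure security mode for a single RUN step. The image and command are placeholders, and the resulting build is still rejected by ValidateEntitlements unless the solver is configured with the security.insecure entitlement.

    func exampleInsecureRun() (*llb.Definition, error) {
        st := llb.Image("docker.io/library/alpine:latest").
            Run(
                llb.Shlex("sh -c 'grep CapEff /proc/self/status'"),
                llb.Security(llb.SecurityModeInsecure),
            ).Root()
        // Definition.ToPB() is then passed to the solver as usual.
        return st.Marshal()
    }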
| ... | ... |
@@ -21,6 +21,7 @@ var ( |
| 21 | 21 |
keyExtraHost = contextKeyT("llb.exec.extrahost")
|
| 22 | 22 |
keyPlatform = contextKeyT("llb.platform")
|
| 23 | 23 |
keyNetwork = contextKeyT("llb.network")
|
| 24 |
+ keySecurity = contextKeyT("llb.security")
|
|
| 24 | 25 |
) |
| 25 | 26 |
|
| 26 | 27 |
func addEnvf(key, value string, v ...interface{}) StateOption {
|
| ... | ... |
@@ -148,7 +149,6 @@ func network(v pb.NetMode) StateOption {
|
| 148 | 148 |
return s.WithValue(keyNetwork, v) |
| 149 | 149 |
} |
| 150 | 150 |
} |
| 151 |
- |
|
| 152 | 151 |
func getNetwork(s State) pb.NetMode {
|
| 153 | 152 |
v := s.Value(keyNetwork) |
| 154 | 153 |
if v != nil {
|
| ... | ... |
@@ -158,6 +158,20 @@ func getNetwork(s State) pb.NetMode {
|
| 158 | 158 |
return NetModeSandbox |
| 159 | 159 |
} |
| 160 | 160 |
|
| 161 |
+func security(v pb.SecurityMode) StateOption {
|
|
| 162 |
+ return func(s State) State {
|
|
| 163 |
+ return s.WithValue(keySecurity, v) |
|
| 164 |
+ } |
|
| 165 |
+} |
|
| 166 |
+func getSecurity(s State) pb.SecurityMode {
|
|
| 167 |
+ v := s.Value(keySecurity) |
|
| 168 |
+ if v != nil {
|
|
| 169 |
+ n := v.(pb.SecurityMode) |
|
| 170 |
+ return n |
|
| 171 |
+ } |
|
| 172 |
+ return SecurityModeSandbox |
|
| 173 |
+} |
|
| 174 |
+ |
|
| 161 | 175 |
type EnvList []KeyValue |
| 162 | 176 |
|
| 163 | 177 |
type KeyValue struct {
|
| ... | ... |
@@ -214,6 +214,7 @@ func (s State) Run(ro ...RunOption) ExecState {
|
| 214 | 214 |
ProxyEnv: ei.ProxyEnv, |
| 215 | 215 |
ExtraHosts: getExtraHosts(ei.State), |
| 216 | 216 |
Network: getNetwork(ei.State), |
| 217 |
+ Security: getSecurity(ei.State), |
|
| 217 | 218 |
} |
| 218 | 219 |
|
| 219 | 220 |
exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Constraints) |
| ... | ... |
@@ -292,6 +293,13 @@ func (s State) Network(n pb.NetMode) State {
|
| 292 | 292 |
func (s State) GetNetwork() pb.NetMode {
|
| 293 | 293 |
return getNetwork(s) |
| 294 | 294 |
} |
| 295 |
+func (s State) Security(n pb.SecurityMode) State {
|
|
| 296 |
+ return security(n)(s) |
|
| 297 |
+} |
|
| 298 |
+ |
|
| 299 |
+func (s State) GetSecurity() pb.SecurityMode {
|
|
| 300 |
+ return getSecurity(s) |
|
| 301 |
+} |
|
| 295 | 302 |
|
| 296 | 303 |
func (s State) With(so ...StateOption) State {
|
| 297 | 304 |
for _, o := range so {
|
| ... | ... |
@@ -124,7 +124,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG |
| 124 | 124 |
return nil, errors.New("output directory is required for local exporter")
|
| 125 | 125 |
} |
| 126 | 126 |
s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir)) |
| 127 |
- case ExporterOCI, ExporterDocker: |
|
| 127 |
+ case ExporterOCI, ExporterDocker, ExporterTar: |
|
| 128 | 128 |
if ex.OutputDir != "" {
|
| 129 | 129 |
return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
|
| 130 | 130 |
} |
| ... | ... |
@@ -32,6 +32,7 @@ type Opt struct {
|
| 32 | 32 |
CacheKeyStorage solver.CacheKeyStorage |
| 33 | 33 |
ResolveCacheExporterFuncs map[string]remotecache.ResolveCacheExporterFunc |
| 34 | 34 |
ResolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc |
| 35 |
+ Entitlements []string |
|
| 35 | 36 |
} |
| 36 | 37 |
|
| 37 | 38 |
type Controller struct { // TODO: ControlService
|
| ... | ... |
@@ -48,7 +49,7 @@ func NewController(opt Opt) (*Controller, error) {
|
| 48 | 48 |
|
| 49 | 49 |
gatewayForwarder := controlgateway.NewGatewayForwarder() |
| 50 | 50 |
|
| 51 |
- solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, cache, opt.ResolveCacheImporterFuncs, gatewayForwarder, opt.SessionManager) |
|
| 51 |
+ solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, cache, opt.ResolveCacheImporterFuncs, gatewayForwarder, opt.SessionManager, opt.Entitlements) |
|
| 52 | 52 |
if err != nil {
|
| 53 | 53 |
return nil, errors.Wrap(err, "failed to create solver") |
| 54 | 54 |
} |
| ... | ... |
@@ -8,14 +8,19 @@ import ( |
| 8 | 8 |
"sync" |
| 9 | 9 |
|
| 10 | 10 |
"github.com/containerd/containerd/containers" |
| 11 |
+ "github.com/containerd/containerd/contrib/seccomp" |
|
| 11 | 12 |
"github.com/containerd/containerd/mount" |
| 12 | 13 |
"github.com/containerd/containerd/namespaces" |
| 13 | 14 |
"github.com/containerd/containerd/oci" |
| 14 | 15 |
"github.com/containerd/continuity/fs" |
| 16 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 15 | 17 |
"github.com/mitchellh/hashstructure" |
| 16 | 18 |
"github.com/moby/buildkit/executor" |
| 17 | 19 |
"github.com/moby/buildkit/snapshot" |
| 20 |
+ "github.com/moby/buildkit/solver/pb" |
|
| 21 |
+ "github.com/moby/buildkit/util/entitlements" |
|
| 18 | 22 |
"github.com/moby/buildkit/util/network" |
| 23 |
+ "github.com/moby/buildkit/util/system" |
|
| 19 | 24 |
specs "github.com/opencontainers/runtime-spec/specs-go" |
| 20 | 25 |
"github.com/pkg/errors" |
| 21 | 26 |
) |
| ... | ... |
@@ -36,7 +41,7 @@ const ( |
| 36 | 36 |
|
| 37 | 37 |
// GenerateSpec generates spec using containerd functionality. |
| 38 | 38 |
// opts are ignored for s.Process, s.Hostname, and s.Mounts . |
| 39 |
-func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, processMode ProcessMode, opts ...oci.SpecOpts) (*specs.Spec, func(), error) {
|
|
| 39 |
+func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, processMode ProcessMode, idmap *idtools.IdentityMapping, opts ...oci.SpecOpts) (*specs.Spec, func(), error) {
|
|
| 40 | 40 |
c := &containers.Container{
|
| 41 | 41 |
ID: id, |
| 42 | 42 |
} |
| ... | ... |
@@ -44,6 +49,11 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou |
| 44 | 44 |
if !ok {
|
| 45 | 45 |
ctx = namespaces.WithNamespace(ctx, "buildkit") |
| 46 | 46 |
} |
| 47 |
+ if meta.SecurityMode == pb.SecurityMode_INSECURE {
|
|
| 48 |
+ opts = append(opts, entitlements.WithInsecureSpec()) |
|
| 49 |
+ } else if system.SeccompSupported() && meta.SecurityMode == pb.SecurityMode_SANDBOX {
|
|
| 50 |
+ opts = append(opts, seccomp.WithDefaultProfile()) |
|
| 51 |
+ } |
|
| 47 | 52 |
|
| 48 | 53 |
switch processMode {
|
| 49 | 54 |
case NoProcessSandbox: |
| ... | ... |
@@ -85,7 +95,22 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou |
| 85 | 85 |
Options: []string{"ro", "nosuid", "noexec", "nodev"},
|
| 86 | 86 |
}) |
| 87 | 87 |
|
| 88 |
- // TODO: User |
|
| 88 |
+ if meta.SecurityMode == pb.SecurityMode_INSECURE {
|
|
| 89 |
+ // make the sysfs mount read-write for insecure mode. |
|
| 90 |
+ for _, m := range s.Mounts {
|
|
| 91 |
+ if m.Type == "sysfs" {
|
|
| 92 |
+ m.Options = []string{"nosuid", "noexec", "nodev", "rw"}
|
|
| 93 |
+ } |
|
| 94 |
+ } |
|
| 95 |
+ } |
|
| 96 |
+ |
|
| 97 |
+ if idmap != nil {
|
|
| 98 |
+ s.Linux.Namespaces = append(s.Linux.Namespaces, specs.LinuxNamespace{
|
|
| 99 |
+ Type: specs.UserNamespace, |
|
| 100 |
+ }) |
|
| 101 |
+ s.Linux.UIDMappings = specMapping(idmap.UIDs()) |
|
| 102 |
+ s.Linux.GIDMappings = specMapping(idmap.GIDs()) |
|
| 103 |
+ } |
|
| 89 | 104 |
|
| 90 | 105 |
sm := &submounts{}
|
| 91 | 106 |
|
| ... | ... |
@@ -210,3 +235,15 @@ func sub(m mount.Mount, subPath string) (mount.Mount, error) {
|
| 210 | 210 |
m.Source = src |
| 211 | 211 |
return m, nil |
| 212 | 212 |
} |
| 213 |
+ |
|
| 214 |
+func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping {
|
|
| 215 |
+ var ids []specs.LinuxIDMapping |
|
| 216 |
+ for _, item := range s {
|
|
| 217 |
+ ids = append(ids, specs.LinuxIDMapping{
|
|
| 218 |
+ HostID: uint32(item.HostID), |
|
| 219 |
+ ContainerID: uint32(item.ContainerID), |
|
| 220 |
+ Size: uint32(item.Size), |
|
| 221 |
+ }) |
|
| 222 |
+ } |
|
| 223 |
+ return ids |
|
| 224 |
+} |
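For reference, a minimal sketch of what specMapping yields, assuming it runs inside the same package with fmt imported; the 0/100000/65536 range is an arbitrary example, not taken from this diff.

    func debugSpecMapping() {
        maps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
        for _, m := range specMapping(maps) {
            // Prints "0 100000 65536": the values GenerateSpec assigns to
            // s.Linux.UIDMappings and s.Linux.GIDMappings when idmap != nil.
            fmt.Println(m.ContainerID, m.HostID, m.Size)
        }
    }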
| ... | ... |
@@ -13,11 +13,11 @@ import ( |
| 13 | 13 |
"syscall" |
| 14 | 14 |
"time" |
| 15 | 15 |
|
| 16 |
- "github.com/containerd/containerd/contrib/seccomp" |
|
| 17 | 16 |
"github.com/containerd/containerd/mount" |
| 18 | 17 |
containerdoci "github.com/containerd/containerd/oci" |
| 19 | 18 |
"github.com/containerd/continuity/fs" |
| 20 | 19 |
runc "github.com/containerd/go-runc" |
| 20 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 21 | 21 |
"github.com/moby/buildkit/cache" |
| 22 | 22 |
"github.com/moby/buildkit/executor" |
| 23 | 23 |
"github.com/moby/buildkit/executor/oci" |
| ... | ... |
@@ -25,7 +25,6 @@ import ( |
| 25 | 25 |
"github.com/moby/buildkit/solver/pb" |
| 26 | 26 |
"github.com/moby/buildkit/util/network" |
| 27 | 27 |
rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv" |
| 28 |
- "github.com/moby/buildkit/util/system" |
|
| 29 | 28 |
specs "github.com/opencontainers/runtime-spec/specs-go" |
| 30 | 29 |
"github.com/pkg/errors" |
| 31 | 30 |
"github.com/sirupsen/logrus" |
| ... | ... |
@@ -40,7 +39,8 @@ type Opt struct {
|
| 40 | 40 |
// DefaultCgroupParent is the cgroup-parent name for executor |
| 41 | 41 |
DefaultCgroupParent string |
| 42 | 42 |
// ProcessMode |
| 43 |
- ProcessMode oci.ProcessMode |
|
| 43 |
+ ProcessMode oci.ProcessMode |
|
| 44 |
+ IdentityMapping *idtools.IdentityMapping |
|
| 44 | 45 |
} |
| 45 | 46 |
|
| 46 | 47 |
var defaultCommandCandidates = []string{"buildkit-runc", "runc"}
|
| ... | ... |
@@ -53,6 +53,7 @@ type runcExecutor struct {
|
| 53 | 53 |
rootless bool |
| 54 | 54 |
networkProviders map[pb.NetMode]network.Provider |
| 55 | 55 |
processMode oci.ProcessMode |
| 56 |
+ idmap *idtools.IdentityMapping |
|
| 56 | 57 |
} |
| 57 | 58 |
|
| 58 | 59 |
func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Executor, error) {
|
| ... | ... |
@@ -109,6 +110,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex |
| 109 | 109 |
rootless: opt.Rootless, |
| 110 | 110 |
networkProviders: networkProviders, |
| 111 | 111 |
processMode: opt.ProcessMode, |
| 112 |
+ idmap: opt.IdentityMapping, |
|
| 112 | 113 |
} |
| 113 | 114 |
return w, nil |
| 114 | 115 |
} |
| ... | ... |
@@ -159,8 +161,14 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache. |
| 159 | 159 |
return err |
| 160 | 160 |
} |
| 161 | 161 |
defer os.RemoveAll(bundle) |
| 162 |
+ |
|
| 163 |
+ identity := idtools.Identity{}
|
|
| 164 |
+ if w.idmap != nil {
|
|
| 165 |
+ identity = w.idmap.RootPair() |
|
| 166 |
+ } |
|
| 167 |
+ |
|
| 162 | 168 |
rootFSPath := filepath.Join(bundle, "rootfs") |
| 163 |
- if err := os.Mkdir(rootFSPath, 0700); err != nil {
|
|
| 169 |
+ if err := idtools.MkdirAllAndChown(rootFSPath, 0700, identity); err != nil {
|
|
| 164 | 170 |
return err |
| 165 | 171 |
} |
| 166 | 172 |
if err := mount.All(rootMount, rootFSPath); err != nil {
|
| ... | ... |
@@ -180,9 +188,7 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache. |
| 180 | 180 |
defer f.Close() |
| 181 | 181 |
|
| 182 | 182 |
opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)}
|
| 183 |
- if system.SeccompSupported() {
|
|
| 184 |
- opts = append(opts, seccomp.WithDefaultProfile()) |
|
| 185 |
- } |
|
| 183 |
+ |
|
| 186 | 184 |
if meta.ReadonlyRootFS {
|
| 187 | 185 |
opts = append(opts, containerdoci.WithRootFSReadonly()) |
| 188 | 186 |
} |
| ... | ... |
@@ -197,7 +203,7 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache. |
| 197 | 197 |
} |
| 198 | 198 |
opts = append(opts, containerdoci.WithCgroup(cgroupsPath)) |
| 199 | 199 |
} |
| 200 |
- spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.processMode, opts...) |
|
| 200 |
+ spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.processMode, w.idmap, opts...) |
|
| 201 | 201 |
if err != nil {
|
| 202 | 202 |
return err |
| 203 | 203 |
} |
| ... | ... |
@@ -212,7 +218,7 @@ func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache. |
| 212 | 212 |
if err != nil {
|
| 213 | 213 |
return errors.Wrapf(err, "working dir %s points to invalid target", newp) |
| 214 | 214 |
} |
| 215 |
- if err := os.MkdirAll(newp, 0755); err != nil {
|
|
| 215 |
+ if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
|
|
| 216 | 216 |
return errors.Wrapf(err, "failed to create working directory %s", newp) |
| 217 | 217 |
} |
| 218 | 218 |
|
| ... | ... |
@@ -7,6 +7,7 @@ import ( |
| 7 | 7 |
"strings" |
| 8 | 8 |
"time" |
| 9 | 9 |
|
| 10 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 10 | 11 |
"github.com/moby/buildkit/cache" |
| 11 | 12 |
"github.com/moby/buildkit/exporter" |
| 12 | 13 |
"github.com/moby/buildkit/session" |
| ... | ... |
@@ -68,6 +69,7 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source) |
| 68 | 68 |
return func() error {
|
| 69 | 69 |
var src string |
| 70 | 70 |
var err error |
| 71 |
+ var idmap *idtools.IdentityMapping |
|
| 71 | 72 |
if ref == nil {
|
| 72 | 73 |
src, err = ioutil.TempDir("", "buildkit")
|
| 73 | 74 |
if err != nil {
|
| ... | ... |
@@ -86,17 +88,40 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source) |
| 86 | 86 |
if err != nil {
|
| 87 | 87 |
return err |
| 88 | 88 |
} |
| 89 |
+ |
|
| 90 |
+ idmap = mount.IdentityMapping() |
|
| 91 |
+ |
|
| 89 | 92 |
defer lm.Unmount() |
| 90 | 93 |
} |
| 91 | 94 |
|
| 92 |
- fs := fsutil.NewFS(src, nil) |
|
| 95 |
+ walkOpt := &fsutil.WalkOpt{}
|
|
| 96 |
+ |
|
| 97 |
+ if idmap != nil {
|
|
| 98 |
+ walkOpt.Map = func(p string, st *fstypes.Stat) bool {
|
|
| 99 |
+ uid, gid, err := idmap.ToContainer(idtools.Identity{
|
|
| 100 |
+ UID: int(st.Uid), |
|
| 101 |
+ GID: int(st.Gid), |
|
| 102 |
+ }) |
|
| 103 |
+ if err != nil {
|
|
| 104 |
+ return false |
|
| 105 |
+ } |
|
| 106 |
+ st.Uid = uint32(uid) |
|
| 107 |
+ st.Gid = uint32(gid) |
|
| 108 |
+ return true |
|
| 109 |
+ } |
|
| 110 |
+ } |
|
| 111 |
+ |
|
| 112 |
+ fs := fsutil.NewFS(src, walkOpt) |
|
| 93 | 113 |
lbl := "copying files" |
| 94 | 114 |
if isMap {
|
| 95 | 115 |
lbl += " " + k |
| 96 |
- fs = fsutil.SubDirFS(fs, fstypes.Stat{
|
|
| 116 |
+ fs, err = fsutil.SubDirFS([]fsutil.Dir{{FS: fs, Stat: fstypes.Stat{
|
|
| 97 | 117 |
Mode: uint32(os.ModeDir | 0755), |
| 98 | 118 |
Path: strings.Replace(k, "/", "_", -1), |
| 99 |
- }) |
|
| 119 |
+ }}}) |
|
| 120 |
+ if err != nil {
|
|
| 121 |
+ return err |
|
| 122 |
+ } |
|
| 100 | 123 |
} |
| 101 | 124 |
|
| 102 | 125 |
progress := newProgressHandler(ctx, lbl) |
| 103 | 126 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,177 @@ |
| 0 |
+package local |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "context" |
|
| 4 |
+ "io/ioutil" |
|
| 5 |
+ "os" |
|
| 6 |
+ "strings" |
|
| 7 |
+ "time" |
|
| 8 |
+ |
|
| 9 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 10 |
+ "github.com/moby/buildkit/cache" |
|
| 11 |
+ "github.com/moby/buildkit/exporter" |
|
| 12 |
+ "github.com/moby/buildkit/session" |
|
| 13 |
+ "github.com/moby/buildkit/session/filesync" |
|
| 14 |
+ "github.com/moby/buildkit/snapshot" |
|
| 15 |
+ "github.com/moby/buildkit/util/progress" |
|
| 16 |
+ "github.com/pkg/errors" |
|
| 17 |
+ "github.com/tonistiigi/fsutil" |
|
| 18 |
+ fstypes "github.com/tonistiigi/fsutil/types" |
|
| 19 |
+) |
|
| 20 |
+ |
|
| 21 |
+type Opt struct {
|
|
| 22 |
+ SessionManager *session.Manager |
|
| 23 |
+} |
|
| 24 |
+ |
|
| 25 |
+type localExporter struct {
|
|
| 26 |
+ opt Opt |
|
| 27 |
+ // session manager |
|
| 28 |
+} |
|
| 29 |
+ |
|
| 30 |
+func New(opt Opt) (exporter.Exporter, error) {
|
|
| 31 |
+ le := &localExporter{opt: opt}
|
|
| 32 |
+ return le, nil |
|
| 33 |
+} |
|
| 34 |
+ |
|
| 35 |
+func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
|
|
| 36 |
+ id := session.FromContext(ctx) |
|
| 37 |
+ if id == "" {
|
|
| 38 |
+ return nil, errors.New("could not access local files without session")
|
|
| 39 |
+ } |
|
| 40 |
+ |
|
| 41 |
+ timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) |
|
| 42 |
+ defer cancel() |
|
| 43 |
+ |
|
| 44 |
+ caller, err := e.opt.SessionManager.Get(timeoutCtx, id) |
|
| 45 |
+ if err != nil {
|
|
| 46 |
+ return nil, err |
|
| 47 |
+ } |
|
| 48 |
+ |
|
| 49 |
+ li := &localExporterInstance{localExporter: e, caller: caller}
|
|
| 50 |
+ return li, nil |
|
| 51 |
+} |
|
| 52 |
+ |
|
| 53 |
+type localExporterInstance struct {
|
|
| 54 |
+ *localExporter |
|
| 55 |
+ caller session.Caller |
|
| 56 |
+} |
|
| 57 |
+ |
|
| 58 |
+func (e *localExporterInstance) Name() string {
|
|
| 59 |
+ return "exporting to client" |
|
| 60 |
+} |
|
| 61 |
+ |
|
| 62 |
+func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source) (map[string]string, error) {
|
|
| 63 |
+ var defers []func() |
|
| 64 |
+ |
|
| 65 |
+ defer func() {
|
|
| 66 |
+ for i := len(defers) - 1; i >= 0; i-- {
|
|
| 67 |
+ defers[i]() |
|
| 68 |
+ } |
|
| 69 |
+ }() |
|
| 70 |
+ |
|
| 71 |
+ getDir := func(ctx context.Context, k string, ref cache.ImmutableRef) (*fsutil.Dir, error) {
|
|
| 72 |
+ var src string |
|
| 73 |
+ var err error |
|
| 74 |
+ var idmap *idtools.IdentityMapping |
|
| 75 |
+ if ref == nil {
|
|
| 76 |
+ src, err = ioutil.TempDir("", "buildkit")
|
|
| 77 |
+ if err != nil {
|
|
| 78 |
+ return nil, err |
|
| 79 |
+ } |
|
| 80 |
+ defers = append(defers, func() { os.RemoveAll(src) })
|
|
| 81 |
+ } else {
|
|
| 82 |
+ mount, err := ref.Mount(ctx, true) |
|
| 83 |
+ if err != nil {
|
|
| 84 |
+ return nil, err |
|
| 85 |
+ } |
|
| 86 |
+ |
|
| 87 |
+ lm := snapshot.LocalMounter(mount) |
|
| 88 |
+ |
|
| 89 |
+ src, err = lm.Mount() |
|
| 90 |
+ if err != nil {
|
|
| 91 |
+ return nil, err |
|
| 92 |
+ } |
|
| 93 |
+ |
|
| 94 |
+ idmap = mount.IdentityMapping() |
|
| 95 |
+ |
|
| 96 |
+ defers = append(defers, func() { lm.Unmount() })
|
|
| 97 |
+ } |
|
| 98 |
+ |
|
| 99 |
+ walkOpt := &fsutil.WalkOpt{}
|
|
| 100 |
+ |
|
| 101 |
+ if idmap != nil {
|
|
| 102 |
+ walkOpt.Map = func(p string, st *fstypes.Stat) bool {
|
|
| 103 |
+ uid, gid, err := idmap.ToContainer(idtools.Identity{
|
|
| 104 |
+ UID: int(st.Uid), |
|
| 105 |
+ GID: int(st.Gid), |
|
| 106 |
+ }) |
|
| 107 |
+ if err != nil {
|
|
| 108 |
+ return false |
|
| 109 |
+ } |
|
| 110 |
+ st.Uid = uint32(uid) |
|
| 111 |
+ st.Gid = uint32(gid) |
|
| 112 |
+ return true |
|
| 113 |
+ } |
|
| 114 |
+ } |
|
| 115 |
+ |
|
| 116 |
+ return &fsutil.Dir{
|
|
| 117 |
+ FS: fsutil.NewFS(src, walkOpt), |
|
| 118 |
+ Stat: fstypes.Stat{
|
|
| 119 |
+ Mode: uint32(os.ModeDir | 0755), |
|
| 120 |
+ Path: strings.Replace(k, "/", "_", -1), |
|
| 121 |
+ }, |
|
| 122 |
+ }, nil |
|
| 123 |
+ } |
|
| 124 |
+ |
|
| 125 |
+ var fs fsutil.FS |
|
| 126 |
+ |
|
| 127 |
+ if len(inp.Refs) > 0 {
|
|
| 128 |
+ dirs := make([]fsutil.Dir, 0, len(inp.Refs)) |
|
| 129 |
+ for k, ref := range inp.Refs {
|
|
| 130 |
+ d, err := getDir(ctx, k, ref) |
|
| 131 |
+ if err != nil {
|
|
| 132 |
+ return nil, err |
|
| 133 |
+ } |
|
| 134 |
+ dirs = append(dirs, *d) |
|
| 135 |
+ } |
|
| 136 |
+ var err error |
|
| 137 |
+ fs, err = fsutil.SubDirFS(dirs) |
|
| 138 |
+ if err != nil {
|
|
| 139 |
+ return nil, err |
|
| 140 |
+ } |
|
| 141 |
+ } else {
|
|
| 142 |
+ d, err := getDir(ctx, "", inp.Ref) |
|
| 143 |
+ if err != nil {
|
|
| 144 |
+ return nil, err |
|
| 145 |
+ } |
|
| 146 |
+ fs = d.FS |
|
| 147 |
+ } |
|
| 148 |
+ |
|
| 149 |
+ w, err := filesync.CopyFileWriter(ctx, e.caller) |
|
| 150 |
+ if err != nil {
|
|
| 151 |
+ return nil, err |
|
| 152 |
+ } |
|
| 153 |
+ report := oneOffProgress(ctx, "sending tarball") |
|
| 154 |
+ if err := fsutil.WriteTar(ctx, fs, w); err != nil {
|
|
| 155 |
+ w.Close() |
|
| 156 |
+ return nil, report(err) |
|
| 157 |
+ } |
|
| 158 |
+ return nil, report(w.Close()) |
|
| 159 |
+} |
|
| 160 |
+ |
|
| 161 |
+func oneOffProgress(ctx context.Context, id string) func(err error) error {
|
|
| 162 |
+ pw, _, _ := progress.FromContext(ctx) |
|
| 163 |
+ now := time.Now() |
|
| 164 |
+ st := progress.Status{
|
|
| 165 |
+ Started: &now, |
|
| 166 |
+ } |
|
| 167 |
+ pw.Write(id, st) |
|
| 168 |
+ return func(err error) error {
|
|
| 169 |
+ // TODO: set error on status |
|
| 170 |
+ now := time.Now() |
|
| 171 |
+ st.Completed = &now |
|
| 172 |
+ pw.Write(id, st) |
|
| 173 |
+ pw.Close() |
|
| 174 |
+ return err |
|
| 175 |
+ } |
|
| 176 |
+} |
| ... | ... |
@@ -113,7 +113,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
| 113 | 113 |
name := "load build definition from " + filename |
| 114 | 114 |
|
| 115 | 115 |
src := llb.Local(localNameDockerfile, |
| 116 |
- llb.FollowPaths([]string{filename}),
|
|
| 116 |
+ llb.FollowPaths([]string{filename, filename + ".dockerignore"}),
|
|
| 117 | 117 |
llb.SessionID(c.BuildOpts().SessionID), |
| 118 | 118 |
llb.SharedKeyHint(localNameDockerfile), |
| 119 | 119 |
dockerfile2llb.WithInternalName(name), |
| ... | ... |
@@ -175,6 +175,8 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
| 175 | 175 |
|
| 176 | 176 |
eg, ctx2 := errgroup.WithContext(ctx) |
| 177 | 177 |
var dtDockerfile []byte |
| 178 |
+ var dtDockerignore []byte |
|
| 179 |
+ var dtDockerignoreDefault []byte |
|
| 178 | 180 |
eg.Go(func() error {
|
| 179 | 181 |
res, err := c.Solve(ctx2, client.SolveRequest{
|
| 180 | 182 |
Definition: def.ToPB(), |
| ... | ... |
@@ -194,6 +196,13 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
| 194 | 194 |
if err != nil {
|
| 195 | 195 |
return errors.Wrapf(err, "failed to read dockerfile") |
| 196 | 196 |
} |
| 197 |
+ |
|
| 198 |
+ dt, err := ref.ReadFile(ctx2, client.ReadRequest{
|
|
| 199 |
+ Filename: filename + ".dockerignore", |
|
| 200 |
+ }) |
|
| 201 |
+ if err == nil {
|
|
| 202 |
+ dtDockerignore = dt |
|
| 203 |
+ } |
|
| 197 | 204 |
return nil |
| 198 | 205 |
}) |
| 199 | 206 |
var excludes []string |
| ... | ... |
@@ -223,14 +232,11 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
| 223 | 223 |
if err != nil {
|
| 224 | 224 |
return err |
| 225 | 225 |
} |
| 226 |
- dtDockerignore, err := ref.ReadFile(ctx2, client.ReadRequest{
|
|
| 226 |
+ dtDockerignoreDefault, err = ref.ReadFile(ctx2, client.ReadRequest{
|
|
| 227 | 227 |
Filename: dockerignoreFilename, |
| 228 | 228 |
}) |
| 229 |
- if err == nil {
|
|
| 230 |
- excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dtDockerignore)) |
|
| 231 |
- if err != nil {
|
|
| 232 |
- return errors.Wrap(err, "failed to parse dockerignore") |
|
| 233 |
- } |
|
| 229 |
+ if err != nil {
|
|
| 230 |
+ return nil |
|
| 234 | 231 |
} |
| 235 | 232 |
return nil |
| 236 | 233 |
}) |
| ... | ... |
@@ -240,6 +246,16 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
|
| 240 | 240 |
return nil, err |
| 241 | 241 |
} |
| 242 | 242 |
|
| 243 |
+ if dtDockerignore == nil {
|
|
| 244 |
+ dtDockerignore = dtDockerignoreDefault |
|
| 245 |
+ } |
|
| 246 |
+ if dtDockerignore != nil {
|
|
| 247 |
+ excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dtDockerignore)) |
|
| 248 |
+ if err != nil {
|
|
| 249 |
+ return nil, errors.Wrap(err, "failed to parse dockerignore") |
|
| 250 |
+ } |
|
| 251 |
+ } |
|
| 252 |
+ |
|
| 243 | 253 |
if _, ok := opts["cmdline"]; !ok {
|
| 244 | 254 |
ref, cmdline, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile)) |
| 245 | 255 |
if ok {
|
| ... | ... |
@@ -623,8 +623,10 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE |
| 623 | 623 |
env := d.state.Env() |
| 624 | 624 |
opt := []llb.RunOption{llb.Args(args)}
|
| 625 | 625 |
for _, arg := range d.buildArgs {
|
| 626 |
- env = append(env, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString()))
|
|
| 627 |
- opt = append(opt, llb.AddEnv(arg.Key, arg.ValueString())) |
|
| 626 |
+ if arg.Value != nil {
|
|
| 627 |
+ env = append(env, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString()))
|
|
| 628 |
+ opt = append(opt, llb.AddEnv(arg.Key, arg.ValueString())) |
|
| 629 |
+ } |
|
| 628 | 630 |
} |
| 629 | 631 |
opt = append(opt, dfCmd(c)) |
| 630 | 632 |
if d.ignoreCache {
|
| ... | ... |
@@ -1066,7 +1068,9 @@ func toEnvMap(args []instructions.KeyValuePairOptional, env []string) map[string |
| 1066 | 1066 |
if _, ok := m[arg.Key]; ok {
|
| 1067 | 1067 |
continue |
| 1068 | 1068 |
} |
| 1069 |
- m[arg.Key] = arg.ValueString() |
|
| 1069 |
+ if arg.Value != nil {
|
|
| 1070 |
+ m[arg.Key] = arg.ValueString() |
|
| 1071 |
+ } |
|
| 1070 | 1072 |
} |
| 1071 | 1073 |
return m |
| 1072 | 1074 |
} |
| ... | ... |
@@ -75,6 +75,8 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []* |
| 75 | 75 |
} |
| 76 | 76 |
if mount.ReadOnly {
|
| 77 | 77 |
mountOpts = append(mountOpts, llb.Readonly) |
| 78 |
+ } else if mount.Type == instructions.MountTypeBind {
|
|
| 79 |
+ mountOpts = append(mountOpts, llb.ForceNoOutput) |
|
| 78 | 80 |
} |
| 79 | 81 |
if mount.Type == instructions.MountTypeCache {
|
| 80 | 82 |
sharing := llb.CacheMountShared |
| ... | ... |
@@ -6,7 +6,7 @@ import ( |
| 6 | 6 |
api "github.com/containerd/containerd/api/services/content/v1" |
| 7 | 7 |
"github.com/containerd/containerd/content" |
| 8 | 8 |
"github.com/containerd/containerd/errdefs" |
| 9 |
- contentservice "github.com/containerd/containerd/services/content" |
|
| 9 |
+ "github.com/containerd/containerd/services/content/contentserver" |
|
| 10 | 10 |
"github.com/moby/buildkit/session" |
| 11 | 11 |
digest "github.com/opencontainers/go-digest" |
| 12 | 12 |
ocispec "github.com/opencontainers/image-spec/specs-go/v1" |
| ... | ... |
@@ -120,7 +120,7 @@ type attachable struct {
|
| 120 | 120 |
// A key of the store map is an ID string that is used for choosing underlying store. |
| 121 | 121 |
func NewAttachable(stores map[string]content.Store) session.Attachable {
|
| 122 | 122 |
store := &attachableContentStore{stores: stores}
|
| 123 |
- service := contentservice.NewService(store) |
|
| 123 |
+ service := contentserver.New(store) |
|
| 124 | 124 |
a := attachable{
|
| 125 | 125 |
service: service, |
| 126 | 126 |
} |
| ... | ... |
@@ -57,7 +57,7 @@ func (wc *streamWriterCloser) Close() error {
|
| 57 | 57 |
return nil |
| 58 | 58 |
} |
| 59 | 59 |
|
| 60 |
-func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb) error {
|
|
| 60 |
+func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb, filter func(string, *fstypes.Stat) bool) error {
|
|
| 61 | 61 |
st := time.Now() |
| 62 | 62 |
defer func() {
|
| 63 | 63 |
logrus.Debugf("diffcopy took: %v", time.Since(st))
|
| ... | ... |
@@ -73,6 +73,7 @@ func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progres |
| 73 | 73 |
NotifyHashed: cf, |
| 74 | 74 |
ContentHasher: ch, |
| 75 | 75 |
ProgressCb: progress, |
| 76 |
+ Filter: fsutil.FilterFunc(filter), |
|
| 76 | 77 |
}) |
| 77 | 78 |
} |
| 78 | 79 |
|
| ... | ... |
@@ -129,7 +129,7 @@ type progressCb func(int, bool) |
| 129 | 129 |
type protocol struct {
|
| 130 | 130 |
name string |
| 131 | 131 |
sendFn func(stream grpc.Stream, fs fsutil.FS, progress progressCb) error |
| 132 |
- recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error |
|
| 132 |
+ recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb, mapFunc func(string, *fstypes.Stat) bool) error |
|
| 133 | 133 |
} |
| 134 | 134 |
|
| 135 | 135 |
func isProtoSupported(p string) bool {
|
| ... | ... |
@@ -158,6 +158,7 @@ type FSSendRequestOpt struct {
|
| 158 | 158 |
DestDir string |
| 159 | 159 |
CacheUpdater CacheUpdater |
| 160 | 160 |
ProgressCb func(int, bool) |
| 161 |
+ Filter func(string, *fstypes.Stat) bool |
|
| 161 | 162 |
} |
| 162 | 163 |
|
| 163 | 164 |
// CacheUpdater is an object capable of sending notifications for the cache hash changes |
| ... | ... |
@@ -225,7 +226,7 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
|
| 225 | 225 |
panic(fmt.Sprintf("invalid protocol: %q", pr.name))
|
| 226 | 226 |
} |
| 227 | 227 |
|
| 228 |
- return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb) |
|
| 228 |
+ return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb, opt.Filter) |
|
| 229 | 229 |
} |
| 230 | 230 |
|
| 231 | 231 |
// NewFSSyncTargetDir allows writing into a directory |
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
|
| 7 | 7 |
"github.com/containerd/containerd/mount" |
| 8 | 8 |
"github.com/containerd/containerd/snapshots" |
| 9 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 9 | 10 |
digest "github.com/opencontainers/go-digest" |
| 10 | 11 |
) |
| 11 | 12 |
|
| ... | ... |
@@ -13,6 +14,7 @@ type Mountable interface {
|
| 13 | 13 |
// ID() string |
| 14 | 14 |
Mount() ([]mount.Mount, error) |
| 15 | 15 |
Release() error |
| 16 |
+ IdentityMapping() *idtools.IdentityMapping |
|
| 16 | 17 |
} |
| 17 | 18 |
|
| 18 | 19 |
type SnapshotterBase interface {
|
| ... | ... |
@@ -27,6 +29,7 @@ type SnapshotterBase interface {
|
| 27 | 27 |
Remove(ctx context.Context, key string) error |
| 28 | 28 |
Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error |
| 29 | 29 |
Close() error |
| 30 |
+ IdentityMapping() *idtools.IdentityMapping |
|
| 30 | 31 |
} |
| 31 | 32 |
|
| 32 | 33 |
// Snapshotter defines interface that any snapshot implementation should satisfy |
| ... | ... |
@@ -40,12 +43,13 @@ type Blobmapper interface {
|
| 40 | 40 |
SetBlob(ctx context.Context, key string, diffID, blob digest.Digest) error |
| 41 | 41 |
} |
| 42 | 42 |
|
| 43 |
-func FromContainerdSnapshotter(s snapshots.Snapshotter) SnapshotterBase {
|
|
| 44 |
- return &fromContainerd{Snapshotter: s}
|
|
| 43 |
+func FromContainerdSnapshotter(s snapshots.Snapshotter, idmap *idtools.IdentityMapping) SnapshotterBase {
|
|
| 44 |
+ return &fromContainerd{Snapshotter: s, idmap: idmap}
|
|
| 45 | 45 |
} |
| 46 | 46 |
|
| 47 | 47 |
type fromContainerd struct {
|
| 48 | 48 |
snapshots.Snapshotter |
| 49 |
+ idmap *idtools.IdentityMapping |
|
| 49 | 50 |
} |
| 50 | 51 |
|
| 51 | 52 |
func (s *fromContainerd) Mounts(ctx context.Context, key string) (Mountable, error) {
|
| ... | ... |
@@ -53,7 +57,7 @@ func (s *fromContainerd) Mounts(ctx context.Context, key string) (Mountable, err |
| 53 | 53 |
if err != nil {
|
| 54 | 54 |
return nil, err |
| 55 | 55 |
} |
| 56 |
- return &staticMountable{mounts}, nil
|
|
| 56 |
+ return &staticMountable{mounts, s.idmap}, nil
|
|
| 57 | 57 |
} |
| 58 | 58 |
func (s *fromContainerd) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error {
|
| 59 | 59 |
_, err := s.Snapshotter.Prepare(ctx, key, parent, opts...) |
| ... | ... |
@@ -64,11 +68,15 @@ func (s *fromContainerd) View(ctx context.Context, key, parent string, opts ...s |
| 64 | 64 |
if err != nil {
|
| 65 | 65 |
return nil, err |
| 66 | 66 |
} |
| 67 |
- return &staticMountable{mounts}, nil
|
|
| 67 |
+ return &staticMountable{mounts, s.idmap}, nil
|
|
| 68 |
+} |
|
| 69 |
+func (s *fromContainerd) IdentityMapping() *idtools.IdentityMapping {
|
|
| 70 |
+ return s.idmap |
|
| 68 | 71 |
} |
| 69 | 72 |
|
| 70 | 73 |
type staticMountable struct {
|
| 71 | 74 |
mounts []mount.Mount |
| 75 |
+ idmap *idtools.IdentityMapping |
|
| 72 | 76 |
} |
| 73 | 77 |
|
| 74 | 78 |
func (m *staticMountable) Mount() ([]mount.Mount, error) {
|
| ... | ... |
@@ -79,6 +87,10 @@ func (cm *staticMountable) Release() error {
|
| 79 | 79 |
return nil |
| 80 | 80 |
} |
| 81 | 81 |
|
| 82 |
+func (cm *staticMountable) IdentityMapping() *idtools.IdentityMapping {
|
|
| 83 |
+ return cm.idmap |
|
| 84 |
+} |
|
| 85 |
+ |
|
| 82 | 86 |
// NewContainerdSnapshotter converts snapshotter to containerd snapshotter |
| 83 | 87 |
func NewContainerdSnapshotter(s Snapshotter) (snapshots.Snapshotter, func() error) {
|
| 84 | 88 |
cs := &containerdSnapshotter{Snapshotter: s}
|
| ... | ... |
@@ -10,6 +10,7 @@ import ( |
| 10 | 10 |
"time" |
| 11 | 11 |
|
| 12 | 12 |
"github.com/containerd/continuity/fs" |
| 13 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 13 | 14 |
"github.com/moby/buildkit/snapshot" |
| 14 | 15 |
"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" |
| 15 | 16 |
"github.com/moby/buildkit/solver/pb" |
| ... | ... |
@@ -25,12 +26,35 @@ func timestampToTime(ts int64) *time.Time {
|
| 25 | 25 |
return &tm |
| 26 | 26 |
} |
| 27 | 27 |
|
| 28 |
-func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.ChownOpt) error {
|
|
| 28 |
+func mapUser(user *copy.ChownOpt, idmap *idtools.IdentityMapping) (*copy.ChownOpt, error) {
|
|
| 29 |
+ if idmap == nil {
|
|
| 30 |
+ return user, nil |
|
| 31 |
+ } |
|
| 32 |
+ if user == nil {
|
|
| 33 |
+ identity := idmap.RootPair() |
|
| 34 |
+ return ©.ChownOpt{Uid: identity.UID, Gid: identity.GID}, nil
|
|
| 35 |
+ } |
|
| 36 |
+ identity, err := idmap.ToHost(idtools.Identity{
|
|
| 37 |
+ UID: user.Uid, |
|
| 38 |
+ GID: user.Gid, |
|
| 39 |
+ }) |
|
| 40 |
+ if err != nil {
|
|
| 41 |
+ return nil, err |
|
| 42 |
+ } |
|
| 43 |
+ return ©.ChownOpt{Uid: identity.UID, Gid: identity.GID}, nil
|
|
| 44 |
+} |
|
| 45 |
+ |
|
| 46 |
+func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.ChownOpt, idmap *idtools.IdentityMapping) error {
|
|
| 29 | 47 |
p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
|
| 30 | 48 |
if err != nil {
|
| 31 | 49 |
return err |
| 32 | 50 |
} |
| 33 | 51 |
|
| 52 |
+ user, err = mapUser(user, idmap) |
|
| 53 |
+ if err != nil {
|
|
| 54 |
+ return err |
|
| 55 |
+ } |
|
| 56 |
+ |
|
| 34 | 57 |
if action.MakeParents {
|
| 35 | 58 |
if err := copy.MkdirAll(p, os.FileMode(action.Mode)&0777, user, timestampToTime(action.Timestamp)); err != nil {
|
| 36 | 59 |
return err |
| ... | ... |
@@ -53,12 +77,17 @@ func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy. |
| 53 | 53 |
return nil |
| 54 | 54 |
} |
| 55 | 55 |
|
| 56 |
-func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *copy.ChownOpt) error {
|
|
| 56 |
+func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *copy.ChownOpt, idmap *idtools.IdentityMapping) error {
|
|
| 57 | 57 |
p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
|
| 58 | 58 |
if err != nil {
|
| 59 | 59 |
return err |
| 60 | 60 |
} |
| 61 | 61 |
|
| 62 |
+ user, err = mapUser(user, idmap) |
|
| 63 |
+ if err != nil {
|
|
| 64 |
+ return err |
|
| 65 |
+ } |
|
| 66 |
+ |
|
| 62 | 67 |
if err := ioutil.WriteFile(p, action.Data, os.FileMode(action.Mode)&0777); err != nil {
|
| 63 | 68 |
return err |
| 64 | 69 |
} |
| ... | ... |
@@ -90,7 +119,7 @@ func rm(ctx context.Context, d string, action pb.FileActionRm) error {
|
| 90 | 90 |
return nil |
| 91 | 91 |
} |
| 92 | 92 |
|
| 93 |
-func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *copy.ChownOpt) error {
|
|
| 93 |
+func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *copy.ChownOpt, idmap *idtools.IdentityMapping) error {
|
|
| 94 | 94 |
srcPath := cleanPath(action.Src) |
| 95 | 95 |
destPath := cleanPath(action.Dest) |
| 96 | 96 |
|
| ... | ... |
@@ -109,6 +138,12 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u * |
| 109 | 109 |
return nil |
| 110 | 110 |
} |
| 111 | 111 |
|
| 112 |
+ // TODO(tonistiigi): this is wrong. fsutil.Copy can't handle non-forced user |
|
| 113 |
+ u, err := mapUser(u, idmap) |
|
| 114 |
+ if err != nil {
|
|
| 115 |
+ return err |
|
| 116 |
+ } |
|
| 117 |
+ |
|
| 112 | 118 |
opt := []copy.Opt{
|
| 113 | 119 |
func(ci *copy.CopyInfo) {
|
| 114 | 120 |
ci.Chown = u |
| ... | ... |
@@ -195,7 +230,7 @@ func (fb *Backend) Mkdir(ctx context.Context, m, user, group fileoptypes.Mount, |
| 195 | 195 |
return err |
| 196 | 196 |
} |
| 197 | 197 |
|
| 198 |
- return mkdir(ctx, dir, action, u) |
|
| 198 |
+ return mkdir(ctx, dir, action, u, mnt.m.IdentityMapping()) |
|
| 199 | 199 |
} |
| 200 | 200 |
|
| 201 | 201 |
func (fb *Backend) Mkfile(ctx context.Context, m, user, group fileoptypes.Mount, action pb.FileActionMkFile) error {
|
| ... | ... |
@@ -216,7 +251,7 @@ func (fb *Backend) Mkfile(ctx context.Context, m, user, group fileoptypes.Mount, |
| 216 | 216 |
return err |
| 217 | 217 |
} |
| 218 | 218 |
|
| 219 |
- return mkfile(ctx, dir, action, u) |
|
| 219 |
+ return mkfile(ctx, dir, action, u, mnt.m.IdentityMapping()) |
|
| 220 | 220 |
} |
| 221 | 221 |
func (fb *Backend) Rm(ctx context.Context, m fileoptypes.Mount, action pb.FileActionRm) error {
|
| 222 | 222 |
mnt, ok := m.(*Mount) |
| ... | ... |
@@ -262,5 +297,5 @@ func (fb *Backend) Copy(ctx context.Context, m1, m2, user, group fileoptypes.Mou |
| 262 | 262 |
return err |
| 263 | 263 |
} |
| 264 | 264 |
|
| 265 |
- return docopy(ctx, src, dest, action, u) |
|
| 265 |
+ return docopy(ctx, src, dest, action, u, mnt2.m.IdentityMapping()) |
|
| 266 | 266 |
} |
| ... | ... |
@@ -17,6 +17,7 @@ import ( |
| 17 | 17 |
|
| 18 | 18 |
"github.com/containerd/containerd/mount" |
| 19 | 19 |
"github.com/containerd/containerd/platforms" |
| 20 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 20 | 21 |
"github.com/docker/docker/pkg/locker" |
| 21 | 22 |
"github.com/moby/buildkit/cache" |
| 22 | 23 |
"github.com/moby/buildkit/cache/metadata" |
| ... | ... |
@@ -209,7 +210,7 @@ func (e *execOp) getMountDeps() ([]dep, error) {
|
| 209 | 209 |
deps[m.Input].Selectors = append(deps[m.Input].Selectors, sel) |
| 210 | 210 |
} |
| 211 | 211 |
|
| 212 |
- if !m.Readonly || m.Dest == pb.RootMount { // exclude read-only rootfs
|
|
| 212 |
+ if (!m.Readonly || m.Dest == pb.RootMount) && m.Output != -1 { // skip read-only mounts (except rootfs) and mounts with no output
|
|
| 213 | 213 |
deps[m.Input].NoContentBasedHash = true |
| 214 | 214 |
} |
| 215 | 215 |
} |
| ... | ... |
@@ -329,30 +330,47 @@ func (e *execOp) getSSHMountable(ctx context.Context, m *pb.Mount) (cache.Mounta |
| 329 | 329 |
return nil, err |
| 330 | 330 |
} |
| 331 | 331 |
|
| 332 |
- return &sshMount{mount: m, caller: caller}, nil
|
|
| 332 |
+ return &sshMount{mount: m, caller: caller, idmap: e.cm.IdentityMapping()}, nil
|
|
| 333 | 333 |
} |
| 334 | 334 |
|
| 335 | 335 |
type sshMount struct {
|
| 336 | 336 |
mount *pb.Mount |
| 337 | 337 |
caller session.Caller |
| 338 |
+ idmap *idtools.IdentityMapping |
|
| 338 | 339 |
} |
| 339 | 340 |
|
| 340 | 341 |
func (sm *sshMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
|
| 341 |
- return &sshMountInstance{sm: sm}, nil
|
|
| 342 |
+ return &sshMountInstance{sm: sm, idmap: sm.idmap}, nil
|
|
| 342 | 343 |
} |
| 343 | 344 |
|
| 344 | 345 |
type sshMountInstance struct {
|
| 345 | 346 |
sm *sshMount |
| 346 | 347 |
cleanup func() error |
| 348 |
+ idmap *idtools.IdentityMapping |
|
| 347 | 349 |
} |
| 348 | 350 |
|
| 349 | 351 |
func (sm *sshMountInstance) Mount() ([]mount.Mount, error) {
|
| 350 | 352 |
ctx, cancel := context.WithCancel(context.TODO()) |
| 351 | 353 |
|
| 354 |
+ uid := int(sm.sm.mount.SSHOpt.Uid) |
|
| 355 |
+ gid := int(sm.sm.mount.SSHOpt.Gid) |
|
| 356 |
+ |
|
| 357 |
+ if sm.idmap != nil {
|
|
| 358 |
+ identity, err := sm.idmap.ToHost(idtools.Identity{
|
|
| 359 |
+ UID: uid, |
|
| 360 |
+ GID: gid, |
|
| 361 |
+ }) |
|
| 362 |
+ if err != nil {
|
|
| 363 |
+ return nil, err |
|
| 364 |
+ } |
|
| 365 |
+ uid = identity.UID |
|
| 366 |
+ gid = identity.GID |
|
| 367 |
+ } |
|
| 368 |
+ |
|
| 352 | 369 |
sock, cleanup, err := sshforward.MountSSHSocket(ctx, sm.sm.caller, sshforward.SocketOpt{
|
| 353 | 370 |
ID: sm.sm.mount.SSHOpt.ID, |
| 354 |
- UID: int(sm.sm.mount.SSHOpt.Uid), |
|
| 355 |
- GID: int(sm.sm.mount.SSHOpt.Gid), |
|
| 371 |
+ UID: uid, |
|
| 372 |
+ GID: gid, |
|
| 356 | 373 |
Mode: int(sm.sm.mount.SSHOpt.Mode & 0777), |
| 357 | 374 |
}) |
| 358 | 375 |
if err != nil {
|
| ... | ... |
@@ -384,6 +402,10 @@ func (sm *sshMountInstance) Release() error {
|
| 384 | 384 |
return nil |
| 385 | 385 |
} |
| 386 | 386 |
|
| 387 |
+func (sm *sshMountInstance) IdentityMapping() *idtools.IdentityMapping {
|
|
| 388 |
+ return sm.idmap |
|
| 389 |
+} |
|
| 390 |
+ |
|
| 387 | 391 |
func (e *execOp) getSecretMountable(ctx context.Context, m *pb.Mount) (cache.Mountable, error) {
|
| 388 | 392 |
if m.SecretOpt == nil {
|
| 389 | 393 |
return nil, errors.Errorf("invalid sercet mount options")
|
| ... | ... |
@@ -416,21 +438,23 @@ func (e *execOp) getSecretMountable(ctx context.Context, m *pb.Mount) (cache.Mou |
| 416 | 416 |
return nil, err |
| 417 | 417 |
} |
| 418 | 418 |
|
| 419 |
- return &secretMount{mount: m, data: dt}, nil
|
|
| 419 |
+ return &secretMount{mount: m, data: dt, idmap: e.cm.IdentityMapping()}, nil
|
|
| 420 | 420 |
} |
| 421 | 421 |
|
| 422 | 422 |
type secretMount struct {
|
| 423 | 423 |
mount *pb.Mount |
| 424 | 424 |
data []byte |
| 425 |
+ idmap *idtools.IdentityMapping |
|
| 425 | 426 |
} |
| 426 | 427 |
|
| 427 | 428 |
func (sm *secretMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
|
| 428 |
- return &secretMountInstance{sm: sm}, nil
|
|
| 429 |
+ return &secretMountInstance{sm: sm, idmap: sm.idmap}, nil
|
|
| 429 | 430 |
} |
| 430 | 431 |
|
| 431 | 432 |
type secretMountInstance struct {
|
| 432 |
- sm *secretMount |
|
| 433 |
- root string |
|
| 433 |
+ sm *secretMount |
|
| 434 |
+ root string |
|
| 435 |
+ idmap *idtools.IdentityMapping |
|
| 434 | 436 |
} |
| 435 | 437 |
|
| 436 | 438 |
func (sm *secretMountInstance) Mount() ([]mount.Mount, error) {
|
| ... | ... |
@@ -465,7 +489,22 @@ func (sm *secretMountInstance) Mount() ([]mount.Mount, error) {
|
| 465 | 465 |
return nil, err |
| 466 | 466 |
} |
| 467 | 467 |
|
| 468 |
- if err := os.Chown(fp, int(sm.sm.mount.SecretOpt.Uid), int(sm.sm.mount.SecretOpt.Gid)); err != nil {
|
|
| 468 |
+ uid := int(sm.sm.mount.SecretOpt.Uid) |
|
| 469 |
+ gid := int(sm.sm.mount.SecretOpt.Gid) |
|
| 470 |
+ |
|
| 471 |
+ if sm.idmap != nil {
|
|
| 472 |
+ identity, err := sm.idmap.ToHost(idtools.Identity{
|
|
| 473 |
+ UID: uid, |
|
| 474 |
+ GID: gid, |
|
| 475 |
+ }) |
|
| 476 |
+ if err != nil {
|
|
| 477 |
+ return nil, err |
|
| 478 |
+ } |
|
| 479 |
+ uid = identity.UID |
|
| 480 |
+ gid = identity.GID |
|
| 481 |
+ } |
|
| 482 |
+ |
|
| 483 |
+ if err := os.Chown(fp, uid, gid); err != nil {
|
|
| 469 | 484 |
return nil, err |
| 470 | 485 |
} |
| 471 | 486 |
|
| ... | ... |
@@ -490,6 +529,10 @@ func (sm *secretMountInstance) Release() error {
|
| 490 | 490 |
return nil |
| 491 | 491 |
} |
| 492 | 492 |
|
| 493 |
+func (sm *secretMountInstance) IdentityMapping() *idtools.IdentityMapping {
|
|
| 494 |
+ return sm.idmap |
|
| 495 |
+} |
|
| 496 |
+ |
|
| 493 | 497 |
func addDefaultEnvvar(env []string, k, v string) []string {
|
| 494 | 498 |
for _, e := range env {
|
| 495 | 499 |
if strings.HasPrefix(e, k+"=") {
|
| ... | ... |
@@ -585,7 +628,7 @@ func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Res |
| 585 | 585 |
} |
| 586 | 586 |
|
| 587 | 587 |
case pb.MountType_TMPFS: |
| 588 |
- mountable = newTmpfs() |
|
| 588 |
+ mountable = newTmpfs(e.cm.IdentityMapping()) |
|
| 589 | 589 |
|
| 590 | 590 |
case pb.MountType_SECRET: |
| 591 | 591 |
secretMount, err := e.getSecretMountable(ctx, m) |
| ... | ... |
@@ -653,6 +696,7 @@ func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Res |
| 653 | 653 |
ReadonlyRootFS: readonlyRootFS, |
| 654 | 654 |
ExtraHosts: extraHosts, |
| 655 | 655 |
NetMode: e.op.Network, |
| 656 |
+ SecurityMode: e.op.Security, |
|
| 656 | 657 |
} |
| 657 | 658 |
|
| 658 | 659 |
if e.op.Meta.ProxyEnv != nil {
|
| ... | ... |
@@ -701,19 +745,21 @@ func proxyEnvList(p *pb.ProxyEnv) []string {
|
| 701 | 701 |
return out |
| 702 | 702 |
} |
| 703 | 703 |
|
| 704 |
-func newTmpfs() cache.Mountable {
|
|
| 705 |
- return &tmpfs{}
|
|
| 704 |
+func newTmpfs(idmap *idtools.IdentityMapping) cache.Mountable {
|
|
| 705 |
+ return &tmpfs{idmap: idmap}
|
|
| 706 | 706 |
} |
| 707 | 707 |
|
| 708 | 708 |
type tmpfs struct {
|
| 709 |
+ idmap *idtools.IdentityMapping |
|
| 709 | 710 |
} |
| 710 | 711 |
|
| 711 | 712 |
func (f *tmpfs) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
|
| 712 |
- return &tmpfsMount{readonly: readonly}, nil
|
|
| 713 |
+ return &tmpfsMount{readonly: readonly, idmap: f.idmap}, nil
|
|
| 713 | 714 |
} |
| 714 | 715 |
|
| 715 | 716 |
type tmpfsMount struct {
|
| 716 | 717 |
readonly bool |
| 718 |
+ idmap *idtools.IdentityMapping |
|
| 717 | 719 |
} |
| 718 | 720 |
|
| 719 | 721 |
func (m *tmpfsMount) Mount() ([]mount.Mount, error) {
|
| ... | ... |
@@ -731,6 +777,10 @@ func (m *tmpfsMount) Release() error {
|
| 731 | 731 |
return nil |
| 732 | 732 |
} |
| 733 | 733 |
|
| 734 |
+func (m *tmpfsMount) IdentityMapping() *idtools.IdentityMapping {
|
|
| 735 |
+ return m.idmap |
|
| 736 |
+} |
|
| 737 |
+ |
|
| 734 | 738 |
var cacheRefsLocker = locker.New() |
| 735 | 739 |
var sharedCacheRefs = &cacheRefs{}
|
| 736 | 740 |
|
| ... | ... |
@@ -45,9 +45,10 @@ type Solver struct {
|
| 45 | 45 |
platforms []specs.Platform |
| 46 | 46 |
gatewayForwarder *controlgateway.GatewayForwarder |
| 47 | 47 |
sm *session.Manager |
| 48 |
+ entitlements []string |
|
| 48 | 49 |
} |
| 49 | 50 |
|
| 50 |
-func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.CacheManager, resolveCI map[string]remotecache.ResolveCacheImporterFunc, gatewayForwarder *controlgateway.GatewayForwarder, sm *session.Manager) (*Solver, error) {
|
|
| 51 |
+func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.CacheManager, resolveCI map[string]remotecache.ResolveCacheImporterFunc, gatewayForwarder *controlgateway.GatewayForwarder, sm *session.Manager, ents []string) (*Solver, error) {
|
|
| 51 | 52 |
s := &Solver{
|
| 52 | 53 |
workerController: wc, |
| 53 | 54 |
resolveWorker: defaultResolver(wc), |
| ... | ... |
@@ -55,6 +56,7 @@ func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.Cac |
| 55 | 55 |
resolveCacheImporterFuncs: resolveCI, |
| 56 | 56 |
gatewayForwarder: gatewayForwarder, |
| 57 | 57 |
sm: sm, |
| 58 |
+ entitlements: ents, |
|
| 58 | 59 |
} |
| 59 | 60 |
|
| 60 | 61 |
// executing is currently only allowed on default worker |
| ... | ... |
@@ -101,7 +103,7 @@ func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest |
| 101 | 101 |
|
| 102 | 102 |
defer j.Discard() |
| 103 | 103 |
|
| 104 |
- set, err := entitlements.WhiteList(ent, supportedEntitlements()) |
|
| 104 |
+ set, err := entitlements.WhiteList(ent, supportedEntitlements(s.entitlements)) |
|
| 105 | 105 |
if err != nil {
|
| 106 | 106 |
return nil, err |
| 107 | 107 |
} |
| ... | ... |
@@ -343,12 +345,15 @@ func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bo |
| 343 | 343 |
pw.Write(v.Digest.String(), *v) |
| 344 | 344 |
} |
| 345 | 345 |
|
| 346 |
-var AllowNetworkHostUnstable = false // TODO: enable in constructor |
|
| 347 |
- |
|
| 348 |
-func supportedEntitlements() []entitlements.Entitlement {
|
|
| 346 |
+func supportedEntitlements(ents []string) []entitlements.Entitlement {
|
|
| 349 | 347 |
out := []entitlements.Entitlement{} // nil means no filter
|
| 350 |
- if AllowNetworkHostUnstable {
|
|
| 351 |
- out = append(out, entitlements.EntitlementNetworkHost) |
|
| 348 |
+ for _, e := range ents {
|
|
| 349 |
+ if e == string(entitlements.EntitlementNetworkHost) {
|
|
| 350 |
+ out = append(out, entitlements.EntitlementNetworkHost) |
|
| 351 |
+ } |
|
| 352 |
+ if e == string(entitlements.EntitlementSecurityInsecure) {
|
|
| 353 |
+ out = append(out, entitlements.EntitlementSecurityInsecure) |
|
| 354 |
+ } |
|
| 352 | 355 |
} |
| 353 | 356 |
return out |
| 354 | 357 |
} |
| ... | ... |
@@ -120,9 +120,10 @@ func ValidateEntitlements(ent entitlements.Set) LoadOpt {
|
| 120 | 120 |
return errors.Errorf("%s is not allowed", entitlements.EntitlementNetworkHost)
|
| 121 | 121 |
} |
| 122 | 122 |
} |
| 123 |
- if op.Exec.Network == pb.NetMode_NONE {
|
|
| 124 |
- if !ent.Allowed(entitlements.EntitlementNetworkNone) {
|
|
| 125 |
- return errors.Errorf("%s is not allowed", entitlements.EntitlementNetworkNone)
|
|
| 123 |
+ |
|
| 124 |
+ if op.Exec.Security == pb.SecurityMode_INSECURE {
|
|
| 125 |
+ if !ent.Allowed(entitlements.EntitlementSecurityInsecure) {
|
|
| 126 |
+ return errors.Errorf("%s is not allowed", entitlements.EntitlementSecurityInsecure)
|
|
| 126 | 127 |
} |
| 127 | 128 |
} |
| 128 | 129 |
} |
| ... | ... |
@@ -155,6 +156,7 @@ func newVertex(dgst digest.Digest, op *pb.Op, opMeta *pb.OpMetadata, load func(d |
| 155 | 155 |
return nil, err |
| 156 | 156 |
} |
| 157 | 157 |
} |
| 158 |
+ |
|
| 158 | 159 |
vtx := &vertex{sys: op, options: opt, digest: dgst, name: llbOpName(op)}
|
| 159 | 160 |
for _, in := range op.Inputs {
|
| 160 | 161 |
sub, err := load(in.Digest) |
| ... | ... |
@@ -33,6 +33,7 @@ const ( |
| 33 | 33 |
CapExecMetaBase apicaps.CapID = "exec.meta.base" |
| 34 | 34 |
CapExecMetaProxy apicaps.CapID = "exec.meta.proxyenv" |
| 35 | 35 |
CapExecMetaNetwork apicaps.CapID = "exec.meta.network" |
| 36 |
+ CapExecMetaSecurity apicaps.CapID = "exec.meta.security" |
|
| 36 | 37 |
CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath" |
| 37 | 38 |
CapExecMountBind apicaps.CapID = "exec.mount.bind" |
| 38 | 39 |
CapExecMountCache apicaps.CapID = "exec.mount.cache" |
| ... | ... |
@@ -181,6 +182,12 @@ func init() {
|
| 181 | 181 |
}) |
| 182 | 182 |
|
| 183 | 183 |
Caps.Init(apicaps.Cap{
|
| 184 |
+ ID: CapExecMetaSecurity, |
|
| 185 |
+ Enabled: true, |
|
| 186 |
+ Status: apicaps.CapStatusExperimental, |
|
| 187 |
+ }) |
|
| 188 |
+ |
|
| 189 |
+ Caps.Init(apicaps.Cap{
|
|
| 184 | 190 |
ID: CapExecMountBind, |
| 185 | 191 |
Enabled: true, |
| 186 | 192 |
Status: apicaps.CapStatusExperimental, |
| ... | ... |
@@ -54,7 +54,30 @@ func (x NetMode) String() string {
|
| 54 | 54 |
return proto.EnumName(NetMode_name, int32(x)) |
| 55 | 55 |
} |
| 56 | 56 |
func (NetMode) EnumDescriptor() ([]byte, []int) {
|
| 57 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{0}
|
|
| 57 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{0}
|
|
| 58 |
+} |
|
| 59 |
+ |
|
| 60 |
+type SecurityMode int32 |
|
| 61 |
+ |
|
| 62 |
+const ( |
|
| 63 |
+ SecurityMode_SANDBOX SecurityMode = 0 |
|
| 64 |
+ SecurityMode_INSECURE SecurityMode = 1 |
|
| 65 |
+) |
|
| 66 |
+ |
|
| 67 |
+var SecurityMode_name = map[int32]string{
|
|
| 68 |
+ 0: "SANDBOX", |
|
| 69 |
+ 1: "INSECURE", |
|
| 70 |
+} |
|
| 71 |
+var SecurityMode_value = map[string]int32{
|
|
| 72 |
+ "SANDBOX": 0, |
|
| 73 |
+ "INSECURE": 1, |
|
| 74 |
+} |
|
| 75 |
+ |
|
| 76 |
+func (x SecurityMode) String() string {
|
|
| 77 |
+ return proto.EnumName(SecurityMode_name, int32(x)) |
|
| 78 |
+} |
|
| 79 |
+func (SecurityMode) EnumDescriptor() ([]byte, []int) {
|
|
| 80 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{1}
|
|
| 58 | 81 |
} |
| 59 | 82 |
|
| 60 | 83 |
// MountType defines a type of a mount from a supported set |
| ... | ... |
@@ -87,7 +110,7 @@ func (x MountType) String() string {
|
| 87 | 87 |
return proto.EnumName(MountType_name, int32(x)) |
| 88 | 88 |
} |
| 89 | 89 |
func (MountType) EnumDescriptor() ([]byte, []int) {
|
| 90 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{1}
|
|
| 90 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{2}
|
|
| 91 | 91 |
} |
| 92 | 92 |
|
| 93 | 93 |
// CacheSharingOpt defines different sharing modes for cache mount |
| ... | ... |
@@ -117,7 +140,7 @@ func (x CacheSharingOpt) String() string {
|
| 117 | 117 |
return proto.EnumName(CacheSharingOpt_name, int32(x)) |
| 118 | 118 |
} |
| 119 | 119 |
func (CacheSharingOpt) EnumDescriptor() ([]byte, []int) {
|
| 120 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{2}
|
|
| 120 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{3}
|
|
| 121 | 121 |
} |
| 122 | 122 |
|
| 123 | 123 |
// Op represents a vertex of the LLB DAG. |
| ... | ... |
@@ -138,7 +161,7 @@ func (m *Op) Reset() { *m = Op{} }
|
| 138 | 138 |
func (m *Op) String() string { return proto.CompactTextString(m) }
|
| 139 | 139 |
func (*Op) ProtoMessage() {}
|
| 140 | 140 |
func (*Op) Descriptor() ([]byte, []int) {
|
| 141 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{0}
|
|
| 141 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{0}
|
|
| 142 | 142 |
} |
| 143 | 143 |
func (m *Op) XXX_Unmarshal(b []byte) error {
|
| 144 | 144 |
return m.Unmarshal(b) |
| ... | ... |
@@ -368,7 +391,7 @@ func (m *Platform) Reset() { *m = Platform{} }
|
| 368 | 368 |
func (m *Platform) String() string { return proto.CompactTextString(m) }
|
| 369 | 369 |
func (*Platform) ProtoMessage() {}
|
| 370 | 370 |
func (*Platform) Descriptor() ([]byte, []int) {
|
| 371 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{1}
|
|
| 371 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{1}
|
|
| 372 | 372 |
} |
| 373 | 373 |
func (m *Platform) XXX_Unmarshal(b []byte) error {
|
| 374 | 374 |
return m.Unmarshal(b) |
| ... | ... |
@@ -440,7 +463,7 @@ func (m *Input) Reset() { *m = Input{} }
|
| 440 | 440 |
func (m *Input) String() string { return proto.CompactTextString(m) }
|
| 441 | 441 |
func (*Input) ProtoMessage() {}
|
| 442 | 442 |
func (*Input) Descriptor() ([]byte, []int) {
|
| 443 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{2}
|
|
| 443 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{2}
|
|
| 444 | 444 |
} |
| 445 | 445 |
func (m *Input) XXX_Unmarshal(b []byte) error {
|
| 446 | 446 |
return m.Unmarshal(b) |
| ... | ... |
@@ -467,16 +490,17 @@ var xxx_messageInfo_Input proto.InternalMessageInfo |
| 467 | 467 |
|
| 468 | 468 |
// ExecOp executes a command in a container. |
| 469 | 469 |
type ExecOp struct {
|
| 470 |
- Meta *Meta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` |
|
| 471 |
- Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` |
|
| 472 |
- Network NetMode `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"` |
|
| 470 |
+ Meta *Meta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` |
|
| 471 |
+ Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` |
|
| 472 |
+ Network NetMode `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"` |
|
| 473 |
+ Security SecurityMode `protobuf:"varint,4,opt,name=security,proto3,enum=pb.SecurityMode" json:"security,omitempty"` |
|
| 473 | 474 |
} |
| 474 | 475 |
|
| 475 | 476 |
func (m *ExecOp) Reset() { *m = ExecOp{} }
|
| 476 | 477 |
func (m *ExecOp) String() string { return proto.CompactTextString(m) }
|
| 477 | 478 |
func (*ExecOp) ProtoMessage() {}
|
| 478 | 479 |
func (*ExecOp) Descriptor() ([]byte, []int) {
|
| 479 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{3}
|
|
| 480 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{3}
|
|
| 480 | 481 |
} |
| 481 | 482 |
func (m *ExecOp) XXX_Unmarshal(b []byte) error {
|
| 482 | 483 |
return m.Unmarshal(b) |
| ... | ... |
@@ -522,6 +546,13 @@ func (m *ExecOp) GetNetwork() NetMode {
|
| 522 | 522 |
return NetMode_UNSET |
| 523 | 523 |
} |
| 524 | 524 |
|
| 525 |
+func (m *ExecOp) GetSecurity() SecurityMode {
|
|
| 526 |
+ if m != nil {
|
|
| 527 |
+ return m.Security |
|
| 528 |
+ } |
|
| 529 |
+ return SecurityMode_SANDBOX |
|
| 530 |
+} |
|
| 531 |
+ |
|
| 525 | 532 |
// Meta is a set of arguments for ExecOp. |
| 526 | 533 |
// Meta is unrelated to LLB metadata. |
| 527 | 534 |
// FIXME: rename (ExecContext? ExecArgs?) |
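Worth noting: SANDBOX is the zero value of the new enum, so ExecOps serialized by older clients that never set field 4 keep their previous, fully sandboxed behavior; the generated getter above makes that default explicit. A minimal sketch, assuming the usual pb import for this package:

    // Sketch: an ExecOp that never had Security set still reports SANDBOX.
    var op pb.ExecOp
    if op.GetSecurity() == pb.SecurityMode_SANDBOX {
        // default: confined execution, no extra privileges requested
    }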
| ... | ... |
@@ -538,7 +569,7 @@ func (m *Meta) Reset() { *m = Meta{} }
|
| 538 | 538 |
func (m *Meta) String() string { return proto.CompactTextString(m) }
|
| 539 | 539 |
func (*Meta) ProtoMessage() {}
|
| 540 | 540 |
func (*Meta) Descriptor() ([]byte, []int) {
|
| 541 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{4}
|
|
| 541 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{4}
|
|
| 542 | 542 |
} |
| 543 | 543 |
func (m *Meta) XXX_Unmarshal(b []byte) error {
|
| 544 | 544 |
return m.Unmarshal(b) |
| ... | ... |
@@ -622,7 +653,7 @@ func (m *Mount) Reset() { *m = Mount{} }
|
| 622 | 622 |
func (m *Mount) String() string { return proto.CompactTextString(m) }
|
| 623 | 623 |
func (*Mount) ProtoMessage() {}
|
| 624 | 624 |
func (*Mount) Descriptor() ([]byte, []int) {
|
| 625 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{5}
|
|
| 625 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{5}
|
|
| 626 | 626 |
} |
| 627 | 627 |
func (m *Mount) XXX_Unmarshal(b []byte) error {
|
| 628 | 628 |
return m.Unmarshal(b) |
| ... | ... |
@@ -708,7 +739,7 @@ func (m *CacheOpt) Reset() { *m = CacheOpt{} }
|
| 708 | 708 |
func (m *CacheOpt) String() string { return proto.CompactTextString(m) }
|
| 709 | 709 |
func (*CacheOpt) ProtoMessage() {}
|
| 710 | 710 |
func (*CacheOpt) Descriptor() ([]byte, []int) {
|
| 711 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{6}
|
|
| 711 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{6}
|
|
| 712 | 712 |
} |
| 713 | 713 |
func (m *CacheOpt) XXX_Unmarshal(b []byte) error {
|
| 714 | 714 |
return m.Unmarshal(b) |
| ... | ... |
@@ -766,7 +797,7 @@ func (m *SecretOpt) Reset() { *m = SecretOpt{} }
|
| 766 | 766 |
func (m *SecretOpt) String() string { return proto.CompactTextString(m) }
|
| 767 | 767 |
func (*SecretOpt) ProtoMessage() {}
|
| 768 | 768 |
func (*SecretOpt) Descriptor() ([]byte, []int) {
|
| 769 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{7}
|
|
| 769 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{7}
|
|
| 770 | 770 |
} |
| 771 | 771 |
func (m *SecretOpt) XXX_Unmarshal(b []byte) error {
|
| 772 | 772 |
return m.Unmarshal(b) |
| ... | ... |
@@ -845,7 +876,7 @@ func (m *SSHOpt) Reset() { *m = SSHOpt{} }
|
| 845 | 845 |
func (m *SSHOpt) String() string { return proto.CompactTextString(m) }
|
| 846 | 846 |
func (*SSHOpt) ProtoMessage() {}
|
| 847 | 847 |
func (*SSHOpt) Descriptor() ([]byte, []int) {
|
| 848 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{8}
|
|
| 848 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{8}
|
|
| 849 | 849 |
} |
| 850 | 850 |
func (m *SSHOpt) XXX_Unmarshal(b []byte) error {
|
| 851 | 851 |
return m.Unmarshal(b) |
| ... | ... |
@@ -918,7 +949,7 @@ func (m *SourceOp) Reset() { *m = SourceOp{} }
|
| 918 | 918 |
func (m *SourceOp) String() string { return proto.CompactTextString(m) }
|
| 919 | 919 |
func (*SourceOp) ProtoMessage() {}
|
| 920 | 920 |
func (*SourceOp) Descriptor() ([]byte, []int) {
|
| 921 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{9}
|
|
| 921 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{9}
|
|
| 922 | 922 |
} |
| 923 | 923 |
func (m *SourceOp) XXX_Unmarshal(b []byte) error {
|
| 924 | 924 |
return m.Unmarshal(b) |
| ... | ... |
@@ -970,7 +1001,7 @@ func (m *BuildOp) Reset() { *m = BuildOp{} }
|
| 970 | 970 |
func (m *BuildOp) String() string { return proto.CompactTextString(m) }
|
| 971 | 971 |
func (*BuildOp) ProtoMessage() {}
|
| 972 | 972 |
func (*BuildOp) Descriptor() ([]byte, []int) {
|
| 973 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{10}
|
|
| 973 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{10}
|
|
| 974 | 974 |
} |
| 975 | 975 |
func (m *BuildOp) XXX_Unmarshal(b []byte) error {
|
| 976 | 976 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1025,7 +1056,7 @@ func (m *BuildInput) Reset() { *m = BuildInput{} }
|
| 1025 | 1025 |
func (m *BuildInput) String() string { return proto.CompactTextString(m) }
|
| 1026 | 1026 |
func (*BuildInput) ProtoMessage() {}
|
| 1027 | 1027 |
func (*BuildInput) Descriptor() ([]byte, []int) {
|
| 1028 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{11}
|
|
| 1028 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{11}
|
|
| 1029 | 1029 |
} |
| 1030 | 1030 |
func (m *BuildInput) XXX_Unmarshal(b []byte) error {
|
| 1031 | 1031 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1066,7 +1097,7 @@ func (m *OpMetadata) Reset() { *m = OpMetadata{} }
|
| 1066 | 1066 |
func (m *OpMetadata) String() string { return proto.CompactTextString(m) }
|
| 1067 | 1067 |
func (*OpMetadata) ProtoMessage() {}
|
| 1068 | 1068 |
func (*OpMetadata) Descriptor() ([]byte, []int) {
|
| 1069 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{12}
|
|
| 1069 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{12}
|
|
| 1070 | 1070 |
} |
| 1071 | 1071 |
func (m *OpMetadata) XXX_Unmarshal(b []byte) error {
|
| 1072 | 1072 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1127,7 +1158,7 @@ func (m *ExportCache) Reset() { *m = ExportCache{} }
|
| 1127 | 1127 |
func (m *ExportCache) String() string { return proto.CompactTextString(m) }
|
| 1128 | 1128 |
func (*ExportCache) ProtoMessage() {}
|
| 1129 | 1129 |
func (*ExportCache) Descriptor() ([]byte, []int) {
|
| 1130 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{13}
|
|
| 1130 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{13}
|
|
| 1131 | 1131 |
} |
| 1132 | 1132 |
func (m *ExportCache) XXX_Unmarshal(b []byte) error {
|
| 1133 | 1133 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1170,7 +1201,7 @@ func (m *ProxyEnv) Reset() { *m = ProxyEnv{} }
|
| 1170 | 1170 |
func (m *ProxyEnv) String() string { return proto.CompactTextString(m) }
|
| 1171 | 1171 |
func (*ProxyEnv) ProtoMessage() {}
|
| 1172 | 1172 |
func (*ProxyEnv) Descriptor() ([]byte, []int) {
|
| 1173 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{14}
|
|
| 1173 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{14}
|
|
| 1174 | 1174 |
} |
| 1175 | 1175 |
func (m *ProxyEnv) XXX_Unmarshal(b []byte) error {
|
| 1176 | 1176 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1232,7 +1263,7 @@ func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} }
|
| 1232 | 1232 |
func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) }
|
| 1233 | 1233 |
func (*WorkerConstraints) ProtoMessage() {}
|
| 1234 | 1234 |
func (*WorkerConstraints) Descriptor() ([]byte, []int) {
|
| 1235 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{15}
|
|
| 1235 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{15}
|
|
| 1236 | 1236 |
} |
| 1237 | 1237 |
func (m *WorkerConstraints) XXX_Unmarshal(b []byte) error {
|
| 1238 | 1238 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1277,7 +1308,7 @@ func (m *Definition) Reset() { *m = Definition{} }
|
| 1277 | 1277 |
func (m *Definition) String() string { return proto.CompactTextString(m) }
|
| 1278 | 1278 |
func (*Definition) ProtoMessage() {}
|
| 1279 | 1279 |
func (*Definition) Descriptor() ([]byte, []int) {
|
| 1280 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{16}
|
|
| 1280 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{16}
|
|
| 1281 | 1281 |
} |
| 1282 | 1282 |
func (m *Definition) XXX_Unmarshal(b []byte) error {
|
| 1283 | 1283 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1325,7 +1356,7 @@ func (m *HostIP) Reset() { *m = HostIP{} }
|
| 1325 | 1325 |
func (m *HostIP) String() string { return proto.CompactTextString(m) }
|
| 1326 | 1326 |
func (*HostIP) ProtoMessage() {}
|
| 1327 | 1327 |
func (*HostIP) Descriptor() ([]byte, []int) {
|
| 1328 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{17}
|
|
| 1328 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{17}
|
|
| 1329 | 1329 |
} |
| 1330 | 1330 |
func (m *HostIP) XXX_Unmarshal(b []byte) error {
|
| 1331 | 1331 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1372,7 +1403,7 @@ func (m *FileOp) Reset() { *m = FileOp{} }
|
| 1372 | 1372 |
func (m *FileOp) String() string { return proto.CompactTextString(m) }
|
| 1373 | 1373 |
func (*FileOp) ProtoMessage() {}
|
| 1374 | 1374 |
func (*FileOp) Descriptor() ([]byte, []int) {
|
| 1375 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{18}
|
|
| 1375 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{18}
|
|
| 1376 | 1376 |
} |
| 1377 | 1377 |
func (m *FileOp) XXX_Unmarshal(b []byte) error {
|
| 1378 | 1378 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1420,7 +1451,7 @@ func (m *FileAction) Reset() { *m = FileAction{} }
|
| 1420 | 1420 |
func (m *FileAction) String() string { return proto.CompactTextString(m) }
|
| 1421 | 1421 |
func (*FileAction) ProtoMessage() {}
|
| 1422 | 1422 |
func (*FileAction) Descriptor() ([]byte, []int) {
|
| 1423 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{19}
|
|
| 1423 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{19}
|
|
| 1424 | 1424 |
} |
| 1425 | 1425 |
func (m *FileAction) XXX_Unmarshal(b []byte) error {
|
| 1426 | 1426 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1645,7 +1676,7 @@ func (m *FileActionCopy) Reset() { *m = FileActionCopy{} }
|
| 1645 | 1645 |
func (m *FileActionCopy) String() string { return proto.CompactTextString(m) }
|
| 1646 | 1646 |
func (*FileActionCopy) ProtoMessage() {}
|
| 1647 | 1647 |
func (*FileActionCopy) Descriptor() ([]byte, []int) {
|
| 1648 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{20}
|
|
| 1648 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{20}
|
|
| 1649 | 1649 |
} |
| 1650 | 1650 |
func (m *FileActionCopy) XXX_Unmarshal(b []byte) error {
|
| 1651 | 1651 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1764,7 +1795,7 @@ func (m *FileActionMkFile) Reset() { *m = FileActionMkFile{} }
|
| 1764 | 1764 |
func (m *FileActionMkFile) String() string { return proto.CompactTextString(m) }
|
| 1765 | 1765 |
func (*FileActionMkFile) ProtoMessage() {}
|
| 1766 | 1766 |
func (*FileActionMkFile) Descriptor() ([]byte, []int) {
|
| 1767 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{21}
|
|
| 1767 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{21}
|
|
| 1768 | 1768 |
} |
| 1769 | 1769 |
func (m *FileActionMkFile) XXX_Unmarshal(b []byte) error {
|
| 1770 | 1770 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1841,7 +1872,7 @@ func (m *FileActionMkDir) Reset() { *m = FileActionMkDir{} }
|
| 1841 | 1841 |
func (m *FileActionMkDir) String() string { return proto.CompactTextString(m) }
|
| 1842 | 1842 |
func (*FileActionMkDir) ProtoMessage() {}
|
| 1843 | 1843 |
func (*FileActionMkDir) Descriptor() ([]byte, []int) {
|
| 1844 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{22}
|
|
| 1844 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{22}
|
|
| 1845 | 1845 |
} |
| 1846 | 1846 |
func (m *FileActionMkDir) XXX_Unmarshal(b []byte) error {
|
| 1847 | 1847 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1914,7 +1945,7 @@ func (m *FileActionRm) Reset() { *m = FileActionRm{} }
|
| 1914 | 1914 |
func (m *FileActionRm) String() string { return proto.CompactTextString(m) }
|
| 1915 | 1915 |
func (*FileActionRm) ProtoMessage() {}
|
| 1916 | 1916 |
func (*FileActionRm) Descriptor() ([]byte, []int) {
|
| 1917 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{23}
|
|
| 1917 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{23}
|
|
| 1918 | 1918 |
} |
| 1919 | 1919 |
func (m *FileActionRm) XXX_Unmarshal(b []byte) error {
|
| 1920 | 1920 |
return m.Unmarshal(b) |
| ... | ... |
@@ -1969,7 +2000,7 @@ func (m *ChownOpt) Reset() { *m = ChownOpt{} }
|
| 1969 | 1969 |
func (m *ChownOpt) String() string { return proto.CompactTextString(m) }
|
| 1970 | 1970 |
func (*ChownOpt) ProtoMessage() {}
|
| 1971 | 1971 |
func (*ChownOpt) Descriptor() ([]byte, []int) {
|
| 1972 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{24}
|
|
| 1972 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{24}
|
|
| 1973 | 1973 |
} |
| 1974 | 1974 |
func (m *ChownOpt) XXX_Unmarshal(b []byte) error {
|
| 1975 | 1975 |
return m.Unmarshal(b) |
| ... | ... |
@@ -2019,7 +2050,7 @@ func (m *UserOpt) Reset() { *m = UserOpt{} }
|
| 2019 | 2019 |
func (m *UserOpt) String() string { return proto.CompactTextString(m) }
|
| 2020 | 2020 |
func (*UserOpt) ProtoMessage() {}
|
| 2021 | 2021 |
func (*UserOpt) Descriptor() ([]byte, []int) {
|
| 2022 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{25}
|
|
| 2022 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{25}
|
|
| 2023 | 2023 |
} |
| 2024 | 2024 |
func (m *UserOpt) XXX_Unmarshal(b []byte) error {
|
| 2025 | 2025 |
return m.Unmarshal(b) |
| ... | ... |
@@ -2159,7 +2190,7 @@ func (m *NamedUserOpt) Reset() { *m = NamedUserOpt{} }
|
| 2159 | 2159 |
func (m *NamedUserOpt) String() string { return proto.CompactTextString(m) }
|
| 2160 | 2160 |
func (*NamedUserOpt) ProtoMessage() {}
|
| 2161 | 2161 |
func (*NamedUserOpt) Descriptor() ([]byte, []int) {
|
| 2162 |
- return fileDescriptor_ops_8d64813b9835ab08, []int{26}
|
|
| 2162 |
+ return fileDescriptor_ops_0b9d2e829935306b, []int{26}
|
|
| 2163 | 2163 |
} |
| 2164 | 2164 |
func (m *NamedUserOpt) XXX_Unmarshal(b []byte) error {
|
| 2165 | 2165 |
return m.Unmarshal(b) |
| ... | ... |
@@ -2226,6 +2257,7 @@ func init() {
|
| 2226 | 2226 |
proto.RegisterType((*UserOpt)(nil), "pb.UserOpt") |
| 2227 | 2227 |
proto.RegisterType((*NamedUserOpt)(nil), "pb.NamedUserOpt") |
| 2228 | 2228 |
proto.RegisterEnum("pb.NetMode", NetMode_name, NetMode_value)
|
| 2229 |
+ proto.RegisterEnum("pb.SecurityMode", SecurityMode_name, SecurityMode_value)
|
|
| 2229 | 2230 |
proto.RegisterEnum("pb.MountType", MountType_name, MountType_value)
|
| 2230 | 2231 |
proto.RegisterEnum("pb.CacheSharingOpt", CacheSharingOpt_name, CacheSharingOpt_value)
|
| 2231 | 2232 |
} |
| ... | ... |
@@ -2470,6 +2502,11 @@ func (m *ExecOp) MarshalTo(dAtA []byte) (int, error) {
|
| 2470 | 2470 |
i++ |
| 2471 | 2471 |
i = encodeVarintOps(dAtA, i, uint64(m.Network)) |
| 2472 | 2472 |
} |
| 2473 |
+ if m.Security != 0 {
|
|
| 2474 |
+ dAtA[i] = 0x20 |
|
| 2475 |
+ i++ |
|
| 2476 |
+ i = encodeVarintOps(dAtA, i, uint64(m.Security)) |
|
| 2477 |
+ } |
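The hard-coded 0x20 above is the standard protobuf field key for the new field: key = (field_number << 3) | wire_type = (4 << 3) | 0 = 0x20, i.e. field 4 encoded as a varint, which matches the SecurityMode security = 4 declaration in ops.proto. A tiny illustrative check (names are illustrative only):

    // Illustrative only: derive the tag byte for ExecOp.Security.
    const (
        securityFieldNumber = 4 // SecurityMode security = 4;
        wireTypeVarint      = 0 // enums are varint-encoded
    )
    var securityTag = byte(securityFieldNumber<<3 | wireTypeVarint) // == 0x20
    _ = securityTag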
|
| 2473 | 2478 |
return i, nil |
| 2474 | 2479 |
} |
| 2475 | 2480 |
|
| ... | ... |
@@ -3852,6 +3889,9 @@ func (m *ExecOp) Size() (n int) {
|
| 3852 | 3852 |
if m.Network != 0 {
|
| 3853 | 3853 |
n += 1 + sovOps(uint64(m.Network)) |
| 3854 | 3854 |
} |
| 3855 |
+ if m.Security != 0 {
|
|
| 3856 |
+ n += 1 + sovOps(uint64(m.Security)) |
|
| 3857 |
+ } |
|
| 3855 | 3858 |
return n |
| 3856 | 3859 |
} |
| 3857 | 3860 |
|
| ... | ... |
@@ -5156,6 +5196,25 @@ func (m *ExecOp) Unmarshal(dAtA []byte) error {
|
| 5156 | 5156 |
break |
| 5157 | 5157 |
} |
| 5158 | 5158 |
} |
| 5159 |
+ case 4: |
|
| 5160 |
+ if wireType != 0 {
|
|
| 5161 |
+ return fmt.Errorf("proto: wrong wireType = %d for field Security", wireType)
|
|
| 5162 |
+ } |
|
| 5163 |
+ m.Security = 0 |
|
| 5164 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 5165 |
+ if shift >= 64 {
|
|
| 5166 |
+ return ErrIntOverflowOps |
|
| 5167 |
+ } |
|
| 5168 |
+ if iNdEx >= l {
|
|
| 5169 |
+ return io.ErrUnexpectedEOF |
|
| 5170 |
+ } |
|
| 5171 |
+ b := dAtA[iNdEx] |
|
| 5172 |
+ iNdEx++ |
|
| 5173 |
+ m.Security |= (SecurityMode(b) & 0x7F) << shift |
|
| 5174 |
+ if b < 0x80 {
|
|
| 5175 |
+ break |
|
| 5176 |
+ } |
|
| 5177 |
+ } |
|
| 5159 | 5178 |
default: |
| 5160 | 5179 |
iNdEx = preIndex |
| 5161 | 5180 |
skippy, err := skipOps(dAtA[iNdEx:]) |
| ... | ... |
@@ -9171,129 +9230,132 @@ var ( |
| 9171 | 9171 |
ErrIntOverflowOps = fmt.Errorf("proto: integer overflow")
|
| 9172 | 9172 |
) |
| 9173 | 9173 |
|
| 9174 |
-func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_8d64813b9835ab08) }
|
|
| 9174 |
+func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_0b9d2e829935306b) }
|
|
| 9175 | 9175 |
|
| 9176 |
-var fileDescriptor_ops_8d64813b9835ab08 = []byte{
|
|
| 9177 |
- // 1924 bytes of a gzipped FileDescriptorProto |
|
| 9176 |
+var fileDescriptor_ops_0b9d2e829935306b = []byte{
|
|
| 9177 |
+ // 1978 bytes of a gzipped FileDescriptorProto |
|
| 9178 | 9178 |
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5f, 0x6f, 0x1b, 0xc7, |
| 9179 |
- 0x11, 0x17, 0x8f, 0x7f, 0x6f, 0x28, 0xc9, 0xec, 0xc6, 0x49, 0x59, 0xd5, 0x95, 0x94, 0x4b, 0x1a, |
|
| 9180 |
- 0x30, 0xb2, 0x4d, 0x01, 0x0a, 0x90, 0x04, 0x79, 0x28, 0x2a, 0xfe, 0x31, 0xc4, 0x24, 0x16, 0x85, |
|
| 9181 |
- 0xa5, 0xed, 0x3c, 0x1a, 0xc7, 0xbb, 0x25, 0x75, 0x20, 0xef, 0xf6, 0xb0, 0xb7, 0xb4, 0xc4, 0x97, |
|
| 9182 |
- 0x3e, 0xf8, 0x13, 0x04, 0x28, 0xd0, 0xb7, 0x3e, 0xf4, 0xa5, 0x40, 0x3f, 0x44, 0xdf, 0xf3, 0x18, |
|
| 9183 |
- 0x14, 0x7d, 0x48, 0xfb, 0x90, 0x16, 0xf6, 0x17, 0x29, 0x66, 0x77, 0x8f, 0x77, 0xa4, 0x15, 0xd8, |
|
| 9184 |
- 0x46, 0x8b, 0x3e, 0x71, 0x76, 0xe6, 0xb7, 0xb3, 0xb3, 0x33, 0xb3, 0x33, 0x73, 0x04, 0x9b, 0xc7, |
|
| 9185 |
- 0x49, 0x3b, 0x16, 0x5c, 0x72, 0x62, 0xc5, 0xe3, 0xbd, 0xfb, 0xd3, 0x40, 0x5e, 0x2e, 0xc6, 0x6d, |
|
| 9186 |
- 0x8f, 0x87, 0xc7, 0x53, 0x3e, 0xe5, 0xc7, 0x4a, 0x34, 0x5e, 0x4c, 0xd4, 0x4a, 0x2d, 0x14, 0xa5, |
|
| 9187 |
- 0xb7, 0x38, 0x7f, 0xb2, 0xc0, 0x1a, 0xc6, 0xe4, 0x7d, 0xa8, 0x04, 0x51, 0xbc, 0x90, 0x49, 0xb3, |
|
| 9188 |
- 0x70, 0x58, 0x6c, 0xd5, 0x4f, 0xec, 0x76, 0x3c, 0x6e, 0x0f, 0x90, 0x43, 0x8d, 0x80, 0x1c, 0x42, |
|
| 9189 |
- 0x89, 0x5d, 0x33, 0xaf, 0x69, 0x1d, 0x16, 0x5a, 0xf5, 0x13, 0x40, 0x40, 0xff, 0x9a, 0x79, 0xc3, |
|
| 9190 |
- 0xf8, 0x6c, 0x8b, 0x2a, 0x09, 0xf9, 0x08, 0x2a, 0x09, 0x5f, 0x08, 0x8f, 0x35, 0x8b, 0x0a, 0xb3, |
|
| 9191 |
- 0x8d, 0x98, 0x91, 0xe2, 0x28, 0x94, 0x91, 0xa2, 0xa6, 0x49, 0x30, 0x67, 0xcd, 0x52, 0xa6, 0xe9, |
|
| 9192 |
- 0x41, 0x30, 0xd7, 0x18, 0x25, 0x21, 0x1f, 0x40, 0x79, 0xbc, 0x08, 0xe6, 0x7e, 0xb3, 0xac, 0x20, |
|
| 9193 |
- 0x75, 0x84, 0x74, 0x90, 0xa1, 0x30, 0x5a, 0x46, 0x5a, 0x50, 0x8b, 0xe7, 0xae, 0x9c, 0x70, 0x11, |
|
| 9194 |
- 0x36, 0x21, 0x3b, 0xf0, 0xc2, 0xf0, 0xe8, 0x4a, 0x4a, 0x3e, 0x83, 0xba, 0xc7, 0xa3, 0x44, 0x0a, |
|
| 9195 |
- 0x37, 0x88, 0x64, 0xd2, 0xac, 0x2b, 0xf0, 0xbb, 0x08, 0xfe, 0x86, 0x8b, 0x19, 0x13, 0xdd, 0x4c, |
|
| 9196 |
- 0x48, 0xf3, 0xc8, 0x4e, 0x09, 0x2c, 0x1e, 0x3b, 0x7f, 0x28, 0x40, 0x2d, 0xd5, 0x4a, 0x1c, 0xd8, |
|
| 9197 |
- 0x3e, 0x15, 0xde, 0x65, 0x20, 0x99, 0x27, 0x17, 0x82, 0x35, 0x0b, 0x87, 0x85, 0x96, 0x4d, 0xd7, |
|
| 9198 |
- 0x78, 0x64, 0x17, 0xac, 0xe1, 0x48, 0x39, 0xca, 0xa6, 0xd6, 0x70, 0x44, 0x9a, 0x50, 0x7d, 0xe2, |
|
| 9199 |
- 0x8a, 0xc0, 0x8d, 0xa4, 0xf2, 0x8c, 0x4d, 0xd3, 0x25, 0xb9, 0x03, 0xf6, 0x70, 0xf4, 0x84, 0x89, |
|
| 9200 |
- 0x24, 0xe0, 0x91, 0xf2, 0x87, 0x4d, 0x33, 0x06, 0xd9, 0x07, 0x18, 0x8e, 0x1e, 0x30, 0x17, 0x95, |
|
| 9201 |
- 0x26, 0xcd, 0xf2, 0x61, 0xb1, 0x65, 0xd3, 0x1c, 0xc7, 0xf9, 0x1d, 0x94, 0x55, 0x8c, 0xc8, 0x97, |
|
| 9202 |
- 0x50, 0xf1, 0x83, 0x29, 0x4b, 0xa4, 0x36, 0xa7, 0x73, 0xf2, 0xdd, 0x8f, 0x07, 0x5b, 0xff, 0xfc, |
|
| 9203 |
- 0xf1, 0xe0, 0x28, 0x97, 0x0c, 0x3c, 0x66, 0x91, 0xc7, 0x23, 0xe9, 0x06, 0x11, 0x13, 0xc9, 0xf1, |
|
| 9204 |
- 0x94, 0xdf, 0xd7, 0x5b, 0xda, 0x3d, 0xf5, 0x43, 0x8d, 0x06, 0xf2, 0x31, 0x94, 0x83, 0xc8, 0x67, |
|
| 9205 |
- 0xd7, 0xca, 0xfe, 0x62, 0xe7, 0x1d, 0xa3, 0xaa, 0x3e, 0x5c, 0xc8, 0x78, 0x21, 0x07, 0x28, 0xa2, |
|
| 9206 |
- 0x1a, 0xe1, 0xc4, 0x50, 0xd1, 0x29, 0x40, 0xee, 0x40, 0x29, 0x64, 0xd2, 0x55, 0xc7, 0xd7, 0x4f, |
|
| 9207 |
- 0x6a, 0xe8, 0xda, 0x87, 0x4c, 0xba, 0x54, 0x71, 0x31, 0xbb, 0x42, 0xbe, 0x40, 0xd7, 0x5b, 0x59, |
|
| 9208 |
- 0x76, 0x3d, 0x44, 0x0e, 0x35, 0x02, 0xf2, 0x6b, 0xa8, 0x46, 0x4c, 0x5e, 0x71, 0x31, 0x53, 0x2e, |
|
| 9209 |
- 0xda, 0xd5, 0x31, 0x3f, 0x67, 0xf2, 0x21, 0xf7, 0x19, 0x4d, 0x65, 0xce, 0x5f, 0x0a, 0x50, 0x42, |
|
| 9210 |
- 0xc5, 0x84, 0x40, 0xc9, 0x15, 0x53, 0x9d, 0xae, 0x36, 0x55, 0x34, 0x69, 0x40, 0x91, 0x45, 0xcf, |
|
| 9211 |
- 0xd4, 0x19, 0x36, 0x45, 0x12, 0x39, 0xde, 0x95, 0x6f, 0x9c, 0x8e, 0x24, 0xee, 0x5b, 0x24, 0x4c, |
|
| 9212 |
- 0x18, 0x5f, 0x2b, 0x9a, 0x7c, 0x0c, 0x76, 0x2c, 0xf8, 0xf5, 0xf2, 0x29, 0xee, 0x2e, 0xe7, 0x32, |
|
| 9213 |
- 0x09, 0x99, 0xfd, 0xe8, 0x19, 0xad, 0xc5, 0x86, 0x22, 0x47, 0x00, 0xec, 0x5a, 0x0a, 0xf7, 0x8c, |
|
| 9214 |
- 0x27, 0x32, 0x69, 0x56, 0xd4, 0x6d, 0x54, 0x02, 0x23, 0x63, 0x70, 0x41, 0x73, 0x52, 0xe7, 0x6f, |
|
| 9215 |
- 0x16, 0x94, 0xd5, 0x25, 0x49, 0x0b, 0x5d, 0x1a, 0x2f, 0x74, 0x74, 0x8a, 0x1d, 0x62, 0x5c, 0x0a, |
|
| 9216 |
- 0x2a, 0x78, 0x2b, 0x8f, 0x62, 0x20, 0xf7, 0xa0, 0x96, 0xb0, 0x39, 0xf3, 0x24, 0x17, 0x26, 0x7f, |
|
| 9217 |
- 0x56, 0x6b, 0x34, 0xdd, 0xc7, 0x10, 0xeb, 0xdb, 0x28, 0x9a, 0xdc, 0x85, 0x0a, 0x57, 0x71, 0x51, |
|
| 9218 |
- 0x17, 0xfa, 0x89, 0x68, 0x19, 0x08, 0x2a, 0x17, 0xcc, 0xf5, 0x79, 0x34, 0x5f, 0xaa, 0x6b, 0xd6, |
|
| 9219 |
- 0xe8, 0x6a, 0x4d, 0xee, 0x82, 0xad, 0x22, 0xf1, 0x68, 0x19, 0xb3, 0x66, 0x45, 0x45, 0x60, 0x67, |
|
| 9220 |
- 0x15, 0x25, 0x64, 0xd2, 0x4c, 0x8e, 0x2f, 0xcf, 0x73, 0xbd, 0x4b, 0x36, 0x8c, 0x65, 0xf3, 0x76, |
|
| 9221 |
- 0xe6, 0xaf, 0xae, 0xe1, 0xd1, 0x95, 0x14, 0xd5, 0x26, 0xcc, 0x13, 0x4c, 0x22, 0xf4, 0x5d, 0x05, |
|
| 9222 |
- 0x55, 0x6a, 0x47, 0x29, 0x93, 0x66, 0x72, 0xe2, 0x40, 0x65, 0x34, 0x3a, 0x43, 0xe4, 0x7b, 0x59, |
|
| 9223 |
- 0x65, 0xd0, 0x1c, 0x6a, 0x24, 0xce, 0x00, 0x6a, 0xe9, 0x31, 0xf8, 0xcc, 0x06, 0x3d, 0xf3, 0x00, |
|
| 9224 |
- 0xad, 0x41, 0x8f, 0xdc, 0x87, 0x6a, 0x72, 0xe9, 0x8a, 0x20, 0x9a, 0x2a, 0xdf, 0xed, 0x9e, 0xbc, |
|
| 9225 |
- 0xb3, 0xb2, 0x6a, 0xa4, 0xf9, 0xa8, 0x29, 0xc5, 0x38, 0x1c, 0xec, 0x95, 0x19, 0xaf, 0xe8, 0x6a, |
|
| 9226 |
- 0x40, 0x71, 0x11, 0xf8, 0x4a, 0xcf, 0x0e, 0x45, 0x12, 0x39, 0xd3, 0x40, 0xe7, 0xd2, 0x0e, 0x45, |
|
| 9227 |
- 0x12, 0x03, 0x12, 0x72, 0x5f, 0xd7, 0xb1, 0x1d, 0xaa, 0x68, 0xf4, 0x31, 0x8f, 0x65, 0xc0, 0x23, |
|
| 9228 |
- 0x77, 0x9e, 0xfa, 0x38, 0x5d, 0x3b, 0xf3, 0xf4, 0x7e, 0xff, 0x97, 0xd3, 0x7e, 0x5f, 0x80, 0x5a, |
|
| 9229 |
- 0x5a, 0x7c, 0xb1, 0x92, 0x04, 0x3e, 0x8b, 0x64, 0x30, 0x09, 0x98, 0x30, 0x07, 0xe7, 0x38, 0xe4, |
|
| 9230 |
- 0x3e, 0x94, 0x5d, 0x29, 0x45, 0xfa, 0x40, 0x7f, 0x9e, 0xaf, 0xdc, 0xed, 0x53, 0x94, 0xf4, 0x23, |
|
| 9231 |
- 0x29, 0x96, 0x54, 0xa3, 0xf6, 0x3e, 0x07, 0xc8, 0x98, 0x68, 0xeb, 0x8c, 0x2d, 0x8d, 0x56, 0x24, |
|
| 9232 |
- 0xc9, 0x6d, 0x28, 0x3f, 0x73, 0xe7, 0x0b, 0x66, 0x72, 0x58, 0x2f, 0xbe, 0xb0, 0x3e, 0x2f, 0x38, |
|
| 9233 |
- 0x7f, 0xb5, 0xa0, 0x6a, 0x2a, 0x39, 0xb9, 0x07, 0x55, 0x55, 0xc9, 0x8d, 0x45, 0x37, 0x3f, 0x8c, |
|
| 9234 |
- 0x14, 0x42, 0x8e, 0x57, 0x2d, 0x2a, 0x67, 0xa3, 0x51, 0xa5, 0x5b, 0x95, 0xb1, 0x31, 0x6b, 0x58, |
|
| 9235 |
- 0x45, 0x9f, 0x4d, 0x4c, 0x2f, 0xda, 0x45, 0x74, 0x8f, 0x4d, 0x82, 0x28, 0x40, 0xff, 0x50, 0x14, |
|
| 9236 |
- 0x91, 0x7b, 0xe9, 0xad, 0x4b, 0x4a, 0xe3, 0x7b, 0x79, 0x8d, 0xaf, 0x5e, 0x7a, 0x00, 0xf5, 0xdc, |
|
| 9237 |
- 0x31, 0x37, 0xdc, 0xfa, 0xc3, 0xfc, 0xad, 0xcd, 0x91, 0x4a, 0x9d, 0x6e, 0xa4, 0x99, 0x17, 0xfe, |
|
| 9238 |
- 0x0b, 0xff, 0x7d, 0x0a, 0x90, 0xa9, 0x7c, 0xf3, 0xc2, 0xe2, 0x3c, 0x2f, 0x02, 0x0c, 0x63, 0x2c, |
|
| 9239 |
- 0x9d, 0xbe, 0xab, 0x2a, 0xf2, 0x76, 0x30, 0x8d, 0xb8, 0x60, 0x4f, 0xd5, 0x53, 0x55, 0xfb, 0x6b, |
|
| 9240 |
- 0xb4, 0xae, 0x79, 0xea, 0xc5, 0x90, 0x53, 0xa8, 0xfb, 0x2c, 0xf1, 0x44, 0xa0, 0x12, 0xca, 0x38, |
|
| 9241 |
- 0xfd, 0x00, 0xef, 0x94, 0xe9, 0x69, 0xf7, 0x32, 0x84, 0xf6, 0x55, 0x7e, 0x0f, 0x39, 0x81, 0x6d, |
|
| 9242 |
- 0x76, 0x1d, 0x73, 0x21, 0xcd, 0x29, 0xba, 0xe1, 0xdf, 0xd2, 0xa3, 0x03, 0xf2, 0xd5, 0x49, 0xb4, |
|
| 9243 |
- 0xce, 0xb2, 0x05, 0x71, 0xa1, 0xe4, 0xb9, 0xb1, 0xee, 0x76, 0xf5, 0x93, 0xe6, 0xc6, 0x79, 0x5d, |
|
| 9244 |
- 0x37, 0xd6, 0x4e, 0xeb, 0x7c, 0x82, 0x77, 0x7d, 0xfe, 0xaf, 0x83, 0xbb, 0xb9, 0x16, 0x17, 0xf2, |
|
| 9245 |
- 0xf1, 0xf2, 0x58, 0xe5, 0xcb, 0x2c, 0x90, 0xc7, 0x0b, 0x19, 0xcc, 0x8f, 0xdd, 0x38, 0x40, 0x75, |
|
| 9246 |
- 0xb8, 0x71, 0xd0, 0xa3, 0x4a, 0xf5, 0xde, 0x6f, 0xa0, 0xb1, 0x69, 0xf7, 0xdb, 0xc4, 0x60, 0xef, |
|
| 9247 |
- 0x33, 0xb0, 0x57, 0x76, 0xbc, 0x6e, 0x63, 0x2d, 0x1f, 0xbc, 0x0f, 0xa0, 0x9e, 0xbb, 0x37, 0x02, |
|
| 9248 |
- 0x9f, 0x28, 0xa0, 0xf6, 0xbe, 0x5e, 0x38, 0xcf, 0x71, 0xda, 0x48, 0xfb, 0xcd, 0xaf, 0x00, 0x2e, |
|
| 9249 |
- 0xa5, 0x8c, 0x9f, 0xaa, 0x06, 0x64, 0x0e, 0xb1, 0x91, 0xa3, 0x10, 0xe4, 0x00, 0xea, 0xb8, 0x48, |
|
| 9250 |
- 0x8c, 0x5c, 0x5b, 0xaa, 0x76, 0x24, 0x1a, 0xf0, 0x4b, 0xb0, 0x27, 0xab, 0xed, 0xba, 0x71, 0xd4, |
|
| 9251 |
- 0x26, 0xe9, 0xee, 0x5f, 0x40, 0x2d, 0xe2, 0x46, 0xa6, 0xfb, 0x61, 0x35, 0xe2, 0x4a, 0xe4, 0xdc, |
|
| 9252 |
- 0x85, 0x9f, 0xbd, 0x32, 0x1a, 0x91, 0xf7, 0xa0, 0x32, 0x09, 0xe6, 0x52, 0x3d, 0x57, 0x6c, 0xb1, |
|
| 9253 |
- 0x66, 0xe5, 0xfc, 0xa3, 0x00, 0x90, 0x3d, 0x2d, 0xf4, 0x08, 0xbe, 0x3b, 0xc4, 0x6c, 0xeb, 0x77, |
|
| 9254 |
- 0x36, 0x87, 0x5a, 0x68, 0x22, 0x68, 0xf2, 0xe8, 0xce, 0xfa, 0x73, 0x6c, 0xa7, 0x01, 0xd6, 0xb1, |
|
| 9255 |
- 0x3d, 0x31, 0xb1, 0x7d, 0x9b, 0xf1, 0x65, 0x75, 0xc2, 0xde, 0x57, 0xb0, 0xb3, 0xa6, 0xee, 0x0d, |
|
| 9256 |
- 0x5f, 0x6a, 0x96, 0x65, 0xf9, 0x90, 0xdd, 0x83, 0x8a, 0x6e, 0xed, 0x58, 0x7f, 0x91, 0x32, 0x6a, |
|
| 9257 |
- 0x14, 0xad, 0xea, 0xf8, 0x45, 0x3a, 0xe8, 0x0d, 0x2e, 0x9c, 0x13, 0xa8, 0xe8, 0x49, 0x96, 0xb4, |
|
| 9258 |
- 0xa0, 0xea, 0x7a, 0x78, 0xb5, 0xb4, 0x5c, 0xed, 0xa6, 0x63, 0xee, 0xa9, 0x62, 0xd3, 0x54, 0xec, |
|
| 9259 |
- 0xfc, 0xdd, 0x02, 0xc8, 0xf8, 0x6f, 0x31, 0x2b, 0x7c, 0x01, 0xbb, 0x09, 0xf3, 0x78, 0xe4, 0xbb, |
|
| 9260 |
- 0x62, 0xa9, 0xa4, 0x66, 0x62, 0xbb, 0x69, 0xcb, 0x06, 0x32, 0x37, 0x37, 0x14, 0x5f, 0x3f, 0x37, |
|
| 9261 |
- 0xb4, 0xa0, 0xe4, 0xf1, 0x78, 0x69, 0x9e, 0x2f, 0x59, 0xbf, 0x48, 0x97, 0xc7, 0x4b, 0x9c, 0xdb, |
|
| 9262 |
- 0x11, 0x41, 0xda, 0x50, 0x09, 0x67, 0x6a, 0xb6, 0xd7, 0x63, 0xd4, 0xed, 0x75, 0xec, 0xc3, 0x19, |
|
| 9263 |
- 0xd2, 0xf8, 0x25, 0xa0, 0x51, 0xe4, 0x2e, 0x94, 0xc3, 0x99, 0x1f, 0x08, 0x35, 0x71, 0xd4, 0x75, |
|
| 9264 |
- 0xbf, 0xce, 0xc3, 0x7b, 0x81, 0xc0, 0x79, 0x5f, 0x61, 0x88, 0x03, 0x96, 0x08, 0x9b, 0x55, 0x85, |
|
| 9265 |
- 0x6c, 0x6c, 0x78, 0x33, 0x3c, 0xdb, 0xa2, 0x96, 0x08, 0x3b, 0x35, 0xa8, 0x68, 0xbf, 0x3a, 0x7f, |
|
| 9266 |
- 0x2e, 0xc2, 0xee, 0xba, 0x95, 0x98, 0x07, 0x89, 0xf0, 0xd2, 0x3c, 0x48, 0x84, 0xb7, 0x1a, 0xa9, |
|
| 9267 |
- 0xac, 0xdc, 0x48, 0xe5, 0x40, 0x99, 0x5f, 0x45, 0x4c, 0xe4, 0x3f, 0x62, 0xba, 0x97, 0xfc, 0x2a, |
|
| 9268 |
- 0xc2, 0xe1, 0x41, 0x8b, 0xd6, 0x7a, 0x71, 0xd9, 0xf4, 0xe2, 0x0f, 0x61, 0x67, 0xc2, 0xe7, 0x73, |
|
| 9269 |
- 0x7e, 0x35, 0x5a, 0x86, 0xf3, 0x20, 0x9a, 0x99, 0x86, 0xbc, 0xce, 0x24, 0x2d, 0xb8, 0xe5, 0x07, |
|
| 9270 |
- 0x02, 0xcd, 0xe9, 0xf2, 0x48, 0xb2, 0x48, 0x4d, 0x91, 0x88, 0xdb, 0x64, 0x93, 0x2f, 0xe1, 0xd0, |
|
| 9271 |
- 0x95, 0x92, 0x85, 0xb1, 0x7c, 0x1c, 0xc5, 0xae, 0x37, 0xeb, 0x71, 0x4f, 0xbd, 0xc7, 0x30, 0x76, |
|
| 9272 |
- 0x65, 0x30, 0x0e, 0xe6, 0x81, 0x5c, 0x2a, 0x67, 0xd4, 0xe8, 0x6b, 0x71, 0xe4, 0x23, 0xd8, 0xf5, |
|
| 9273 |
- 0x04, 0x73, 0x25, 0xeb, 0xb1, 0x44, 0x5e, 0xb8, 0xf2, 0xb2, 0x59, 0x53, 0x3b, 0x37, 0xb8, 0x78, |
|
| 9274 |
- 0x07, 0x17, 0xad, 0xfd, 0x26, 0x98, 0xfb, 0x9e, 0x2b, 0xfc, 0xa6, 0xad, 0xef, 0xb0, 0xc6, 0x24, |
|
| 9275 |
- 0x6d, 0x20, 0x8a, 0xd1, 0x0f, 0x63, 0xb9, 0x5c, 0x41, 0x41, 0x41, 0x6f, 0x90, 0xe0, 0x47, 0x8e, |
|
| 9276 |
- 0x0c, 0x42, 0x96, 0x48, 0x37, 0x8c, 0xd5, 0xc7, 0x57, 0x91, 0x66, 0x0c, 0xe7, 0xdb, 0x02, 0x34, |
|
| 9277 |
- 0x36, 0x53, 0x04, 0x1d, 0x1c, 0xa3, 0x99, 0xe6, 0xb1, 0x21, 0xbd, 0x72, 0xba, 0x95, 0x73, 0x3a, |
|
| 9278 |
- 0x06, 0x10, 0xab, 0x0a, 0xc6, 0x6a, 0x9b, 0x2a, 0x3a, 0x0b, 0x60, 0xe9, 0xa7, 0x03, 0xb8, 0x66, |
|
| 9279 |
- 0x52, 0x79, 0xd3, 0xa4, 0x3f, 0x16, 0xe0, 0xd6, 0x46, 0x1a, 0xbe, 0xb1, 0x45, 0x87, 0x50, 0x0f, |
|
| 9280 |
- 0xdd, 0x19, 0xbb, 0x70, 0x85, 0x0a, 0x6e, 0x51, 0x37, 0xd6, 0x1c, 0xeb, 0x7f, 0x60, 0x5f, 0x04, |
|
| 9281 |
- 0xdb, 0xf9, 0xdc, 0xbf, 0xd1, 0xb6, 0x34, 0x94, 0xe7, 0x5c, 0x3e, 0xe0, 0x8b, 0xc8, 0x37, 0xdd, |
|
| 9282 |
- 0x68, 0x9d, 0xf9, 0x6a, 0xc0, 0x8b, 0x37, 0x04, 0xdc, 0x39, 0x87, 0x5a, 0x6a, 0x20, 0x39, 0x30, |
|
| 9283 |
- 0x1f, 0x50, 0x85, 0xec, 0xcb, 0xfc, 0x71, 0xc2, 0x04, 0xda, 0xae, 0xbf, 0xa6, 0xde, 0x87, 0xf2, |
|
| 9284 |
- 0x54, 0xf0, 0x45, 0x6c, 0x6a, 0xeb, 0x1a, 0x42, 0x4b, 0x9c, 0x11, 0x54, 0x0d, 0x87, 0x1c, 0x41, |
|
| 9285 |
- 0x65, 0xbc, 0x3c, 0x77, 0x43, 0x66, 0x14, 0xaa, 0x87, 0x8d, 0x6b, 0xdf, 0x20, 0xb0, 0x5a, 0x68, |
|
| 9286 |
- 0x04, 0xb9, 0x0d, 0xa5, 0xf1, 0x72, 0xd0, 0xd3, 0x63, 0x32, 0xd6, 0x1c, 0x5c, 0x75, 0x2a, 0xda, |
|
| 9287 |
- 0x20, 0xe7, 0x6b, 0xd8, 0xce, 0xef, 0x43, 0xa7, 0x44, 0xa9, 0x5e, 0x9b, 0x2a, 0x3a, 0x2b, 0xae, |
|
| 9288 |
- 0xd6, 0x6b, 0x8a, 0xeb, 0x51, 0x0b, 0xaa, 0xe6, 0xe3, 0x93, 0xd8, 0x50, 0x7e, 0x7c, 0x3e, 0xea, |
|
| 9289 |
- 0x3f, 0x6a, 0x6c, 0x91, 0x1a, 0x94, 0xce, 0x86, 0xa3, 0x47, 0x8d, 0x02, 0x52, 0xe7, 0xc3, 0xf3, |
|
| 9290 |
- 0x7e, 0xc3, 0x3a, 0xfa, 0x2d, 0xd8, 0xab, 0x8f, 0x24, 0x64, 0x77, 0x06, 0xe7, 0xbd, 0xc6, 0x16, |
|
| 9291 |
- 0x01, 0xa8, 0x8c, 0xfa, 0x5d, 0xda, 0x47, 0x70, 0x15, 0x8a, 0xa3, 0xd1, 0x59, 0xc3, 0x42, 0x55, |
|
| 9292 |
- 0xdd, 0xd3, 0xee, 0x59, 0xbf, 0x51, 0x44, 0xf2, 0xd1, 0xc3, 0x8b, 0x07, 0xa3, 0x46, 0xe9, 0xe8, |
|
| 9293 |
- 0x53, 0xb8, 0xb5, 0xf1, 0x91, 0xa2, 0x76, 0x9f, 0x9d, 0xd2, 0x3e, 0x6a, 0xaa, 0x43, 0xf5, 0x82, |
|
| 9294 |
- 0x0e, 0x9e, 0x9c, 0x3e, 0xea, 0x37, 0x0a, 0x28, 0xf8, 0x7a, 0xd8, 0xfd, 0xaa, 0xdf, 0x6b, 0x58, |
|
| 9295 |
- 0x9d, 0x3b, 0xdf, 0xbd, 0xd8, 0x2f, 0x7c, 0xff, 0x62, 0xbf, 0xf0, 0xc3, 0x8b, 0xfd, 0xc2, 0xbf, |
|
| 9296 |
- 0x5f, 0xec, 0x17, 0xbe, 0x7d, 0xb9, 0xbf, 0xf5, 0xfd, 0xcb, 0xfd, 0xad, 0x1f, 0x5e, 0xee, 0x6f, |
|
| 9297 |
- 0x8d, 0x2b, 0xea, 0x0f, 0x9e, 0x4f, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x24, 0xd0, 0xaa, |
|
| 9298 |
- 0x20, 0x12, 0x00, 0x00, |
|
| 9179 |
+ 0x11, 0xd7, 0x1d, 0xff, 0xde, 0x50, 0x92, 0xd9, 0x8d, 0x93, 0xb2, 0xaa, 0x2b, 0x29, 0x97, 0x34, |
|
| 9180 |
+ 0x90, 0x65, 0x5b, 0x02, 0x14, 0x20, 0x09, 0xf2, 0x50, 0x54, 0xfc, 0x63, 0x88, 0x49, 0x2c, 0x0a, |
|
| 9181 |
+ 0x4b, 0xdb, 0xe9, 0x9b, 0x71, 0xbc, 0x5b, 0x52, 0x07, 0xf2, 0x6e, 0x0f, 0x7b, 0x4b, 0x5b, 0x7c, |
|
| 9182 |
+ 0xe9, 0x83, 0x3f, 0x41, 0x80, 0x02, 0x7d, 0x6b, 0x81, 0xbe, 0x14, 0xe8, 0x87, 0xe8, 0x7b, 0x1e, |
|
| 9183 |
+ 0x83, 0xa2, 0x0f, 0x69, 0x1f, 0xd2, 0xc2, 0xfe, 0x22, 0xc5, 0xec, 0xee, 0xf1, 0x8e, 0xb4, 0x02, |
|
| 9184 |
+ 0xdb, 0x68, 0xd1, 0x27, 0xce, 0xcd, 0xfc, 0x76, 0x76, 0x76, 0x66, 0x76, 0x66, 0x96, 0xe0, 0xf0, |
|
| 9185 |
+ 0x24, 0x3d, 0x4a, 0x04, 0x97, 0x9c, 0xd8, 0xc9, 0x68, 0xe7, 0xde, 0x24, 0x94, 0x97, 0xf3, 0xd1, |
|
| 9186 |
+ 0x91, 0xcf, 0xa3, 0xe3, 0x09, 0x9f, 0xf0, 0x63, 0x25, 0x1a, 0xcd, 0xc7, 0xea, 0x4b, 0x7d, 0x28, |
|
| 9187 |
+ 0x4a, 0x2f, 0x71, 0xff, 0x64, 0x83, 0x3d, 0x48, 0xc8, 0xfb, 0x50, 0x0d, 0xe3, 0x64, 0x2e, 0xd3, |
|
| 9188 |
+ 0x96, 0xb5, 0x5f, 0x3a, 0x68, 0x9c, 0x38, 0x47, 0xc9, 0xe8, 0xa8, 0x8f, 0x1c, 0x6a, 0x04, 0x64, |
|
| 9189 |
+ 0x1f, 0xca, 0xec, 0x8a, 0xf9, 0x2d, 0x7b, 0xdf, 0x3a, 0x68, 0x9c, 0x00, 0x02, 0x7a, 0x57, 0xcc, |
|
| 9190 |
+ 0x1f, 0x24, 0x67, 0x1b, 0x54, 0x49, 0xc8, 0x47, 0x50, 0x4d, 0xf9, 0x5c, 0xf8, 0xac, 0x55, 0x52, |
|
| 9191 |
+ 0x98, 0x4d, 0xc4, 0x0c, 0x15, 0x47, 0xa1, 0x8c, 0x14, 0x35, 0x8d, 0xc3, 0x19, 0x6b, 0x95, 0x73, |
|
| 9192 |
+ 0x4d, 0xf7, 0xc3, 0x99, 0xc6, 0x28, 0x09, 0xf9, 0x00, 0x2a, 0xa3, 0x79, 0x38, 0x0b, 0x5a, 0x15, |
|
| 9193 |
+ 0x05, 0x69, 0x20, 0xa4, 0x8d, 0x0c, 0x85, 0xd1, 0x32, 0x72, 0x00, 0xf5, 0x64, 0xe6, 0xc9, 0x31, |
|
| 9194 |
+ 0x17, 0x51, 0x0b, 0xf2, 0x0d, 0x2f, 0x0c, 0x8f, 0x2e, 0xa5, 0xe4, 0x53, 0x68, 0xf8, 0x3c, 0x4e, |
|
| 9195 |
+ 0xa5, 0xf0, 0xc2, 0x58, 0xa6, 0xad, 0x86, 0x02, 0xbf, 0x8b, 0xe0, 0xaf, 0xb9, 0x98, 0x32, 0xd1, |
|
| 9196 |
+ 0xc9, 0x85, 0xb4, 0x88, 0x6c, 0x97, 0xc1, 0xe6, 0x89, 0xfb, 0x7b, 0x0b, 0xea, 0x99, 0x56, 0xe2, |
|
| 9197 |
+ 0xc2, 0xe6, 0xa9, 0xf0, 0x2f, 0x43, 0xc9, 0x7c, 0x39, 0x17, 0xac, 0x65, 0xed, 0x5b, 0x07, 0x0e, |
|
| 9198 |
+ 0x5d, 0xe1, 0x91, 0x6d, 0xb0, 0x07, 0x43, 0xe5, 0x28, 0x87, 0xda, 0x83, 0x21, 0x69, 0x41, 0xed, |
|
| 9199 |
+ 0xb1, 0x27, 0x42, 0x2f, 0x96, 0xca, 0x33, 0x0e, 0xcd, 0x3e, 0xc9, 0x2d, 0x70, 0x06, 0xc3, 0xc7, |
|
| 9200 |
+ 0x4c, 0xa4, 0x21, 0x8f, 0x95, 0x3f, 0x1c, 0x9a, 0x33, 0xc8, 0x2e, 0xc0, 0x60, 0x78, 0x9f, 0x79, |
|
| 9201 |
+ 0xa8, 0x34, 0x6d, 0x55, 0xf6, 0x4b, 0x07, 0x0e, 0x2d, 0x70, 0xdc, 0xdf, 0x42, 0x45, 0xc5, 0x88, |
|
| 9202 |
+ 0x7c, 0x01, 0xd5, 0x20, 0x9c, 0xb0, 0x54, 0x6a, 0x73, 0xda, 0x27, 0xdf, 0xfe, 0xb0, 0xb7, 0xf1, |
|
| 9203 |
+ 0xcf, 0x1f, 0xf6, 0x0e, 0x0b, 0xc9, 0xc0, 0x13, 0x16, 0xfb, 0x3c, 0x96, 0x5e, 0x18, 0x33, 0x91, |
|
| 9204 |
+ 0x1e, 0x4f, 0xf8, 0x3d, 0xbd, 0xe4, 0xa8, 0xab, 0x7e, 0xa8, 0xd1, 0x40, 0x6e, 0x43, 0x25, 0x8c, |
|
| 9205 |
+ 0x03, 0x76, 0xa5, 0xec, 0x2f, 0xb5, 0xdf, 0x31, 0xaa, 0x1a, 0x83, 0xb9, 0x4c, 0xe6, 0xb2, 0x8f, |
|
| 9206 |
+ 0x22, 0xaa, 0x11, 0xee, 0x1f, 0x2d, 0xa8, 0xea, 0x1c, 0x20, 0xb7, 0xa0, 0x1c, 0x31, 0xe9, 0xa9, |
|
| 9207 |
+ 0xfd, 0x1b, 0x27, 0x75, 0xf4, 0xed, 0x03, 0x26, 0x3d, 0xaa, 0xb8, 0x98, 0x5e, 0x11, 0x9f, 0xa3, |
|
| 9208 |
+ 0xef, 0xed, 0x3c, 0xbd, 0x1e, 0x20, 0x87, 0x1a, 0x01, 0xf9, 0x25, 0xd4, 0x62, 0x26, 0x9f, 0x71, |
|
| 9209 |
+ 0x31, 0x55, 0x3e, 0xda, 0xd6, 0x41, 0x3f, 0x67, 0xf2, 0x01, 0x0f, 0x18, 0xcd, 0x64, 0xe4, 0x2e, |
|
| 9210 |
+ 0xd4, 0x53, 0xe6, 0xcf, 0x45, 0x28, 0x17, 0xca, 0x5f, 0xdb, 0x27, 0x4d, 0x95, 0x65, 0x86, 0xa7, |
|
| 9211 |
+ 0xc0, 0x4b, 0x84, 0xfb, 0x17, 0x0b, 0xca, 0x68, 0x06, 0x21, 0x50, 0xf6, 0xc4, 0x44, 0x67, 0xb7, |
|
| 9212 |
+ 0x43, 0x15, 0x4d, 0x9a, 0x50, 0x62, 0xf1, 0x53, 0x65, 0x91, 0x43, 0x91, 0x44, 0x8e, 0xff, 0x2c, |
|
| 9213 |
+ 0x30, 0x31, 0x42, 0x12, 0xd7, 0xcd, 0x53, 0x26, 0x4c, 0x68, 0x14, 0x4d, 0x6e, 0x83, 0x93, 0x08, |
|
| 9214 |
+ 0x7e, 0xb5, 0x78, 0x82, 0xab, 0x2b, 0x85, 0xc4, 0x43, 0x66, 0x2f, 0x7e, 0x4a, 0xeb, 0x89, 0xa1, |
|
| 9215 |
+ 0xc8, 0x21, 0x00, 0xbb, 0x92, 0xc2, 0x3b, 0xe3, 0xa9, 0x4c, 0x5b, 0x55, 0x75, 0x76, 0x95, 0xef, |
|
| 9216 |
+ 0xc8, 0xe8, 0x5f, 0xd0, 0x82, 0xd4, 0xfd, 0x9b, 0x0d, 0x15, 0xe5, 0x12, 0x72, 0x80, 0x11, 0x48, |
|
| 9217 |
+ 0xe6, 0x3a, 0x98, 0xa5, 0x36, 0x31, 0x11, 0x00, 0x15, 0xeb, 0x65, 0x00, 0x30, 0xee, 0x3b, 0xe8, |
|
| 9218 |
+ 0x8d, 0x19, 0xf3, 0x25, 0x17, 0x26, 0xdd, 0x96, 0xdf, 0x68, 0x7a, 0x80, 0x19, 0xa1, 0x4f, 0xa3, |
|
| 9219 |
+ 0x68, 0x72, 0x07, 0xaa, 0x5c, 0x85, 0x51, 0x1d, 0xe8, 0x47, 0x82, 0x6b, 0x20, 0xa8, 0x5c, 0x30, |
|
| 9220 |
+ 0x2f, 0xe0, 0xf1, 0x6c, 0xa1, 0x8e, 0x59, 0xa7, 0xcb, 0x6f, 0x72, 0x07, 0x1c, 0x15, 0xb7, 0x87, |
|
| 9221 |
+ 0x8b, 0x84, 0xb5, 0xaa, 0x2a, 0x0e, 0x5b, 0xcb, 0x98, 0x22, 0x93, 0xe6, 0x72, 0xbc, 0xa8, 0xbe, |
|
| 9222 |
+ 0xe7, 0x5f, 0xb2, 0x41, 0x22, 0x5b, 0x37, 0x73, 0x7f, 0x75, 0x0c, 0x8f, 0x2e, 0xa5, 0xa8, 0x36, |
|
| 9223 |
+ 0x65, 0xbe, 0x60, 0x12, 0xa1, 0xef, 0x2a, 0xe8, 0x96, 0x09, 0xaf, 0x66, 0xd2, 0x5c, 0x4e, 0x5c, |
|
| 9224 |
+ 0xa8, 0x0e, 0x87, 0x67, 0x88, 0x7c, 0x2f, 0x2f, 0x24, 0x9a, 0x43, 0x8d, 0xc4, 0xed, 0x43, 0x3d, |
|
| 9225 |
+ 0xdb, 0x06, 0x6f, 0x65, 0xbf, 0x6b, 0xee, 0xab, 0xdd, 0xef, 0x92, 0x7b, 0x50, 0x4b, 0x2f, 0x3d, |
|
| 9226 |
+ 0x11, 0xc6, 0x13, 0xe5, 0xbb, 0xed, 0x93, 0x77, 0x96, 0x56, 0x0d, 0x35, 0x1f, 0x35, 0x65, 0x18, |
|
| 9227 |
+ 0x97, 0x83, 0xb3, 0x34, 0xe3, 0x15, 0x5d, 0x4d, 0x28, 0xcd, 0xc3, 0x40, 0xe9, 0xd9, 0xa2, 0x48, |
|
| 9228 |
+ 0x22, 0x67, 0x12, 0xea, 0x5c, 0xda, 0xa2, 0x48, 0x62, 0x40, 0x22, 0x1e, 0xe8, 0xb2, 0xb7, 0x45, |
|
| 9229 |
+ 0x15, 0x8d, 0x3e, 0xe6, 0x89, 0x0c, 0x79, 0xec, 0xcd, 0x32, 0x1f, 0x67, 0xdf, 0xee, 0x2c, 0x3b, |
|
| 9230 |
+ 0xdf, 0xff, 0x65, 0xb7, 0xdf, 0x59, 0x50, 0xcf, 0x6a, 0x35, 0x16, 0x9e, 0x30, 0x60, 0xb1, 0x0c, |
|
| 9231 |
+ 0xc7, 0x21, 0x13, 0x66, 0xe3, 0x02, 0x87, 0xdc, 0x83, 0x8a, 0x27, 0xa5, 0xc8, 0xae, 0xf3, 0x4f, |
|
| 9232 |
+ 0x8b, 0x85, 0xfe, 0xe8, 0x14, 0x25, 0xbd, 0x58, 0x8a, 0x05, 0xd5, 0xa8, 0x9d, 0xcf, 0x00, 0x72, |
|
| 9233 |
+ 0x26, 0xda, 0x3a, 0x65, 0x0b, 0xa3, 0x15, 0x49, 0x72, 0x13, 0x2a, 0x4f, 0xbd, 0xd9, 0x9c, 0x99, |
|
| 9234 |
+ 0x1c, 0xd6, 0x1f, 0x9f, 0xdb, 0x9f, 0x59, 0xee, 0x5f, 0x6d, 0xa8, 0x99, 0xc2, 0x4f, 0xee, 0x42, |
|
| 9235 |
+ 0x4d, 0x15, 0x7e, 0x63, 0xd1, 0xf5, 0x17, 0x23, 0x83, 0x90, 0xe3, 0x65, 0x47, 0x2b, 0xd8, 0x68, |
|
| 9236 |
+ 0x54, 0xe9, 0xce, 0x66, 0x6c, 0xcc, 0xfb, 0x5b, 0x29, 0x60, 0x63, 0xd3, 0xba, 0xb6, 0x11, 0xdd, |
|
| 9237 |
+ 0x65, 0xe3, 0x30, 0x0e, 0xd1, 0x3f, 0x14, 0x45, 0xe4, 0x6e, 0x76, 0xea, 0xb2, 0xd2, 0xf8, 0x5e, |
|
| 9238 |
+ 0x51, 0xe3, 0xab, 0x87, 0xee, 0x43, 0xa3, 0xb0, 0xcd, 0x35, 0xa7, 0xfe, 0xb0, 0x78, 0x6a, 0xb3, |
|
| 9239 |
+ 0xa5, 0x52, 0xa7, 0xfb, 0x6e, 0xee, 0x85, 0xff, 0xc2, 0x7f, 0x9f, 0x00, 0xe4, 0x2a, 0xdf, 0xbc, |
|
| 9240 |
+ 0xb0, 0xb8, 0xcf, 0x4b, 0x00, 0x83, 0x04, 0x4b, 0x67, 0xe0, 0xa9, 0xfa, 0xbd, 0x19, 0x4e, 0x62, |
|
| 9241 |
+ 0x2e, 0xd8, 0x13, 0x75, 0x55, 0xd5, 0xfa, 0x3a, 0x6d, 0x68, 0x9e, 0xba, 0x31, 0xe4, 0x14, 0x1a, |
|
| 9242 |
+ 0x01, 0x4b, 0x7d, 0x11, 0xaa, 0x84, 0x32, 0x4e, 0xdf, 0xc3, 0x33, 0xe5, 0x7a, 0x8e, 0xba, 0x39, |
|
| 9243 |
+ 0x42, 0xfb, 0xaa, 0xb8, 0x86, 0x9c, 0xc0, 0x26, 0xbb, 0x4a, 0xb8, 0x90, 0x66, 0x17, 0x3d, 0x1f, |
|
| 9244 |
+ 0xdc, 0xd0, 0x93, 0x06, 0xf2, 0xd5, 0x4e, 0xb4, 0xc1, 0xf2, 0x0f, 0xe2, 0x41, 0xd9, 0xf7, 0x12, |
|
| 9245 |
+ 0xdd, 0x1c, 0x1b, 0x27, 0xad, 0xb5, 0xfd, 0x3a, 0x5e, 0xa2, 0x9d, 0xd6, 0xfe, 0x18, 0xcf, 0xfa, |
|
| 9246 |
+ 0xfc, 0x5f, 0x7b, 0x77, 0x0a, 0x1d, 0x31, 0xe2, 0xa3, 0xc5, 0xb1, 0xca, 0x97, 0x69, 0x28, 0x8f, |
|
| 9247 |
+ 0xe7, 0x32, 0x9c, 0x1d, 0x7b, 0x49, 0x88, 0xea, 0x70, 0x61, 0xbf, 0x4b, 0x95, 0xea, 0x9d, 0x5f, |
|
| 9248 |
+ 0x41, 0x73, 0xdd, 0xee, 0xb7, 0x89, 0xc1, 0xce, 0xa7, 0xe0, 0x2c, 0xed, 0x78, 0xdd, 0xc2, 0x7a, |
|
| 9249 |
+ 0x31, 0x78, 0x1f, 0x40, 0xa3, 0x70, 0x6e, 0x04, 0x3e, 0x56, 0x40, 0xed, 0x7d, 0xfd, 0xe1, 0x3e, |
|
| 9250 |
+ 0xc7, 0xe1, 0x24, 0xeb, 0x37, 0xbf, 0x00, 0xb8, 0x94, 0x32, 0x79, 0xa2, 0x1a, 0x90, 0xd9, 0xc4, |
|
| 9251 |
+ 0x41, 0x8e, 0x42, 0x90, 0x3d, 0x68, 0xe0, 0x47, 0x6a, 0xe4, 0xda, 0x52, 0xb5, 0x22, 0xd5, 0x80, |
|
| 9252 |
+ 0x9f, 0x83, 0x33, 0x5e, 0x2e, 0xd7, 0x8d, 0xa3, 0x3e, 0xce, 0x56, 0xff, 0x0c, 0xea, 0x31, 0x37, |
|
| 9253 |
+ 0x32, 0xdd, 0x0f, 0x6b, 0x31, 0x57, 0x22, 0xf7, 0x0e, 0xfc, 0xe4, 0x95, 0x49, 0x8a, 0xbc, 0x07, |
|
| 9254 |
+ 0xd5, 0x71, 0x38, 0x93, 0xea, 0xba, 0x62, 0x8b, 0x35, 0x5f, 0xee, 0x3f, 0x2c, 0x80, 0xfc, 0x6a, |
|
| 9255 |
+ 0xa1, 0x47, 0xf0, 0xde, 0x21, 0x66, 0x53, 0xdf, 0xb3, 0x19, 0xd4, 0x23, 0x13, 0x41, 0x93, 0x47, |
|
| 9256 |
+ 0xb7, 0x56, 0xaf, 0xe3, 0x51, 0x16, 0x60, 0x1d, 0xdb, 0x13, 0x13, 0xdb, 0xb7, 0x99, 0x76, 0x96, |
|
| 9257 |
+ 0x3b, 0xec, 0x7c, 0x09, 0x5b, 0x2b, 0xea, 0xde, 0xf0, 0xa6, 0xe6, 0x59, 0x56, 0x0c, 0xd9, 0x5d, |
|
| 9258 |
+ 0xa8, 0xea, 0xd6, 0x8e, 0xf5, 0x17, 0x29, 0xa3, 0x46, 0xd1, 0xaa, 0x8e, 0x5f, 0x64, 0x73, 0x61, |
|
| 9259 |
+ 0xff, 0xc2, 0x3d, 0x81, 0xaa, 0x1e, 0x7c, 0xc9, 0x01, 0xd4, 0x3c, 0x1f, 0x8f, 0x96, 0x95, 0xab, |
|
| 9260 |
+ 0xed, 0x6c, 0x2a, 0x3e, 0x55, 0x6c, 0x9a, 0x89, 0xdd, 0xbf, 0xdb, 0x00, 0x39, 0xff, 0x2d, 0x66, |
|
| 9261 |
+ 0x85, 0xcf, 0x61, 0x3b, 0x65, 0x3e, 0x8f, 0x03, 0x4f, 0x2c, 0x94, 0xd4, 0x0c, 0x78, 0xd7, 0x2d, |
|
| 9262 |
+ 0x59, 0x43, 0x16, 0xe6, 0x86, 0xd2, 0xeb, 0xe7, 0x86, 0x03, 0x28, 0xfb, 0x3c, 0x59, 0x98, 0xeb, |
|
| 9263 |
+ 0x4b, 0x56, 0x0f, 0xd2, 0xe1, 0xc9, 0x02, 0xc7, 0x7c, 0x44, 0x90, 0x23, 0xa8, 0x46, 0x53, 0xf5, |
|
| 9264 |
+ 0x14, 0xd0, 0x63, 0xd4, 0xcd, 0x55, 0xec, 0x83, 0x29, 0xd2, 0xf8, 0x70, 0xd0, 0x28, 0x72, 0x07, |
|
| 9265 |
+ 0x2a, 0xd1, 0x34, 0x08, 0x85, 0x9a, 0x38, 0x1a, 0xba, 0x5f, 0x17, 0xe1, 0xdd, 0x50, 0xe0, 0xf3, |
|
| 9266 |
+ 0x40, 0x61, 0x88, 0x0b, 0xb6, 0x88, 0x5a, 0x35, 0x85, 0x6c, 0xae, 0x79, 0x33, 0x3a, 0xdb, 0xa0, |
|
| 9267 |
+ 0xb6, 0x88, 0xda, 0x75, 0xa8, 0x6a, 0xbf, 0xba, 0x7f, 0x2e, 0xc1, 0xf6, 0xaa, 0x95, 0x98, 0x07, |
|
| 9268 |
+ 0xa9, 0xf0, 0xb3, 0x3c, 0x48, 0x85, 0xbf, 0x1c, 0xa9, 0xec, 0xc2, 0x48, 0xe5, 0x42, 0x85, 0x3f, |
|
| 9269 |
+ 0x8b, 0x99, 0x28, 0xbe, 0x79, 0x3a, 0x97, 0xfc, 0x59, 0x8c, 0xc3, 0x83, 0x16, 0xad, 0xf4, 0xe2, |
|
| 9270 |
+ 0x8a, 0xe9, 0xc5, 0x1f, 0xc2, 0xd6, 0x98, 0xcf, 0x66, 0xfc, 0xd9, 0x70, 0x11, 0xcd, 0xc2, 0x78, |
|
| 9271 |
+ 0x6a, 0x1a, 0xf2, 0x2a, 0x93, 0x1c, 0xc0, 0x8d, 0x20, 0x14, 0x68, 0x4e, 0x87, 0xc7, 0x92, 0xc5, |
|
| 9272 |
+ 0x6a, 0x8a, 0x44, 0xdc, 0x3a, 0x9b, 0x7c, 0x01, 0xfb, 0x9e, 0x94, 0x2c, 0x4a, 0xe4, 0xa3, 0x38, |
|
| 9273 |
+ 0xf1, 0xfc, 0x69, 0x97, 0xfb, 0xea, 0x3e, 0x46, 0x89, 0x27, 0xc3, 0x51, 0x38, 0xc3, 0x81, 0xb9, |
|
| 9274 |
+ 0xa6, 0x96, 0xbe, 0x16, 0x47, 0x3e, 0x82, 0x6d, 0x5f, 0x30, 0x4f, 0xb2, 0x2e, 0x4b, 0xe5, 0x85, |
|
| 9275 |
+ 0x27, 0x2f, 0x5b, 0x75, 0xb5, 0x72, 0x8d, 0x8b, 0x67, 0xf0, 0xd0, 0xda, 0xaf, 0xc3, 0x59, 0xe0, |
|
| 9276 |
+ 0x7b, 0x22, 0x68, 0x39, 0xfa, 0x0c, 0x2b, 0x4c, 0x72, 0x04, 0x44, 0x31, 0x7a, 0x51, 0x22, 0x17, |
|
| 9277 |
+ 0x4b, 0x28, 0x28, 0xe8, 0x35, 0x12, 0x7c, 0x13, 0xc9, 0x30, 0x62, 0xa9, 0xf4, 0xa2, 0x44, 0xbd, |
|
| 9278 |
+ 0xd5, 0x4a, 0x34, 0x67, 0xb8, 0xdf, 0x58, 0xd0, 0x5c, 0x4f, 0x11, 0x74, 0x70, 0x82, 0x66, 0x9a, |
|
| 9279 |
+ 0xcb, 0x86, 0xf4, 0xd2, 0xe9, 0x76, 0xc1, 0xe9, 0x18, 0x40, 0xac, 0x2a, 0x18, 0xab, 0x4d, 0xaa, |
|
| 9280 |
+ 0xe8, 0x3c, 0x80, 0xe5, 0x1f, 0x0f, 0xe0, 0x8a, 0x49, 0x95, 0x75, 0x93, 0xfe, 0x60, 0xc1, 0x8d, |
|
| 9281 |
+ 0xb5, 0x34, 0x7c, 0x63, 0x8b, 0xf6, 0xa1, 0x11, 0x79, 0x53, 0x76, 0xe1, 0x09, 0x15, 0xdc, 0x92, |
|
| 9282 |
+ 0x6e, 0xac, 0x05, 0xd6, 0xff, 0xc0, 0xbe, 0x18, 0x36, 0x8b, 0xb9, 0x7f, 0xad, 0x6d, 0x59, 0x28, |
|
| 9283 |
+ 0xcf, 0xb9, 0xbc, 0xcf, 0xe7, 0x71, 0x60, 0xba, 0xd1, 0x2a, 0xf3, 0xd5, 0x80, 0x97, 0xae, 0x09, |
|
| 9284 |
+ 0xb8, 0x7b, 0x0e, 0xf5, 0xcc, 0x40, 0xb2, 0x67, 0x1e, 0x50, 0x56, 0xfe, 0x90, 0x7f, 0x94, 0x32, |
|
| 9285 |
+ 0x81, 0xb6, 0xeb, 0xd7, 0xd4, 0xfb, 0x50, 0x99, 0x08, 0x3e, 0x4f, 0x4c, 0x6d, 0x5d, 0x41, 0x68, |
|
| 9286 |
+ 0x89, 0x3b, 0x84, 0x9a, 0xe1, 0x90, 0x43, 0xa8, 0x8e, 0x16, 0xe7, 0x5e, 0xc4, 0x8c, 0x42, 0x75, |
|
| 9287 |
+ 0xb1, 0xf1, 0x3b, 0x30, 0x08, 0xac, 0x16, 0x1a, 0x41, 0x6e, 0x42, 0x79, 0xb4, 0xe8, 0x77, 0xf5, |
|
| 9288 |
+ 0x98, 0x8c, 0x35, 0x07, 0xbf, 0xda, 0x55, 0x6d, 0x90, 0xfb, 0x15, 0x6c, 0x16, 0xd7, 0xa1, 0x53, |
|
| 9289 |
+ 0xe2, 0x4c, 0xaf, 0x43, 0x15, 0x9d, 0x17, 0x57, 0xfb, 0x35, 0xc5, 0xf5, 0xf0, 0x00, 0x6a, 0xe6, |
|
| 9290 |
+ 0xa9, 0x4a, 0x1c, 0xa8, 0x3c, 0x3a, 0x1f, 0xf6, 0x1e, 0x36, 0x37, 0x48, 0x1d, 0xca, 0x67, 0x83, |
|
| 9291 |
+ 0xe1, 0xc3, 0xa6, 0x85, 0xd4, 0xf9, 0xe0, 0xbc, 0xd7, 0xb4, 0x0f, 0x6f, 0xc3, 0x66, 0xf1, 0xb1, |
|
| 9292 |
+ 0x4a, 0x1a, 0x50, 0x1b, 0x9e, 0x9e, 0x77, 0xdb, 0x83, 0xdf, 0x34, 0x37, 0xc8, 0x26, 0xd4, 0xfb, |
|
| 9293 |
+ 0xe7, 0xc3, 0x5e, 0xe7, 0x11, 0xed, 0x35, 0xad, 0xc3, 0x5f, 0x83, 0xb3, 0x7c, 0x4f, 0xa1, 0x86, |
|
| 9294 |
+ 0x76, 0xff, 0xbc, 0xdb, 0xdc, 0x20, 0x00, 0xd5, 0x61, 0xaf, 0x43, 0x7b, 0xa8, 0xb7, 0x06, 0xa5, |
|
| 9295 |
+ 0xe1, 0xf0, 0xac, 0x69, 0xe3, 0xae, 0x9d, 0xd3, 0xce, 0x59, 0xaf, 0x59, 0x42, 0xf2, 0xe1, 0x83, |
|
| 9296 |
+ 0x8b, 0xfb, 0xc3, 0x66, 0xf9, 0xf0, 0x13, 0xb8, 0xb1, 0xf6, 0x9e, 0x51, 0xab, 0xcf, 0x4e, 0x69, |
|
| 9297 |
+ 0x0f, 0x35, 0x35, 0xa0, 0x76, 0x41, 0xfb, 0x8f, 0x4f, 0x1f, 0xf6, 0x9a, 0x16, 0x0a, 0xbe, 0x1a, |
|
| 9298 |
+ 0x74, 0xbe, 0xec, 0x75, 0x9b, 0x76, 0xfb, 0xd6, 0xb7, 0x2f, 0x76, 0xad, 0xef, 0x5e, 0xec, 0x5a, |
|
| 9299 |
+ 0xdf, 0xbf, 0xd8, 0xb5, 0xfe, 0xfd, 0x62, 0xd7, 0xfa, 0xe6, 0xe5, 0xee, 0xc6, 0x77, 0x2f, 0x77, |
|
| 9300 |
+ 0x37, 0xbe, 0x7f, 0xb9, 0xbb, 0x31, 0xaa, 0xaa, 0xbf, 0x8e, 0x3e, 0xfe, 0x4f, 0x00, 0x00, 0x00, |
|
| 9301 |
+ 0xff, 0xff, 0x87, 0x95, 0x80, 0x20, 0x7a, 0x12, 0x00, 0x00, |
|
| 9299 | 9302 |
} |
| ... | ... |
@@ -44,6 +44,7 @@ message ExecOp {
|
| 44 | 44 |
Meta meta = 1; |
| 45 | 45 |
repeated Mount mounts = 2; |
| 46 | 46 |
NetMode network = 3; |
| 47 |
+ SecurityMode security = 4; |
|
| 47 | 48 |
} |
| 48 | 49 |
|
| 49 | 50 |
// Meta is a set of arguments for ExecOp. |
| ... | ... |
@@ -64,6 +65,11 @@ enum NetMode {
|
| 64 | 64 |
NONE = 2; |
| 65 | 65 |
} |
| 66 | 66 |
|
| 67 |
+enum SecurityMode {
|
|
| 68 |
+ SANDBOX = 0; |
|
| 69 |
+ INSECURE = 1; // privileged mode |
|
| 70 |
+} |
|
| 71 |
+ |
|
| 67 | 72 |
// Mount specifies how to mount an input Op as a filesystem. |
| 68 | 73 |
message Mount {
|
| 69 | 74 |
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; |
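A minimal sketch of how the new field can be populated when constructing an ExecOp directly from the generated Go package; the Meta values are placeholders, and the SecurityMode_INSECURE constant name follows the generated naming visible above (SecurityMode_SANDBOX):

    // Sketch: request privileged execution for one ExecOp. It only takes
    // effect when the solver grants the security.insecure entitlement.
    op := &pb.ExecOp{
        Meta: &pb.Meta{
            Args: []string{"/bin/sh", "-c", "echo hello"}, // placeholder command
            Cwd:  "/",
        },
        Network:  pb.NetMode_UNSET,
        Security: pb.SecurityMode_INSECURE, // unset defaults to SecurityMode_SANDBOX
    }
    _ = op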
| ... | ... |
@@ -335,6 +335,16 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRe |
| 335 | 335 |
return nil, errors.Wrapf(err, "failed to update submodules for %s", gs.src.Remote) |
| 336 | 336 |
} |
| 337 | 337 |
|
| 338 |
+ if idmap := mount.IdentityMapping(); idmap != nil {
|
|
| 339 |
+ u := idmap.RootPair() |
|
| 340 |
+ err := filepath.Walk(gitDir, func(p string, f os.FileInfo, err error) error {
|
|
| 341 |
+ return os.Lchown(p, u.UID, u.GID) |
|
| 342 |
+ }) |
|
| 343 |
+ if err != nil {
|
|
| 344 |
+ return nil, errors.Wrap(err, "failed to remap git checkout") |
|
| 345 |
+ } |
|
| 346 |
+ } |
|
| 347 |
+ |
|
| 338 | 348 |
lm.Unmount() |
| 339 | 349 |
lm = nil |
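The remapping block added above addresses user-namespace remapping: files written by the host git binary are owned by the real host root, which is unmapped inside the build, so the checkout is chowned to idmap.RootPair(), the host uid/gid pair that maps to container root. A rough self-contained sketch of the same idea, with an assumed helper name:

    // Sketch (assumed helper, not part of this change): make a host-side tree
    // appear root-owned inside a remapped user namespace.
    func chownToMappedRoot(dir string, idmap *idtools.IdentityMapping) error {
        root := idmap.RootPair() // host uid/gid that maps to uid/gid 0 in the namespace
        return filepath.Walk(dir, func(p string, _ os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            return os.Lchown(p, root.UID, root.GID)
        })
    }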
| 340 | 350 |
|
| ... | ... |
@@ -15,6 +15,7 @@ import ( |
| 15 | 15 |
"strings" |
| 16 | 16 |
"time" |
| 17 | 17 |
|
| 18 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 18 | 19 |
"github.com/docker/docker/pkg/locker" |
| 19 | 20 |
"github.com/moby/buildkit/cache" |
| 20 | 21 |
"github.com/moby/buildkit/cache/metadata" |
| ... | ... |
@@ -278,8 +279,22 @@ func (hs *httpSourceHandler) save(ctx context.Context, resp *http.Response) (ref |
| 278 | 278 |
} |
| 279 | 279 |
f = nil |
| 280 | 280 |
|
| 281 |
- if hs.src.UID != 0 || hs.src.GID != 0 {
|
|
| 282 |
- if err := os.Chown(fp, hs.src.UID, hs.src.GID); err != nil {
|
|
| 281 |
+ uid := hs.src.UID |
|
| 282 |
+ gid := hs.src.GID |
|
| 283 |
+ if idmap := mount.IdentityMapping(); idmap != nil {
|
|
| 284 |
+ identity, err := idmap.ToHost(idtools.Identity{
|
|
| 285 |
+ UID: int(uid), |
|
| 286 |
+ GID: int(gid), |
|
| 287 |
+ }) |
|
| 288 |
+ if err != nil {
|
|
| 289 |
+ return nil, "", err |
|
| 290 |
+ } |
|
| 291 |
+ uid = identity.UID |
|
| 292 |
+ gid = identity.GID |
|
| 293 |
+ } |
|
| 294 |
+ |
|
| 295 |
+ if gid != 0 || uid != 0 {
|
|
| 296 |
+ if err := os.Chown(fp, uid, gid); err != nil {
|
|
| 283 | 297 |
return nil, "", err |
| 284 | 298 |
} |
| 285 | 299 |
} |
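Here the client-supplied uid/gid are expressed in container terms, so they are translated with idmap.ToHost before the chown; otherwise the downloaded file would carry raw host ids and appear mis-owned inside the remapped namespace. A hedged sketch with a purely illustrative 100000-offset mapping:

    // Sketch: under a hypothetical 0->100000 remap of size 65536,
    // container uid/gid 1000 becomes host uid/gid 101000.
    idmap := idtools.NewIDMappingsFromMaps(
        []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
        []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
    )
    hostID, err := idmap.ToHost(idtools.Identity{UID: 1000, GID: 1000})
    // when err == nil: hostID.UID == 101000, hostID.GID == 101000
    _, _ = hostID, err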
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
"fmt" |
| 7 | 7 |
"time" |
| 8 | 8 |
|
| 9 |
+ "github.com/docker/docker/pkg/idtools" |
|
| 9 | 10 |
"github.com/moby/buildkit/cache" |
| 10 | 11 |
"github.com/moby/buildkit/cache/contenthash" |
| 11 | 12 |
"github.com/moby/buildkit/cache/metadata" |
| ... | ... |
@@ -19,6 +20,7 @@ import ( |
| 19 | 19 |
"github.com/pkg/errors" |
| 20 | 20 |
"github.com/sirupsen/logrus" |
| 21 | 21 |
"github.com/tonistiigi/fsutil" |
| 22 |
+ fstypes "github.com/tonistiigi/fsutil/types" |
|
| 22 | 23 |
bolt "go.etcd.io/bbolt" |
| 23 | 24 |
"golang.org/x/time/rate" |
| 24 | 25 |
"google.golang.org/grpc/codes" |
| ... | ... |
@@ -153,7 +155,7 @@ func (ls *localSourceHandler) Snapshot(ctx context.Context) (out cache.Immutable |
| 153 | 153 |
} |
| 154 | 154 |
}() |
| 155 | 155 |
|
| 156 |
- cc, err := contenthash.GetCacheContext(ctx, mutable.Metadata()) |
|
| 156 |
+ cc, err := contenthash.GetCacheContext(ctx, mutable.Metadata(), mount.IdentityMapping()) |
|
| 157 | 157 |
if err != nil {
|
| 158 | 158 |
return nil, err |
| 159 | 159 |
} |
| ... | ... |
@@ -165,10 +167,25 @@ func (ls *localSourceHandler) Snapshot(ctx context.Context) (out cache.Immutable |
| 165 | 165 |
FollowPaths: ls.src.FollowPaths, |
| 166 | 166 |
OverrideExcludes: false, |
| 167 | 167 |
DestDir: dest, |
| 168 |
- CacheUpdater: &cacheUpdater{cc},
|
|
| 168 |
+ CacheUpdater: &cacheUpdater{cc, mount.IdentityMapping()},
|
|
| 169 | 169 |
ProgressCb: newProgressHandler(ctx, "transferring "+ls.src.Name+":"), |
| 170 | 170 |
} |
| 171 | 171 |
|
| 172 |
+ if idmap := mount.IdentityMapping(); idmap != nil {
|
|
| 173 |
+ opt.Filter = func(p string, stat *fstypes.Stat) bool {
|
|
| 174 |
+ identity, err := idmap.ToHost(idtools.Identity{
|
|
| 175 |
+ UID: int(stat.Uid), |
|
| 176 |
+ GID: int(stat.Gid), |
|
| 177 |
+ }) |
|
| 178 |
+ if err != nil {
|
|
| 179 |
+ return false |
|
| 180 |
+ } |
|
| 181 |
+ stat.Uid = uint32(identity.UID) |
|
| 182 |
+ stat.Gid = uint32(identity.GID) |
|
| 183 |
+ return true |
|
| 184 |
+ } |
|
| 185 |
+ } |
|
| 186 |
+ |
|
| 172 | 187 |
if err := filesync.FSSync(ctx, caller, opt); err != nil {
|
| 173 | 188 |
if status.Code(err) == codes.NotFound {
|
| 174 | 189 |
return nil, errors.Errorf("local source %s not enabled from the client", ls.src.Name)
|
| ... | ... |
@@ -245,6 +262,7 @@ func newProgressHandler(ctx context.Context, id string) func(int, bool) {
|
| 245 | 245 |
|
| 246 | 246 |
type cacheUpdater struct {
|
| 247 | 247 |
contenthash.CacheContext |
| 248 |
+ idmap *idtools.IdentityMapping |
|
| 248 | 249 |
} |
| 249 | 250 |
|
| 250 | 251 |
func (cu *cacheUpdater) MarkSupported(bool) {
|
| ... | ... |
@@ -5,6 +5,7 @@ import ( |
| 5 | 5 |
"sync" |
| 6 | 6 |
|
| 7 | 7 |
"github.com/containerd/containerd/platforms" |
| 8 |
+ "github.com/sirupsen/logrus" |
|
| 8 | 9 |
) |
| 9 | 10 |
|
| 10 | 11 |
var once sync.Once |
| ... | ... |
@@ -14,19 +15,53 @@ func SupportedPlatforms() []string {
|
| 14 | 14 |
once.Do(func() {
|
| 15 | 15 |
def := platforms.DefaultString() |
| 16 | 16 |
arr = append(arr, def) |
| 17 |
- |
|
| 18 |
- if p := "linux/amd64"; def != p && amd64Supported() {
|
|
| 17 |
+ if p := "linux/amd64"; def != p && amd64Supported() == nil {
|
|
| 19 | 18 |
arr = append(arr, p) |
| 20 | 19 |
} |
| 21 |
- if p := "linux/arm64"; def != p && arm64Supported() {
|
|
| 20 |
+ if p := "linux/arm64"; def != p && arm64Supported() == nil {
|
|
| 22 | 21 |
arr = append(arr, p) |
| 23 | 22 |
} |
| 24 |
- if !strings.HasPrefix(def, "linux/arm/") && armSupported() {
|
|
| 23 |
+ if !strings.HasPrefix(def, "linux/arm/") && armSupported() == nil {
|
|
| 25 | 24 |
arr = append(arr, "linux/arm/v7", "linux/arm/v6") |
| 26 | 25 |
} else if def == "linux/arm/v7" {
|
| 27 | 26 |
arr = append(arr, "linux/arm/v6") |
| 28 | 27 |
} |
| 29 | 28 |
}) |
| 30 |
- |
|
| 31 | 29 |
return arr |
| 32 | 30 |
} |
| 31 |
+ |
|
| 32 |
+// WarnIfUnsupported validates the given platforms and prints a warning for any that fail, so that |
|
| 33 |
+// the end user can fix the underlying issue based on the warning, with no need to drop |
|
| 34 |
+// the platform from the candidates. |
|
| 35 |
+func WarnIfUnsupported(pfs []string) {
|
|
| 36 |
+ def := platforms.DefaultString() |
|
| 37 |
+ for _, p := range pfs {
|
|
| 38 |
+ if p != def {
|
|
| 39 |
+ if p == "linux/amd64" {
|
|
| 40 |
+ if err := amd64Supported(); err != nil {
|
|
| 41 |
+ printPlatfromWarning(p, err) |
|
| 42 |
+ } |
|
| 43 |
+ } |
|
| 44 |
+ if p == "linux/arm64" {
|
|
| 45 |
+ if err := arm64Supported(); err != nil {
|
|
| 46 |
+ printPlatfromWarning(p, err) |
|
| 47 |
+ } |
|
| 48 |
+ } |
|
| 49 |
+ if strings.HasPrefix(p, "linux/arm/v6") || strings.HasPrefix(p, "linux/arm/v7") {
|
|
| 50 |
+ if err := armSupported(); err != nil {
|
|
| 51 |
+ printPlatfromWarning(p, err) |
|
| 52 |
+ } |
|
| 53 |
+ } |
|
| 54 |
+ } |
|
| 55 |
+ } |
|
| 56 |
+} |
|
| 57 |
+ |
|
| 58 |
+func printPlatfromWarning(p string, err error) {
|
|
| 59 |
+ if strings.Contains(err.Error(), "exec format error") {
|
|
| 60 |
+ logrus.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binary may have not enabled.", p)
|
|
| 61 |
+ } else if strings.Contains(err.Error(), "no such file or directory") {
|
|
| 62 |
+ logrus.Warnf("platforms %s cannot pass the validation, '-F' flag might have not set for 'binfmt_misc'.", p)
|
|
| 63 |
+ } else {
|
|
| 64 |
+ logrus.Warnf("platforms %s cannot pass the validation: %s", p, err.Error())
|
|
| 65 |
+ } |
|
| 66 |
+} |
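A minimal usage sketch, assuming the package is imported under its directory name binfmt_misc and the caller has already decided which emulated platforms it wants to advertise (the list below is illustrative); validation failures are only logged, never fatal:

    // Sketch: warn about platforms that binfmt_misc cannot currently run.
    pfs := []string{"linux/amd64", "linux/arm64", "linux/arm/v7"}
    binfmt_misc.WarnIfUnsupported(pfs)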
| ... | ... |
@@ -1,26 +1,19 @@ |
| 1 | 1 |
package entitlements |
| 2 | 2 |
|
| 3 |
-import "github.com/pkg/errors" |
|
| 3 |
+import ( |
|
| 4 |
+ "github.com/pkg/errors" |
|
| 5 |
+) |
|
| 4 | 6 |
|
| 5 | 7 |
type Entitlement string |
| 6 | 8 |
|
| 7 | 9 |
const ( |
| 8 |
- EntitlementSecurityConfined Entitlement = "security.confined" |
|
| 9 |
- EntitlementSecurityUnconfined Entitlement = "security.unconfined" // unimplemented |
|
| 10 |
- EntitlementNetworkHost Entitlement = "network.host" |
|
| 11 |
- EntitlementNetworkNone Entitlement = "network.none" |
|
| 10 |
+ EntitlementSecurityInsecure Entitlement = "security.insecure" |
|
| 11 |
+ EntitlementNetworkHost Entitlement = "network.host" |
|
| 12 | 12 |
) |
| 13 | 13 |
|
| 14 | 14 |
var all = map[Entitlement]struct{}{
|
| 15 |
- EntitlementSecurityConfined: {},
|
|
| 16 |
- EntitlementSecurityUnconfined: {},
|
|
| 17 |
- EntitlementNetworkHost: {},
|
|
| 18 |
- EntitlementNetworkNone: {},
|
|
| 19 |
-} |
|
| 20 |
- |
|
| 21 |
-var defaults = map[Entitlement]struct{}{
|
|
| 22 |
- EntitlementSecurityConfined: {},
|
|
| 23 |
- EntitlementNetworkNone: {},
|
|
| 15 |
+ EntitlementSecurityInsecure: {},
|
|
| 16 |
+ EntitlementNetworkHost: {},
|
|
| 24 | 17 |
} |
| 25 | 18 |
|
| 26 | 19 |
func Parse(s string) (Entitlement, error) {
|
| ... | ... |
@@ -56,9 +49,6 @@ func WhiteList(allowed, supported []Entitlement) (Set, error) {
|
| 56 | 56 |
m[e] = struct{}{}
|
| 57 | 57 |
} |
| 58 | 58 |
|
| 59 |
- for e := range defaults {
|
|
| 60 |
- m[e] = struct{}{}
|
|
| 61 |
- } |
|
| 62 | 59 |
return Set(m), nil |
| 63 | 60 |
} |
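With the implicit defaults removed, only entitlements that are both requested and supported end up in the resulting Set. A rough sketch of how a caller might build the whitelist from configuration strings (the configuration value and helper name are illustrative):

    // Sketch: turn configured entitlement names into a whitelisted Set.
    func allowedSet(cfg []string) (entitlements.Set, error) {
        supported := []entitlements.Entitlement{
            entitlements.EntitlementNetworkHost,
            // entitlements.EntitlementSecurityInsecure, // only if the worker allows it
        }
        var allowed []entitlements.Entitlement
        for _, s := range cfg { // e.g. []string{"network.host"}
            e, err := entitlements.Parse(s)
            if err != nil {
                return nil, err
            }
            allowed = append(allowed, e)
        }
        return entitlements.WhiteList(allowed, supported)
    }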
| 64 | 61 |
|
| 65 | 62 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,67 @@ |
| 0 |
+package entitlements |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "context" |
|
| 4 |
+ |
|
| 5 |
+ "github.com/containerd/containerd/containers" |
|
| 6 |
+ "github.com/containerd/containerd/oci" |
|
| 7 |
+ specs "github.com/opencontainers/runtime-spec/specs-go" |
|
| 8 |
+) |
|
| 9 |
+ |
|
| 10 |
+// WithInsecureSpec sets the spec with all capabilities and clears the default security restrictions. |
|
| 11 |
+func WithInsecureSpec() oci.SpecOpts {
|
|
| 12 |
+ return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
|
|
| 13 |
+ addCaps := []string{
|
|
| 14 |
+ "CAP_FSETID", |
|
| 15 |
+ "CAP_KILL", |
|
| 16 |
+ "CAP_FOWNER", |
|
| 17 |
+ "CAP_MKNOD", |
|
| 18 |
+ "CAP_CHOWN", |
|
| 19 |
+ "CAP_DAC_OVERRIDE", |
|
| 20 |
+ "CAP_NET_RAW", |
|
| 21 |
+ "CAP_SETGID", |
|
| 22 |
+ "CAP_SETUID", |
|
| 23 |
+ "CAP_SETPCAP", |
|
| 24 |
+ "CAP_SETFCAP", |
|
| 25 |
+ "CAP_NET_BIND_SERVICE", |
|
| 26 |
+ "CAP_SYS_CHROOT", |
|
| 27 |
+ "CAP_AUDIT_WRITE", |
|
| 28 |
+ "CAP_MAC_ADMIN", |
|
| 29 |
+ "CAP_MAC_OVERRIDE", |
|
| 30 |
+ "CAP_DAC_READ_SEARCH", |
|
| 31 |
+ "CAP_SYS_PTRACE", |
|
| 32 |
+ "CAP_SYS_MODULE", |
|
| 33 |
+ "CAP_SYSLOG", |
|
| 34 |
+ "CAP_SYS_RAWIO", |
|
| 35 |
+ "CAP_SYS_ADMIN", |
|
| 36 |
+ "CAP_LINUX_IMMUTABLE", |
|
| 37 |
+ "CAP_SYS_BOOT", |
|
| 38 |
+ "CAP_SYS_NICE", |
|
| 39 |
+ "CAP_SYS_PACCT", |
|
| 40 |
+ "CAP_SYS_TTY_CONFIG", |
|
| 41 |
+ "CAP_SYS_TIME", |
|
| 42 |
+ "CAP_WAKE_ALARM", |
|
| 43 |
+ "CAP_AUDIT_READ", |
|
| 44 |
+ "CAP_AUDIT_CONTROL", |
|
| 45 |
+ "CAP_SYS_RESOURCE", |
|
| 46 |
+ "CAP_BLOCK_SUSPEND", |
|
| 47 |
+ "CAP_IPC_LOCK", |
|
| 48 |
+ "CAP_IPC_OWNER", |
|
| 49 |
+ "CAP_LEASE", |
|
| 50 |
+ "CAP_NET_ADMIN", |
|
| 51 |
+ "CAP_NET_BROADCAST", |
|
| 52 |
+ } |
|
| 53 |
+ for _, cap := range addCaps {
|
|
| 54 |
+ s.Process.Capabilities.Bounding = append(s.Process.Capabilities.Bounding, cap) |
|
| 55 |
+ s.Process.Capabilities.Ambient = append(s.Process.Capabilities.Ambient, cap) |
|
| 56 |
+ s.Process.Capabilities.Effective = append(s.Process.Capabilities.Effective, cap) |
|
| 57 |
+ s.Process.Capabilities.Inheritable = append(s.Process.Capabilities.Inheritable, cap) |
|
| 58 |
+ s.Process.Capabilities.Permitted = append(s.Process.Capabilities.Permitted, cap) |
|
| 59 |
+ } |
|
| 60 |
+ s.Linux.ReadonlyPaths = []string{}
|
|
| 61 |
+ s.Linux.MaskedPaths = []string{}
|
|
| 62 |
+ s.Process.ApparmorProfile = "" |
|
| 63 |
+ |
|
| 64 |
+ return nil |
|
| 65 |
+ } |
|
| 66 |
+} |
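Since the returned option ignores its context, client and container arguments (as the signature above shows), it can be applied directly to an already-generated runtime spec when the security.insecure entitlement has been granted. A hedged sketch:

    // Sketch: widen an existing OCI spec for an insecure (privileged) exec.
    func applyInsecure(ctx context.Context, spec *specs.Spec) error {
        return entitlements.WithInsecureSpec()(ctx, nil, nil, spec)
    }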
| ... | ... |
@@ -3,9 +3,11 @@ package fsutil |
| 3 | 3 |
import ( |
| 4 | 4 |
"context" |
| 5 | 5 |
"io" |
| 6 |
+ "io/ioutil" |
|
| 6 | 7 |
"os" |
| 7 | 8 |
"path" |
| 8 | 9 |
"path/filepath" |
| 10 |
+ "sort" |
|
| 9 | 11 |
"strings" |
| 10 | 12 |
|
| 11 | 13 |
"github.com/pkg/errors" |
| ... | ... |
@@ -37,36 +39,80 @@ func (fs *fs) Open(p string) (io.ReadCloser, error) {
|
| 37 | 37 |
return os.Open(filepath.Join(fs.root, p)) |
| 38 | 38 |
} |
| 39 | 39 |
|
| 40 |
-func SubDirFS(fs FS, stat types.Stat) FS {
|
|
| 41 |
- return &subDirFS{fs: fs, stat: stat}
|
|
| 40 |
+type Dir struct {
|
|
| 41 |
+ Stat types.Stat |
|
| 42 |
+ FS FS |
|
| 43 |
+} |
|
| 44 |
+ |
|
| 45 |
+func SubDirFS(dirs []Dir) (FS, error) {
|
|
| 46 |
+ sort.Slice(dirs, func(i, j int) bool {
|
|
| 47 |
+ return dirs[i].Stat.Path < dirs[j].Stat.Path |
|
| 48 |
+ }) |
|
| 49 |
+ m := map[string]Dir{}
|
|
| 50 |
+ for _, d := range dirs {
|
|
| 51 |
+ if path.Base(d.Stat.Path) != d.Stat.Path {
|
|
| 52 |
+ return nil, errors.Errorf("subdir %s must be single file", d.Stat.Path)
|
|
| 53 |
+ } |
|
| 54 |
+ if _, ok := m[d.Stat.Path]; ok {
|
|
| 55 |
+ return nil, errors.Errorf("invalid path %s", d.Stat.Path)
|
|
| 56 |
+ } |
|
| 57 |
+ m[d.Stat.Path] = d |
|
| 58 |
+ } |
|
| 59 |
+ return &subDirFS{m: m, dirs: dirs}, nil
|
|
| 42 | 60 |
} |
| 43 | 61 |
|
| 44 | 62 |
type subDirFS struct {
|
| 45 |
- fs FS |
|
| 46 |
- stat types.Stat |
|
| 63 |
+ m map[string]Dir |
|
| 64 |
+ dirs []Dir |
|
| 47 | 65 |
} |
| 48 | 66 |
|
| 49 | 67 |
func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error {
|
| 50 |
- main := &StatInfo{Stat: &fs.stat}
|
|
| 51 |
- if !main.IsDir() {
|
|
| 52 |
- return errors.Errorf("fs subdir not mode directory")
|
|
| 68 |
+ for _, d := range fs.dirs {
|
|
| 69 |
+ fi := &StatInfo{Stat: &d.Stat}
|
|
| 70 |
+ if !fi.IsDir() {
|
|
| 71 |
+ return errors.Errorf("fs subdir %s not mode directory", d.Stat.Path)
|
|
| 72 |
+ } |
|
| 73 |
+ if err := fn(d.Stat.Path, fi, nil); err != nil {
|
|
| 74 |
+ return err |
|
| 75 |
+ } |
|
| 76 |
+ if err := d.FS.Walk(ctx, func(p string, fi os.FileInfo, err error) error {
|
|
| 77 |
+ stat, ok := fi.Sys().(*types.Stat) |
|
| 78 |
+ if !ok {
|
|
| 79 |
+ return errors.Wrapf(err, "invalid fileinfo without stat info: %s", p) |
|
| 80 |
+ } |
|
| 81 |
+ stat.Path = path.Join(d.Stat.Path, stat.Path) |
|
| 82 |
+ if stat.Linkname != "" {
|
|
| 83 |
+ if fi.Mode()&os.ModeSymlink != 0 {
|
|
| 84 |
+ if strings.HasPrefix(stat.Linkname, "/") {
|
|
| 85 |
+ stat.Linkname = path.Join("/"+d.Stat.Path, stat.Linkname)
|
|
| 86 |
+ } |
|
| 87 |
+ } else {
|
|
| 88 |
+ stat.Linkname = path.Join(d.Stat.Path, stat.Linkname) |
|
| 89 |
+ } |
|
| 90 |
+ } |
|
| 91 |
+ return fn(filepath.Join(d.Stat.Path, p), &StatInfo{stat}, nil)
|
|
| 92 |
+ }); err != nil {
|
|
| 93 |
+ return err |
|
| 94 |
+ } |
|
| 53 | 95 |
} |
| 54 |
- if main.Name() != fs.stat.Path {
|
|
| 55 |
- return errors.Errorf("subdir path must be single file")
|
|
| 96 |
+ return nil |
|
| 97 |
+} |
|
| 98 |
+ |
|
| 99 |
+func (fs *subDirFS) Open(p string) (io.ReadCloser, error) {
|
|
| 100 |
+ parts := strings.SplitN(filepath.Clean(p), string(filepath.Separator), 2) |
|
| 101 |
+ if len(parts) == 0 {
|
|
| 102 |
+ return ioutil.NopCloser(&emptyReader{}), nil
|
|
| 56 | 103 |
} |
| 57 |
- if err := fn(fs.stat.Path, main, nil); err != nil {
|
|
| 58 |
- return err |
|
| 104 |
+ d, ok := fs.m[parts[0]] |
|
| 105 |
+ if !ok {
|
|
| 106 |
+ return nil, os.ErrNotExist |
|
| 59 | 107 |
} |
| 60 |
- return fs.fs.Walk(ctx, func(p string, fi os.FileInfo, err error) error {
|
|
| 61 |
- stat, ok := fi.Sys().(*types.Stat) |
|
| 62 |
- if !ok {
|
|
| 63 |
- return errors.Wrapf(err, "invalid fileinfo without stat info: %s", p) |
|
| 64 |
- } |
|
| 65 |
- stat.Path = path.Join(fs.stat.Path, stat.Path) |
|
| 66 |
- return fn(filepath.Join(fs.stat.Path, p), &StatInfo{stat}, nil)
|
|
| 67 |
- }) |
|
| 108 |
+ return d.FS.Open(parts[1]) |
|
| 68 | 109 |
} |
| 69 | 110 |
|
| 70 |
-func (fs *subDirFS) Open(p string) (io.ReadCloser, error) {
|
|
| 71 |
- return fs.fs.Open(strings.TrimPrefix(p, fs.stat.Path+"/")) |
|
| 111 |
+type emptyReader struct {
|
|
| 112 |
+} |
|
| 113 |
+ |
|
| 114 |
+func (*emptyReader) Read([]byte) (int, error) {
|
|
| 115 |
+ return 0, io.EOF |
|
| 72 | 116 |
} |
| 73 | 117 |
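
SubDirFS now stitches several source filesystems together instead of wrapping a single one: every Dir is mounted under a single-component path, Walk emits each Dir's synthetic directory entry before walking its contents (in path-sorted order), and Open routes by the first path segment. A usage sketch; the helper name, the "foo"/"bar" mount names, and the mode bits are illustrative, and the inner FS values would normally come from fsutil's own walker:

    package fsexample

    import (
        "os"

        "github.com/tonistiigi/fsutil"
        "github.com/tonistiigi/fsutil/types"
    )

    // newTwoDirFS is a hypothetical helper: it exposes src1 under "foo/" and
    // src2 under "bar/" inside one combined filesystem.
    func newTwoDirFS(src1, src2 fsutil.FS) (fsutil.FS, error) {
        return fsutil.SubDirFS([]fsutil.Dir{
            {
                FS: src1,
                // Stat describes the synthetic top-level entry: Path must be a
                // single path component and Mode must be a directory mode.
                Stat: types.Stat{Path: "foo", Mode: uint32(os.ModeDir | 0755)},
            },
            {
                FS:   src2,
                Stat: types.Stat{Path: "bar", Mode: uint32(os.ModeDir | 0755)},
            },
        })
    }
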
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,72 @@ |
| 0 |
+package fsutil |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "archive/tar" |
|
| 4 |
+ "context" |
|
| 5 |
+ "io" |
|
| 6 |
+ "os" |
|
| 7 |
+ "path/filepath" |
|
| 8 |
+ "strings" |
|
| 9 |
+ |
|
| 10 |
+ "github.com/pkg/errors" |
|
| 11 |
+ "github.com/tonistiigi/fsutil/types" |
|
| 12 |
+) |
|
| 13 |
+ |
|
| 14 |
+func WriteTar(ctx context.Context, fs FS, w io.Writer) error {
|
|
| 15 |
+ tw := tar.NewWriter(w) |
|
| 16 |
+ err := fs.Walk(ctx, func(path string, fi os.FileInfo, err error) error {
|
|
| 17 |
+ stat, ok := fi.Sys().(*types.Stat) |
|
| 18 |
+ if !ok {
|
|
| 19 |
+ return errors.Wrapf(err, "invalid fileinfo without stat info: %s", path) |
|
| 20 |
+ } |
|
| 21 |
+ hdr, err := tar.FileInfoHeader(fi, stat.Linkname) |
|
| 22 |
+ if err != nil {
|
|
| 23 |
+ return err |
|
| 24 |
+ } |
|
| 25 |
+ |
|
| 26 |
+ name := filepath.ToSlash(path) |
|
| 27 |
+ if fi.IsDir() && !strings.HasSuffix(name, "/") {
|
|
| 28 |
+ name += "/" |
|
| 29 |
+ } |
|
| 30 |
+ hdr.Name = name |
|
| 31 |
+ |
|
| 32 |
+ hdr.Uid = int(stat.Uid) |
|
| 33 |
+ hdr.Gid = int(stat.Gid) |
|
| 34 |
+ hdr.Devmajor = stat.Devmajor |
|
| 35 |
+ hdr.Devminor = stat.Devminor |
|
| 36 |
+ hdr.Linkname = stat.Linkname |
|
| 37 |
+ if hdr.Linkname != "" {
|
|
| 38 |
+ hdr.Size = 0 |
|
| 39 |
+ hdr.Typeflag = tar.TypeLink |
|
| 40 |
+ } |
|
| 41 |
+ |
|
| 42 |
+ if len(stat.Xattrs) > 0 {
|
|
| 43 |
+ hdr.PAXRecords = map[string]string{}
|
|
| 44 |
+ } |
|
| 45 |
+ for k, v := range stat.Xattrs {
|
|
| 46 |
+ hdr.PAXRecords["SCHILY.xattr."+k] = string(v) |
|
| 47 |
+ } |
|
| 48 |
+ |
|
| 49 |
+ if err := tw.WriteHeader(hdr); err != nil {
|
|
| 50 |
+ return errors.Wrap(err, "failed to write file header") |
|
| 51 |
+ } |
|
| 52 |
+ |
|
| 53 |
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 && hdr.Linkname == "" {
|
|
| 54 |
+ rc, err := fs.Open(path) |
|
| 55 |
+ if err != nil {
|
|
| 56 |
+ return err |
|
| 57 |
+ } |
|
| 58 |
+ if _, err := io.Copy(tw, rc); err != nil {
|
|
| 59 |
+ return err |
|
| 60 |
+ } |
|
| 61 |
+ if err := rc.Close(); err != nil {
|
|
| 62 |
+ return err |
|
| 63 |
+ } |
|
| 64 |
+ } |
|
| 65 |
+ return nil |
|
| 66 |
+ }) |
|
| 67 |
+ if err != nil {
|
|
| 68 |
+ return err |
|
| 69 |
+ } |
|
| 70 |
+ return tw.Close() |
|
| 71 |
+} |
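
WriteTar streams any FS as a tar archive, so combined with the new SubDirFS it can pack several output directories into a single tarball without staging them on disk. A minimal sketch of that composition; the helper name and the gzip wrapping are illustrative additions, not part of this change:

    package fsexample

    import (
        "compress/gzip"
        "context"
        "io"

        "github.com/tonistiigi/fsutil"
    )

    // exportDirsAsTarGz is a hypothetical helper: it mounts each Dir under its
    // Stat.Path via SubDirFS, then streams the combined tree as a
    // gzip-compressed tar archive to w.
    func exportDirsAsTarGz(ctx context.Context, dirs []fsutil.Dir, w io.Writer) error {
        fs, err := fsutil.SubDirFS(dirs)
        if err != nil {
            return err
        }
        gz := gzip.NewWriter(w)
        if err := fsutil.WriteTar(ctx, fs, gz); err != nil {
            gz.Close()
            return err
        }
        return gz.Close()
    }
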