This fix revendor swarmkit to 0ec7c6ee4b3185ec4e3d6bd65f8f5542b1761421.
Related docker PR and issues:
(#27567)
(#25437)
(#26988)
(#25644)
Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
| ... | ... |
@@ -147,7 +147,7 @@ clone git github.com/docker/containerd 52ef1ceb4b660c42cf4ea9013180a5663968d4c7 |
| 147 | 147 |
clone git github.com/tonistiigi/fifo 8c56881ce5e63e19e2dfc495c8af0fb90916467d |
| 148 | 148 |
|
| 149 | 149 |
# cluster |
| 150 |
-clone git github.com/docker/swarmkit 3b221eb0391d34ae0b9dac65df02b5b64de6dff2 |
|
| 150 |
+clone git github.com/docker/swarmkit 0ec7c6ee4b3185ec4e3d6bd65f8f5542b1761421 |
|
| 151 | 151 |
clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9 |
| 152 | 152 |
clone git github.com/gogo/protobuf v0.3 |
| 153 | 153 |
clone git github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a |
| ... | ... |
@@ -48,9 +48,11 @@ func New(config *Config) (*Agent, error) {
|
| 48 | 48 |
return nil, err |
| 49 | 49 |
} |
| 50 | 50 |
|
| 51 |
+ worker := newWorker(config.DB, config.Executor) |
|
| 52 |
+ |
|
| 51 | 53 |
a := &Agent{
|
| 52 | 54 |
config: config, |
| 53 |
- worker: newWorker(config.DB, config.Executor), |
|
| 55 |
+ worker: worker, |
|
| 54 | 56 |
sessionq: make(chan sessionOperation), |
| 55 | 57 |
started: make(chan struct{}),
|
| 56 | 58 |
stopped: make(chan struct{}),
|
| ... | ... |
@@ -278,8 +280,8 @@ func (a *Agent) handleSessionMessage(ctx context.Context, message *api.SessionMe |
| 278 | 278 |
|
| 279 | 279 |
if message.Node != nil {
|
| 280 | 280 |
if a.node == nil || !nodesEqual(a.node, message.Node) {
|
| 281 |
- if a.config.NotifyRoleChange != nil {
|
|
| 282 |
- a.config.NotifyRoleChange <- message.Node.Spec.Role |
|
| 281 |
+ if a.config.NotifyNodeChange != nil {
|
|
| 282 |
+ a.config.NotifyNodeChange <- message.Node.Copy() |
|
| 283 | 283 |
} |
| 284 | 284 |
a.node = message.Node.Copy() |
| 285 | 285 |
if err := a.config.Executor.Configure(ctx, a.node); err != nil {
|
| ... | ... |
@@ -24,8 +24,8 @@ type Config struct {
|
| 24 | 24 |
// DB used for task storage. Must be open for the lifetime of the agent. |
| 25 | 25 |
DB *bolt.DB |
| 26 | 26 |
|
| 27 |
- // NotifyRoleChange channel receives new roles from session messages. |
|
| 28 |
- NotifyRoleChange chan<- api.NodeRole |
|
| 27 |
+ // NotifyNodeChange channel receives new node changes from session messages. |
|
| 28 |
+ NotifyNodeChange chan<- *api.Node |
|
| 29 | 29 |
|
| 30 | 30 |
// Credentials is credentials for grpc connection to manager. |
| 31 | 31 |
Credentials credentials.TransportCredentials |
| ... | ... |
@@ -277,7 +277,7 @@ func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus, |
| 277 | 277 |
} |
| 278 | 278 |
|
| 279 | 279 |
switch status.State {
|
| 280 |
- case api.TaskStateNew, api.TaskStateAllocated, api.TaskStateAssigned: |
|
| 280 |
+ case api.TaskStateNew, api.TaskStatePending, api.TaskStateAssigned: |
|
| 281 | 281 |
return transition(api.TaskStateAccepted, "accepted") |
| 282 | 282 |
case api.TaskStateAccepted: |
| 283 | 283 |
return transition(api.TaskStatePreparing, "preparing") |
| ... | ... |
@@ -21,3 +21,25 @@ type Executor interface {
|
| 21 | 21 |
// manager to the executor. |
| 22 | 22 |
SetNetworkBootstrapKeys([]*api.EncryptionKey) error |
| 23 | 23 |
} |
| 24 |
+ |
|
| 25 |
+// SecretsProvider is implemented by objects that can store secrets, typically |
|
| 26 |
+// an executor. |
|
| 27 |
+type SecretsProvider interface {
|
|
| 28 |
+ Secrets() SecretsManager |
|
| 29 |
+} |
|
| 30 |
+ |
|
| 31 |
+// SecretGetter contains secret data necessary for the Controller. |
|
| 32 |
+type SecretGetter interface {
|
|
| 33 |
+ // Get returns the the secret with a specific secret ID, if available. |
|
| 34 |
+ // When the secret is not available, the return will be nil. |
|
| 35 |
+ Get(secretID string) *api.Secret |
|
| 36 |
+} |
|
| 37 |
+ |
|
| 38 |
+// SecretsManager is the interface for secret storage and updates. |
|
| 39 |
+type SecretsManager interface {
|
|
| 40 |
+ SecretGetter |
|
| 41 |
+ |
|
| 42 |
+ Add(secrets ...api.Secret) // add one or more secrets |
|
| 43 |
+ Remove(secrets []string) // remove the secrets by ID |
|
| 44 |
+ Reset() // remove all secrets |
|
| 45 |
+} |
| 24 | 46 |
deleted file mode 100644 |
| ... | ... |
@@ -1,53 +0,0 @@ |
| 1 |
-package agent |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "sync" |
|
| 5 |
- |
|
| 6 |
- "github.com/docker/swarmkit/api" |
|
| 7 |
-) |
|
| 8 |
- |
|
| 9 |
-// secrets is a map that keeps all the currenty available secrets to the agent |
|
| 10 |
-// mapped by secret ID |
|
| 11 |
-type secrets struct {
|
|
| 12 |
- mu sync.RWMutex |
|
| 13 |
- m map[string]api.Secret |
|
| 14 |
-} |
|
| 15 |
- |
|
| 16 |
-func newSecrets() *secrets {
|
|
| 17 |
- return &secrets{
|
|
| 18 |
- m: make(map[string]api.Secret), |
|
| 19 |
- } |
|
| 20 |
-} |
|
| 21 |
- |
|
| 22 |
-// Get returns a secret by ID. If the secret doesn't exist, returns nil. |
|
| 23 |
-func (s *secrets) Get(secretID string) api.Secret {
|
|
| 24 |
- s.mu.RLock() |
|
| 25 |
- defer s.mu.RUnlock() |
|
| 26 |
- return s.m[secretID] |
|
| 27 |
-} |
|
| 28 |
- |
|
| 29 |
-// Add adds one or more secrets to the secret map |
|
| 30 |
-func (s *secrets) Add(secrets ...api.Secret) {
|
|
| 31 |
- s.mu.Lock() |
|
| 32 |
- defer s.mu.Unlock() |
|
| 33 |
- for _, secret := range secrets {
|
|
| 34 |
- s.m[secret.ID] = secret |
|
| 35 |
- } |
|
| 36 |
-} |
|
| 37 |
- |
|
| 38 |
-// Remove removes one or more secrets by ID from the secret map. Succeeds |
|
| 39 |
-// whether or not the given IDs are in the map. |
|
| 40 |
-func (s *secrets) Remove(secrets []string) {
|
|
| 41 |
- s.mu.Lock() |
|
| 42 |
- defer s.mu.Unlock() |
|
| 43 |
- for _, secret := range secrets {
|
|
| 44 |
- delete(s.m, secret) |
|
| 45 |
- } |
|
| 46 |
-} |
|
| 47 |
- |
|
| 48 |
-// Reset removes all the secrets |
|
| 49 |
-func (s *secrets) Reset() {
|
|
| 50 |
- s.mu.Lock() |
|
| 51 |
- defer s.mu.Unlock() |
|
| 52 |
- s.m = make(map[string]api.Secret) |
|
| 53 |
-} |
| ... | ... |
@@ -42,7 +42,6 @@ type worker struct {
|
| 42 | 42 |
db *bolt.DB |
| 43 | 43 |
executor exec.Executor |
| 44 | 44 |
listeners map[*statusReporterKey]struct{}
|
| 45 |
- secrets *secrets |
|
| 46 | 45 |
|
| 47 | 46 |
taskManagers map[string]*taskManager |
| 48 | 47 |
mu sync.RWMutex |
| ... | ... |
@@ -54,7 +53,6 @@ func newWorker(db *bolt.DB, executor exec.Executor) *worker {
|
| 54 | 54 |
executor: executor, |
| 55 | 55 |
listeners: make(map[*statusReporterKey]struct{}),
|
| 56 | 56 |
taskManagers: make(map[string]*taskManager), |
| 57 |
- secrets: newSecrets(), |
|
| 58 | 57 |
} |
| 59 | 58 |
} |
| 60 | 59 |
|
| ... | ... |
@@ -255,6 +253,15 @@ func reconcileTaskState(ctx context.Context, w *worker, assignments []*api.Assig |
| 255 | 255 |
} |
| 256 | 256 |
|
| 257 | 257 |
func reconcileSecrets(ctx context.Context, w *worker, assignments []*api.AssignmentChange, fullSnapshot bool) error {
|
| 258 |
+ var secrets exec.SecretsManager |
|
| 259 |
+ provider, ok := w.executor.(exec.SecretsProvider) |
|
| 260 |
+ if !ok {
|
|
| 261 |
+ log.G(ctx).Warn("secrets update ignored; executor does not support secrets")
|
|
| 262 |
+ return nil |
|
| 263 |
+ } |
|
| 264 |
+ |
|
| 265 |
+ secrets = provider.Secrets() |
|
| 266 |
+ |
|
| 258 | 267 |
var ( |
| 259 | 268 |
updatedSecrets []api.Secret |
| 260 | 269 |
removedSecrets []string |
| ... | ... |
@@ -278,11 +285,11 @@ func reconcileSecrets(ctx context.Context, w *worker, assignments []*api.Assignm |
| 278 | 278 |
|
| 279 | 279 |
// If this was a complete set of secrets, we're going to clear the secrets map and add all of them |
| 280 | 280 |
if fullSnapshot {
|
| 281 |
- w.secrets.Reset() |
|
| 281 |
+ secrets.Reset() |
|
| 282 | 282 |
} else {
|
| 283 |
- w.secrets.Remove(removedSecrets) |
|
| 283 |
+ secrets.Remove(removedSecrets) |
|
| 284 | 284 |
} |
| 285 |
- w.secrets.Add(updatedSecrets...) |
|
| 285 |
+ secrets.Add(updatedSecrets...) |
|
| 286 | 286 |
|
| 287 | 287 |
return nil |
| 288 | 288 |
} |
| ... | ... |
@@ -459,6 +459,28 @@ func (m *GetSecretResponse) Reset() { *m = GetSecretResponse{
|
| 459 | 459 |
func (*GetSecretResponse) ProtoMessage() {}
|
| 460 | 460 |
func (*GetSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{40} }
|
| 461 | 461 |
|
| 462 |
+type UpdateSecretRequest struct {
|
|
| 463 |
+ // SecretID is the secret ID to update. |
|
| 464 |
+ SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` |
|
| 465 |
+ // SecretVersion is the version of the secret being updated. |
|
| 466 |
+ SecretVersion *Version `protobuf:"bytes,2,opt,name=secret_version,json=secretVersion" json:"secret_version,omitempty"` |
|
| 467 |
+ // Spec is the new spec to apply to the Secret |
|
| 468 |
+ // Only some fields are allowed to be updated. |
|
| 469 |
+ Spec *SecretSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` |
|
| 470 |
+} |
|
| 471 |
+ |
|
| 472 |
+func (m *UpdateSecretRequest) Reset() { *m = UpdateSecretRequest{} }
|
|
| 473 |
+func (*UpdateSecretRequest) ProtoMessage() {}
|
|
| 474 |
+func (*UpdateSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{41} }
|
|
| 475 |
+ |
|
| 476 |
+type UpdateSecretResponse struct {
|
|
| 477 |
+ Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` |
|
| 478 |
+} |
|
| 479 |
+ |
|
| 480 |
+func (m *UpdateSecretResponse) Reset() { *m = UpdateSecretResponse{} }
|
|
| 481 |
+func (*UpdateSecretResponse) ProtoMessage() {}
|
|
| 482 |
+func (*UpdateSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{42} }
|
|
| 483 |
+ |
|
| 462 | 484 |
// ListSecretRequest is the request to list all non-internal secrets in the secret store, |
| 463 | 485 |
// or all secrets filtered by (name or name prefix or id prefix) and labels. |
| 464 | 486 |
type ListSecretsRequest struct {
|
| ... | ... |
@@ -467,7 +489,7 @@ type ListSecretsRequest struct {
|
| 467 | 467 |
|
| 468 | 468 |
func (m *ListSecretsRequest) Reset() { *m = ListSecretsRequest{} }
|
| 469 | 469 |
func (*ListSecretsRequest) ProtoMessage() {}
|
| 470 |
-func (*ListSecretsRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{41} }
|
|
| 470 |
+func (*ListSecretsRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{43} }
|
|
| 471 | 471 |
|
| 472 | 472 |
type ListSecretsRequest_Filters struct {
|
| 473 | 473 |
Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` |
| ... | ... |
@@ -479,7 +501,7 @@ type ListSecretsRequest_Filters struct {
|
| 479 | 479 |
func (m *ListSecretsRequest_Filters) Reset() { *m = ListSecretsRequest_Filters{} }
|
| 480 | 480 |
func (*ListSecretsRequest_Filters) ProtoMessage() {}
|
| 481 | 481 |
func (*ListSecretsRequest_Filters) Descriptor() ([]byte, []int) {
|
| 482 |
- return fileDescriptorControl, []int{41, 0}
|
|
| 482 |
+ return fileDescriptorControl, []int{43, 0}
|
|
| 483 | 483 |
} |
| 484 | 484 |
|
| 485 | 485 |
// ListSecretResponse contains a list of all the secrets that match the name or |
| ... | ... |
@@ -492,7 +514,7 @@ type ListSecretsResponse struct {
|
| 492 | 492 |
|
| 493 | 493 |
func (m *ListSecretsResponse) Reset() { *m = ListSecretsResponse{} }
|
| 494 | 494 |
func (*ListSecretsResponse) ProtoMessage() {}
|
| 495 |
-func (*ListSecretsResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{42} }
|
|
| 495 |
+func (*ListSecretsResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{44} }
|
|
| 496 | 496 |
|
| 497 | 497 |
// CreateSecretRequest specifies a new secret (it will not update an existing |
| 498 | 498 |
// secret) to create. |
| ... | ... |
@@ -502,7 +524,7 @@ type CreateSecretRequest struct {
|
| 502 | 502 |
|
| 503 | 503 |
func (m *CreateSecretRequest) Reset() { *m = CreateSecretRequest{} }
|
| 504 | 504 |
func (*CreateSecretRequest) ProtoMessage() {}
|
| 505 |
-func (*CreateSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{43} }
|
|
| 505 |
+func (*CreateSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{45} }
|
|
| 506 | 506 |
|
| 507 | 507 |
// CreateSecretResponse contains the newly created `Secret`` corresponding to the |
| 508 | 508 |
// name in `CreateSecretRequest`. The `Secret.Spec.Data` field should be nil instead |
| ... | ... |
@@ -513,7 +535,7 @@ type CreateSecretResponse struct {
|
| 513 | 513 |
|
| 514 | 514 |
func (m *CreateSecretResponse) Reset() { *m = CreateSecretResponse{} }
|
| 515 | 515 |
func (*CreateSecretResponse) ProtoMessage() {}
|
| 516 |
-func (*CreateSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{44} }
|
|
| 516 |
+func (*CreateSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{46} }
|
|
| 517 | 517 |
|
| 518 | 518 |
// RemoveSecretRequest contains the ID of the secret that should be removed. This |
| 519 | 519 |
// removes all versions of the secret. |
| ... | ... |
@@ -523,7 +545,7 @@ type RemoveSecretRequest struct {
|
| 523 | 523 |
|
| 524 | 524 |
func (m *RemoveSecretRequest) Reset() { *m = RemoveSecretRequest{} }
|
| 525 | 525 |
func (*RemoveSecretRequest) ProtoMessage() {}
|
| 526 |
-func (*RemoveSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{45} }
|
|
| 526 |
+func (*RemoveSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{47} }
|
|
| 527 | 527 |
|
| 528 | 528 |
// RemoveSecretResponse is an empty object indicating the successful removal of |
| 529 | 529 |
// a secret. |
| ... | ... |
@@ -532,7 +554,7 @@ type RemoveSecretResponse struct {
|
| 532 | 532 |
|
| 533 | 533 |
func (m *RemoveSecretResponse) Reset() { *m = RemoveSecretResponse{} }
|
| 534 | 534 |
func (*RemoveSecretResponse) ProtoMessage() {}
|
| 535 |
-func (*RemoveSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{46} }
|
|
| 535 |
+func (*RemoveSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{48} }
|
|
| 536 | 536 |
|
| 537 | 537 |
func init() {
|
| 538 | 538 |
proto.RegisterType((*GetNodeRequest)(nil), "docker.swarmkit.v1.GetNodeRequest") |
| ... | ... |
@@ -581,6 +603,8 @@ func init() {
|
| 581 | 581 |
proto.RegisterType((*UpdateClusterResponse)(nil), "docker.swarmkit.v1.UpdateClusterResponse") |
| 582 | 582 |
proto.RegisterType((*GetSecretRequest)(nil), "docker.swarmkit.v1.GetSecretRequest") |
| 583 | 583 |
proto.RegisterType((*GetSecretResponse)(nil), "docker.swarmkit.v1.GetSecretResponse") |
| 584 |
+ proto.RegisterType((*UpdateSecretRequest)(nil), "docker.swarmkit.v1.UpdateSecretRequest") |
|
| 585 |
+ proto.RegisterType((*UpdateSecretResponse)(nil), "docker.swarmkit.v1.UpdateSecretResponse") |
|
| 584 | 586 |
proto.RegisterType((*ListSecretsRequest)(nil), "docker.swarmkit.v1.ListSecretsRequest") |
| 585 | 587 |
proto.RegisterType((*ListSecretsRequest_Filters)(nil), "docker.swarmkit.v1.ListSecretsRequest.Filters") |
| 586 | 588 |
proto.RegisterType((*ListSecretsResponse)(nil), "docker.swarmkit.v1.ListSecretsResponse") |
| ... | ... |
@@ -762,6 +786,14 @@ func (p *authenticatedWrapperControlServer) GetSecret(ctx context.Context, r *Ge |
| 762 | 762 |
return p.local.GetSecret(ctx, r) |
| 763 | 763 |
} |
| 764 | 764 |
|
| 765 |
+func (p *authenticatedWrapperControlServer) UpdateSecret(ctx context.Context, r *UpdateSecretRequest) (*UpdateSecretResponse, error) {
|
|
| 766 |
+ |
|
| 767 |
+ if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
|
|
| 768 |
+ return nil, err |
|
| 769 |
+ } |
|
| 770 |
+ return p.local.UpdateSecret(ctx, r) |
|
| 771 |
+} |
|
| 772 |
+ |
|
| 765 | 773 |
func (p *authenticatedWrapperControlServer) ListSecrets(ctx context.Context, r *ListSecretsRequest) (*ListSecretsResponse, error) {
|
| 766 | 774 |
|
| 767 | 775 |
if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
|
| ... | ... |
@@ -1491,6 +1523,32 @@ func (m *GetSecretResponse) Copy() *GetSecretResponse {
|
| 1491 | 1491 |
return o |
| 1492 | 1492 |
} |
| 1493 | 1493 |
|
| 1494 |
+func (m *UpdateSecretRequest) Copy() *UpdateSecretRequest {
|
|
| 1495 |
+ if m == nil {
|
|
| 1496 |
+ return nil |
|
| 1497 |
+ } |
|
| 1498 |
+ |
|
| 1499 |
+ o := &UpdateSecretRequest{
|
|
| 1500 |
+ SecretID: m.SecretID, |
|
| 1501 |
+ SecretVersion: m.SecretVersion.Copy(), |
|
| 1502 |
+ Spec: m.Spec.Copy(), |
|
| 1503 |
+ } |
|
| 1504 |
+ |
|
| 1505 |
+ return o |
|
| 1506 |
+} |
|
| 1507 |
+ |
|
| 1508 |
+func (m *UpdateSecretResponse) Copy() *UpdateSecretResponse {
|
|
| 1509 |
+ if m == nil {
|
|
| 1510 |
+ return nil |
|
| 1511 |
+ } |
|
| 1512 |
+ |
|
| 1513 |
+ o := &UpdateSecretResponse{
|
|
| 1514 |
+ Secret: m.Secret.Copy(), |
|
| 1515 |
+ } |
|
| 1516 |
+ |
|
| 1517 |
+ return o |
|
| 1518 |
+} |
|
| 1519 |
+ |
|
| 1494 | 1520 |
func (m *ListSecretsRequest) Copy() *ListSecretsRequest {
|
| 1495 | 1521 |
if m == nil {
|
| 1496 | 1522 |
return nil |
| ... | ... |
@@ -2203,6 +2261,34 @@ func (this *GetSecretResponse) GoString() string {
|
| 2203 | 2203 |
s = append(s, "}") |
| 2204 | 2204 |
return strings.Join(s, "") |
| 2205 | 2205 |
} |
| 2206 |
+func (this *UpdateSecretRequest) GoString() string {
|
|
| 2207 |
+ if this == nil {
|
|
| 2208 |
+ return "nil" |
|
| 2209 |
+ } |
|
| 2210 |
+ s := make([]string, 0, 7) |
|
| 2211 |
+ s = append(s, "&api.UpdateSecretRequest{")
|
|
| 2212 |
+ s = append(s, "SecretID: "+fmt.Sprintf("%#v", this.SecretID)+",\n")
|
|
| 2213 |
+ if this.SecretVersion != nil {
|
|
| 2214 |
+ s = append(s, "SecretVersion: "+fmt.Sprintf("%#v", this.SecretVersion)+",\n")
|
|
| 2215 |
+ } |
|
| 2216 |
+ if this.Spec != nil {
|
|
| 2217 |
+ s = append(s, "Spec: "+fmt.Sprintf("%#v", this.Spec)+",\n")
|
|
| 2218 |
+ } |
|
| 2219 |
+ s = append(s, "}") |
|
| 2220 |
+ return strings.Join(s, "") |
|
| 2221 |
+} |
|
| 2222 |
+func (this *UpdateSecretResponse) GoString() string {
|
|
| 2223 |
+ if this == nil {
|
|
| 2224 |
+ return "nil" |
|
| 2225 |
+ } |
|
| 2226 |
+ s := make([]string, 0, 5) |
|
| 2227 |
+ s = append(s, "&api.UpdateSecretResponse{")
|
|
| 2228 |
+ if this.Secret != nil {
|
|
| 2229 |
+ s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n")
|
|
| 2230 |
+ } |
|
| 2231 |
+ s = append(s, "}") |
|
| 2232 |
+ return strings.Join(s, "") |
|
| 2233 |
+} |
|
| 2206 | 2234 |
func (this *ListSecretsRequest) GoString() string {
|
| 2207 | 2235 |
if this == nil {
|
| 2208 | 2236 |
return "nil" |
| ... | ... |
@@ -2358,6 +2444,12 @@ type ControlClient interface {
|
| 2358 | 2358 |
// - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. |
| 2359 | 2359 |
// - Returns an error if getting fails. |
| 2360 | 2360 |
GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) |
| 2361 |
+ // UpdateSecret returns a `UpdateSecretResponse` with a `Secret` with the same |
|
| 2362 |
+ // id as `GetSecretRequest.SecretID` |
|
| 2363 |
+ // - Returns `NotFound` if the Secret with the given id is not found. |
|
| 2364 |
+ // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. |
|
| 2365 |
+ // - Returns an error if updating fails. |
|
| 2366 |
+ UpdateSecret(ctx context.Context, in *UpdateSecretRequest, opts ...grpc.CallOption) (*UpdateSecretResponse, error) |
|
| 2361 | 2367 |
// ListSecrets returns a `ListSecretResponse` with a list all non-internal `Secret`s being |
| 2362 | 2368 |
// managed, or all secrets matching any name in `ListSecretsRequest.Names`, any |
| 2363 | 2369 |
// name prefix in `ListSecretsRequest.NamePrefixes`, any id in |
| ... | ... |
@@ -2565,6 +2657,15 @@ func (c *controlClient) GetSecret(ctx context.Context, in *GetSecretRequest, opt |
| 2565 | 2565 |
return out, nil |
| 2566 | 2566 |
} |
| 2567 | 2567 |
|
| 2568 |
+func (c *controlClient) UpdateSecret(ctx context.Context, in *UpdateSecretRequest, opts ...grpc.CallOption) (*UpdateSecretResponse, error) {
|
|
| 2569 |
+ out := new(UpdateSecretResponse) |
|
| 2570 |
+ err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateSecret", in, out, c.cc, opts...) |
|
| 2571 |
+ if err != nil {
|
|
| 2572 |
+ return nil, err |
|
| 2573 |
+ } |
|
| 2574 |
+ return out, nil |
|
| 2575 |
+} |
|
| 2576 |
+ |
|
| 2568 | 2577 |
func (c *controlClient) ListSecrets(ctx context.Context, in *ListSecretsRequest, opts ...grpc.CallOption) (*ListSecretsResponse, error) {
|
| 2569 | 2578 |
out := new(ListSecretsResponse) |
| 2570 | 2579 |
err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListSecrets", in, out, c.cc, opts...) |
| ... | ... |
@@ -2620,6 +2721,12 @@ type ControlServer interface {
|
| 2620 | 2620 |
// - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. |
| 2621 | 2621 |
// - Returns an error if getting fails. |
| 2622 | 2622 |
GetSecret(context.Context, *GetSecretRequest) (*GetSecretResponse, error) |
| 2623 |
+ // UpdateSecret returns a `UpdateSecretResponse` with a `Secret` with the same |
|
| 2624 |
+ // id as `GetSecretRequest.SecretID` |
|
| 2625 |
+ // - Returns `NotFound` if the Secret with the given id is not found. |
|
| 2626 |
+ // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. |
|
| 2627 |
+ // - Returns an error if updating fails. |
|
| 2628 |
+ UpdateSecret(context.Context, *UpdateSecretRequest) (*UpdateSecretResponse, error) |
|
| 2623 | 2629 |
// ListSecrets returns a `ListSecretResponse` with a list all non-internal `Secret`s being |
| 2624 | 2630 |
// managed, or all secrets matching any name in `ListSecretsRequest.Names`, any |
| 2625 | 2631 |
// name prefix in `ListSecretsRequest.NamePrefixes`, any id in |
| ... | ... |
@@ -3003,6 +3110,24 @@ func _Control_GetSecret_Handler(srv interface{}, ctx context.Context, dec func(i
|
| 3003 | 3003 |
return interceptor(ctx, in, info, handler) |
| 3004 | 3004 |
} |
| 3005 | 3005 |
|
| 3006 |
+func _Control_UpdateSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
| 3007 |
+ in := new(UpdateSecretRequest) |
|
| 3008 |
+ if err := dec(in); err != nil {
|
|
| 3009 |
+ return nil, err |
|
| 3010 |
+ } |
|
| 3011 |
+ if interceptor == nil {
|
|
| 3012 |
+ return srv.(ControlServer).UpdateSecret(ctx, in) |
|
| 3013 |
+ } |
|
| 3014 |
+ info := &grpc.UnaryServerInfo{
|
|
| 3015 |
+ Server: srv, |
|
| 3016 |
+ FullMethod: "/docker.swarmkit.v1.Control/UpdateSecret", |
|
| 3017 |
+ } |
|
| 3018 |
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
| 3019 |
+ return srv.(ControlServer).UpdateSecret(ctx, req.(*UpdateSecretRequest)) |
|
| 3020 |
+ } |
|
| 3021 |
+ return interceptor(ctx, in, info, handler) |
|
| 3022 |
+} |
|
| 3023 |
+ |
|
| 3006 | 3024 |
func _Control_ListSecrets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
| 3007 | 3025 |
in := new(ListSecretsRequest) |
| 3008 | 3026 |
if err := dec(in); err != nil {
|
| ... | ... |
@@ -3142,6 +3267,10 @@ var _Control_serviceDesc = grpc.ServiceDesc{
|
| 3142 | 3142 |
Handler: _Control_GetSecret_Handler, |
| 3143 | 3143 |
}, |
| 3144 | 3144 |
{
|
| 3145 |
+ MethodName: "UpdateSecret", |
|
| 3146 |
+ Handler: _Control_UpdateSecret_Handler, |
|
| 3147 |
+ }, |
|
| 3148 |
+ {
|
|
| 3145 | 3149 |
MethodName: "ListSecrets", |
| 3146 | 3150 |
Handler: _Control_ListSecrets_Handler, |
| 3147 | 3151 |
}, |
| ... | ... |
@@ -4775,6 +4904,78 @@ func (m *GetSecretResponse) MarshalTo(data []byte) (int, error) {
|
| 4775 | 4775 |
return i, nil |
| 4776 | 4776 |
} |
| 4777 | 4777 |
|
| 4778 |
+func (m *UpdateSecretRequest) Marshal() (data []byte, err error) {
|
|
| 4779 |
+ size := m.Size() |
|
| 4780 |
+ data = make([]byte, size) |
|
| 4781 |
+ n, err := m.MarshalTo(data) |
|
| 4782 |
+ if err != nil {
|
|
| 4783 |
+ return nil, err |
|
| 4784 |
+ } |
|
| 4785 |
+ return data[:n], nil |
|
| 4786 |
+} |
|
| 4787 |
+ |
|
| 4788 |
+func (m *UpdateSecretRequest) MarshalTo(data []byte) (int, error) {
|
|
| 4789 |
+ var i int |
|
| 4790 |
+ _ = i |
|
| 4791 |
+ var l int |
|
| 4792 |
+ _ = l |
|
| 4793 |
+ if len(m.SecretID) > 0 {
|
|
| 4794 |
+ data[i] = 0xa |
|
| 4795 |
+ i++ |
|
| 4796 |
+ i = encodeVarintControl(data, i, uint64(len(m.SecretID))) |
|
| 4797 |
+ i += copy(data[i:], m.SecretID) |
|
| 4798 |
+ } |
|
| 4799 |
+ if m.SecretVersion != nil {
|
|
| 4800 |
+ data[i] = 0x12 |
|
| 4801 |
+ i++ |
|
| 4802 |
+ i = encodeVarintControl(data, i, uint64(m.SecretVersion.Size())) |
|
| 4803 |
+ n26, err := m.SecretVersion.MarshalTo(data[i:]) |
|
| 4804 |
+ if err != nil {
|
|
| 4805 |
+ return 0, err |
|
| 4806 |
+ } |
|
| 4807 |
+ i += n26 |
|
| 4808 |
+ } |
|
| 4809 |
+ if m.Spec != nil {
|
|
| 4810 |
+ data[i] = 0x1a |
|
| 4811 |
+ i++ |
|
| 4812 |
+ i = encodeVarintControl(data, i, uint64(m.Spec.Size())) |
|
| 4813 |
+ n27, err := m.Spec.MarshalTo(data[i:]) |
|
| 4814 |
+ if err != nil {
|
|
| 4815 |
+ return 0, err |
|
| 4816 |
+ } |
|
| 4817 |
+ i += n27 |
|
| 4818 |
+ } |
|
| 4819 |
+ return i, nil |
|
| 4820 |
+} |
|
| 4821 |
+ |
|
| 4822 |
+func (m *UpdateSecretResponse) Marshal() (data []byte, err error) {
|
|
| 4823 |
+ size := m.Size() |
|
| 4824 |
+ data = make([]byte, size) |
|
| 4825 |
+ n, err := m.MarshalTo(data) |
|
| 4826 |
+ if err != nil {
|
|
| 4827 |
+ return nil, err |
|
| 4828 |
+ } |
|
| 4829 |
+ return data[:n], nil |
|
| 4830 |
+} |
|
| 4831 |
+ |
|
| 4832 |
+func (m *UpdateSecretResponse) MarshalTo(data []byte) (int, error) {
|
|
| 4833 |
+ var i int |
|
| 4834 |
+ _ = i |
|
| 4835 |
+ var l int |
|
| 4836 |
+ _ = l |
|
| 4837 |
+ if m.Secret != nil {
|
|
| 4838 |
+ data[i] = 0xa |
|
| 4839 |
+ i++ |
|
| 4840 |
+ i = encodeVarintControl(data, i, uint64(m.Secret.Size())) |
|
| 4841 |
+ n28, err := m.Secret.MarshalTo(data[i:]) |
|
| 4842 |
+ if err != nil {
|
|
| 4843 |
+ return 0, err |
|
| 4844 |
+ } |
|
| 4845 |
+ i += n28 |
|
| 4846 |
+ } |
|
| 4847 |
+ return i, nil |
|
| 4848 |
+} |
|
| 4849 |
+ |
|
| 4778 | 4850 |
func (m *ListSecretsRequest) Marshal() (data []byte, err error) {
|
| 4779 | 4851 |
size := m.Size() |
| 4780 | 4852 |
data = make([]byte, size) |
| ... | ... |
@@ -4794,11 +4995,11 @@ func (m *ListSecretsRequest) MarshalTo(data []byte) (int, error) {
|
| 4794 | 4794 |
data[i] = 0xa |
| 4795 | 4795 |
i++ |
| 4796 | 4796 |
i = encodeVarintControl(data, i, uint64(m.Filters.Size())) |
| 4797 |
- n26, err := m.Filters.MarshalTo(data[i:]) |
|
| 4797 |
+ n29, err := m.Filters.MarshalTo(data[i:]) |
|
| 4798 | 4798 |
if err != nil {
|
| 4799 | 4799 |
return 0, err |
| 4800 | 4800 |
} |
| 4801 |
- i += n26 |
|
| 4801 |
+ i += n29 |
|
| 4802 | 4802 |
} |
| 4803 | 4803 |
return i, nil |
| 4804 | 4804 |
} |
| ... | ... |
@@ -4932,11 +5133,11 @@ func (m *CreateSecretRequest) MarshalTo(data []byte) (int, error) {
|
| 4932 | 4932 |
data[i] = 0xa |
| 4933 | 4933 |
i++ |
| 4934 | 4934 |
i = encodeVarintControl(data, i, uint64(m.Spec.Size())) |
| 4935 |
- n27, err := m.Spec.MarshalTo(data[i:]) |
|
| 4935 |
+ n30, err := m.Spec.MarshalTo(data[i:]) |
|
| 4936 | 4936 |
if err != nil {
|
| 4937 | 4937 |
return 0, err |
| 4938 | 4938 |
} |
| 4939 |
- i += n27 |
|
| 4939 |
+ i += n30 |
|
| 4940 | 4940 |
} |
| 4941 | 4941 |
return i, nil |
| 4942 | 4942 |
} |
| ... | ... |
@@ -4960,11 +5161,11 @@ func (m *CreateSecretResponse) MarshalTo(data []byte) (int, error) {
|
| 4960 | 4960 |
data[i] = 0xa |
| 4961 | 4961 |
i++ |
| 4962 | 4962 |
i = encodeVarintControl(data, i, uint64(m.Secret.Size())) |
| 4963 |
- n28, err := m.Secret.MarshalTo(data[i:]) |
|
| 4963 |
+ n31, err := m.Secret.MarshalTo(data[i:]) |
|
| 4964 | 4964 |
if err != nil {
|
| 4965 | 4965 |
return 0, err |
| 4966 | 4966 |
} |
| 4967 |
- i += n28 |
|
| 4967 |
+ i += n31 |
|
| 4968 | 4968 |
} |
| 4969 | 4969 |
return i, nil |
| 4970 | 4970 |
} |
| ... | ... |
@@ -5725,6 +5926,37 @@ func (p *raftProxyControlServer) GetSecret(ctx context.Context, r *GetSecretRequ |
| 5725 | 5725 |
return resp, err |
| 5726 | 5726 |
} |
| 5727 | 5727 |
|
| 5728 |
+func (p *raftProxyControlServer) UpdateSecret(ctx context.Context, r *UpdateSecretRequest) (*UpdateSecretResponse, error) {
|
|
| 5729 |
+ |
|
| 5730 |
+ conn, err := p.connSelector.LeaderConn(ctx) |
|
| 5731 |
+ if err != nil {
|
|
| 5732 |
+ if err == raftselector.ErrIsLeader {
|
|
| 5733 |
+ return p.local.UpdateSecret(ctx, r) |
|
| 5734 |
+ } |
|
| 5735 |
+ return nil, err |
|
| 5736 |
+ } |
|
| 5737 |
+ modCtx, err := p.runCtxMods(ctx) |
|
| 5738 |
+ if err != nil {
|
|
| 5739 |
+ return nil, err |
|
| 5740 |
+ } |
|
| 5741 |
+ |
|
| 5742 |
+ resp, err := NewControlClient(conn).UpdateSecret(modCtx, r) |
|
| 5743 |
+ if err != nil {
|
|
| 5744 |
+ if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
|
|
| 5745 |
+ return resp, err |
|
| 5746 |
+ } |
|
| 5747 |
+ conn, err := p.pollNewLeaderConn(ctx) |
|
| 5748 |
+ if err != nil {
|
|
| 5749 |
+ if err == raftselector.ErrIsLeader {
|
|
| 5750 |
+ return p.local.UpdateSecret(ctx, r) |
|
| 5751 |
+ } |
|
| 5752 |
+ return nil, err |
|
| 5753 |
+ } |
|
| 5754 |
+ return NewControlClient(conn).UpdateSecret(modCtx, r) |
|
| 5755 |
+ } |
|
| 5756 |
+ return resp, err |
|
| 5757 |
+} |
|
| 5758 |
+ |
|
| 5728 | 5759 |
func (p *raftProxyControlServer) ListSecrets(ctx context.Context, r *ListSecretsRequest) (*ListSecretsResponse, error) {
|
| 5729 | 5760 |
|
| 5730 | 5761 |
conn, err := p.connSelector.LeaderConn(ctx) |
| ... | ... |
@@ -6448,6 +6680,34 @@ func (m *GetSecretResponse) Size() (n int) {
|
| 6448 | 6448 |
return n |
| 6449 | 6449 |
} |
| 6450 | 6450 |
|
| 6451 |
+func (m *UpdateSecretRequest) Size() (n int) {
|
|
| 6452 |
+ var l int |
|
| 6453 |
+ _ = l |
|
| 6454 |
+ l = len(m.SecretID) |
|
| 6455 |
+ if l > 0 {
|
|
| 6456 |
+ n += 1 + l + sovControl(uint64(l)) |
|
| 6457 |
+ } |
|
| 6458 |
+ if m.SecretVersion != nil {
|
|
| 6459 |
+ l = m.SecretVersion.Size() |
|
| 6460 |
+ n += 1 + l + sovControl(uint64(l)) |
|
| 6461 |
+ } |
|
| 6462 |
+ if m.Spec != nil {
|
|
| 6463 |
+ l = m.Spec.Size() |
|
| 6464 |
+ n += 1 + l + sovControl(uint64(l)) |
|
| 6465 |
+ } |
|
| 6466 |
+ return n |
|
| 6467 |
+} |
|
| 6468 |
+ |
|
| 6469 |
+func (m *UpdateSecretResponse) Size() (n int) {
|
|
| 6470 |
+ var l int |
|
| 6471 |
+ _ = l |
|
| 6472 |
+ if m.Secret != nil {
|
|
| 6473 |
+ l = m.Secret.Size() |
|
| 6474 |
+ n += 1 + l + sovControl(uint64(l)) |
|
| 6475 |
+ } |
|
| 6476 |
+ return n |
|
| 6477 |
+} |
|
| 6478 |
+ |
|
| 6451 | 6479 |
func (m *ListSecretsRequest) Size() (n int) {
|
| 6452 | 6480 |
var l int |
| 6453 | 6481 |
_ = l |
| ... | ... |
@@ -7088,6 +7348,28 @@ func (this *GetSecretResponse) String() string {
|
| 7088 | 7088 |
}, "") |
| 7089 | 7089 |
return s |
| 7090 | 7090 |
} |
| 7091 |
+func (this *UpdateSecretRequest) String() string {
|
|
| 7092 |
+ if this == nil {
|
|
| 7093 |
+ return "nil" |
|
| 7094 |
+ } |
|
| 7095 |
+ s := strings.Join([]string{`&UpdateSecretRequest{`,
|
|
| 7096 |
+ `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`,
|
|
| 7097 |
+ `SecretVersion:` + strings.Replace(fmt.Sprintf("%v", this.SecretVersion), "Version", "Version", 1) + `,`,
|
|
| 7098 |
+ `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "SecretSpec", "SecretSpec", 1) + `,`,
|
|
| 7099 |
+ `}`, |
|
| 7100 |
+ }, "") |
|
| 7101 |
+ return s |
|
| 7102 |
+} |
|
| 7103 |
+func (this *UpdateSecretResponse) String() string {
|
|
| 7104 |
+ if this == nil {
|
|
| 7105 |
+ return "nil" |
|
| 7106 |
+ } |
|
| 7107 |
+ s := strings.Join([]string{`&UpdateSecretResponse{`,
|
|
| 7108 |
+ `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`,
|
|
| 7109 |
+ `}`, |
|
| 7110 |
+ }, "") |
|
| 7111 |
+ return s |
|
| 7112 |
+} |
|
| 7091 | 7113 |
func (this *ListSecretsRequest) String() string {
|
| 7092 | 7114 |
if this == nil {
|
| 7093 | 7115 |
return "nil" |
| ... | ... |
@@ -12083,6 +12365,234 @@ func (m *GetSecretResponse) Unmarshal(data []byte) error {
|
| 12083 | 12083 |
} |
| 12084 | 12084 |
return nil |
| 12085 | 12085 |
} |
| 12086 |
+func (m *UpdateSecretRequest) Unmarshal(data []byte) error {
|
|
| 12087 |
+ l := len(data) |
|
| 12088 |
+ iNdEx := 0 |
|
| 12089 |
+ for iNdEx < l {
|
|
| 12090 |
+ preIndex := iNdEx |
|
| 12091 |
+ var wire uint64 |
|
| 12092 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 12093 |
+ if shift >= 64 {
|
|
| 12094 |
+ return ErrIntOverflowControl |
|
| 12095 |
+ } |
|
| 12096 |
+ if iNdEx >= l {
|
|
| 12097 |
+ return io.ErrUnexpectedEOF |
|
| 12098 |
+ } |
|
| 12099 |
+ b := data[iNdEx] |
|
| 12100 |
+ iNdEx++ |
|
| 12101 |
+ wire |= (uint64(b) & 0x7F) << shift |
|
| 12102 |
+ if b < 0x80 {
|
|
| 12103 |
+ break |
|
| 12104 |
+ } |
|
| 12105 |
+ } |
|
| 12106 |
+ fieldNum := int32(wire >> 3) |
|
| 12107 |
+ wireType := int(wire & 0x7) |
|
| 12108 |
+ if wireType == 4 {
|
|
| 12109 |
+ return fmt.Errorf("proto: UpdateSecretRequest: wiretype end group for non-group")
|
|
| 12110 |
+ } |
|
| 12111 |
+ if fieldNum <= 0 {
|
|
| 12112 |
+ return fmt.Errorf("proto: UpdateSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
| 12113 |
+ } |
|
| 12114 |
+ switch fieldNum {
|
|
| 12115 |
+ case 1: |
|
| 12116 |
+ if wireType != 2 {
|
|
| 12117 |
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType)
|
|
| 12118 |
+ } |
|
| 12119 |
+ var stringLen uint64 |
|
| 12120 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 12121 |
+ if shift >= 64 {
|
|
| 12122 |
+ return ErrIntOverflowControl |
|
| 12123 |
+ } |
|
| 12124 |
+ if iNdEx >= l {
|
|
| 12125 |
+ return io.ErrUnexpectedEOF |
|
| 12126 |
+ } |
|
| 12127 |
+ b := data[iNdEx] |
|
| 12128 |
+ iNdEx++ |
|
| 12129 |
+ stringLen |= (uint64(b) & 0x7F) << shift |
|
| 12130 |
+ if b < 0x80 {
|
|
| 12131 |
+ break |
|
| 12132 |
+ } |
|
| 12133 |
+ } |
|
| 12134 |
+ intStringLen := int(stringLen) |
|
| 12135 |
+ if intStringLen < 0 {
|
|
| 12136 |
+ return ErrInvalidLengthControl |
|
| 12137 |
+ } |
|
| 12138 |
+ postIndex := iNdEx + intStringLen |
|
| 12139 |
+ if postIndex > l {
|
|
| 12140 |
+ return io.ErrUnexpectedEOF |
|
| 12141 |
+ } |
|
| 12142 |
+ m.SecretID = string(data[iNdEx:postIndex]) |
|
| 12143 |
+ iNdEx = postIndex |
|
| 12144 |
+ case 2: |
|
| 12145 |
+ if wireType != 2 {
|
|
| 12146 |
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretVersion", wireType)
|
|
| 12147 |
+ } |
|
| 12148 |
+ var msglen int |
|
| 12149 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 12150 |
+ if shift >= 64 {
|
|
| 12151 |
+ return ErrIntOverflowControl |
|
| 12152 |
+ } |
|
| 12153 |
+ if iNdEx >= l {
|
|
| 12154 |
+ return io.ErrUnexpectedEOF |
|
| 12155 |
+ } |
|
| 12156 |
+ b := data[iNdEx] |
|
| 12157 |
+ iNdEx++ |
|
| 12158 |
+ msglen |= (int(b) & 0x7F) << shift |
|
| 12159 |
+ if b < 0x80 {
|
|
| 12160 |
+ break |
|
| 12161 |
+ } |
|
| 12162 |
+ } |
|
| 12163 |
+ if msglen < 0 {
|
|
| 12164 |
+ return ErrInvalidLengthControl |
|
| 12165 |
+ } |
|
| 12166 |
+ postIndex := iNdEx + msglen |
|
| 12167 |
+ if postIndex > l {
|
|
| 12168 |
+ return io.ErrUnexpectedEOF |
|
| 12169 |
+ } |
|
| 12170 |
+ if m.SecretVersion == nil {
|
|
| 12171 |
+ m.SecretVersion = &Version{}
|
|
| 12172 |
+ } |
|
| 12173 |
+ if err := m.SecretVersion.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
|
| 12174 |
+ return err |
|
| 12175 |
+ } |
|
| 12176 |
+ iNdEx = postIndex |
|
| 12177 |
+ case 3: |
|
| 12178 |
+ if wireType != 2 {
|
|
| 12179 |
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
|
|
| 12180 |
+ } |
|
| 12181 |
+ var msglen int |
|
| 12182 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 12183 |
+ if shift >= 64 {
|
|
| 12184 |
+ return ErrIntOverflowControl |
|
| 12185 |
+ } |
|
| 12186 |
+ if iNdEx >= l {
|
|
| 12187 |
+ return io.ErrUnexpectedEOF |
|
| 12188 |
+ } |
|
| 12189 |
+ b := data[iNdEx] |
|
| 12190 |
+ iNdEx++ |
|
| 12191 |
+ msglen |= (int(b) & 0x7F) << shift |
|
| 12192 |
+ if b < 0x80 {
|
|
| 12193 |
+ break |
|
| 12194 |
+ } |
|
| 12195 |
+ } |
|
| 12196 |
+ if msglen < 0 {
|
|
| 12197 |
+ return ErrInvalidLengthControl |
|
| 12198 |
+ } |
|
| 12199 |
+ postIndex := iNdEx + msglen |
|
| 12200 |
+ if postIndex > l {
|
|
| 12201 |
+ return io.ErrUnexpectedEOF |
|
| 12202 |
+ } |
|
| 12203 |
+ if m.Spec == nil {
|
|
| 12204 |
+ m.Spec = &SecretSpec{}
|
|
| 12205 |
+ } |
|
| 12206 |
+ if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
|
| 12207 |
+ return err |
|
| 12208 |
+ } |
|
| 12209 |
+ iNdEx = postIndex |
|
| 12210 |
+ default: |
|
| 12211 |
+ iNdEx = preIndex |
|
| 12212 |
+ skippy, err := skipControl(data[iNdEx:]) |
|
| 12213 |
+ if err != nil {
|
|
| 12214 |
+ return err |
|
| 12215 |
+ } |
|
| 12216 |
+ if skippy < 0 {
|
|
| 12217 |
+ return ErrInvalidLengthControl |
|
| 12218 |
+ } |
|
| 12219 |
+ if (iNdEx + skippy) > l {
|
|
| 12220 |
+ return io.ErrUnexpectedEOF |
|
| 12221 |
+ } |
|
| 12222 |
+ iNdEx += skippy |
|
| 12223 |
+ } |
|
| 12224 |
+ } |
|
| 12225 |
+ |
|
| 12226 |
+ if iNdEx > l {
|
|
| 12227 |
+ return io.ErrUnexpectedEOF |
|
| 12228 |
+ } |
|
| 12229 |
+ return nil |
|
| 12230 |
+} |
|
| 12231 |
+func (m *UpdateSecretResponse) Unmarshal(data []byte) error {
|
|
| 12232 |
+ l := len(data) |
|
| 12233 |
+ iNdEx := 0 |
|
| 12234 |
+ for iNdEx < l {
|
|
| 12235 |
+ preIndex := iNdEx |
|
| 12236 |
+ var wire uint64 |
|
| 12237 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 12238 |
+ if shift >= 64 {
|
|
| 12239 |
+ return ErrIntOverflowControl |
|
| 12240 |
+ } |
|
| 12241 |
+ if iNdEx >= l {
|
|
| 12242 |
+ return io.ErrUnexpectedEOF |
|
| 12243 |
+ } |
|
| 12244 |
+ b := data[iNdEx] |
|
| 12245 |
+ iNdEx++ |
|
| 12246 |
+ wire |= (uint64(b) & 0x7F) << shift |
|
| 12247 |
+ if b < 0x80 {
|
|
| 12248 |
+ break |
|
| 12249 |
+ } |
|
| 12250 |
+ } |
|
| 12251 |
+ fieldNum := int32(wire >> 3) |
|
| 12252 |
+ wireType := int(wire & 0x7) |
|
| 12253 |
+ if wireType == 4 {
|
|
| 12254 |
+ return fmt.Errorf("proto: UpdateSecretResponse: wiretype end group for non-group")
|
|
| 12255 |
+ } |
|
| 12256 |
+ if fieldNum <= 0 {
|
|
| 12257 |
+ return fmt.Errorf("proto: UpdateSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
| 12258 |
+ } |
|
| 12259 |
+ switch fieldNum {
|
|
| 12260 |
+ case 1: |
|
| 12261 |
+ if wireType != 2 {
|
|
| 12262 |
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
|
|
| 12263 |
+ } |
|
| 12264 |
+ var msglen int |
|
| 12265 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 12266 |
+ if shift >= 64 {
|
|
| 12267 |
+ return ErrIntOverflowControl |
|
| 12268 |
+ } |
|
| 12269 |
+ if iNdEx >= l {
|
|
| 12270 |
+ return io.ErrUnexpectedEOF |
|
| 12271 |
+ } |
|
| 12272 |
+ b := data[iNdEx] |
|
| 12273 |
+ iNdEx++ |
|
| 12274 |
+ msglen |= (int(b) & 0x7F) << shift |
|
| 12275 |
+ if b < 0x80 {
|
|
| 12276 |
+ break |
|
| 12277 |
+ } |
|
| 12278 |
+ } |
|
| 12279 |
+ if msglen < 0 {
|
|
| 12280 |
+ return ErrInvalidLengthControl |
|
| 12281 |
+ } |
|
| 12282 |
+ postIndex := iNdEx + msglen |
|
| 12283 |
+ if postIndex > l {
|
|
| 12284 |
+ return io.ErrUnexpectedEOF |
|
| 12285 |
+ } |
|
| 12286 |
+ if m.Secret == nil {
|
|
| 12287 |
+ m.Secret = &Secret{}
|
|
| 12288 |
+ } |
|
| 12289 |
+ if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
|
| 12290 |
+ return err |
|
| 12291 |
+ } |
|
| 12292 |
+ iNdEx = postIndex |
|
| 12293 |
+ default: |
|
| 12294 |
+ iNdEx = preIndex |
|
| 12295 |
+ skippy, err := skipControl(data[iNdEx:]) |
|
| 12296 |
+ if err != nil {
|
|
| 12297 |
+ return err |
|
| 12298 |
+ } |
|
| 12299 |
+ if skippy < 0 {
|
|
| 12300 |
+ return ErrInvalidLengthControl |
|
| 12301 |
+ } |
|
| 12302 |
+ if (iNdEx + skippy) > l {
|
|
| 12303 |
+ return io.ErrUnexpectedEOF |
|
| 12304 |
+ } |
|
| 12305 |
+ iNdEx += skippy |
|
| 12306 |
+ } |
|
| 12307 |
+ } |
|
| 12308 |
+ |
|
| 12309 |
+ if iNdEx > l {
|
|
| 12310 |
+ return io.ErrUnexpectedEOF |
|
| 12311 |
+ } |
|
| 12312 |
+ return nil |
|
| 12313 |
+} |
|
| 12086 | 12314 |
func (m *ListSecretsRequest) Unmarshal(data []byte) error {
|
| 12087 | 12315 |
l := len(data) |
| 12088 | 12316 |
iNdEx := 0 |
| ... | ... |
@@ -12903,114 +13413,117 @@ var ( |
| 12903 | 12903 |
func init() { proto.RegisterFile("control.proto", fileDescriptorControl) }
|
| 12904 | 12904 |
|
| 12905 | 12905 |
var fileDescriptorControl = []byte{
|
| 12906 |
- // 1731 bytes of a gzipped FileDescriptorProto |
|
| 12907 |
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x5a, 0x5f, 0x6f, 0x1b, 0xc5, |
|
| 12908 |
- 0x16, 0xaf, 0xed, 0x24, 0x4e, 0x8e, 0x93, 0xb4, 0x99, 0x38, 0xf7, 0x5a, 0xdb, 0x5e, 0xa7, 0xda, |
|
| 12909 |
- 0xde, 0xa6, 0x8e, 0xd4, 0xeb, 0xf4, 0xba, 0x54, 0x94, 0x22, 0xfe, 0x25, 0xa6, 0xc1, 0xfd, 0x13, |
|
| 12910 |
- 0xaa, 0x4d, 0x0b, 0xbc, 0x45, 0x8e, 0x3d, 0x0d, 0x8b, 0x1d, 0xaf, 0xd9, 0xdd, 0xa4, 0xad, 0x78, |
|
| 12911 |
- 0x01, 0x09, 0x24, 0x3e, 0x02, 0xaf, 0xbc, 0xf2, 0x80, 0xc4, 0x27, 0xe0, 0xb5, 0xe2, 0x89, 0x47, |
|
| 12912 |
- 0x9e, 0x2c, 0x6a, 0x09, 0x89, 0x27, 0xc4, 0x27, 0x40, 0x68, 0x66, 0xce, 0xec, 0xae, 0xd7, 0xb3, |
|
| 12913 |
- 0xbb, 0x76, 0x12, 0x94, 0x3e, 0x65, 0x77, 0xf6, 0x77, 0xe6, 0x9c, 0x99, 0xf3, 0x9b, 0x9f, 0xcf, |
|
| 12914 |
- 0x1c, 0x05, 0xe6, 0x1a, 0x56, 0xc7, 0xb5, 0xad, 0x76, 0xb9, 0x6b, 0x5b, 0xae, 0x45, 0x48, 0xd3, |
|
| 12915 |
- 0x6a, 0xb4, 0xa8, 0x5d, 0x76, 0x9e, 0xd4, 0xed, 0xfd, 0x96, 0xe9, 0x96, 0x0f, 0xff, 0xaf, 0xe5, |
|
| 12916 |
- 0x9c, 0x2e, 0x6d, 0x38, 0x02, 0xa0, 0xcd, 0x59, 0xbb, 0x9f, 0xd0, 0x86, 0x2b, 0x5f, 0x73, 0xee, |
|
| 12917 |
- 0xb3, 0x2e, 0x95, 0x2f, 0xf9, 0x3d, 0x6b, 0xcf, 0xe2, 0x8f, 0x6b, 0xec, 0x09, 0x47, 0x17, 0xbb, |
|
| 12918 |
- 0xed, 0x83, 0x3d, 0xb3, 0xb3, 0x26, 0xfe, 0x88, 0x41, 0xfd, 0x06, 0xcc, 0x6f, 0x52, 0x77, 0xcb, |
|
| 12919 |
- 0x6a, 0x52, 0x83, 0x7e, 0x7a, 0x40, 0x1d, 0x97, 0x5c, 0x82, 0x6c, 0xc7, 0x6a, 0xd2, 0x1d, 0xb3, |
|
| 12920 |
- 0x59, 0x48, 0x5d, 0x4c, 0x95, 0x66, 0xd6, 0xa1, 0xdf, 0x5b, 0x9e, 0x62, 0x88, 0x5a, 0xd5, 0x98, |
|
| 12921 |
- 0x62, 0x9f, 0x6a, 0x4d, 0xfd, 0x2d, 0x38, 0xeb, 0x99, 0x39, 0x5d, 0xab, 0xe3, 0x50, 0x72, 0x15, |
|
| 12922 |
- 0x26, 0xd8, 0x47, 0x6e, 0x94, 0xab, 0x14, 0xca, 0xc3, 0x0b, 0x28, 0x73, 0x3c, 0x47, 0xe9, 0xbd, |
|
| 12923 |
- 0x0c, 0x9c, 0xbb, 0x67, 0x3a, 0x7c, 0x0a, 0x47, 0xba, 0xbe, 0x0d, 0xd9, 0xc7, 0x66, 0xdb, 0xa5, |
|
| 12924 |
- 0xb6, 0x83, 0xb3, 0x5c, 0x55, 0xcd, 0x12, 0x36, 0x2b, 0xdf, 0x16, 0x36, 0x86, 0x34, 0xd6, 0xbe, |
|
| 12925 |
- 0xc8, 0x40, 0x16, 0x07, 0x49, 0x1e, 0x26, 0x3b, 0xf5, 0x7d, 0xca, 0x66, 0xcc, 0x94, 0x66, 0x0c, |
|
| 12926 |
- 0xf1, 0x42, 0xd6, 0x20, 0x67, 0x36, 0x77, 0xba, 0x36, 0x7d, 0x6c, 0x3e, 0xa5, 0x4e, 0x21, 0xcd, |
|
| 12927 |
- 0xbe, 0xad, 0xcf, 0xf7, 0x7b, 0xcb, 0x50, 0xab, 0x3e, 0xc0, 0x51, 0x03, 0xcc, 0xa6, 0x7c, 0x26, |
|
| 12928 |
- 0x0f, 0x60, 0xaa, 0x5d, 0xdf, 0xa5, 0x6d, 0xa7, 0x90, 0xb9, 0x98, 0x29, 0xe5, 0x2a, 0x37, 0xc7, |
|
| 12929 |
- 0x89, 0xac, 0x7c, 0x8f, 0x9b, 0xbe, 0xdb, 0x71, 0xed, 0x67, 0x06, 0xce, 0x43, 0x6a, 0x90, 0xdb, |
|
| 12930 |
- 0xa7, 0xfb, 0xbb, 0xd4, 0x76, 0x3e, 0x36, 0xbb, 0x4e, 0x61, 0xe2, 0x62, 0xa6, 0x34, 0x5f, 0xb9, |
|
| 12931 |
- 0x12, 0xb5, 0x6d, 0xdb, 0x5d, 0xda, 0x28, 0xdf, 0xf7, 0xf0, 0x46, 0xd0, 0x96, 0x54, 0x60, 0xd2, |
|
| 12932 |
- 0xb6, 0xda, 0xd4, 0x29, 0x4c, 0xf2, 0x49, 0x2e, 0x44, 0xee, 0xbd, 0xd5, 0xa6, 0x86, 0x80, 0x92, |
|
| 12933 |
- 0x4b, 0x30, 0xc7, 0xb6, 0xc2, 0xdf, 0x83, 0x29, 0xbe, 0x3f, 0xb3, 0x6c, 0x50, 0xae, 0x5a, 0x7b, |
|
| 12934 |
- 0x0d, 0x72, 0x81, 0xd0, 0xc9, 0x39, 0xc8, 0xb4, 0xe8, 0x33, 0x41, 0x0b, 0x83, 0x3d, 0xb2, 0xdd, |
|
| 12935 |
- 0x3d, 0xac, 0xb7, 0x0f, 0x68, 0x21, 0xcd, 0xc7, 0xc4, 0xcb, 0xad, 0xf4, 0xcd, 0x94, 0xbe, 0x01, |
|
| 12936 |
- 0x0b, 0x81, 0xed, 0x40, 0x8e, 0x94, 0x61, 0x92, 0x65, 0x5f, 0x24, 0x23, 0x8e, 0x24, 0x02, 0xa6, |
|
| 12937 |
- 0x7f, 0x97, 0x82, 0x85, 0x47, 0xdd, 0x66, 0xdd, 0xa5, 0xe3, 0x32, 0x94, 0xbc, 0x09, 0xb3, 0x1c, |
|
| 12938 |
- 0x74, 0x48, 0x6d, 0xc7, 0xb4, 0x3a, 0x3c, 0xc0, 0x5c, 0xe5, 0xbc, 0xca, 0xe3, 0x07, 0x02, 0x62, |
|
| 12939 |
- 0xe4, 0x98, 0x01, 0xbe, 0x90, 0x6b, 0x30, 0xc1, 0x8e, 0x5b, 0x21, 0xc3, 0xed, 0x2e, 0xc4, 0xe5, |
|
| 12940 |
- 0xc5, 0xe0, 0x48, 0x7d, 0x1d, 0x48, 0x30, 0xd6, 0x23, 0x1d, 0x8b, 0x2d, 0x58, 0x30, 0xe8, 0xbe, |
|
| 12941 |
- 0x75, 0x38, 0xfe, 0x7a, 0xf3, 0x30, 0xf9, 0xd8, 0xb2, 0x1b, 0x22, 0x13, 0xd3, 0x86, 0x78, 0xd1, |
|
| 12942 |
- 0xf3, 0x40, 0x82, 0xf3, 0x89, 0x98, 0xf0, 0xd0, 0x3f, 0xac, 0x3b, 0xad, 0x80, 0x0b, 0xb7, 0xee, |
|
| 12943 |
- 0xb4, 0x42, 0x2e, 0x18, 0x82, 0xb9, 0x60, 0x9f, 0xbc, 0x43, 0x2f, 0xcc, 0xfc, 0xd5, 0xb1, 0x8f, |
|
| 12944 |
- 0x71, 0xab, 0xe3, 0x78, 0x8e, 0xd2, 0x6f, 0xca, 0xd5, 0x8d, 0xed, 0xda, 0x5b, 0x47, 0xd0, 0xbb, |
|
| 12945 |
- 0xfe, 0x17, 0x8a, 0x08, 0x1b, 0x3c, 0x82, 0x88, 0x04, 0xcd, 0x86, 0x45, 0xe4, 0xdb, 0x53, 0x14, |
|
| 12946 |
- 0x11, 0x55, 0x64, 0x4a, 0x11, 0x59, 0x83, 0x9c, 0x43, 0xed, 0x43, 0xb3, 0xc1, 0xd8, 0x21, 0x44, |
|
| 12947 |
- 0x04, 0x43, 0xd8, 0x16, 0xc3, 0xb5, 0xaa, 0x63, 0x00, 0x42, 0x6a, 0x4d, 0x87, 0xac, 0xc0, 0x34, |
|
| 12948 |
- 0x72, 0x49, 0xa8, 0xc5, 0xcc, 0x7a, 0xae, 0xdf, 0x5b, 0xce, 0x0a, 0x32, 0x39, 0x46, 0x56, 0xb0, |
|
| 12949 |
- 0xc9, 0x21, 0x55, 0x98, 0x6f, 0x52, 0xc7, 0xb4, 0x69, 0x73, 0xc7, 0x71, 0xeb, 0x2e, 0xea, 0xc3, |
|
| 12950 |
- 0x7c, 0xe5, 0x3f, 0x51, 0x29, 0xde, 0x66, 0x28, 0x63, 0x0e, 0x8d, 0xf8, 0x9b, 0x42, 0x64, 0xb2, |
|
| 12906 |
+ // 1777 bytes of a gzipped FileDescriptorProto |
|
| 12907 |
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x5a, 0xcf, 0x6f, 0xdb, 0xc6, |
|
| 12908 |
+ 0x12, 0x8e, 0x24, 0xdb, 0xb2, 0x47, 0x96, 0x13, 0xaf, 0x95, 0x3c, 0x81, 0xc9, 0x93, 0x03, 0xe6, |
|
| 12909 |
+ 0xc5, 0x91, 0x81, 0x3c, 0x39, 0x4f, 0x79, 0x41, 0xd3, 0x14, 0xfd, 0x65, 0xbb, 0x71, 0x95, 0x1f, |
|
| 12910 |
+ 0x6e, 0x40, 0x27, 0x6d, 0x6f, 0x86, 0x2c, 0x6d, 0x5c, 0x56, 0xb2, 0xa8, 0x92, 0xb4, 0x93, 0xa0, |
|
| 12911 |
+ 0x97, 0x16, 0x68, 0x81, 0xfe, 0x09, 0xbd, 0xf6, 0xda, 0x02, 0x3d, 0xf7, 0xd6, 0x6b, 0xd0, 0x53, |
|
| 12912 |
+ 0x8f, 0x3d, 0x19, 0x8d, 0x80, 0x02, 0x3d, 0x15, 0xfd, 0x0b, 0x8a, 0x62, 0x77, 0x67, 0x49, 0x8a, |
|
| 12913 |
+ 0x5a, 0x92, 0x92, 0xe5, 0xc2, 0x39, 0x99, 0x5c, 0x7e, 0xb3, 0x33, 0xbb, 0xf3, 0xed, 0xa7, 0xd9, |
|
| 12914 |
+ 0x81, 0x21, 0xdf, 0xb0, 0x3a, 0xae, 0x6d, 0xb5, 0x2b, 0x5d, 0xdb, 0x72, 0x2d, 0x42, 0x9a, 0x56, |
|
| 12915 |
+ 0xa3, 0x45, 0xed, 0x8a, 0xf3, 0xa4, 0x6e, 0xef, 0xb5, 0x4c, 0xb7, 0x72, 0xf0, 0x3f, 0x2d, 0xe7, |
|
| 12916 |
+ 0x74, 0x69, 0xc3, 0x11, 0x00, 0x2d, 0x6f, 0xed, 0x7c, 0x4c, 0x1b, 0xae, 0x7c, 0xcd, 0xb9, 0xcf, |
|
| 12917 |
+ 0xba, 0x54, 0xbe, 0x14, 0x76, 0xad, 0x5d, 0x8b, 0x3f, 0xae, 0xb0, 0x27, 0x1c, 0x5d, 0xe8, 0xb6, |
|
| 12918 |
+ 0xf7, 0x77, 0xcd, 0xce, 0x8a, 0xf8, 0x23, 0x06, 0xf5, 0x1b, 0x30, 0xb7, 0x41, 0xdd, 0x4d, 0xab, |
|
| 12919 |
+ 0x49, 0x0d, 0xfa, 0xc9, 0x3e, 0x75, 0x5c, 0x72, 0x09, 0xb2, 0x1d, 0xab, 0x49, 0xb7, 0xcd, 0x66, |
|
| 12920 |
+ 0x31, 0x75, 0x31, 0x55, 0x9e, 0x59, 0x85, 0xde, 0xe1, 0xe2, 0x14, 0x43, 0xd4, 0xd6, 0x8d, 0x29, |
|
| 12921 |
+ 0xf6, 0xa9, 0xd6, 0xd4, 0xdf, 0x84, 0xd3, 0x9e, 0x99, 0xd3, 0xb5, 0x3a, 0x0e, 0x25, 0x57, 0x61, |
|
| 12922 |
+ 0x82, 0x7d, 0xe4, 0x46, 0xb9, 0x6a, 0xb1, 0x32, 0xb8, 0x80, 0x0a, 0xc7, 0x73, 0x94, 0x7e, 0x98, |
|
| 12923 |
+ 0x81, 0x33, 0xf7, 0x4c, 0x87, 0x4f, 0xe1, 0x48, 0xd7, 0xb7, 0x21, 0xfb, 0xd8, 0x6c, 0xbb, 0xd4, |
|
| 12924 |
+ 0x76, 0x70, 0x96, 0xab, 0xaa, 0x59, 0xc2, 0x66, 0x95, 0xdb, 0xc2, 0xc6, 0x90, 0xc6, 0xda, 0xe7, |
|
| 12925 |
+ 0x19, 0xc8, 0xe2, 0x20, 0x29, 0xc0, 0x64, 0xa7, 0xbe, 0x47, 0xd9, 0x8c, 0x99, 0xf2, 0x8c, 0x21, |
|
| 12926 |
+ 0x5e, 0xc8, 0x0a, 0xe4, 0xcc, 0xe6, 0x76, 0xd7, 0xa6, 0x8f, 0xcd, 0xa7, 0xd4, 0x29, 0xa6, 0xd9, |
|
| 12927 |
+ 0xb7, 0xd5, 0xb9, 0xde, 0xe1, 0x22, 0xd4, 0xd6, 0x1f, 0xe0, 0xa8, 0x01, 0x66, 0x53, 0x3e, 0x93, |
|
| 12928 |
+ 0x07, 0x30, 0xd5, 0xae, 0xef, 0xd0, 0xb6, 0x53, 0xcc, 0x5c, 0xcc, 0x94, 0x73, 0xd5, 0x9b, 0xa3, |
|
| 12929 |
+ 0x44, 0x56, 0xb9, 0xc7, 0x4d, 0xdf, 0xe9, 0xb8, 0xf6, 0x33, 0x03, 0xe7, 0x21, 0x35, 0xc8, 0xed, |
|
| 12930 |
+ 0xd1, 0xbd, 0x1d, 0x6a, 0x3b, 0x1f, 0x99, 0x5d, 0xa7, 0x38, 0x71, 0x31, 0x53, 0x9e, 0xab, 0x5e, |
|
| 12931 |
+ 0x89, 0xda, 0xb6, 0xad, 0x2e, 0x6d, 0x54, 0xee, 0x7b, 0x78, 0x23, 0x68, 0x4b, 0xaa, 0x30, 0x69, |
|
| 12932 |
+ 0x5b, 0x6d, 0xea, 0x14, 0x27, 0xf9, 0x24, 0x17, 0x22, 0xf7, 0xde, 0x6a, 0x53, 0x43, 0x40, 0xc9, |
|
| 12933 |
+ 0x25, 0xc8, 0xb3, 0xad, 0xf0, 0xf7, 0x60, 0x8a, 0xef, 0xcf, 0x2c, 0x1b, 0x94, 0xab, 0xd6, 0x5e, |
|
| 12934 |
+ 0x85, 0x5c, 0x20, 0x74, 0x72, 0x06, 0x32, 0x2d, 0xfa, 0x4c, 0xd0, 0xc2, 0x60, 0x8f, 0x6c, 0x77, |
|
| 12935 |
+ 0x0f, 0xea, 0xed, 0x7d, 0x5a, 0x4c, 0xf3, 0x31, 0xf1, 0x72, 0x2b, 0x7d, 0x33, 0xa5, 0xaf, 0xc1, |
|
| 12936 |
+ 0x7c, 0x60, 0x3b, 0x90, 0x23, 0x15, 0x98, 0x64, 0xd9, 0x17, 0xc9, 0x88, 0x23, 0x89, 0x80, 0xe9, |
|
| 12937 |
+ 0xdf, 0xa6, 0x60, 0xfe, 0x51, 0xb7, 0x59, 0x77, 0xe9, 0xa8, 0x0c, 0x25, 0x6f, 0xc0, 0x2c, 0x07, |
|
| 12938 |
+ 0x1d, 0x50, 0xdb, 0x31, 0xad, 0x0e, 0x0f, 0x30, 0x57, 0x3d, 0xaf, 0xf2, 0xf8, 0xbe, 0x80, 0x18, |
|
| 12939 |
+ 0x39, 0x66, 0x80, 0x2f, 0xe4, 0x1a, 0x4c, 0xb0, 0xe3, 0x56, 0xcc, 0x70, 0xbb, 0x0b, 0x71, 0x79, |
|
| 12940 |
+ 0x31, 0x38, 0x52, 0x5f, 0x05, 0x12, 0x8c, 0xf5, 0x48, 0xc7, 0x62, 0x13, 0xe6, 0x0d, 0xba, 0x67, |
|
| 12941 |
+ 0x1d, 0x8c, 0xbe, 0xde, 0x02, 0x4c, 0x3e, 0xb6, 0xec, 0x86, 0xc8, 0xc4, 0xb4, 0x21, 0x5e, 0xf4, |
|
| 12942 |
+ 0x02, 0x90, 0xe0, 0x7c, 0x22, 0x26, 0x3c, 0xf4, 0x0f, 0xeb, 0x4e, 0x2b, 0xe0, 0xc2, 0xad, 0x3b, |
|
| 12943 |
+ 0xad, 0x90, 0x0b, 0x86, 0x60, 0x2e, 0xd8, 0x27, 0xef, 0xd0, 0x0b, 0x33, 0x7f, 0x75, 0xec, 0x63, |
|
| 12944 |
+ 0xdc, 0xea, 0x38, 0x9e, 0xa3, 0xf4, 0x9b, 0x72, 0x75, 0x23, 0xbb, 0xf6, 0xd6, 0x11, 0xf4, 0xae, |
|
| 12945 |
+ 0xff, 0x85, 0x22, 0xc2, 0x06, 0x8f, 0x20, 0x22, 0x41, 0xb3, 0x41, 0x11, 0xf9, 0xe6, 0x04, 0x45, |
|
| 12946 |
+ 0x44, 0x15, 0x99, 0x52, 0x44, 0x56, 0x20, 0xe7, 0x50, 0xfb, 0xc0, 0x6c, 0x30, 0x76, 0x08, 0x11, |
|
| 12947 |
+ 0xc1, 0x10, 0xb6, 0xc4, 0x70, 0x6d, 0xdd, 0x31, 0x00, 0x21, 0xb5, 0xa6, 0x43, 0x96, 0x60, 0x1a, |
|
| 12948 |
+ 0xb9, 0x24, 0xd4, 0x62, 0x66, 0x35, 0xd7, 0x3b, 0x5c, 0xcc, 0x0a, 0x32, 0x39, 0x46, 0x56, 0xb0, |
|
| 12949 |
+ 0xc9, 0x21, 0xeb, 0x30, 0xd7, 0xa4, 0x8e, 0x69, 0xd3, 0xe6, 0xb6, 0xe3, 0xd6, 0x5d, 0xd4, 0x87, |
|
| 12950 |
+ 0xb9, 0xea, 0xbf, 0xa3, 0x52, 0xbc, 0xc5, 0x50, 0x46, 0x1e, 0x8d, 0xf8, 0x9b, 0x42, 0x64, 0xb2, |
|
| 12951 | 12951 |
0xff, 0x88, 0xc8, 0xe0, 0x76, 0xf9, 0x22, 0xc3, 0x58, 0x13, 0x2b, 0x32, 0x9c, 0x46, 0x02, 0xa6, |
| 12952 |
- 0xdf, 0x85, 0xfc, 0x86, 0x4d, 0xeb, 0x2e, 0xc5, 0x2d, 0x93, 0x44, 0xba, 0x8e, 0x0a, 0x20, 0x58, |
|
| 12953 |
- 0xb4, 0xac, 0x9a, 0x06, 0x2d, 0x02, 0x22, 0xb0, 0x05, 0x4b, 0xa1, 0xc9, 0x30, 0xaa, 0x1b, 0x90, |
|
| 12954 |
- 0xc5, 0x34, 0xe0, 0x84, 0xe7, 0x63, 0x26, 0x34, 0x24, 0x56, 0x7f, 0x07, 0x16, 0x36, 0xa9, 0x1b, |
|
| 12955 |
- 0x8a, 0xec, 0x2a, 0x80, 0x9f, 0x75, 0x3c, 0x35, 0x73, 0xfd, 0xde, 0xf2, 0x8c, 0x97, 0x74, 0x63, |
|
| 12956 |
- 0xc6, 0xcb, 0xb9, 0x7e, 0x17, 0x48, 0x70, 0x8a, 0xe3, 0xc5, 0xf3, 0x63, 0x0a, 0xf2, 0x42, 0xe5, |
|
| 12957 |
- 0x8e, 0x13, 0x13, 0xa9, 0xc2, 0x59, 0x89, 0x1e, 0x43, 0xa0, 0xe7, 0xd1, 0x46, 0x6a, 0xf4, 0xf5, |
|
| 12958 |
- 0x01, 0x8d, 0x1e, 0x3d, 0x43, 0xa1, 0x05, 0x1c, 0x6f, 0x47, 0xaa, 0x90, 0x17, 0xd2, 0x74, 0xac, |
|
| 12959 |
- 0x24, 0xfd, 0x1b, 0x96, 0x42, 0xb3, 0xa0, 0xc6, 0xfd, 0x9e, 0x86, 0x45, 0xc6, 0x71, 0x1c, 0xf7, |
|
| 12960 |
- 0x64, 0xae, 0x16, 0x96, 0xb9, 0xb5, 0x28, 0x31, 0x09, 0x59, 0x0e, 0x2b, 0xdd, 0x57, 0xe9, 0x13, |
|
| 12961 |
- 0x57, 0xba, 0xed, 0x90, 0xd2, 0xbd, 0x3e, 0x66, 0x70, 0x4a, 0xb1, 0x1b, 0x52, 0x93, 0x89, 0x93, |
|
| 12962 |
- 0x55, 0x93, 0xf7, 0x21, 0x3f, 0x18, 0x12, 0x12, 0xe3, 0x55, 0x98, 0xc6, 0x44, 0x49, 0x4d, 0x89, |
|
| 12963 |
- 0x65, 0x86, 0x07, 0xf6, 0x95, 0x65, 0x8b, 0xba, 0x4f, 0x2c, 0xbb, 0x35, 0x86, 0xb2, 0xa0, 0x85, |
|
| 12952 |
+ 0xdf, 0x85, 0xc2, 0x9a, 0x4d, 0xeb, 0x2e, 0xc5, 0x2d, 0x93, 0x44, 0xba, 0x8e, 0x0a, 0x20, 0x58, |
|
| 12953 |
+ 0xb4, 0xa8, 0x9a, 0x06, 0x2d, 0x02, 0x22, 0xb0, 0x09, 0x67, 0x43, 0x93, 0x61, 0x54, 0x37, 0x20, |
|
| 12954 |
+ 0x8b, 0x69, 0xc0, 0x09, 0xcf, 0xc7, 0x4c, 0x68, 0x48, 0xac, 0xfe, 0x36, 0xcc, 0x6f, 0x50, 0x37, |
|
| 12955 |
+ 0x14, 0xd9, 0x55, 0x00, 0x3f, 0xeb, 0x78, 0x6a, 0xf2, 0xbd, 0xc3, 0xc5, 0x19, 0x2f, 0xe9, 0xc6, |
|
| 12956 |
+ 0x8c, 0x97, 0x73, 0xfd, 0x2e, 0x90, 0xe0, 0x14, 0xe3, 0xc5, 0xf3, 0x63, 0x0a, 0x0a, 0x42, 0xe5, |
|
| 12957 |
+ 0xc6, 0x89, 0x89, 0xac, 0xc3, 0x69, 0x89, 0x1e, 0x41, 0xa0, 0xe7, 0xd0, 0x46, 0x6a, 0xf4, 0xf5, |
|
| 12958 |
+ 0x3e, 0x8d, 0x1e, 0x3e, 0x43, 0xa1, 0x05, 0x8c, 0xb7, 0x23, 0xeb, 0x50, 0x10, 0xd2, 0x34, 0x56, |
|
| 12959 |
+ 0x92, 0xfe, 0x05, 0x67, 0x43, 0xb3, 0xa0, 0xc6, 0xfd, 0x9e, 0x86, 0x05, 0xc6, 0x71, 0x1c, 0xf7, |
|
| 12960 |
+ 0x64, 0xae, 0x16, 0x96, 0xb9, 0x95, 0x28, 0x31, 0x09, 0x59, 0x0e, 0x2a, 0xdd, 0x97, 0xe9, 0x63, |
|
| 12961 |
+ 0x57, 0xba, 0xad, 0x90, 0xd2, 0xbd, 0x36, 0x62, 0x70, 0x4a, 0xb1, 0x1b, 0x50, 0x93, 0x89, 0xe3, |
|
| 12962 |
+ 0x55, 0x93, 0xf7, 0xa0, 0xd0, 0x1f, 0x12, 0x12, 0xe3, 0x15, 0x98, 0xc6, 0x44, 0x49, 0x4d, 0x89, |
|
| 12963 |
+ 0x65, 0x86, 0x07, 0xf6, 0x95, 0x65, 0x93, 0xba, 0x4f, 0x2c, 0xbb, 0x35, 0x82, 0xb2, 0xa0, 0x85, |
|
| 12964 | 12964 |
0x4a, 0x59, 0xbc, 0xc9, 0x7c, 0xde, 0x76, 0xc4, 0x50, 0x1c, 0x6f, 0xa5, 0x95, 0xc4, 0xea, 0x8f, |
| 12965 | 12965 |
0xb8, 0xb2, 0x84, 0x22, 0x23, 0x30, 0xc1, 0x76, 0x13, 0xf7, 0x8b, 0x3f, 0x33, 0x22, 0xa3, 0x0d, |
| 12966 |
- 0x23, 0x72, 0xda, 0x27, 0x32, 0xda, 0x32, 0x22, 0x23, 0xc0, 0x53, 0x9b, 0x13, 0x8a, 0xf1, 0x23, |
|
| 12967 |
- 0x79, 0xb6, 0x4e, 0x3c, 0x4c, 0xef, 0xbc, 0x85, 0x22, 0xf5, 0xce, 0x1b, 0x8e, 0x1f, 0xe1, 0xbc, |
|
| 12968 |
- 0x85, 0x2c, 0x5f, 0xae, 0xf3, 0x16, 0x11, 0xdc, 0x69, 0x9e, 0x37, 0x3f, 0x24, 0xff, 0xbc, 0x61, |
|
| 12969 |
- 0xa2, 0x62, 0xcf, 0x9b, 0xcc, 0x9c, 0x07, 0xc6, 0x1f, 0xcb, 0x8d, 0xf6, 0x81, 0xe3, 0x52, 0x3b, |
|
| 12966 |
+ 0x23, 0x72, 0xda, 0x27, 0x32, 0xda, 0x32, 0x22, 0x23, 0xc0, 0x53, 0x9b, 0x63, 0x8a, 0xf1, 0x43, |
|
| 12967 |
+ 0x79, 0xb6, 0x8e, 0x3d, 0x4c, 0xef, 0xbc, 0x85, 0x22, 0xf5, 0xce, 0x1b, 0x8e, 0x1f, 0xe1, 0xbc, |
|
| 12968 |
+ 0x85, 0x2c, 0x5f, 0xae, 0xf3, 0x16, 0x11, 0xdc, 0x49, 0x9e, 0x37, 0x3f, 0x24, 0xff, 0xbc, 0x61, |
|
| 12969 |
+ 0xa2, 0x62, 0xcf, 0x9b, 0xcc, 0x9c, 0x07, 0xc6, 0x1f, 0xcb, 0xb5, 0xf6, 0xbe, 0xe3, 0x52, 0x3b, |
|
| 12970 | 12970 |
0xa0, 0xc3, 0x0d, 0x31, 0x12, 0xd2, 0x61, 0xc4, 0x31, 0x5e, 0x20, 0xc0, 0xa3, 0xaf, 0x37, 0x85, |
| 12971 | 12971 |
0x4f, 0x5f, 0x84, 0xc4, 0xd1, 0x57, 0x5a, 0x49, 0xac, 0xc7, 0x25, 0xfc, 0x70, 0x04, 0x2e, 0x85, |
| 12972 |
- 0x2c, 0x5f, 0x2e, 0x2e, 0x45, 0x04, 0x77, 0x9a, 0x5c, 0xf2, 0x43, 0xf2, 0xb9, 0x84, 0xd9, 0x88, |
|
| 12973 |
- 0xe5, 0x92, 0x4c, 0x9d, 0x07, 0xd6, 0x0f, 0x60, 0xe1, 0x8e, 0x65, 0x76, 0x1e, 0x5a, 0x2d, 0xda, |
|
| 12974 |
- 0x31, 0x2c, 0xb7, 0xee, 0xb2, 0x82, 0xa3, 0x0c, 0x8b, 0x36, 0x7b, 0xa6, 0x3b, 0x8c, 0x70, 0xd4, |
|
| 12975 |
- 0xde, 0x71, 0xd9, 0x67, 0x1e, 0xe1, 0xb4, 0xb1, 0x20, 0x3e, 0x7d, 0xc8, 0xbf, 0x70, 0x3b, 0x72, |
|
| 12976 |
- 0x0d, 0xf2, 0x88, 0xdf, 0xaf, 0x77, 0xea, 0x7b, 0x9e, 0x81, 0xb8, 0xa3, 0x11, 0xf1, 0xed, 0xbe, |
|
| 12977 |
- 0xf8, 0xc4, 0x2d, 0xf4, 0xaf, 0xd3, 0xb2, 0xbe, 0x3a, 0x0e, 0x8d, 0x59, 0x7d, 0x25, 0xd1, 0xe3, |
|
| 12978 |
- 0xd4, 0x57, 0x68, 0x33, 0x46, 0x7d, 0x85, 0xde, 0xfd, 0xdf, 0x29, 0xb2, 0x09, 0xd3, 0x36, 0xee, |
|
| 12979 |
- 0x57, 0x61, 0x82, 0x1b, 0x5e, 0x56, 0x19, 0x0e, 0x6d, 0xee, 0xfa, 0xc4, 0xf3, 0xde, 0xf2, 0x19, |
|
| 12980 |
- 0xc3, 0x33, 0xf6, 0x0b, 0xb5, 0x13, 0x3a, 0x8d, 0x6f, 0xc0, 0x39, 0x5e, 0x07, 0x37, 0x6c, 0xea, |
|
| 12981 |
- 0xca, 0x5d, 0x5d, 0x85, 0x19, 0x87, 0x0f, 0xf8, 0x9b, 0x3a, 0xdb, 0xef, 0x2d, 0x4f, 0x0b, 0x54, |
|
| 12982 |
- 0xad, 0xca, 0x7e, 0xcc, 0xf9, 0x53, 0x53, 0xdf, 0xc4, 0x4a, 0x5c, 0x98, 0x63, 0x28, 0x15, 0x98, |
|
| 12983 |
- 0x12, 0x00, 0x8c, 0x44, 0x53, 0x17, 0x06, 0xdc, 0x06, 0x91, 0xfa, 0x6f, 0x69, 0x20, 0xa2, 0xce, |
|
| 12984 |
- 0x60, 0xaf, 0x9e, 0x28, 0xbc, 0x17, 0x16, 0x85, 0x72, 0x74, 0xcd, 0x14, 0x34, 0x1c, 0xd6, 0x84, |
|
| 12985 |
- 0x2f, 0x4f, 0x5e, 0x13, 0x8c, 0x90, 0x26, 0xdc, 0x1a, 0x2f, 0xb6, 0x53, 0x91, 0x84, 0xbb, 0xb2, |
|
| 12986 |
- 0x70, 0xc6, 0x88, 0x30, 0x65, 0xaf, 0xb0, 0x32, 0x9f, 0x0f, 0xa1, 0x20, 0xc4, 0xe5, 0x4c, 0x42, |
|
| 12987 |
- 0xf5, 0x1a, 0x2c, 0xca, 0x7b, 0x5d, 0x90, 0x3f, 0x95, 0x81, 0x4a, 0xae, 0x18, 0x3d, 0x53, 0xa0, |
|
| 12988 |
- 0x90, 0xbb, 0xe3, 0xdf, 0x37, 0x8f, 0xcd, 0xa5, 0xb7, 0x61, 0x51, 0x5e, 0x1b, 0x8e, 0x48, 0xeb, |
|
| 12989 |
- 0x7f, 0xf9, 0xd7, 0x97, 0x60, 0x34, 0x95, 0x1f, 0x96, 0x20, 0xbb, 0x21, 0x5a, 0xd2, 0xc4, 0x84, |
|
| 12990 |
- 0x2c, 0x76, 0x7b, 0x89, 0xae, 0x0a, 0x6a, 0xb0, 0x83, 0xac, 0x5d, 0x8a, 0xc5, 0x60, 0x9d, 0xb5, |
|
| 12991 |
- 0xf4, 0xd3, 0xf7, 0x7f, 0x7c, 0x93, 0x3e, 0x0b, 0x73, 0x1c, 0xf4, 0x3f, 0xd4, 0x47, 0x62, 0xc1, |
|
| 12992 |
- 0x8c, 0xd7, 0x36, 0x24, 0xff, 0x1d, 0xa5, 0xc9, 0xaa, 0x5d, 0x4e, 0x40, 0xc5, 0x3b, 0xb4, 0x01, |
|
| 12993 |
- 0xfc, 0xae, 0x1d, 0x51, 0xce, 0x35, 0xd4, 0x81, 0xd4, 0x56, 0x92, 0x60, 0x89, 0x3e, 0xfd, 0xae, |
|
| 12994 |
- 0x9c, 0xda, 0xe7, 0x50, 0x17, 0x50, 0xed, 0x53, 0xd1, 0xdc, 0x8b, 0xf0, 0x29, 0x72, 0xf8, 0xb0, |
|
| 12995 |
- 0xee, 0xb4, 0x22, 0x73, 0x18, 0xe8, 0xca, 0x45, 0xe6, 0x70, 0xa0, 0xff, 0x16, 0x9f, 0x43, 0xde, |
|
| 12996 |
- 0x95, 0x89, 0xce, 0x61, 0xb0, 0xc7, 0x15, 0x9d, 0xc3, 0x81, 0xd6, 0x4e, 0xe2, 0x7e, 0xf2, 0xe5, |
|
| 12997 |
- 0xc5, 0xec, 0x67, 0x70, 0x85, 0x2b, 0x49, 0xb0, 0x44, 0x9f, 0x7e, 0x57, 0x45, 0xed, 0x73, 0xa8, |
|
| 12998 |
- 0x71, 0xa3, 0xf6, 0x39, 0xdc, 0x9c, 0x89, 0xf2, 0xf9, 0x14, 0x66, 0x83, 0x17, 0x54, 0x72, 0x65, |
|
| 12999 |
- 0xc4, 0x5b, 0xb5, 0x56, 0x4a, 0x06, 0xc6, 0x7b, 0xfe, 0x0c, 0xe6, 0x06, 0xda, 0x5a, 0x44, 0x39, |
|
| 13000 |
- 0xa3, 0xaa, 0x8d, 0xa6, 0xad, 0x8e, 0x80, 0x4c, 0x74, 0x3e, 0xd0, 0xb1, 0x51, 0x3b, 0x57, 0x75, |
|
| 13001 |
- 0xa5, 0xd4, 0xce, 0x95, 0xed, 0x9f, 0x18, 0xe7, 0x03, 0x8d, 0x19, 0xb5, 0x73, 0x55, 0x07, 0x48, |
|
| 13002 |
- 0xed, 0x5c, 0xdd, 0xe5, 0x89, 0x25, 0x19, 0x5e, 0x74, 0x22, 0x49, 0x36, 0x78, 0x39, 0x8e, 0x24, |
|
| 13003 |
- 0x59, 0xf8, 0xa6, 0x1b, 0x4f, 0x32, 0x79, 0x2b, 0x8b, 0x26, 0x59, 0xe8, 0x2a, 0x19, 0x4d, 0xb2, |
|
| 13004 |
- 0xf0, 0x05, 0x2f, 0x91, 0x64, 0x72, 0xc1, 0x31, 0x24, 0x0b, 0xad, 0x79, 0x75, 0x04, 0xe4, 0x88, |
|
| 13005 |
- 0x79, 0x8e, 0x75, 0xae, 0xea, 0x46, 0xc4, 0xe5, 0x79, 0x44, 0xe7, 0x22, 0xcf, 0x58, 0xb1, 0x46, |
|
| 13006 |
- 0xe6, 0x79, 0xf0, 0x46, 0x10, 0x99, 0xe7, 0x50, 0xb9, 0x9c, 0x90, 0x67, 0x79, 0x63, 0x8a, 0xce, |
|
| 13007 |
- 0x73, 0xe8, 0x9a, 0x17, 0x9d, 0xe7, 0xf0, 0xe5, 0x2b, 0xf1, 0x3c, 0xcb, 0x05, 0xc7, 0x9c, 0xe7, |
|
| 13008 |
- 0xd0, 0x9a, 0x57, 0x47, 0x40, 0x26, 0xfe, 0x38, 0x79, 0x65, 0xbc, 0xfa, 0xc7, 0x29, 0x7c, 0x49, |
|
| 13009 |
- 0xd0, 0x2e, 0x27, 0xa0, 0xe2, 0x1d, 0x1e, 0x40, 0x2e, 0x50, 0x86, 0x92, 0x95, 0xd1, 0x2a, 0x67, |
|
| 13010 |
- 0xed, 0x4a, 0x22, 0x2e, 0x31, 0xbd, 0xc1, 0x2a, 0x53, 0x9d, 0x5e, 0x45, 0x49, 0xab, 0x95, 0x92, |
|
| 13011 |
- 0x81, 0x89, 0x9e, 0x83, 0x15, 0xa5, 0xda, 0xb3, 0xa2, 0x6a, 0xd5, 0x4a, 0xc9, 0xc0, 0x58, 0xcf, |
|
| 13012 |
- 0xeb, 0x17, 0x9e, 0xbf, 0x28, 0x9e, 0xf9, 0xe5, 0x45, 0xf1, 0xcc, 0x9f, 0x2f, 0x8a, 0xa9, 0xcf, |
|
| 13013 |
- 0xfb, 0xc5, 0xd4, 0xf3, 0x7e, 0x31, 0xf5, 0x73, 0xbf, 0x98, 0xfa, 0xb5, 0x5f, 0x4c, 0xed, 0x4e, |
|
| 13014 |
- 0xf1, 0xff, 0x78, 0xb8, 0xfe, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xde, 0x36, 0x11, 0xa9, 0x6a, |
|
| 13015 |
- 0x21, 0x00, 0x00, |
|
| 12972 |
+ 0x2c, 0x5f, 0x2e, 0x2e, 0x45, 0x04, 0x77, 0x92, 0x5c, 0xf2, 0x43, 0xf2, 0xb9, 0x84, 0xd9, 0x88, |
|
| 12973 |
+ 0xe5, 0x92, 0x4c, 0x9d, 0x07, 0xd6, 0xf7, 0x61, 0xfe, 0x8e, 0x65, 0x76, 0x1e, 0x5a, 0x2d, 0xda, |
|
| 12974 |
+ 0x31, 0x2c, 0xb7, 0xee, 0xb2, 0x82, 0xa3, 0x02, 0x0b, 0x36, 0x7b, 0xa6, 0xdb, 0x8c, 0x70, 0xd4, |
|
| 12975 |
+ 0xde, 0x76, 0xd9, 0x67, 0x1e, 0xe1, 0xb4, 0x31, 0x2f, 0x3e, 0x7d, 0xc0, 0xbf, 0x70, 0x3b, 0x72, |
|
| 12976 |
+ 0x0d, 0x0a, 0x88, 0xdf, 0xab, 0x77, 0xea, 0xbb, 0x9e, 0x81, 0xb8, 0xa3, 0x11, 0xf1, 0xed, 0xbe, |
|
| 12977 |
+ 0xf8, 0xc4, 0x2d, 0xf4, 0xaf, 0xd2, 0xb2, 0xbe, 0x1a, 0x87, 0xc6, 0xac, 0xbe, 0x92, 0xe8, 0x51, |
|
| 12978 |
+ 0xea, 0x2b, 0xb4, 0x19, 0xa1, 0xbe, 0x42, 0xef, 0xfe, 0xef, 0x14, 0xd9, 0x80, 0x69, 0x1b, 0xf7, |
|
| 12979 |
+ 0xab, 0x38, 0xc1, 0x0d, 0x2f, 0xab, 0x0c, 0x07, 0x36, 0x77, 0x75, 0xe2, 0xf9, 0xe1, 0xe2, 0x29, |
|
| 12980 |
+ 0xc3, 0x33, 0xf6, 0x0b, 0xb5, 0x63, 0x3a, 0x8d, 0xaf, 0xc3, 0x19, 0x5e, 0x07, 0x37, 0x6c, 0xea, |
|
| 12981 |
+ 0xca, 0x5d, 0x5d, 0x86, 0x19, 0x87, 0x0f, 0xf8, 0x9b, 0x3a, 0xdb, 0x3b, 0x5c, 0x9c, 0x16, 0xa8, |
|
| 12982 |
+ 0xda, 0x3a, 0xfb, 0x31, 0xe7, 0x4f, 0x4d, 0x7d, 0x03, 0x2b, 0x71, 0x61, 0x8e, 0xa1, 0x54, 0x61, |
|
| 12983 |
+ 0x4a, 0x00, 0x30, 0x12, 0x4d, 0x5d, 0x18, 0x70, 0x1b, 0x44, 0xea, 0x3f, 0xa4, 0x60, 0x41, 0x56, |
|
| 12984 |
+ 0xa0, 0x47, 0x8b, 0x85, 0xac, 0xc2, 0x1c, 0x42, 0x47, 0xc8, 0x6e, 0x5e, 0x98, 0xc8, 0xe4, 0x56, |
|
| 12985 |
+ 0xfb, 0x92, 0x5b, 0x8a, 0x0e, 0x3c, 0x50, 0x83, 0xdc, 0xf1, 0x8b, 0xff, 0xb1, 0xb7, 0xe1, 0xb7, |
|
| 12986 |
+ 0x34, 0x10, 0x51, 0x6e, 0xb1, 0x57, 0x4f, 0x1b, 0xdf, 0x0d, 0x6b, 0x63, 0x25, 0xba, 0x74, 0x0c, |
|
| 12987 |
+ 0x1a, 0x0e, 0x4a, 0xe3, 0x17, 0xc7, 0x2f, 0x8d, 0x46, 0x48, 0x1a, 0x6f, 0x8d, 0x16, 0xdb, 0x89, |
|
| 12988 |
+ 0x28, 0xe3, 0x5d, 0x79, 0x7f, 0xc0, 0x88, 0x30, 0x65, 0xff, 0x67, 0xb7, 0x1d, 0x3e, 0x84, 0xba, |
|
| 12989 |
+ 0x18, 0x97, 0x33, 0x09, 0xd5, 0x6b, 0xb0, 0x20, 0xaf, 0xb7, 0x41, 0xea, 0x56, 0xfb, 0x0a, 0xda, |
|
| 12990 |
+ 0xa1, 0xb9, 0xd4, 0x3f, 0xd5, 0x18, 0x5c, 0x7a, 0x0b, 0x16, 0xe4, 0xed, 0xe9, 0x88, 0xa7, 0xfb, |
|
| 12991 |
+ 0x9c, 0x7f, 0x8b, 0x0b, 0x46, 0x53, 0xfd, 0xee, 0x1c, 0x64, 0xd7, 0x44, 0x67, 0x9e, 0x98, 0x90, |
|
| 12992 |
+ 0xc5, 0xa6, 0x37, 0xd1, 0x55, 0x41, 0xf5, 0x37, 0xd2, 0xb5, 0x4b, 0xb1, 0x18, 0x2c, 0x37, 0xcf, |
|
| 12993 |
+ 0xfe, 0xf4, 0xfd, 0x1f, 0x5f, 0xa7, 0x4f, 0x43, 0x9e, 0x83, 0xfe, 0x8b, 0x3f, 0x13, 0xc4, 0x82, |
|
| 12994 |
+ 0x19, 0xaf, 0x7b, 0x4a, 0xfe, 0x33, 0x4c, 0xaf, 0x59, 0xbb, 0x9c, 0x80, 0x8a, 0x77, 0x68, 0x03, |
|
| 12995 |
+ 0xf8, 0xcd, 0x4b, 0xa2, 0x9c, 0x6b, 0xa0, 0x11, 0xab, 0x2d, 0x25, 0xc1, 0x12, 0x7d, 0xfa, 0xcd, |
|
| 12996 |
+ 0x49, 0xb5, 0xcf, 0x81, 0x66, 0xa8, 0xda, 0xa7, 0xa2, 0xc7, 0x19, 0xe1, 0x53, 0xe4, 0xf0, 0x61, |
|
| 12997 |
+ 0xdd, 0x69, 0x45, 0xe6, 0x30, 0xd0, 0x9c, 0x8c, 0xcc, 0x61, 0x5f, 0x1b, 0x32, 0x3e, 0x87, 0xbc, |
|
| 12998 |
+ 0x39, 0x15, 0x9d, 0xc3, 0x60, 0xab, 0x2f, 0x3a, 0x87, 0x7d, 0x1d, 0xae, 0xc4, 0xfd, 0xe4, 0xcb, |
|
| 12999 |
+ 0x8b, 0xd9, 0xcf, 0xe0, 0x0a, 0x97, 0x92, 0x60, 0x89, 0x3e, 0xfd, 0xe6, 0x92, 0xda, 0xe7, 0x40, |
|
| 13000 |
+ 0xff, 0x4a, 0xed, 0x73, 0xb0, 0x47, 0x15, 0xe5, 0xf3, 0x29, 0xcc, 0x06, 0xef, 0xe9, 0xe4, 0xca, |
|
| 13001 |
+ 0x90, 0xcd, 0x05, 0xad, 0x9c, 0x0c, 0x8c, 0xf7, 0xfc, 0x29, 0xe4, 0xfb, 0xba, 0x7b, 0x44, 0x39, |
|
| 13002 |
+ 0xa3, 0xaa, 0x9b, 0xa8, 0x2d, 0x0f, 0x81, 0x4c, 0x74, 0xde, 0xd7, 0xb8, 0x52, 0x3b, 0x57, 0x35, |
|
| 13003 |
+ 0xe7, 0xd4, 0xce, 0x95, 0x5d, 0xb0, 0x18, 0xe7, 0x7d, 0xfd, 0x29, 0xb5, 0x73, 0x55, 0x23, 0x4c, |
|
| 13004 |
+ 0xed, 0x5c, 0xdd, 0xec, 0x8a, 0x25, 0x19, 0xde, 0xf7, 0x22, 0x49, 0xd6, 0xdf, 0x23, 0x88, 0x24, |
|
| 13005 |
+ 0x59, 0xf8, 0xc2, 0x1f, 0x4f, 0x32, 0x79, 0x39, 0x8d, 0x26, 0x59, 0xe8, 0x46, 0x1d, 0x4d, 0xb2, |
|
| 13006 |
+ 0xf0, 0x3d, 0x37, 0x91, 0x64, 0x72, 0xc1, 0x31, 0x24, 0x0b, 0xad, 0x79, 0x79, 0x08, 0xe4, 0x90, |
|
| 13007 |
+ 0x79, 0x8e, 0x75, 0xae, 0x6a, 0xca, 0xc4, 0xe5, 0x79, 0x48, 0xe7, 0x22, 0xcf, 0x58, 0xb8, 0x47, |
|
| 13008 |
+ 0xe6, 0xb9, 0xff, 0x62, 0x14, 0x99, 0xe7, 0xd0, 0xad, 0x21, 0x21, 0xcf, 0xf2, 0xe2, 0x18, 0x9d, |
|
| 13009 |
+ 0xe7, 0xd0, 0x6d, 0x37, 0x3a, 0xcf, 0xe1, 0x3b, 0x68, 0xe2, 0x79, 0x96, 0x0b, 0x8e, 0x39, 0xcf, |
|
| 13010 |
+ 0xa1, 0x35, 0x2f, 0x0f, 0x81, 0x4c, 0xfc, 0x71, 0xf2, 0x6e, 0x33, 0xea, 0x1f, 0xa7, 0xf0, 0x5d, |
|
| 13011 |
+ 0x49, 0xbb, 0x9c, 0x80, 0x4a, 0xdc, 0xe7, 0xe0, 0xd5, 0x41, 0xbd, 0xcf, 0x8a, 0x6b, 0x91, 0x56, |
|
| 13012 |
+ 0x4e, 0x06, 0xc6, 0x7b, 0xde, 0x87, 0x5c, 0xa0, 0x00, 0x26, 0x4b, 0xc3, 0xd5, 0xec, 0xda, 0x95, |
|
| 13013 |
+ 0x44, 0x5c, 0xe2, 0x82, 0x83, 0xf5, 0xad, 0x7a, 0xc1, 0x8a, 0x62, 0x5a, 0x2b, 0x27, 0x03, 0x13, |
|
| 13014 |
+ 0x3d, 0x07, 0x6b, 0x59, 0xb5, 0x67, 0x45, 0xbd, 0xac, 0x95, 0x93, 0x81, 0xb1, 0x9e, 0x57, 0x2f, |
|
| 13015 |
+ 0x3c, 0x7f, 0x51, 0x3a, 0xf5, 0xcb, 0x8b, 0xd2, 0xa9, 0x3f, 0x5f, 0x94, 0x52, 0x9f, 0xf5, 0x4a, |
|
| 13016 |
+ 0xa9, 0xe7, 0xbd, 0x52, 0xea, 0xe7, 0x5e, 0x29, 0xf5, 0x6b, 0xaf, 0x94, 0xda, 0x99, 0xe2, 0xff, |
|
| 13017 |
+ 0x72, 0x72, 0xfd, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x7c, 0x4c, 0x3e, 0xeb, 0x22, 0x00, |
|
| 13018 |
+ 0x00, |
|
| 13016 | 13019 |
} |
| ... | ... |
@@ -83,6 +83,15 @@ service Control {
|
| 83 | 83 |
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
| 84 | 84 |
} |
| 85 | 85 |
|
| 86 |
+ // UpdateSecret returns an `UpdateSecretResponse` with a `Secret` with the same |
|
| 87 |
+ // id as `UpdateSecretRequest.SecretID`. |
|
| 88 |
+ // - Returns `NotFound` if the Secret with the given id is not found. |
|
| 89 |
+ // - Returns `InvalidArgument` if the `UpdateSecretRequest.SecretID` is empty. |
|
| 90 |
+ // - Returns an error if updating fails. |
|
| 91 |
+ rpc UpdateSecret(UpdateSecretRequest) returns (UpdateSecretResponse) {
|
|
| 92 |
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
|
| 93 |
+ }; |
|
| 94 |
+ |
|
| 86 | 95 |
// ListSecrets returns a `ListSecretResponse` with a list all non-internal `Secret`s being |
| 87 | 96 |
// managed, or all secrets matching any name in `ListSecretsRequest.Names`, any |
| 88 | 97 |
// name prefix in `ListSecretsRequest.NamePrefixes`, any id in |
| ... | ... |
@@ -342,6 +351,23 @@ message GetSecretResponse {
|
| 342 | 342 |
Secret secret = 1; |
| 343 | 343 |
} |
| 344 | 344 |
|
| 345 |
+message UpdateSecretRequest {
|
|
| 346 |
+ // SecretID is the secret ID to update. |
|
| 347 |
+ string secret_id = 1 [(gogoproto.customname) = "SecretID"]; |
|
| 348 |
+ |
|
| 349 |
+ // SecretVersion is the version of the secret being updated. |
|
| 350 |
+ Version secret_version = 2; |
|
| 351 |
+ |
|
| 352 |
+ // Spec is the new spec to apply to the Secret |
|
| 353 |
+ // Only some fields are allowed to be updated. |
|
| 354 |
+ SecretSpec spec = 3; |
|
| 355 |
+} |
|
| 356 |
+ |
|
| 357 |
+message UpdateSecretResponse {
|
|
| 358 |
+ Secret secret = 1; |
|
| 359 |
+} |
|
| 360 |
+ |
|
| 361 |
+ |
|
| 345 | 362 |
// ListSecretRequest is the request to list all non-internal secrets in the secret store, |
| 346 | 363 |
// or all secrets filtered by (name or name prefix or id prefix) and labels. |
| 347 | 364 |
message ListSecretsRequest {
|
| ... | ... |
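The UpdateSecret RPC added above follows the same shape as the other Control methods: read the secret, change the fields that may change, and send the spec back together with the version you read. A minimal sketch of a caller against the generated Go client is below; the manager address, the secret ID, and the exact pointer shapes of the generated request fields are assumptions for illustration, not part of this diff.

package main

import (
	"log"

	"github.com/docker/swarmkit/api"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// Assumed manager address; TLS credentials are omitted for brevity.
	conn, err := grpc.Dial("127.0.0.1:4242", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := api.NewControlClient(conn)

	// Fetch the secret first: UpdateSecret needs the current version for its
	// optimistic-concurrency check.
	getResp, err := client.GetSecret(context.Background(), &api.GetSecretRequest{SecretID: "someSecretID"})
	if err != nil {
		log.Fatal(err)
	}

	spec := getResp.Secret.Spec
	spec.Annotations.Labels = map[string]string{"env": "test"} // only some spec fields may be updated

	if _, err := client.UpdateSecret(context.Background(), &api.UpdateSecretRequest{
		SecretID:      getResp.Secret.ID,
		SecretVersion: &getResp.Secret.Meta.Version,
		Spec:          &spec,
	}); err != nil {
		log.Fatal(err)
	}
}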
@@ -1,3 +1,3 @@ |
| 1 | 1 |
package api |
| 2 | 2 |
|
| 3 |
-//go:generate protoc -I.:../protobuf:../vendor:../vendor/github.com/gogo/protobuf --gogoswarm_out=plugins=grpc+deepcopy+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto,Mtimestamp/timestamp.proto=github.com/docker/swarmkit/api/timestamp,Mduration/duration.proto=github.com/docker/swarmkit/api/duration,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mplugin/plugin.proto=github.com/docker/swarmkit/protobuf/plugin:. types.proto specs.proto objects.proto control.proto dispatcher.proto ca.proto snapshot.proto raft.proto health.proto resource.proto |
|
| 3 |
+//go:generate protoc -I.:../protobuf:../vendor:../vendor/github.com/gogo/protobuf:../vendor/github.com/gogo/protobuf/protobuf --gogoswarm_out=plugins=grpc+deepcopy+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto,Mtimestamp/timestamp.proto=github.com/docker/swarmkit/api/timestamp,Mduration/duration.proto=github.com/docker/swarmkit/api/duration,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mplugin/plugin.proto=github.com/docker/swarmkit/protobuf/plugin:. types.proto specs.proto objects.proto control.proto dispatcher.proto ca.proto snapshot.proto raft.proto health.proto resource.proto |
| 4 | 4 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,29 @@ |
| 0 |
+// Package naming centralizes the naming of SwarmKit objects. |
|
| 1 |
+package naming |
|
| 2 |
+ |
|
| 3 |
+import ( |
|
| 4 |
+ "fmt" |
|
| 5 |
+ |
|
| 6 |
+ "github.com/docker/swarmkit/api" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+// Task returns the task name from Annotations.Name, |
|
| 10 |
+// and, in case Annotations.Name is missing, falls back |
|
| 11 |
+// to constructing the name from other information. |
|
| 12 |
+func Task(t *api.Task) string {
|
|
| 13 |
+ if t.Annotations.Name != "" {
|
|
| 14 |
+ // if set, use the container Annotations.Name field, set in the orchestrator. |
|
| 15 |
+ return t.Annotations.Name |
|
| 16 |
+ } |
|
| 17 |
+ |
|
| 18 |
+ slot := fmt.Sprint(t.Slot) |
|
| 19 |
+ if slot == "" || t.Slot == 0 {
|
|
| 20 |
+ // when no slot id is assigned, we assume that this is a node-bound task. |
|
| 21 |
+ slot = t.NodeID |
|
| 22 |
+ } |
|
| 23 |
+ |
|
| 24 |
+ // fallback to service.instance.id. |
|
| 25 |
+ return fmt.Sprintf("%s.%s.%s", t.ServiceAnnotations.Name, slot, t.ID)
|
|
| 26 |
+} |
|
| 27 |
+ |
|
| 28 |
+// TODO(stevvooe): Consolidate "Hostname" style validation here. |
| ... | ... |
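The new naming helper is small but worth a usage note: an explicit Annotations.Name always wins, a replicated task falls back to "service.slot.id", and a node-bound task substitutes the node ID for the slot. A sketch, assuming the package lives under github.com/docker/swarmkit/api/naming as in upstream swarmkit (the IDs are made up):

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/api/naming"
)

func main() {
	// Replicated task: no explicit name, so the helper builds service.slot.id.
	replicated := &api.Task{
		ID:                 "u3ccdq5pj8sbn2c4z5o7sgo2a",
		Slot:               3,
		ServiceAnnotations: api.Annotations{Name: "web"},
	}
	fmt.Println(naming.Task(replicated)) // web.3.u3ccdq5pj8sbn2c4z5o7sgo2a

	// Node-bound task: Slot is 0, so the node ID takes its place.
	global := &api.Task{
		ID:                 "x91fhvxlb7kcps4mce3vn52xu",
		NodeID:             "node-1",
		ServiceAnnotations: api.Annotations{Name: "agent"},
	}
	fmt.Println(naming.Task(global)) // agent.node-1.x91fhvxlb7kcps4mce3vn52xu

	// An explicit Annotations.Name short-circuits everything else.
	named := &api.Task{Annotations: api.Annotations{Name: "web.3.custom"}}
	fmt.Println(naming.Task(named)) // web.3.custom
}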
@@ -15,6 +15,7 @@ import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" |
| 15 | 15 |
import sort "sort" |
| 16 | 16 |
import strconv "strconv" |
| 17 | 17 |
import reflect "reflect" |
| 18 |
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" |
|
| 18 | 19 |
|
| 19 | 20 |
import io "io" |
| 20 | 21 |
|
| ... | ... |
@@ -227,9 +228,10 @@ type Cluster struct {
|
| 227 | 227 |
// and agents to unambiguously identify the older key to be deleted when |
| 228 | 228 |
// a new key is allocated on key rotation. |
| 229 | 229 |
EncryptionKeyLamportClock uint64 `protobuf:"varint,6,opt,name=encryption_key_lamport_clock,json=encryptionKeyLamportClock,proto3" json:"encryption_key_lamport_clock,omitempty"` |
| 230 |
- // RemovedNodes is the list of nodes that have been removed from the |
|
| 230 |
+ // BlacklistedCertificates tracks certificates that should no longer |
|
| 231 |
+ // be honored. It's a mapping from CN -> BlacklistedCertificate. |
|
| 231 | 232 |
// swarm. Their certificates should effectively be blacklisted. |
| 232 |
- RemovedNodes []*RemovedNode `protobuf:"bytes,7,rep,name=removed_nodes,json=removedNodes" json:"removed_nodes,omitempty"` |
|
| 233 |
+ BlacklistedCertificates map[string]*BlacklistedCertificate `protobuf:"bytes,8,rep,name=blacklisted_certificates,json=blacklistedCertificates" json:"blacklisted_certificates,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` |
|
| 233 | 234 |
} |
| 234 | 235 |
|
| 235 | 236 |
func (m *Cluster) Reset() { *m = Cluster{} }
|
| ... | ... |
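Replacing the RemovedNodes slice with a map keyed by certificate CN turns blacklist checks into a single lookup. A minimal sketch against the new field (the CN values are placeholders):

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

// isBlacklisted reports whether a certificate CN appears in the cluster's
// blacklist; with the map representation this is one lookup instead of a
// scan over the old RemovedNode slice.
func isBlacklisted(cluster *api.Cluster, cn string) bool {
	_, ok := cluster.BlacklistedCertificates[cn]
	return ok
}

func main() {
	cluster := &api.Cluster{
		BlacklistedCertificates: map[string]*api.BlacklistedCertificate{
			"old-node-cn": {},
		},
	}
	fmt.Println(isBlacklisted(cluster, "old-node-cn")) // true
	fmt.Println(isBlacklisted(cluster, "other-cn"))    // false
}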
@@ -451,10 +453,10 @@ func (m *Cluster) Copy() *Cluster {
|
| 451 | 451 |
} |
| 452 | 452 |
} |
| 453 | 453 |
|
| 454 |
- if m.RemovedNodes != nil {
|
|
| 455 |
- o.RemovedNodes = make([]*RemovedNode, 0, len(m.RemovedNodes)) |
|
| 456 |
- for _, v := range m.RemovedNodes {
|
|
| 457 |
- o.RemovedNodes = append(o.RemovedNodes, v.Copy()) |
|
| 454 |
+ if m.BlacklistedCertificates != nil {
|
|
| 455 |
+ o.BlacklistedCertificates = make(map[string]*BlacklistedCertificate) |
|
| 456 |
+ for k, v := range m.BlacklistedCertificates {
|
|
| 457 |
+ o.BlacklistedCertificates[k] = v.Copy() |
|
| 458 | 458 |
} |
| 459 | 459 |
} |
| 460 | 460 |
|
| ... | ... |
@@ -641,8 +643,18 @@ func (this *Cluster) GoString() string {
|
| 641 | 641 |
s = append(s, "NetworkBootstrapKeys: "+fmt.Sprintf("%#v", this.NetworkBootstrapKeys)+",\n")
|
| 642 | 642 |
} |
| 643 | 643 |
s = append(s, "EncryptionKeyLamportClock: "+fmt.Sprintf("%#v", this.EncryptionKeyLamportClock)+",\n")
|
| 644 |
- if this.RemovedNodes != nil {
|
|
| 645 |
- s = append(s, "RemovedNodes: "+fmt.Sprintf("%#v", this.RemovedNodes)+",\n")
|
|
| 644 |
+ keysForBlacklistedCertificates := make([]string, 0, len(this.BlacklistedCertificates)) |
|
| 645 |
+ for k, _ := range this.BlacklistedCertificates {
|
|
| 646 |
+ keysForBlacklistedCertificates = append(keysForBlacklistedCertificates, k) |
|
| 647 |
+ } |
|
| 648 |
+ github_com_gogo_protobuf_sortkeys.Strings(keysForBlacklistedCertificates) |
|
| 649 |
+ mapStringForBlacklistedCertificates := "map[string]*BlacklistedCertificate{"
|
|
| 650 |
+ for _, k := range keysForBlacklistedCertificates {
|
|
| 651 |
+ mapStringForBlacklistedCertificates += fmt.Sprintf("%#v: %#v,", k, this.BlacklistedCertificates[k])
|
|
| 652 |
+ } |
|
| 653 |
+ mapStringForBlacklistedCertificates += "}" |
|
| 654 |
+ if this.BlacklistedCertificates != nil {
|
|
| 655 |
+ s = append(s, "BlacklistedCertificates: "+mapStringForBlacklistedCertificates+",\n") |
|
| 646 | 656 |
} |
| 647 | 657 |
s = append(s, "}") |
| 648 | 658 |
return strings.Join(s, "") |
| ... | ... |
@@ -1270,16 +1282,32 @@ func (m *Cluster) MarshalTo(data []byte) (int, error) {
|
| 1270 | 1270 |
i++ |
| 1271 | 1271 |
i = encodeVarintObjects(data, i, uint64(m.EncryptionKeyLamportClock)) |
| 1272 | 1272 |
} |
| 1273 |
- if len(m.RemovedNodes) > 0 {
|
|
| 1274 |
- for _, msg := range m.RemovedNodes {
|
|
| 1275 |
- data[i] = 0x3a |
|
| 1273 |
+ if len(m.BlacklistedCertificates) > 0 {
|
|
| 1274 |
+ for k, _ := range m.BlacklistedCertificates {
|
|
| 1275 |
+ data[i] = 0x42 |
|
| 1276 | 1276 |
i++ |
| 1277 |
- i = encodeVarintObjects(data, i, uint64(msg.Size())) |
|
| 1278 |
- n, err := msg.MarshalTo(data[i:]) |
|
| 1279 |
- if err != nil {
|
|
| 1280 |
- return 0, err |
|
| 1277 |
+ v := m.BlacklistedCertificates[k] |
|
| 1278 |
+ msgSize := 0 |
|
| 1279 |
+ if v != nil {
|
|
| 1280 |
+ msgSize = v.Size() |
|
| 1281 |
+ msgSize += 1 + sovObjects(uint64(msgSize)) |
|
| 1282 |
+ } |
|
| 1283 |
+ mapSize := 1 + len(k) + sovObjects(uint64(len(k))) + msgSize |
|
| 1284 |
+ i = encodeVarintObjects(data, i, uint64(mapSize)) |
|
| 1285 |
+ data[i] = 0xa |
|
| 1286 |
+ i++ |
|
| 1287 |
+ i = encodeVarintObjects(data, i, uint64(len(k))) |
|
| 1288 |
+ i += copy(data[i:], k) |
|
| 1289 |
+ if v != nil {
|
|
| 1290 |
+ data[i] = 0x12 |
|
| 1291 |
+ i++ |
|
| 1292 |
+ i = encodeVarintObjects(data, i, uint64(v.Size())) |
|
| 1293 |
+ n32, err := v.MarshalTo(data[i:]) |
|
| 1294 |
+ if err != nil {
|
|
| 1295 |
+ return 0, err |
|
| 1296 |
+ } |
|
| 1297 |
+ i += n32 |
|
| 1281 | 1298 |
} |
| 1282 |
- i += n |
|
| 1283 | 1299 |
} |
| 1284 | 1300 |
} |
| 1285 | 1301 |
return i, nil |
| ... | ... |
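The marshaling above encodes each map entry as a nested message on field 8 of Cluster (tag byte 0x42 = 8<<3 | 2): the entry body holds the key on field 1 (tag 0xa) and, when non-nil, the value message on field 2 (tag 0x12), each preceded by a varint length. The size arithmetic can be reproduced with a standalone varint-size helper; the key and value size below are illustrative numbers, not taken from this diff.

package main

import "fmt"

// sov counts the bytes needed to encode x as a protobuf varint (7 payload
// bits per byte); it mirrors the generated sovObjects helper.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	key := "some-node-cn" // map key (certificate CN)
	valSize := 6          // assumed serialized size of the BlacklistedCertificate value

	// Value field inside the entry: tag byte + length varint + payload.
	msgSize := valSize
	msgSize += 1 + sov(uint64(msgSize))

	// Entry body: key field (tag byte + length varint + key bytes) plus the value field.
	mapSize := 1 + len(key) + sov(uint64(len(key))) + msgSize

	// On the wire the entry is itself prefixed by the Cluster field tag (0x42)
	// and a varint holding mapSize, which is what the Size() hunk adds up.
	total := 1 + sov(uint64(mapSize)) + mapSize
	fmt.Println(mapSize, total)
}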
@@ -1309,19 +1337,19 @@ func (m *Secret) MarshalTo(data []byte) (int, error) {
|
| 1309 | 1309 |
data[i] = 0x12 |
| 1310 | 1310 |
i++ |
| 1311 | 1311 |
i = encodeVarintObjects(data, i, uint64(m.Meta.Size())) |
| 1312 |
- n32, err := m.Meta.MarshalTo(data[i:]) |
|
| 1312 |
+ n33, err := m.Meta.MarshalTo(data[i:]) |
|
| 1313 | 1313 |
if err != nil {
|
| 1314 | 1314 |
return 0, err |
| 1315 | 1315 |
} |
| 1316 |
- i += n32 |
|
| 1316 |
+ i += n33 |
|
| 1317 | 1317 |
data[i] = 0x1a |
| 1318 | 1318 |
i++ |
| 1319 | 1319 |
i = encodeVarintObjects(data, i, uint64(m.Spec.Size())) |
| 1320 |
- n33, err := m.Spec.MarshalTo(data[i:]) |
|
| 1320 |
+ n34, err := m.Spec.MarshalTo(data[i:]) |
|
| 1321 | 1321 |
if err != nil {
|
| 1322 | 1322 |
return 0, err |
| 1323 | 1323 |
} |
| 1324 |
- i += n33 |
|
| 1324 |
+ i += n34 |
|
| 1325 | 1325 |
if len(m.Digest) > 0 {
|
| 1326 | 1326 |
data[i] = 0x22 |
| 1327 | 1327 |
i++ |
| ... | ... |
@@ -1596,10 +1624,17 @@ func (m *Cluster) Size() (n int) {
|
| 1596 | 1596 |
if m.EncryptionKeyLamportClock != 0 {
|
| 1597 | 1597 |
n += 1 + sovObjects(uint64(m.EncryptionKeyLamportClock)) |
| 1598 | 1598 |
} |
| 1599 |
- if len(m.RemovedNodes) > 0 {
|
|
| 1600 |
- for _, e := range m.RemovedNodes {
|
|
| 1601 |
- l = e.Size() |
|
| 1602 |
- n += 1 + l + sovObjects(uint64(l)) |
|
| 1599 |
+ if len(m.BlacklistedCertificates) > 0 {
|
|
| 1600 |
+ for k, v := range m.BlacklistedCertificates {
|
|
| 1601 |
+ _ = k |
|
| 1602 |
+ _ = v |
|
| 1603 |
+ l = 0 |
|
| 1604 |
+ if v != nil {
|
|
| 1605 |
+ l = v.Size() |
|
| 1606 |
+ l += 1 + sovObjects(uint64(l)) |
|
| 1607 |
+ } |
|
| 1608 |
+ mapEntrySize := 1 + len(k) + sovObjects(uint64(len(k))) + l |
|
| 1609 |
+ n += mapEntrySize + 1 + sovObjects(uint64(mapEntrySize)) |
|
| 1603 | 1610 |
} |
| 1604 | 1611 |
} |
| 1605 | 1612 |
return n |
| ... | ... |
@@ -1761,6 +1796,16 @@ func (this *Cluster) String() string {
|
| 1761 | 1761 |
if this == nil {
|
| 1762 | 1762 |
return "nil" |
| 1763 | 1763 |
} |
| 1764 |
+ keysForBlacklistedCertificates := make([]string, 0, len(this.BlacklistedCertificates)) |
|
| 1765 |
+ for k, _ := range this.BlacklistedCertificates {
|
|
| 1766 |
+ keysForBlacklistedCertificates = append(keysForBlacklistedCertificates, k) |
|
| 1767 |
+ } |
|
| 1768 |
+ github_com_gogo_protobuf_sortkeys.Strings(keysForBlacklistedCertificates) |
|
| 1769 |
+ mapStringForBlacklistedCertificates := "map[string]*BlacklistedCertificate{"
|
|
| 1770 |
+ for _, k := range keysForBlacklistedCertificates {
|
|
| 1771 |
+ mapStringForBlacklistedCertificates += fmt.Sprintf("%v: %v,", k, this.BlacklistedCertificates[k])
|
|
| 1772 |
+ } |
|
| 1773 |
+ mapStringForBlacklistedCertificates += "}" |
|
| 1764 | 1774 |
s := strings.Join([]string{`&Cluster{`,
|
| 1765 | 1775 |
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
|
| 1766 | 1776 |
`Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, |
| ... | ... |
@@ -1768,7 +1813,7 @@ func (this *Cluster) String() string {
|
| 1768 | 1768 |
`RootCA:` + strings.Replace(strings.Replace(this.RootCA.String(), "RootCA", "RootCA", 1), `&`, ``, 1) + `,`, |
| 1769 | 1769 |
`NetworkBootstrapKeys:` + strings.Replace(fmt.Sprintf("%v", this.NetworkBootstrapKeys), "EncryptionKey", "EncryptionKey", 1) + `,`,
|
| 1770 | 1770 |
`EncryptionKeyLamportClock:` + fmt.Sprintf("%v", this.EncryptionKeyLamportClock) + `,`,
|
| 1771 |
- `RemovedNodes:` + strings.Replace(fmt.Sprintf("%v", this.RemovedNodes), "RemovedNode", "RemovedNode", 1) + `,`,
|
|
| 1771 |
+ `BlacklistedCertificates:` + mapStringForBlacklistedCertificates + `,`, |
|
| 1772 | 1772 |
`}`, |
| 1773 | 1773 |
}, "") |
| 1774 | 1774 |
return s |
| ... | ... |
@@ -3697,9 +3742,9 @@ func (m *Cluster) Unmarshal(data []byte) error {
|
| 3697 | 3697 |
break |
| 3698 | 3698 |
} |
| 3699 | 3699 |
} |
| 3700 |
- case 7: |
|
| 3700 |
+ case 8: |
|
| 3701 | 3701 |
if wireType != 2 {
|
| 3702 |
- return fmt.Errorf("proto: wrong wireType = %d for field RemovedNodes", wireType)
|
|
| 3702 |
+ return fmt.Errorf("proto: wrong wireType = %d for field BlacklistedCertificates", wireType)
|
|
| 3703 | 3703 |
} |
| 3704 | 3704 |
var msglen int |
| 3705 | 3705 |
for shift := uint(0); ; shift += 7 {
|
| ... | ... |
@@ -3723,9 +3768,99 @@ func (m *Cluster) Unmarshal(data []byte) error {
|
| 3723 | 3723 |
if postIndex > l {
|
| 3724 | 3724 |
return io.ErrUnexpectedEOF |
| 3725 | 3725 |
} |
| 3726 |
- m.RemovedNodes = append(m.RemovedNodes, &RemovedNode{})
|
|
| 3727 |
- if err := m.RemovedNodes[len(m.RemovedNodes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
|
|
| 3728 |
- return err |
|
| 3726 |
+ var keykey uint64 |
|
| 3727 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 3728 |
+ if shift >= 64 {
|
|
| 3729 |
+ return ErrIntOverflowObjects |
|
| 3730 |
+ } |
|
| 3731 |
+ if iNdEx >= l {
|
|
| 3732 |
+ return io.ErrUnexpectedEOF |
|
| 3733 |
+ } |
|
| 3734 |
+ b := data[iNdEx] |
|
| 3735 |
+ iNdEx++ |
|
| 3736 |
+ keykey |= (uint64(b) & 0x7F) << shift |
|
| 3737 |
+ if b < 0x80 {
|
|
| 3738 |
+ break |
|
| 3739 |
+ } |
|
| 3740 |
+ } |
|
| 3741 |
+ var stringLenmapkey uint64 |
|
| 3742 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 3743 |
+ if shift >= 64 {
|
|
| 3744 |
+ return ErrIntOverflowObjects |
|
| 3745 |
+ } |
|
| 3746 |
+ if iNdEx >= l {
|
|
| 3747 |
+ return io.ErrUnexpectedEOF |
|
| 3748 |
+ } |
|
| 3749 |
+ b := data[iNdEx] |
|
| 3750 |
+ iNdEx++ |
|
| 3751 |
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift |
|
| 3752 |
+ if b < 0x80 {
|
|
| 3753 |
+ break |
|
| 3754 |
+ } |
|
| 3755 |
+ } |
|
| 3756 |
+ intStringLenmapkey := int(stringLenmapkey) |
|
| 3757 |
+ if intStringLenmapkey < 0 {
|
|
| 3758 |
+ return ErrInvalidLengthObjects |
|
| 3759 |
+ } |
|
| 3760 |
+ postStringIndexmapkey := iNdEx + intStringLenmapkey |
|
| 3761 |
+ if postStringIndexmapkey > l {
|
|
| 3762 |
+ return io.ErrUnexpectedEOF |
|
| 3763 |
+ } |
|
| 3764 |
+ mapkey := string(data[iNdEx:postStringIndexmapkey]) |
|
| 3765 |
+ iNdEx = postStringIndexmapkey |
|
| 3766 |
+ if m.BlacklistedCertificates == nil {
|
|
| 3767 |
+ m.BlacklistedCertificates = make(map[string]*BlacklistedCertificate) |
|
| 3768 |
+ } |
|
| 3769 |
+ if iNdEx < postIndex {
|
|
| 3770 |
+ var valuekey uint64 |
|
| 3771 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 3772 |
+ if shift >= 64 {
|
|
| 3773 |
+ return ErrIntOverflowObjects |
|
| 3774 |
+ } |
|
| 3775 |
+ if iNdEx >= l {
|
|
| 3776 |
+ return io.ErrUnexpectedEOF |
|
| 3777 |
+ } |
|
| 3778 |
+ b := data[iNdEx] |
|
| 3779 |
+ iNdEx++ |
|
| 3780 |
+ valuekey |= (uint64(b) & 0x7F) << shift |
|
| 3781 |
+ if b < 0x80 {
|
|
| 3782 |
+ break |
|
| 3783 |
+ } |
|
| 3784 |
+ } |
|
| 3785 |
+ var mapmsglen int |
|
| 3786 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 3787 |
+ if shift >= 64 {
|
|
| 3788 |
+ return ErrIntOverflowObjects |
|
| 3789 |
+ } |
|
| 3790 |
+ if iNdEx >= l {
|
|
| 3791 |
+ return io.ErrUnexpectedEOF |
|
| 3792 |
+ } |
|
| 3793 |
+ b := data[iNdEx] |
|
| 3794 |
+ iNdEx++ |
|
| 3795 |
+ mapmsglen |= (int(b) & 0x7F) << shift |
|
| 3796 |
+ if b < 0x80 {
|
|
| 3797 |
+ break |
|
| 3798 |
+ } |
|
| 3799 |
+ } |
|
| 3800 |
+ if mapmsglen < 0 {
|
|
| 3801 |
+ return ErrInvalidLengthObjects |
|
| 3802 |
+ } |
|
| 3803 |
+ postmsgIndex := iNdEx + mapmsglen |
|
| 3804 |
+ if mapmsglen < 0 {
|
|
| 3805 |
+ return ErrInvalidLengthObjects |
|
| 3806 |
+ } |
|
| 3807 |
+ if postmsgIndex > l {
|
|
| 3808 |
+ return io.ErrUnexpectedEOF |
|
| 3809 |
+ } |
|
| 3810 |
+ mapvalue := &BlacklistedCertificate{}
|
|
| 3811 |
+ if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
|
|
| 3812 |
+ return err |
|
| 3813 |
+ } |
|
| 3814 |
+ iNdEx = postmsgIndex |
|
| 3815 |
+ m.BlacklistedCertificates[mapkey] = mapvalue |
|
| 3816 |
+ } else {
|
|
| 3817 |
+ var mapvalue *BlacklistedCertificate |
|
| 3818 |
+ m.BlacklistedCertificates[mapkey] = mapvalue |
|
| 3729 | 3819 |
} |
| 3730 | 3820 |
iNdEx = postIndex |
| 3731 | 3821 |
default: |
| ... | ... |
@@ -4064,76 +4199,79 @@ var ( |
| 4064 | 4064 |
func init() { proto.RegisterFile("objects.proto", fileDescriptorObjects) }
|
| 4065 | 4065 |
|
| 4066 | 4066 |
var fileDescriptorObjects = []byte{
|
| 4067 |
- // 1126 bytes of a gzipped FileDescriptorProto |
|
| 4068 |
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x57, 0x4d, 0x6f, 0x1b, 0xc5, |
|
| 4069 |
- 0x1b, 0xef, 0xda, 0x5b, 0xbf, 0x3c, 0xb6, 0x23, 0xfd, 0xe7, 0x5f, 0x45, 0xdb, 0x10, 0xec, 0xe0, |
|
| 4070 |
- 0x0a, 0xd4, 0x43, 0xe5, 0x8a, 0x52, 0x50, 0x2b, 0x5a, 0x21, 0xbf, 0x44, 0x60, 0x95, 0x40, 0x34, |
|
| 4071 |
- 0x29, 0xe9, 0x71, 0x35, 0xd9, 0x9d, 0x9a, 0xc5, 0xf6, 0xce, 0x6a, 0x66, 0xec, 0x2a, 0x3d, 0x21, |
|
| 4072 |
- 0x3e, 0x00, 0x1f, 0x81, 0x6f, 0xc1, 0x99, 0x6b, 0x0e, 0x1c, 0xb8, 0xc1, 0xc9, 0x22, 0x3e, 0x20, |
|
| 4073 |
- 0x71, 0xe3, 0x23, 0xa0, 0x79, 0x59, 0xdb, 0x91, 0xd7, 0x21, 0x95, 0xaa, 0xdc, 0xe6, 0xf1, 0xfc, |
|
| 4074 |
- 0x7e, 0xbf, 0x79, 0xde, 0xe6, 0x99, 0x35, 0xd4, 0xd8, 0xc9, 0x77, 0x34, 0x90, 0xa2, 0x95, 0x70, |
|
| 4075 |
- 0x26, 0x19, 0x42, 0x21, 0x0b, 0x86, 0x94, 0xb7, 0xc4, 0x2b, 0xc2, 0xc7, 0xc3, 0x48, 0xb6, 0xa6, |
|
| 4076 |
- 0x1f, 0xee, 0x54, 0xe4, 0x69, 0x42, 0x2d, 0x60, 0xa7, 0x22, 0x12, 0x1a, 0xa4, 0xc6, 0x6d, 0x19, |
|
| 4077 |
- 0x8d, 0xa9, 0x90, 0x64, 0x9c, 0xdc, 0x5f, 0xac, 0xec, 0xd6, 0xad, 0x01, 0x1b, 0x30, 0xbd, 0xbc, |
|
| 4078 |
- 0xaf, 0x56, 0xe6, 0xd7, 0xe6, 0x2f, 0x0e, 0xb8, 0x07, 0x54, 0x12, 0xf4, 0x29, 0x14, 0xa7, 0x94, |
|
| 4079 |
- 0x8b, 0x88, 0xc5, 0x9e, 0xb3, 0xe7, 0xdc, 0xad, 0x3c, 0x78, 0xa7, 0xb5, 0x7e, 0x72, 0xeb, 0xd8, |
|
| 4080 |
- 0x40, 0x3a, 0xee, 0xd9, 0xac, 0x71, 0x03, 0xa7, 0x0c, 0xf4, 0x04, 0x20, 0xe0, 0x94, 0x48, 0x1a, |
|
| 4081 |
- 0xfa, 0x44, 0x7a, 0x39, 0xcd, 0x7f, 0x37, 0x8b, 0xff, 0x3c, 0x75, 0x0a, 0x97, 0x2d, 0xa1, 0x2d, |
|
| 4082 |
- 0x15, 0x7b, 0x92, 0x84, 0x29, 0x3b, 0x7f, 0x25, 0xb6, 0x25, 0xb4, 0x65, 0xf3, 0xef, 0x3c, 0xb8, |
|
| 4083 |
- 0x5f, 0xb1, 0x90, 0xa2, 0x6d, 0xc8, 0x45, 0xa1, 0x76, 0xbe, 0xdc, 0x29, 0xcc, 0x67, 0x8d, 0x5c, |
|
| 4084 |
- 0xbf, 0x87, 0x73, 0x51, 0x88, 0x1e, 0x80, 0x3b, 0xa6, 0x92, 0x58, 0xb7, 0xbc, 0x2c, 0x61, 0x95, |
|
| 4085 |
- 0x01, 0x1b, 0x93, 0xc6, 0xa2, 0x4f, 0xc0, 0x55, 0x69, 0xb5, 0xce, 0xec, 0x66, 0x71, 0xd4, 0x99, |
|
| 4086 |
- 0x47, 0x09, 0x0d, 0x52, 0x9e, 0xc2, 0xa3, 0x7d, 0xa8, 0x84, 0x54, 0x04, 0x3c, 0x4a, 0xa4, 0xca, |
|
| 4087 |
- 0xa4, 0xab, 0xe9, 0x77, 0x36, 0xd1, 0x7b, 0x4b, 0x28, 0x5e, 0xe5, 0xa1, 0x27, 0x50, 0x10, 0x92, |
|
| 4088 |
- 0xc8, 0x89, 0xf0, 0x6e, 0x6a, 0x85, 0xfa, 0x46, 0x07, 0x34, 0xca, 0xba, 0x60, 0x39, 0xe8, 0x0b, |
|
| 4089 |
- 0xd8, 0x1a, 0x93, 0x98, 0x0c, 0x28, 0xf7, 0xad, 0x4a, 0x41, 0xab, 0xbc, 0x97, 0x19, 0xba, 0x41, |
|
| 4090 |
- 0x1a, 0x21, 0x5c, 0x1b, 0xaf, 0x9a, 0x68, 0x1f, 0x80, 0x48, 0x49, 0x82, 0x6f, 0xc7, 0x34, 0x96, |
|
| 4091 |
- 0x5e, 0x51, 0xab, 0xbc, 0x9f, 0xe9, 0x0b, 0x95, 0xaf, 0x18, 0x1f, 0xb6, 0x17, 0x60, 0xbc, 0x42, |
|
| 4092 |
- 0x44, 0x9f, 0x43, 0x25, 0xa0, 0x5c, 0x46, 0x2f, 0xa3, 0x80, 0x48, 0xea, 0x95, 0xb4, 0x4e, 0x23, |
|
| 4093 |
- 0x4b, 0xa7, 0xbb, 0x84, 0xd9, 0xa0, 0x56, 0x99, 0xcd, 0xdf, 0x73, 0x50, 0x3c, 0xa2, 0x7c, 0x1a, |
|
| 4094 |
- 0x05, 0x6f, 0xb7, 0xdc, 0x8f, 0x2f, 0x94, 0x3b, 0xd3, 0x33, 0x7b, 0xec, 0x5a, 0xc5, 0x1f, 0x41, |
|
| 4095 |
- 0x89, 0xc6, 0x61, 0xc2, 0xa2, 0x58, 0xda, 0x72, 0x67, 0x76, 0xcb, 0xbe, 0xc5, 0xe0, 0x05, 0x1a, |
|
| 4096 |
- 0xed, 0x43, 0xcd, 0x74, 0xb1, 0x7f, 0xa1, 0xd6, 0x7b, 0x59, 0xf4, 0x6f, 0x34, 0xd0, 0x16, 0xa9, |
|
| 4097 |
- 0x3a, 0x59, 0xb1, 0x50, 0x0f, 0x6a, 0x09, 0xa7, 0xd3, 0x88, 0x4d, 0x84, 0xaf, 0x83, 0x28, 0x5c, |
|
| 4098 |
- 0x29, 0x08, 0x5c, 0x4d, 0x59, 0xca, 0x6a, 0xfe, 0x94, 0x83, 0x52, 0xea, 0x23, 0x7a, 0x68, 0xd3, |
|
| 4099 |
- 0xe1, 0x6c, 0x76, 0x28, 0xc5, 0x6a, 0x29, 0x93, 0x89, 0x87, 0x70, 0x33, 0x61, 0x5c, 0x0a, 0x2f, |
|
| 4100 |
- 0xb7, 0x97, 0xdf, 0xd4, 0xb3, 0x87, 0x8c, 0xcb, 0x2e, 0x8b, 0x5f, 0x46, 0x03, 0x6c, 0xc0, 0xe8, |
|
| 4101 |
- 0x05, 0x54, 0xa6, 0x11, 0x97, 0x13, 0x32, 0xf2, 0xa3, 0x44, 0x78, 0x79, 0xcd, 0xfd, 0xe0, 0xb2, |
|
| 4102 |
- 0x23, 0x5b, 0xc7, 0x06, 0xdf, 0x3f, 0xec, 0x6c, 0xcd, 0x67, 0x0d, 0x58, 0x98, 0x02, 0x83, 0x95, |
|
| 4103 |
- 0xea, 0x27, 0x62, 0xe7, 0x00, 0xca, 0x8b, 0x1d, 0x74, 0x0f, 0x20, 0x36, 0x2d, 0xea, 0x2f, 0x9a, |
|
| 4104 |
- 0xa6, 0x36, 0x9f, 0x35, 0xca, 0xb6, 0x71, 0xfb, 0x3d, 0x5c, 0xb6, 0x80, 0x7e, 0x88, 0x10, 0xb8, |
|
| 4105 |
- 0x24, 0x0c, 0xb9, 0x6e, 0xa1, 0x32, 0xd6, 0xeb, 0xe6, 0xaf, 0x37, 0xc1, 0x7d, 0x4e, 0xc4, 0xf0, |
|
| 4106 |
- 0xba, 0xc7, 0x8c, 0x3a, 0x73, 0xad, 0xe9, 0xee, 0x01, 0x08, 0x53, 0x4a, 0x15, 0x8e, 0xbb, 0x0c, |
|
| 4107 |
- 0xc7, 0x16, 0x58, 0x85, 0x63, 0x01, 0x26, 0x1c, 0x31, 0x62, 0x52, 0xf7, 0x97, 0x8b, 0xf5, 0x1a, |
|
| 4108 |
- 0xdd, 0x81, 0x62, 0xcc, 0x42, 0x4d, 0x2f, 0x68, 0x3a, 0xcc, 0x67, 0x8d, 0x82, 0x1a, 0x29, 0xfd, |
|
| 4109 |
- 0x1e, 0x2e, 0xa8, 0xad, 0x7e, 0xa8, 0xee, 0x2d, 0x89, 0x63, 0x26, 0x89, 0x1a, 0x4a, 0xc2, 0xde, |
|
| 4110 |
- 0xff, 0xcc, 0xc6, 0x6a, 0x2f, 0x61, 0xe9, 0xbd, 0x5d, 0x61, 0xa2, 0x63, 0xf8, 0x7f, 0xea, 0xef, |
|
| 4111 |
- 0xaa, 0x60, 0xe9, 0x4d, 0x04, 0x91, 0x55, 0x58, 0xd9, 0x59, 0x99, 0x93, 0xe5, 0xcd, 0x73, 0x52, |
|
| 4112 |
- 0x67, 0x30, 0x6b, 0x4e, 0x76, 0xa0, 0x16, 0x52, 0x11, 0x71, 0x1a, 0xea, 0x1b, 0x48, 0x3d, 0xd8, |
|
| 4113 |
- 0x73, 0xee, 0x6e, 0x6d, 0x78, 0x7a, 0xac, 0x08, 0xc5, 0x55, 0xcb, 0xd1, 0x16, 0x6a, 0x43, 0xc9, |
|
| 4114 |
- 0xf6, 0x8d, 0xf0, 0x2a, 0xba, 0x77, 0xaf, 0x38, 0x1f, 0x17, 0xb4, 0x0b, 0x13, 0xa4, 0xfa, 0x46, |
|
| 4115 |
- 0x13, 0xe4, 0x31, 0xc0, 0x88, 0x0d, 0xfc, 0x90, 0x47, 0x53, 0xca, 0xbd, 0x9a, 0xe6, 0xee, 0x64, |
|
| 4116 |
- 0x71, 0x7b, 0x1a, 0x81, 0xcb, 0x23, 0x36, 0x30, 0xcb, 0xe6, 0x0f, 0x0e, 0xfc, 0x6f, 0xcd, 0x29, |
|
| 4117 |
- 0xf4, 0x31, 0x14, 0xad, 0x5b, 0x97, 0x7d, 0x04, 0x58, 0x1e, 0x4e, 0xb1, 0x68, 0x17, 0xca, 0xea, |
|
| 4118 |
- 0x8e, 0x50, 0x21, 0xa8, 0xb9, 0xfd, 0x65, 0xbc, 0xfc, 0x01, 0x79, 0x50, 0x24, 0xa3, 0x88, 0xa8, |
|
| 4119 |
- 0xbd, 0xbc, 0xde, 0x4b, 0xcd, 0xe6, 0x8f, 0x39, 0x28, 0x5a, 0xb1, 0xeb, 0x1e, 0xe7, 0xf6, 0xd8, |
|
| 4120 |
- 0xb5, 0x9b, 0xf5, 0x14, 0xaa, 0x26, 0x9d, 0xb6, 0x25, 0xdc, 0xff, 0x4c, 0x6a, 0xc5, 0xe0, 0x4d, |
|
| 4121 |
- 0x3b, 0x3c, 0x05, 0x37, 0x4a, 0xc8, 0xd8, 0x8e, 0xf2, 0xcc, 0x93, 0xfb, 0x87, 0xed, 0x83, 0xaf, |
|
| 4122 |
- 0x13, 0xd3, 0xd9, 0xa5, 0xf9, 0xac, 0xe1, 0xaa, 0x1f, 0xb0, 0xa6, 0x35, 0x7f, 0xce, 0x43, 0xb1, |
|
| 4123 |
- 0x3b, 0x9a, 0x08, 0x49, 0xf9, 0x75, 0x27, 0xc4, 0x1e, 0xbb, 0x96, 0x90, 0x2e, 0x14, 0x39, 0x63, |
|
| 4124 |
- 0xd2, 0x0f, 0xc8, 0x65, 0xb9, 0xc0, 0x8c, 0xc9, 0x6e, 0xbb, 0xb3, 0xa5, 0x88, 0x6a, 0x90, 0x18, |
|
| 4125 |
- 0x1b, 0x17, 0x14, 0xb5, 0x4b, 0xd0, 0x0b, 0xd8, 0x4e, 0xc7, 0xef, 0x09, 0x63, 0x52, 0x48, 0x4e, |
|
| 4126 |
- 0x12, 0x7f, 0x48, 0x4f, 0xd5, 0x9b, 0x97, 0xdf, 0xf4, 0x65, 0xb2, 0x1f, 0x07, 0xfc, 0x54, 0x27, |
|
| 4127 |
- 0xea, 0x19, 0x3d, 0xc5, 0xb7, 0xac, 0x40, 0x27, 0xe5, 0x3f, 0xa3, 0xa7, 0x02, 0x7d, 0x06, 0xbb, |
|
| 4128 |
- 0x74, 0x01, 0x53, 0x8a, 0xfe, 0x88, 0x8c, 0xd5, 0xc3, 0xe2, 0x07, 0x23, 0x16, 0x0c, 0xf5, 0x6c, |
|
| 4129 |
- 0x73, 0xf1, 0x6d, 0xba, 0x2a, 0xf5, 0xa5, 0x41, 0x74, 0x15, 0x40, 0xbd, 0x9e, 0x9c, 0x8e, 0xd9, |
|
| 4130 |
- 0x94, 0x86, 0xbe, 0x1a, 0x7a, 0x6a, 0xc8, 0xe5, 0x37, 0xa5, 0x08, 0x1b, 0xa0, 0x1a, 0x92, 0xb8, |
|
| 4131 |
- 0xca, 0x97, 0x86, 0x68, 0xfe, 0xe5, 0x40, 0xe1, 0x88, 0x06, 0x9c, 0xca, 0xb7, 0x5a, 0xb6, 0x47, |
|
| 4132 |
- 0x17, 0xca, 0x56, 0xcf, 0x7e, 0xd1, 0xd5, 0xa9, 0x6b, 0x55, 0xdb, 0x86, 0x42, 0x18, 0x0d, 0xa8, |
|
| 4133 |
- 0x30, 0xdf, 0x24, 0x65, 0x6c, 0x2d, 0xd4, 0x04, 0x57, 0x44, 0xaf, 0xa9, 0xee, 0xcf, 0xbc, 0x79, |
|
| 4134 |
- 0x3e, 0xad, 0x42, 0xf4, 0x9a, 0x62, 0xbd, 0x87, 0x76, 0xa0, 0x14, 0xc5, 0x92, 0xf2, 0x98, 0x8c, |
|
| 4135 |
- 0x74, 0xfe, 0x4a, 0x78, 0x61, 0x77, 0x76, 0xcf, 0xce, 0xeb, 0x37, 0xfe, 0x38, 0xaf, 0xdf, 0xf8, |
|
| 4136 |
- 0xe7, 0xbc, 0xee, 0x7c, 0x3f, 0xaf, 0x3b, 0x67, 0xf3, 0xba, 0xf3, 0xdb, 0xbc, 0xee, 0xfc, 0x39, |
|
| 4137 |
- 0xaf, 0x3b, 0x27, 0x05, 0xfd, 0x9f, 0xe2, 0xa3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x71, |
|
| 4138 |
- 0xe0, 0xa4, 0xc3, 0x0c, 0x00, 0x00, |
|
| 4067 |
+ // 1174 bytes of a gzipped FileDescriptorProto |
|
| 4068 |
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x57, 0x4d, 0x8f, 0x1b, 0x35, |
|
| 4069 |
+ 0x18, 0xee, 0x24, 0xb3, 0xf9, 0x78, 0xb3, 0x59, 0x81, 0xa9, 0xca, 0x34, 0x2c, 0xc9, 0x92, 0x0a, |
|
| 4070 |
+ 0x54, 0xa1, 0x2a, 0x15, 0xa5, 0xa0, 0x2d, 0xb4, 0x82, 0x7c, 0x09, 0xa2, 0x52, 0xa8, 0xdc, 0xd2, |
|
| 4071 |
+ 0x1e, 0x23, 0xef, 0x8c, 0x1b, 0x86, 0x4c, 0xc6, 0x23, 0xdb, 0x49, 0x95, 0x9e, 0x10, 0x3f, 0x80, |
|
| 4072 |
+ 0x9f, 0xc0, 0x5f, 0xe1, 0xba, 0x07, 0x0e, 0xdc, 0xe0, 0x80, 0x22, 0x36, 0x07, 0x24, 0x6e, 0xfc, |
|
| 4073 |
+ 0x04, 0x64, 0x8f, 0x27, 0x99, 0x55, 0x26, 0xcb, 0x56, 0xaa, 0xf6, 0xe6, 0x37, 0x7e, 0x9e, 0xc7, |
|
| 4074 |
+ 0xef, 0x97, 0xdf, 0x71, 0xa0, 0xca, 0x8e, 0xbe, 0xa7, 0xae, 0x14, 0xad, 0x88, 0x33, 0xc9, 0x10, |
|
| 4075 |
+ 0xf2, 0x98, 0x3b, 0xa6, 0xbc, 0x25, 0x9e, 0x13, 0x3e, 0x19, 0xfb, 0xb2, 0x35, 0xfb, 0xa0, 0x56, |
|
| 4076 |
+ 0x91, 0xf3, 0x88, 0x1a, 0x40, 0xad, 0x22, 0x22, 0xea, 0x26, 0xc6, 0x55, 0xe9, 0x4f, 0xa8, 0x90, |
|
| 4077 |
+ 0x64, 0x12, 0xdd, 0x5c, 0xad, 0xcc, 0xd6, 0xe5, 0x11, 0x1b, 0x31, 0xbd, 0xbc, 0xa9, 0x56, 0xf1, |
|
| 4078 |
+ 0xaf, 0xcd, 0x5f, 0x2c, 0xb0, 0x1f, 0x50, 0x49, 0xd0, 0xa7, 0x50, 0x9c, 0x51, 0x2e, 0x7c, 0x16, |
|
| 4079 |
+ 0x3a, 0xd6, 0x81, 0x75, 0xbd, 0x72, 0xeb, 0xad, 0xd6, 0xe6, 0xc9, 0xad, 0x27, 0x31, 0xa4, 0x63, |
|
| 4080 |
+ 0x1f, 0x2f, 0x1a, 0x97, 0x70, 0xc2, 0x40, 0x77, 0x01, 0x5c, 0x4e, 0x89, 0xa4, 0xde, 0x90, 0x48, |
|
| 4081 |
+ 0x27, 0xa7, 0xf9, 0x6f, 0x67, 0xf1, 0x1f, 0x27, 0x4e, 0xe1, 0xb2, 0x21, 0xb4, 0xa5, 0x62, 0x4f, |
|
| 4082 |
+ 0x23, 0x2f, 0x61, 0xe7, 0xcf, 0xc5, 0x36, 0x84, 0xb6, 0x6c, 0xfe, 0x93, 0x07, 0xfb, 0x6b, 0xe6, |
|
| 4083 |
+ 0x51, 0x74, 0x05, 0x72, 0xbe, 0xa7, 0x9d, 0x2f, 0x77, 0x0a, 0xcb, 0x45, 0x23, 0x37, 0xe8, 0xe1, |
|
| 4084 |
+ 0x9c, 0xef, 0xa1, 0x5b, 0x60, 0x4f, 0xa8, 0x24, 0xc6, 0x2d, 0x27, 0x4b, 0x58, 0x65, 0xc0, 0xc4, |
|
| 4085 |
+ 0xa4, 0xb1, 0xe8, 0x63, 0xb0, 0x55, 0x5a, 0x8d, 0x33, 0xfb, 0x59, 0x1c, 0x75, 0xe6, 0xa3, 0x88, |
|
| 4086 |
+ 0xba, 0x09, 0x4f, 0xe1, 0x51, 0x1f, 0x2a, 0x1e, 0x15, 0x2e, 0xf7, 0x23, 0xa9, 0x32, 0x69, 0x6b, |
|
| 4087 |
+ 0xfa, 0xb5, 0x6d, 0xf4, 0xde, 0x1a, 0x8a, 0xd3, 0x3c, 0x74, 0x17, 0x0a, 0x42, 0x12, 0x39, 0x15, |
|
| 4088 |
+ 0xce, 0x8e, 0x56, 0xa8, 0x6f, 0x75, 0x40, 0xa3, 0x8c, 0x0b, 0x86, 0x83, 0xbe, 0x84, 0xbd, 0x09, |
|
| 4089 |
+ 0x09, 0xc9, 0x88, 0xf2, 0xa1, 0x51, 0x29, 0x68, 0x95, 0x77, 0x32, 0x43, 0x8f, 0x91, 0xb1, 0x10, |
|
| 4090 |
+ 0xae, 0x4e, 0xd2, 0x26, 0xea, 0x03, 0x10, 0x29, 0x89, 0xfb, 0xdd, 0x84, 0x86, 0xd2, 0x29, 0x6a, |
|
| 4091 |
+ 0x95, 0x77, 0x33, 0x7d, 0xa1, 0xf2, 0x39, 0xe3, 0xe3, 0xf6, 0x0a, 0x8c, 0x53, 0x44, 0xf4, 0x05, |
|
| 4092 |
+ 0x54, 0x5c, 0xca, 0xa5, 0xff, 0xcc, 0x77, 0x89, 0xa4, 0x4e, 0x49, 0xeb, 0x34, 0xb2, 0x74, 0xba, |
|
| 4093 |
+ 0x6b, 0x98, 0x09, 0x2a, 0xcd, 0x6c, 0xfe, 0x9e, 0x83, 0xe2, 0x23, 0xca, 0x67, 0xbe, 0xfb, 0x6a, |
|
| 4094 |
+ 0xcb, 0x7d, 0xe7, 0x54, 0xb9, 0x33, 0x3d, 0x33, 0xc7, 0x6e, 0x54, 0xfc, 0x10, 0x4a, 0x34, 0xf4, |
|
| 4095 |
+ 0x22, 0xe6, 0x87, 0xd2, 0x94, 0x3b, 0xb3, 0x5b, 0xfa, 0x06, 0x83, 0x57, 0x68, 0xd4, 0x87, 0x6a, |
|
| 4096 |
+ 0xdc, 0xc5, 0xc3, 0x53, 0xb5, 0x3e, 0xc8, 0xa2, 0x7f, 0xab, 0x81, 0xa6, 0x48, 0xbb, 0xd3, 0x94, |
|
| 4097 |
+ 0x85, 0x7a, 0x50, 0x8d, 0x38, 0x9d, 0xf9, 0x6c, 0x2a, 0x86, 0x3a, 0x88, 0xc2, 0xb9, 0x82, 0xc0, |
|
| 4098 |
+ 0xbb, 0x09, 0x4b, 0x59, 0xcd, 0x9f, 0x73, 0x50, 0x4a, 0x7c, 0x44, 0xb7, 0x4d, 0x3a, 0xac, 0xed, |
|
| 4099 |
+ 0x0e, 0x25, 0x58, 0x2d, 0x15, 0x67, 0xe2, 0x36, 0xec, 0x44, 0x8c, 0x4b, 0xe1, 0xe4, 0x0e, 0xf2, |
|
| 4100 |
+ 0xdb, 0x7a, 0xf6, 0x21, 0xe3, 0xb2, 0xcb, 0xc2, 0x67, 0xfe, 0x08, 0xc7, 0x60, 0xf4, 0x14, 0x2a, |
|
| 4101 |
+ 0x33, 0x9f, 0xcb, 0x29, 0x09, 0x86, 0x7e, 0x24, 0x9c, 0xbc, 0xe6, 0xbe, 0x77, 0xd6, 0x91, 0xad, |
|
| 4102 |
+ 0x27, 0x31, 0x7e, 0xf0, 0xb0, 0xb3, 0xb7, 0x5c, 0x34, 0x60, 0x65, 0x0a, 0x0c, 0x46, 0x6a, 0x10, |
|
| 4103 |
+ 0x89, 0xda, 0x03, 0x28, 0xaf, 0x76, 0xd0, 0x0d, 0x80, 0x30, 0x6e, 0xd1, 0xe1, 0xaa, 0x69, 0xaa, |
|
| 4104 |
+ 0xcb, 0x45, 0xa3, 0x6c, 0x1a, 0x77, 0xd0, 0xc3, 0x65, 0x03, 0x18, 0x78, 0x08, 0x81, 0x4d, 0x3c, |
|
| 4105 |
+ 0x8f, 0xeb, 0x16, 0x2a, 0x63, 0xbd, 0x6e, 0xfe, 0xba, 0x03, 0xf6, 0x63, 0x22, 0xc6, 0x17, 0x3d, |
|
| 4106 |
+ 0x66, 0xd4, 0x99, 0x1b, 0x4d, 0x77, 0x03, 0x40, 0xc4, 0xa5, 0x54, 0xe1, 0xd8, 0xeb, 0x70, 0x4c, |
|
| 4107 |
+ 0x81, 0x55, 0x38, 0x06, 0x10, 0x87, 0x23, 0x02, 0x26, 0x75, 0x7f, 0xd9, 0x58, 0xaf, 0xd1, 0x35, |
|
| 4108 |
+ 0x28, 0x86, 0xcc, 0xd3, 0xf4, 0x82, 0xa6, 0xc3, 0x72, 0xd1, 0x28, 0xa8, 0x91, 0x32, 0xe8, 0xe1, |
|
| 4109 |
+ 0x82, 0xda, 0x1a, 0x78, 0xea, 0xde, 0x92, 0x30, 0x64, 0x92, 0xa8, 0xa1, 0x24, 0xcc, 0xfd, 0xcf, |
|
| 4110 |
+ 0x6c, 0xac, 0xf6, 0x1a, 0x96, 0xdc, 0xdb, 0x14, 0x13, 0x3d, 0x81, 0x37, 0x12, 0x7f, 0xd3, 0x82, |
|
| 4111 |
+ 0xa5, 0x97, 0x11, 0x44, 0x46, 0x21, 0xb5, 0x93, 0x9a, 0x93, 0xe5, 0xed, 0x73, 0x52, 0x67, 0x30, |
|
| 4112 |
+ 0x6b, 0x4e, 0x76, 0xa0, 0xea, 0x51, 0xe1, 0x73, 0xea, 0xe9, 0x1b, 0x48, 0x1d, 0x38, 0xb0, 0xae, |
|
| 4113 |
+ 0xef, 0x6d, 0xf9, 0xf4, 0x18, 0x11, 0x8a, 0x77, 0x0d, 0x47, 0x5b, 0xa8, 0x0d, 0x25, 0xd3, 0x37, |
|
| 4114 |
+ 0xc2, 0xa9, 0xe8, 0xde, 0x3d, 0xe7, 0x7c, 0x5c, 0xd1, 0x4e, 0x4d, 0x90, 0xdd, 0x97, 0x9a, 0x20, |
|
| 4115 |
+ 0x77, 0x00, 0x02, 0x36, 0x1a, 0x7a, 0xdc, 0x9f, 0x51, 0xee, 0x54, 0x35, 0xb7, 0x96, 0xc5, 0xed, |
|
| 4116 |
+ 0x69, 0x04, 0x2e, 0x07, 0x6c, 0x14, 0x2f, 0x9b, 0x3f, 0x5a, 0xf0, 0xfa, 0x86, 0x53, 0xe8, 0x23, |
|
| 4117 |
+ 0x28, 0x1a, 0xb7, 0xce, 0x7a, 0x04, 0x18, 0x1e, 0x4e, 0xb0, 0x68, 0x1f, 0xca, 0xea, 0x8e, 0x50, |
|
| 4118 |
+ 0x21, 0x68, 0x7c, 0xfb, 0xcb, 0x78, 0xfd, 0x03, 0x72, 0xa0, 0x48, 0x02, 0x9f, 0xa8, 0xbd, 0xbc, |
|
| 4119 |
+ 0xde, 0x4b, 0xcc, 0xe6, 0x4f, 0x39, 0x28, 0x1a, 0xb1, 0x8b, 0x1e, 0xe7, 0xe6, 0xd8, 0x8d, 0x9b, |
|
| 4120 |
+ 0x75, 0x0f, 0x76, 0xe3, 0x74, 0x9a, 0x96, 0xb0, 0xff, 0x37, 0xa9, 0x95, 0x18, 0x1f, 0xb7, 0xc3, |
|
| 4121 |
+ 0x3d, 0xb0, 0xfd, 0x88, 0x4c, 0xcc, 0x28, 0xcf, 0x3c, 0x79, 0xf0, 0xb0, 0xfd, 0xe0, 0x9b, 0x28, |
|
| 4122 |
+ 0xee, 0xec, 0xd2, 0x72, 0xd1, 0xb0, 0xd5, 0x0f, 0x58, 0xd3, 0x9a, 0x7f, 0xda, 0x50, 0xec, 0x06, |
|
| 4123 |
+ 0x53, 0x21, 0x29, 0xbf, 0xe8, 0x84, 0x98, 0x63, 0x37, 0x12, 0xd2, 0x85, 0x22, 0x67, 0x4c, 0x0e, |
|
| 4124 |
+ 0x5d, 0x72, 0x56, 0x2e, 0x30, 0x63, 0xb2, 0xdb, 0xee, 0xec, 0x29, 0xa2, 0x1a, 0x24, 0xb1, 0x8d, |
|
| 4125 |
+ 0x0b, 0x8a, 0xda, 0x25, 0xe8, 0x29, 0x5c, 0x49, 0xc6, 0xef, 0x11, 0x63, 0x52, 0x48, 0x4e, 0xa2, |
|
| 4126 |
+ 0xe1, 0x98, 0xce, 0xd5, 0x37, 0x2f, 0xbf, 0xed, 0x65, 0xd2, 0x0f, 0x5d, 0x3e, 0xd7, 0x89, 0xba, |
|
| 4127 |
+ 0x4f, 0xe7, 0xf8, 0xb2, 0x11, 0xe8, 0x24, 0xfc, 0xfb, 0x74, 0x2e, 0xd0, 0x67, 0xb0, 0x4f, 0x57, |
|
| 4128 |
+ 0x30, 0xa5, 0x38, 0x0c, 0xc8, 0x44, 0x7d, 0x58, 0x86, 0x6e, 0xc0, 0xdc, 0xb1, 0x9e, 0x6d, 0x36, |
|
| 4129 |
+ 0xbe, 0x4a, 0xd3, 0x52, 0x5f, 0xc5, 0x88, 0xae, 0x02, 0x20, 0x01, 0xce, 0x51, 0x40, 0xdc, 0x71, |
|
| 4130 |
+ 0xe0, 0x0b, 0xf5, 0xfe, 0x4c, 0x3d, 0x36, 0xd4, 0x78, 0x52, 0xbe, 0x1d, 0x9e, 0x91, 0xad, 0x56, |
|
| 4131 |
+ 0x67, 0xcd, 0x4d, 0x3d, 0x5d, 0x44, 0x3f, 0x94, 0x7c, 0x8e, 0xdf, 0x3c, 0xca, 0xde, 0xad, 0xcd, |
|
| 4132 |
+ 0x60, 0xff, 0x2c, 0x22, 0x7a, 0x0d, 0xf2, 0x63, 0x3a, 0x8f, 0x6b, 0x8f, 0xd5, 0x12, 0x7d, 0x0e, |
|
| 4133 |
+ 0x3b, 0x33, 0x12, 0x4c, 0xa9, 0xa9, 0xfa, 0xfb, 0x59, 0x3e, 0x65, 0x4b, 0xe2, 0x98, 0xf8, 0x49, |
|
| 4134 |
+ 0xee, 0xd0, 0x6a, 0xfe, 0x6d, 0x41, 0xe1, 0x11, 0x75, 0x39, 0x95, 0xaf, 0xb4, 0xbb, 0x0e, 0x4f, |
|
| 4135 |
+ 0x75, 0x57, 0x3d, 0xfb, 0xe1, 0xa1, 0x4e, 0xdd, 0x68, 0xae, 0x2b, 0x50, 0xf0, 0xfc, 0x11, 0x15, |
|
| 4136 |
+ 0xf1, 0xd3, 0xa9, 0x8c, 0x8d, 0x85, 0x9a, 0x60, 0x0b, 0xff, 0x05, 0xd5, 0xd7, 0x28, 0x1f, 0x7f, |
|
| 4137 |
+ 0xe5, 0x8d, 0x82, 0xff, 0x82, 0x62, 0xbd, 0x87, 0x6a, 0x50, 0xf2, 0x43, 0x49, 0x79, 0x48, 0x02, |
|
| 4138 |
+ 0x5d, 0xe6, 0x12, 0x5e, 0xd9, 0x9d, 0xfd, 0xe3, 0x93, 0xfa, 0xa5, 0x3f, 0x4e, 0xea, 0x97, 0xfe, |
|
| 4139 |
+ 0x3d, 0xa9, 0x5b, 0x3f, 0x2c, 0xeb, 0xd6, 0xf1, 0xb2, 0x6e, 0xfd, 0xb6, 0xac, 0x5b, 0x7f, 0x2d, |
|
| 4140 |
+ 0xeb, 0xd6, 0x51, 0x41, 0xff, 0xf5, 0xf9, 0xf0, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x40, 0xcf, |
|
| 4141 |
+ 0x57, 0x63, 0x6a, 0x0d, 0x00, 0x00, |
|
| 4139 | 4142 |
} |
| ... | ... |
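The fileDescriptorObjects blob rewritten above is just the gzipped FileDescriptorProto for objects.proto, so it changes whenever the schema does. If you ever need to inspect one of these dumps, it can be decompressed and decoded with the gogo packages the generated code already imports; a rough sketch:

package descutil

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// DumpDescriptor decompresses a gzipped FileDescriptorProto (the format of
// fileDescriptorObjects) and prints the names of the messages it declares.
func DumpDescriptor(gz []byte) error {
	r, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return err
	}
	defer r.Close()
	raw, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	fd := &descriptor.FileDescriptorProto{}
	if err := proto.Unmarshal(raw, fd); err != nil {
		return err
	}
	for _, msg := range fd.GetMessageType() {
		fmt.Println(msg.GetName())
	}
	return nil
}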
@@ -226,9 +226,10 @@ message Cluster {
|
| 226 | 226 |
// a new key is allocated on key rotation. |
| 227 | 227 |
uint64 encryption_key_lamport_clock = 6; |
| 228 | 228 |
|
| 229 |
- // RemovedNodes is the list of nodes that have been removed from the |
|
| 229 |
+ // BlacklistedCertificates tracks certificates that should no longer |
|
| 230 |
+ // be honored. It's a mapping from CN -> BlacklistedCertificate. |
|
| 230 | 231 |
// swarm. Their certificates should effectively be blacklisted. |
| 231 |
- repeated RemovedNode removed_nodes = 7; |
|
| 232 |
+ map<string, BlacklistedCertificate> blacklisted_certificates = 8; |
|
| 232 | 233 |
} |
| 233 | 234 |
|
| 234 | 235 |
// Secret represents a secret that should be passed to a container or a node, |
| ... | ... |
@@ -458,6 +458,9 @@ type ContainerSpec struct {
|
| 458 | 458 |
// |
| 459 | 459 |
// If Command and Args are provided, Args will be appended to Command. |
| 460 | 460 |
Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` |
| 461 |
+ // Hostname specifies the hostname that will be set on containers created by docker swarm. |
|
| 462 |
+ // All containers for a given service will have the same hostname |
|
| 463 |
+ Hostname string `protobuf:"bytes,14,opt,name=hostname,proto3" json:"hostname,omitempty"` |
|
| 461 | 464 |
// Env specifies the environment variables for the container in NAME=VALUE |
| 462 | 465 |
// format. These must be compliant with [IEEE Std |
| 463 | 466 |
// 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html). |
| ... | ... |
@@ -472,7 +475,10 @@ type ContainerSpec struct {
|
| 472 | 472 |
User string `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"` |
| 473 | 473 |
// Groups specifies supplementary groups available to the user. |
| 474 | 474 |
Groups []string `protobuf:"bytes,11,rep,name=groups" json:"groups,omitempty"` |
| 475 |
- Mounts []Mount `protobuf:"bytes,8,rep,name=mounts" json:"mounts"` |
|
| 475 |
+ // TTY declares that a TTY should be attached to the standard streams, |
|
| 476 |
+ // including stdin if it is still open. |
|
| 477 |
+ TTY bool `protobuf:"varint,13,opt,name=tty,proto3" json:"tty,omitempty"` |
|
| 478 |
+ Mounts []Mount `protobuf:"bytes,8,rep,name=mounts" json:"mounts"` |
|
| 476 | 479 |
// StopGracePeriod the grace period for stopping the container before |
| 477 | 480 |
// forcefully killing the container. |
| 478 | 481 |
StopGracePeriod *docker_swarmkit_v11.Duration `protobuf:"bytes,9,opt,name=stop_grace_period,json=stopGracePeriod" json:"stop_grace_period,omitempty"` |
| ... | ... |
@@ -481,6 +487,8 @@ type ContainerSpec struct {
|
| 481 | 481 |
// SecretReference contains references to zero or more secrets that |
| 482 | 482 |
// will be exposed to the container. |
| 483 | 483 |
Secrets []*SecretReference `protobuf:"bytes,12,rep,name=secrets" json:"secrets,omitempty"` |
| 484 |
+ // DNSConfig allows one to specify DNS related configuration in resolv.conf |
|
| 485 |
+ DNSConfig *ContainerSpec_DNSConfig `protobuf:"bytes,15,opt,name=dns_config,json=dnsConfig" json:"dns_config,omitempty"` |
|
| 484 | 486 |
} |
| 485 | 487 |
|
| 486 | 488 |
func (m *ContainerSpec) Reset() { *m = ContainerSpec{} }
|
| ... | ... |
@@ -502,6 +510,23 @@ func (*ContainerSpec_PullOptions) Descriptor() ([]byte, []int) {
|
| 502 | 502 |
return fileDescriptorSpecs, []int{6, 1}
|
| 503 | 503 |
} |
| 504 | 504 |
|
| 505 |
+// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) |
|
| 506 |
+// Detailed documentation is available in: |
|
| 507 |
+// http://man7.org/linux/man-pages/man5/resolv.conf.5.html |
|
| 508 |
+// TODO: domain is not supported yet |
|
| 509 |
+type ContainerSpec_DNSConfig struct {
|
|
| 510 |
+ // Nameservers specifies the IP addresses of the name servers |
|
| 511 |
+ Nameservers []string `protobuf:"bytes,1,rep,name=nameservers" json:"nameservers,omitempty"` |
|
| 512 |
+ // Search specifies the search list for host-name lookup |
|
| 513 |
+ Search []string `protobuf:"bytes,2,rep,name=search" json:"search,omitempty"` |
|
| 514 |
+ // Options allows certain internal resolver variables to be modified |
|
| 515 |
+ Options []string `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` |
|
| 516 |
+} |
|
| 517 |
+ |
|
| 518 |
+func (m *ContainerSpec_DNSConfig) Reset() { *m = ContainerSpec_DNSConfig{} }
|
|
| 519 |
+func (*ContainerSpec_DNSConfig) ProtoMessage() {}
|
|
| 520 |
+func (*ContainerSpec_DNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{6, 2} }
|
|
| 521 |
+ |
|
| 505 | 522 |
// EndpointSpec defines the properties that can be configured to |
| 506 | 523 |
// access and loadbalance the service. |
| 507 | 524 |
type EndpointSpec struct {
|
| ... | ... |
@@ -583,6 +608,7 @@ func init() {
|
| 583 | 583 |
proto.RegisterType((*NetworkAttachmentSpec)(nil), "docker.swarmkit.v1.NetworkAttachmentSpec") |
| 584 | 584 |
proto.RegisterType((*ContainerSpec)(nil), "docker.swarmkit.v1.ContainerSpec") |
| 585 | 585 |
proto.RegisterType((*ContainerSpec_PullOptions)(nil), "docker.swarmkit.v1.ContainerSpec.PullOptions") |
| 586 |
+ proto.RegisterType((*ContainerSpec_DNSConfig)(nil), "docker.swarmkit.v1.ContainerSpec.DNSConfig") |
|
| 586 | 587 |
proto.RegisterType((*EndpointSpec)(nil), "docker.swarmkit.v1.EndpointSpec") |
| 587 | 588 |
proto.RegisterType((*NetworkSpec)(nil), "docker.swarmkit.v1.NetworkSpec") |
| 588 | 589 |
proto.RegisterType((*ClusterSpec)(nil), "docker.swarmkit.v1.ClusterSpec") |
| ... | ... |
@@ -723,10 +749,13 @@ func (m *ContainerSpec) Copy() *ContainerSpec {
|
| 723 | 723 |
|
| 724 | 724 |
o := &ContainerSpec{
|
| 725 | 725 |
Image: m.Image, |
| 726 |
+ Hostname: m.Hostname, |
|
| 726 | 727 |
Dir: m.Dir, |
| 727 | 728 |
User: m.User, |
| 729 |
+ TTY: m.TTY, |
|
| 728 | 730 |
StopGracePeriod: m.StopGracePeriod.Copy(), |
| 729 | 731 |
PullOptions: m.PullOptions.Copy(), |
| 732 |
+ DNSConfig: m.DNSConfig.Copy(), |
|
| 730 | 733 |
} |
| 731 | 734 |
|
| 732 | 735 |
if m.Labels != nil {
|
| ... | ... |
@@ -785,6 +814,31 @@ func (m *ContainerSpec_PullOptions) Copy() *ContainerSpec_PullOptions {
|
| 785 | 785 |
return o |
| 786 | 786 |
} |
| 787 | 787 |
|
| 788 |
+func (m *ContainerSpec_DNSConfig) Copy() *ContainerSpec_DNSConfig {
|
|
| 789 |
+ if m == nil {
|
|
| 790 |
+ return nil |
|
| 791 |
+ } |
|
| 792 |
+ |
|
| 793 |
+ o := &ContainerSpec_DNSConfig{}
|
|
| 794 |
+ |
|
| 795 |
+ if m.Nameservers != nil {
|
|
| 796 |
+ o.Nameservers = make([]string, 0, len(m.Nameservers)) |
|
| 797 |
+ o.Nameservers = append(o.Nameservers, m.Nameservers...) |
|
| 798 |
+ } |
|
| 799 |
+ |
|
| 800 |
+ if m.Search != nil {
|
|
| 801 |
+ o.Search = make([]string, 0, len(m.Search)) |
|
| 802 |
+ o.Search = append(o.Search, m.Search...) |
|
| 803 |
+ } |
|
| 804 |
+ |
|
| 805 |
+ if m.Options != nil {
|
|
| 806 |
+ o.Options = make([]string, 0, len(m.Options)) |
|
| 807 |
+ o.Options = append(o.Options, m.Options...) |
|
| 808 |
+ } |
|
| 809 |
+ |
|
| 810 |
+ return o |
|
| 811 |
+} |
|
| 812 |
+ |
|
| 788 | 813 |
func (m *EndpointSpec) Copy() *EndpointSpec {
|
| 789 | 814 |
if m == nil {
|
| 790 | 815 |
return nil |
| ... | ... |
@@ -981,7 +1035,7 @@ func (this *ContainerSpec) GoString() string {
|
| 981 | 981 |
if this == nil {
|
| 982 | 982 |
return "nil" |
| 983 | 983 |
} |
| 984 |
- s := make([]string, 0, 16) |
|
| 984 |
+ s := make([]string, 0, 19) |
|
| 985 | 985 |
s = append(s, "&api.ContainerSpec{")
|
| 986 | 986 |
s = append(s, "Image: "+fmt.Sprintf("%#v", this.Image)+",\n")
|
| 987 | 987 |
keysForLabels := make([]string, 0, len(this.Labels)) |
| ... | ... |
@@ -999,10 +1053,12 @@ func (this *ContainerSpec) GoString() string {
|
| 999 | 999 |
} |
| 1000 | 1000 |
s = append(s, "Command: "+fmt.Sprintf("%#v", this.Command)+",\n")
|
| 1001 | 1001 |
s = append(s, "Args: "+fmt.Sprintf("%#v", this.Args)+",\n")
|
| 1002 |
+ s = append(s, "Hostname: "+fmt.Sprintf("%#v", this.Hostname)+",\n")
|
|
| 1002 | 1003 |
s = append(s, "Env: "+fmt.Sprintf("%#v", this.Env)+",\n")
|
| 1003 | 1004 |
s = append(s, "Dir: "+fmt.Sprintf("%#v", this.Dir)+",\n")
|
| 1004 | 1005 |
s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n")
|
| 1005 | 1006 |
s = append(s, "Groups: "+fmt.Sprintf("%#v", this.Groups)+",\n")
|
| 1007 |
+ s = append(s, "TTY: "+fmt.Sprintf("%#v", this.TTY)+",\n")
|
|
| 1006 | 1008 |
if this.Mounts != nil {
|
| 1007 | 1009 |
s = append(s, "Mounts: "+fmt.Sprintf("%#v", this.Mounts)+",\n")
|
| 1008 | 1010 |
} |
| ... | ... |
@@ -1015,6 +1071,9 @@ func (this *ContainerSpec) GoString() string {
|
| 1015 | 1015 |
if this.Secrets != nil {
|
| 1016 | 1016 |
s = append(s, "Secrets: "+fmt.Sprintf("%#v", this.Secrets)+",\n")
|
| 1017 | 1017 |
} |
| 1018 |
+ if this.DNSConfig != nil {
|
|
| 1019 |
+ s = append(s, "DNSConfig: "+fmt.Sprintf("%#v", this.DNSConfig)+",\n")
|
|
| 1020 |
+ } |
|
| 1018 | 1021 |
s = append(s, "}") |
| 1019 | 1022 |
return strings.Join(s, "") |
| 1020 | 1023 |
} |
| ... | ... |
@@ -1028,6 +1087,18 @@ func (this *ContainerSpec_PullOptions) GoString() string {
|
| 1028 | 1028 |
s = append(s, "}") |
| 1029 | 1029 |
return strings.Join(s, "") |
| 1030 | 1030 |
} |
| 1031 |
+func (this *ContainerSpec_DNSConfig) GoString() string {
|
|
| 1032 |
+ if this == nil {
|
|
| 1033 |
+ return "nil" |
|
| 1034 |
+ } |
|
| 1035 |
+ s := make([]string, 0, 7) |
|
| 1036 |
+ s = append(s, "&api.ContainerSpec_DNSConfig{")
|
|
| 1037 |
+ s = append(s, "Nameservers: "+fmt.Sprintf("%#v", this.Nameservers)+",\n")
|
|
| 1038 |
+ s = append(s, "Search: "+fmt.Sprintf("%#v", this.Search)+",\n")
|
|
| 1039 |
+ s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
|
|
| 1040 |
+ s = append(s, "}") |
|
| 1041 |
+ return strings.Join(s, "") |
|
| 1042 |
+} |
|
| 1031 | 1043 |
func (this *EndpointSpec) GoString() string {
|
| 1032 | 1044 |
if this == nil {
|
| 1033 | 1045 |
return "nil" |
| ... | ... |
@@ -1584,6 +1655,32 @@ func (m *ContainerSpec) MarshalTo(data []byte) (int, error) {
|
| 1584 | 1584 |
i += n |
| 1585 | 1585 |
} |
| 1586 | 1586 |
} |
| 1587 |
+ if m.TTY {
|
|
| 1588 |
+ data[i] = 0x68 |
|
| 1589 |
+ i++ |
|
| 1590 |
+ if m.TTY {
|
|
| 1591 |
+ data[i] = 1 |
|
| 1592 |
+ } else {
|
|
| 1593 |
+ data[i] = 0 |
|
| 1594 |
+ } |
|
| 1595 |
+ i++ |
|
| 1596 |
+ } |
|
| 1597 |
+ if len(m.Hostname) > 0 {
|
|
| 1598 |
+ data[i] = 0x72 |
|
| 1599 |
+ i++ |
|
| 1600 |
+ i = encodeVarintSpecs(data, i, uint64(len(m.Hostname))) |
|
| 1601 |
+ i += copy(data[i:], m.Hostname) |
|
| 1602 |
+ } |
|
| 1603 |
+ if m.DNSConfig != nil {
|
|
| 1604 |
+ data[i] = 0x7a |
|
| 1605 |
+ i++ |
|
| 1606 |
+ i = encodeVarintSpecs(data, i, uint64(m.DNSConfig.Size())) |
|
| 1607 |
+ n18, err := m.DNSConfig.MarshalTo(data[i:]) |
|
| 1608 |
+ if err != nil {
|
|
| 1609 |
+ return 0, err |
|
| 1610 |
+ } |
|
| 1611 |
+ i += n18 |
|
| 1612 |
+ } |
|
| 1587 | 1613 |
return i, nil |
| 1588 | 1614 |
} |
| 1589 | 1615 |
|
| ... | ... |
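The hard-coded bytes in the new branches above are ordinary protobuf field tags: 0x68 for TTY (field 13, varint), 0x72 for Hostname (field 14, length-delimited) and 0x7a for DNSConfig (field 15, length-delimited), each computed as field_number<<3 | wire_type. A tiny check of that arithmetic:

package main

import "fmt"

// tag builds a single-byte protobuf field tag; the single-byte form only
// holds field numbers up to 15, which covers the ContainerSpec fields here.
func tag(field, wireType uint8) uint8 {
	return field<<3 | wireType
}

func main() {
	const (
		wireVarint = 0 // TTY is a bool, encoded as a varint
		wireBytes  = 2 // Hostname and DNSConfig are length-delimited
	)
	fmt.Printf("%#x %#x %#x\n", tag(13, wireVarint), tag(14, wireBytes), tag(15, wireBytes))
	// prints: 0x68 0x72 0x7a
}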
@@ -1613,6 +1710,69 @@ func (m *ContainerSpec_PullOptions) MarshalTo(data []byte) (int, error) {
|
| 1613 | 1613 |
return i, nil |
| 1614 | 1614 |
} |
| 1615 | 1615 |
|
| 1616 |
+func (m *ContainerSpec_DNSConfig) Marshal() (data []byte, err error) {
|
|
| 1617 |
+ size := m.Size() |
|
| 1618 |
+ data = make([]byte, size) |
|
| 1619 |
+ n, err := m.MarshalTo(data) |
|
| 1620 |
+ if err != nil {
|
|
| 1621 |
+ return nil, err |
|
| 1622 |
+ } |
|
| 1623 |
+ return data[:n], nil |
|
| 1624 |
+} |
|
| 1625 |
+ |
|
| 1626 |
+func (m *ContainerSpec_DNSConfig) MarshalTo(data []byte) (int, error) {
|
|
| 1627 |
+ var i int |
|
| 1628 |
+ _ = i |
|
| 1629 |
+ var l int |
|
| 1630 |
+ _ = l |
|
| 1631 |
+ if len(m.Nameservers) > 0 {
|
|
| 1632 |
+ for _, s := range m.Nameservers {
|
|
| 1633 |
+ data[i] = 0xa |
|
| 1634 |
+ i++ |
|
| 1635 |
+ l = len(s) |
|
| 1636 |
+ for l >= 1<<7 {
|
|
| 1637 |
+ data[i] = uint8(uint64(l)&0x7f | 0x80) |
|
| 1638 |
+ l >>= 7 |
|
| 1639 |
+ i++ |
|
| 1640 |
+ } |
|
| 1641 |
+ data[i] = uint8(l) |
|
| 1642 |
+ i++ |
|
| 1643 |
+ i += copy(data[i:], s) |
|
| 1644 |
+ } |
|
| 1645 |
+ } |
|
| 1646 |
+ if len(m.Search) > 0 {
|
|
| 1647 |
+ for _, s := range m.Search {
|
|
| 1648 |
+ data[i] = 0x12 |
|
| 1649 |
+ i++ |
|
| 1650 |
+ l = len(s) |
|
| 1651 |
+ for l >= 1<<7 {
|
|
| 1652 |
+ data[i] = uint8(uint64(l)&0x7f | 0x80) |
|
| 1653 |
+ l >>= 7 |
|
| 1654 |
+ i++ |
|
| 1655 |
+ } |
|
| 1656 |
+ data[i] = uint8(l) |
|
| 1657 |
+ i++ |
|
| 1658 |
+ i += copy(data[i:], s) |
|
| 1659 |
+ } |
|
| 1660 |
+ } |
|
| 1661 |
+ if len(m.Options) > 0 {
|
|
| 1662 |
+ for _, s := range m.Options {
|
|
| 1663 |
+ data[i] = 0x1a |
|
| 1664 |
+ i++ |
|
| 1665 |
+ l = len(s) |
|
| 1666 |
+ for l >= 1<<7 {
|
|
| 1667 |
+ data[i] = uint8(uint64(l)&0x7f | 0x80) |
|
| 1668 |
+ l >>= 7 |
|
| 1669 |
+ i++ |
|
| 1670 |
+ } |
|
| 1671 |
+ data[i] = uint8(l) |
|
| 1672 |
+ i++ |
|
| 1673 |
+ i += copy(data[i:], s) |
|
| 1674 |
+ } |
|
| 1675 |
+ } |
|
| 1676 |
+ return i, nil |
|
| 1677 |
+} |
|
| 1678 |
+ |
|
| 1616 | 1679 |
func (m *EndpointSpec) Marshal() (data []byte, err error) {
|
| 1617 | 1680 |
size := m.Size() |
| 1618 | 1681 |
data = make([]byte, size) |
| ... | ... |
@@ -1666,20 +1826,20 @@ func (m *NetworkSpec) MarshalTo(data []byte) (int, error) {
|
| 1666 | 1666 |
data[i] = 0xa |
| 1667 | 1667 |
i++ |
| 1668 | 1668 |
i = encodeVarintSpecs(data, i, uint64(m.Annotations.Size())) |
| 1669 |
- n18, err := m.Annotations.MarshalTo(data[i:]) |
|
| 1669 |
+ n19, err := m.Annotations.MarshalTo(data[i:]) |
|
| 1670 | 1670 |
if err != nil {
|
| 1671 | 1671 |
return 0, err |
| 1672 | 1672 |
} |
| 1673 |
- i += n18 |
|
| 1673 |
+ i += n19 |
|
| 1674 | 1674 |
if m.DriverConfig != nil {
|
| 1675 | 1675 |
data[i] = 0x12 |
| 1676 | 1676 |
i++ |
| 1677 | 1677 |
i = encodeVarintSpecs(data, i, uint64(m.DriverConfig.Size())) |
| 1678 |
- n19, err := m.DriverConfig.MarshalTo(data[i:]) |
|
| 1678 |
+ n20, err := m.DriverConfig.MarshalTo(data[i:]) |
|
| 1679 | 1679 |
if err != nil {
|
| 1680 | 1680 |
return 0, err |
| 1681 | 1681 |
} |
| 1682 |
- i += n19 |
|
| 1682 |
+ i += n20 |
|
| 1683 | 1683 |
} |
| 1684 | 1684 |
if m.Ipv6Enabled {
|
| 1685 | 1685 |
data[i] = 0x18 |
| ... | ... |
@@ -1705,11 +1865,11 @@ func (m *NetworkSpec) MarshalTo(data []byte) (int, error) {
|
| 1705 | 1705 |
data[i] = 0x2a |
| 1706 | 1706 |
i++ |
| 1707 | 1707 |
i = encodeVarintSpecs(data, i, uint64(m.IPAM.Size())) |
| 1708 |
- n20, err := m.IPAM.MarshalTo(data[i:]) |
|
| 1708 |
+ n21, err := m.IPAM.MarshalTo(data[i:]) |
|
| 1709 | 1709 |
if err != nil {
|
| 1710 | 1710 |
return 0, err |
| 1711 | 1711 |
} |
| 1712 |
- i += n20 |
|
| 1712 |
+ i += n21 |
|
| 1713 | 1713 |
} |
| 1714 | 1714 |
if m.Attachable {
|
| 1715 | 1715 |
data[i] = 0x30 |
| ... | ... |
@@ -1742,59 +1902,59 @@ func (m *ClusterSpec) MarshalTo(data []byte) (int, error) {
|
| 1742 | 1742 |
data[i] = 0xa |
| 1743 | 1743 |
i++ |
| 1744 | 1744 |
i = encodeVarintSpecs(data, i, uint64(m.Annotations.Size())) |
| 1745 |
- n21, err := m.Annotations.MarshalTo(data[i:]) |
|
| 1745 |
+ n22, err := m.Annotations.MarshalTo(data[i:]) |
|
| 1746 | 1746 |
if err != nil {
|
| 1747 | 1747 |
return 0, err |
| 1748 | 1748 |
} |
| 1749 |
- i += n21 |
|
| 1749 |
+ i += n22 |
|
| 1750 | 1750 |
data[i] = 0x12 |
| 1751 | 1751 |
i++ |
| 1752 | 1752 |
i = encodeVarintSpecs(data, i, uint64(m.AcceptancePolicy.Size())) |
| 1753 |
- n22, err := m.AcceptancePolicy.MarshalTo(data[i:]) |
|
| 1753 |
+ n23, err := m.AcceptancePolicy.MarshalTo(data[i:]) |
|
| 1754 | 1754 |
if err != nil {
|
| 1755 | 1755 |
return 0, err |
| 1756 | 1756 |
} |
| 1757 |
- i += n22 |
|
| 1757 |
+ i += n23 |
|
| 1758 | 1758 |
data[i] = 0x1a |
| 1759 | 1759 |
i++ |
| 1760 | 1760 |
i = encodeVarintSpecs(data, i, uint64(m.Orchestration.Size())) |
| 1761 |
- n23, err := m.Orchestration.MarshalTo(data[i:]) |
|
| 1761 |
+ n24, err := m.Orchestration.MarshalTo(data[i:]) |
|
| 1762 | 1762 |
if err != nil {
|
| 1763 | 1763 |
return 0, err |
| 1764 | 1764 |
} |
| 1765 |
- i += n23 |
|
| 1765 |
+ i += n24 |
|
| 1766 | 1766 |
data[i] = 0x22 |
| 1767 | 1767 |
i++ |
| 1768 | 1768 |
i = encodeVarintSpecs(data, i, uint64(m.Raft.Size())) |
| 1769 |
- n24, err := m.Raft.MarshalTo(data[i:]) |
|
| 1769 |
+ n25, err := m.Raft.MarshalTo(data[i:]) |
|
| 1770 | 1770 |
if err != nil {
|
| 1771 | 1771 |
return 0, err |
| 1772 | 1772 |
} |
| 1773 |
- i += n24 |
|
| 1773 |
+ i += n25 |
|
| 1774 | 1774 |
data[i] = 0x2a |
| 1775 | 1775 |
i++ |
| 1776 | 1776 |
i = encodeVarintSpecs(data, i, uint64(m.Dispatcher.Size())) |
| 1777 |
- n25, err := m.Dispatcher.MarshalTo(data[i:]) |
|
| 1777 |
+ n26, err := m.Dispatcher.MarshalTo(data[i:]) |
|
| 1778 | 1778 |
if err != nil {
|
| 1779 | 1779 |
return 0, err |
| 1780 | 1780 |
} |
| 1781 |
- i += n25 |
|
| 1781 |
+ i += n26 |
|
| 1782 | 1782 |
data[i] = 0x32 |
| 1783 | 1783 |
i++ |
| 1784 | 1784 |
i = encodeVarintSpecs(data, i, uint64(m.CAConfig.Size())) |
| 1785 |
- n26, err := m.CAConfig.MarshalTo(data[i:]) |
|
| 1785 |
+ n27, err := m.CAConfig.MarshalTo(data[i:]) |
|
| 1786 | 1786 |
if err != nil {
|
| 1787 | 1787 |
return 0, err |
| 1788 | 1788 |
} |
| 1789 |
- i += n26 |
|
| 1789 |
+ i += n27 |
|
| 1790 | 1790 |
data[i] = 0x3a |
| 1791 | 1791 |
i++ |
| 1792 | 1792 |
i = encodeVarintSpecs(data, i, uint64(m.TaskDefaults.Size())) |
| 1793 |
- n27, err := m.TaskDefaults.MarshalTo(data[i:]) |
|
| 1793 |
+ n28, err := m.TaskDefaults.MarshalTo(data[i:]) |
|
| 1794 | 1794 |
if err != nil {
|
| 1795 | 1795 |
return 0, err |
| 1796 | 1796 |
} |
| 1797 |
- i += n27 |
|
| 1797 |
+ i += n28 |
|
| 1798 | 1798 |
return i, nil |
| 1799 | 1799 |
} |
| 1800 | 1800 |
|
| ... | ... |
@@ -1816,11 +1976,11 @@ func (m *SecretSpec) MarshalTo(data []byte) (int, error) {
|
| 1816 | 1816 |
data[i] = 0xa |
| 1817 | 1817 |
i++ |
| 1818 | 1818 |
i = encodeVarintSpecs(data, i, uint64(m.Annotations.Size())) |
| 1819 |
- n28, err := m.Annotations.MarshalTo(data[i:]) |
|
| 1819 |
+ n29, err := m.Annotations.MarshalTo(data[i:]) |
|
| 1820 | 1820 |
if err != nil {
|
| 1821 | 1821 |
return 0, err |
| 1822 | 1822 |
} |
| 1823 |
- i += n28 |
|
| 1823 |
+ i += n29 |
|
| 1824 | 1824 |
if len(m.Data) > 0 {
|
| 1825 | 1825 |
data[i] = 0x12 |
| 1826 | 1826 |
i++ |
| ... | ... |
@@ -2064,6 +2224,17 @@ func (m *ContainerSpec) Size() (n int) {
|
| 2064 | 2064 |
n += 1 + l + sovSpecs(uint64(l)) |
| 2065 | 2065 |
} |
| 2066 | 2066 |
} |
| 2067 |
+ if m.TTY {
|
|
| 2068 |
+ n += 2 |
|
| 2069 |
+ } |
|
| 2070 |
+ l = len(m.Hostname) |
|
| 2071 |
+ if l > 0 {
|
|
| 2072 |
+ n += 1 + l + sovSpecs(uint64(l)) |
|
| 2073 |
+ } |
|
| 2074 |
+ if m.DNSConfig != nil {
|
|
| 2075 |
+ l = m.DNSConfig.Size() |
|
| 2076 |
+ n += 1 + l + sovSpecs(uint64(l)) |
|
| 2077 |
+ } |
|
| 2067 | 2078 |
return n |
| 2068 | 2079 |
} |
| 2069 | 2080 |
|
| ... | ... |
@@ -2077,6 +2248,30 @@ func (m *ContainerSpec_PullOptions) Size() (n int) {
|
| 2077 | 2077 |
return n |
| 2078 | 2078 |
} |
| 2079 | 2079 |
|
| 2080 |
+func (m *ContainerSpec_DNSConfig) Size() (n int) {
|
|
| 2081 |
+ var l int |
|
| 2082 |
+ _ = l |
|
| 2083 |
+ if len(m.Nameservers) > 0 {
|
|
| 2084 |
+ for _, s := range m.Nameservers {
|
|
| 2085 |
+ l = len(s) |
|
| 2086 |
+ n += 1 + l + sovSpecs(uint64(l)) |
|
| 2087 |
+ } |
|
| 2088 |
+ } |
|
| 2089 |
+ if len(m.Search) > 0 {
|
|
| 2090 |
+ for _, s := range m.Search {
|
|
| 2091 |
+ l = len(s) |
|
| 2092 |
+ n += 1 + l + sovSpecs(uint64(l)) |
|
| 2093 |
+ } |
|
| 2094 |
+ } |
|
| 2095 |
+ if len(m.Options) > 0 {
|
|
| 2096 |
+ for _, s := range m.Options {
|
|
| 2097 |
+ l = len(s) |
|
| 2098 |
+ n += 1 + l + sovSpecs(uint64(l)) |
|
| 2099 |
+ } |
|
| 2100 |
+ } |
|
| 2101 |
+ return n |
|
| 2102 |
+} |
|
| 2103 |
+ |
|
| 2080 | 2104 |
func (m *EndpointSpec) Size() (n int) {
|
| 2081 | 2105 |
var l int |
| 2082 | 2106 |
_ = l |
| ... | ... |
@@ -2302,6 +2497,9 @@ func (this *ContainerSpec) String() string {
|
| 2302 | 2302 |
`PullOptions:` + strings.Replace(fmt.Sprintf("%v", this.PullOptions), "ContainerSpec_PullOptions", "ContainerSpec_PullOptions", 1) + `,`,
|
| 2303 | 2303 |
`Groups:` + fmt.Sprintf("%v", this.Groups) + `,`,
|
| 2304 | 2304 |
`Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "SecretReference", "SecretReference", 1) + `,`,
|
| 2305 |
+ `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`,
|
|
| 2306 |
+ `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`,
|
|
| 2307 |
+ `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "ContainerSpec_DNSConfig", "ContainerSpec_DNSConfig", 1) + `,`,
|
|
| 2305 | 2308 |
`}`, |
| 2306 | 2309 |
}, "") |
| 2307 | 2310 |
return s |
| ... | ... |
@@ -2316,6 +2514,18 @@ func (this *ContainerSpec_PullOptions) String() string {
|
| 2316 | 2316 |
}, "") |
| 2317 | 2317 |
return s |
| 2318 | 2318 |
} |
| 2319 |
+func (this *ContainerSpec_DNSConfig) String() string {
|
|
| 2320 |
+ if this == nil {
|
|
| 2321 |
+ return "nil" |
|
| 2322 |
+ } |
|
| 2323 |
+ s := strings.Join([]string{`&ContainerSpec_DNSConfig{`,
|
|
| 2324 |
+ `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`,
|
|
| 2325 |
+ `Search:` + fmt.Sprintf("%v", this.Search) + `,`,
|
|
| 2326 |
+ `Options:` + fmt.Sprintf("%v", this.Options) + `,`,
|
|
| 2327 |
+ `}`, |
|
| 2328 |
+ }, "") |
|
| 2329 |
+ return s |
|
| 2330 |
+} |
|
| 2319 | 2331 |
func (this *EndpointSpec) String() string {
|
| 2320 | 2332 |
if this == nil {
|
| 2321 | 2333 |
return "nil" |
| ... | ... |
@@ -3755,6 +3965,88 @@ func (m *ContainerSpec) Unmarshal(data []byte) error {
|
| 3755 | 3755 |
return err |
| 3756 | 3756 |
} |
| 3757 | 3757 |
iNdEx = postIndex |
| 3758 |
+ case 13: |
|
| 3759 |
+ if wireType != 0 {
|
|
| 3760 |
+ return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
|
|
| 3761 |
+ } |
|
| 3762 |
+ var v int |
|
| 3763 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 3764 |
+ if shift >= 64 {
|
|
| 3765 |
+ return ErrIntOverflowSpecs |
|
| 3766 |
+ } |
|
| 3767 |
+ if iNdEx >= l {
|
|
| 3768 |
+ return io.ErrUnexpectedEOF |
|
| 3769 |
+ } |
|
| 3770 |
+ b := data[iNdEx] |
|
| 3771 |
+ iNdEx++ |
|
| 3772 |
+ v |= (int(b) & 0x7F) << shift |
|
| 3773 |
+ if b < 0x80 {
|
|
| 3774 |
+ break |
|
| 3775 |
+ } |
|
| 3776 |
+ } |
|
| 3777 |
+ m.TTY = bool(v != 0) |
|
| 3778 |
+ case 14: |
|
| 3779 |
+ if wireType != 2 {
|
|
| 3780 |
+ return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
|
|
| 3781 |
+ } |
|
| 3782 |
+ var stringLen uint64 |
|
| 3783 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 3784 |
+ if shift >= 64 {
|
|
| 3785 |
+ return ErrIntOverflowSpecs |
|
| 3786 |
+ } |
|
| 3787 |
+ if iNdEx >= l {
|
|
| 3788 |
+ return io.ErrUnexpectedEOF |
|
| 3789 |
+ } |
|
| 3790 |
+ b := data[iNdEx] |
|
| 3791 |
+ iNdEx++ |
|
| 3792 |
+ stringLen |= (uint64(b) & 0x7F) << shift |
|
| 3793 |
+ if b < 0x80 {
|
|
| 3794 |
+ break |
|
| 3795 |
+ } |
|
| 3796 |
+ } |
|
| 3797 |
+ intStringLen := int(stringLen) |
|
| 3798 |
+ if intStringLen < 0 {
|
|
| 3799 |
+ return ErrInvalidLengthSpecs |
|
| 3800 |
+ } |
|
| 3801 |
+ postIndex := iNdEx + intStringLen |
|
| 3802 |
+ if postIndex > l {
|
|
| 3803 |
+ return io.ErrUnexpectedEOF |
|
| 3804 |
+ } |
|
| 3805 |
+ m.Hostname = string(data[iNdEx:postIndex]) |
|
| 3806 |
+ iNdEx = postIndex |
|
| 3807 |
+ case 15: |
|
| 3808 |
+ if wireType != 2 {
|
|
| 3809 |
+ return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType)
|
|
| 3810 |
+ } |
|
| 3811 |
+ var msglen int |
|
| 3812 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 3813 |
+ if shift >= 64 {
|
|
| 3814 |
+ return ErrIntOverflowSpecs |
|
| 3815 |
+ } |
|
| 3816 |
+ if iNdEx >= l {
|
|
| 3817 |
+ return io.ErrUnexpectedEOF |
|
| 3818 |
+ } |
|
| 3819 |
+ b := data[iNdEx] |
|
| 3820 |
+ iNdEx++ |
|
| 3821 |
+ msglen |= (int(b) & 0x7F) << shift |
|
| 3822 |
+ if b < 0x80 {
|
|
| 3823 |
+ break |
|
| 3824 |
+ } |
|
| 3825 |
+ } |
|
| 3826 |
+ if msglen < 0 {
|
|
| 3827 |
+ return ErrInvalidLengthSpecs |
|
| 3828 |
+ } |
|
| 3829 |
+ postIndex := iNdEx + msglen |
|
| 3830 |
+ if postIndex > l {
|
|
| 3831 |
+ return io.ErrUnexpectedEOF |
|
| 3832 |
+ } |
|
| 3833 |
+ if m.DNSConfig == nil {
|
|
| 3834 |
+ m.DNSConfig = &ContainerSpec_DNSConfig{}
|
|
| 3835 |
+ } |
|
| 3836 |
+ if err := m.DNSConfig.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
|
| 3837 |
+ return err |
|
| 3838 |
+ } |
|
| 3839 |
+ iNdEx = postIndex |
|
| 3758 | 3840 |
default: |
| 3759 | 3841 |
iNdEx = preIndex |
| 3760 | 3842 |
skippy, err := skipSpecs(data[iNdEx:]) |
| ... | ... |
@@ -3855,6 +4147,143 @@ func (m *ContainerSpec_PullOptions) Unmarshal(data []byte) error {
|
| 3855 | 3855 |
} |
| 3856 | 3856 |
return nil |
| 3857 | 3857 |
} |
| 3858 |
+func (m *ContainerSpec_DNSConfig) Unmarshal(data []byte) error {
|
|
| 3859 |
+ l := len(data) |
|
| 3860 |
+ iNdEx := 0 |
|
| 3861 |
+ for iNdEx < l {
|
|
| 3862 |
+ preIndex := iNdEx |
|
| 3863 |
+ var wire uint64 |
|
| 3864 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 3865 |
+ if shift >= 64 {
|
|
| 3866 |
+ return ErrIntOverflowSpecs |
|
| 3867 |
+ } |
|
| 3868 |
+ if iNdEx >= l {
|
|
| 3869 |
+ return io.ErrUnexpectedEOF |
|
| 3870 |
+ } |
|
| 3871 |
+ b := data[iNdEx] |
|
| 3872 |
+ iNdEx++ |
|
| 3873 |
+ wire |= (uint64(b) & 0x7F) << shift |
|
| 3874 |
+ if b < 0x80 {
|
|
| 3875 |
+ break |
|
| 3876 |
+ } |
|
| 3877 |
+ } |
|
| 3878 |
+ fieldNum := int32(wire >> 3) |
|
| 3879 |
+ wireType := int(wire & 0x7) |
|
| 3880 |
+ if wireType == 4 {
|
|
| 3881 |
+ return fmt.Errorf("proto: DNSConfig: wiretype end group for non-group")
|
|
| 3882 |
+ } |
|
| 3883 |
+ if fieldNum <= 0 {
|
|
| 3884 |
+ return fmt.Errorf("proto: DNSConfig: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
| 3885 |
+ } |
|
| 3886 |
+ switch fieldNum {
|
|
| 3887 |
+ case 1: |
|
| 3888 |
+ if wireType != 2 {
|
|
| 3889 |
+ return fmt.Errorf("proto: wrong wireType = %d for field Nameservers", wireType)
|
|
| 3890 |
+ } |
|
| 3891 |
+ var stringLen uint64 |
|
| 3892 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 3893 |
+ if shift >= 64 {
|
|
| 3894 |
+ return ErrIntOverflowSpecs |
|
| 3895 |
+ } |
|
| 3896 |
+ if iNdEx >= l {
|
|
| 3897 |
+ return io.ErrUnexpectedEOF |
|
| 3898 |
+ } |
|
| 3899 |
+ b := data[iNdEx] |
|
| 3900 |
+ iNdEx++ |
|
| 3901 |
+ stringLen |= (uint64(b) & 0x7F) << shift |
|
| 3902 |
+ if b < 0x80 {
|
|
| 3903 |
+ break |
|
| 3904 |
+ } |
|
| 3905 |
+ } |
|
| 3906 |
+ intStringLen := int(stringLen) |
|
| 3907 |
+ if intStringLen < 0 {
|
|
| 3908 |
+ return ErrInvalidLengthSpecs |
|
| 3909 |
+ } |
|
| 3910 |
+ postIndex := iNdEx + intStringLen |
|
| 3911 |
+ if postIndex > l {
|
|
| 3912 |
+ return io.ErrUnexpectedEOF |
|
| 3913 |
+ } |
|
| 3914 |
+ m.Nameservers = append(m.Nameservers, string(data[iNdEx:postIndex])) |
|
| 3915 |
+ iNdEx = postIndex |
|
| 3916 |
+ case 2: |
|
| 3917 |
+ if wireType != 2 {
|
|
| 3918 |
+ return fmt.Errorf("proto: wrong wireType = %d for field Search", wireType)
|
|
| 3919 |
+ } |
|
| 3920 |
+ var stringLen uint64 |
|
| 3921 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 3922 |
+ if shift >= 64 {
|
|
| 3923 |
+ return ErrIntOverflowSpecs |
|
| 3924 |
+ } |
|
| 3925 |
+ if iNdEx >= l {
|
|
| 3926 |
+ return io.ErrUnexpectedEOF |
|
| 3927 |
+ } |
|
| 3928 |
+ b := data[iNdEx] |
|
| 3929 |
+ iNdEx++ |
|
| 3930 |
+ stringLen |= (uint64(b) & 0x7F) << shift |
|
| 3931 |
+ if b < 0x80 {
|
|
| 3932 |
+ break |
|
| 3933 |
+ } |
|
| 3934 |
+ } |
|
| 3935 |
+ intStringLen := int(stringLen) |
|
| 3936 |
+ if intStringLen < 0 {
|
|
| 3937 |
+ return ErrInvalidLengthSpecs |
|
| 3938 |
+ } |
|
| 3939 |
+ postIndex := iNdEx + intStringLen |
|
| 3940 |
+ if postIndex > l {
|
|
| 3941 |
+ return io.ErrUnexpectedEOF |
|
| 3942 |
+ } |
|
| 3943 |
+ m.Search = append(m.Search, string(data[iNdEx:postIndex])) |
|
| 3944 |
+ iNdEx = postIndex |
|
| 3945 |
+ case 3: |
|
| 3946 |
+ if wireType != 2 {
|
|
| 3947 |
+ return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
|
|
| 3948 |
+ } |
|
| 3949 |
+ var stringLen uint64 |
|
| 3950 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 3951 |
+ if shift >= 64 {
|
|
| 3952 |
+ return ErrIntOverflowSpecs |
|
| 3953 |
+ } |
|
| 3954 |
+ if iNdEx >= l {
|
|
| 3955 |
+ return io.ErrUnexpectedEOF |
|
| 3956 |
+ } |
|
| 3957 |
+ b := data[iNdEx] |
|
| 3958 |
+ iNdEx++ |
|
| 3959 |
+ stringLen |= (uint64(b) & 0x7F) << shift |
|
| 3960 |
+ if b < 0x80 {
|
|
| 3961 |
+ break |
|
| 3962 |
+ } |
|
| 3963 |
+ } |
|
| 3964 |
+ intStringLen := int(stringLen) |
|
| 3965 |
+ if intStringLen < 0 {
|
|
| 3966 |
+ return ErrInvalidLengthSpecs |
|
| 3967 |
+ } |
|
| 3968 |
+ postIndex := iNdEx + intStringLen |
|
| 3969 |
+ if postIndex > l {
|
|
| 3970 |
+ return io.ErrUnexpectedEOF |
|
| 3971 |
+ } |
|
| 3972 |
+ m.Options = append(m.Options, string(data[iNdEx:postIndex])) |
|
| 3973 |
+ iNdEx = postIndex |
|
| 3974 |
+ default: |
|
| 3975 |
+ iNdEx = preIndex |
|
| 3976 |
+ skippy, err := skipSpecs(data[iNdEx:]) |
|
| 3977 |
+ if err != nil {
|
|
| 3978 |
+ return err |
|
| 3979 |
+ } |
|
| 3980 |
+ if skippy < 0 {
|
|
| 3981 |
+ return ErrInvalidLengthSpecs |
|
| 3982 |
+ } |
|
| 3983 |
+ if (iNdEx + skippy) > l {
|
|
| 3984 |
+ return io.ErrUnexpectedEOF |
|
| 3985 |
+ } |
|
| 3986 |
+ iNdEx += skippy |
|
| 3987 |
+ } |
|
| 3988 |
+ } |
|
| 3989 |
+ |
|
| 3990 |
+ if iNdEx > l {
|
|
| 3991 |
+ return io.ErrUnexpectedEOF |
|
| 3992 |
+ } |
|
| 3993 |
+ return nil |
|
| 3994 |
+} |
|
| 3858 | 3995 |
func (m *EndpointSpec) Unmarshal(data []byte) error {
|
| 3859 | 3996 |
l := len(data) |
| 3860 | 3997 |
iNdEx := 0 |
| ... | ... |
@@ -4640,97 +5069,103 @@ var ( |
| 4640 | 4640 |
func init() { proto.RegisterFile("specs.proto", fileDescriptorSpecs) }
|
| 4641 | 4641 |
|
| 4642 | 4642 |
var fileDescriptorSpecs = []byte{
|
| 4643 |
- // 1457 bytes of a gzipped FileDescriptorProto |
|
| 4644 |
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x57, 0x4d, 0x6f, 0x1b, 0xb9, |
|
| 4645 |
- 0x19, 0xd6, 0xd8, 0x63, 0x59, 0x7a, 0x47, 0x4e, 0x14, 0x22, 0x1f, 0x13, 0x25, 0x95, 0x15, 0x25, |
|
| 4646 |
- 0x4d, 0x9d, 0x16, 0xb5, 0x5b, 0xb5, 0x48, 0x93, 0xa6, 0x41, 0xab, 0xaf, 0x3a, 0xaa, 0x6b, 0x47, |
|
| 4647 |
- 0xa0, 0x93, 0x00, 0x3d, 0x09, 0xf4, 0x0c, 0x2d, 0x0f, 0x3c, 0x1a, 0x4e, 0x39, 0x1c, 0x05, 0xbe, |
|
| 4648 |
- 0xed, 0x31, 0xf0, 0x61, 0xff, 0x81, 0x4f, 0x0b, 0xec, 0x3f, 0xd8, 0xff, 0x90, 0xe3, 0x1e, 0xf7, |
|
| 4649 |
- 0x64, 0xac, 0x7d, 0x5f, 0x60, 0x81, 0xfd, 0x03, 0x0b, 0x72, 0x28, 0x69, 0xb4, 0x19, 0x27, 0x01, |
|
| 4650 |
- 0xd6, 0x37, 0xf2, 0xe5, 0xf3, 0xbc, 0xa4, 0xde, 0xf7, 0x99, 0x87, 0x14, 0x58, 0x51, 0x48, 0x9d, |
|
| 4651 |
- 0x68, 0x3d, 0xe4, 0x4c, 0x30, 0x84, 0x5c, 0xe6, 0x1c, 0x52, 0xbe, 0x1e, 0xbd, 0x25, 0x7c, 0x74, |
|
| 4652 |
- 0xe8, 0x89, 0xf5, 0xf1, 0x9f, 0x2b, 0x96, 0x38, 0x0a, 0xa9, 0x06, 0x54, 0xae, 0x0f, 0xd9, 0x90, |
|
| 4653 |
- 0xa9, 0xe1, 0x86, 0x1c, 0xe9, 0xe8, 0x2d, 0x37, 0xe6, 0x44, 0x78, 0x2c, 0xd8, 0x98, 0x0c, 0x92, |
|
| 4654 |
- 0x85, 0xfa, 0x97, 0x26, 0x14, 0x76, 0x98, 0x4b, 0x77, 0x43, 0xea, 0xa0, 0x4d, 0xb0, 0x48, 0x10, |
|
| 4655 |
- 0x30, 0xa1, 0x00, 0x91, 0x6d, 0xd4, 0x8c, 0x35, 0xab, 0xb1, 0xba, 0xfe, 0xe1, 0x96, 0xeb, 0xcd, |
|
| 4656 |
- 0x19, 0xac, 0x65, 0xbe, 0x3f, 0x5d, 0xcd, 0xe1, 0x34, 0x13, 0xfd, 0x09, 0x4c, 0xce, 0x7c, 0x6a, |
|
| 4657 |
- 0x2f, 0xd4, 0x8c, 0xb5, 0x2b, 0x8d, 0xbb, 0x59, 0x19, 0xe4, 0xa6, 0x98, 0xf9, 0x14, 0x2b, 0x24, |
|
| 4658 |
- 0xda, 0x04, 0x18, 0xd1, 0xd1, 0x1e, 0xe5, 0xd1, 0x81, 0x17, 0xda, 0x8b, 0x8a, 0xf7, 0xbb, 0x8b, |
|
| 4659 |
- 0x78, 0xf2, 0xb0, 0xeb, 0xdb, 0x53, 0x38, 0x4e, 0x51, 0xd1, 0x36, 0x94, 0xc8, 0x98, 0x78, 0x3e, |
|
| 4660 |
- 0xd9, 0xf3, 0x7c, 0x4f, 0x1c, 0xd9, 0xa6, 0x4a, 0xf5, 0xe8, 0xa3, 0xa9, 0x9a, 0x29, 0x02, 0x9e, |
|
| 4661 |
- 0xa3, 0xd7, 0x5d, 0x80, 0xd9, 0x46, 0xe8, 0x21, 0x2c, 0xf7, 0xbb, 0x3b, 0x9d, 0xde, 0xce, 0x66, |
|
| 4662 |
- 0x39, 0x57, 0xb9, 0x7d, 0x7c, 0x52, 0xbb, 0x21, 0x73, 0xcc, 0x00, 0x7d, 0x1a, 0xb8, 0x5e, 0x30, |
|
| 4663 |
- 0x44, 0x6b, 0x50, 0x68, 0xb6, 0xdb, 0xdd, 0xfe, 0xab, 0x6e, 0xa7, 0x6c, 0x54, 0x2a, 0xc7, 0x27, |
|
| 4664 |
- 0xb5, 0x9b, 0xf3, 0xc0, 0xa6, 0xe3, 0xd0, 0x50, 0x50, 0xb7, 0x62, 0xbe, 0xfb, 0xaa, 0x9a, 0xab, |
|
| 4665 |
- 0xbf, 0x33, 0xa0, 0x94, 0x3e, 0x04, 0x7a, 0x08, 0xf9, 0x66, 0xfb, 0x55, 0xef, 0x4d, 0xb7, 0x9c, |
|
| 4666 |
- 0x9b, 0xd1, 0xd3, 0x88, 0xa6, 0x23, 0xbc, 0x31, 0x45, 0x0f, 0x60, 0xa9, 0xdf, 0x7c, 0xbd, 0xdb, |
|
| 4667 |
- 0x2d, 0x1b, 0xb3, 0xe3, 0xa4, 0x61, 0x7d, 0x12, 0x47, 0x0a, 0xd5, 0xc1, 0xcd, 0xde, 0x4e, 0x79, |
|
| 4668 |
- 0x21, 0x1b, 0xd5, 0xe1, 0xc4, 0x0b, 0xf4, 0x51, 0xce, 0x16, 0xc1, 0xda, 0xa5, 0x7c, 0xec, 0x39, |
|
| 4669 |
- 0x97, 0xac, 0x89, 0xc7, 0x60, 0x0a, 0x12, 0x1d, 0x2a, 0x4d, 0x58, 0xd9, 0x9a, 0x78, 0x45, 0xa2, |
|
| 4670 |
- 0x43, 0xb9, 0xa9, 0xa6, 0x2b, 0xbc, 0x54, 0x06, 0xa7, 0xa1, 0xef, 0x39, 0x44, 0x50, 0x57, 0x29, |
|
| 4671 |
- 0xc3, 0x6a, 0xfc, 0x36, 0x8b, 0x8d, 0xa7, 0x28, 0x7d, 0xfe, 0x17, 0x39, 0x9c, 0xa2, 0xa2, 0x67, |
|
| 4672 |
- 0x90, 0x1f, 0xfa, 0x6c, 0x8f, 0xf8, 0x4a, 0x13, 0x56, 0xe3, 0x5e, 0x56, 0x92, 0x4d, 0x85, 0x98, |
|
| 4673 |
- 0x25, 0xd0, 0x14, 0xf4, 0x04, 0xf2, 0x71, 0xe8, 0x12, 0x41, 0xed, 0xbc, 0x22, 0xd7, 0xb2, 0xc8, |
|
| 4674 |
- 0xaf, 0x15, 0xa2, 0xcd, 0x82, 0x7d, 0x6f, 0x88, 0x35, 0x1e, 0x6d, 0x41, 0x21, 0xa0, 0xe2, 0x2d, |
|
| 4675 |
- 0xe3, 0x87, 0x91, 0xbd, 0x5c, 0x5b, 0x5c, 0xb3, 0x1a, 0x7f, 0xc8, 0x14, 0x63, 0x82, 0x69, 0x0a, |
|
| 4676 |
- 0x41, 0x9c, 0x83, 0x11, 0x0d, 0x44, 0x92, 0xa6, 0xb5, 0x60, 0x1b, 0x78, 0x9a, 0x00, 0xfd, 0x03, |
|
| 4677 |
- 0x0a, 0x34, 0x70, 0x43, 0xe6, 0x05, 0xc2, 0x2e, 0x5c, 0x7c, 0x90, 0xae, 0xc6, 0xc8, 0x62, 0xe2, |
|
| 4678 |
- 0x29, 0xa3, 0x95, 0x07, 0x73, 0xc4, 0x5c, 0x5a, 0xdf, 0x80, 0x6b, 0x1f, 0x14, 0x0b, 0x55, 0xa0, |
|
| 4679 |
- 0xa0, 0x8b, 0x95, 0x74, 0xd9, 0xc4, 0xd3, 0x79, 0xfd, 0x2a, 0xac, 0xcc, 0x15, 0x46, 0xd9, 0xc6, |
|
| 4680 |
- 0xa4, 0x5b, 0xa8, 0x09, 0x45, 0x87, 0x05, 0x82, 0x78, 0x01, 0xe5, 0x5a, 0x20, 0x99, 0xb5, 0x6d, |
|
| 4681 |
- 0x4f, 0x40, 0x92, 0xf5, 0x22, 0x87, 0x67, 0x2c, 0xf4, 0x6f, 0x28, 0x72, 0x1a, 0xb1, 0x98, 0x3b, |
|
| 4682 |
- 0x34, 0xd2, 0x0a, 0x59, 0xcb, 0xee, 0x71, 0x02, 0xc2, 0xf4, 0xff, 0xb1, 0xc7, 0xa9, 0xac, 0x53, |
|
| 4683 |
- 0x84, 0x67, 0x54, 0xf4, 0x0c, 0x96, 0x39, 0x8d, 0x04, 0xe1, 0xe2, 0x63, 0x4d, 0xc6, 0x09, 0xa4, |
|
| 4684 |
- 0xcf, 0x7c, 0xcf, 0x39, 0xc2, 0x13, 0x06, 0x7a, 0x06, 0xc5, 0xd0, 0x27, 0x8e, 0xca, 0x6a, 0x2f, |
|
| 4685 |
- 0x29, 0xfa, 0x6f, 0xb2, 0xe8, 0xfd, 0x09, 0x08, 0xcf, 0xf0, 0xe8, 0x29, 0x80, 0xcf, 0x86, 0x03, |
|
| 4686 |
- 0x97, 0x7b, 0x63, 0xca, 0xb5, 0x48, 0x2a, 0x59, 0xec, 0x8e, 0x42, 0xe0, 0xa2, 0xcf, 0x86, 0xc9, |
|
| 4687 |
- 0x10, 0x6d, 0xfe, 0x2a, 0x85, 0xa4, 0xd4, 0xb1, 0x05, 0x40, 0xa6, 0xab, 0x5a, 0x1f, 0x8f, 0x3e, |
|
| 4688 |
- 0x2b, 0x95, 0xee, 0x48, 0x8a, 0x8e, 0xee, 0x41, 0x69, 0x9f, 0x71, 0x87, 0x0e, 0xb4, 0xee, 0x8b, |
|
| 4689 |
- 0x4a, 0x13, 0x96, 0x8a, 0x25, 0x42, 0x6f, 0x15, 0x61, 0x99, 0xc7, 0x81, 0xf0, 0x46, 0xb4, 0xbe, |
|
| 4690 |
- 0x05, 0x37, 0x32, 0x93, 0xa2, 0x06, 0x94, 0xa6, 0x6d, 0x1e, 0x78, 0xae, 0xd2, 0x47, 0xb1, 0x75, |
|
| 4691 |
- 0xf5, 0xfc, 0x74, 0xd5, 0x9a, 0xea, 0xa1, 0xd7, 0xc1, 0xd6, 0x14, 0xd4, 0x73, 0xeb, 0x3f, 0x98, |
|
| 4692 |
- 0xb0, 0x32, 0x27, 0x16, 0x74, 0x1d, 0x96, 0xbc, 0x11, 0x19, 0xd2, 0x84, 0x8e, 0x93, 0x09, 0xea, |
|
| 4693 |
- 0x42, 0xde, 0x27, 0x7b, 0xd4, 0x97, 0x92, 0x91, 0x65, 0xfb, 0xe3, 0x27, 0x55, 0xb7, 0xfe, 0x5f, |
|
| 4694 |
- 0x85, 0xef, 0x06, 0x82, 0x1f, 0x61, 0x4d, 0x46, 0x36, 0x2c, 0x3b, 0x6c, 0x34, 0x22, 0x81, 0xb4, |
|
| 4695 |
- 0x97, 0xc5, 0xb5, 0x22, 0x9e, 0x4c, 0x11, 0x02, 0x93, 0xf0, 0x61, 0x64, 0x9b, 0x2a, 0xac, 0xc6, |
|
| 4696 |
- 0xa8, 0x0c, 0x8b, 0x34, 0x18, 0xdb, 0x4b, 0x2a, 0x24, 0x87, 0x32, 0xe2, 0x7a, 0x49, 0xcf, 0x8b, |
|
| 4697 |
- 0x58, 0x0e, 0x25, 0x2f, 0x8e, 0x28, 0xb7, 0x97, 0x55, 0x48, 0x8d, 0xd1, 0xdf, 0x20, 0x3f, 0x62, |
|
| 4698 |
- 0x71, 0x20, 0x22, 0xbb, 0xa0, 0x0e, 0x7b, 0x3b, 0xeb, 0xb0, 0xdb, 0x12, 0xa1, 0xed, 0x4f, 0xc3, |
|
| 4699 |
- 0xd1, 0x0b, 0xb8, 0x16, 0x09, 0x16, 0x0e, 0x86, 0x9c, 0x38, 0x74, 0x10, 0x52, 0xee, 0x31, 0x57, |
|
| 4700 |
- 0x75, 0xe3, 0x02, 0x17, 0xed, 0xe8, 0x1b, 0x1e, 0x5f, 0x95, 0xb4, 0x4d, 0xc9, 0xea, 0x2b, 0x12, |
|
| 4701 |
- 0xea, 0x43, 0x29, 0x8c, 0x7d, 0x7f, 0xc0, 0xc2, 0xc4, 0xcc, 0x41, 0x25, 0xf9, 0x8c, 0xaa, 0xf5, |
|
| 4702 |
- 0x63, 0xdf, 0x7f, 0x99, 0x90, 0xb0, 0x15, 0xce, 0x26, 0xe8, 0x26, 0xe4, 0x87, 0x9c, 0xc5, 0x61, |
|
| 4703 |
- 0x64, 0x5b, 0xaa, 0x1e, 0x7a, 0x86, 0x9e, 0xc3, 0x72, 0x44, 0x1d, 0x4e, 0x45, 0x64, 0x97, 0xd4, |
|
| 4704 |
- 0xaf, 0xbd, 0x9f, 0xb5, 0xc9, 0xae, 0x82, 0x60, 0xba, 0x4f, 0x39, 0x0d, 0x1c, 0x8a, 0x27, 0x9c, |
|
| 4705 |
- 0xca, 0x53, 0xb0, 0x52, 0x8d, 0x92, 0x05, 0x3e, 0xa4, 0x47, 0xba, 0xf7, 0x72, 0x28, 0xf5, 0x30, |
|
| 4706 |
- 0x26, 0x7e, 0x9c, 0xbc, 0x30, 0x8a, 0x38, 0x99, 0xfc, 0x7d, 0xe1, 0x89, 0x51, 0x69, 0x80, 0x95, |
|
| 4707 |
- 0x3a, 0x2d, 0xba, 0x0f, 0x2b, 0x9c, 0x0e, 0xbd, 0x48, 0xf0, 0xa3, 0x01, 0x89, 0xc5, 0x81, 0xfd, |
|
| 4708 |
- 0x2f, 0x45, 0x28, 0x4d, 0x82, 0xcd, 0x58, 0x1c, 0xd4, 0x7f, 0x32, 0xa0, 0x94, 0xb6, 0x4c, 0xd4, |
|
| 4709 |
- 0x4e, 0x8c, 0x52, 0xed, 0x78, 0xa5, 0xb1, 0xf1, 0x29, 0x8b, 0x55, 0xb6, 0xe4, 0xc7, 0x72, 0xc7, |
|
| 4710 |
- 0x6d, 0xf9, 0xac, 0x51, 0x64, 0xf4, 0x57, 0x58, 0x0a, 0x19, 0x17, 0x13, 0x71, 0x56, 0x33, 0xad, |
|
| 4711 |
- 0x84, 0xf1, 0xc9, 0x67, 0x9c, 0x80, 0xeb, 0x07, 0x70, 0x65, 0x3e, 0x1b, 0x7a, 0x00, 0x8b, 0x6f, |
|
| 4712 |
- 0x7a, 0xfd, 0x72, 0xae, 0x72, 0xe7, 0xf8, 0xa4, 0x76, 0x6b, 0x7e, 0xf1, 0x8d, 0xc7, 0x45, 0x4c, |
|
| 4713 |
- 0xfc, 0x5e, 0x1f, 0xfd, 0x1e, 0x96, 0x3a, 0x3b, 0xbb, 0x18, 0x97, 0x8d, 0xca, 0xea, 0xf1, 0x49, |
|
| 4714 |
- 0xed, 0xce, 0x3c, 0x4e, 0x2e, 0xb1, 0x38, 0x70, 0x31, 0xdb, 0x9b, 0xde, 0xf4, 0xdf, 0x2c, 0x80, |
|
| 4715 |
- 0xa5, 0xbf, 0xd9, 0xcb, 0xbd, 0xe9, 0xff, 0x09, 0x2b, 0x89, 0x0d, 0x0e, 0x1c, 0xf5, 0xd3, 0xb4, |
|
| 4716 |
- 0xa1, 0x7f, 0xcc, 0x0d, 0x4b, 0x09, 0x21, 0x29, 0x85, 0xb4, 0x1e, 0x2f, 0x1c, 0x3f, 0x1e, 0xd0, |
|
| 4717 |
- 0x80, 0xec, 0xf9, 0xfa, 0xd2, 0x2f, 0x60, 0x4b, 0xc6, 0xba, 0x49, 0x48, 0xde, 0x56, 0x5e, 0x20, |
|
| 4718 |
- 0x28, 0x0f, 0xf4, 0x75, 0x5e, 0xc0, 0xd3, 0x39, 0x7a, 0x0e, 0xa6, 0x17, 0x92, 0x91, 0xb6, 0xf0, |
|
| 4719 |
- 0xcc, 0x5f, 0xd0, 0xeb, 0x37, 0xb7, 0xb5, 0x44, 0x5a, 0x85, 0xf3, 0xd3, 0x55, 0x53, 0x06, 0xb0, |
|
| 4720 |
- 0xa2, 0xa1, 0xea, 0xc4, 0x45, 0xe5, 0x4e, 0xea, 0xab, 0x2e, 0xe0, 0x54, 0xa4, 0xfe, 0xb5, 0x09, |
|
| 4721 |
- 0x56, 0xdb, 0x8f, 0x23, 0xa1, 0xbd, 0xe9, 0xd2, 0xea, 0xf6, 0x3f, 0xb8, 0x46, 0xd4, 0xbb, 0x90, |
|
| 4722 |
- 0x04, 0xf2, 0x43, 0x57, 0xb7, 0x93, 0xae, 0xdd, 0x83, 0xcc, 0x74, 0x53, 0x70, 0x72, 0x93, 0xb5, |
|
| 4723 |
- 0xf2, 0x32, 0xa7, 0x6d, 0xe0, 0x32, 0xf9, 0xc5, 0x0a, 0xda, 0x85, 0x15, 0xc6, 0x9d, 0x03, 0x1a, |
|
| 4724 |
- 0x89, 0xc4, 0x1b, 0xf4, 0x3b, 0x2a, 0xf3, 0x85, 0xfd, 0x32, 0x0d, 0xd4, 0xaf, 0x90, 0xe4, 0xb4, |
|
| 4725 |
- 0xf3, 0x39, 0xd0, 0x13, 0x30, 0x39, 0xd9, 0x9f, 0xdc, 0xb4, 0x99, 0xfa, 0xc6, 0x64, 0x5f, 0xcc, |
|
| 4726 |
- 0xa5, 0x50, 0x0c, 0xf4, 0x1f, 0x00, 0xd7, 0x8b, 0x42, 0x22, 0x9c, 0x03, 0xca, 0x75, 0x9f, 0x32, |
|
| 4727 |
- 0x7f, 0x62, 0x67, 0x8a, 0x9a, 0xcb, 0x92, 0x62, 0xa3, 0x2d, 0x28, 0x3a, 0x64, 0xa2, 0xb4, 0xfc, |
|
| 4728 |
- 0xc5, 0xb6, 0xd8, 0x6e, 0xea, 0x14, 0x65, 0x99, 0xe2, 0xfc, 0x74, 0xb5, 0x30, 0x89, 0xe0, 0x82, |
|
| 4729 |
- 0x43, 0xb4, 0xf2, 0xb6, 0x60, 0x45, 0x3e, 0x3a, 0x07, 0x2e, 0xdd, 0x27, 0xb1, 0x2f, 0x22, 0xe5, |
|
| 4730 |
- 0xe0, 0x17, 0x3c, 0xb2, 0xe4, 0xfb, 0xa7, 0xa3, 0x71, 0xfa, 0x5c, 0x25, 0x91, 0x8a, 0xd5, 0x3d, |
|
| 4731 |
- 0x80, 0xc4, 0xe1, 0x2e, 0x57, 0x26, 0x08, 0x4c, 0x97, 0x08, 0xa2, 0x94, 0x51, 0xc2, 0x6a, 0xdc, |
|
| 4732 |
- 0xba, 0xfb, 0xfe, 0xac, 0x9a, 0xfb, 0xee, 0xac, 0x9a, 0xfb, 0xf1, 0xac, 0x6a, 0x7c, 0x71, 0x5e, |
|
| 4733 |
- 0x35, 0xde, 0x9f, 0x57, 0x8d, 0x6f, 0xcf, 0xab, 0xc6, 0xf7, 0xe7, 0x55, 0x63, 0x2f, 0xaf, 0xfe, |
|
| 4734 |
- 0xeb, 0xfd, 0xe5, 0xe7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb6, 0x26, 0x4a, 0x64, 0x4a, 0x0e, 0x00, |
|
| 4735 |
- 0x00, |
|
| 4643 |
+ // 1563 bytes of a gzipped FileDescriptorProto |
|
| 4644 |
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x57, 0xcd, 0x6e, 0x23, 0xc7, |
|
| 4645 |
+ 0x11, 0xe6, 0x88, 0x14, 0x7f, 0x6a, 0xc8, 0x5d, 0x6e, 0xc3, 0x3f, 0xb3, 0xb4, 0x43, 0x72, 0xe9, |
|
| 4646 |
+ 0x8d, 0x23, 0xc7, 0x88, 0x36, 0x61, 0x02, 0x67, 0x9d, 0x8d, 0x91, 0xf0, 0x2f, 0x5a, 0x46, 0x91, |
|
| 4647 |
+ 0x4c, 0xb4, 0xe4, 0x05, 0xf6, 0x44, 0xb4, 0x66, 0x5a, 0xe4, 0x40, 0xc3, 0xe9, 0x49, 0x4f, 0x0f, |
|
| 4648 |
+ 0x0d, 0xdd, 0x72, 0x34, 0xf6, 0x90, 0x37, 0xd0, 0x29, 0x40, 0xde, 0x20, 0xef, 0xb0, 0xc7, 0x1c, |
|
| 4649 |
+ 0x73, 0x12, 0x2c, 0x3e, 0x41, 0x80, 0xbc, 0x40, 0xd0, 0x3d, 0x3d, 0xe4, 0x30, 0x1e, 0x59, 0x06, |
|
| 4650 |
+ 0xa2, 0x5b, 0x77, 0xcd, 0xf7, 0x55, 0x77, 0x57, 0x7d, 0xac, 0x2a, 0x82, 0x19, 0x06, 0xd4, 0x0e, |
|
| 4651 |
+ 0xf7, 0x03, 0xce, 0x04, 0x43, 0xc8, 0x61, 0xf6, 0x05, 0xe5, 0xfb, 0xe1, 0xd7, 0x84, 0x2f, 0x2e, |
|
| 4652 |
+ 0x5c, 0xb1, 0xbf, 0xfc, 0x45, 0xc3, 0x14, 0x97, 0x01, 0xd5, 0x80, 0xc6, 0x3b, 0x33, 0x36, 0x63, |
|
| 4653 |
+ 0x6a, 0xf9, 0x4c, 0xae, 0xb4, 0xf5, 0x7d, 0x27, 0xe2, 0x44, 0xb8, 0xcc, 0x7f, 0x96, 0x2c, 0xe2, |
|
| 4654 |
+ 0x0f, 0x9d, 0xbf, 0x16, 0xa0, 0x7c, 0xcc, 0x1c, 0x7a, 0x12, 0x50, 0x1b, 0x1d, 0x80, 0x49, 0x7c, |
|
| 4655 |
+ 0x9f, 0x09, 0x05, 0x08, 0x2d, 0xa3, 0x6d, 0xec, 0x99, 0xdd, 0xd6, 0xfe, 0x77, 0x8f, 0xdc, 0xef, |
|
| 4656 |
+ 0x6d, 0x60, 0xfd, 0xc2, 0xdb, 0xeb, 0x56, 0x0e, 0xa7, 0x99, 0xe8, 0xe7, 0x50, 0xe0, 0xcc, 0xa3, |
|
| 4657 |
+ 0xd6, 0x4e, 0xdb, 0xd8, 0x7b, 0xd0, 0xfd, 0x30, 0xcb, 0x83, 0x3c, 0x14, 0x33, 0x8f, 0x62, 0x85, |
|
| 4658 |
+ 0x44, 0x07, 0x00, 0x0b, 0xba, 0x38, 0xa3, 0x3c, 0x9c, 0xbb, 0x81, 0x95, 0x57, 0xbc, 0x9f, 0xdc, |
|
| 4659 |
+ 0xc6, 0x93, 0x97, 0xdd, 0x3f, 0x5a, 0xc3, 0x71, 0x8a, 0x8a, 0x8e, 0xa0, 0x4a, 0x96, 0xc4, 0xf5, |
|
| 4660 |
+ 0xc8, 0x99, 0xeb, 0xb9, 0xe2, 0xd2, 0x2a, 0x28, 0x57, 0x9f, 0x7c, 0xaf, 0xab, 0x5e, 0x8a, 0x80, |
|
| 4661 |
+ 0xb7, 0xe8, 0x1d, 0x07, 0x60, 0x73, 0x10, 0xfa, 0x18, 0x4a, 0x93, 0xd1, 0xf1, 0x70, 0x7c, 0x7c, |
|
| 4662 |
+ 0x50, 0xcf, 0x35, 0x1e, 0xbf, 0xb9, 0x6a, 0xbf, 0x2b, 0x7d, 0x6c, 0x00, 0x13, 0xea, 0x3b, 0xae, |
|
| 4663 |
+ 0x3f, 0x43, 0x7b, 0x50, 0xee, 0x0d, 0x06, 0xa3, 0xc9, 0xe9, 0x68, 0x58, 0x37, 0x1a, 0x8d, 0x37, |
|
| 4664 |
+ 0x57, 0xed, 0xf7, 0xb6, 0x81, 0x3d, 0xdb, 0xa6, 0x81, 0xa0, 0x4e, 0xa3, 0xf0, 0xcd, 0xdf, 0x9a, |
|
| 4665 |
+ 0xb9, 0xce, 0x37, 0x06, 0x54, 0xd3, 0x97, 0x40, 0x1f, 0x43, 0xb1, 0x37, 0x38, 0x1d, 0xbf, 0x1a, |
|
| 4666 |
+ 0xd5, 0x73, 0x1b, 0x7a, 0x1a, 0xd1, 0xb3, 0x85, 0xbb, 0xa4, 0xe8, 0x29, 0xec, 0x4e, 0x7a, 0x5f, |
|
| 4667 |
+ 0x9d, 0x8c, 0xea, 0xc6, 0xe6, 0x3a, 0x69, 0xd8, 0x84, 0x44, 0xa1, 0x42, 0x0d, 0x71, 0x6f, 0x7c, |
|
| 4668 |
+ 0x5c, 0xdf, 0xc9, 0x46, 0x0d, 0x39, 0x71, 0x7d, 0x7d, 0x95, 0x9b, 0x3c, 0x98, 0x27, 0x94, 0x2f, |
|
| 4669 |
+ 0x5d, 0xfb, 0x9e, 0x35, 0xf1, 0x19, 0x14, 0x04, 0x09, 0x2f, 0x94, 0x26, 0xcc, 0x6c, 0x4d, 0x9c, |
|
| 4670 |
+ 0x92, 0xf0, 0x42, 0x1e, 0xaa, 0xe9, 0x0a, 0x2f, 0x95, 0xc1, 0x69, 0xe0, 0xb9, 0x36, 0x11, 0xd4, |
|
| 4671 |
+ 0x51, 0xca, 0x30, 0xbb, 0x3f, 0xce, 0x62, 0xe3, 0x35, 0x4a, 0xdf, 0xff, 0x65, 0x0e, 0xa7, 0xa8, |
|
| 4672 |
+ 0xe8, 0x05, 0x14, 0x67, 0x1e, 0x3b, 0x23, 0x9e, 0xd2, 0x84, 0xd9, 0x7d, 0x92, 0xe5, 0xe4, 0x40, |
|
| 4673 |
+ 0x21, 0x36, 0x0e, 0x34, 0x05, 0x3d, 0x87, 0x62, 0x14, 0x38, 0x44, 0x50, 0xab, 0xa8, 0xc8, 0xed, |
|
| 4674 |
+ 0x2c, 0xf2, 0x57, 0x0a, 0x31, 0x60, 0xfe, 0xb9, 0x3b, 0xc3, 0x1a, 0x8f, 0x0e, 0xa1, 0xec, 0x53, |
|
| 4675 |
+ 0xf1, 0x35, 0xe3, 0x17, 0xa1, 0x55, 0x6a, 0xe7, 0xf7, 0xcc, 0xee, 0xa7, 0x99, 0x62, 0x8c, 0x31, |
|
| 4676 |
+ 0x3d, 0x21, 0x88, 0x3d, 0x5f, 0x50, 0x5f, 0xc4, 0x6e, 0xfa, 0x3b, 0x96, 0x81, 0xd7, 0x0e, 0xd0, |
|
| 4677 |
+ 0x6f, 0xa1, 0x4c, 0x7d, 0x27, 0x60, 0xae, 0x2f, 0xac, 0xf2, 0xed, 0x17, 0x19, 0x69, 0x8c, 0x0c, |
|
| 4678 |
+ 0x26, 0x5e, 0x33, 0xfa, 0x45, 0x28, 0x2c, 0x98, 0x43, 0x3b, 0xcf, 0xe0, 0xd1, 0x77, 0x82, 0x85, |
|
| 4679 |
+ 0x1a, 0x50, 0xd6, 0xc1, 0x8a, 0xb3, 0x5c, 0xc0, 0xeb, 0x7d, 0xe7, 0x21, 0xd4, 0xb6, 0x02, 0xa3, |
|
| 4680 |
+ 0xca, 0x46, 0x92, 0x2d, 0xd4, 0x83, 0x8a, 0xcd, 0x7c, 0x41, 0x5c, 0x9f, 0x72, 0x2d, 0x90, 0xcc, |
|
| 4681 |
+ 0xd8, 0x0e, 0x12, 0x90, 0x64, 0xbd, 0xcc, 0xe1, 0x0d, 0x0b, 0xfd, 0x01, 0x2a, 0x9c, 0x86, 0x2c, |
|
| 4682 |
+ 0xe2, 0x36, 0x0d, 0xb5, 0x42, 0xf6, 0xb2, 0x73, 0x1c, 0x83, 0x30, 0xfd, 0x73, 0xe4, 0x72, 0x2a, |
|
| 4683 |
+ 0xe3, 0x14, 0xe2, 0x0d, 0x15, 0xbd, 0x80, 0x12, 0xa7, 0xa1, 0x20, 0x5c, 0x7c, 0x5f, 0x92, 0x71, |
|
| 4684 |
+ 0x0c, 0x99, 0x30, 0xcf, 0xb5, 0x2f, 0x71, 0xc2, 0x40, 0x2f, 0xa0, 0x12, 0x78, 0xc4, 0x56, 0x5e, |
|
| 4685 |
+ 0xad, 0x5d, 0x45, 0xff, 0x51, 0x16, 0x7d, 0x92, 0x80, 0xf0, 0x06, 0x8f, 0x3e, 0x07, 0xf0, 0xd8, |
|
| 4686 |
+ 0x6c, 0xea, 0x70, 0x77, 0x49, 0xb9, 0x16, 0x49, 0x23, 0x8b, 0x3d, 0x54, 0x08, 0x5c, 0xf1, 0xd8, |
|
| 4687 |
+ 0x2c, 0x5e, 0xa2, 0x83, 0xff, 0x4b, 0x21, 0x29, 0x75, 0x1c, 0x02, 0x90, 0xf5, 0x57, 0xad, 0x8f, |
|
| 4688 |
+ 0x4f, 0x7e, 0x90, 0x2b, 0x9d, 0x91, 0x14, 0x1d, 0x3d, 0x81, 0xea, 0x39, 0xe3, 0x36, 0x9d, 0x6a, |
|
| 4689 |
+ 0xdd, 0x57, 0x94, 0x26, 0x4c, 0x65, 0x8b, 0x85, 0xde, 0xaf, 0x40, 0x89, 0x47, 0xbe, 0x70, 0x17, |
|
| 4690 |
+ 0xb4, 0x73, 0x08, 0xef, 0x66, 0x3a, 0x45, 0x5d, 0xa8, 0xae, 0xd3, 0x3c, 0x75, 0x1d, 0xa5, 0x8f, |
|
| 4691 |
+ 0x4a, 0xff, 0xe1, 0xea, 0xba, 0x65, 0xae, 0xf5, 0x30, 0x1e, 0x62, 0x73, 0x0d, 0x1a, 0x3b, 0x9d, |
|
| 4692 |
+ 0x6f, 0x8b, 0x50, 0xdb, 0x12, 0x0b, 0x7a, 0x07, 0x76, 0xdd, 0x05, 0x99, 0xd1, 0x98, 0x8e, 0xe3, |
|
| 4693 |
+ 0x0d, 0x1a, 0x41, 0xd1, 0x23, 0x67, 0xd4, 0x93, 0x92, 0x91, 0x61, 0xfb, 0xd9, 0x9d, 0xaa, 0xdb, |
|
| 4694 |
+ 0xff, 0x93, 0xc2, 0x8f, 0x7c, 0xc1, 0x2f, 0xb1, 0x26, 0x23, 0x0b, 0x4a, 0x36, 0x5b, 0x2c, 0x88, |
|
| 4695 |
+ 0x2f, 0xcb, 0x4b, 0x7e, 0xaf, 0x82, 0x93, 0x2d, 0x42, 0x50, 0x20, 0x7c, 0x16, 0x5a, 0x05, 0x65, |
|
| 4696 |
+ 0x56, 0x6b, 0x54, 0x87, 0x3c, 0xf5, 0x97, 0xd6, 0xae, 0x32, 0xc9, 0xa5, 0xb4, 0x38, 0x6e, 0x9c, |
|
| 4697 |
+ 0xf3, 0x0a, 0x96, 0x4b, 0xc9, 0x8b, 0x42, 0xca, 0xad, 0x92, 0x32, 0xa9, 0x35, 0xfa, 0x35, 0x14, |
|
| 4698 |
+ 0x17, 0x2c, 0xf2, 0x45, 0x68, 0x95, 0xd5, 0x65, 0x1f, 0x67, 0x5d, 0xf6, 0x48, 0x22, 0x74, 0xf9, |
|
| 4699 |
+ 0xd3, 0x70, 0xf4, 0x12, 0x1e, 0x85, 0x82, 0x05, 0xd3, 0x19, 0x27, 0x36, 0x9d, 0x06, 0x94, 0xbb, |
|
| 4700 |
+ 0xcc, 0x51, 0xd9, 0xb8, 0xa5, 0x8a, 0x0e, 0x75, 0x87, 0xc7, 0x0f, 0x25, 0xed, 0x40, 0xb2, 0x26, |
|
| 4701 |
+ 0x8a, 0x84, 0x26, 0x50, 0x0d, 0x22, 0xcf, 0x9b, 0xb2, 0x20, 0x2e, 0xe6, 0xa0, 0x9c, 0xfc, 0x80, |
|
| 4702 |
+ 0xa8, 0x4d, 0x22, 0xcf, 0xfb, 0x32, 0x26, 0x61, 0x33, 0xd8, 0x6c, 0xd0, 0x7b, 0x50, 0x9c, 0x71, |
|
| 4703 |
+ 0x16, 0x05, 0xa1, 0x65, 0xaa, 0x78, 0xe8, 0x1d, 0xfa, 0x02, 0x4a, 0x21, 0xb5, 0x39, 0x15, 0xa1, |
|
| 4704 |
+ 0x55, 0x55, 0xaf, 0xfd, 0x28, 0xeb, 0x90, 0x13, 0x05, 0xc1, 0xf4, 0x9c, 0x72, 0xea, 0xdb, 0x14, |
|
| 4705 |
+ 0x27, 0x1c, 0xf4, 0x18, 0xf2, 0x42, 0x5c, 0x5a, 0xb5, 0xb6, 0xb1, 0x57, 0xee, 0x97, 0x56, 0xd7, |
|
| 4706 |
+ 0xad, 0xfc, 0xe9, 0xe9, 0x6b, 0x2c, 0x6d, 0xb2, 0x4c, 0xcd, 0x59, 0x28, 0x7c, 0xb2, 0xa0, 0xd6, |
|
| 4707 |
+ 0x03, 0x15, 0xde, 0xf5, 0x1e, 0xbd, 0x06, 0x70, 0xfc, 0x70, 0x6a, 0xab, 0xdf, 0x85, 0xf5, 0x50, |
|
| 4708 |
+ 0xbd, 0xee, 0xd3, 0xbb, 0x5f, 0x37, 0x3c, 0x3e, 0xd1, 0xc5, 0xb6, 0xb6, 0xba, 0x6e, 0x55, 0xd6, |
|
| 4709 |
+ 0x5b, 0x5c, 0x71, 0xfc, 0x30, 0x5e, 0x36, 0x3e, 0x07, 0x33, 0x25, 0x1d, 0x99, 0xf2, 0x0b, 0x7a, |
|
| 4710 |
+ 0xa9, 0xd5, 0x28, 0x97, 0x52, 0xa1, 0x4b, 0xe2, 0x45, 0xf1, 0xcc, 0x53, 0xc1, 0xf1, 0xe6, 0x37, |
|
| 4711 |
+ 0x3b, 0xcf, 0x8d, 0x46, 0x17, 0xcc, 0x54, 0xfc, 0xd0, 0x47, 0x50, 0xe3, 0x74, 0xe6, 0x86, 0x82, |
|
| 4712 |
+ 0x5f, 0x4e, 0x49, 0x24, 0xe6, 0xd6, 0xef, 0x15, 0xa1, 0x9a, 0x18, 0x7b, 0x91, 0x98, 0x37, 0xa6, |
|
| 4713 |
+ 0xb0, 0xb9, 0x06, 0x6a, 0x83, 0x29, 0x9f, 0x17, 0x52, 0xbe, 0xa4, 0x5c, 0x16, 0x67, 0x19, 0xe9, |
|
| 4714 |
+ 0xb4, 0x49, 0xa6, 0x21, 0xa4, 0x84, 0xdb, 0x73, 0xf5, 0x43, 0xa8, 0x60, 0xbd, 0x93, 0xca, 0x4e, |
|
| 4715 |
+ 0x72, 0xad, 0x95, 0xad, 0xb7, 0x9d, 0xff, 0x18, 0x50, 0x4d, 0x77, 0x09, 0x34, 0x88, 0x7b, 0x83, |
|
| 4716 |
+ 0x7a, 0xd2, 0x83, 0xee, 0xb3, 0xbb, 0xba, 0x8a, 0xaa, 0xc4, 0x5e, 0x24, 0x9d, 0x1d, 0xc9, 0x49, |
|
| 4717 |
+ 0x4e, 0x91, 0xd1, 0xaf, 0x60, 0x37, 0x60, 0x5c, 0x24, 0xbf, 0xc7, 0x66, 0x66, 0xf5, 0x64, 0x3c, |
|
| 4718 |
+ 0xa9, 0x5c, 0x31, 0xb8, 0x33, 0x87, 0x07, 0xdb, 0xde, 0xd0, 0x53, 0xc8, 0xbf, 0x1a, 0x4f, 0xea, |
|
| 4719 |
+ 0xb9, 0xc6, 0x07, 0x6f, 0xae, 0xda, 0xef, 0x6f, 0x7f, 0x7c, 0xe5, 0x72, 0x11, 0x11, 0x6f, 0x3c, |
|
| 4720 |
+ 0x41, 0x3f, 0x85, 0xdd, 0xe1, 0xf1, 0x09, 0xc6, 0x75, 0xa3, 0xd1, 0x7a, 0x73, 0xd5, 0xfe, 0x60, |
|
| 4721 |
+ 0x1b, 0x27, 0x3f, 0xb1, 0xc8, 0x77, 0x30, 0x3b, 0x5b, 0x0f, 0x37, 0xff, 0xd8, 0x01, 0x53, 0x97, |
|
| 4722 |
+ 0xa9, 0xfb, 0x1d, 0x6e, 0x7e, 0x07, 0xb5, 0xb8, 0xf2, 0x27, 0xe2, 0xdb, 0xb9, 0xb3, 0x01, 0x54, |
|
| 4723 |
+ 0x63, 0x82, 0xce, 0xf1, 0x13, 0xa8, 0xba, 0xc1, 0xf2, 0xb3, 0x29, 0xf5, 0xc9, 0x99, 0xa7, 0xe7, |
|
| 4724 |
+ 0x9c, 0x32, 0x36, 0xa5, 0x6d, 0x14, 0x9b, 0xa4, 0xf2, 0x5d, 0x5f, 0x50, 0xee, 0xeb, 0x09, 0xa6, |
|
| 4725 |
+ 0x8c, 0xd7, 0x7b, 0xf4, 0x05, 0x14, 0xdc, 0x80, 0x2c, 0x74, 0xd7, 0xca, 0x7c, 0xc1, 0x78, 0xd2, |
|
| 4726 |
+ 0x3b, 0xd2, 0x1a, 0xec, 0x97, 0x57, 0xd7, 0xad, 0x82, 0x34, 0x60, 0x45, 0x43, 0xcd, 0xa4, 0x71, |
|
| 4727 |
+ 0xc8, 0x93, 0x54, 0x21, 0x2b, 0xe3, 0x94, 0xa5, 0xf3, 0xf7, 0x02, 0x98, 0x03, 0x2f, 0x0a, 0x85, |
|
| 4728 |
+ 0x2e, 0xc7, 0xf7, 0x16, 0xb7, 0xd7, 0xf0, 0x88, 0xa8, 0x51, 0x98, 0xf8, 0xb2, 0xb6, 0xa9, 0x86, |
|
| 4729 |
+ 0xac, 0x63, 0xf7, 0x34, 0xd3, 0xdd, 0x1a, 0x1c, 0x37, 0xef, 0x7e, 0x51, 0xfa, 0xb4, 0x0c, 0x5c, |
|
| 4730 |
+ 0x27, 0xff, 0xf3, 0x05, 0x9d, 0x40, 0x8d, 0x71, 0x7b, 0x4e, 0x43, 0x11, 0x97, 0x43, 0x3d, 0x3a, |
|
| 4731 |
+ 0x66, 0xfe, 0xa9, 0xf8, 0x32, 0x0d, 0xd4, 0xb5, 0x20, 0xbe, 0xed, 0xb6, 0x0f, 0xf4, 0x1c, 0x0a, |
|
| 4732 |
+ 0x9c, 0x9c, 0x27, 0xc3, 0x45, 0xa6, 0xbe, 0x31, 0x39, 0x17, 0x5b, 0x2e, 0x14, 0x03, 0xfd, 0x11, |
|
| 4733 |
+ 0xc0, 0x71, 0xc3, 0x80, 0x08, 0x7b, 0x4e, 0xb9, 0xce, 0x53, 0xe6, 0x13, 0x87, 0x6b, 0xd4, 0x96, |
|
| 4734 |
+ 0x97, 0x14, 0x1b, 0x1d, 0x42, 0xc5, 0x26, 0x89, 0xd2, 0x8a, 0xb7, 0x77, 0x82, 0x41, 0x4f, 0xbb, |
|
| 4735 |
+ 0xa8, 0x4b, 0x17, 0xab, 0xeb, 0x56, 0x39, 0xb1, 0xe0, 0xb2, 0x4d, 0xb4, 0xf2, 0x0e, 0xa1, 0x26, |
|
| 4736 |
+ 0xe7, 0xec, 0xa9, 0x43, 0xcf, 0x49, 0xe4, 0x89, 0x50, 0x35, 0xad, 0x5b, 0xe6, 0x4a, 0x39, 0xf2, |
|
| 4737 |
+ 0x0d, 0x35, 0x4e, 0xdf, 0xab, 0x2a, 0x52, 0xb6, 0x8e, 0x0b, 0x10, 0x17, 0xf5, 0xfb, 0x95, 0x09, |
|
| 4738 |
+ 0x82, 0x82, 0x43, 0x04, 0x51, 0xca, 0xa8, 0x62, 0xb5, 0xee, 0x7f, 0xf8, 0xf6, 0xa6, 0x99, 0xfb, |
|
| 4739 |
+ 0xd7, 0x4d, 0x33, 0xf7, 0xef, 0x9b, 0xa6, 0xf1, 0x97, 0x55, 0xd3, 0x78, 0xbb, 0x6a, 0x1a, 0xff, |
|
| 4740 |
+ 0x5c, 0x35, 0x8d, 0x6f, 0x57, 0x4d, 0xe3, 0xac, 0xa8, 0xfe, 0xde, 0xfe, 0xf2, 0xbf, 0x01, 0x00, |
|
| 4741 |
+ 0x00, 0xff, 0xff, 0x2a, 0x90, 0x7c, 0x40, 0x3d, 0x0f, 0x00, 0x00, |
|
| 4736 | 4742 |
} |
| ... | ... |
@@ -128,6 +128,7 @@ message NetworkAttachmentSpec {
|
| 128 | 128 |
string container_id = 1 [(gogoproto.customname) = "ContainerID"]; |
| 129 | 129 |
} |
| 130 | 130 |
|
| 131 |
+ |
|
| 131 | 132 |
// Container specifies runtime parameters for a container. |
| 132 | 133 |
message ContainerSpec {
|
| 133 | 134 |
// image defines the image reference, as specified in the |
| ... | ... |
@@ -159,6 +160,10 @@ message ContainerSpec {
|
| 159 | 159 |
// If Command and Args are provided, Args will be appended to Command. |
| 160 | 160 |
repeated string args = 4; |
| 161 | 161 |
|
| 162 |
+ // Hostname specifies the hostname that will be set on containers created by docker swarm. |
|
| 163 |
+ // All containers for a given service will have the same hostname. |
|
| 164 |
+ string hostname = 14; |
|
| 165 |
+ |
|
| 162 | 166 |
// Env specifies the environment variables for the container in NAME=VALUE |
| 163 | 167 |
// format. These must be compliant with [IEEE Std |
| 164 | 168 |
// 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html). |
| ... | ... |
@@ -177,6 +182,10 @@ message ContainerSpec {
|
| 177 | 177 |
// Groups specifies supplementary groups available to the user. |
| 178 | 178 |
repeated string groups = 11; |
| 179 | 179 |
|
| 180 |
+ // TTY declares that a TTY should be attached to the standard streams, |
|
| 181 |
+ // including stdin if it is still open. |
|
| 182 |
+ bool tty = 13 [(gogoproto.customname) = "TTY"]; |
|
| 183 |
+ |
|
| 180 | 184 |
repeated Mount mounts = 8 [(gogoproto.nullable) = false]; |
| 181 | 185 |
|
| 182 | 186 |
// StopGracePeriod the grace period for stopping the container before |
| ... | ... |
@@ -198,6 +207,24 @@ message ContainerSpec {
|
| 198 | 198 |
// SecretReference contains references to zero or more secrets that |
| 199 | 199 |
// will be exposed to the container. |
| 200 | 200 |
repeated SecretReference secrets = 12; |
| 201 |
+ |
|
| 202 |
+ // DNSConfig specifies DNS-related configuration in the resolver configuration file (resolv.conf). |
|
| 203 |
+ // Detailed documentation is available at: |
|
| 204 |
+ // http://man7.org/linux/man-pages/man5/resolv.conf.5.html |
|
| 205 |
+ // TODO: domain is not supported yet |
|
| 206 |
+ message DNSConfig {
|
|
| 207 |
+ // Nameservers specifies the IP addresses of the name servers |
|
| 208 |
+ repeated string nameservers = 1; |
|
| 209 |
+ |
|
| 210 |
+ // Search specifies the search list for host-name lookup |
|
| 211 |
+ repeated string search = 2; |
|
| 212 |
+ |
|
| 213 |
+ // Options allows certain internal resolver variables to be modified |
|
| 214 |
+ repeated string options = 3; |
|
| 215 |
+ } |
|
| 216 |
+ |
|
| 217 |
+ // DNSConfig allows one to specify DNS-related configuration in resolv.conf. |
|
| 218 |
+ DNSConfig dns_config = 15 [(gogoproto.customname) = "DNSConfig"]; |
|
| 201 | 219 |
} |
| 202 | 220 |
|
| 203 | 221 |
// EndpointSpec defines the properties that can be configured to |
| ... | ... |
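For reference, the ContainerSpec additions picked up by this revendor (hostname, tty, and the nested DNSConfig message) are consumed from Go roughly as below. This is a minimal sketch against the vendored github.com/docker/swarmkit/api package; the image name and DNS values are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

func main() {
	// Populate the ContainerSpec fields added by this swarmkit bump:
	// hostname (field 14), tty (field 13) and dns_config (field 15).
	spec := api.ContainerSpec{
		Image:    "nginx:alpine", // illustrative image
		Hostname: "web",          // same hostname for every task of the service
		TTY:      true,           // attach a TTY to the standard streams
		DNSConfig: &api.ContainerSpec_DNSConfig{
			Nameservers: []string{"8.8.8.8"},
			Search:      []string{"example.com"},
			Options:     []string{"ndots:2"},
		},
	}

	// The regenerated code round-trips the new fields through the wire format.
	data, err := spec.Marshal()
	if err != nil {
		panic(err)
	}
	var decoded api.ContainerSpec
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Hostname, decoded.TTY, decoded.DNSConfig.Nameservers)
}
```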
@@ -57,7 +57,7 @@ |
| 57 | 57 |
EncryptionKey |
| 58 | 58 |
ManagerStatus |
| 59 | 59 |
SecretReference |
| 60 |
- RemovedNode |
|
| 60 |
+ BlacklistedCertificate |
|
| 61 | 61 |
NodeSpec |
| 62 | 62 |
ServiceSpec |
| 63 | 63 |
ReplicatedService |
| ... | ... |
@@ -119,6 +119,8 @@ |
| 119 | 119 |
UpdateClusterResponse |
| 120 | 120 |
GetSecretRequest |
| 121 | 121 |
GetSecretResponse |
| 122 |
+ UpdateSecretRequest |
|
| 123 |
+ UpdateSecretResponse |
|
| 122 | 124 |
ListSecretsRequest |
| 123 | 125 |
ListSecretsResponse |
| 124 | 126 |
CreateSecretRequest |
| ... | ... |
@@ -202,8 +204,7 @@ type TaskState int32 |
| 202 | 202 |
|
| 203 | 203 |
const ( |
| 204 | 204 |
TaskStateNew TaskState = 0 |
| 205 |
- TaskStateAllocated TaskState = 64 |
|
| 206 |
- TaskStatePending TaskState = 128 |
|
| 205 |
+ TaskStatePending TaskState = 64 |
|
| 207 | 206 |
TaskStateAssigned TaskState = 192 |
| 208 | 207 |
TaskStateAccepted TaskState = 256 |
| 209 | 208 |
TaskStatePreparing TaskState = 320 |
| ... | ... |
@@ -218,8 +219,7 @@ const ( |
| 218 | 218 |
|
| 219 | 219 |
var TaskState_name = map[int32]string{
|
| 220 | 220 |
0: "NEW", |
| 221 |
- 64: "ALLOCATED", |
|
| 222 |
- 128: "PENDING", |
|
| 221 |
+ 64: "PENDING", |
|
| 223 | 222 |
192: "ASSIGNED", |
| 224 | 223 |
256: "ACCEPTED", |
| 225 | 224 |
320: "PREPARING", |
| ... | ... |
@@ -233,8 +233,7 @@ var TaskState_name = map[int32]string{
|
| 233 | 233 |
} |
| 234 | 234 |
var TaskState_value = map[string]int32{
|
| 235 | 235 |
"NEW": 0, |
| 236 |
- "ALLOCATED": 64, |
|
| 237 |
- "PENDING": 128, |
|
| 236 |
+ "PENDING": 64, |
|
| 238 | 237 |
"ASSIGNED": 192, |
| 239 | 238 |
"ACCEPTED": 256, |
| 240 | 239 |
"PREPARING": 320, |
| ... | ... |
@@ -540,14 +539,50 @@ func (x PortConfig_Protocol) String() string {
|
| 540 | 540 |
} |
| 541 | 541 |
func (PortConfig_Protocol) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{19, 0} }
|
| 542 | 542 |
|
| 543 |
+// PublishMode controls how ports are published on the swarm. |
|
| 544 |
+type PortConfig_PublishMode int32 |
|
| 545 |
+ |
|
| 546 |
+const ( |
|
| 547 |
+ // PublishModeIngress exposes the port across the cluster on all nodes. |
|
| 548 |
+ PublishModeIngress PortConfig_PublishMode = 0 |
|
| 549 |
+ // PublishModeHost exposes the port on just the target host. If the |
|
| 550 |
+ // published port is undefined, an ephemeral port will be allocated. If |
|
| 551 |
+ // the published port is defined, the node will attempt to allocate it, |
|
| 552 |
+ // erroring the task if it fails. |
|
| 553 |
+ PublishModeHost PortConfig_PublishMode = 1 |
|
| 554 |
+) |
|
| 555 |
+ |
|
| 556 |
+var PortConfig_PublishMode_name = map[int32]string{
|
|
| 557 |
+ 0: "INGRESS", |
|
| 558 |
+ 1: "HOST", |
|
| 559 |
+} |
|
| 560 |
+var PortConfig_PublishMode_value = map[string]int32{
|
|
| 561 |
+ "INGRESS": 0, |
|
| 562 |
+ "HOST": 1, |
|
| 563 |
+} |
|
| 564 |
+ |
|
| 565 |
+func (x PortConfig_PublishMode) String() string {
|
|
| 566 |
+ return proto.EnumName(PortConfig_PublishMode_name, int32(x)) |
|
| 567 |
+} |
|
| 568 |
+func (PortConfig_PublishMode) EnumDescriptor() ([]byte, []int) {
|
|
| 569 |
+ return fileDescriptorTypes, []int{19, 1}
|
|
| 570 |
+} |
|
| 571 |
+ |
|
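The new PortConfig_PublishMode enum comes with the usual generated name/value maps, so a user-supplied mode string can be resolved without a hand-written table. A small sketch under that assumption; the parsePublishMode helper and its lowercase handling are mine, not part of swarmkit:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/swarmkit/api"
)

// parsePublishMode converts a mode string ("ingress" or "host") into the
// generated PortConfig_PublishMode enum via its value map.
func parsePublishMode(s string) (api.PortConfig_PublishMode, error) {
	v, ok := api.PortConfig_PublishMode_value[strings.ToUpper(s)]
	if !ok {
		return 0, fmt.Errorf("unknown publish mode %q", s)
	}
	return api.PortConfig_PublishMode(v), nil
}

func main() {
	mode, err := parsePublishMode("host")
	if err != nil {
		panic(err)
	}
	// String() resolves back through PortConfig_PublishMode_name.
	fmt.Println(mode, mode == api.PublishModeHost) // HOST true
}
```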
| 543 | 572 |
type IssuanceStatus_State int32 |
| 544 | 573 |
|
| 545 | 574 |
const ( |
| 546 | 575 |
IssuanceStateUnknown IssuanceStatus_State = 0 |
| 547 |
- IssuanceStateRenew IssuanceStatus_State = 1 |
|
| 576 |
+ // A new certificate should be issued |
|
| 577 |
+ IssuanceStateRenew IssuanceStatus_State = 1 |
|
| 578 |
+ // Certificate is pending acceptance |
|
| 548 | 579 |
IssuanceStatePending IssuanceStatus_State = 2 |
| 549 |
- IssuanceStateIssued IssuanceStatus_State = 3 |
|
| 550 |
- IssuanceStateFailed IssuanceStatus_State = 4 |
|
| 580 |
+ // successful completion certificate issuance |
|
| 581 |
+ IssuanceStateIssued IssuanceStatus_State = 3 |
|
| 582 |
+ // Certificate issuance failed |
|
| 583 |
+ IssuanceStateFailed IssuanceStatus_State = 4 |
|
| 584 |
+ // Signals workers to renew their certificate. From the CA's perspective |
|
| 585 |
+ // this is equivalent to IssuanceStateIssued: a noop. |
|
| 586 |
+ IssuanceStateRotate IssuanceStatus_State = 5 |
|
| 551 | 587 |
) |
| 552 | 588 |
|
| 553 | 589 |
var IssuanceStatus_State_name = map[int32]string{
|
| ... | ... |
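The issuance states above also gain the new ROTATE value. A hypothetical worker-side check, based only on the comments in this diff (RENEW and ROTATE both mean the node should obtain a fresh certificate, while ROTATE looks like ISSUED from the CA's side):

```go
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

// needsNewCertificate is a hypothetical helper, not swarmkit code: it reports
// whether a node should request a fresh certificate for the given state.
func needsNewCertificate(s api.IssuanceStatus_State) bool {
	switch s {
	case api.IssuanceStateRenew, api.IssuanceStateRotate:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(needsNewCertificate(api.IssuanceStateRotate)) // true
	fmt.Println(needsNewCertificate(api.IssuanceStateIssued)) // false
}
```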
@@ -556,6 +591,7 @@ var IssuanceStatus_State_name = map[int32]string{
|
| 556 | 556 |
2: "PENDING", |
| 557 | 557 |
3: "ISSUED", |
| 558 | 558 |
4: "FAILED", |
| 559 |
+ 5: "ROTATE", |
|
| 559 | 560 |
} |
| 560 | 561 |
var IssuanceStatus_State_value = map[string]int32{
|
| 561 | 562 |
"UNKNOWN": 0, |
| ... | ... |
@@ -563,6 +599,7 @@ var IssuanceStatus_State_value = map[string]int32{
|
| 563 | 563 |
"PENDING": 2, |
| 564 | 564 |
"ISSUED": 3, |
| 565 | 565 |
"FAILED": 4, |
| 566 |
+ "ROTATE": 5, |
|
| 566 | 567 |
} |
| 567 | 568 |
|
| 568 | 569 |
func (x IssuanceStatus_State) String() string {
|
| ... | ... |
@@ -1106,11 +1143,12 @@ type PortConfig struct {
|
| 1106 | 1106 |
Protocol PortConfig_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=docker.swarmkit.v1.PortConfig_Protocol" json:"protocol,omitempty"` |
| 1107 | 1107 |
// The port which the application is exposing and is bound to. |
| 1108 | 1108 |
TargetPort uint32 `protobuf:"varint,3,opt,name=target_port,json=targetPort,proto3" json:"target_port,omitempty"` |
| 1109 |
- // PublishedPort specifies the port on which the service is |
|
| 1110 |
- // exposed. If specified, the port must be |
|
| 1111 |
- // within the available range. If not specified, an available |
|
| 1112 |
- // port is automatically assigned. |
|
| 1109 |
+ // PublishedPort specifies the port on which the service is exposed. If |
|
| 1110 |
+ // specified, the port must be within the available range. If not specified |
|
| 1111 |
+ // (value is zero), an available port is automatically assigned. |
|
| 1113 | 1112 |
PublishedPort uint32 `protobuf:"varint,4,opt,name=published_port,json=publishedPort,proto3" json:"published_port,omitempty"` |
| 1113 |
+ // PublishMode controls how the port is published. |
|
| 1114 |
+ PublishMode PortConfig_PublishMode `protobuf:"varint,5,opt,name=publish_mode,json=publishMode,proto3,enum=docker.swarmkit.v1.PortConfig_PublishMode" json:"publish_mode,omitempty"` |
|
| 1114 | 1115 |
} |
| 1115 | 1116 |
|
| 1116 | 1117 |
func (m *PortConfig) Reset() { *m = PortConfig{} }
|
| ... | ... |
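Putting the PortConfig changes together, here is a minimal sketch of a host-mode port with the published port left at zero so the node assigns one, per the field comments above; the name and target port are illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

func main() {
	// Host-mode publishing with PublishedPort left at zero: an available port
	// is picked on the target host.
	port := api.PortConfig{
		Name:          "web",
		TargetPort:    80,
		PublishedPort: 0,
		PublishMode:   api.PublishModeHost,
	}

	// PublishMode is a plain varint field (field 5 on the wire), so it
	// round-trips through the regenerated Marshal/Unmarshal like any other field.
	data, err := port.Marshal()
	if err != nil {
		panic(err)
	}
	var decoded api.PortConfig
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.PublishMode) // HOST
}
```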
@@ -1397,18 +1435,17 @@ func (m *SecretReference) Reset() { *m = SecretReference{} }
|
| 1397 | 1397 |
func (*SecretReference) ProtoMessage() {}
|
| 1398 | 1398 |
func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{38} }
|
| 1399 | 1399 |
|
| 1400 |
-// RemovedNode is a record for a node that has been removed from the swarm. |
|
| 1401 |
-type RemovedNode struct {
|
|
| 1402 |
- // ID is the ID of the removed node. |
|
| 1403 |
- ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` |
|
| 1400 |
+// BlacklistedCertificate is a record for a blacklisted certificate. It does not |
|
| 1401 |
+// contain the certificate's CN, because these records are indexed by CN. |
|
| 1402 |
+type BlacklistedCertificate struct {
|
|
| 1404 | 1403 |
// Expiry is the latest known expiration time of a certificate that |
| 1405 |
- // was issued to this node. |
|
| 1406 |
- Expiry *docker_swarmkit_v1.Timestamp `protobuf:"bytes,2,opt,name=expiry" json:"expiry,omitempty"` |
|
| 1404 |
+ // was issued for the given CN. |
|
| 1405 |
+ Expiry *docker_swarmkit_v1.Timestamp `protobuf:"bytes,1,opt,name=expiry" json:"expiry,omitempty"` |
|
| 1407 | 1406 |
} |
| 1408 | 1407 |
|
| 1409 |
-func (m *RemovedNode) Reset() { *m = RemovedNode{} }
|
|
| 1410 |
-func (*RemovedNode) ProtoMessage() {}
|
|
| 1411 |
-func (*RemovedNode) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{39} }
|
|
| 1408 |
+func (m *BlacklistedCertificate) Reset() { *m = BlacklistedCertificate{} }
|
|
| 1409 |
+func (*BlacklistedCertificate) ProtoMessage() {}
|
|
| 1410 |
+func (*BlacklistedCertificate) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{39} }
|
|
| 1412 | 1411 |
|
| 1413 | 1412 |
func init() {
|
| 1414 | 1413 |
proto.RegisterType((*Version)(nil), "docker.swarmkit.v1.Version") |
| ... | ... |
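Because BlacklistedCertificate intentionally drops the old RemovedNode ID field, callers are expected to key these records by certificate CN themselves. A hedged sketch of that bookkeeping; the map and CN string are hypothetical, not swarmkit API:

```go
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

func main() {
	// Records are indexed by CN; each record only carries the latest known
	// expiry of any certificate issued for that CN.
	blacklist := map[string]*api.BlacklistedCertificate{
		"removed-node-cn": {}, // Expiry left nil in this sketch
	}

	if rec, ok := blacklist["removed-node-cn"]; ok {
		fmt.Println("CN is blacklisted; latest known expiry:", rec.Expiry)
	}
}
```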
@@ -1455,7 +1492,7 @@ func init() {
|
| 1455 | 1455 |
proto.RegisterType((*EncryptionKey)(nil), "docker.swarmkit.v1.EncryptionKey") |
| 1456 | 1456 |
proto.RegisterType((*ManagerStatus)(nil), "docker.swarmkit.v1.ManagerStatus") |
| 1457 | 1457 |
proto.RegisterType((*SecretReference)(nil), "docker.swarmkit.v1.SecretReference") |
| 1458 |
- proto.RegisterType((*RemovedNode)(nil), "docker.swarmkit.v1.RemovedNode") |
|
| 1458 |
+ proto.RegisterType((*BlacklistedCertificate)(nil), "docker.swarmkit.v1.BlacklistedCertificate") |
|
| 1459 | 1459 |
proto.RegisterEnum("docker.swarmkit.v1.TaskState", TaskState_name, TaskState_value)
|
| 1460 | 1460 |
proto.RegisterEnum("docker.swarmkit.v1.NodeRole", NodeRole_name, NodeRole_value)
|
| 1461 | 1461 |
proto.RegisterEnum("docker.swarmkit.v1.RaftMemberStatus_Reachability", RaftMemberStatus_Reachability_name, RaftMemberStatus_Reachability_value)
|
| ... | ... |
@@ -1467,6 +1504,7 @@ func init() {
|
| 1467 | 1467 |
proto.RegisterEnum("docker.swarmkit.v1.UpdateStatus_UpdateState", UpdateStatus_UpdateState_name, UpdateStatus_UpdateState_value)
|
| 1468 | 1468 |
proto.RegisterEnum("docker.swarmkit.v1.IPAMConfig_AddressFamily", IPAMConfig_AddressFamily_name, IPAMConfig_AddressFamily_value)
|
| 1469 | 1469 |
proto.RegisterEnum("docker.swarmkit.v1.PortConfig_Protocol", PortConfig_Protocol_name, PortConfig_Protocol_value)
|
| 1470 |
+ proto.RegisterEnum("docker.swarmkit.v1.PortConfig_PublishMode", PortConfig_PublishMode_name, PortConfig_PublishMode_value)
|
|
| 1470 | 1471 |
proto.RegisterEnum("docker.swarmkit.v1.IssuanceStatus_State", IssuanceStatus_State_name, IssuanceStatus_State_value)
|
| 1471 | 1472 |
proto.RegisterEnum("docker.swarmkit.v1.ExternalCA_CAProtocol", ExternalCA_CAProtocol_name, ExternalCA_CAProtocol_value)
|
| 1472 | 1473 |
proto.RegisterEnum("docker.swarmkit.v1.EncryptionKey_Algorithm", EncryptionKey_Algorithm_name, EncryptionKey_Algorithm_value)
|
| ... | ... |
@@ -1837,6 +1875,7 @@ func (m *PortConfig) Copy() *PortConfig {
|
| 1837 | 1837 |
Protocol: m.Protocol, |
| 1838 | 1838 |
TargetPort: m.TargetPort, |
| 1839 | 1839 |
PublishedPort: m.PublishedPort, |
| 1840 |
+ PublishMode: m.PublishMode, |
|
| 1840 | 1841 |
} |
| 1841 | 1842 |
|
| 1842 | 1843 |
return o |
| ... | ... |
@@ -2158,13 +2197,12 @@ func (m *SecretReference) Copy() *SecretReference {
|
| 2158 | 2158 |
return o |
| 2159 | 2159 |
} |
| 2160 | 2160 |
|
| 2161 |
-func (m *RemovedNode) Copy() *RemovedNode {
|
|
| 2161 |
+func (m *BlacklistedCertificate) Copy() *BlacklistedCertificate {
|
|
| 2162 | 2162 |
if m == nil {
|
| 2163 | 2163 |
return nil |
| 2164 | 2164 |
} |
| 2165 | 2165 |
|
| 2166 |
- o := &RemovedNode{
|
|
| 2167 |
- ID: m.ID, |
|
| 2166 |
+ o := &BlacklistedCertificate{
|
|
| 2168 | 2167 |
Expiry: m.Expiry.Copy(), |
| 2169 | 2168 |
} |
| 2170 | 2169 |
|
| ... | ... |
@@ -2529,12 +2567,13 @@ func (this *PortConfig) GoString() string {
|
| 2529 | 2529 |
if this == nil {
|
| 2530 | 2530 |
return "nil" |
| 2531 | 2531 |
} |
| 2532 |
- s := make([]string, 0, 8) |
|
| 2532 |
+ s := make([]string, 0, 9) |
|
| 2533 | 2533 |
s = append(s, "&api.PortConfig{")
|
| 2534 | 2534 |
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
|
| 2535 | 2535 |
s = append(s, "Protocol: "+fmt.Sprintf("%#v", this.Protocol)+",\n")
|
| 2536 | 2536 |
s = append(s, "TargetPort: "+fmt.Sprintf("%#v", this.TargetPort)+",\n")
|
| 2537 | 2537 |
s = append(s, "PublishedPort: "+fmt.Sprintf("%#v", this.PublishedPort)+",\n")
|
| 2538 |
+ s = append(s, "PublishMode: "+fmt.Sprintf("%#v", this.PublishMode)+",\n")
|
|
| 2538 | 2539 |
s = append(s, "}") |
| 2539 | 2540 |
return strings.Join(s, "") |
| 2540 | 2541 |
} |
| ... | ... |
@@ -2822,13 +2861,12 @@ func (this *SecretReference) GoString() string {
|
| 2822 | 2822 |
s = append(s, "}") |
| 2823 | 2823 |
return strings.Join(s, "") |
| 2824 | 2824 |
} |
| 2825 |
-func (this *RemovedNode) GoString() string {
|
|
| 2825 |
+func (this *BlacklistedCertificate) GoString() string {
|
|
| 2826 | 2826 |
if this == nil {
|
| 2827 | 2827 |
return "nil" |
| 2828 | 2828 |
} |
| 2829 |
- s := make([]string, 0, 6) |
|
| 2830 |
- s = append(s, "&api.RemovedNode{")
|
|
| 2831 |
- s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n")
|
|
| 2829 |
+ s := make([]string, 0, 5) |
|
| 2830 |
+ s = append(s, "&api.BlacklistedCertificate{")
|
|
| 2832 | 2831 |
if this.Expiry != nil {
|
| 2833 | 2832 |
s = append(s, "Expiry: "+fmt.Sprintf("%#v", this.Expiry)+",\n")
|
| 2834 | 2833 |
} |
| ... | ... |
@@ -3827,6 +3865,11 @@ func (m *PortConfig) MarshalTo(data []byte) (int, error) {
|
| 3827 | 3827 |
i++ |
| 3828 | 3828 |
i = encodeVarintTypes(data, i, uint64(m.PublishedPort)) |
| 3829 | 3829 |
} |
| 3830 |
+ if m.PublishMode != 0 {
|
|
| 3831 |
+ data[i] = 0x28 |
|
| 3832 |
+ i++ |
|
| 3833 |
+ i = encodeVarintTypes(data, i, uint64(m.PublishMode)) |
|
| 3834 |
+ } |
|
| 3830 | 3835 |
return i, nil |
| 3831 | 3836 |
} |
| 3832 | 3837 |
|
| ... | ... |
@@ -4595,7 +4638,7 @@ func (m *SecretReference) MarshalTo(data []byte) (int, error) {
|
| 4595 | 4595 |
return i, nil |
| 4596 | 4596 |
} |
| 4597 | 4597 |
|
| 4598 |
-func (m *RemovedNode) Marshal() (data []byte, err error) {
|
|
| 4598 |
+func (m *BlacklistedCertificate) Marshal() (data []byte, err error) {
|
|
| 4599 | 4599 |
size := m.Size() |
| 4600 | 4600 |
data = make([]byte, size) |
| 4601 | 4601 |
n, err := m.MarshalTo(data) |
| ... | ... |
@@ -4605,19 +4648,13 @@ func (m *RemovedNode) Marshal() (data []byte, err error) {
|
| 4605 | 4605 |
return data[:n], nil |
| 4606 | 4606 |
} |
| 4607 | 4607 |
|
| 4608 |
-func (m *RemovedNode) MarshalTo(data []byte) (int, error) {
|
|
| 4608 |
+func (m *BlacklistedCertificate) MarshalTo(data []byte) (int, error) {
|
|
| 4609 | 4609 |
var i int |
| 4610 | 4610 |
_ = i |
| 4611 | 4611 |
var l int |
| 4612 | 4612 |
_ = l |
| 4613 |
- if len(m.ID) > 0 {
|
|
| 4614 |
- data[i] = 0xa |
|
| 4615 |
- i++ |
|
| 4616 |
- i = encodeVarintTypes(data, i, uint64(len(m.ID))) |
|
| 4617 |
- i += copy(data[i:], m.ID) |
|
| 4618 |
- } |
|
| 4619 | 4613 |
if m.Expiry != nil {
|
| 4620 |
- data[i] = 0x12 |
|
| 4614 |
+ data[i] = 0xa |
|
| 4621 | 4615 |
i++ |
| 4622 | 4616 |
i = encodeVarintTypes(data, i, uint64(m.Expiry.Size())) |
| 4623 | 4617 |
n27, err := m.Expiry.MarshalTo(data[i:]) |
| ... | ... |
@@ -5075,6 +5112,9 @@ func (m *PortConfig) Size() (n int) {
|
| 5075 | 5075 |
if m.PublishedPort != 0 {
|
| 5076 | 5076 |
n += 1 + sovTypes(uint64(m.PublishedPort)) |
| 5077 | 5077 |
} |
| 5078 |
+ if m.PublishMode != 0 {
|
|
| 5079 |
+ n += 1 + sovTypes(uint64(m.PublishMode)) |
|
| 5080 |
+ } |
|
| 5078 | 5081 |
return n |
| 5079 | 5082 |
} |
| 5080 | 5083 |
|
| ... | ... |
@@ -5410,13 +5450,9 @@ func (m *SecretReference) Size() (n int) {
|
| 5410 | 5410 |
return n |
| 5411 | 5411 |
} |
| 5412 | 5412 |
|
| 5413 |
-func (m *RemovedNode) Size() (n int) {
|
|
| 5413 |
+func (m *BlacklistedCertificate) Size() (n int) {
|
|
| 5414 | 5414 |
var l int |
| 5415 | 5415 |
_ = l |
| 5416 |
- l = len(m.ID) |
|
| 5417 |
- if l > 0 {
|
|
| 5418 |
- n += 1 + l + sovTypes(uint64(l)) |
|
| 5419 |
- } |
|
| 5420 | 5416 |
if m.Expiry != nil {
|
| 5421 | 5417 |
l = m.Expiry.Size() |
| 5422 | 5418 |
n += 1 + l + sovTypes(uint64(l)) |
| ... | ... |
@@ -5760,6 +5796,7 @@ func (this *PortConfig) String() string {
|
| 5760 | 5760 |
`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
|
| 5761 | 5761 |
`TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`,
|
| 5762 | 5762 |
`PublishedPort:` + fmt.Sprintf("%v", this.PublishedPort) + `,`,
|
| 5763 |
+ `PublishMode:` + fmt.Sprintf("%v", this.PublishMode) + `,`,
|
|
| 5763 | 5764 |
`}`, |
| 5764 | 5765 |
}, "") |
| 5765 | 5766 |
return s |
| ... | ... |
@@ -6026,12 +6063,11 @@ func (this *SecretReference) String() string {
|
| 6026 | 6026 |
}, "") |
| 6027 | 6027 |
return s |
| 6028 | 6028 |
} |
| 6029 |
-func (this *RemovedNode) String() string {
|
|
| 6029 |
+func (this *BlacklistedCertificate) String() string {
|
|
| 6030 | 6030 |
if this == nil {
|
| 6031 | 6031 |
return "nil" |
| 6032 | 6032 |
} |
| 6033 |
- s := strings.Join([]string{`&RemovedNode{`,
|
|
| 6034 |
- `ID:` + fmt.Sprintf("%v", this.ID) + `,`,
|
|
| 6033 |
+ s := strings.Join([]string{`&BlacklistedCertificate{`,
|
|
| 6035 | 6034 |
`Expiry:` + strings.Replace(fmt.Sprintf("%v", this.Expiry), "Timestamp", "docker_swarmkit_v1.Timestamp", 1) + `,`,
|
| 6036 | 6035 |
`}`, |
| 6037 | 6036 |
}, "") |
| ... | ... |
@@ -9366,6 +9402,25 @@ func (m *PortConfig) Unmarshal(data []byte) error {
|
| 9366 | 9366 |
break |
| 9367 | 9367 |
} |
| 9368 | 9368 |
} |
| 9369 |
+ case 5: |
|
| 9370 |
+ if wireType != 0 {
|
|
| 9371 |
+ return fmt.Errorf("proto: wrong wireType = %d for field PublishMode", wireType)
|
|
| 9372 |
+ } |
|
| 9373 |
+ m.PublishMode = 0 |
|
| 9374 |
+ for shift := uint(0); ; shift += 7 {
|
|
| 9375 |
+ if shift >= 64 {
|
|
| 9376 |
+ return ErrIntOverflowTypes |
|
| 9377 |
+ } |
|
| 9378 |
+ if iNdEx >= l {
|
|
| 9379 |
+ return io.ErrUnexpectedEOF |
|
| 9380 |
+ } |
|
| 9381 |
+ b := data[iNdEx] |
|
| 9382 |
+ iNdEx++ |
|
| 9383 |
+ m.PublishMode |= (PortConfig_PublishMode(b) & 0x7F) << shift |
|
| 9384 |
+ if b < 0x80 {
|
|
| 9385 |
+ break |
|
| 9386 |
+ } |
|
| 9387 |
+ } |
|
| 9369 | 9388 |
default: |
| 9370 | 9389 |
iNdEx = preIndex |
| 9371 | 9390 |
skippy, err := skipTypes(data[iNdEx:]) |
| ... | ... |
@@ -12014,7 +12069,7 @@ func (m *SecretReference) Unmarshal(data []byte) error {
|
| 12014 | 12014 |
} |
| 12015 | 12015 |
return nil |
| 12016 | 12016 |
} |
| 12017 |
-func (m *RemovedNode) Unmarshal(data []byte) error {
|
|
| 12017 |
+func (m *BlacklistedCertificate) Unmarshal(data []byte) error {
|
|
| 12018 | 12018 |
l := len(data) |
| 12019 | 12019 |
iNdEx := 0 |
| 12020 | 12020 |
for iNdEx < l {
|
| ... | ... |
@@ -12037,43 +12092,14 @@ func (m *RemovedNode) Unmarshal(data []byte) error {
|
| 12037 | 12037 |
fieldNum := int32(wire >> 3) |
| 12038 | 12038 |
wireType := int(wire & 0x7) |
| 12039 | 12039 |
if wireType == 4 {
|
| 12040 |
- return fmt.Errorf("proto: RemovedNode: wiretype end group for non-group")
|
|
| 12040 |
+ return fmt.Errorf("proto: BlacklistedCertificate: wiretype end group for non-group")
|
|
| 12041 | 12041 |
} |
| 12042 | 12042 |
if fieldNum <= 0 {
|
| 12043 |
- return fmt.Errorf("proto: RemovedNode: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
| 12043 |
+ return fmt.Errorf("proto: BlacklistedCertificate: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
| 12044 | 12044 |
} |
| 12045 | 12045 |
switch fieldNum {
|
| 12046 | 12046 |
case 1: |
| 12047 | 12047 |
if wireType != 2 {
|
| 12048 |
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
|
|
| 12049 |
- } |
|
| 12050 |
- var stringLen uint64 |
|
| 12051 |
- for shift := uint(0); ; shift += 7 {
|
|
| 12052 |
- if shift >= 64 {
|
|
| 12053 |
- return ErrIntOverflowTypes |
|
| 12054 |
- } |
|
| 12055 |
- if iNdEx >= l {
|
|
| 12056 |
- return io.ErrUnexpectedEOF |
|
| 12057 |
- } |
|
| 12058 |
- b := data[iNdEx] |
|
| 12059 |
- iNdEx++ |
|
| 12060 |
- stringLen |= (uint64(b) & 0x7F) << shift |
|
| 12061 |
- if b < 0x80 {
|
|
| 12062 |
- break |
|
| 12063 |
- } |
|
| 12064 |
- } |
|
| 12065 |
- intStringLen := int(stringLen) |
|
| 12066 |
- if intStringLen < 0 {
|
|
| 12067 |
- return ErrInvalidLengthTypes |
|
| 12068 |
- } |
|
| 12069 |
- postIndex := iNdEx + intStringLen |
|
| 12070 |
- if postIndex > l {
|
|
| 12071 |
- return io.ErrUnexpectedEOF |
|
| 12072 |
- } |
|
| 12073 |
- m.ID = string(data[iNdEx:postIndex]) |
|
| 12074 |
- iNdEx = postIndex |
|
| 12075 |
- case 2: |
|
| 12076 |
- if wireType != 2 {
|
|
| 12077 | 12048 |
return fmt.Errorf("proto: wrong wireType = %d for field Expiry", wireType)
|
| 12078 | 12049 |
} |
| 12079 | 12050 |
var msglen int |
| ... | ... |
@@ -12234,233 +12260,236 @@ var ( |
| 12234 | 12234 |
func init() { proto.RegisterFile("types.proto", fileDescriptorTypes) }
|
| 12235 | 12235 |
|
| 12236 | 12236 |
var fileDescriptorTypes = []byte{
|
| 12237 |
- // 3635 bytes of a gzipped FileDescriptorProto |
|
| 12237 |
+ // 3694 bytes of a gzipped FileDescriptorProto |
|
| 12238 | 12238 |
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x59, 0xcd, 0x6f, 0x23, 0x47, |
| 12239 | 12239 |
0x76, 0x17, 0x3f, 0x45, 0x3e, 0x52, 0x52, 0x4f, 0xcd, 0xec, 0x58, 0x43, 0x8f, 0x25, 0xba, 0xc7, |
| 12240 |
- 0xb3, 0x1e, 0x7b, 0x1d, 0xda, 0x96, 0x77, 0x8d, 0x59, 0xcf, 0x66, 0xed, 0x16, 0x49, 0xcd, 0x70, |
|
| 12241 |
- 0x47, 0xa2, 0x88, 0x22, 0x39, 0x03, 0x23, 0x41, 0x88, 0x52, 0x77, 0x89, 0x6a, 0xab, 0xd9, 0xc5, |
|
| 12240 |
+ 0xb3, 0x1e, 0xcf, 0x3a, 0xb4, 0x2d, 0xef, 0x1a, 0xb3, 0x9e, 0xcd, 0xda, 0x2d, 0x92, 0x1a, 0x71, |
|
| 12241 |
+ 0x47, 0x22, 0x89, 0x22, 0x39, 0x03, 0x23, 0x40, 0x88, 0x52, 0x77, 0x89, 0x6a, 0xab, 0xd9, 0xcd, |
|
| 12242 | 12242 |
0x74, 0x17, 0xa5, 0x61, 0x82, 0x00, 0x93, 0x1c, 0x92, 0x40, 0xa7, 0xdc, 0x03, 0x61, 0x11, 0x24, |
| 12243 |
- 0xc8, 0x2d, 0xe7, 0x00, 0x39, 0xf9, 0xe8, 0xe3, 0x06, 0x01, 0x82, 0xc5, 0x06, 0x19, 0xc4, 0xca, |
|
| 12244 |
- 0x3f, 0xb0, 0x40, 0x10, 0xec, 0x21, 0x39, 0x04, 0xf5, 0xd1, 0xcd, 0x8f, 0xe1, 0xc8, 0x72, 0xd6, |
|
| 12245 |
- 0x27, 0xb2, 0x5e, 0xfd, 0xde, 0xab, 0x57, 0x55, 0xaf, 0x5e, 0xfd, 0x5e, 0x35, 0x14, 0xf8, 0x78, |
|
| 12246 |
- 0x48, 0xc3, 0xca, 0x30, 0x60, 0x9c, 0x21, 0xe4, 0x30, 0xfb, 0x98, 0x06, 0x95, 0xf0, 0x94, 0x04, |
|
| 12247 |
- 0x83, 0x63, 0x97, 0x57, 0x4e, 0x3e, 0x2c, 0xdd, 0xe2, 0xee, 0x80, 0x86, 0x9c, 0x0c, 0x86, 0xef, |
|
| 12248 |
- 0xc7, 0xff, 0x14, 0xbc, 0xf4, 0x9a, 0x33, 0x0a, 0x08, 0x77, 0x99, 0xff, 0x7e, 0xf4, 0x47, 0x77, |
|
| 12249 |
- 0xdc, 0xe8, 0xb3, 0x3e, 0x93, 0x7f, 0xdf, 0x17, 0xff, 0x94, 0xd4, 0xdc, 0x84, 0xe5, 0x27, 0x34, |
|
| 12250 |
- 0x08, 0x5d, 0xe6, 0xa3, 0x1b, 0x90, 0x71, 0x7d, 0x87, 0x3e, 0x5b, 0x4f, 0x94, 0x13, 0xf7, 0xd2, |
|
| 12251 |
- 0x58, 0x35, 0xcc, 0xbf, 0x49, 0x40, 0xc1, 0xf2, 0x7d, 0xc6, 0xa5, 0xad, 0x10, 0x21, 0x48, 0xfb, |
|
| 12252 |
- 0x64, 0x40, 0x25, 0x28, 0x8f, 0xe5, 0x7f, 0x54, 0x85, 0xac, 0x47, 0x0e, 0xa8, 0x17, 0xae, 0x27, |
|
| 12253 |
- 0xcb, 0xa9, 0x7b, 0x85, 0xad, 0x1f, 0x54, 0x5e, 0xf6, 0xb9, 0x32, 0x65, 0xa4, 0xb2, 0x2b, 0xd1, |
|
| 12254 |
- 0x75, 0x9f, 0x07, 0x63, 0xac, 0x55, 0x4b, 0x3f, 0x86, 0xc2, 0x94, 0x18, 0x19, 0x90, 0x3a, 0xa6, |
|
| 12255 |
- 0x63, 0x3d, 0x8c, 0xf8, 0x2b, 0xfc, 0x3b, 0x21, 0xde, 0x88, 0xae, 0x27, 0xa5, 0x4c, 0x35, 0x3e, |
|
| 12256 |
- 0x49, 0xde, 0x4f, 0x98, 0x9f, 0x43, 0x1e, 0xd3, 0x90, 0x8d, 0x02, 0x9b, 0x86, 0xe8, 0x1d, 0xc8, |
|
| 12257 |
- 0xfb, 0xc4, 0x67, 0x3d, 0x7b, 0x38, 0x0a, 0xa5, 0x7a, 0x6a, 0xbb, 0x78, 0xf1, 0x62, 0x33, 0xd7, |
|
| 12258 |
- 0x24, 0x3e, 0xab, 0xb6, 0xba, 0x21, 0xce, 0x89, 0xee, 0xea, 0x70, 0x14, 0xa2, 0x37, 0xa1, 0x38, |
|
| 12259 |
- 0xa0, 0x03, 0x16, 0x8c, 0x7b, 0x07, 0x63, 0x4e, 0x43, 0x69, 0x38, 0x85, 0x0b, 0x4a, 0xb6, 0x2d, |
|
| 12260 |
- 0x44, 0xe6, 0x5f, 0x25, 0xe0, 0x46, 0x64, 0x1b, 0xd3, 0x3f, 0x1c, 0xb9, 0x01, 0x1d, 0x50, 0x9f, |
|
| 12261 |
- 0x87, 0xe8, 0x47, 0x90, 0xf5, 0xdc, 0x81, 0xcb, 0xd5, 0x18, 0x85, 0xad, 0x37, 0x16, 0xcd, 0x39, |
|
| 12262 |
- 0xf6, 0x0a, 0x6b, 0x30, 0xb2, 0xa0, 0x18, 0xd0, 0x90, 0x06, 0x27, 0x6a, 0x25, 0xe4, 0x90, 0xdf, |
|
| 12263 |
- 0xa8, 0x3c, 0xa3, 0x62, 0xee, 0x40, 0xae, 0xe5, 0x11, 0x7e, 0xc8, 0x82, 0x01, 0x32, 0xa1, 0x48, |
|
| 12264 |
- 0x02, 0xfb, 0xc8, 0xe5, 0xd4, 0xe6, 0xa3, 0x20, 0xda, 0x95, 0x19, 0x19, 0xba, 0x09, 0x49, 0xa6, |
|
| 12265 |
- 0x06, 0xca, 0x6f, 0x67, 0x2f, 0x5e, 0x6c, 0x26, 0xf7, 0xdb, 0x38, 0xc9, 0x42, 0xf3, 0x01, 0x5c, |
|
| 12266 |
- 0x6b, 0x79, 0xa3, 0xbe, 0xeb, 0xd7, 0x68, 0x68, 0x07, 0xee, 0x50, 0x58, 0x17, 0xdb, 0x2b, 0x82, |
|
| 12267 |
- 0x2f, 0xda, 0x5e, 0xf1, 0x3f, 0xde, 0xf2, 0xe4, 0x64, 0xcb, 0xcd, 0xbf, 0x48, 0xc2, 0xb5, 0xba, |
|
| 12268 |
- 0xdf, 0x77, 0x7d, 0x3a, 0xad, 0x7d, 0x17, 0x56, 0xa9, 0x14, 0xf6, 0x4e, 0x54, 0x50, 0x69, 0x3b, |
|
| 12269 |
- 0x2b, 0x4a, 0x1a, 0x45, 0x5a, 0x63, 0x2e, 0x5e, 0x3e, 0x5c, 0x34, 0xfd, 0x97, 0xac, 0x2f, 0x8a, |
|
| 12270 |
- 0x1a, 0x54, 0x87, 0xe5, 0xa1, 0x9c, 0x44, 0xb8, 0x9e, 0x92, 0xb6, 0xee, 0x2e, 0xb2, 0xf5, 0xd2, |
|
| 12271 |
- 0x3c, 0xb7, 0xd3, 0x5f, 0xbd, 0xd8, 0x5c, 0xc2, 0x91, 0xee, 0x6f, 0x13, 0x7c, 0xff, 0x99, 0x80, |
|
| 12272 |
- 0xb5, 0x26, 0x73, 0x66, 0xd6, 0xa1, 0x04, 0xb9, 0x23, 0x16, 0xf2, 0xa9, 0x83, 0x12, 0xb7, 0xd1, |
|
| 12273 |
- 0x7d, 0xc8, 0x0d, 0xf5, 0xf6, 0xe9, 0xdd, 0xbf, 0xbd, 0xd8, 0x65, 0x85, 0xc1, 0x31, 0x1a, 0x3d, |
|
| 12274 |
- 0x80, 0x7c, 0x10, 0xc5, 0xc4, 0x7a, 0xea, 0x2a, 0x81, 0x33, 0xc1, 0xa3, 0xdf, 0x85, 0xac, 0xda, |
|
| 12275 |
- 0x84, 0xf5, 0xb4, 0xd4, 0xbc, 0x7b, 0xa5, 0x35, 0xc7, 0x5a, 0xc9, 0xfc, 0x65, 0x02, 0x0c, 0x4c, |
|
| 12276 |
- 0x0e, 0xf9, 0x1e, 0x1d, 0x1c, 0xd0, 0xa0, 0xcd, 0x09, 0x1f, 0x85, 0xe8, 0x26, 0x64, 0x3d, 0x4a, |
|
| 12277 |
- 0x1c, 0x1a, 0xc8, 0x49, 0xe6, 0xb0, 0x6e, 0xa1, 0xae, 0x08, 0x72, 0x62, 0x1f, 0x91, 0x03, 0xd7, |
|
| 12278 |
- 0x73, 0xf9, 0x58, 0x4e, 0x73, 0x75, 0xf1, 0x2e, 0xcf, 0xdb, 0xac, 0xe0, 0x29, 0x45, 0x3c, 0x63, |
|
| 12279 |
- 0x06, 0xad, 0xc3, 0xf2, 0x80, 0x86, 0x21, 0xe9, 0x53, 0x39, 0xfb, 0x3c, 0x8e, 0x9a, 0xe6, 0x03, |
|
| 12280 |
- 0x28, 0x4e, 0xeb, 0xa1, 0x02, 0x2c, 0x77, 0x9b, 0x8f, 0x9b, 0xfb, 0x4f, 0x9b, 0xc6, 0x12, 0x5a, |
|
| 12281 |
- 0x83, 0x42, 0xb7, 0x89, 0xeb, 0x56, 0xf5, 0x91, 0xb5, 0xbd, 0x5b, 0x37, 0x12, 0x68, 0x05, 0xf2, |
|
| 12282 |
- 0x93, 0x66, 0xd2, 0xfc, 0x79, 0x02, 0x40, 0x6c, 0xa0, 0x9e, 0xd4, 0x27, 0x90, 0x09, 0x39, 0xe1, |
|
| 12283 |
- 0x6a, 0xe3, 0x56, 0xb7, 0xde, 0x5a, 0xe4, 0xf5, 0x04, 0x5e, 0x11, 0x3f, 0x14, 0x2b, 0x95, 0x69, |
|
| 12284 |
- 0x0f, 0x93, 0xf3, 0x1e, 0x66, 0x24, 0x72, 0xd6, 0xb5, 0x1c, 0xa4, 0x6b, 0xe2, 0x5f, 0x02, 0xe5, |
|
| 12285 |
- 0x21, 0x83, 0xeb, 0x56, 0xed, 0x73, 0x23, 0x89, 0x0c, 0x28, 0xd6, 0x1a, 0xed, 0xea, 0x7e, 0xb3, |
|
| 12286 |
- 0x59, 0xaf, 0x76, 0xea, 0x35, 0x23, 0x65, 0xde, 0x85, 0x4c, 0x63, 0x40, 0xfa, 0x14, 0xdd, 0x16, |
|
| 12287 |
- 0x11, 0x70, 0x48, 0x03, 0xea, 0xdb, 0x51, 0x60, 0x4d, 0x04, 0xe6, 0x2f, 0xf2, 0x90, 0xd9, 0x63, |
|
| 12288 |
- 0x23, 0x9f, 0xa3, 0xad, 0xa9, 0x53, 0xbc, 0xba, 0xb5, 0xb1, 0x68, 0x0a, 0x12, 0x58, 0xe9, 0x8c, |
|
| 12289 |
- 0x87, 0x54, 0x9f, 0xf2, 0x9b, 0x90, 0x55, 0xb1, 0xa2, 0x5d, 0xd7, 0x2d, 0x21, 0xe7, 0x24, 0xe8, |
|
| 12290 |
- 0x53, 0xae, 0x17, 0x5d, 0xb7, 0xd0, 0x3d, 0xc8, 0x05, 0x94, 0x38, 0xcc, 0xf7, 0xc6, 0x32, 0xa4, |
|
| 12291 |
- 0x72, 0x2a, 0xcd, 0x62, 0x4a, 0x9c, 0x7d, 0xdf, 0x1b, 0xe3, 0xb8, 0x17, 0x3d, 0x82, 0xe2, 0x81, |
|
| 12292 |
- 0xeb, 0x3b, 0x3d, 0x36, 0x54, 0x39, 0x2f, 0xf3, 0xea, 0x00, 0x54, 0x5e, 0x6d, 0xbb, 0xbe, 0xb3, |
|
| 12293 |
- 0xaf, 0xc0, 0xb8, 0x70, 0x30, 0x69, 0xa0, 0x26, 0xac, 0x9e, 0x30, 0x6f, 0x34, 0xa0, 0xb1, 0xad, |
|
| 12294 |
- 0xac, 0xb4, 0xf5, 0xf6, 0xab, 0x6d, 0x3d, 0x91, 0xf8, 0xc8, 0xda, 0xca, 0xc9, 0x74, 0x13, 0x3d, |
|
| 12295 |
- 0x86, 0x15, 0x3e, 0x18, 0x1e, 0x86, 0xb1, 0xb9, 0x65, 0x69, 0xee, 0xfb, 0x97, 0x2c, 0x98, 0x80, |
|
| 12296 |
- 0x47, 0xd6, 0x8a, 0x7c, 0xaa, 0x55, 0xfa, 0xb3, 0x14, 0x14, 0xa6, 0x3c, 0x47, 0x6d, 0x28, 0x0c, |
|
| 12297 |
- 0x03, 0x36, 0x24, 0x7d, 0x99, 0xb7, 0xf5, 0x5e, 0x7c, 0x78, 0xa5, 0x59, 0x57, 0x5a, 0x13, 0x45, |
|
| 12298 |
- 0x3c, 0x6d, 0xc5, 0x3c, 0x4f, 0x42, 0x61, 0xaa, 0x13, 0xbd, 0x0b, 0x39, 0xdc, 0xc2, 0x8d, 0x27, |
|
| 12299 |
- 0x56, 0xa7, 0x6e, 0x2c, 0x95, 0x6e, 0x9f, 0x9d, 0x97, 0xd7, 0xa5, 0xb5, 0x69, 0x03, 0xad, 0xc0, |
|
| 12300 |
- 0x3d, 0x11, 0xa1, 0x77, 0x0f, 0x96, 0x23, 0x68, 0xa2, 0xf4, 0xfa, 0xd9, 0x79, 0xf9, 0xb5, 0x79, |
|
| 12301 |
- 0xe8, 0x14, 0x12, 0xb7, 0x1f, 0x59, 0xb8, 0x5e, 0x33, 0x92, 0x8b, 0x91, 0xb8, 0x7d, 0x44, 0x02, |
|
| 12302 |
- 0xea, 0xa0, 0xef, 0x43, 0x56, 0x03, 0x53, 0xa5, 0xd2, 0xd9, 0x79, 0xf9, 0xe6, 0x3c, 0x70, 0x82, |
|
| 12303 |
- 0xc3, 0xed, 0x5d, 0xeb, 0x49, 0xdd, 0x48, 0x2f, 0xc6, 0xe1, 0xb6, 0x47, 0x4e, 0x28, 0x7a, 0x0b, |
|
| 12304 |
- 0x32, 0x0a, 0x96, 0x29, 0xdd, 0x3a, 0x3b, 0x2f, 0x7f, 0xef, 0x25, 0x73, 0x02, 0x55, 0x5a, 0xff, |
|
| 12305 |
- 0xcb, 0xbf, 0xdd, 0x58, 0xfa, 0xa7, 0xbf, 0xdb, 0x30, 0xe6, 0xbb, 0x4b, 0xff, 0x9b, 0x80, 0x95, |
|
| 12306 |
- 0x99, 0x2d, 0x47, 0x26, 0x64, 0x7d, 0x66, 0xb3, 0xa1, 0x4a, 0xe7, 0xb9, 0x6d, 0xb8, 0x78, 0xb1, |
|
| 12307 |
- 0x99, 0x6d, 0xb2, 0x2a, 0x1b, 0x8e, 0xb1, 0xee, 0x41, 0x8f, 0xe7, 0x2e, 0xa4, 0x8f, 0xae, 0x18, |
|
| 12308 |
- 0x4f, 0x0b, 0xaf, 0xa4, 0x4f, 0x61, 0xc5, 0x09, 0xdc, 0x13, 0x1a, 0xf4, 0x6c, 0xe6, 0x1f, 0xba, |
|
| 12309 |
- 0x7d, 0x9d, 0xaa, 0x4b, 0x8b, 0x6c, 0xd6, 0x24, 0x10, 0x17, 0x95, 0x42, 0x55, 0xe2, 0x7f, 0x8b, |
|
| 12310 |
- 0xcb, 0xa8, 0xf4, 0x04, 0x8a, 0xd3, 0x11, 0x8a, 0xde, 0x00, 0x08, 0xdd, 0x3f, 0xa2, 0x9a, 0xdf, |
|
| 12311 |
- 0x48, 0x36, 0x84, 0xf3, 0x42, 0x22, 0xd9, 0x0d, 0x7a, 0x1b, 0xd2, 0x03, 0xe6, 0x28, 0x3b, 0x99, |
|
| 12312 |
- 0xed, 0xeb, 0xe2, 0x4e, 0xfc, 0xd5, 0x8b, 0xcd, 0x02, 0x0b, 0x2b, 0x3b, 0xae, 0x47, 0xf7, 0x98, |
|
| 12313 |
- 0x43, 0xb1, 0x04, 0x98, 0x27, 0x90, 0x16, 0xa9, 0x02, 0xbd, 0x0e, 0xe9, 0xed, 0x46, 0xb3, 0x66, |
|
| 12314 |
- 0x2c, 0x95, 0xae, 0x9d, 0x9d, 0x97, 0x57, 0xe4, 0x92, 0x88, 0x0e, 0x11, 0xbb, 0x68, 0x13, 0xb2, |
|
| 12315 |
- 0x4f, 0xf6, 0x77, 0xbb, 0x7b, 0x22, 0xbc, 0xae, 0x9f, 0x9d, 0x97, 0xd7, 0xe2, 0x6e, 0xb5, 0x68, |
|
| 12316 |
- 0xe8, 0x0d, 0xc8, 0x74, 0xf6, 0x5a, 0x3b, 0x6d, 0x23, 0x59, 0x42, 0x67, 0xe7, 0xe5, 0xd5, 0xb8, |
|
| 12317 |
- 0x5f, 0xfa, 0x5c, 0xba, 0xa6, 0x77, 0x35, 0x1f, 0xcb, 0xcd, 0xff, 0x49, 0xc2, 0x0a, 0x16, 0xfc, |
|
| 12318 |
- 0x36, 0xe0, 0x2d, 0xe6, 0xb9, 0xf6, 0x18, 0xb5, 0x20, 0x6f, 0x33, 0xdf, 0x71, 0xa7, 0xce, 0xd4, |
|
| 12319 |
- 0xd6, 0x2b, 0x2e, 0xc1, 0x89, 0x56, 0xd4, 0xaa, 0x46, 0x9a, 0x78, 0x62, 0x04, 0x6d, 0x41, 0xc6, |
|
| 12320 |
- 0xa1, 0x1e, 0x19, 0x5f, 0x76, 0x1b, 0xd7, 0x34, 0x97, 0xc6, 0x0a, 0x2a, 0x99, 0x23, 0x79, 0xd6, |
|
| 12321 |
- 0x23, 0x9c, 0xd3, 0xc1, 0x90, 0xab, 0xdb, 0x38, 0x8d, 0x0b, 0x03, 0xf2, 0xcc, 0xd2, 0x22, 0xf4, |
|
| 12322 |
- 0x43, 0xc8, 0x9e, 0xba, 0xbe, 0xc3, 0x4e, 0xf5, 0x85, 0x7b, 0xb9, 0x5d, 0x8d, 0x35, 0xcf, 0xc4, |
|
| 12323 |
- 0x3d, 0x3b, 0xe7, 0xac, 0x58, 0xf5, 0xe6, 0x7e, 0xb3, 0x1e, 0xad, 0xba, 0xee, 0xdf, 0xf7, 0x9b, |
|
| 12324 |
- 0xcc, 0x17, 0x27, 0x06, 0xf6, 0x9b, 0xbd, 0x1d, 0xab, 0xb1, 0xdb, 0xc5, 0x62, 0xe5, 0x6f, 0x9c, |
|
| 12325 |
- 0x9d, 0x97, 0x8d, 0x18, 0xb2, 0x43, 0x5c, 0x4f, 0x90, 0xc0, 0x5b, 0x90, 0xb2, 0x9a, 0x9f, 0x1b, |
|
| 12326 |
- 0xc9, 0x92, 0x71, 0x76, 0x5e, 0x2e, 0xc6, 0xdd, 0x96, 0x3f, 0x9e, 0x1c, 0xa6, 0xf9, 0x71, 0xcd, |
|
| 12327 |
- 0x7f, 0x4f, 0x42, 0xb1, 0x3b, 0x74, 0x08, 0xa7, 0x2a, 0x32, 0x51, 0x19, 0x0a, 0x43, 0x12, 0x10, |
|
| 12328 |
- 0xcf, 0xa3, 0x9e, 0x1b, 0x0e, 0x74, 0xa1, 0x30, 0x2d, 0x42, 0xf7, 0xbf, 0xc5, 0x62, 0x6a, 0x12, |
|
| 12329 |
- 0xa6, 0x97, 0xb4, 0x0b, 0xab, 0x87, 0xca, 0xd9, 0x1e, 0xb1, 0xe5, 0xee, 0xa6, 0xe4, 0xee, 0x56, |
|
| 12330 |
- 0x16, 0x99, 0x98, 0xf6, 0xaa, 0xa2, 0xe7, 0x68, 0x49, 0x2d, 0xbc, 0x72, 0x38, 0xdd, 0x44, 0x1f, |
|
| 12331 |
- 0xc3, 0xf2, 0x80, 0xf9, 0x2e, 0x67, 0xc1, 0x95, 0xf6, 0x21, 0x02, 0xa3, 0x77, 0xe1, 0x9a, 0xd8, |
|
| 12332 |
- 0xe1, 0xc8, 0x25, 0xd9, 0x2d, 0x6f, 0xae, 0x24, 0x5e, 0x1b, 0x90, 0x67, 0x7a, 0x4c, 0x2c, 0xc4, |
|
| 12333 |
- 0xe6, 0xc7, 0xb0, 0x32, 0xe3, 0x83, 0xb8, 0xcd, 0x5b, 0x56, 0xb7, 0x5d, 0x37, 0x96, 0x50, 0x11, |
|
| 12334 |
- 0x72, 0xd5, 0xfd, 0x66, 0xa7, 0xd1, 0xec, 0x0a, 0xea, 0x51, 0x84, 0x1c, 0xde, 0xdf, 0xdd, 0xdd, |
|
| 12335 |
- 0xb6, 0xaa, 0x8f, 0x8d, 0xa4, 0xf9, 0xdf, 0xf1, 0xfa, 0x6a, 0xee, 0xb1, 0x3d, 0xcb, 0x3d, 0xde, |
|
| 12336 |
- 0x7b, 0xf5, 0xd4, 0x35, 0xfb, 0x98, 0x34, 0x62, 0x0e, 0xf2, 0x13, 0x00, 0xb9, 0x8d, 0xd4, 0xe9, |
|
| 12337 |
- 0x11, 0x7e, 0x59, 0x7d, 0xd1, 0x89, 0x2a, 0x47, 0x9c, 0xd7, 0x0a, 0x16, 0x47, 0x9f, 0x41, 0xd1, |
|
| 12338 |
- 0x66, 0x83, 0xa1, 0x47, 0xb5, 0x7e, 0xea, 0x2a, 0xfa, 0x85, 0x58, 0xc5, 0xe2, 0xd3, 0x1c, 0x28, |
|
| 12339 |
- 0x3d, 0xcb, 0x81, 0xfe, 0x3c, 0x01, 0x85, 0x29, 0x87, 0x67, 0xa9, 0x50, 0x11, 0x72, 0xdd, 0x56, |
|
| 12340 |
- 0xcd, 0xea, 0x34, 0x9a, 0x0f, 0x8d, 0x04, 0x02, 0xc8, 0xca, 0x05, 0xac, 0x19, 0x49, 0x41, 0xd7, |
|
| 12341 |
- 0xaa, 0xfb, 0x7b, 0xad, 0xdd, 0xba, 0x24, 0x43, 0xe8, 0x06, 0x18, 0xd1, 0x12, 0xf6, 0xda, 0x1d, |
|
| 12342 |
- 0x0b, 0x0b, 0x69, 0x1a, 0x5d, 0x87, 0xb5, 0x58, 0xaa, 0x35, 0x33, 0xe8, 0x26, 0xa0, 0x58, 0x38, |
|
| 12343 |
- 0x31, 0x91, 0x35, 0xff, 0x04, 0xd6, 0xaa, 0xcc, 0xe7, 0xc4, 0xf5, 0x63, 0x2a, 0xbb, 0x25, 0xe6, |
|
| 12344 |
- 0xad, 0x45, 0x3d, 0xd7, 0x51, 0xd9, 0x76, 0x7b, 0xed, 0xe2, 0xc5, 0x66, 0x21, 0x86, 0x36, 0x6a, |
|
| 12345 |
- 0x62, 0xa6, 0x51, 0xc3, 0x11, 0x67, 0x6a, 0xe8, 0x3a, 0x3a, 0x79, 0x2e, 0x5f, 0xbc, 0xd8, 0x4c, |
|
| 12346 |
- 0xb5, 0x1a, 0x35, 0x2c, 0x64, 0xe8, 0x75, 0xc8, 0xd3, 0x67, 0x2e, 0xef, 0xd9, 0x22, 0xbb, 0x8a, |
|
| 12347 |
- 0x35, 0xcc, 0xe0, 0x9c, 0x10, 0x54, 0x45, 0x32, 0xfd, 0xd3, 0x24, 0x40, 0x87, 0x84, 0xc7, 0x7a, |
|
| 12348 |
- 0xe8, 0x07, 0x90, 0x8f, 0x8b, 0xf8, 0xcb, 0x8a, 0xc9, 0xa9, 0xfd, 0x8a, 0xf1, 0xe8, 0xa3, 0x28, |
|
| 12349 |
- 0x62, 0x14, 0xc7, 0x5e, 0xac, 0xa8, 0xc7, 0x5a, 0x44, 0x53, 0x67, 0x89, 0xb4, 0xb8, 0x6b, 0x68, |
|
| 12350 |
- 0x10, 0xe8, 0x8d, 0x13, 0x7f, 0x51, 0x55, 0xe6, 0x5b, 0x35, 0x67, 0xcd, 0xdc, 0xee, 0x2c, 0x1a, |
|
| 12351 |
- 0x64, 0x6e, 0x41, 0x1f, 0x2d, 0xe1, 0x89, 0xde, 0xb6, 0x01, 0xab, 0xc1, 0xc8, 0x17, 0x5e, 0xf7, |
|
| 12352 |
- 0x42, 0xd9, 0x6d, 0xba, 0xf0, 0x5a, 0x93, 0xf2, 0x53, 0x16, 0x1c, 0x5b, 0x9c, 0x13, 0xfb, 0x48, |
|
| 12353 |
- 0x14, 0xd5, 0x3a, 0xc9, 0x4c, 0x08, 0x67, 0x62, 0x86, 0x70, 0xae, 0xc3, 0x32, 0xf1, 0x5c, 0x12, |
|
| 12354 |
- 0x52, 0x75, 0x4b, 0xe7, 0x71, 0xd4, 0x14, 0xb4, 0x98, 0x38, 0x4e, 0x40, 0xc3, 0x90, 0xaa, 0x32, |
|
| 12355 |
- 0x30, 0x8f, 0x27, 0x02, 0xf3, 0x5f, 0x92, 0x00, 0x8d, 0x96, 0xb5, 0xa7, 0xcd, 0xd7, 0x20, 0x7b, |
|
| 12356 |
- 0x48, 0x06, 0xae, 0x37, 0xbe, 0xec, 0x90, 0x4d, 0xf0, 0x15, 0x4b, 0x19, 0xda, 0x91, 0x3a, 0x58, |
|
| 12357 |
- 0xeb, 0x4a, 0xb6, 0x3c, 0x3a, 0xf0, 0x29, 0x8f, 0xd9, 0xb2, 0x6c, 0x89, 0xab, 0x39, 0x20, 0x7e, |
|
| 12358 |
- 0xbc, 0xb0, 0xaa, 0x21, 0x5c, 0xef, 0x13, 0x4e, 0x4f, 0xc9, 0x38, 0x3a, 0x13, 0xba, 0x89, 0x1e, |
|
| 12359 |
- 0x09, 0x16, 0x2d, 0x8a, 0x7b, 0xea, 0xac, 0x67, 0x24, 0xf7, 0xf8, 0x26, 0x7f, 0xb0, 0x86, 0x2b, |
|
| 12360 |
- 0xd2, 0x11, 0x6b, 0x97, 0x1e, 0xc8, 0x9b, 0x72, 0xd2, 0xf5, 0xad, 0x8a, 0xd8, 0x0f, 0x60, 0x65, |
|
| 12361 |
- 0x66, 0x9e, 0x2f, 0x95, 0x29, 0x8d, 0xd6, 0x93, 0x1f, 0x1a, 0x69, 0xfd, 0xef, 0x63, 0x23, 0x6b, |
|
| 12362 |
- 0xfe, 0x57, 0x02, 0xa0, 0xc5, 0x82, 0x68, 0xd3, 0x16, 0x3f, 0x0b, 0xe5, 0xe4, 0x23, 0x93, 0xcd, |
|
| 12363 |
- 0x3c, 0x1d, 0x9e, 0x0b, 0x79, 0xfa, 0xc4, 0x8a, 0xa0, 0xbd, 0x12, 0x8e, 0x63, 0x45, 0xb4, 0x09, |
|
| 12364 |
- 0x05, 0xb5, 0xff, 0xbd, 0x21, 0x0b, 0x54, 0x3e, 0x5a, 0xc1, 0xa0, 0x44, 0x42, 0x13, 0xdd, 0x85, |
|
| 12365 |
- 0xd5, 0xe1, 0xe8, 0xc0, 0x73, 0xc3, 0x23, 0xea, 0x28, 0x4c, 0x5a, 0x62, 0x56, 0x62, 0xa9, 0x80, |
|
| 12366 |
- 0x99, 0x35, 0xc8, 0x45, 0xd6, 0xd1, 0x3a, 0xa4, 0x3a, 0xd5, 0x96, 0xb1, 0x54, 0x5a, 0x3b, 0x3b, |
|
| 12367 |
- 0x2f, 0x17, 0x22, 0x71, 0xa7, 0xda, 0x12, 0x3d, 0xdd, 0x5a, 0xcb, 0x48, 0xcc, 0xf6, 0x74, 0x6b, |
|
| 12368 |
- 0xad, 0x52, 0x5a, 0xdc, 0x92, 0xe6, 0x5f, 0x27, 0x20, 0xab, 0x38, 0xdb, 0xc2, 0x19, 0x5b, 0xb0, |
|
| 12369 |
- 0x1c, 0x55, 0x12, 0x8a, 0x48, 0xbe, 0xfd, 0x6a, 0xd2, 0x57, 0xd1, 0x1c, 0x4d, 0xed, 0x63, 0xa4, |
|
| 12370 |
- 0x57, 0xfa, 0x04, 0x8a, 0xd3, 0x1d, 0xdf, 0x6a, 0x17, 0xff, 0x18, 0x0a, 0x22, 0x50, 0x22, 0xf2, |
|
| 12371 |
- 0xb7, 0x05, 0x59, 0xc5, 0x2b, 0x75, 0x56, 0xb9, 0x8c, 0x81, 0x6a, 0x24, 0xba, 0x0f, 0xcb, 0x8a, |
|
| 12372 |
- 0xb5, 0x46, 0xef, 0x29, 0x1b, 0x97, 0x87, 0x23, 0x8e, 0xe0, 0xe6, 0xa7, 0x90, 0x6e, 0x51, 0x1a, |
|
| 12373 |
- 0xa0, 0x3b, 0xb0, 0xec, 0x33, 0x87, 0x4e, 0x92, 0xa8, 0x26, 0xdc, 0x0e, 0x6d, 0xd4, 0x04, 0xe1, |
|
| 12374 |
- 0x76, 0x68, 0xc3, 0x11, 0x8b, 0x27, 0x0e, 0x68, 0xf4, 0xa4, 0x24, 0xfe, 0x9b, 0x1d, 0x28, 0x3e, |
|
| 12375 |
- 0xa5, 0x6e, 0xff, 0x88, 0x53, 0x47, 0x1a, 0x7a, 0x0f, 0xd2, 0x43, 0x1a, 0x3b, 0xbf, 0xbe, 0x30, |
|
| 12376 |
- 0x74, 0x28, 0x0d, 0xb0, 0x44, 0x89, 0x03, 0x79, 0x2a, 0xb5, 0xf5, 0x2b, 0x9e, 0x6e, 0x99, 0xff, |
|
| 12377 |
- 0x90, 0x84, 0xd5, 0x46, 0x18, 0x8e, 0x88, 0x6f, 0x47, 0xb7, 0xec, 0x4f, 0x67, 0x6f, 0xd9, 0x7b, |
|
| 12378 |
- 0x0b, 0x67, 0x38, 0xa3, 0x32, 0x5b, 0xe5, 0xeb, 0x24, 0x99, 0x8c, 0x93, 0xa4, 0xf9, 0x55, 0x22, |
|
| 12379 |
- 0x2a, 0xef, 0xef, 0x4e, 0x9d, 0x9b, 0xd2, 0xfa, 0xd9, 0x79, 0xf9, 0xc6, 0xb4, 0x25, 0xda, 0xf5, |
|
| 12380 |
- 0x8f, 0x7d, 0x76, 0xea, 0xa3, 0x37, 0x45, 0xb9, 0xdf, 0xac, 0x3f, 0x35, 0x12, 0xa5, 0x9b, 0x67, |
|
| 12381 |
- 0xe7, 0x65, 0x34, 0x03, 0xc2, 0xd4, 0xa7, 0xa7, 0xc2, 0x52, 0xab, 0xde, 0xac, 0x89, 0xfb, 0x30, |
|
| 12382 |
- 0xb9, 0xc0, 0x52, 0x8b, 0xfa, 0x8e, 0xeb, 0xf7, 0xd1, 0x1d, 0xc8, 0x36, 0xda, 0xed, 0xae, 0x2c, |
|
| 12383 |
- 0xc0, 0x5e, 0x3b, 0x3b, 0x2f, 0x5f, 0x9f, 0x41, 0x89, 0x06, 0x75, 0x04, 0x48, 0x10, 0x44, 0x71, |
|
| 12384 |
- 0x53, 0x2e, 0x00, 0x09, 0xee, 0x42, 0x1d, 0x1d, 0xe1, 0xff, 0x96, 0x04, 0xc3, 0xb2, 0x6d, 0x3a, |
|
| 12385 |
- 0xe4, 0xa2, 0x5f, 0x93, 0xee, 0x0e, 0xe4, 0x86, 0xe2, 0x9f, 0x2b, 0x8b, 0x08, 0x11, 0x16, 0xf7, |
|
| 12386 |
- 0x17, 0x3e, 0xf1, 0xce, 0xe9, 0x55, 0x30, 0xf3, 0xa8, 0xe5, 0x0c, 0xdc, 0x30, 0x14, 0xc5, 0xa5, |
|
| 12387 |
- 0x94, 0xe1, 0xd8, 0x52, 0xe9, 0xd7, 0x09, 0xb8, 0xbe, 0x00, 0x81, 0x3e, 0x80, 0x74, 0xc0, 0xbc, |
|
| 12388 |
- 0x68, 0x7b, 0x6e, 0xbf, 0xea, 0x01, 0x46, 0xa8, 0x62, 0x89, 0x44, 0x1b, 0x00, 0x64, 0xc4, 0x19, |
|
| 12389 |
- 0x91, 0xe3, 0xcb, 0x8d, 0xc9, 0xe1, 0x29, 0x09, 0x7a, 0x0a, 0xd9, 0x90, 0xda, 0x01, 0x8d, 0xf8, |
|
| 12390 |
- 0xcc, 0xa7, 0xff, 0x5f, 0xef, 0x2b, 0x6d, 0x69, 0x06, 0x6b, 0x73, 0xa5, 0x0a, 0x64, 0x95, 0x44, |
|
| 12391 |
- 0x44, 0xb4, 0x43, 0x38, 0x91, 0x4e, 0x17, 0xb1, 0xfc, 0x2f, 0x02, 0x85, 0x78, 0xfd, 0x28, 0x50, |
|
| 12392 |
- 0x88, 0xd7, 0x37, 0x7f, 0x9e, 0x04, 0xa8, 0x3f, 0xe3, 0x34, 0xf0, 0x89, 0x57, 0xb5, 0x50, 0x7d, |
|
| 12393 |
- 0x2a, 0x43, 0xaa, 0xd9, 0xbe, 0xb3, 0xf0, 0x59, 0x2e, 0xd6, 0xa8, 0x54, 0xad, 0x05, 0x39, 0xf2, |
|
| 12394 |
- 0x16, 0xa4, 0x46, 0x81, 0xa7, 0x9f, 0x78, 0x25, 0x11, 0xe9, 0xe2, 0x5d, 0x2c, 0x64, 0xa8, 0x3e, |
|
| 12395 |
- 0xc9, 0x48, 0xa9, 0x57, 0xbf, 0xcd, 0x4f, 0x0d, 0xf0, 0xdd, 0x67, 0xa5, 0xf7, 0x00, 0x26, 0x5e, |
|
| 12396 |
- 0xa3, 0x0d, 0xc8, 0x54, 0x77, 0xda, 0xed, 0x5d, 0x63, 0x49, 0xd5, 0x88, 0x93, 0x2e, 0x29, 0x36, |
|
| 12397 |
- 0xff, 0x3e, 0x01, 0xb9, 0xaa, 0xa5, 0x6f, 0x95, 0x1d, 0x30, 0x64, 0x2e, 0xb1, 0x69, 0xc0, 0x7b, |
|
| 12398 |
- 0xf4, 0xd9, 0xd0, 0x0d, 0xc6, 0x3a, 0x1d, 0x5c, 0xce, 0xe2, 0x57, 0x85, 0x56, 0x95, 0x06, 0xbc, |
|
| 12399 |
- 0x2e, 0x75, 0x10, 0x86, 0x22, 0xd5, 0x53, 0xec, 0xd9, 0x24, 0x4a, 0xce, 0x1b, 0x97, 0x2f, 0x85, |
|
| 12400 |
- 0x62, 0x7f, 0x93, 0x76, 0x88, 0x0b, 0x91, 0x91, 0x2a, 0x09, 0xcd, 0x27, 0x70, 0x7d, 0x3f, 0xb0, |
|
| 12401 |
- 0x8f, 0x68, 0xc8, 0xd5, 0xa0, 0xda, 0xe5, 0x4f, 0xe1, 0x36, 0x27, 0xe1, 0x71, 0xef, 0xc8, 0x0d, |
|
| 12402 |
- 0x39, 0x0b, 0xc6, 0xbd, 0x80, 0x72, 0xea, 0x8b, 0xfe, 0x9e, 0xfc, 0x02, 0xa0, 0x6b, 0xf0, 0x5b, |
|
| 12403 |
- 0x02, 0xf3, 0x48, 0x41, 0x70, 0x84, 0xd8, 0x15, 0x00, 0xb3, 0x01, 0x45, 0x41, 0xd8, 0x6a, 0xf4, |
|
| 12404 |
- 0x90, 0x8c, 0x3c, 0x1e, 0xa2, 0x1f, 0x03, 0x78, 0xac, 0xdf, 0xbb, 0x72, 0x26, 0xcf, 0x7b, 0xac, |
|
| 12405 |
- 0xaf, 0xfe, 0x9a, 0xbf, 0x07, 0x46, 0xcd, 0x0d, 0x87, 0x84, 0xdb, 0x47, 0xd1, 0xe3, 0x02, 0x7a, |
|
| 12406 |
- 0x08, 0xc6, 0x11, 0x25, 0x01, 0x3f, 0xa0, 0x84, 0xf7, 0x86, 0x34, 0x70, 0x99, 0x73, 0xa5, 0x25, |
|
| 12407 |
- 0x5d, 0x8b, 0xb5, 0x5a, 0x52, 0xc9, 0xfc, 0x4d, 0x02, 0x00, 0x93, 0xc3, 0x88, 0x00, 0xfc, 0x00, |
|
| 12408 |
- 0xae, 0x85, 0x3e, 0x19, 0x86, 0x47, 0x8c, 0xf7, 0x5c, 0x9f, 0xd3, 0xe0, 0x84, 0x78, 0xba, 0x40, |
|
| 12409 |
- 0x34, 0xa2, 0x8e, 0x86, 0x96, 0xa3, 0xf7, 0x00, 0x1d, 0x53, 0x3a, 0xec, 0x31, 0xcf, 0xe9, 0x45, |
|
| 12410 |
- 0x9d, 0xea, 0x13, 0x45, 0x1a, 0x1b, 0xa2, 0x67, 0xdf, 0x73, 0xda, 0x91, 0x1c, 0x6d, 0xc3, 0x86, |
|
| 12411 |
- 0x58, 0x01, 0xea, 0xf3, 0xc0, 0xa5, 0x61, 0xef, 0x90, 0x05, 0xbd, 0xd0, 0x63, 0xa7, 0xbd, 0x43, |
|
| 12412 |
- 0xe6, 0x79, 0xec, 0x94, 0x06, 0x51, 0xf9, 0x5d, 0xf2, 0x58, 0xbf, 0xae, 0x40, 0x3b, 0x2c, 0x68, |
|
| 12413 |
- 0x7b, 0xec, 0x74, 0x27, 0x42, 0x08, 0x96, 0x30, 0x99, 0x36, 0x77, 0xed, 0xe3, 0x88, 0x25, 0xc4, |
|
| 12414 |
- 0xd2, 0x8e, 0x6b, 0x1f, 0xa3, 0x3b, 0xb0, 0x42, 0x3d, 0x2a, 0x8b, 0x38, 0x85, 0xca, 0x48, 0x54, |
|
| 12415 |
- 0x31, 0x12, 0x0a, 0x90, 0xf9, 0x3b, 0x90, 0x6f, 0x79, 0xc4, 0x96, 0x1f, 0x82, 0x44, 0x49, 0x6c, |
|
| 12416 |
- 0x33, 0x5f, 0x04, 0x81, 0xeb, 0x73, 0x95, 0x1d, 0xf3, 0x78, 0x5a, 0x64, 0xfe, 0x14, 0xe0, 0x67, |
|
| 12417 |
- 0xcc, 0xf5, 0x3b, 0xec, 0x98, 0xfa, 0xf2, 0xcd, 0x5c, 0xb0, 0x5e, 0xbd, 0x95, 0x79, 0xac, 0x5b, |
|
| 12418 |
- 0x92, 0x93, 0x13, 0x9f, 0xf4, 0x69, 0x10, 0x3f, 0x1d, 0xab, 0xa6, 0xb8, 0x5c, 0xb2, 0x98, 0x31, |
|
| 12419 |
- 0x5e, 0xb5, 0x50, 0x19, 0xb2, 0x36, 0xe9, 0x45, 0x27, 0xaf, 0xb8, 0x9d, 0xbf, 0x78, 0xb1, 0x99, |
|
| 12420 |
- 0xa9, 0x5a, 0x8f, 0xe9, 0x18, 0x67, 0x6c, 0xf2, 0x98, 0x8e, 0xc5, 0xed, 0x6b, 0x13, 0x79, 0x5e, |
|
| 12421 |
- 0xa4, 0x99, 0xa2, 0xba, 0x7d, 0xab, 0x96, 0x38, 0x0c, 0x38, 0x6b, 0x13, 0xf1, 0x8b, 0x3e, 0x80, |
|
| 12422 |
- 0xa2, 0x06, 0xf5, 0x8e, 0x48, 0x78, 0xa4, 0xb8, 0xea, 0xf6, 0xea, 0xc5, 0x8b, 0x4d, 0x50, 0xc8, |
|
| 12423 |
- 0x47, 0x24, 0x3c, 0xc2, 0xa0, 0xd0, 0xe2, 0x3f, 0xaa, 0x43, 0xe1, 0x0b, 0xe6, 0xfa, 0x3d, 0x2e, |
|
| 12424 |
- 0x27, 0xa1, 0x2b, 0xe9, 0x85, 0xe7, 0x67, 0x32, 0x55, 0x5d, 0xde, 0xc3, 0x17, 0xb1, 0xc4, 0xfc, |
|
| 12425 |
- 0xd7, 0x04, 0x14, 0x84, 0x4d, 0xf7, 0xd0, 0xb5, 0xc5, 0x6d, 0xf9, 0xed, 0x33, 0xfd, 0x2d, 0x48, |
|
| 12426 |
- 0xd9, 0x61, 0xa0, 0xe7, 0x26, 0x53, 0x5d, 0xb5, 0x8d, 0xb1, 0x90, 0xa1, 0xcf, 0x20, 0xab, 0x8a, |
|
| 12427 |
- 0x0b, 0x9d, 0xe4, 0xcd, 0x6f, 0xbe, 0xd7, 0xb5, 0x8b, 0x5a, 0x4f, 0xee, 0xe5, 0xc4, 0x3b, 0x39, |
|
| 12428 |
- 0xcb, 0x22, 0x9e, 0x16, 0xa1, 0x9b, 0x90, 0xb4, 0x7d, 0x19, 0x14, 0xfa, 0x5b, 0x5a, 0xb5, 0x89, |
|
| 12429 |
- 0x93, 0xb6, 0x6f, 0xfe, 0x73, 0x02, 0x56, 0xea, 0xbe, 0x1d, 0x8c, 0x65, 0x92, 0x14, 0x1b, 0x71, |
|
| 12430 |
- 0x1b, 0xf2, 0xe1, 0xe8, 0x20, 0x1c, 0x87, 0x9c, 0x0e, 0xa2, 0xa7, 0xfa, 0x58, 0x80, 0x1a, 0x90, |
|
| 12431 |
- 0x27, 0x5e, 0x9f, 0x05, 0x2e, 0x3f, 0x1a, 0x68, 0x6e, 0xbc, 0x38, 0x31, 0x4f, 0xdb, 0xac, 0x58, |
|
| 12432 |
- 0x91, 0x0a, 0x9e, 0x68, 0x47, 0xa9, 0x38, 0x25, 0x9d, 0x95, 0xa9, 0xf8, 0x4d, 0x28, 0x7a, 0x64, |
|
| 12433 |
- 0x20, 0xa8, 0x70, 0x4f, 0x94, 0x5c, 0x72, 0x1e, 0x69, 0x5c, 0xd0, 0x32, 0x51, 0x46, 0x9a, 0x26, |
|
| 12434 |
- 0xe4, 0x63, 0x63, 0x68, 0x0d, 0x0a, 0x56, 0xbd, 0xdd, 0xfb, 0x70, 0xeb, 0x7e, 0xef, 0x61, 0x75, |
|
| 12435 |
- 0xcf, 0x58, 0xd2, 0x4c, 0xe0, 0x1f, 0x13, 0xb0, 0xb2, 0xa7, 0x62, 0x50, 0x13, 0xa7, 0x3b, 0xb0, |
|
| 12436 |
- 0x1c, 0x90, 0x43, 0x1e, 0x51, 0xbb, 0xb4, 0x0a, 0x2e, 0x91, 0x04, 0x04, 0xb5, 0x13, 0x5d, 0x8b, |
|
| 12437 |
- 0xa9, 0xdd, 0xd4, 0x87, 0xa2, 0xd4, 0xa5, 0x1f, 0x8a, 0xd2, 0xdf, 0xc9, 0x87, 0x22, 0xf3, 0x57, |
|
| 12438 |
- 0x09, 0x58, 0xd3, 0x17, 0x75, 0xf4, 0x71, 0x04, 0xbd, 0x03, 0x79, 0x75, 0x67, 0x4f, 0x88, 0xa9, |
|
| 12439 |
- 0xfc, 0x5e, 0xa1, 0x70, 0x8d, 0x1a, 0xce, 0xa9, 0xee, 0x86, 0x83, 0x7e, 0x32, 0xf5, 0x2a, 0xfa, |
|
| 12440 |
- 0x0a, 0x7a, 0x38, 0x67, 0xbd, 0x32, 0x79, 0x2a, 0x7d, 0xe5, 0xf7, 0x92, 0x4d, 0x28, 0x68, 0x07, |
|
| 12441 |
- 0x64, 0xd9, 0xa0, 0xea, 0x40, 0x50, 0xa2, 0x26, 0x19, 0x50, 0xf3, 0x2e, 0xa4, 0x85, 0x19, 0x04, |
|
| 12442 |
- 0x90, 0x6d, 0x7f, 0xde, 0xee, 0xd4, 0xf7, 0x54, 0xe5, 0xb5, 0xd3, 0x90, 0x1f, 0xad, 0x96, 0x21, |
|
| 12443 |
- 0x55, 0x6f, 0x3e, 0x31, 0x92, 0xe6, 0xef, 0x43, 0x01, 0xd3, 0x01, 0x3b, 0xa1, 0x4e, 0x53, 0x0d, |
|
| 12444 |
- 0x97, 0x8c, 0x27, 0x24, 0x23, 0xb2, 0x51, 0xc3, 0x49, 0xd7, 0x41, 0x3f, 0x82, 0xac, 0xbe, 0x30, |
|
| 12445 |
- 0xaf, 0xf4, 0x04, 0xa4, 0xc1, 0xef, 0xfe, 0x26, 0x05, 0xf9, 0xf8, 0xbd, 0x40, 0x9c, 0x36, 0x41, |
|
| 12446 |
- 0x52, 0x97, 0xd4, 0xab, 0x61, 0x2c, 0x6f, 0x4a, 0x7a, 0x9a, 0xb7, 0x76, 0x77, 0xf7, 0xab, 0x56, |
|
| 12447 |
- 0xa7, 0x5e, 0x33, 0x3e, 0x53, 0x2c, 0x36, 0x06, 0x58, 0x9e, 0xc7, 0xc4, 0x79, 0x71, 0x90, 0x39, |
|
| 12448 |
- 0x61, 0xb1, 0xcf, 0xf5, 0xdb, 0x64, 0x8c, 0x8a, 0x28, 0xec, 0x5b, 0x90, 0xb3, 0xda, 0xed, 0xc6, |
|
| 12449 |
- 0xc3, 0x66, 0xbd, 0x66, 0x7c, 0x99, 0x28, 0x7d, 0xef, 0xec, 0xbc, 0x7c, 0x6d, 0x62, 0x2a, 0x0c, |
|
| 12450 |
- 0xdd, 0xbe, 0x4f, 0x1d, 0x89, 0xaa, 0x56, 0xeb, 0x2d, 0x31, 0xde, 0xf3, 0xe4, 0x3c, 0x4a, 0x72, |
|
| 12451 |
- 0x37, 0xf9, 0x9d, 0x21, 0xdf, 0xc2, 0xf5, 0x96, 0x85, 0xc5, 0x88, 0x5f, 0x26, 0xe7, 0xfc, 0x6a, |
|
| 12452 |
- 0x05, 0x74, 0x48, 0x02, 0x31, 0xe6, 0x46, 0xf4, 0xbd, 0xed, 0x79, 0x4a, 0xbd, 0x45, 0x4f, 0x1e, |
|
| 12453 |
- 0x49, 0x28, 0x71, 0xc6, 0x62, 0x34, 0xf9, 0xb8, 0x24, 0xcd, 0xa4, 0xe6, 0x46, 0x6b, 0x73, 0x12, |
|
| 12454 |
- 0x70, 0x61, 0xc5, 0x84, 0x65, 0xdc, 0x6d, 0x36, 0xe5, 0xec, 0xd2, 0x73, 0xb3, 0xc3, 0x23, 0xdf, |
|
| 12455 |
- 0x17, 0x98, 0xbb, 0x90, 0x8b, 0xde, 0x9e, 0x8c, 0x2f, 0xd3, 0x73, 0x0e, 0x55, 0xa3, 0x87, 0x33, |
|
| 12456 |
- 0x39, 0xe0, 0xa3, 0x6e, 0x47, 0x7e, 0x0e, 0x7c, 0x9e, 0x99, 0x1f, 0xf0, 0x68, 0xc4, 0x1d, 0x51, |
|
| 12457 |
- 0x37, 0x94, 0x63, 0x22, 0xff, 0x65, 0x46, 0xf1, 0xa7, 0x18, 0xa3, 0x58, 0xbc, 0xb0, 0x83, 0xeb, |
|
| 12458 |
- 0x3f, 0x53, 0x5f, 0x0e, 0x9f, 0x67, 0xe7, 0xec, 0x60, 0xfa, 0x05, 0xb5, 0x39, 0x75, 0x26, 0x4f, |
|
| 12459 |
- 0xed, 0x71, 0xd7, 0xbb, 0x7f, 0x00, 0xb9, 0x28, 0xd7, 0xa2, 0x0d, 0xc8, 0x3e, 0xdd, 0xc7, 0x8f, |
|
| 12460 |
- 0xeb, 0xd8, 0x58, 0x52, 0xab, 0x13, 0xf5, 0x3c, 0x55, 0x97, 0x55, 0x19, 0x96, 0xf7, 0xac, 0xa6, |
|
| 12461 |
- 0xf5, 0xb0, 0x8e, 0xa3, 0xa7, 0xfe, 0x08, 0xa0, 0x13, 0x46, 0xc9, 0xd0, 0x03, 0xc4, 0x36, 0xb7, |
|
| 12462 |
- 0x6f, 0x7f, 0xf5, 0xf5, 0xc6, 0xd2, 0x2f, 0xbf, 0xde, 0x58, 0xfa, 0xf5, 0xd7, 0x1b, 0x89, 0xe7, |
|
| 12463 |
- 0x17, 0x1b, 0x89, 0xaf, 0x2e, 0x36, 0x12, 0xbf, 0xb8, 0xd8, 0x48, 0xfc, 0xc7, 0xc5, 0x46, 0xe2, |
|
| 12464 |
- 0x20, 0x2b, 0xc9, 0xec, 0x47, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x1f, 0x9b, 0xee, 0x67, 0xfb, |
|
| 12465 |
- 0x22, 0x00, 0x00, |
|
| 12243 |
+ 0xc8, 0x21, 0xff, 0x40, 0x80, 0x9c, 0x7c, 0xf4, 0x2d, 0x9b, 0x04, 0x08, 0x16, 0x1b, 0x64, 0x10, |
|
| 12244 |
+ 0x2b, 0xe7, 0x00, 0x7b, 0x09, 0x72, 0x48, 0x0e, 0x41, 0x7d, 0x74, 0xb3, 0xc9, 0xa1, 0x34, 0xe3, |
|
| 12245 |
+ 0xac, 0x2f, 0x64, 0xd7, 0xab, 0xdf, 0x7b, 0xf5, 0xaa, 0xea, 0xd5, 0xab, 0x5f, 0x55, 0x41, 0x81, |
|
| 12246 |
+ 0x4d, 0x46, 0x34, 0xa8, 0x8c, 0x7c, 0x8f, 0x79, 0x08, 0x59, 0x9e, 0x79, 0x4c, 0xfd, 0x4a, 0x70, |
|
| 12247 |
+ 0x4a, 0xfc, 0xe1, 0xb1, 0xcd, 0x2a, 0x27, 0x1f, 0x95, 0x6e, 0x31, 0x7b, 0x48, 0x03, 0x46, 0x86, |
|
| 12248 |
+ 0xa3, 0x0f, 0xa2, 0x2f, 0x09, 0x2f, 0xbd, 0x61, 0x8d, 0x7d, 0xc2, 0x6c, 0xcf, 0xfd, 0x20, 0xfc, |
|
| 12249 |
+ 0x50, 0x15, 0x37, 0x06, 0xde, 0xc0, 0x13, 0x9f, 0x1f, 0xf0, 0x2f, 0x29, 0xd5, 0x37, 0x61, 0xf9, |
|
| 12250 |
+ 0x09, 0xf5, 0x03, 0xdb, 0x73, 0xd1, 0x0d, 0xc8, 0xd8, 0xae, 0x45, 0x9f, 0xad, 0x27, 0xca, 0x89, |
|
| 12251 |
+ 0x7b, 0x69, 0x2c, 0x0b, 0xfa, 0x5f, 0x26, 0xa0, 0x60, 0xb8, 0xae, 0xc7, 0x84, 0xad, 0x00, 0x21, |
|
| 12252 |
+ 0x48, 0xbb, 0x64, 0x48, 0x05, 0x28, 0x8f, 0xc5, 0x37, 0xaa, 0x42, 0xd6, 0x21, 0x07, 0xd4, 0x09, |
|
| 12253 |
+ 0xd6, 0x93, 0xe5, 0xd4, 0xbd, 0xc2, 0xd6, 0x0f, 0x2a, 0x2f, 0xfb, 0x5c, 0x89, 0x19, 0xa9, 0xec, |
|
| 12254 |
+ 0x09, 0x74, 0xdd, 0x65, 0xfe, 0x04, 0x2b, 0xd5, 0xd2, 0x8f, 0xa1, 0x10, 0x13, 0x23, 0x0d, 0x52, |
|
| 12255 |
+ 0xc7, 0x74, 0xa2, 0x9a, 0xe1, 0x9f, 0xdc, 0xbf, 0x13, 0xe2, 0x8c, 0xe9, 0x7a, 0x52, 0xc8, 0x64, |
|
| 12256 |
+ 0xe1, 0xd3, 0xe4, 0x83, 0x84, 0xfe, 0x05, 0xe4, 0x31, 0x0d, 0xbc, 0xb1, 0x6f, 0xd2, 0x00, 0xbd, |
|
| 12257 |
+ 0x07, 0x79, 0x97, 0xb8, 0x5e, 0xdf, 0x1c, 0x8d, 0x03, 0xa1, 0x9e, 0xda, 0x2e, 0x5e, 0xbc, 0xd8, |
|
| 12258 |
+ 0xcc, 0x35, 0x89, 0xeb, 0x55, 0xdb, 0xbd, 0x00, 0xe7, 0x78, 0x75, 0x75, 0x34, 0x0e, 0xd0, 0xdb, |
|
| 12259 |
+ 0x50, 0x1c, 0xd2, 0xa1, 0xe7, 0x4f, 0xfa, 0x07, 0x13, 0x46, 0x03, 0x61, 0x38, 0x85, 0x0b, 0x52, |
|
| 12260 |
+ 0xb6, 0xcd, 0x45, 0xfa, 0x9f, 0x27, 0xe0, 0x46, 0x68, 0x1b, 0xd3, 0xdf, 0x1b, 0xdb, 0x3e, 0x1d, |
|
| 12261 |
+ 0x52, 0x97, 0x05, 0xe8, 0x47, 0x90, 0x75, 0xec, 0xa1, 0xcd, 0x64, 0x1b, 0x85, 0xad, 0xb7, 0x16, |
|
| 12262 |
+ 0xf5, 0x39, 0xf2, 0x0a, 0x2b, 0x30, 0x32, 0xa0, 0xe8, 0xd3, 0x80, 0xfa, 0x27, 0x72, 0x24, 0x44, |
|
| 12263 |
+ 0x93, 0xaf, 0x54, 0x9e, 0x51, 0xd1, 0x77, 0x20, 0xd7, 0x76, 0x08, 0x3b, 0xf4, 0xfc, 0x21, 0xd2, |
|
| 12264 |
+ 0xa1, 0x48, 0x7c, 0xf3, 0xc8, 0x66, 0xd4, 0x64, 0x63, 0x3f, 0x9c, 0x95, 0x19, 0x19, 0xba, 0x09, |
|
| 12265 |
+ 0x49, 0x4f, 0x36, 0x94, 0xdf, 0xce, 0x5e, 0xbc, 0xd8, 0x4c, 0xb6, 0x3a, 0x38, 0xe9, 0x05, 0xfa, |
|
| 12266 |
+ 0x43, 0xb8, 0xd6, 0x76, 0xc6, 0x03, 0xdb, 0xad, 0xd1, 0xc0, 0xf4, 0xed, 0x11, 0xb7, 0xce, 0xa7, |
|
| 12267 |
+ 0x97, 0x07, 0x5f, 0x38, 0xbd, 0xfc, 0x3b, 0x9a, 0xf2, 0xe4, 0x74, 0xca, 0xf5, 0x3f, 0x4d, 0xc2, |
|
| 12268 |
+ 0xb5, 0xba, 0x3b, 0xb0, 0x5d, 0x1a, 0xd7, 0xbe, 0x0b, 0xab, 0x54, 0x08, 0xfb, 0x27, 0x32, 0xa8, |
|
| 12269 |
+ 0x94, 0x9d, 0x15, 0x29, 0x0d, 0x23, 0xad, 0x31, 0x17, 0x2f, 0x1f, 0x2d, 0xea, 0xfe, 0x4b, 0xd6, |
|
| 12270 |
+ 0x17, 0x45, 0x0d, 0xaa, 0xc3, 0xf2, 0x48, 0x74, 0x22, 0x58, 0x4f, 0x09, 0x5b, 0x77, 0x17, 0xd9, |
|
| 12271 |
+ 0x7a, 0xa9, 0x9f, 0xdb, 0xe9, 0xaf, 0x5f, 0x6c, 0x2e, 0xe1, 0x50, 0xf7, 0x37, 0x09, 0xbe, 0xff, |
|
| 12272 |
+ 0x48, 0xc0, 0x5a, 0xd3, 0xb3, 0x66, 0xc6, 0xa1, 0x04, 0xb9, 0x23, 0x2f, 0x60, 0xb1, 0x85, 0x12, |
|
| 12273 |
+ 0x95, 0xd1, 0x03, 0xc8, 0x8d, 0xd4, 0xf4, 0xa9, 0xd9, 0xbf, 0xbd, 0xd8, 0x65, 0x89, 0xc1, 0x11, |
|
| 12274 |
+ 0x1a, 0x3d, 0x84, 0xbc, 0x1f, 0xc6, 0xc4, 0x7a, 0xea, 0x75, 0x02, 0x67, 0x8a, 0x47, 0xbf, 0x0d, |
|
| 12275 |
+ 0x59, 0x39, 0x09, 0xeb, 0x69, 0xa1, 0x79, 0xf7, 0xb5, 0xc6, 0x1c, 0x2b, 0x25, 0xfd, 0x97, 0x09, |
|
| 12276 |
+ 0xd0, 0x30, 0x39, 0x64, 0xfb, 0x74, 0x78, 0x40, 0xfd, 0x0e, 0x23, 0x6c, 0x1c, 0xa0, 0x9b, 0x90, |
|
| 12277 |
+ 0x75, 0x28, 0xb1, 0xa8, 0x2f, 0x3a, 0x99, 0xc3, 0xaa, 0x84, 0x7a, 0x3c, 0xc8, 0x89, 0x79, 0x44, |
|
| 12278 |
+ 0x0e, 0x6c, 0xc7, 0x66, 0x13, 0xd1, 0xcd, 0xd5, 0xc5, 0xb3, 0x3c, 0x6f, 0xb3, 0x82, 0x63, 0x8a, |
|
| 12279 |
+ 0x78, 0xc6, 0x0c, 0x5a, 0x87, 0xe5, 0x21, 0x0d, 0x02, 0x32, 0xa0, 0xa2, 0xf7, 0x79, 0x1c, 0x16, |
|
| 12280 |
+ 0xf5, 0x87, 0x50, 0x8c, 0xeb, 0xa1, 0x02, 0x2c, 0xf7, 0x9a, 0x8f, 0x9b, 0xad, 0xa7, 0x4d, 0x6d, |
|
| 12281 |
+ 0x09, 0xad, 0x41, 0xa1, 0xd7, 0xc4, 0x75, 0xa3, 0xba, 0x6b, 0x6c, 0xef, 0xd5, 0xb5, 0x04, 0x5a, |
|
| 12282 |
+ 0x81, 0xfc, 0xb4, 0x98, 0xd4, 0x7f, 0x9e, 0x00, 0xe0, 0x13, 0xa8, 0x3a, 0xf5, 0x29, 0x64, 0x02, |
|
| 12283 |
+ 0x46, 0x98, 0x9c, 0xb8, 0xd5, 0xad, 0x77, 0x16, 0x79, 0x3d, 0x85, 0x57, 0xf8, 0x1f, 0xc5, 0x52, |
|
| 12284 |
+ 0x25, 0xee, 0x61, 0x72, 0xde, 0xc3, 0x8c, 0x40, 0xce, 0xba, 0x96, 0x83, 0x74, 0x8d, 0x7f, 0x25, |
|
| 12285 |
+ 0x50, 0x1e, 0x32, 0xb8, 0x6e, 0xd4, 0xbe, 0xd0, 0x92, 0x48, 0x83, 0x62, 0xad, 0xd1, 0xa9, 0xb6, |
|
| 12286 |
+ 0x9a, 0xcd, 0x7a, 0xb5, 0x5b, 0xaf, 0x69, 0x29, 0xfd, 0x2e, 0x64, 0x1a, 0x43, 0x32, 0xa0, 0xe8, |
|
| 12287 |
+ 0x36, 0x8f, 0x80, 0x43, 0xea, 0x53, 0xd7, 0x0c, 0x03, 0x6b, 0x2a, 0xd0, 0x7f, 0x91, 0x87, 0xcc, |
|
| 12288 |
+ 0xbe, 0x37, 0x76, 0x19, 0xda, 0x8a, 0xad, 0xe2, 0xd5, 0xad, 0x8d, 0x45, 0x5d, 0x10, 0xc0, 0x4a, |
|
| 12289 |
+ 0x77, 0x32, 0xa2, 0x6a, 0x95, 0xdf, 0x84, 0xac, 0x8c, 0x15, 0xe5, 0xba, 0x2a, 0x71, 0x39, 0x23, |
|
| 12290 |
+ 0xfe, 0x80, 0x32, 0x35, 0xe8, 0xaa, 0x84, 0xee, 0x41, 0xce, 0xa7, 0xc4, 0xf2, 0x5c, 0x67, 0x22, |
|
| 12291 |
+ 0x42, 0x2a, 0x27, 0xd3, 0x2c, 0xa6, 0xc4, 0x6a, 0xb9, 0xce, 0x04, 0x47, 0xb5, 0x68, 0x17, 0x8a, |
|
| 12292 |
+ 0x07, 0xb6, 0x6b, 0xf5, 0xbd, 0x91, 0xcc, 0x79, 0x99, 0xcb, 0x03, 0x50, 0x7a, 0xb5, 0x6d, 0xbb, |
|
| 12293 |
+ 0x56, 0x4b, 0x82, 0x71, 0xe1, 0x60, 0x5a, 0x40, 0x4d, 0x58, 0x3d, 0xf1, 0x9c, 0xf1, 0x90, 0x46, |
|
| 12294 |
+ 0xb6, 0xb2, 0xc2, 0xd6, 0xbb, 0x97, 0xdb, 0x7a, 0x22, 0xf0, 0xa1, 0xb5, 0x95, 0x93, 0x78, 0x11, |
|
| 12295 |
+ 0x3d, 0x86, 0x15, 0x36, 0x1c, 0x1d, 0x06, 0x91, 0xb9, 0x65, 0x61, 0xee, 0xfb, 0x57, 0x0c, 0x18, |
|
| 12296 |
+ 0x87, 0x87, 0xd6, 0x8a, 0x2c, 0x56, 0x2a, 0xfd, 0x71, 0x0a, 0x0a, 0x31, 0xcf, 0x51, 0x07, 0x0a, |
|
| 12297 |
+ 0x23, 0xdf, 0x1b, 0x91, 0x81, 0xc8, 0xdb, 0x6a, 0x2e, 0x3e, 0x7a, 0xad, 0x5e, 0x57, 0xda, 0x53, |
|
| 12298 |
+ 0x45, 0x1c, 0xb7, 0xa2, 0x9f, 0x27, 0xa1, 0x10, 0xab, 0x44, 0xf7, 0x21, 0x87, 0xdb, 0xb8, 0xf1, |
|
| 12299 |
+ 0xc4, 0xe8, 0xd6, 0xb5, 0xa5, 0xd2, 0xed, 0xb3, 0xf3, 0xf2, 0xba, 0xb0, 0x16, 0x37, 0xd0, 0xf6, |
|
| 12300 |
+ 0xed, 0x13, 0x1e, 0x7a, 0xf7, 0x60, 0x39, 0x84, 0x26, 0x4a, 0x6f, 0x9e, 0x9d, 0x97, 0xdf, 0x98, |
|
| 12301 |
+ 0x87, 0xc6, 0x90, 0xb8, 0xb3, 0x6b, 0xe0, 0x7a, 0x4d, 0x4b, 0x2e, 0x46, 0xe2, 0xce, 0x11, 0xf1, |
|
| 12302 |
+ 0xa9, 0x85, 0xbe, 0x0f, 0x59, 0x05, 0x4c, 0x95, 0x4a, 0x67, 0xe7, 0xe5, 0x9b, 0xf3, 0xc0, 0x29, |
|
| 12303 |
+ 0x0e, 0x77, 0xf6, 0x8c, 0x27, 0x75, 0x2d, 0xbd, 0x18, 0x87, 0x3b, 0x0e, 0x39, 0xa1, 0xe8, 0x1d, |
|
| 12304 |
+ 0xc8, 0x48, 0x58, 0xa6, 0x74, 0xeb, 0xec, 0xbc, 0xfc, 0xbd, 0x97, 0xcc, 0x71, 0x54, 0x69, 0xfd, |
|
| 12305 |
+ 0xcf, 0xfe, 0x6a, 0x63, 0xe9, 0xef, 0xff, 0x7a, 0x43, 0x9b, 0xaf, 0x2e, 0xfd, 0x6f, 0x02, 0x56, |
|
| 12306 |
+ 0x66, 0xa6, 0x1c, 0xe9, 0x90, 0x75, 0x3d, 0xd3, 0x1b, 0xc9, 0x74, 0x9e, 0xdb, 0x86, 0x8b, 0x17, |
|
| 12307 |
+ 0x9b, 0xd9, 0xa6, 0x57, 0xf5, 0x46, 0x13, 0xac, 0x6a, 0xd0, 0xe3, 0xb9, 0x0d, 0xe9, 0xe3, 0xd7, |
|
| 12308 |
+ 0x8c, 0xa7, 0x85, 0x5b, 0xd2, 0x67, 0xb0, 0x62, 0xf9, 0xf6, 0x09, 0xf5, 0xfb, 0xa6, 0xe7, 0x1e, |
|
| 12309 |
+ 0xda, 0x03, 0x95, 0xaa, 0x4b, 0x8b, 0x6c, 0xd6, 0x04, 0x10, 0x17, 0xa5, 0x42, 0x55, 0xe0, 0x7f, |
|
| 12310 |
+ 0x83, 0xcd, 0xa8, 0xf4, 0x04, 0x8a, 0xf1, 0x08, 0x45, 0x6f, 0x01, 0x04, 0xf6, 0xef, 0x53, 0xc5, |
|
| 12311 |
+ 0x6f, 0x04, 0x1b, 0xc2, 0x79, 0x2e, 0x11, 0xec, 0x06, 0xbd, 0x0b, 0xe9, 0xa1, 0x67, 0x49, 0x3b, |
|
| 12312 |
+ 0x99, 0xed, 0xeb, 0x7c, 0x4f, 0xfc, 0xd5, 0x8b, 0xcd, 0x82, 0x17, 0x54, 0x76, 0x6c, 0x87, 0xee, |
|
| 12313 |
+ 0x7b, 0x16, 0xc5, 0x02, 0xa0, 0x9f, 0x40, 0x9a, 0xa7, 0x0a, 0xf4, 0x26, 0xa4, 0xb7, 0x1b, 0xcd, |
|
| 12314 |
+ 0x9a, 0xb6, 0x54, 0xba, 0x76, 0x76, 0x5e, 0x5e, 0x11, 0x43, 0xc2, 0x2b, 0x78, 0xec, 0xa2, 0x4d, |
|
| 12315 |
+ 0xc8, 0x3e, 0x69, 0xed, 0xf5, 0xf6, 0x79, 0x78, 0x5d, 0x3f, 0x3b, 0x2f, 0xaf, 0x45, 0xd5, 0x72, |
|
| 12316 |
+ 0xd0, 0xd0, 0x5b, 0x90, 0xe9, 0xee, 0xb7, 0x77, 0x3a, 0x5a, 0xb2, 0x84, 0xce, 0xce, 0xcb, 0xab, |
|
| 12317 |
+ 0x51, 0xbd, 0xf0, 0xb9, 0x74, 0x4d, 0xcd, 0x6a, 0x3e, 0x92, 0xeb, 0xff, 0x93, 0x84, 0x15, 0xcc, |
|
| 12318 |
+ 0xf9, 0xad, 0xcf, 0xda, 0x9e, 0x63, 0x9b, 0x13, 0xd4, 0x86, 0xbc, 0xe9, 0xb9, 0x96, 0x1d, 0x5b, |
|
| 12319 |
+ 0x53, 0x5b, 0x97, 0x6c, 0x82, 0x53, 0xad, 0xb0, 0x54, 0x0d, 0x35, 0xf1, 0xd4, 0x08, 0xda, 0x82, |
|
| 12320 |
+ 0x8c, 0x45, 0x1d, 0x32, 0xb9, 0x6a, 0x37, 0xae, 0x29, 0x2e, 0x8d, 0x25, 0x54, 0x30, 0x47, 0xf2, |
|
| 12321 |
+ 0xac, 0x4f, 0x18, 0xa3, 0xc3, 0x11, 0x93, 0xbb, 0x71, 0x1a, 0x17, 0x86, 0xe4, 0x99, 0xa1, 0x44, |
|
| 12322 |
+ 0xe8, 0x87, 0x90, 0x3d, 0xb5, 0x5d, 0xcb, 0x3b, 0x55, 0x1b, 0xee, 0xd5, 0x76, 0x15, 0x56, 0x3f, |
|
| 12323 |
+ 0xe3, 0xfb, 0xec, 0x9c, 0xb3, 0x7c, 0xd4, 0x9b, 0xad, 0x66, 0x3d, 0x1c, 0x75, 0x55, 0xdf, 0x72, |
|
| 12324 |
+ 0x9b, 0x9e, 0xcb, 0x57, 0x0c, 0xb4, 0x9a, 0xfd, 0x1d, 0xa3, 0xb1, 0xd7, 0xc3, 0x7c, 0xe4, 0x6f, |
|
| 12325 |
+ 0x9c, 0x9d, 0x97, 0xb5, 0x08, 0xb2, 0x43, 0x6c, 0x87, 0x93, 0xc0, 0x5b, 0x90, 0x32, 0x9a, 0x5f, |
|
| 12326 |
+ 0x68, 0xc9, 0x92, 0x76, 0x76, 0x5e, 0x2e, 0x46, 0xd5, 0x86, 0x3b, 0x99, 0x2e, 0xa6, 0xf9, 0x76, |
|
| 12327 |
+ 0xf5, 0x7f, 0x4b, 0x42, 0xb1, 0x37, 0xb2, 0x08, 0xa3, 0x32, 0x32, 0x51, 0x19, 0x0a, 0x23, 0xe2, |
|
| 12328 |
+ 0x13, 0xc7, 0xa1, 0x8e, 0x1d, 0x0c, 0xd5, 0x41, 0x21, 0x2e, 0x42, 0x0f, 0xbe, 0xc5, 0x60, 0x2a, |
|
| 12329 |
+ 0x12, 0xa6, 0x86, 0xb4, 0x07, 0xab, 0x87, 0xd2, 0xd9, 0x3e, 0x31, 0xc5, 0xec, 0xa6, 0xc4, 0xec, |
|
| 12330 |
+ 0x56, 0x16, 0x99, 0x88, 0x7b, 0x55, 0x51, 0x7d, 0x34, 0x84, 0x16, 0x5e, 0x39, 0x8c, 0x17, 0xd1, |
|
| 12331 |
+ 0x27, 0xb0, 0x3c, 0xf4, 0x5c, 0x9b, 0x79, 0xfe, 0x6b, 0xcd, 0x43, 0x08, 0x46, 0xf7, 0xe1, 0x1a, |
|
| 12332 |
+ 0x9f, 0xe1, 0xd0, 0x25, 0x51, 0x2d, 0x76, 0xae, 0x24, 0x5e, 0x1b, 0x92, 0x67, 0xaa, 0x4d, 0xcc, |
|
| 12333 |
+ 0xc5, 0xfa, 0x27, 0xb0, 0x32, 0xe3, 0x03, 0xdf, 0xcd, 0xdb, 0x46, 0xaf, 0x53, 0xd7, 0x96, 0x50, |
|
| 12334 |
+ 0x11, 0x72, 0xd5, 0x56, 0xb3, 0xdb, 0x68, 0xf6, 0x38, 0xf5, 0x28, 0x42, 0x0e, 0xb7, 0xf6, 0xf6, |
|
| 12335 |
+ 0xb6, 0x8d, 0xea, 0x63, 0x2d, 0xa9, 0xff, 0x57, 0x34, 0xbe, 0x8a, 0x7b, 0x6c, 0xcf, 0x72, 0x8f, |
|
| 12336 |
+ 0xf7, 0x2f, 0xef, 0xba, 0x62, 0x1f, 0xd3, 0x42, 0xc4, 0x41, 0x7e, 0x02, 0x20, 0xa6, 0x91, 0x5a, |
|
| 12337 |
+ 0x7d, 0xc2, 0xae, 0x3a, 0x5f, 0x74, 0xc3, 0x93, 0x23, 0xce, 0x2b, 0x05, 0x83, 0xa1, 0xcf, 0xa1, |
|
| 12338 |
+ 0x68, 0x7a, 0xc3, 0x91, 0x43, 0x95, 0x7e, 0xea, 0x75, 0xf4, 0x0b, 0x91, 0x8a, 0xc1, 0xe2, 0x1c, |
|
| 12339 |
+ 0x28, 0x3d, 0xcb, 0x81, 0xfe, 0x24, 0x01, 0x85, 0x98, 0xc3, 0xb3, 0x54, 0xa8, 0x08, 0xb9, 0x5e, |
|
| 12340 |
+ 0xbb, 0x66, 0x74, 0x1b, 0xcd, 0x47, 0x5a, 0x02, 0x01, 0x64, 0xc5, 0x00, 0xd6, 0xb4, 0x24, 0xa7, |
|
| 12341 |
+ 0x6b, 0xd5, 0xd6, 0x7e, 0x7b, 0xaf, 0x2e, 0xc8, 0x10, 0xba, 0x01, 0x5a, 0x38, 0x84, 0xfd, 0x4e, |
|
| 12342 |
+ 0xd7, 0xc0, 0x5c, 0x9a, 0x46, 0xd7, 0x61, 0x2d, 0x92, 0x2a, 0xcd, 0x0c, 0xba, 0x09, 0x28, 0x12, |
|
| 12343 |
+ 0x4e, 0x4d, 0x64, 0xf5, 0x3f, 0x84, 0xb5, 0xaa, 0xe7, 0x32, 0x62, 0xbb, 0x11, 0x95, 0xdd, 0xe2, |
|
| 12344 |
+ 0xfd, 0x56, 0xa2, 0xbe, 0x6d, 0xc9, 0x6c, 0xbb, 0xbd, 0x76, 0xf1, 0x62, 0xb3, 0x10, 0x41, 0x1b, |
|
| 12345 |
+ 0x35, 0xde, 0xd3, 0xb0, 0x60, 0xf1, 0x35, 0x35, 0xb2, 0x2d, 0x95, 0x3c, 0x97, 0x2f, 0x5e, 0x6c, |
|
| 12346 |
+ 0xa6, 0xda, 0x8d, 0x1a, 0xe6, 0x32, 0xf4, 0x26, 0xe4, 0xe9, 0x33, 0x9b, 0xf5, 0x4d, 0x9e, 0x5d, |
|
| 12347 |
+ 0xf9, 0x18, 0x66, 0x70, 0x8e, 0x0b, 0xaa, 0x3c, 0x99, 0xfe, 0x51, 0x12, 0xa0, 0x4b, 0x82, 0x63, |
|
| 12348 |
+ 0xd5, 0xf4, 0x43, 0xc8, 0x47, 0x87, 0xf8, 0xab, 0x0e, 0x93, 0xb1, 0xf9, 0x8a, 0xf0, 0xe8, 0xe3, |
|
| 12349 |
+ 0x30, 0x62, 0x24, 0xc7, 0x5e, 0xac, 0xa8, 0xda, 0x5a, 0x44, 0x53, 0x67, 0x89, 0x34, 0xdf, 0x6b, |
|
| 12350 |
+ 0xa8, 0xef, 0xab, 0x89, 0xe3, 0x9f, 0xa8, 0x2a, 0xf2, 0xad, 0xec, 0xb3, 0x62, 0x6e, 0x77, 0x16, |
|
| 12351 |
+ 0x35, 0x32, 0x37, 0xa0, 0xbb, 0x4b, 0x78, 0xaa, 0xb7, 0xad, 0xc1, 0xaa, 0x3f, 0x76, 0xb9, 0xd7, |
|
| 12352 |
+ 0xfd, 0x40, 0x54, 0xeb, 0x36, 0xbc, 0xd1, 0xa4, 0xec, 0xd4, 0xf3, 0x8f, 0x0d, 0xc6, 0x88, 0x79, |
|
| 12353 |
+ 0xc4, 0x0f, 0xd5, 0x2a, 0xc9, 0x4c, 0x09, 0x67, 0x62, 0x86, 0x70, 0xae, 0xc3, 0x32, 0x71, 0x6c, |
|
| 12354 |
+ 0x12, 0x50, 0xb9, 0x4b, 0xe7, 0x71, 0x58, 0xe4, 0xb4, 0x98, 0x58, 0x96, 0x4f, 0x83, 0x80, 0xca, |
|
| 12355 |
+ 0x63, 0x60, 0x1e, 0x4f, 0x05, 0xfa, 0x3f, 0x27, 0x01, 0x1a, 0x6d, 0x63, 0x5f, 0x99, 0xaf, 0x41, |
|
| 12356 |
+ 0xf6, 0x90, 0x0c, 0x6d, 0x67, 0x72, 0xd5, 0x22, 0x9b, 0xe2, 0x2b, 0x86, 0x34, 0xb4, 0x23, 0x74, |
|
| 12357 |
+ 0xb0, 0xd2, 0x15, 0x6c, 0x79, 0x7c, 0xe0, 0x52, 0x16, 0xb1, 0x65, 0x51, 0xe2, 0x5b, 0xb3, 0x4f, |
|
| 12358 |
+ 0xdc, 0x68, 0x60, 0x65, 0x81, 0xbb, 0x3e, 0x20, 0x8c, 0x9e, 0x92, 0x49, 0xb8, 0x26, 0x54, 0x11, |
|
| 12359 |
+ 0xed, 0x72, 0x16, 0xcd, 0x0f, 0xf7, 0xd4, 0x5a, 0xcf, 0x08, 0xee, 0xf1, 0x2a, 0x7f, 0xb0, 0x82, |
|
| 12360 |
+ 0x4b, 0xd2, 0x11, 0x69, 0x97, 0x1e, 0x8a, 0x9d, 0x72, 0x5a, 0xf5, 0xad, 0x0e, 0xb1, 0x1f, 0xc2, |
|
| 12361 |
+ 0xca, 0x4c, 0x3f, 0x5f, 0x3a, 0xa6, 0x34, 0xda, 0x4f, 0x7e, 0xa8, 0xa5, 0xd5, 0xd7, 0x27, 0x5a, |
|
| 12362 |
+ 0x56, 0xff, 0xdb, 0x14, 0x40, 0xdb, 0xf3, 0xc3, 0x49, 0x5b, 0x7c, 0x2d, 0x94, 0x13, 0x97, 0x4c, |
|
| 12363 |
+ 0xa6, 0xe7, 0xa8, 0xf0, 0x5c, 0xc8, 0xd3, 0xa7, 0x56, 0x38, 0xed, 0x15, 0x70, 0x1c, 0x29, 0xa2, |
|
| 12364 |
+ 0x4d, 0x28, 0xc8, 0xf9, 0xef, 0x8f, 0x3c, 0x5f, 0xe6, 0xa3, 0x15, 0x0c, 0x52, 0xc4, 0x35, 0xd1, |
|
| 12365 |
+ 0x5d, 0x58, 0x1d, 0x8d, 0x0f, 0x1c, 0x3b, 0x38, 0xa2, 0x96, 0xc4, 0xa4, 0x05, 0x66, 0x25, 0x92, |
|
| 12366 |
+ 0x0a, 0xd8, 0x3e, 0x14, 0x95, 0xa0, 0x2f, 0x28, 0x4f, 0x46, 0x38, 0x74, 0xff, 0x55, 0x0e, 0x49, |
|
| 12367 |
+ 0x15, 0xc1, 0x84, 0x0a, 0xa3, 0x69, 0x41, 0xaf, 0x41, 0x2e, 0x74, 0x16, 0xad, 0x43, 0xaa, 0x5b, |
|
| 12368 |
+ 0x6d, 0x6b, 0x4b, 0xa5, 0xb5, 0xb3, 0xf3, 0x72, 0x21, 0x14, 0x77, 0xab, 0x6d, 0x5e, 0xd3, 0xab, |
|
| 12369 |
+ 0xb5, 0xb5, 0xc4, 0x6c, 0x4d, 0xaf, 0xd6, 0x2e, 0xa5, 0xf9, 0xa6, 0xab, 0x1f, 0x42, 0x21, 0xd6, |
|
| 12370 |
+ 0x02, 0xba, 0x03, 0xcb, 0x8d, 0xe6, 0x23, 0x5c, 0xef, 0x74, 0xb4, 0xa5, 0xd2, 0xcd, 0xb3, 0xf3, |
|
| 12371 |
+ 0x32, 0x8a, 0xd5, 0x36, 0xdc, 0x01, 0x9f, 0x1f, 0xf4, 0x16, 0xa4, 0x77, 0x5b, 0x9d, 0x6e, 0xc8, |
|
| 12372 |
+ 0xb1, 0x62, 0x88, 0x5d, 0x2f, 0x60, 0xa5, 0xeb, 0x6a, 0x37, 0x8f, 0x1b, 0xd6, 0xff, 0x22, 0x01, |
|
| 12373 |
+ 0x59, 0x49, 0x35, 0x17, 0x4e, 0x94, 0x01, 0xcb, 0xe1, 0x01, 0x48, 0xf2, 0xdf, 0x77, 0x2f, 0xe7, |
|
| 12374 |
+ 0xaa, 0x15, 0x45, 0x2d, 0x65, 0xf8, 0x85, 0x7a, 0xa5, 0x4f, 0xa1, 0x18, 0xaf, 0xf8, 0x56, 0xc1, |
|
| 12375 |
+ 0xf7, 0x07, 0x50, 0xe0, 0xf1, 0x1d, 0x72, 0xd6, 0x2d, 0xc8, 0x4a, 0x3a, 0xac, 0x92, 0xe1, 0x55, |
|
| 12376 |
+ 0xc4, 0x59, 0x21, 0xd1, 0x03, 0x58, 0x96, 0x64, 0x3b, 0xbc, 0x06, 0xda, 0xb8, 0x7a, 0x15, 0xe1, |
|
| 12377 |
+ 0x10, 0xae, 0x7f, 0x06, 0xe9, 0x36, 0xa5, 0x3e, 0x1f, 0x7b, 0xd7, 0xb3, 0xe8, 0x34, 0xf7, 0xab, |
|
| 12378 |
+ 0x73, 0x82, 0x45, 0x1b, 0x35, 0x7e, 0x4e, 0xb0, 0x68, 0xc3, 0xe2, 0x83, 0xc7, 0xf3, 0x4a, 0x78, |
|
| 12379 |
+ 0x13, 0xc6, 0xbf, 0xf5, 0x2e, 0x14, 0x9f, 0x52, 0x7b, 0x70, 0xc4, 0xa8, 0x25, 0x0c, 0xbd, 0x0f, |
|
| 12380 |
+ 0xe9, 0x11, 0x8d, 0x9c, 0x5f, 0x5f, 0x18, 0x60, 0x94, 0xfa, 0x58, 0xa0, 0x78, 0x1e, 0x39, 0x15, |
|
| 12381 |
+ 0xda, 0xea, 0xf2, 0x51, 0x95, 0xf4, 0x7f, 0x4a, 0xc2, 0x6a, 0x23, 0x08, 0xc6, 0xc4, 0x35, 0x43, |
|
| 12382 |
+ 0x72, 0xf0, 0xd3, 0x59, 0x72, 0x70, 0x6f, 0x61, 0x0f, 0x67, 0x54, 0x66, 0x2f, 0x27, 0x54, 0x6e, |
|
| 12383 |
+ 0x4f, 0x46, 0xb9, 0x5d, 0xff, 0xcf, 0x44, 0x78, 0x2b, 0x71, 0x37, 0xb6, 0xdc, 0x4b, 0xeb, 0x67, |
|
| 12384 |
+ 0xe7, 0xe5, 0x1b, 0x71, 0x4b, 0xb4, 0xe7, 0x1e, 0xbb, 0xde, 0xa9, 0x8b, 0xde, 0x86, 0x0c, 0xae, |
|
| 12385 |
+ 0x37, 0xeb, 0x4f, 0xb5, 0x84, 0x0c, 0xcf, 0x19, 0x10, 0xa6, 0x2e, 0x3d, 0xe5, 0x96, 0xda, 0xf5, |
|
| 12386 |
+ 0x66, 0x8d, 0x6f, 0xe3, 0xc9, 0x05, 0x96, 0xda, 0xd4, 0xb5, 0x6c, 0x77, 0x80, 0xee, 0x40, 0xb6, |
|
| 12387 |
+ 0xd1, 0xe9, 0xf4, 0xc4, 0xb9, 0xf1, 0x8d, 0xb3, 0xf3, 0xf2, 0xf5, 0x19, 0x14, 0x2f, 0x50, 0x8b, |
|
| 12388 |
+ 0x83, 0x38, 0xaf, 0xe5, 0x1b, 0xfc, 0x02, 0x10, 0xa7, 0x5c, 0x12, 0x84, 0x5b, 0x5d, 0x7e, 0xa8, |
|
| 12389 |
+ 0xcd, 0x2c, 0x00, 0x61, 0x8f, 0xff, 0xaa, 0xe5, 0xf6, 0xaf, 0x49, 0xd0, 0x0c, 0xd3, 0xa4, 0x23, |
|
| 12390 |
+ 0xc6, 0xeb, 0xd5, 0x81, 0xa2, 0x0b, 0xb9, 0x11, 0xff, 0xb2, 0xc5, 0x01, 0x89, 0xc7, 0xce, 0x83, |
|
| 12391 |
+ 0x85, 0xd7, 0xd7, 0x73, 0x7a, 0x15, 0xec, 0x39, 0xd4, 0xb0, 0x86, 0x76, 0x10, 0xf0, 0x83, 0xb3, |
|
| 12392 |
+ 0x90, 0xe1, 0xc8, 0x52, 0xe9, 0xd7, 0x09, 0xb8, 0xbe, 0x00, 0x81, 0x3e, 0x84, 0xb4, 0xef, 0x39, |
|
| 12393 |
+ 0xe1, 0x1c, 0xde, 0xbe, 0xec, 0x72, 0x89, 0xab, 0x62, 0x81, 0x44, 0x1b, 0x00, 0x64, 0xcc, 0x3c, |
|
| 12394 |
+ 0x22, 0xda, 0x17, 0xb3, 0x97, 0xc3, 0x31, 0x09, 0x7a, 0x0a, 0xd9, 0x80, 0x9a, 0x3e, 0x0d, 0xb9, |
|
| 12395 |
+ 0xda, 0x67, 0xff, 0x5f, 0xef, 0x2b, 0x1d, 0x61, 0x06, 0x2b, 0x73, 0xa5, 0x0a, 0x64, 0xa5, 0x84, |
|
| 12396 |
+ 0x87, 0xbd, 0x45, 0x18, 0x11, 0x4e, 0x17, 0xb1, 0xf8, 0xe6, 0xd1, 0x44, 0x9c, 0x41, 0x18, 0x4d, |
|
| 12397 |
+ 0xc4, 0x19, 0xe8, 0x3f, 0x4f, 0x02, 0xd4, 0x9f, 0x31, 0xea, 0xbb, 0xc4, 0xa9, 0x1a, 0xa8, 0x1e, |
|
| 12398 |
+ 0xcb, 0xfe, 0xb2, 0xb7, 0xef, 0x2d, 0xbc, 0x72, 0x8c, 0x34, 0x2a, 0x55, 0x63, 0x41, 0xfe, 0xbf, |
|
| 12399 |
+ 0x05, 0xa9, 0xb1, 0xef, 0xa8, 0xeb, 0x6b, 0x41, 0xb2, 0x7a, 0x78, 0x0f, 0x73, 0x19, 0xaa, 0x4f, |
|
| 12400 |
+ 0xd3, 0x56, 0xea, 0xf2, 0x77, 0x87, 0x58, 0x03, 0xdf, 0x7d, 0xea, 0x7a, 0x1f, 0x60, 0xea, 0x35, |
|
| 12401 |
+ 0xda, 0x80, 0x4c, 0x75, 0xa7, 0xd3, 0xd9, 0xd3, 0x96, 0x64, 0x6e, 0x9e, 0x56, 0x09, 0xb1, 0xfe, |
|
| 12402 |
+ 0x37, 0x09, 0xc8, 0x55, 0x0d, 0xb5, 0x63, 0xee, 0x80, 0x26, 0x12, 0x8e, 0x49, 0x7d, 0xd6, 0xa7, |
|
| 12403 |
+ 0xcf, 0x46, 0xb6, 0x3f, 0x51, 0x39, 0xe3, 0xea, 0x13, 0xca, 0x2a, 0xd7, 0xaa, 0x52, 0x9f, 0xd5, |
|
| 12404 |
+ 0x85, 0x0e, 0xc2, 0x50, 0xa4, 0xaa, 0x8b, 0x7d, 0x93, 0x84, 0x19, 0x7c, 0xe3, 0xea, 0xa1, 0x90, |
|
| 12405 |
+ 0xcc, 0x76, 0x5a, 0x0e, 0x70, 0x21, 0x34, 0x52, 0x25, 0x81, 0xfe, 0x04, 0xae, 0xb7, 0x7c, 0xf3, |
|
| 12406 |
+ 0x88, 0x06, 0x4c, 0x36, 0xaa, 0x5c, 0xfe, 0x0c, 0x6e, 0x33, 0x12, 0x1c, 0xf7, 0x8f, 0xec, 0x80, |
|
| 12407 |
+ 0x79, 0xfe, 0xa4, 0xef, 0x53, 0x46, 0x5d, 0x5e, 0xdf, 0x17, 0xaf, 0x1b, 0xea, 0x7e, 0xe1, 0x16, |
|
| 12408 |
+ 0xc7, 0xec, 0x4a, 0x08, 0x0e, 0x11, 0x7b, 0x1c, 0xa0, 0x37, 0xa0, 0xc8, 0xc9, 0x68, 0x8d, 0x1e, |
|
| 12409 |
+ 0x92, 0xb1, 0xc3, 0x02, 0xf4, 0x63, 0x00, 0xc7, 0x1b, 0xf4, 0x5f, 0x3b, 0xdd, 0xe7, 0x1d, 0x6f, |
|
| 12410 |
+ 0x20, 0x3f, 0xf5, 0xdf, 0x01, 0xad, 0x66, 0x07, 0x23, 0xc2, 0xcc, 0xa3, 0xf0, 0xe2, 0x04, 0x3d, |
|
| 12411 |
+ 0x02, 0xed, 0x88, 0x12, 0x9f, 0x1d, 0x50, 0xc2, 0xfa, 0x23, 0xea, 0xdb, 0x9e, 0xf5, 0x5a, 0x43, |
|
| 12412 |
+ 0xba, 0x16, 0x69, 0xb5, 0x85, 0x92, 0xfe, 0xdf, 0x09, 0x00, 0x4c, 0x0e, 0x43, 0x72, 0xf3, 0x03, |
|
| 12413 |
+ 0xb8, 0x16, 0xb8, 0x64, 0x14, 0x1c, 0x79, 0xac, 0x6f, 0xbb, 0x8c, 0xfa, 0x27, 0xc4, 0x51, 0x87, |
|
| 12414 |
+ 0x5f, 0x2d, 0xac, 0x68, 0x28, 0x39, 0x7a, 0x1f, 0xd0, 0x31, 0xa5, 0xa3, 0xbe, 0xe7, 0x58, 0xfd, |
|
| 12415 |
+ 0xb0, 0x52, 0x3e, 0xbf, 0xa4, 0xb1, 0xc6, 0x6b, 0x5a, 0x8e, 0xd5, 0x09, 0xe5, 0x68, 0x1b, 0x36, |
|
| 12416 |
+ 0xf8, 0x08, 0x50, 0x97, 0xf9, 0x36, 0x0d, 0xfa, 0x87, 0x9e, 0xdf, 0x0f, 0x1c, 0xef, 0xb4, 0x7f, |
|
| 12417 |
+ 0xe8, 0x39, 0x8e, 0x77, 0x4a, 0xfd, 0xf0, 0x6a, 0xa1, 0xe4, 0x78, 0x83, 0xba, 0x04, 0xed, 0x78, |
|
| 12418 |
+ 0x7e, 0xc7, 0xf1, 0x4e, 0x77, 0x42, 0x04, 0x67, 0x40, 0xd3, 0x6e, 0x33, 0xdb, 0x3c, 0x0e, 0x19, |
|
| 12419 |
+ 0x50, 0x24, 0xed, 0xda, 0xe6, 0x31, 0xba, 0x03, 0x2b, 0xd4, 0xa1, 0xe2, 0x80, 0x2a, 0x51, 0x19, |
|
| 12420 |
+ 0x81, 0x2a, 0x86, 0x42, 0x0e, 0xd2, 0x7f, 0x0b, 0xf2, 0x6d, 0x87, 0x98, 0xe2, 0x91, 0x8b, 0x1f, |
|
| 12421 |
+ 0xf7, 0x4d, 0xcf, 0xe5, 0x41, 0x60, 0xbb, 0x4c, 0x66, 0xc7, 0x3c, 0x8e, 0x8b, 0xf4, 0x9f, 0x02, |
|
| 12422 |
+ 0xfc, 0xcc, 0xb3, 0xdd, 0xae, 0x77, 0x4c, 0x5d, 0xf1, 0x1e, 0xc0, 0x19, 0xbd, 0x9a, 0xca, 0x3c, |
|
| 12423 |
+ 0x56, 0x25, 0x71, 0xde, 0x20, 0x2e, 0x19, 0x50, 0x3f, 0xba, 0x16, 0x97, 0x45, 0xfd, 0xeb, 0x04, |
|
| 12424 |
+ 0x64, 0xb1, 0xe7, 0xb1, 0xaa, 0x81, 0xca, 0x90, 0x35, 0x49, 0x3f, 0x5c, 0x79, 0xc5, 0xed, 0xfc, |
|
| 12425 |
+ 0xc5, 0x8b, 0xcd, 0x4c, 0xd5, 0x78, 0x4c, 0x27, 0x38, 0x63, 0x92, 0xc7, 0x74, 0xc2, 0xb7, 0x68, |
|
| 12426 |
+ 0x93, 0x88, 0xf5, 0x22, 0xcc, 0x14, 0xe5, 0x16, 0x5d, 0x35, 0xf8, 0x62, 0xc0, 0x59, 0x93, 0xf0, |
|
| 12427 |
+ 0x7f, 0xf4, 0x21, 0x14, 0x15, 0xa8, 0x7f, 0x44, 0x82, 0x23, 0xc9, 0xc3, 0xb7, 0x57, 0x2f, 0x5e, |
|
| 12428 |
+ 0x6c, 0x82, 0x44, 0xee, 0x92, 0xe0, 0x08, 0x83, 0x44, 0xf3, 0x6f, 0x54, 0x87, 0xc2, 0x97, 0x9e, |
|
| 12429 |
+ 0xed, 0xf6, 0x99, 0xe8, 0x84, 0xba, 0x25, 0x58, 0xb8, 0x7e, 0xa6, 0x5d, 0x55, 0x57, 0x17, 0xf0, |
|
| 12430 |
+ 0x65, 0x24, 0xd1, 0xff, 0x25, 0x01, 0x05, 0x6e, 0xd3, 0x3e, 0xb4, 0x4d, 0xbe, 0xa5, 0x7e, 0xfb, |
|
| 12431 |
+ 0x4c, 0x7f, 0x0b, 0x52, 0x66, 0xe0, 0xab, 0xbe, 0x89, 0x54, 0x57, 0xed, 0x60, 0xcc, 0x65, 0xe8, |
|
| 12432 |
+ 0x73, 0xc8, 0xca, 0x83, 0x93, 0x4a, 0xf2, 0xfa, 0xab, 0x37, 0x7f, 0xe5, 0xa2, 0xd2, 0x13, 0x73, |
|
| 12433 |
+ 0x39, 0xf5, 0x4e, 0xf4, 0xb2, 0x88, 0xe3, 0x22, 0x74, 0x13, 0x92, 0xa6, 0x2b, 0x82, 0x42, 0xbd, |
|
| 12434 |
+ 0x13, 0x56, 0x9b, 0x38, 0x69, 0xba, 0xfa, 0x3f, 0x26, 0x60, 0xa5, 0xee, 0x9a, 0xfe, 0x44, 0x24, |
|
| 12435 |
+ 0x49, 0x3e, 0x11, 0xb7, 0x21, 0x1f, 0x8c, 0x0f, 0x82, 0x49, 0xc0, 0xe8, 0x30, 0x7c, 0x86, 0x88, |
|
| 12436 |
+ 0x04, 0xa8, 0x01, 0x79, 0xe2, 0x0c, 0x3c, 0xdf, 0x66, 0x47, 0x43, 0xc5, 0xfb, 0x17, 0x27, 0xe6, |
|
| 12437 |
+ 0xb8, 0xcd, 0x8a, 0x11, 0xaa, 0xe0, 0xa9, 0x76, 0x98, 0x8a, 0x53, 0xc2, 0x59, 0x91, 0x8a, 0xdf, |
|
| 12438 |
+ 0x86, 0xa2, 0x43, 0x86, 0x9c, 0xe6, 0xf7, 0xf9, 0x71, 0x52, 0xf4, 0x23, 0x8d, 0x0b, 0x4a, 0xc6, |
|
| 12439 |
+ 0x8f, 0xc8, 0xba, 0x0e, 0xf9, 0xc8, 0x18, 0x5a, 0x83, 0x82, 0x51, 0xef, 0xf4, 0x3f, 0xda, 0x7a, |
|
| 12440 |
+ 0xd0, 0x7f, 0x54, 0xdd, 0xd7, 0x96, 0x14, 0x13, 0xf8, 0xbb, 0x04, 0xac, 0xec, 0xcb, 0x18, 0x54, |
|
| 12441 |
+ 0xec, 0xea, 0x0e, 0x2c, 0xfb, 0xe4, 0x90, 0x85, 0xfc, 0x2f, 0x2d, 0x83, 0x8b, 0x27, 0x01, 0xce, |
|
| 12442 |
+ 0xff, 0x78, 0xd5, 0x62, 0xfe, 0x17, 0x7b, 0x04, 0x4b, 0x5d, 0xf9, 0x08, 0x96, 0xfe, 0x4e, 0x1e, |
|
| 12443 |
+ 0xc1, 0xf4, 0x5f, 0x25, 0x60, 0x4d, 0x6d, 0xd4, 0xe1, 0xc3, 0x0f, 0x7a, 0x0f, 0xf2, 0x72, 0xcf, |
|
| 12444 |
+ 0x9e, 0xb2, 0x57, 0xf1, 0x16, 0x23, 0x71, 0x8d, 0x1a, 0xce, 0xc9, 0xea, 0x86, 0x85, 0x7e, 0x12, |
|
| 12445 |
+ 0xbb, 0xf1, 0xbd, 0x84, 0x43, 0xce, 0x59, 0xaf, 0x4c, 0xaf, 0x81, 0x2f, 0x7d, 0x0b, 0xda, 0x84, |
|
| 12446 |
+ 0x82, 0x72, 0x40, 0x9c, 0x2d, 0xe4, 0x19, 0x17, 0xa4, 0xa8, 0x49, 0x86, 0x54, 0xbf, 0x0b, 0x69, |
|
| 12447 |
+ 0x71, 0xc2, 0x01, 0xc8, 0x76, 0xbe, 0xe8, 0x74, 0xeb, 0xfb, 0xf2, 0x54, 0xb9, 0xd3, 0x10, 0x0f, |
|
| 12448 |
+ 0x72, 0xcb, 0x90, 0xaa, 0x37, 0x9f, 0x68, 0x49, 0xbd, 0x05, 0x37, 0xb7, 0x1d, 0x62, 0x1e, 0x3b, |
|
| 12449 |
+ 0x76, 0xc0, 0xa8, 0x15, 0x5f, 0x4d, 0x3f, 0x82, 0xec, 0xcc, 0x1e, 0xf9, 0x8a, 0x1b, 0x12, 0x05, |
|
| 12450 |
+ 0xbe, 0xff, 0x0f, 0x29, 0xc8, 0x47, 0xd7, 0x1f, 0x7c, 0x81, 0x71, 0xf2, 0xba, 0x24, 0x2f, 0x41, |
|
| 12451 |
+ 0x23, 0x79, 0x93, 0x9e, 0xa2, 0xb7, 0xa7, 0xb4, 0xf5, 0x73, 0x79, 0x85, 0x1a, 0x55, 0x87, 0x94, |
|
| 12452 |
+ 0xf5, 0x1d, 0xc8, 0x19, 0x9d, 0x4e, 0xe3, 0x51, 0xb3, 0x5e, 0xd3, 0xbe, 0x4a, 0x94, 0xbe, 0x77, |
|
| 12453 |
+ 0x76, 0x5e, 0xbe, 0x16, 0x81, 0x8c, 0x20, 0xb0, 0x07, 0x2e, 0xb5, 0x04, 0xaa, 0x5a, 0xad, 0xb7, |
|
| 12454 |
+ 0xbb, 0xf5, 0x9a, 0xf6, 0x3c, 0x39, 0x8f, 0x12, 0x34, 0x4c, 0x3c, 0x87, 0xe4, 0xdb, 0xb8, 0xde, |
|
| 12455 |
+ 0x36, 0x30, 0x6f, 0xf0, 0xab, 0xa4, 0x64, 0xd3, 0xd3, 0x16, 0x7d, 0x3a, 0x22, 0x3e, 0x6f, 0x73, |
|
| 12456 |
+ 0x23, 0x7c, 0x16, 0x7c, 0x9e, 0x92, 0x57, 0xe6, 0xd3, 0xbb, 0x1c, 0x4a, 0xac, 0x09, 0x6f, 0x4d, |
|
| 12457 |
+ 0xdc, 0x81, 0x09, 0x33, 0xa9, 0xb9, 0xd6, 0x3a, 0x8c, 0xf8, 0x8c, 0x5b, 0xd1, 0x61, 0x19, 0xf7, |
|
| 12458 |
+ 0x9a, 0x4d, 0x0e, 0x7a, 0x9e, 0x9e, 0xeb, 0x1d, 0x1e, 0xbb, 0x2e, 0xc7, 0xdc, 0x85, 0x5c, 0x78, |
|
| 12459 |
+ 0x45, 0xa6, 0x7d, 0x95, 0x9e, 0x73, 0xa8, 0x1a, 0xde, 0xef, 0x89, 0x06, 0x77, 0x7b, 0x5d, 0xf1, |
|
| 12460 |
+ 0x6a, 0xf9, 0x3c, 0x33, 0xdf, 0xe0, 0xd1, 0x98, 0x59, 0xfc, 0x9c, 0x50, 0x8e, 0x88, 0xfb, 0x57, |
|
| 12461 |
+ 0x19, 0x49, 0x85, 0x22, 0x8c, 0x62, 0xed, 0xef, 0x40, 0x0e, 0xd7, 0x7f, 0x26, 0x1f, 0x38, 0x9f, |
|
| 12462 |
+ 0x67, 0xe7, 0xec, 0x60, 0xfa, 0x25, 0x35, 0x19, 0xb5, 0xa6, 0x2f, 0x02, 0x51, 0xd5, 0xfd, 0xdf, |
|
| 12463 |
+ 0x85, 0x5c, 0x98, 0x36, 0xd1, 0x06, 0x64, 0x9f, 0xb6, 0xf0, 0xe3, 0x3a, 0xd6, 0x96, 0xe4, 0xe8, |
|
| 12464 |
+ 0x84, 0x35, 0x4f, 0xe5, 0xbe, 0x53, 0x86, 0xe5, 0x7d, 0xa3, 0x69, 0x3c, 0xaa, 0xe3, 0xf0, 0xb4, |
|
| 12465 |
+ 0x1c, 0x02, 0xd4, 0xda, 0x2f, 0x69, 0xaa, 0x81, 0xc8, 0xe6, 0xf6, 0xed, 0xaf, 0xbf, 0xd9, 0x58, |
|
| 12466 |
+ 0xfa, 0xe5, 0x37, 0x1b, 0x4b, 0xbf, 0xfe, 0x66, 0x23, 0xf1, 0xfc, 0x62, 0x23, 0xf1, 0xf5, 0xc5, |
|
| 12467 |
+ 0x46, 0xe2, 0x17, 0x17, 0x1b, 0x89, 0x7f, 0xbf, 0xd8, 0x48, 0x1c, 0x64, 0x05, 0x2f, 0xfd, 0xf8, |
|
| 12468 |
+ 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x2d, 0x4d, 0x67, 0xa2, 0x23, 0x00, 0x00, |
|
| 12466 | 12469 |
} |
| ... | ... |
@@ -371,8 +371,7 @@ enum TaskState {
|
| 371 | 371 |
option (gogoproto.goproto_enum_prefix) = false; |
| 372 | 372 |
option (gogoproto.enum_customname) = "TaskState"; |
| 373 | 373 |
NEW = 0 [(gogoproto.enumvalue_customname)="TaskStateNew"]; |
| 374 |
- ALLOCATED = 64 [(gogoproto.enumvalue_customname)="TaskStateAllocated"]; // successful allocation of resources that the task needs |
|
| 375 |
- PENDING = 128 [(gogoproto.enumvalue_customname) = "TaskStatePending"]; // observed by scheduler but unassigned. |
|
| 374 |
+ PENDING = 64 [(gogoproto.enumvalue_customname)="TaskStatePending"]; // waiting for scheduling decision |
|
| 376 | 375 |
ASSIGNED = 192 [(gogoproto.enumvalue_customname)="TaskStateAssigned"]; |
| 377 | 376 |
ACCEPTED = 256 [(gogoproto.enumvalue_customname)="TaskStateAccepted"]; // task has been accepted by an agent. |
| 378 | 377 |
PREPARING = 320 [(gogoproto.enumvalue_customname)="TaskStatePreparing"]; |
| ... | ... |
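The hunk above renumbers the enum so that PENDING takes the slot formerly held by ALLOCATED; because TaskState values are spaced numerically (NEW = 0, PENDING = 64, ASSIGNED = 192, ...), their ordering doubles as a progression check. As a rough illustration only — not part of the vendored change, and using simplified stand-in constants rather than the generated api package:

```go
package main

import "fmt"

// TaskState mirrors the numeric spacing of the proto enum; these local
// constants are simplified stand-ins for the generated api package.
type TaskState int32

const (
	TaskStateNew      TaskState = 0
	TaskStatePending  TaskState = 64 // occupies the old ALLOCATED slot
	TaskStateAssigned TaskState = 192
	TaskStateAccepted TaskState = 256
)

// supersedes reports whether a newly observed state should replace the
// stored one: states only move forward along the numeric ordering.
func supersedes(stored, observed TaskState) bool {
	return observed > stored
}

func main() {
	fmt.Println(supersedes(TaskStatePending, TaskStateAssigned)) // true
	fmt.Println(supersedes(TaskStateAssigned, TaskStatePending)) // false
}
```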
@@ -506,6 +505,21 @@ message PortConfig {
|
| 506 | 506 |
UDP = 1 [(gogoproto.enumvalue_customname) = "ProtocolUDP"]; |
| 507 | 507 |
} |
| 508 | 508 |
|
| 509 |
+ // PublishMode controls how ports are published on the swarm. |
|
| 510 |
+ enum PublishMode {
|
|
| 511 |
+ option (gogoproto.enum_customname) = "PublishMode"; |
|
| 512 |
+ option (gogoproto.goproto_enum_prefix) = false; |
|
| 513 |
+ |
|
| 514 |
+ // PublishModeIngress exposes the port across the cluster on all nodes. |
|
| 515 |
+ INGRESS = 0 [(gogoproto.enumvalue_customname) = "PublishModeIngress"]; |
|
| 516 |
+ |
|
| 517 |
+ // PublishModeHost exposes the port on just the target host. If the |
|
| 518 |
+ // published port is undefined, an ephemeral port will be allocated. If |
|
| 519 |
+ // the published port is defined, the node will attempt to allocate it, |
|
| 520 |
+ // failing the task if the allocation fails. |
|
| 521 |
+ HOST = 1 [(gogoproto.enumvalue_customname) = "PublishModeHost"]; |
|
| 522 |
+ } |
|
| 523 |
+ |
|
| 509 | 524 |
// Name for the port. If provided the port information can |
| 510 | 525 |
// be queried using the name as in a DNS SRV query. |
| 511 | 526 |
string name = 1; |
| ... | ... |
@@ -516,11 +530,13 @@ message PortConfig {
|
| 516 | 516 |
// The port which the application is exposing and is bound to. |
| 517 | 517 |
uint32 target_port = 3; |
| 518 | 518 |
|
| 519 |
- // PublishedPort specifies the port on which the service is |
|
| 520 |
- // exposed. If specified, the port must be |
|
| 521 |
- // within the available range. If not specified, an available |
|
| 522 |
- // port is automatically assigned. |
|
| 519 |
+ // PublishedPort specifies the port on which the service is exposed. If |
|
| 520 |
+ // specified, the port must be within the available range. If not specified |
|
| 521 |
+ // (value is zero), an available port is automatically assigned. |
|
| 523 | 522 |
uint32 published_port = 4; |
| 523 |
+ |
|
| 524 |
+ // PublishMode controls how the port is published. |
|
| 525 |
+ PublishMode publish_mode = 5; |
|
| 524 | 526 |
} |
| 525 | 527 |
|
| 526 | 528 |
// Driver is a generic driver type to be used throughout the API. For now, a |
| ... | ... |
@@ -556,10 +572,17 @@ message IssuanceStatus {
|
| 556 | 556 |
option (gogoproto.goproto_enum_prefix) = false; |
| 557 | 557 |
|
| 558 | 558 |
UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "IssuanceStateUnknown"]; |
| 559 |
- RENEW = 1 [(gogoproto.enumvalue_customname)="IssuanceStateRenew"]; // Certificate should be issued |
|
| 560 |
- PENDING = 2 [(gogoproto.enumvalue_customname)="IssuanceStatePending"]; // Certificate is pending acceptance |
|
| 561 |
- ISSUED = 3 [(gogoproto.enumvalue_customname)="IssuanceStateIssued"]; // successful completion certificate issuance |
|
| 562 |
- FAILED = 4 [(gogoproto.enumvalue_customname)="IssuanceStateFailed"]; // Certificate issuance failed |
|
| 559 |
+ // A new certificate should be issued |
|
| 560 |
+ RENEW = 1 [(gogoproto.enumvalue_customname)="IssuanceStateRenew"]; |
|
| 561 |
+ // Certificate is pending acceptance |
|
| 562 |
+ PENDING = 2 [(gogoproto.enumvalue_customname)="IssuanceStatePending"]; |
|
| 563 |
+ // successful completion certificate issuance |
|
| 564 |
+ ISSUED = 3 [(gogoproto.enumvalue_customname)="IssuanceStateIssued"]; |
|
| 565 |
+ // Certificate issuance failed |
|
| 566 |
+ FAILED = 4 [(gogoproto.enumvalue_customname)="IssuanceStateFailed"]; |
|
| 567 |
+ // Signals workers to renew their certificate. From the CA's perspective |
|
| 568 |
+ // this is equivalent to IssuanceStateIssued: a noop. |
|
| 569 |
+ ROTATE = 5 [(gogoproto.enumvalue_customname)="IssuanceStateRotate"]; |
|
| 563 | 570 |
} |
| 564 | 571 |
State state = 1; |
| 565 | 572 |
|
| ... | ... |
@@ -777,12 +800,10 @@ message SecretReference {
|
| 777 | 777 |
string secret_name = 4; |
| 778 | 778 |
} |
| 779 | 779 |
|
| 780 |
-// RemovedNode is a record for a node that has been removed from the swarm. |
|
| 781 |
-message RemovedNode {
|
|
| 782 |
- // ID is the ID of the removed node. |
|
| 783 |
- string id = 1 [(gogoproto.customname) = "ID"]; |
|
| 784 |
- |
|
| 780 |
+// BlacklistedCertificate is a record for a blacklisted certificate. It does not |
|
| 781 |
+// contain the certificate's CN, because these records are indexed by CN. |
|
| 782 |
+message BlacklistedCertificate {
|
|
| 785 | 783 |
// Expiry is the latest known expiration time of a certificate that |
| 786 |
- // was issued to this node. |
|
| 787 |
- Timestamp expiry = 2; |
|
| 784 |
+ // was issued for the given CN. |
|
| 785 |
+ Timestamp expiry = 1; |
|
| 788 | 786 |
} |
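The record replaces RemovedNode and is keyed by the certificate CN on the cluster object, so a membership check becomes a single map lookup instead of a scan over removed-node entries. A minimal sketch, assuming the regenerated bindings (values are hypothetical):

    // assumes: import "github.com/docker/swarmkit/api"
    blacklist := map[string]*api.BlacklistedCertificate{
        "node-abc123": {}, // Expiry stays nil when no certificate could be parsed
    }
    if _, banned := blacklist["node-abc123"]; banned {
        // reject: a certificate with this CN was removed from the swarm
    }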
| ... | ... |
@@ -80,7 +80,7 @@ func certSubjectFromContext(ctx context.Context) (pkix.Name, error) {
|
| 80 | 80 |
|
| 81 | 81 |
// AuthorizeOrgAndRole takes in a context and a list of roles, and returns |
| 82 | 82 |
// the Node ID of the node. |
| 83 |
-func AuthorizeOrgAndRole(ctx context.Context, org string, removedNodes []*api.RemovedNode, ou ...string) (string, error) {
|
|
| 83 |
+func AuthorizeOrgAndRole(ctx context.Context, org string, blacklistedCerts map[string]*api.BlacklistedCertificate, ou ...string) (string, error) {
|
|
| 84 | 84 |
certSubj, err := certSubjectFromContext(ctx) |
| 85 | 85 |
if err != nil {
|
| 86 | 86 |
return "", err |
| ... | ... |
@@ -88,7 +88,7 @@ func AuthorizeOrgAndRole(ctx context.Context, org string, removedNodes []*api.Re |
| 88 | 88 |
// Check if the current certificate has an OU that authorizes |
| 89 | 89 |
// access to this method |
| 90 | 90 |
if intersectArrays(certSubj.OrganizationalUnit, ou) {
|
| 91 |
- return authorizeOrg(certSubj, org, removedNodes) |
|
| 91 |
+ return authorizeOrg(certSubj, org, blacklistedCerts) |
|
| 92 | 92 |
} |
| 93 | 93 |
|
| 94 | 94 |
return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: remote certificate not part of OUs: %v", ou) |
| ... | ... |
@@ -96,11 +96,9 @@ func AuthorizeOrgAndRole(ctx context.Context, org string, removedNodes []*api.Re |
| 96 | 96 |
|
| 97 | 97 |
// authorizeOrg takes in a certificate subject and an organization, and returns |
| 98 | 98 |
// the Node ID of the node. |
| 99 |
-func authorizeOrg(certSubj pkix.Name, org string, removedNodes []*api.RemovedNode) (string, error) {
|
|
| 100 |
- for _, removedNode := range removedNodes {
|
|
| 101 |
- if removedNode.ID == certSubj.CommonName {
|
|
| 102 |
- return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: node %s was removed from swarm", certSubj.CommonName) |
|
| 103 |
- } |
|
| 99 |
+func authorizeOrg(certSubj pkix.Name, org string, blacklistedCerts map[string]*api.BlacklistedCertificate) (string, error) {
|
|
| 100 |
+ if _, ok := blacklistedCerts[certSubj.CommonName]; ok {
|
|
| 101 |
+ return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: node %s was removed from swarm", certSubj.CommonName) |
|
| 104 | 102 |
} |
| 105 | 103 |
|
| 106 | 104 |
if len(certSubj.Organization) > 0 && certSubj.Organization[0] == org {
|
| ... | ... |
@@ -114,9 +112,9 @@ func authorizeOrg(certSubj pkix.Name, org string, removedNodes []*api.RemovedNod |
| 114 | 114 |
// been proxied by a manager, in which case the manager is authenticated and |
| 115 | 115 |
// so is the certificate information that it forwarded. It returns the node ID |
| 116 | 116 |
// of the original client. |
| 117 |
-func AuthorizeForwardedRoleAndOrg(ctx context.Context, authorizedRoles, forwarderRoles []string, org string, removedNodes []*api.RemovedNode) (string, error) {
|
|
| 117 |
+func AuthorizeForwardedRoleAndOrg(ctx context.Context, authorizedRoles, forwarderRoles []string, org string, blacklistedCerts map[string]*api.BlacklistedCertificate) (string, error) {
|
|
| 118 | 118 |
if isForwardedRequest(ctx) {
|
| 119 |
- _, err := AuthorizeOrgAndRole(ctx, org, removedNodes, forwarderRoles...) |
|
| 119 |
+ _, err := AuthorizeOrgAndRole(ctx, org, blacklistedCerts, forwarderRoles...) |
|
| 120 | 120 |
if err != nil {
|
| 121 | 121 |
return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: unauthorized forwarder role: %v", err) |
| 122 | 122 |
} |
| ... | ... |
@@ -142,7 +140,7 @@ func AuthorizeForwardedRoleAndOrg(ctx context.Context, authorizedRoles, forwarde |
| 142 | 142 |
} |
| 143 | 143 |
|
| 144 | 144 |
// There wasn't any node being forwarded, check if this is a direct call by the expected role |
| 145 |
- nodeID, err := AuthorizeOrgAndRole(ctx, org, removedNodes, authorizedRoles...) |
|
| 145 |
+ nodeID, err := AuthorizeOrgAndRole(ctx, org, blacklistedCerts, authorizedRoles...) |
|
| 146 | 146 |
if err == nil {
|
| 147 | 147 |
return nodeID, nil |
| 148 | 148 |
} |
| ... | ... |
@@ -168,9 +168,9 @@ func (s *Server) IssueNodeCertificate(ctx context.Context, request *api.IssueNod |
| 168 | 168 |
defer s.doneTask() |
| 169 | 169 |
|
| 170 | 170 |
var ( |
| 171 |
- removedNodes []*api.RemovedNode |
|
| 172 |
- clusters []*api.Cluster |
|
| 173 |
- err error |
|
| 171 |
+ blacklistedCerts map[string]*api.BlacklistedCertificate |
|
| 172 |
+ clusters []*api.Cluster |
|
| 173 |
+ err error |
|
| 174 | 174 |
) |
| 175 | 175 |
|
| 176 | 176 |
s.store.View(func(readTx store.ReadTx) {
|
| ... | ... |
@@ -181,19 +181,19 @@ func (s *Server) IssueNodeCertificate(ctx context.Context, request *api.IssueNod |
| 181 | 181 |
// Not having a cluster object yet means we can't check |
| 182 | 182 |
// the blacklist. |
| 183 | 183 |
if err == nil && len(clusters) == 1 {
|
| 184 |
- removedNodes = clusters[0].RemovedNodes |
|
| 184 |
+ blacklistedCerts = clusters[0].BlacklistedCertificates |
|
| 185 | 185 |
} |
| 186 | 186 |
|
| 187 | 187 |
// If the remote node is a worker (either forwarded by a manager, or calling directly), |
| 188 | 188 |
// issue a renew worker certificate entry with the correct ID |
| 189 |
- nodeID, err := AuthorizeForwardedRoleAndOrg(ctx, []string{WorkerRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization(), removedNodes)
|
|
| 189 |
+ nodeID, err := AuthorizeForwardedRoleAndOrg(ctx, []string{WorkerRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization(), blacklistedCerts)
|
|
| 190 | 190 |
if err == nil {
|
| 191 | 191 |
return s.issueRenewCertificate(ctx, nodeID, request.CSR) |
| 192 | 192 |
} |
| 193 | 193 |
|
| 194 | 194 |
// If the remote node is a manager (either forwarded by another manager, or calling directly), |
| 195 | 195 |
// issue a renew certificate entry with the correct ID |
| 196 |
- nodeID, err = AuthorizeForwardedRoleAndOrg(ctx, []string{ManagerRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization(), removedNodes)
|
|
| 196 |
+ nodeID, err = AuthorizeForwardedRoleAndOrg(ctx, []string{ManagerRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization(), blacklistedCerts)
|
|
| 197 | 197 |
if err == nil {
|
| 198 | 198 |
return s.issueRenewCertificate(ctx, nodeID, request.CSR) |
| 199 | 199 |
} |
| ... | ... |
@@ -542,18 +542,20 @@ func (s *Server) updateCluster(ctx context.Context, cluster *api.Cluster) {
|
| 542 | 542 |
func (s *Server) evaluateAndSignNodeCert(ctx context.Context, node *api.Node) error {
|
| 543 | 543 |
// If the desired membership and actual state are in sync, there's |
| 544 | 544 |
// nothing to do. |
| 545 |
- if node.Spec.Membership == api.NodeMembershipAccepted && node.Certificate.Status.State == api.IssuanceStateIssued {
|
|
| 545 |
+ certState := node.Certificate.Status.State |
|
| 546 |
+ if node.Spec.Membership == api.NodeMembershipAccepted && |
|
| 547 |
+ (certState == api.IssuanceStateIssued || certState == api.IssuanceStateRotate) {
|
|
| 546 | 548 |
return nil |
| 547 | 549 |
} |
| 548 | 550 |
|
| 549 | 551 |
// If the certificate state is renew, then it is a server-sided accepted cert (cert renewals) |
| 550 |
- if node.Certificate.Status.State == api.IssuanceStateRenew {
|
|
| 552 |
+ if certState == api.IssuanceStateRenew {
|
|
| 551 | 553 |
return s.signNodeCert(ctx, node) |
| 552 | 554 |
} |
| 553 | 555 |
|
| 554 | 556 |
// Sign this certificate if a user explicitly changed it to Accepted, and |
| 555 | 557 |
// the certificate is in pending state |
| 556 |
- if node.Spec.Membership == api.NodeMembershipAccepted && node.Certificate.Status.State == api.IssuanceStatePending {
|
|
| 558 |
+ if node.Spec.Membership == api.NodeMembershipAccepted && certState == api.IssuanceStatePending {
|
|
| 557 | 559 |
return s.signNodeCert(ctx, node) |
| 558 | 560 |
} |
| 559 | 561 |
|
| ... | ... |
@@ -688,7 +690,8 @@ func (s *Server) reconcileNodeCertificates(ctx context.Context, nodes []*api.Nod |
| 688 | 688 |
|
| 689 | 689 |
// A successfully issued certificate and a failed certificate are our current final states |
| 690 | 690 |
func isFinalState(status api.IssuanceStatus) bool {
|
| 691 |
- if status.State == api.IssuanceStateIssued || status.State == api.IssuanceStateFailed {
|
|
| 691 |
+ if status.State == api.IssuanceStateIssued || status.State == api.IssuanceStateFailed || |
|
| 692 |
+ status.State == api.IssuanceStateRotate {
|
|
| 692 | 693 |
return true |
| 693 | 694 |
} |
| 694 | 695 |
|
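With both hunks applied, Rotate is terminal from the CA's point of view (no signing work left to do) while still telling workers to request a fresh certificate. A hedged restatement of the resulting signing decision, assuming the regenerated bindings; it mirrors the code above rather than adding behaviour:

    // assumes: import "github.com/docker/swarmkit/api"
    func caShouldSign(node *api.Node) bool {
        state := node.Certificate.Status.State
        switch {
        case state == api.IssuanceStateRenew:
            return true // server-side accepted renewal
        case node.Spec.Membership == api.NodeMembershipAccepted && state == api.IssuanceStatePending:
            return true // operator accepted a pending CSR
        default:
            return false // Issued, Failed and now Rotate are all final here
        }
    }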
| ... | ... |
@@ -22,8 +22,12 @@ const ( |
| 22 | 22 |
|
| 23 | 23 |
ingressNetworkName = "ingress" |
| 24 | 24 |
ingressSubnet = "10.255.0.0/16" |
| 25 |
+ |
|
| 26 |
+ allocatedStatusMessage = "pending task scheduling" |
|
| 25 | 27 |
) |
| 26 | 28 |
|
| 29 |
+var errNoChanges = errors.New("task unchanged")
|
|
| 30 |
+ |
|
| 27 | 31 |
func newIngressNetwork() *api.Network {
|
| 28 | 32 |
return &api.Network{
|
| 29 | 33 |
Spec: api.NetworkSpec{
|
| ... | ... |
@@ -134,17 +138,13 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
|
| 134 | 134 |
if !na.IsAllocated(nc.ingressNetwork) {
|
| 135 | 135 |
if err := a.allocateNetwork(ctx, nc.ingressNetwork); err != nil {
|
| 136 | 136 |
log.G(ctx).WithError(err).Error("failed allocating ingress network during init")
|
| 137 |
- } |
|
| 138 |
- |
|
| 139 |
- // Update store after allocation |
|
| 140 |
- if err := a.store.Update(func(tx store.Tx) error {
|
|
| 141 |
- if err := store.UpdateNetwork(tx, nc.ingressNetwork); err != nil {
|
|
| 142 |
- return err |
|
| 137 |
+ } else if _, err := a.store.Batch(func(batch *store.Batch) error {
|
|
| 138 |
+ if err := a.commitAllocatedNetwork(ctx, batch, nc.ingressNetwork); err != nil {
|
|
| 139 |
+ log.G(ctx).WithError(err).Error("failed committing allocation of ingress network during init")
|
|
| 143 | 140 |
} |
| 144 |
- |
|
| 145 | 141 |
return nil |
| 146 | 142 |
}); err != nil {
|
| 147 |
- return errors.Wrap(err, "failed to create ingress network") |
|
| 143 |
+ log.G(ctx).WithError(err).Error("failed committing allocation of ingress network during init")
|
|
| 148 | 144 |
} |
| 149 | 145 |
} |
| 150 | 146 |
|
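This is the pattern applied throughout doNetworkInit and the event handlers below: allocation is done in memory first, and the store writes are gathered into one store.Batch via the new commitAllocated* helpers, with failures logged instead of aborting init. A schematic of the shape, with hypothetical slice names:

    // Schematic only; `found` is a hypothetical slice of networks read from the store.
    var ready []*api.Network
    for _, n := range found {
        if err := a.allocateNetwork(ctx, n); err != nil {
            log.G(ctx).WithError(err).Error("allocation failed; object stays unallocated")
            continue
        }
        ready = append(ready, n)
    }
    if _, err := a.store.Batch(func(batch *store.Batch) error {
        for _, n := range ready {
            if err := a.commitAllocatedNetwork(ctx, batch, n); err != nil {
                log.G(ctx).WithError(err).Error("commit failed")
            }
        }
        return nil
    }); err != nil {
        log.G(ctx).WithError(err).Error("store batch failed")
    }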
| ... | ... |
@@ -157,6 +157,7 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
|
| 157 | 157 |
return errors.Wrap(err, "error listing all networks in store while trying to allocate during init") |
| 158 | 158 |
} |
| 159 | 159 |
|
| 160 |
+ var allocatedNetworks []*api.Network |
|
| 160 | 161 |
for _, n := range networks {
|
| 161 | 162 |
if na.IsAllocated(n) {
|
| 162 | 163 |
continue |
| ... | ... |
@@ -164,7 +165,20 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
|
| 164 | 164 |
|
| 165 | 165 |
if err := a.allocateNetwork(ctx, n); err != nil {
|
| 166 | 166 |
log.G(ctx).WithError(err).Errorf("failed allocating network %s during init", n.ID)
|
| 167 |
+ continue |
|
| 168 |
+ } |
|
| 169 |
+ allocatedNetworks = append(allocatedNetworks, n) |
|
| 170 |
+ } |
|
| 171 |
+ |
|
| 172 |
+ if _, err := a.store.Batch(func(batch *store.Batch) error {
|
|
| 173 |
+ for _, n := range allocatedNetworks {
|
|
| 174 |
+ if err := a.commitAllocatedNetwork(ctx, batch, n); err != nil {
|
|
| 175 |
+ log.G(ctx).WithError(err).Errorf("failed committing allocation of network %s during init", n.ID)
|
|
| 176 |
+ } |
|
| 167 | 177 |
} |
| 178 |
+ return nil |
|
| 179 |
+ }); err != nil {
|
|
| 180 |
+ log.G(ctx).WithError(err).Error("failed committing allocation of networks during init")
|
|
| 168 | 181 |
} |
| 169 | 182 |
|
| 170 | 183 |
// Allocate nodes in the store so far before we process watched events. |
| ... | ... |
@@ -176,6 +190,7 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
|
| 176 | 176 |
return errors.Wrap(err, "error listing all nodes in store while trying to allocate during init") |
| 177 | 177 |
} |
| 178 | 178 |
|
| 179 |
+ var allocatedNodes []*api.Node |
|
| 179 | 180 |
for _, node := range nodes {
|
| 180 | 181 |
if na.IsNodeAllocated(node) {
|
| 181 | 182 |
continue |
| ... | ... |
@@ -188,7 +203,21 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
|
| 188 | 188 |
node.Attachment.Network = nc.ingressNetwork.Copy() |
| 189 | 189 |
if err := a.allocateNode(ctx, node); err != nil {
|
| 190 | 190 |
log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s during init", node.ID)
|
| 191 |
+ continue |
|
| 192 |
+ } |
|
| 193 |
+ |
|
| 194 |
+ allocatedNodes = append(allocatedNodes, node) |
|
| 195 |
+ } |
|
| 196 |
+ |
|
| 197 |
+ if _, err := a.store.Batch(func(batch *store.Batch) error {
|
|
| 198 |
+ for _, node := range allocatedNodes {
|
|
| 199 |
+ if err := a.commitAllocatedNode(ctx, batch, node); err != nil {
|
|
| 200 |
+ log.G(ctx).WithError(err).Errorf("Failed to commit allocation of network resources for node %s during init", node.ID)
|
|
| 201 |
+ } |
|
| 191 | 202 |
} |
| 203 |
+ return nil |
|
| 204 |
+ }); err != nil {
|
|
| 205 |
+ log.G(ctx).WithError(err).Error("Failed to commit allocation of network resources for nodes during init")
|
|
| 192 | 206 |
} |
| 193 | 207 |
|
| 194 | 208 |
// Allocate services in the store so far before we process watched events. |
| ... | ... |
@@ -200,6 +229,7 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
|
| 200 | 200 |
return errors.Wrap(err, "error listing all services in store while trying to allocate during init") |
| 201 | 201 |
} |
| 202 | 202 |
|
| 203 |
+ var allocatedServices []*api.Service |
|
| 203 | 204 |
for _, s := range services {
|
| 204 | 205 |
if nc.nwkAllocator.IsServiceAllocated(s) {
|
| 205 | 206 |
continue |
| ... | ... |
@@ -207,11 +237,27 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
|
| 207 | 207 |
|
| 208 | 208 |
if err := a.allocateService(ctx, s); err != nil {
|
| 209 | 209 |
log.G(ctx).WithError(err).Errorf("failed allocating service %s during init", s.ID)
|
| 210 |
+ continue |
|
| 211 |
+ } |
|
| 212 |
+ allocatedServices = append(allocatedServices, s) |
|
| 213 |
+ } |
|
| 214 |
+ |
|
| 215 |
+ if _, err := a.store.Batch(func(batch *store.Batch) error {
|
|
| 216 |
+ for _, s := range allocatedServices {
|
|
| 217 |
+ if err := a.commitAllocatedService(ctx, batch, s); err != nil {
|
|
| 218 |
+ log.G(ctx).WithError(err).Errorf("failed committing allocation of service %s during init", s.ID)
|
|
| 219 |
+ } |
|
| 210 | 220 |
} |
| 221 |
+ return nil |
|
| 222 |
+ }); err != nil {
|
|
| 223 |
+ log.G(ctx).WithError(err).Error("failed committing allocation of services during init")
|
|
| 211 | 224 |
} |
| 212 | 225 |
|
| 213 | 226 |
// Allocate tasks in the store so far before we started watching. |
| 214 |
- var tasks []*api.Task |
|
| 227 |
+ var ( |
|
| 228 |
+ tasks []*api.Task |
|
| 229 |
+ allocatedTasks []*api.Task |
|
| 230 |
+ ) |
|
| 215 | 231 |
a.store.View(func(tx store.ReadTx) {
|
| 216 | 232 |
tasks, err = store.FindTasks(tx, store.All) |
| 217 | 233 |
}) |
| ... | ... |
@@ -219,66 +265,56 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
|
| 219 | 219 |
return errors.Wrap(err, "error listing all tasks in store while trying to allocate during init") |
| 220 | 220 |
} |
| 221 | 221 |
|
| 222 |
- if _, err := a.store.Batch(func(batch *store.Batch) error {
|
|
| 223 |
- for _, t := range tasks {
|
|
| 224 |
- if taskDead(t) {
|
|
| 225 |
- continue |
|
| 226 |
- } |
|
| 227 |
- |
|
| 228 |
- var s *api.Service |
|
| 229 |
- if t.ServiceID != "" {
|
|
| 230 |
- a.store.View(func(tx store.ReadTx) {
|
|
| 231 |
- s = store.GetService(tx, t.ServiceID) |
|
| 232 |
- }) |
|
| 233 |
- } |
|
| 222 |
+ for _, t := range tasks {
|
|
| 223 |
+ if taskDead(t) {
|
|
| 224 |
+ continue |
|
| 225 |
+ } |
|
| 234 | 226 |
|
| 235 |
- // Populate network attachments in the task |
|
| 236 |
- // based on service spec. |
|
| 237 |
- a.taskCreateNetworkAttachments(t, s) |
|
| 227 |
+ var s *api.Service |
|
| 228 |
+ if t.ServiceID != "" {
|
|
| 229 |
+ a.store.View(func(tx store.ReadTx) {
|
|
| 230 |
+ s = store.GetService(tx, t.ServiceID) |
|
| 231 |
+ }) |
|
| 232 |
+ } |
|
| 238 | 233 |
|
| 239 |
- if taskReadyForNetworkVote(t, s, nc) {
|
|
| 240 |
- if t.Status.State >= api.TaskStateAllocated {
|
|
| 241 |
- continue |
|
| 242 |
- } |
|
| 234 |
+ // Populate network attachments in the task |
|
| 235 |
+ // based on service spec. |
|
| 236 |
+ a.taskCreateNetworkAttachments(t, s) |
|
| 243 | 237 |
|
| 244 |
- if a.taskAllocateVote(networkVoter, t.ID) {
|
|
| 245 |
- // If the task is not attached to any network, network |
|
| 246 |
- // allocators job is done. Immediately cast a vote so |
|
| 247 |
- // that the task can be moved to ALLOCATED state as |
|
| 248 |
- // soon as possible. |
|
| 249 |
- if err := batch.Update(func(tx store.Tx) error {
|
|
| 250 |
- storeT := store.GetTask(tx, t.ID) |
|
| 251 |
- if storeT == nil {
|
|
| 252 |
- return fmt.Errorf("task %s not found while trying to update state", t.ID)
|
|
| 253 |
- } |
|
| 254 |
- |
|
| 255 |
- updateTaskStatus(storeT, api.TaskStateAllocated, "allocated") |
|
| 256 |
- |
|
| 257 |
- if err := store.UpdateTask(tx, storeT); err != nil {
|
|
| 258 |
- return errors.Wrapf(err, "failed updating state in store transaction for task %s", storeT.ID) |
|
| 259 |
- } |
|
| 260 |
- |
|
| 261 |
- return nil |
|
| 262 |
- }); err != nil {
|
|
| 263 |
- log.G(ctx).WithError(err).Error("error updating task network")
|
|
| 264 |
- } |
|
| 265 |
- } |
|
| 238 |
+ if taskReadyForNetworkVote(t, s, nc) {
|
|
| 239 |
+ if t.Status.State >= api.TaskStatePending {
|
|
| 266 | 240 |
continue |
| 267 | 241 |
} |
| 268 | 242 |
|
| 269 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 270 |
- _, err := a.allocateTask(ctx, tx, t) |
|
| 271 |
- return err |
|
| 272 |
- }) |
|
| 273 |
- if err != nil {
|
|
| 274 |
- log.G(ctx).WithError(err).Errorf("failed allocating task %s during init", t.ID)
|
|
| 275 |
- nc.unallocatedTasks[t.ID] = t |
|
| 243 |
+ if a.taskAllocateVote(networkVoter, t.ID) {
|
|
| 244 |
+ // If the task is not attached to any network, network |
|
| 245 |
+ // allocators job is done. Immediately cast a vote so |
|
| 246 |
+ // that the task can be moved to ALLOCATED state as |
|
| 247 |
+ // soon as possible. |
|
| 248 |
+ allocatedTasks = append(allocatedTasks, t) |
|
| 249 |
+ } |
|
| 250 |
+ continue |
|
| 251 |
+ } |
|
| 252 |
+ |
|
| 253 |
+ err := a.allocateTask(ctx, t) |
|
| 254 |
+ if err == nil {
|
|
| 255 |
+ allocatedTasks = append(allocatedTasks, t) |
|
| 256 |
+ } else if err != errNoChanges {
|
|
| 257 |
+ log.G(ctx).WithError(err).Errorf("failed allocating task %s during init", t.ID)
|
|
| 258 |
+ nc.unallocatedTasks[t.ID] = t |
|
| 259 |
+ } |
|
| 260 |
+ } |
|
| 261 |
+ |
|
| 262 |
+ if _, err := a.store.Batch(func(batch *store.Batch) error {
|
|
| 263 |
+ for _, t := range allocatedTasks {
|
|
| 264 |
+ if err := a.commitAllocatedTask(ctx, batch, t); err != nil {
|
|
| 265 |
+ log.G(ctx).WithError(err).Errorf("failed committing allocation of task %s during init", t.ID)
|
|
| 276 | 266 |
} |
| 277 | 267 |
} |
| 278 | 268 |
|
| 279 | 269 |
return nil |
| 280 | 270 |
}); err != nil {
|
| 281 |
- return err |
|
| 271 |
+ log.G(ctx).WithError(err).Error("failed committing allocation of tasks during init")
|
|
| 282 | 272 |
} |
| 283 | 273 |
|
| 284 | 274 |
return nil |
| ... | ... |
@@ -298,6 +334,12 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
| 298 | 298 |
log.G(ctx).WithError(err).Errorf("Failed allocation for network %s", n.ID)
|
| 299 | 299 |
break |
| 300 | 300 |
} |
| 301 |
+ |
|
| 302 |
+ if _, err := a.store.Batch(func(batch *store.Batch) error {
|
|
| 303 |
+ return a.commitAllocatedNetwork(ctx, batch, n) |
|
| 304 |
+ }); err != nil {
|
|
| 305 |
+ log.G(ctx).WithError(err).Errorf("Failed to commit allocation for network %s", n.ID)
|
|
| 306 |
+ } |
|
| 301 | 307 |
case state.EventDeleteNetwork: |
| 302 | 308 |
n := v.Network.Copy() |
| 303 | 309 |
|
| ... | ... |
@@ -319,6 +361,12 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
| 319 | 319 |
log.G(ctx).WithError(err).Errorf("Failed allocation for service %s", s.ID)
|
| 320 | 320 |
break |
| 321 | 321 |
} |
| 322 |
+ |
|
| 323 |
+ if _, err := a.store.Batch(func(batch *store.Batch) error {
|
|
| 324 |
+ return a.commitAllocatedService(ctx, batch, s) |
|
| 325 |
+ }); err != nil {
|
|
| 326 |
+ log.G(ctx).WithError(err).Errorf("Failed to commit allocation for service %s", s.ID)
|
|
| 327 |
+ } |
|
| 322 | 328 |
case state.EventUpdateService: |
| 323 | 329 |
s := v.Service.Copy() |
| 324 | 330 |
|
| ... | ... |
@@ -330,6 +378,12 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
| 330 | 330 |
log.G(ctx).WithError(err).Errorf("Failed allocation during update of service %s", s.ID)
|
| 331 | 331 |
break |
| 332 | 332 |
} |
| 333 |
+ |
|
| 334 |
+ if _, err := a.store.Batch(func(batch *store.Batch) error {
|
|
| 335 |
+ return a.commitAllocatedService(ctx, batch, s) |
|
| 336 |
+ }); err != nil {
|
|
| 337 |
+ log.G(ctx).WithError(err).Errorf("Failed to commit allocation during update for service %s", s.ID)
|
|
| 338 |
+ } |
|
| 333 | 339 |
case state.EventDeleteService: |
| 334 | 340 |
s := v.Service.Copy() |
| 335 | 341 |
|
| ... | ... |
@@ -387,6 +441,13 @@ func (a *Allocator) doNodeAlloc(ctx context.Context, ev events.Event) {
|
| 387 | 387 |
node.Attachment.Network = nc.ingressNetwork.Copy() |
| 388 | 388 |
if err := a.allocateNode(ctx, node); err != nil {
|
| 389 | 389 |
log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s", node.ID)
|
| 390 |
+ return |
|
| 391 |
+ } |
|
| 392 |
+ |
|
| 393 |
+ if _, err := a.store.Batch(func(batch *store.Batch) error {
|
|
| 394 |
+ return a.commitAllocatedNode(ctx, batch, node) |
|
| 395 |
+ }); err != nil {
|
|
| 396 |
+ log.G(ctx).WithError(err).Errorf("Failed to commit allocation of network resources for node %s", node.ID)
|
|
| 390 | 397 |
} |
| 391 | 398 |
} |
| 392 | 399 |
} |
| ... | ... |
@@ -503,7 +564,7 @@ func (a *Allocator) doTaskAlloc(ctx context.Context, ev events.Event) {
|
| 503 | 503 |
|
| 504 | 504 |
// If we are already in allocated state, there is |
| 505 | 505 |
// absolutely nothing else to do. |
| 506 |
- if t.Status.State >= api.TaskStateAllocated {
|
|
| 506 |
+ if t.Status.State >= api.TaskStatePending {
|
|
| 507 | 507 |
delete(nc.unallocatedTasks, t.ID) |
| 508 | 508 |
return |
| 509 | 509 |
} |
| ... | ... |
@@ -537,31 +598,22 @@ func (a *Allocator) doTaskAlloc(ctx context.Context, ev events.Event) {
|
| 537 | 537 |
} |
| 538 | 538 |
|
| 539 | 539 |
func (a *Allocator) allocateNode(ctx context.Context, node *api.Node) error {
|
| 540 |
- nc := a.netCtx |
|
| 541 |
- |
|
| 542 |
- if err := nc.nwkAllocator.AllocateNode(node); err != nil {
|
|
| 543 |
- return err |
|
| 544 |
- } |
|
| 545 |
- |
|
| 546 |
- if err := a.store.Update(func(tx store.Tx) error {
|
|
| 547 |
- for {
|
|
| 548 |
- err := store.UpdateNode(tx, node) |
|
| 549 |
- if err != nil && err != store.ErrSequenceConflict {
|
|
| 550 |
- return errors.Wrapf(err, "failed updating state in store transaction for node %s", node.ID) |
|
| 551 |
- } |
|
| 540 |
+ return a.netCtx.nwkAllocator.AllocateNode(node) |
|
| 541 |
+} |
|
| 552 | 542 |
|
| 553 |
- if err == store.ErrSequenceConflict {
|
|
| 554 |
- storeNode := store.GetNode(tx, node.ID) |
|
| 555 |
- storeNode.Attachment = node.Attachment.Copy() |
|
| 556 |
- node = storeNode |
|
| 557 |
- continue |
|
| 558 |
- } |
|
| 543 |
+func (a *Allocator) commitAllocatedNode(ctx context.Context, batch *store.Batch, node *api.Node) error {
|
|
| 544 |
+ if err := batch.Update(func(tx store.Tx) error {
|
|
| 545 |
+ err := store.UpdateNode(tx, node) |
|
| 559 | 546 |
|
| 560 |
- break |
|
| 547 |
+ if err == store.ErrSequenceConflict {
|
|
| 548 |
+ storeNode := store.GetNode(tx, node.ID) |
|
| 549 |
+ storeNode.Attachment = node.Attachment.Copy() |
|
| 550 |
+ err = store.UpdateNode(tx, storeNode) |
|
| 561 | 551 |
} |
| 562 |
- return nil |
|
| 552 |
+ |
|
| 553 |
+ return errors.Wrapf(err, "failed updating state in store transaction for node %s", node.ID) |
|
| 563 | 554 |
}); err != nil {
|
| 564 |
- if err := nc.nwkAllocator.DeallocateNode(node); err != nil {
|
|
| 555 |
+ if err := a.netCtx.nwkAllocator.DeallocateNode(node); err != nil {
|
|
| 565 | 556 |
log.G(ctx).WithError(err).Errorf("failed rolling back allocation of node %s", node.ID)
|
| 566 | 557 |
} |
| 567 | 558 |
|
| ... | ... |
@@ -628,27 +680,22 @@ func (a *Allocator) allocateService(ctx context.Context, s *api.Service) error {
|
| 628 | 628 |
} |
| 629 | 629 |
} |
| 630 | 630 |
} |
| 631 |
+ return nil |
|
| 632 |
+} |
|
| 631 | 633 |
|
| 632 |
- if err := a.store.Update(func(tx store.Tx) error {
|
|
| 633 |
- for {
|
|
| 634 |
- err := store.UpdateService(tx, s) |
|
| 635 |
- |
|
| 636 |
- if err != nil && err != store.ErrSequenceConflict {
|
|
| 637 |
- return errors.Wrapf(err, "failed updating state in store transaction for service %s", s.ID) |
|
| 638 |
- } |
|
| 639 |
- |
|
| 640 |
- if err == store.ErrSequenceConflict {
|
|
| 641 |
- storeService := store.GetService(tx, s.ID) |
|
| 642 |
- storeService.Endpoint = s.Endpoint |
|
| 643 |
- s = storeService |
|
| 644 |
- continue |
|
| 645 |
- } |
|
| 634 |
+func (a *Allocator) commitAllocatedService(ctx context.Context, batch *store.Batch, s *api.Service) error {
|
|
| 635 |
+ if err := batch.Update(func(tx store.Tx) error {
|
|
| 636 |
+ err := store.UpdateService(tx, s) |
|
| 646 | 637 |
|
| 647 |
- break |
|
| 638 |
+ if err == store.ErrSequenceConflict {
|
|
| 639 |
+ storeService := store.GetService(tx, s.ID) |
|
| 640 |
+ storeService.Endpoint = s.Endpoint |
|
| 641 |
+ err = store.UpdateService(tx, storeService) |
|
| 648 | 642 |
} |
| 649 |
- return nil |
|
| 643 |
+ |
|
| 644 |
+ return errors.Wrapf(err, "failed updating state in store transaction for service %s", s.ID) |
|
| 650 | 645 |
}); err != nil {
|
| 651 |
- if err := nc.nwkAllocator.ServiceDeallocate(s); err != nil {
|
|
| 646 |
+ if err := a.netCtx.nwkAllocator.ServiceDeallocate(s); err != nil {
|
|
| 652 | 647 |
log.G(ctx).WithError(err).Errorf("failed rolling back allocation of service %s", s.ID)
|
| 653 | 648 |
} |
| 654 | 649 |
|
| ... | ... |
@@ -666,13 +713,17 @@ func (a *Allocator) allocateNetwork(ctx context.Context, n *api.Network) error {
|
| 666 | 666 |
return errors.Wrapf(err, "failed during network allocation for network %s", n.ID) |
| 667 | 667 |
} |
| 668 | 668 |
|
| 669 |
- if err := a.store.Update(func(tx store.Tx) error {
|
|
| 669 |
+ return nil |
|
| 670 |
+} |
|
| 671 |
+ |
|
| 672 |
+func (a *Allocator) commitAllocatedNetwork(ctx context.Context, batch *store.Batch, n *api.Network) error {
|
|
| 673 |
+ if err := batch.Update(func(tx store.Tx) error {
|
|
| 670 | 674 |
if err := store.UpdateNetwork(tx, n); err != nil {
|
| 671 | 675 |
return errors.Wrapf(err, "failed updating state in store transaction for network %s", n.ID) |
| 672 | 676 |
} |
| 673 | 677 |
return nil |
| 674 | 678 |
}); err != nil {
|
| 675 |
- if err := nc.nwkAllocator.Deallocate(n); err != nil {
|
|
| 679 |
+ if err := a.netCtx.nwkAllocator.Deallocate(n); err != nil {
|
|
| 676 | 680 |
log.G(ctx).WithError(err).Errorf("failed rolling back allocation of network %s", n.ID)
|
| 677 | 681 |
} |
| 678 | 682 |
|
| ... | ... |
@@ -682,15 +733,8 @@ func (a *Allocator) allocateNetwork(ctx context.Context, n *api.Network) error {
|
| 682 | 682 |
return nil |
| 683 | 683 |
} |
| 684 | 684 |
|
| 685 |
-func (a *Allocator) allocateTask(ctx context.Context, tx store.Tx, t *api.Task) (*api.Task, error) {
|
|
| 685 |
+func (a *Allocator) allocateTask(ctx context.Context, t *api.Task) (err error) {
|
|
| 686 | 686 |
taskUpdated := false |
| 687 |
- |
|
| 688 |
- // Get the latest task state from the store before updating. |
|
| 689 |
- storeT := store.GetTask(tx, t.ID) |
|
| 690 |
- if storeT == nil {
|
|
| 691 |
- return nil, fmt.Errorf("could not find task %s while trying to update network allocation", t.ID)
|
|
| 692 |
- } |
|
| 693 |
- |
|
| 694 | 687 |
nc := a.netCtx |
| 695 | 688 |
|
| 696 | 689 |
// We might be here even if a task allocation has already |
| ... | ... |
@@ -698,163 +742,191 @@ func (a *Allocator) allocateTask(ctx context.Context, tx store.Tx, t *api.Task) |
| 698 | 698 |
// cases skip allocation and go straight ahead to updating the |
| 699 | 699 |
// store. |
| 700 | 700 |
if !nc.nwkAllocator.IsTaskAllocated(t) {
|
| 701 |
- if t.ServiceID != "" {
|
|
| 702 |
- s := store.GetService(tx, t.ServiceID) |
|
| 703 |
- if s == nil {
|
|
| 704 |
- return nil, fmt.Errorf("could not find service %s", t.ServiceID)
|
|
| 705 |
- } |
|
| 701 |
+ a.store.View(func(tx store.ReadTx) {
|
|
| 702 |
+ if t.ServiceID != "" {
|
|
| 703 |
+ s := store.GetService(tx, t.ServiceID) |
|
| 704 |
+ if s == nil {
|
|
| 705 |
+ err = fmt.Errorf("could not find service %s", t.ServiceID)
|
|
| 706 |
+ return |
|
| 707 |
+ } |
|
| 706 | 708 |
|
| 707 |
- if !nc.nwkAllocator.IsServiceAllocated(s) {
|
|
| 708 |
- return nil, fmt.Errorf("service %s to which this task %s belongs has pending allocations", s.ID, t.ID)
|
|
| 709 |
+ if !nc.nwkAllocator.IsServiceAllocated(s) {
|
|
| 710 |
+ err = fmt.Errorf("service %s to which this task %s belongs has pending allocations", s.ID, t.ID)
|
|
| 711 |
+ return |
|
| 712 |
+ } |
|
| 713 |
+ |
|
| 714 |
+ taskUpdateEndpoint(t, s.Endpoint) |
|
| 709 | 715 |
} |
| 710 | 716 |
|
| 711 |
- taskUpdateEndpoint(t, s.Endpoint) |
|
| 712 |
- } |
|
| 717 |
+ for _, na := range t.Networks {
|
|
| 718 |
+ n := store.GetNetwork(tx, na.Network.ID) |
|
| 719 |
+ if n == nil {
|
|
| 720 |
+ err = fmt.Errorf("failed to retrieve network %s while allocating task %s", na.Network.ID, t.ID)
|
|
| 721 |
+ return |
|
| 722 |
+ } |
|
| 713 | 723 |
|
| 714 |
- for _, na := range t.Networks {
|
|
| 715 |
- n := store.GetNetwork(tx, na.Network.ID) |
|
| 716 |
- if n == nil {
|
|
| 717 |
- return nil, fmt.Errorf("failed to retrieve network %s while allocating task %s", na.Network.ID, t.ID)
|
|
| 718 |
- } |
|
| 724 |
+ if !nc.nwkAllocator.IsAllocated(n) {
|
|
| 725 |
+ err = fmt.Errorf("network %s attached to task %s not allocated yet", n.ID, t.ID)
|
|
| 726 |
+ return |
|
| 727 |
+ } |
|
| 719 | 728 |
|
| 720 |
- if !nc.nwkAllocator.IsAllocated(n) {
|
|
| 721 |
- return nil, fmt.Errorf("network %s attached to task %s not allocated yet", n.ID, t.ID)
|
|
| 729 |
+ na.Network = n |
|
| 722 | 730 |
} |
| 723 | 731 |
|
| 724 |
- na.Network = n |
|
| 725 |
- } |
|
| 726 |
- |
|
| 727 |
- if err := nc.nwkAllocator.AllocateTask(t); err != nil {
|
|
| 728 |
- return nil, errors.Wrapf(err, "failed during networktask allocation for task %s", t.ID) |
|
| 729 |
- } |
|
| 730 |
- if nc.nwkAllocator.IsTaskAllocated(t) {
|
|
| 731 |
- taskUpdateNetworks(storeT, t.Networks) |
|
| 732 |
- taskUpdateEndpoint(storeT, t.Endpoint) |
|
| 733 |
- taskUpdated = true |
|
| 732 |
+ if err = nc.nwkAllocator.AllocateTask(t); err != nil {
|
|
| 733 |
+ err = errors.Wrapf(err, "failed during networktask allocation for task %s", t.ID) |
|
| 734 |
+ return |
|
| 735 |
+ } |
|
| 736 |
+ if nc.nwkAllocator.IsTaskAllocated(t) {
|
|
| 737 |
+ taskUpdated = true |
|
| 738 |
+ } |
|
| 739 |
+ }) |
|
| 740 |
+ if err != nil {
|
|
| 741 |
+ return err |
|
| 734 | 742 |
} |
| 735 | 743 |
} |
| 736 | 744 |
|
| 737 | 745 |
// Update the network allocations and moving to |
| 738 |
- // ALLOCATED state on top of the latest store state. |
|
| 746 |
+ // PENDING state on top of the latest store state. |
|
| 739 | 747 |
if a.taskAllocateVote(networkVoter, t.ID) {
|
| 740 |
- if storeT.Status.State < api.TaskStateAllocated {
|
|
| 741 |
- updateTaskStatus(storeT, api.TaskStateAllocated, "allocated") |
|
| 748 |
+ if t.Status.State < api.TaskStatePending {
|
|
| 749 |
+ updateTaskStatus(t, api.TaskStatePending, allocatedStatusMessage) |
|
| 742 | 750 |
taskUpdated = true |
| 743 | 751 |
} |
| 744 | 752 |
} |
| 745 | 753 |
|
| 746 |
- if taskUpdated {
|
|
| 747 |
- if err := store.UpdateTask(tx, storeT); err != nil {
|
|
| 748 |
- return nil, errors.Wrapf(err, "failed updating state in store transaction for task %s", storeT.ID) |
|
| 749 |
- } |
|
| 754 |
+ if !taskUpdated {
|
|
| 755 |
+ return errNoChanges |
|
| 750 | 756 |
} |
| 751 | 757 |
|
| 752 |
- return storeT, nil |
|
| 758 |
+ return nil |
|
| 759 |
+} |
|
| 760 |
+ |
|
| 761 |
+func (a *Allocator) commitAllocatedTask(ctx context.Context, batch *store.Batch, t *api.Task) error {
|
|
| 762 |
+ return batch.Update(func(tx store.Tx) error {
|
|
| 763 |
+ err := store.UpdateTask(tx, t) |
|
| 764 |
+ |
|
| 765 |
+ if err == store.ErrSequenceConflict {
|
|
| 766 |
+ storeTask := store.GetTask(tx, t.ID) |
|
| 767 |
+ taskUpdateNetworks(storeTask, t.Networks) |
|
| 768 |
+ taskUpdateEndpoint(storeTask, t.Endpoint) |
|
| 769 |
+ if storeTask.Status.State < api.TaskStatePending {
|
|
| 770 |
+ storeTask.Status = t.Status |
|
| 771 |
+ } |
|
| 772 |
+ err = store.UpdateTask(tx, storeTask) |
|
| 773 |
+ } |
|
| 774 |
+ |
|
| 775 |
+ return errors.Wrapf(err, "failed updating state in store transaction for task %s", t.ID) |
|
| 776 |
+ }) |
|
| 753 | 777 |
} |
| 754 | 778 |
|
| 755 | 779 |
func (a *Allocator) procUnallocatedNetworks(ctx context.Context) {
|
| 756 | 780 |
nc := a.netCtx |
| 781 |
+ var allocatedNetworks []*api.Network |
|
| 757 | 782 |
for _, n := range nc.unallocatedNetworks {
|
| 758 | 783 |
if !nc.nwkAllocator.IsAllocated(n) {
|
| 759 | 784 |
if err := a.allocateNetwork(ctx, n); err != nil {
|
| 760 |
- log.G(ctx).Debugf("Failed allocation of unallocated network %s: %v", n.ID, err)
|
|
| 785 |
+ log.G(ctx).WithError(err).Debugf("Failed allocation of unallocated network %s", n.ID)
|
|
| 761 | 786 |
continue |
| 762 | 787 |
} |
| 788 |
+ allocatedNetworks = append(allocatedNetworks, n) |
|
| 763 | 789 |
} |
| 790 |
+ } |
|
| 791 |
+ |
|
| 792 |
+ if len(allocatedNetworks) == 0 {
|
|
| 793 |
+ return |
|
| 794 |
+ } |
|
| 764 | 795 |
|
| 796 |
+ committed, err := a.store.Batch(func(batch *store.Batch) error {
|
|
| 797 |
+ for _, n := range allocatedNetworks {
|
|
| 798 |
+ if err := a.commitAllocatedNetwork(ctx, batch, n); err != nil {
|
|
| 799 |
+ log.G(ctx).WithError(err).Debugf("Failed to commit allocation of unallocated network %s", n.ID)
|
|
| 800 |
+ continue |
|
| 801 |
+ } |
|
| 802 |
+ } |
|
| 803 |
+ return nil |
|
| 804 |
+ }) |
|
| 805 |
+ |
|
| 806 |
+ if err != nil {
|
|
| 807 |
+ log.G(ctx).WithError(err).Error("Failed to commit allocation of unallocated networks")
|
|
| 808 |
+ } |
|
| 809 |
+ |
|
| 810 |
+ for _, n := range allocatedNetworks[:committed] {
|
|
| 765 | 811 |
delete(nc.unallocatedNetworks, n.ID) |
| 766 | 812 |
} |
| 767 | 813 |
} |
| 768 | 814 |
|
| 769 | 815 |
func (a *Allocator) procUnallocatedServices(ctx context.Context) {
|
| 770 | 816 |
nc := a.netCtx |
| 817 |
+ var allocatedServices []*api.Service |
|
| 771 | 818 |
for _, s := range nc.unallocatedServices {
|
| 772 | 819 |
if !nc.nwkAllocator.IsServiceAllocated(s) {
|
| 773 | 820 |
if err := a.allocateService(ctx, s); err != nil {
|
| 774 |
- log.G(ctx).Debugf("Failed allocation of unallocated service %s: %v", s.ID, err)
|
|
| 821 |
+ log.G(ctx).WithError(err).Debugf("Failed allocation of unallocated service %s", s.ID)
|
|
| 775 | 822 |
continue |
| 776 | 823 |
} |
| 824 |
+ allocatedServices = append(allocatedServices, s) |
|
| 777 | 825 |
} |
| 778 |
- |
|
| 779 |
- delete(nc.unallocatedServices, s.ID) |
|
| 780 | 826 |
} |
| 781 |
-} |
|
| 782 | 827 |
|
| 783 |
-func (a *Allocator) procUnallocatedTasksNetwork(ctx context.Context) {
|
|
| 784 |
- nc := a.netCtx |
|
| 785 |
- tasks := make([]*api.Task, 0, len(nc.unallocatedTasks)) |
|
| 828 |
+ if len(allocatedServices) == 0 {
|
|
| 829 |
+ return |
|
| 830 |
+ } |
|
| 786 | 831 |
|
| 787 | 832 |
committed, err := a.store.Batch(func(batch *store.Batch) error {
|
| 788 |
- for _, t := range nc.unallocatedTasks {
|
|
| 789 |
- var allocatedT *api.Task |
|
| 790 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 791 |
- var err error |
|
| 792 |
- allocatedT, err = a.allocateTask(ctx, tx, t) |
|
| 793 |
- return err |
|
| 794 |
- }) |
|
| 795 |
- |
|
| 796 |
- if err != nil {
|
|
| 797 |
- log.G(ctx).WithError(err).Error("task allocation failure")
|
|
| 833 |
+ for _, s := range allocatedServices {
|
|
| 834 |
+ if err := a.commitAllocatedService(ctx, batch, s); err != nil {
|
|
| 835 |
+ log.G(ctx).WithError(err).Debugf("Failed to commit allocation of unallocated service %s", s.ID)
|
|
| 798 | 836 |
continue |
| 799 | 837 |
} |
| 800 |
- |
|
| 801 |
- tasks = append(tasks, allocatedT) |
|
| 802 | 838 |
} |
| 803 |
- |
|
| 804 | 839 |
return nil |
| 805 | 840 |
}) |
| 806 | 841 |
|
| 807 | 842 |
if err != nil {
|
| 808 |
- log.G(ctx).WithError(err).Error("failed a store batch operation while processing unallocated tasks")
|
|
| 843 |
+ log.G(ctx).WithError(err).Error("Failed to commit allocation of unallocated services")
|
|
| 809 | 844 |
} |
| 810 | 845 |
|
| 811 |
- var retryCnt int |
|
| 812 |
- for len(tasks) != 0 {
|
|
| 813 |
- var err error |
|
| 846 |
+ for _, s := range allocatedServices[:committed] {
|
|
| 847 |
+ delete(nc.unallocatedServices, s.ID) |
|
| 848 |
+ } |
|
| 849 |
+} |
|
| 814 | 850 |
|
| 815 |
- for _, t := range tasks[:committed] {
|
|
| 816 |
- delete(nc.unallocatedTasks, t.ID) |
|
| 817 |
- } |
|
| 851 |
+func (a *Allocator) procUnallocatedTasksNetwork(ctx context.Context) {
|
|
| 852 |
+ nc := a.netCtx |
|
| 853 |
+ allocatedTasks := make([]*api.Task, 0, len(nc.unallocatedTasks)) |
|
| 818 | 854 |
|
| 819 |
- tasks = tasks[committed:] |
|
| 820 |
- if len(tasks) == 0 {
|
|
| 821 |
- break |
|
| 855 |
+ for _, t := range nc.unallocatedTasks {
|
|
| 856 |
+ if err := a.allocateTask(ctx, t); err == nil {
|
|
| 857 |
+ allocatedTasks = append(allocatedTasks, t) |
|
| 858 |
+ } else if err != errNoChanges {
|
|
| 859 |
+ log.G(ctx).WithError(err).Error("task allocation failure")
|
|
| 822 | 860 |
} |
| 861 |
+ } |
|
| 823 | 862 |
|
| 824 |
- updatedTasks := make([]*api.Task, 0, len(tasks)) |
|
| 825 |
- committed, err = a.store.Batch(func(batch *store.Batch) error {
|
|
| 826 |
- for _, t := range tasks {
|
|
| 827 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 828 |
- return store.UpdateTask(tx, t) |
|
| 829 |
- }) |
|
| 863 |
+ if len(allocatedTasks) == 0 {
|
|
| 864 |
+ return |
|
| 865 |
+ } |
|
| 830 | 866 |
|
| 831 |
- if err != nil {
|
|
| 832 |
- log.G(ctx).WithError(err).Error("allocated task store update failure")
|
|
| 833 |
- continue |
|
| 834 |
- } |
|
| 867 |
+ committed, err := a.store.Batch(func(batch *store.Batch) error {
|
|
| 868 |
+ for _, t := range allocatedTasks {
|
|
| 869 |
+ err := a.commitAllocatedTask(ctx, batch, t) |
|
| 835 | 870 |
|
| 836 |
- updatedTasks = append(updatedTasks, t) |
|
| 871 |
+ if err != nil {
|
|
| 872 |
+ log.G(ctx).WithError(err).Error("task allocation commit failure")
|
|
| 873 |
+ continue |
|
| 837 | 874 |
} |
| 838 |
- |
|
| 839 |
- return nil |
|
| 840 |
- }) |
|
| 841 |
- if err != nil {
|
|
| 842 |
- log.G(ctx).WithError(err).Error("failed a store batch operation while processing unallocated tasks")
|
|
| 843 | 875 |
} |
| 844 | 876 |
|
| 845 |
- tasks = updatedTasks |
|
| 877 |
+ return nil |
|
| 878 |
+ }) |
|
| 846 | 879 |
|
| 847 |
- select {
|
|
| 848 |
- case <-ctx.Done(): |
|
| 849 |
- return |
|
| 850 |
- default: |
|
| 851 |
- } |
|
| 880 |
+ if err != nil {
|
|
| 881 |
+ log.G(ctx).WithError(err).Error("failed a store batch operation while processing unallocated tasks")
|
|
| 882 |
+ } |
|
| 852 | 883 |
|
| 853 |
- retryCnt++ |
|
| 854 |
- if retryCnt >= 3 {
|
|
| 855 |
- log.G(ctx).Error("failed to complete batch update of allocated tasks after 3 retries")
|
|
| 856 |
- break |
|
| 857 |
- } |
|
| 884 |
+ for _, t := range allocatedTasks[:committed] {
|
|
| 885 |
+ delete(nc.unallocatedTasks, t.ID) |
|
| 858 | 886 |
} |
| 859 | 887 |
} |
| 860 | 888 |
|
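store.Batch is also used here for its return value: the count of updates that actually committed, which is why the loops above only clear bookkeeping for allocatedX[:committed]. A hedged sketch of the pattern in isolation (pending, commit and unallocated are hypothetical names):

    committed, err := a.store.Batch(func(batch *store.Batch) error {
        for _, t := range pending {
            if err := commit(ctx, batch, t); err != nil {
                log.G(ctx).WithError(err).Error("commit failed; task stays queued for retry")
            }
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Error("store batch aborted")
    }
    for _, t := range pending[:committed] {
        delete(unallocated, t.ID) // forget only tasks whose update actually landed
    }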
| ... | ... |
@@ -125,6 +125,26 @@ func NodeMatches(constraints []Constraint, n *api.Node) bool {
|
| 125 | 125 |
if !constraint.Match(n.Spec.Role.String()) {
|
| 126 | 126 |
return false |
| 127 | 127 |
} |
| 128 |
+ case strings.EqualFold(constraint.key, "node.platform.os"): |
|
| 129 |
+ if n.Description == nil || n.Description.Platform == nil {
|
|
| 130 |
+ if !constraint.Match("") {
|
|
| 131 |
+ return false |
|
| 132 |
+ } |
|
| 133 |
+ continue |
|
| 134 |
+ } |
|
| 135 |
+ if !constraint.Match(n.Description.Platform.OS) {
|
|
| 136 |
+ return false |
|
| 137 |
+ } |
|
| 138 |
+ case strings.EqualFold(constraint.key, "node.platform.arch"): |
|
| 139 |
+ if n.Description == nil || n.Description.Platform == nil {
|
|
| 140 |
+ if !constraint.Match("") {
|
|
| 141 |
+ return false |
|
| 142 |
+ } |
|
| 143 |
+ continue |
|
| 144 |
+ } |
|
| 145 |
+ if !constraint.Match(n.Description.Platform.Architecture) {
|
|
| 146 |
+ return false |
|
| 147 |
+ } |
|
| 128 | 148 |
|
| 129 | 149 |
// node labels constraint in form like 'node.labels.key==value' |
| 130 | 150 |
case len(constraint.key) > len(nodeLabelPrefix) && strings.EqualFold(constraint.key[:len(nodeLabelPrefix)], nodeLabelPrefix): |
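These two cases let placement constraints target the platform a node reports; nodes with no platform description only match a constraint that accepts the empty string. An illustrative spec fragment, assuming the usual constraint string syntax (spec is a hypothetical *api.ServiceSpec):

    // assumes: import "github.com/docker/swarmkit/api"
    spec.Task.Placement = &api.Placement{
        Constraints: []string{
            "node.platform.os == linux",
            "node.platform.arch == x86_64",
        },
    }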
| ... | ... |
@@ -2,6 +2,7 @@ package controlapi |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"strings" |
| 5 |
+ "time" |
|
| 5 | 6 |
|
| 6 | 7 |
"github.com/docker/swarmkit/api" |
| 7 | 8 |
"github.com/docker/swarmkit/ca" |
| ... | ... |
@@ -12,6 +13,12 @@ import ( |
| 12 | 12 |
"google.golang.org/grpc/codes" |
| 13 | 13 |
) |
| 14 | 14 |
|
| 15 |
+const ( |
|
| 16 |
+ // expiredCertGrace is the amount of time to keep a node in the |
|
| 17 |
+ // blacklist beyond its certificate expiration timestamp. |
|
| 18 |
+ expiredCertGrace = 24 * time.Hour * 7 |
|
| 19 |
+) |
|
| 20 |
+ |
|
| 15 | 21 |
func validateClusterSpec(spec *api.ClusterSpec) error {
|
| 16 | 22 |
if spec == nil {
|
| 17 | 23 |
return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) |
| ... | ... |
@@ -98,6 +105,8 @@ func (s *Server) UpdateCluster(ctx context.Context, request *api.UpdateClusterRe |
| 98 | 98 |
cluster.Meta.Version = *request.ClusterVersion |
| 99 | 99 |
cluster.Spec = *request.Spec.Copy() |
| 100 | 100 |
|
| 101 |
+ expireBlacklistedCerts(cluster) |
|
| 102 |
+ |
|
| 101 | 103 |
if request.Rotation.RotateWorkerToken {
|
| 102 | 104 |
cluster.RootCA.JoinTokens.Worker = ca.GenerateJoinToken(s.rootCA) |
| 103 | 105 |
} |
| ... | ... |
@@ -202,7 +211,7 @@ func redactClusters(clusters []*api.Cluster) []*api.Cluster {
|
| 202 | 202 |
CACertHash: cluster.RootCA.CACertHash, |
| 203 | 203 |
JoinTokens: cluster.RootCA.JoinTokens, |
| 204 | 204 |
}, |
| 205 |
- RemovedNodes: cluster.RemovedNodes, |
|
| 205 |
+ BlacklistedCertificates: cluster.BlacklistedCertificates, |
|
| 206 | 206 |
} |
| 207 | 207 |
|
| 208 | 208 |
redactedClusters = append(redactedClusters, newCluster) |
| ... | ... |
@@ -210,3 +219,18 @@ func redactClusters(clusters []*api.Cluster) []*api.Cluster {
|
| 210 | 210 |
|
| 211 | 211 |
return redactedClusters |
| 212 | 212 |
} |
| 213 |
+ |
|
| 214 |
+func expireBlacklistedCerts(cluster *api.Cluster) {
|
|
| 215 |
+ nowMinusGrace := time.Now().Add(-expiredCertGrace) |
|
| 216 |
+ |
|
| 217 |
+ for cn, blacklistedCert := range cluster.BlacklistedCertificates {
|
|
| 218 |
+ if blacklistedCert.Expiry == nil {
|
|
| 219 |
+ continue |
|
| 220 |
+ } |
|
| 221 |
+ |
|
| 222 |
+ expiry, err := ptypes.Timestamp(blacklistedCert.Expiry) |
|
| 223 |
+ if err == nil && nowMinusGrace.After(expiry) {
|
|
| 224 |
+ delete(cluster.BlacklistedCertificates, cn) |
|
| 225 |
+ } |
|
| 226 |
+ } |
|
| 227 |
+} |
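Entries are kept for a week beyond the certificate's recorded expiry, so a certificate that expired on 2016-10-01 would stay blacklisted until 2016-10-08. The same check in isolation, with a hypothetical timestamp:

    expiry := time.Date(2016, 10, 1, 0, 0, 0, 0, time.UTC)
    if time.Now().Add(-expiredCertGrace).After(expiry) {
        // more than a week past expiry: the CN may be dropped from BlacklistedCertificates
    }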
| ... | ... |
@@ -301,7 +301,7 @@ func (s *Server) RemoveNode(ctx context.Context, request *api.RemoveNodeRequest) |
| 301 | 301 |
} |
| 302 | 302 |
cluster := clusters[0] |
| 303 | 303 |
|
| 304 |
- removedNode := &api.RemovedNode{ID: node.ID}
|
|
| 304 |
+ blacklistedCert := &api.BlacklistedCertificate{}
|
|
| 305 | 305 |
|
| 306 | 306 |
// Set an expiry time for this RemovedNode if a certificate |
| 307 | 307 |
// exists and can be parsed. |
| ... | ... |
@@ -312,13 +312,18 @@ func (s *Server) RemoveNode(ctx context.Context, request *api.RemoveNodeRequest) |
| 312 | 312 |
if err == nil && !X509Cert.NotAfter.IsZero() {
|
| 313 | 313 |
expiry, err := ptypes.TimestampProto(X509Cert.NotAfter) |
| 314 | 314 |
if err == nil {
|
| 315 |
- removedNode.Expiry = expiry |
|
| 315 |
+ blacklistedCert.Expiry = expiry |
|
| 316 | 316 |
} |
| 317 | 317 |
} |
| 318 | 318 |
} |
| 319 | 319 |
} |
| 320 | 320 |
|
| 321 |
- cluster.RemovedNodes = append(cluster.RemovedNodes, removedNode) |
|
| 321 |
+ if cluster.BlacklistedCertificates == nil {
|
|
| 322 |
+ cluster.BlacklistedCertificates = make(map[string]*api.BlacklistedCertificate) |
|
| 323 |
+ } |
|
| 324 |
+ cluster.BlacklistedCertificates[node.ID] = blacklistedCert |
|
| 325 |
+ |
|
| 326 |
+ expireBlacklistedCerts(cluster) |
|
| 322 | 327 |
|
| 323 | 328 |
if err := store.UpdateCluster(tx, cluster); err != nil {
|
| 324 | 329 |
return err |
| ... | ... |
@@ -48,9 +48,50 @@ func (s *Server) GetSecret(ctx context.Context, request *api.GetSecretRequest) ( |
| 48 | 48 |
return nil, grpc.Errorf(codes.NotFound, "secret %s not found", request.SecretID) |
| 49 | 49 |
} |
| 50 | 50 |
|
| 51 |
+ secret.Spec.Data = nil // clean the actual secret data so it's never returned |
|
| 51 | 52 |
return &api.GetSecretResponse{Secret: secret}, nil
|
| 52 | 53 |
} |
| 53 | 54 |
|
| 55 |
+// UpdateSecret updates a Secret referenced by SecretID with the given SecretSpec. |
|
| 56 |
+// - Returns `NotFound` if the Secret is not found. |
|
| 57 |
+// - Returns `InvalidArgument` if the SecretSpec is malformed or anything other than Labels is changed |
|
| 58 |
+// - Returns an error if the update fails. |
|
| 59 |
+func (s *Server) UpdateSecret(ctx context.Context, request *api.UpdateSecretRequest) (*api.UpdateSecretResponse, error) {
|
|
| 60 |
+ if request.SecretID == "" || request.SecretVersion == nil {
|
|
| 61 |
+ return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) |
|
| 62 |
+ } |
|
| 63 |
+ |
|
| 64 |
+ var secret *api.Secret |
|
| 65 |
+ err := s.store.Update(func(tx store.Tx) error {
|
|
| 66 |
+ secret = store.GetSecret(tx, request.SecretID) |
|
| 67 |
+ if secret == nil {
|
|
| 68 |
+ return nil |
|
| 69 |
+ } |
|
| 70 |
+ |
|
| 71 |
+ if secret.Spec.Annotations.Name != request.Spec.Annotations.Name || request.Spec.Data != nil {
|
|
| 72 |
+ return grpc.Errorf(codes.InvalidArgument, "only updates to Labels are allowed") |
|
| 73 |
+ } |
|
| 74 |
+ |
|
| 75 |
+ // We only allow updating Labels |
|
| 76 |
+ secret.Meta.Version = *request.SecretVersion |
|
| 77 |
+ secret.Spec.Annotations.Labels = request.Spec.Annotations.Labels |
|
| 78 |
+ |
|
| 79 |
+ return store.UpdateSecret(tx, secret) |
|
| 80 |
+ }) |
|
| 81 |
+ if err != nil {
|
|
| 82 |
+ return nil, err |
|
| 83 |
+ } |
|
| 84 |
+ if secret == nil {
|
|
| 85 |
+ return nil, grpc.Errorf(codes.NotFound, "secret %s not found", request.SecretID) |
|
| 86 |
+ } |
|
| 87 |
+ |
|
| 88 |
+ // WARN: we should never return the actual secret data here. We need to redact the private fields first. |
|
| 89 |
+ secret.Spec.Data = nil |
|
| 90 |
+ return &api.UpdateSecretResponse{
|
|
| 91 |
+ Secret: secret, |
|
| 92 |
+ }, nil |
|
| 93 |
+} |
|
| 94 |
+ |
|
| 54 | 95 |
// ListSecrets returns a `ListSecretResponse` with a list all non-internal `Secret`s being |
| 55 | 96 |
// managed, or all secrets matching any name in `ListSecretsRequest.Names`, any |
| 56 | 97 |
// name prefix in `ListSecretsRequest.NamePrefixes`, any id in |
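The new RPC only accepts label changes: the name must stay the same and Data must be nil, and the returned secret has its data redacted just like GetSecret above. A hedged client-side sketch (the control-client handle and label values are hypothetical):

    // assumes: import "github.com/docker/swarmkit/api"
    resp, err := controlClient.UpdateSecret(ctx, &api.UpdateSecretRequest{
        SecretID:      secret.ID,
        SecretVersion: &secret.Meta.Version,
        Spec: &api.SecretSpec{
            Annotations: api.Annotations{
                Name:   secret.Spec.Annotations.Name, // renaming is rejected
                Labels: map[string]string{"env": "prod"},
            },
            // Data must stay nil; sending data returns InvalidArgument.
        },
    })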
| ... | ... |
@@ -3,6 +3,7 @@ package controlapi |
| 3 | 3 |
import ( |
| 4 | 4 |
"errors" |
| 5 | 5 |
"reflect" |
| 6 |
+ "regexp" |
|
| 6 | 7 |
"strconv" |
| 7 | 8 |
|
| 8 | 9 |
"github.com/docker/distribution/reference" |
| ... | ... |
@@ -18,9 +19,13 @@ import ( |
| 18 | 18 |
|
| 19 | 19 |
var ( |
| 20 | 20 |
errNetworkUpdateNotSupported = errors.New("changing network in service is not supported")
|
| 21 |
+ errRenameNotSupported = errors.New("renaming services is not supported")
|
|
| 21 | 22 |
errModeChangeNotAllowed = errors.New("service mode change is not allowed")
|
| 22 | 23 |
) |
| 23 | 24 |
|
| 25 |
+// Regexp pattern for hostname to conform RFC 1123 |
|
| 26 |
+var hostnamePattern = regexp.MustCompile("^(([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])\\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])$")
|
|
| 27 |
+ |
|
| 24 | 28 |
func validateResources(r *api.Resources) error {
|
| 25 | 29 |
if r == nil {
|
| 26 | 30 |
return nil |
| ... | ... |
@@ -102,6 +107,43 @@ func validateUpdate(uc *api.UpdateConfig) error {
|
| 102 | 102 |
return nil |
| 103 | 103 |
} |
| 104 | 104 |
|
| 105 |
+func validateContainerSpec(container *api.ContainerSpec) error {
|
|
| 106 |
+ if container == nil {
|
|
| 107 |
+ return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: missing in service spec") |
|
| 108 |
+ } |
|
| 109 |
+ |
|
| 110 |
+ if err := validateHostname(container.Hostname); err != nil {
|
|
| 111 |
+ return err |
|
| 112 |
+ } |
|
| 113 |
+ |
|
| 114 |
+ if container.Image == "" {
|
|
| 115 |
+ return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: image reference must be provided") |
|
| 116 |
+ } |
|
| 117 |
+ |
|
| 118 |
+ if _, err := reference.ParseNamed(container.Image); err != nil {
|
|
| 119 |
+ return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: %q is not a valid repository/tag", container.Image) |
|
| 120 |
+ } |
|
| 121 |
+ |
|
| 122 |
+ mountMap := make(map[string]bool) |
|
| 123 |
+ for _, mount := range container.Mounts {
|
|
| 124 |
+ if _, exists := mountMap[mount.Target]; exists {
|
|
| 125 |
+ return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: duplicate mount point: %s", mount.Target) |
|
| 126 |
+ } |
|
| 127 |
+ mountMap[mount.Target] = true |
|
| 128 |
+ } |
|
| 129 |
+ |
|
| 130 |
+ return nil |
|
| 131 |
+} |
|
| 132 |
+ |
|
| 133 |
+func validateHostname(hostname string) error {
|
|
| 134 |
+ if hostname != "" {
|
|
| 135 |
+ if len(hostname) > 63 || !hostnamePattern.MatchString(hostname) {
|
|
| 136 |
+ return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: %s is not valid hostname", hostname) |
|
| 137 |
+ } |
|
| 138 |
+ } |
|
| 139 |
+ return nil |
|
| 140 |
+} |
|
| 141 |
+ |
|
| 105 | 142 |
func validateTask(taskSpec api.TaskSpec) error {
|
| 106 | 143 |
if err := validateResourceRequirements(taskSpec.Resources); err != nil {
|
| 107 | 144 |
return err |
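validateHostname enforces RFC 1123 style names: dot-separated labels of alphanumerics with interior hyphens, with the whole name capped at 63 characters. A small self-contained check against the same pattern (test values are made up):

    package main

    import (
        "fmt"
        "regexp"
    )

    var hostnamePattern = regexp.MustCompile(
        `^(([[:alnum:]]|[[:alnum:]][[:alnum:]\-]*[[:alnum:]])\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\-]*[[:alnum:]])$`)

    func main() {
        for _, h := range []string{"web-01", "db.internal", "-bad", "has_underscore"} {
            fmt.Println(h, len(h) <= 63 && hostnamePattern.MatchString(h))
        }
        // prints: web-01 true, db.internal true, -bad false, has_underscore false
    }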
| ... | ... |
@@ -124,25 +166,8 @@ func validateTask(taskSpec api.TaskSpec) error {
|
| 124 | 124 |
return grpc.Errorf(codes.Unimplemented, "RuntimeSpec: unimplemented runtime in service spec") |
| 125 | 125 |
} |
| 126 | 126 |
|
| 127 |
- container := taskSpec.GetContainer() |
|
| 128 |
- if container == nil {
|
|
| 129 |
- return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: missing in service spec") |
|
| 130 |
- } |
|
| 131 |
- |
|
| 132 |
- if container.Image == "" {
|
|
| 133 |
- return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: image reference must be provided") |
|
| 134 |
- } |
|
| 135 |
- |
|
| 136 |
- if _, err := reference.ParseNamed(container.Image); err != nil {
|
|
| 137 |
- return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: %q is not a valid repository/tag", container.Image) |
|
| 138 |
- } |
|
| 139 |
- |
|
| 140 |
- mountMap := make(map[string]bool) |
|
| 141 |
- for _, mount := range container.Mounts {
|
|
| 142 |
- if _, exists := mountMap[mount.Target]; exists {
|
|
| 143 |
- return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: duplicate mount point: %s", mount.Target) |
|
| 144 |
- } |
|
| 145 |
- mountMap[mount.Target] = true |
|
| 127 |
+ if err := validateContainerSpec(taskSpec.GetContainer()); err != nil {
|
|
| 128 |
+ return err |
|
| 146 | 129 |
} |
| 147 | 130 |
|
| 148 | 131 |
return nil |
| ... | ... |
@@ -279,6 +304,25 @@ func (s *Server) checkPortConflicts(spec *api.ServiceSpec, serviceID string) err |
| 279 | 279 |
return nil |
| 280 | 280 |
} |
| 281 | 281 |
|
| 282 |
+// checkSecretConflicts finds if the passed in spec has secrets with conflicting targets. |
|
| 283 |
+func (s *Server) checkSecretConflicts(spec *api.ServiceSpec) error {
|
|
| 284 |
+ container := spec.Task.GetContainer() |
|
| 285 |
+ if container == nil {
|
|
| 286 |
+ return nil |
|
| 287 |
+ } |
|
| 288 |
+ |
|
| 289 |
+ existingTargets := make(map[string]string) |
|
| 290 |
+ for _, secretRef := range container.Secrets {
|
|
| 291 |
+ if prevSecretName, ok := existingTargets[secretRef.Target]; ok {
|
|
| 292 |
+ return grpc.Errorf(codes.InvalidArgument, "secret references '%s' and '%s' have a conflicting target: '%s'", prevSecretName, secretRef.SecretName, secretRef.Target) |
|
| 293 |
+ } |
|
| 294 |
+ |
|
| 295 |
+ existingTargets[secretRef.Target] = secretRef.SecretName |
|
| 296 |
+ } |
|
| 297 |
+ |
|
| 298 |
+ return nil |
|
| 299 |
+} |
|
| 300 |
+ |
|
| 282 | 301 |
// CreateService creates and return a Service based on the provided ServiceSpec. |
| 283 | 302 |
// - Returns `InvalidArgument` if the ServiceSpec is malformed. |
| 284 | 303 |
// - Returns `Unimplemented` if the ServiceSpec references unimplemented features. |
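checkSecretConflicts rejects two secret references that would be materialized at the same target. A hypothetical fragment that fails the check (field names assumed from the regenerated bindings):

    // assumes: import "github.com/docker/swarmkit/api"
    container.Secrets = []*api.SecretReference{
        {SecretName: "db-password", Target: "app-secret"},
        {SecretName: "api-token", Target: "app-secret"}, // same target: InvalidArgument
    }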
| ... | ... |
@@ -297,6 +341,10 @@ func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRe |
| 297 | 297 |
return nil, err |
| 298 | 298 |
} |
| 299 | 299 |
|
| 300 |
+ if err := s.checkSecretConflicts(request.Spec); err != nil {
|
|
| 301 |
+ return nil, err |
|
| 302 |
+ } |
|
| 303 |
+ |
|
| 300 | 304 |
// TODO(aluzzardi): Consider using `Name` as a primary key to handle |
| 301 | 305 |
// duplicate creations. See #65 |
| 302 | 306 |
service := &api.Service{
|
| ... | ... |
@@ -364,6 +412,10 @@ func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRe |
| 364 | 364 |
} |
| 365 | 365 |
} |
| 366 | 366 |
|
| 367 |
+ if err := s.checkSecretConflicts(request.Spec); err != nil {
|
|
| 368 |
+ return nil, err |
|
| 369 |
+ } |
|
| 370 |
+ |
|
| 367 | 371 |
err := s.store.Update(func(tx store.Tx) error {
|
| 368 | 372 |
service = store.GetService(tx, request.ServiceID) |
| 369 | 373 |
if service == nil {
|
| ... | ... |
@@ -390,6 +442,11 @@ func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRe |
| 390 | 390 |
if reflect.TypeOf(service.Spec.Mode) != reflect.TypeOf(request.Spec.Mode) {
|
| 391 | 391 |
return errModeChangeNotAllowed |
| 392 | 392 |
} |
| 393 |
+ |
|
| 394 |
+ if service.Spec.Annotations.Name != request.Spec.Annotations.Name {
|
|
| 395 |
+ return errRenameNotSupported |
|
| 396 |
+ } |
|
| 397 |
+ |
|
| 393 | 398 |
service.Meta.Version = *request.ServiceVersion |
| 394 | 399 |
service.PreviousSpec = service.Spec.Copy() |
| 395 | 400 |
service.Spec = *request.Spec.Copy() |
| ... | ... |
@@ -2,6 +2,7 @@ package controlapi |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"github.com/docker/swarmkit/api" |
| 5 |
+ "github.com/docker/swarmkit/api/naming" |
|
| 5 | 6 |
"github.com/docker/swarmkit/manager/state/store" |
| 6 | 7 |
"golang.org/x/net/context" |
| 7 | 8 |
"google.golang.org/grpc" |
| ... | ... |
@@ -104,10 +105,10 @@ func (s *Server) ListTasks(ctx context.Context, request *api.ListTasksRequest) ( |
| 104 | 104 |
if request.Filters != nil {
|
| 105 | 105 |
tasks = filterTasks(tasks, |
| 106 | 106 |
func(e *api.Task) bool {
|
| 107 |
- return filterContains(store.TaskName(e), request.Filters.Names) |
|
| 107 |
+ return filterContains(naming.Task(e), request.Filters.Names) |
|
| 108 | 108 |
}, |
| 109 | 109 |
func(e *api.Task) bool {
|
| 110 |
- return filterContainsPrefix(store.TaskName(e), request.Filters.NamePrefixes) |
|
| 110 |
+ return filterContainsPrefix(naming.Task(e), request.Filters.NamePrefixes) |
|
| 111 | 111 |
}, |
| 112 | 112 |
func(e *api.Task) bool {
|
| 113 | 113 |
return filterContainsPrefix(e.ID, request.Filters.IDPrefixes) |
| ... | ... |
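
Note: ListTasks now derives display names from the new api/naming package instead of store.TaskName. The exact string naming.Task builds is not part of this excerpt, so the sketch below only contrasts the two filter callbacks, exact match versus prefix match, using hypothetical stand-ins for the helpers:

    // Hypothetical stand-ins for the filterContains / filterContainsPrefix
    // helpers referenced above; they are not swarmkit's implementations.
    package main

    import (
        "fmt"
        "strings"
    )

    func filterContains(value string, candidates []string) bool {
        for _, c := range candidates {
            if c == value {
                return true
            }
        }
        return false
    }

    func filterContainsPrefix(value string, prefixes []string) bool {
        for _, p := range prefixes {
            if strings.HasPrefix(value, p) {
                return true
            }
        }
        return false
    }

    func main() {
        name := "web.1" // stand-in for whatever naming.Task(task) returns
        fmt.Println(filterContains(name, []string{"web.1"}))     // true
        fmt.Println(filterContainsPrefix(name, []string{"web"})) // true
        fmt.Println(filterContains(name, []string{"web"}))       // false
    }
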
@@ -21,7 +21,10 @@ import ( |
| 21 | 21 |
"github.com/docker/swarmkit/manager/dispatcher" |
| 22 | 22 |
"github.com/docker/swarmkit/manager/health" |
| 23 | 23 |
"github.com/docker/swarmkit/manager/keymanager" |
| 24 |
- "github.com/docker/swarmkit/manager/orchestrator" |
|
| 24 |
+ "github.com/docker/swarmkit/manager/orchestrator/constraintenforcer" |
|
| 25 |
+ "github.com/docker/swarmkit/manager/orchestrator/global" |
|
| 26 |
+ "github.com/docker/swarmkit/manager/orchestrator/replicated" |
|
| 27 |
+ "github.com/docker/swarmkit/manager/orchestrator/taskreaper" |
|
| 25 | 28 |
"github.com/docker/swarmkit/manager/resourceapi" |
| 26 | 29 |
"github.com/docker/swarmkit/manager/scheduler" |
| 27 | 30 |
"github.com/docker/swarmkit/manager/state/raft" |
| ... | ... |
@@ -82,10 +85,10 @@ type Manager struct {
|
| 82 | 82 |
|
| 83 | 83 |
caserver *ca.Server |
| 84 | 84 |
dispatcher *dispatcher.Dispatcher |
| 85 |
- replicatedOrchestrator *orchestrator.ReplicatedOrchestrator |
|
| 86 |
- globalOrchestrator *orchestrator.GlobalOrchestrator |
|
| 87 |
- taskReaper *orchestrator.TaskReaper |
|
| 88 |
- constraintEnforcer *orchestrator.ConstraintEnforcer |
|
| 85 |
+ replicatedOrchestrator *replicated.Orchestrator |
|
| 86 |
+ globalOrchestrator *global.Orchestrator |
|
| 87 |
+ taskReaper *taskreaper.TaskReaper |
|
| 88 |
+ constraintEnforcer *constraintenforcer.ConstraintEnforcer |
|
| 89 | 89 |
scheduler *scheduler.Scheduler |
| 90 | 90 |
allocator *allocator.Allocator |
| 91 | 91 |
keyManager *keymanager.KeyManager |
| ... | ... |
@@ -263,9 +266,9 @@ func (m *Manager) Run(parent context.Context) error {
|
| 263 | 263 |
|
| 264 | 264 |
authorize := func(ctx context.Context, roles []string) error {
|
| 265 | 265 |
var ( |
| 266 |
- removedNodes []*api.RemovedNode |
|
| 267 |
- clusters []*api.Cluster |
|
| 268 |
- err error |
|
| 266 |
+ blacklistedCerts map[string]*api.BlacklistedCertificate |
|
| 267 |
+ clusters []*api.Cluster |
|
| 268 |
+ err error |
|
| 269 | 269 |
) |
| 270 | 270 |
|
| 271 | 271 |
m.raftNode.MemoryStore().View(func(readTx store.ReadTx) {
|
| ... | ... |
@@ -276,11 +279,11 @@ func (m *Manager) Run(parent context.Context) error {
|
| 276 | 276 |
// Not having a cluster object yet means we can't check |
| 277 | 277 |
// the blacklist. |
| 278 | 278 |
if err == nil && len(clusters) == 1 {
|
| 279 |
- removedNodes = clusters[0].RemovedNodes |
|
| 279 |
+ blacklistedCerts = clusters[0].BlacklistedCertificates |
|
| 280 | 280 |
} |
| 281 | 281 |
|
| 282 | 282 |
// Authorize the remote roles, ensure they can only be forwarded by managers |
| 283 |
- _, err = ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, m.config.SecurityConfig.ClientTLSCreds.Organization(), removedNodes)
|
|
| 283 |
+ _, err = ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, m.config.SecurityConfig.ClientTLSCreds.Organization(), blacklistedCerts)
|
|
| 284 | 284 |
return err |
| 285 | 285 |
} |
| 286 | 286 |
|
| ... | ... |
@@ -651,10 +654,10 @@ func (m *Manager) becomeLeader(ctx context.Context) {
|
| 651 | 651 |
log.G(ctx).WithError(err).Error("root key-encrypting-key rotation failed")
|
| 652 | 652 |
} |
| 653 | 653 |
|
| 654 |
- m.replicatedOrchestrator = orchestrator.NewReplicatedOrchestrator(s) |
|
| 655 |
- m.constraintEnforcer = orchestrator.NewConstraintEnforcer(s) |
|
| 656 |
- m.globalOrchestrator = orchestrator.NewGlobalOrchestrator(s) |
|
| 657 |
- m.taskReaper = orchestrator.NewTaskReaper(s) |
|
| 654 |
+ m.replicatedOrchestrator = replicated.NewReplicatedOrchestrator(s) |
|
| 655 |
+ m.constraintEnforcer = constraintenforcer.New(s) |
|
| 656 |
+ m.globalOrchestrator = global.NewGlobalOrchestrator(s) |
|
| 657 |
+ m.taskReaper = taskreaper.New(s) |
|
| 658 | 658 |
m.scheduler = scheduler.New(s) |
| 659 | 659 |
m.keyManager = keymanager.New(s, keymanager.DefaultConfig()) |
| 660 | 660 |
|
| ... | ... |
@@ -706,21 +709,21 @@ func (m *Manager) becomeLeader(ctx context.Context) {
|
| 706 | 706 |
} |
| 707 | 707 |
}(m.scheduler) |
| 708 | 708 |
|
| 709 |
- go func(constraintEnforcer *orchestrator.ConstraintEnforcer) {
|
|
| 709 |
+ go func(constraintEnforcer *constraintenforcer.ConstraintEnforcer) {
|
|
| 710 | 710 |
constraintEnforcer.Run() |
| 711 | 711 |
}(m.constraintEnforcer) |
| 712 | 712 |
|
| 713 |
- go func(taskReaper *orchestrator.TaskReaper) {
|
|
| 713 |
+ go func(taskReaper *taskreaper.TaskReaper) {
|
|
| 714 | 714 |
taskReaper.Run() |
| 715 | 715 |
}(m.taskReaper) |
| 716 | 716 |
|
| 717 |
- go func(orchestrator *orchestrator.ReplicatedOrchestrator) {
|
|
| 717 |
+ go func(orchestrator *replicated.Orchestrator) {
|
|
| 718 | 718 |
if err := orchestrator.Run(ctx); err != nil {
|
| 719 | 719 |
log.G(ctx).WithError(err).Error("replicated orchestrator exited with an error")
|
| 720 | 720 |
} |
| 721 | 721 |
}(m.replicatedOrchestrator) |
| 722 | 722 |
|
| 723 |
- go func(globalOrchestrator *orchestrator.GlobalOrchestrator) {
|
|
| 723 |
+ go func(globalOrchestrator *global.Orchestrator) {
|
|
| 724 | 724 |
if err := globalOrchestrator.Run(ctx); err != nil {
|
| 725 | 725 |
log.G(ctx).WithError(err).Error("global orchestrator exited with an error")
|
| 726 | 726 |
} |
| 727 | 727 |
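
Note: with the orchestrator package split into replicated, global, taskreaper and constraintenforcer subpackages, the manager wires them through the constructors shown above. A condensed sketch of that leader-side wiring, limited to the constructors and Run/Stop calls visible in this diff (everything else about the manager lifecycle is assumed):

    // Sketch only: condenses the becomeLeader hunks above into one helper.
    package sketch

    import (
        "github.com/docker/swarmkit/log"
        "github.com/docker/swarmkit/manager/orchestrator/constraintenforcer"
        "github.com/docker/swarmkit/manager/orchestrator/global"
        "github.com/docker/swarmkit/manager/orchestrator/replicated"
        "github.com/docker/swarmkit/manager/orchestrator/taskreaper"
        "github.com/docker/swarmkit/manager/state/store"
        "golang.org/x/net/context"
    )

    func startOrchestrators(ctx context.Context, s *store.MemoryStore) (stop func()) {
        ce := constraintenforcer.New(s)
        reaper := taskreaper.New(s)
        replicatedOrch := replicated.NewReplicatedOrchestrator(s)
        globalOrch := global.NewGlobalOrchestrator(s)

        go ce.Run()
        go reaper.Run()
        go func() {
            if err := replicatedOrch.Run(ctx); err != nil {
                log.G(ctx).WithError(err).Error("replicated orchestrator exited with an error")
            }
        }()
        go func() {
            if err := globalOrch.Run(ctx); err != nil {
                log.G(ctx).WithError(err).Error("global orchestrator exited with an error")
            }
        }()

        return func() {
            ce.Stop()
            globalOrch.Stop()
            // The remaining components are stopped similarly in the real
            // manager teardown, which is not part of this excerpt.
        }
    }
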
deleted file mode 100644 |
| ... | ... |
@@ -1,157 +0,0 @@ |
| 1 |
-package orchestrator |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "github.com/docker/swarmkit/api" |
|
| 5 |
- "github.com/docker/swarmkit/log" |
|
| 6 |
- "github.com/docker/swarmkit/manager/constraint" |
|
| 7 |
- "github.com/docker/swarmkit/manager/state" |
|
| 8 |
- "github.com/docker/swarmkit/manager/state/store" |
|
| 9 |
-) |
|
| 10 |
- |
|
| 11 |
-// ConstraintEnforcer watches for updates to nodes and shuts down tasks that no |
|
| 12 |
-// longer satisfy scheduling constraints or resource limits. |
|
| 13 |
-type ConstraintEnforcer struct {
|
|
| 14 |
- store *store.MemoryStore |
|
| 15 |
- stopChan chan struct{}
|
|
| 16 |
- doneChan chan struct{}
|
|
| 17 |
-} |
|
| 18 |
- |
|
| 19 |
-// NewConstraintEnforcer creates a new ConstraintEnforcer. |
|
| 20 |
-func NewConstraintEnforcer(store *store.MemoryStore) *ConstraintEnforcer {
|
|
| 21 |
- return &ConstraintEnforcer{
|
|
| 22 |
- store: store, |
|
| 23 |
- stopChan: make(chan struct{}),
|
|
| 24 |
- doneChan: make(chan struct{}),
|
|
| 25 |
- } |
|
| 26 |
-} |
|
| 27 |
- |
|
| 28 |
-// Run is the ConstraintEnforcer's main loop. |
|
| 29 |
-func (ce *ConstraintEnforcer) Run() {
|
|
| 30 |
- defer close(ce.doneChan) |
|
| 31 |
- |
|
| 32 |
- watcher, cancelWatch := state.Watch(ce.store.WatchQueue(), state.EventUpdateNode{})
|
|
| 33 |
- defer cancelWatch() |
|
| 34 |
- |
|
| 35 |
- var ( |
|
| 36 |
- nodes []*api.Node |
|
| 37 |
- err error |
|
| 38 |
- ) |
|
| 39 |
- ce.store.View(func(readTx store.ReadTx) {
|
|
| 40 |
- nodes, err = store.FindNodes(readTx, store.All) |
|
| 41 |
- }) |
|
| 42 |
- if err != nil {
|
|
| 43 |
- log.L.WithError(err).Error("failed to check nodes for noncompliant tasks")
|
|
| 44 |
- } else {
|
|
| 45 |
- for _, node := range nodes {
|
|
| 46 |
- ce.shutdownNoncompliantTasks(node) |
|
| 47 |
- } |
|
| 48 |
- } |
|
| 49 |
- |
|
| 50 |
- for {
|
|
| 51 |
- select {
|
|
| 52 |
- case event := <-watcher: |
|
| 53 |
- node := event.(state.EventUpdateNode).Node |
|
| 54 |
- ce.shutdownNoncompliantTasks(node) |
|
| 55 |
- case <-ce.stopChan: |
|
| 56 |
- return |
|
| 57 |
- } |
|
| 58 |
- } |
|
| 59 |
-} |
|
| 60 |
- |
|
| 61 |
-func (ce *ConstraintEnforcer) shutdownNoncompliantTasks(node *api.Node) {
|
|
| 62 |
- // If the availability is "drain", the orchestrator will |
|
| 63 |
- // shut down all tasks. |
|
| 64 |
- // If the availability is "pause", we shouldn't touch |
|
| 65 |
- // the tasks on this node. |
|
| 66 |
- if node.Spec.Availability != api.NodeAvailabilityActive {
|
|
| 67 |
- return |
|
| 68 |
- } |
|
| 69 |
- |
|
| 70 |
- var ( |
|
| 71 |
- tasks []*api.Task |
|
| 72 |
- err error |
|
| 73 |
- ) |
|
| 74 |
- |
|
| 75 |
- ce.store.View(func(tx store.ReadTx) {
|
|
| 76 |
- tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID)) |
|
| 77 |
- }) |
|
| 78 |
- |
|
| 79 |
- if err != nil {
|
|
| 80 |
- log.L.WithError(err).Errorf("failed to list tasks for node ID %s", node.ID)
|
|
| 81 |
- } |
|
| 82 |
- |
|
| 83 |
- var availableMemoryBytes, availableNanoCPUs int64 |
|
| 84 |
- if node.Description != nil && node.Description.Resources != nil {
|
|
| 85 |
- availableMemoryBytes = node.Description.Resources.MemoryBytes |
|
| 86 |
- availableNanoCPUs = node.Description.Resources.NanoCPUs |
|
| 87 |
- } |
|
| 88 |
- |
|
| 89 |
- removeTasks := make(map[string]*api.Task) |
|
| 90 |
- |
|
| 91 |
- // TODO(aaronl): The set of tasks removed will be |
|
| 92 |
- // nondeterministic because it depends on the order of |
|
| 93 |
- // the slice returned from FindTasks. We could do |
|
| 94 |
- // a separate pass over the tasks for each type of |
|
| 95 |
- // resource, and sort by the size of the reservation |
|
| 96 |
- // to remove the most resource-intensive tasks. |
|
| 97 |
- for _, t := range tasks {
|
|
| 98 |
- if t.DesiredState < api.TaskStateAssigned || t.DesiredState > api.TaskStateRunning {
|
|
| 99 |
- continue |
|
| 100 |
- } |
|
| 101 |
- |
|
| 102 |
- // Ensure that the task still meets scheduling |
|
| 103 |
- // constraints. |
|
| 104 |
- if t.Spec.Placement != nil && len(t.Spec.Placement.Constraints) != 0 {
|
|
| 105 |
- constraints, _ := constraint.Parse(t.Spec.Placement.Constraints) |
|
| 106 |
- if !constraint.NodeMatches(constraints, node) {
|
|
| 107 |
- removeTasks[t.ID] = t |
|
| 108 |
- continue |
|
| 109 |
- } |
|
| 110 |
- } |
|
| 111 |
- |
|
| 112 |
- // Ensure that the task assigned to the node |
|
| 113 |
- // still satisfies the resource limits. |
|
| 114 |
- if t.Spec.Resources != nil && t.Spec.Resources.Reservations != nil {
|
|
| 115 |
- if t.Spec.Resources.Reservations.MemoryBytes > availableMemoryBytes {
|
|
| 116 |
- removeTasks[t.ID] = t |
|
| 117 |
- continue |
|
| 118 |
- } |
|
| 119 |
- if t.Spec.Resources.Reservations.NanoCPUs > availableNanoCPUs {
|
|
| 120 |
- removeTasks[t.ID] = t |
|
| 121 |
- continue |
|
| 122 |
- } |
|
| 123 |
- availableMemoryBytes -= t.Spec.Resources.Reservations.MemoryBytes |
|
| 124 |
- availableNanoCPUs -= t.Spec.Resources.Reservations.NanoCPUs |
|
| 125 |
- } |
|
| 126 |
- } |
|
| 127 |
- |
|
| 128 |
- if len(removeTasks) != 0 {
|
|
| 129 |
- _, err := ce.store.Batch(func(batch *store.Batch) error {
|
|
| 130 |
- for _, t := range removeTasks {
|
|
| 131 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 132 |
- t = store.GetTask(tx, t.ID) |
|
| 133 |
- if t == nil || t.DesiredState > api.TaskStateRunning {
|
|
| 134 |
- return nil |
|
| 135 |
- } |
|
| 136 |
- |
|
| 137 |
- t.DesiredState = api.TaskStateShutdown |
|
| 138 |
- return store.UpdateTask(tx, t) |
|
| 139 |
- }) |
|
| 140 |
- if err != nil {
|
|
| 141 |
- log.L.WithError(err).Errorf("failed to shut down task %s", t.ID)
|
|
| 142 |
- } |
|
| 143 |
- } |
|
| 144 |
- return nil |
|
| 145 |
- }) |
|
| 146 |
- |
|
| 147 |
- if err != nil {
|
|
| 148 |
- log.L.WithError(err).Errorf("failed to shut down tasks")
|
|
| 149 |
- } |
|
| 150 |
- } |
|
| 151 |
-} |
|
| 152 |
- |
|
| 153 |
-// Stop stops the ConstraintEnforcer and waits for the main loop to exit. |
|
| 154 |
-func (ce *ConstraintEnforcer) Stop() {
|
|
| 155 |
- close(ce.stopChan) |
|
| 156 |
- <-ce.doneChan |
|
| 157 |
-} |
| 158 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,157 @@ |
| 0 |
+package constraintenforcer |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "github.com/docker/swarmkit/api" |
|
| 4 |
+ "github.com/docker/swarmkit/log" |
|
| 5 |
+ "github.com/docker/swarmkit/manager/constraint" |
|
| 6 |
+ "github.com/docker/swarmkit/manager/state" |
|
| 7 |
+ "github.com/docker/swarmkit/manager/state/store" |
|
| 8 |
+) |
|
| 9 |
+ |
|
| 10 |
+// ConstraintEnforcer watches for updates to nodes and shuts down tasks that no |
|
| 11 |
+// longer satisfy scheduling constraints or resource limits. |
|
| 12 |
+type ConstraintEnforcer struct {
|
|
| 13 |
+ store *store.MemoryStore |
|
| 14 |
+ stopChan chan struct{}
|
|
| 15 |
+ doneChan chan struct{}
|
|
| 16 |
+} |
|
| 17 |
+ |
|
| 18 |
+// New creates a new ConstraintEnforcer. |
|
| 19 |
+func New(store *store.MemoryStore) *ConstraintEnforcer {
|
|
| 20 |
+ return &ConstraintEnforcer{
|
|
| 21 |
+ store: store, |
|
| 22 |
+ stopChan: make(chan struct{}),
|
|
| 23 |
+ doneChan: make(chan struct{}),
|
|
| 24 |
+ } |
|
| 25 |
+} |
|
| 26 |
+ |
|
| 27 |
+// Run is the ConstraintEnforcer's main loop. |
|
| 28 |
+func (ce *ConstraintEnforcer) Run() {
|
|
| 29 |
+ defer close(ce.doneChan) |
|
| 30 |
+ |
|
| 31 |
+ watcher, cancelWatch := state.Watch(ce.store.WatchQueue(), state.EventUpdateNode{})
|
|
| 32 |
+ defer cancelWatch() |
|
| 33 |
+ |
|
| 34 |
+ var ( |
|
| 35 |
+ nodes []*api.Node |
|
| 36 |
+ err error |
|
| 37 |
+ ) |
|
| 38 |
+ ce.store.View(func(readTx store.ReadTx) {
|
|
| 39 |
+ nodes, err = store.FindNodes(readTx, store.All) |
|
| 40 |
+ }) |
|
| 41 |
+ if err != nil {
|
|
| 42 |
+ log.L.WithError(err).Error("failed to check nodes for noncompliant tasks")
|
|
| 43 |
+ } else {
|
|
| 44 |
+ for _, node := range nodes {
|
|
| 45 |
+ ce.shutdownNoncompliantTasks(node) |
|
| 46 |
+ } |
|
| 47 |
+ } |
|
| 48 |
+ |
|
| 49 |
+ for {
|
|
| 50 |
+ select {
|
|
| 51 |
+ case event := <-watcher: |
|
| 52 |
+ node := event.(state.EventUpdateNode).Node |
|
| 53 |
+ ce.shutdownNoncompliantTasks(node) |
|
| 54 |
+ case <-ce.stopChan: |
|
| 55 |
+ return |
|
| 56 |
+ } |
|
| 57 |
+ } |
|
| 58 |
+} |
|
| 59 |
+ |
|
| 60 |
+func (ce *ConstraintEnforcer) shutdownNoncompliantTasks(node *api.Node) {
|
|
| 61 |
+ // If the availability is "drain", the orchestrator will |
|
| 62 |
+ // shut down all tasks. |
|
| 63 |
+ // If the availability is "pause", we shouldn't touch |
|
| 64 |
+ // the tasks on this node. |
|
| 65 |
+ if node.Spec.Availability != api.NodeAvailabilityActive {
|
|
| 66 |
+ return |
|
| 67 |
+ } |
|
| 68 |
+ |
|
| 69 |
+ var ( |
|
| 70 |
+ tasks []*api.Task |
|
| 71 |
+ err error |
|
| 72 |
+ ) |
|
| 73 |
+ |
|
| 74 |
+ ce.store.View(func(tx store.ReadTx) {
|
|
| 75 |
+ tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID)) |
|
| 76 |
+ }) |
|
| 77 |
+ |
|
| 78 |
+ if err != nil {
|
|
| 79 |
+ log.L.WithError(err).Errorf("failed to list tasks for node ID %s", node.ID)
|
|
| 80 |
+ } |
|
| 81 |
+ |
|
| 82 |
+ var availableMemoryBytes, availableNanoCPUs int64 |
|
| 83 |
+ if node.Description != nil && node.Description.Resources != nil {
|
|
| 84 |
+ availableMemoryBytes = node.Description.Resources.MemoryBytes |
|
| 85 |
+ availableNanoCPUs = node.Description.Resources.NanoCPUs |
|
| 86 |
+ } |
|
| 87 |
+ |
|
| 88 |
+ removeTasks := make(map[string]*api.Task) |
|
| 89 |
+ |
|
| 90 |
+ // TODO(aaronl): The set of tasks removed will be |
|
| 91 |
+ // nondeterministic because it depends on the order of |
|
| 92 |
+ // the slice returned from FindTasks. We could do |
|
| 93 |
+ // a separate pass over the tasks for each type of |
|
| 94 |
+ // resource, and sort by the size of the reservation |
|
| 95 |
+ // to remove the most resource-intensive tasks. |
|
| 96 |
+ for _, t := range tasks {
|
|
| 97 |
+ if t.DesiredState < api.TaskStateAssigned || t.DesiredState > api.TaskStateRunning {
|
|
| 98 |
+ continue |
|
| 99 |
+ } |
|
| 100 |
+ |
|
| 101 |
+ // Ensure that the task still meets scheduling |
|
| 102 |
+ // constraints. |
|
| 103 |
+ if t.Spec.Placement != nil && len(t.Spec.Placement.Constraints) != 0 {
|
|
| 104 |
+ constraints, _ := constraint.Parse(t.Spec.Placement.Constraints) |
|
| 105 |
+ if !constraint.NodeMatches(constraints, node) {
|
|
| 106 |
+ removeTasks[t.ID] = t |
|
| 107 |
+ continue |
|
| 108 |
+ } |
|
| 109 |
+ } |
|
| 110 |
+ |
|
| 111 |
+ // Ensure that the task assigned to the node |
|
| 112 |
+ // still satisfies the resource limits. |
|
| 113 |
+ if t.Spec.Resources != nil && t.Spec.Resources.Reservations != nil {
|
|
| 114 |
+ if t.Spec.Resources.Reservations.MemoryBytes > availableMemoryBytes {
|
|
| 115 |
+ removeTasks[t.ID] = t |
|
| 116 |
+ continue |
|
| 117 |
+ } |
|
| 118 |
+ if t.Spec.Resources.Reservations.NanoCPUs > availableNanoCPUs {
|
|
| 119 |
+ removeTasks[t.ID] = t |
|
| 120 |
+ continue |
|
| 121 |
+ } |
|
| 122 |
+ availableMemoryBytes -= t.Spec.Resources.Reservations.MemoryBytes |
|
| 123 |
+ availableNanoCPUs -= t.Spec.Resources.Reservations.NanoCPUs |
|
| 124 |
+ } |
|
| 125 |
+ } |
|
| 126 |
+ |
|
| 127 |
+ if len(removeTasks) != 0 {
|
|
| 128 |
+ _, err := ce.store.Batch(func(batch *store.Batch) error {
|
|
| 129 |
+ for _, t := range removeTasks {
|
|
| 130 |
+ err := batch.Update(func(tx store.Tx) error {
|
|
| 131 |
+ t = store.GetTask(tx, t.ID) |
|
| 132 |
+ if t == nil || t.DesiredState > api.TaskStateRunning {
|
|
| 133 |
+ return nil |
|
| 134 |
+ } |
|
| 135 |
+ |
|
| 136 |
+ t.DesiredState = api.TaskStateShutdown |
|
| 137 |
+ return store.UpdateTask(tx, t) |
|
| 138 |
+ }) |
|
| 139 |
+ if err != nil {
|
|
| 140 |
+ log.L.WithError(err).Errorf("failed to shut down task %s", t.ID)
|
|
| 141 |
+ } |
|
| 142 |
+ } |
|
| 143 |
+ return nil |
|
| 144 |
+ }) |
|
| 145 |
+ |
|
| 146 |
+ if err != nil {
|
|
| 147 |
+ log.L.WithError(err).Errorf("failed to shut down tasks")
|
|
| 148 |
+ } |
|
| 149 |
+ } |
|
| 150 |
+} |
|
| 151 |
+ |
|
| 152 |
+// Stop stops the ConstraintEnforcer and waits for the main loop to exit. |
|
| 153 |
+func (ce *ConstraintEnforcer) Stop() {
|
|
| 154 |
+ close(ce.stopChan) |
|
| 155 |
+ <-ce.doneChan |
|
| 156 |
+} |
| 0 | 157 |
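
Note: the TODO carried over into the new constraintenforcer package observes that the set of tasks shut down when resources shrink is nondeterministic, and suggests sorting by reservation size. A rough sketch of that suggestion, assuming only the Spec.Resources.Reservations fields already referenced above (this is not part of the vendored code):

    // Hypothetical helper: order tasks largest-reservation-first so a resource
    // pass would shut down the most expensive tasks deterministically.
    package sketch

    import (
        "sort"

        "github.com/docker/swarmkit/api"
    )

    // memoryReservation returns the task's reserved memory, or 0 if none is set.
    func memoryReservation(t *api.Task) int64 {
        if t.Spec.Resources == nil || t.Spec.Resources.Reservations == nil {
            return 0
        }
        return t.Spec.Resources.Reservations.MemoryBytes
    }

    func sortByMemoryReservation(tasks []*api.Task) {
        sort.Slice(tasks, func(i, j int) bool {
            return memoryReservation(tasks[i]) > memoryReservation(tasks[j])
        })
    }
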
deleted file mode 100644 |
| ... | ... |
@@ -1,513 +0,0 @@ |
| 1 |
-package orchestrator |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "github.com/docker/swarmkit/api" |
|
| 5 |
- "github.com/docker/swarmkit/log" |
|
| 6 |
- "github.com/docker/swarmkit/manager/constraint" |
|
| 7 |
- "github.com/docker/swarmkit/manager/state" |
|
| 8 |
- "github.com/docker/swarmkit/manager/state/store" |
|
| 9 |
- "golang.org/x/net/context" |
|
| 10 |
-) |
|
| 11 |
- |
|
| 12 |
-type globalService struct {
|
|
| 13 |
- *api.Service |
|
| 14 |
- |
|
| 15 |
- // Compiled constraints |
|
| 16 |
- constraints []constraint.Constraint |
|
| 17 |
-} |
|
| 18 |
- |
|
| 19 |
-// GlobalOrchestrator runs a reconciliation loop to create and destroy |
|
| 20 |
-// tasks as necessary for global services. |
|
| 21 |
-type GlobalOrchestrator struct {
|
|
| 22 |
- store *store.MemoryStore |
|
| 23 |
- // nodes is the set of non-drained nodes in the cluster, indexed by node ID |
|
| 24 |
- nodes map[string]*api.Node |
|
| 25 |
- // globalServices has all the global services in the cluster, indexed by ServiceID |
|
| 26 |
- globalServices map[string]globalService |
|
| 27 |
- |
|
| 28 |
- // stopChan signals to the state machine to stop running. |
|
| 29 |
- stopChan chan struct{}
|
|
| 30 |
- // doneChan is closed when the state machine terminates. |
|
| 31 |
- doneChan chan struct{}
|
|
| 32 |
- |
|
| 33 |
- updater *UpdateSupervisor |
|
| 34 |
- restarts *RestartSupervisor |
|
| 35 |
- |
|
| 36 |
- cluster *api.Cluster // local instance of the cluster |
|
| 37 |
-} |
|
| 38 |
- |
|
| 39 |
-// NewGlobalOrchestrator creates a new GlobalOrchestrator |
|
| 40 |
-func NewGlobalOrchestrator(store *store.MemoryStore) *GlobalOrchestrator {
|
|
| 41 |
- restartSupervisor := NewRestartSupervisor(store) |
|
| 42 |
- updater := NewUpdateSupervisor(store, restartSupervisor) |
|
| 43 |
- return &GlobalOrchestrator{
|
|
| 44 |
- store: store, |
|
| 45 |
- nodes: make(map[string]*api.Node), |
|
| 46 |
- globalServices: make(map[string]globalService), |
|
| 47 |
- stopChan: make(chan struct{}),
|
|
| 48 |
- doneChan: make(chan struct{}),
|
|
| 49 |
- updater: updater, |
|
| 50 |
- restarts: restartSupervisor, |
|
| 51 |
- } |
|
| 52 |
-} |
|
| 53 |
- |
|
| 54 |
-// Run contains the GlobalOrchestrator event loop |
|
| 55 |
-func (g *GlobalOrchestrator) Run(ctx context.Context) error {
|
|
| 56 |
- defer close(g.doneChan) |
|
| 57 |
- |
|
| 58 |
- // Watch changes to services and tasks |
|
| 59 |
- queue := g.store.WatchQueue() |
|
| 60 |
- watcher, cancel := queue.Watch() |
|
| 61 |
- defer cancel() |
|
| 62 |
- |
|
| 63 |
- // lookup the cluster |
|
| 64 |
- var err error |
|
| 65 |
- g.store.View(func(readTx store.ReadTx) {
|
|
| 66 |
- var clusters []*api.Cluster |
|
| 67 |
- clusters, err = store.FindClusters(readTx, store.ByName("default"))
|
|
| 68 |
- |
|
| 69 |
- if len(clusters) != 1 {
|
|
| 70 |
- return // just pick up the cluster when it is created. |
|
| 71 |
- } |
|
| 72 |
- g.cluster = clusters[0] |
|
| 73 |
- }) |
|
| 74 |
- if err != nil {
|
|
| 75 |
- return err |
|
| 76 |
- } |
|
| 77 |
- |
|
| 78 |
- // Get list of nodes |
|
| 79 |
- var nodes []*api.Node |
|
| 80 |
- g.store.View(func(readTx store.ReadTx) {
|
|
| 81 |
- nodes, err = store.FindNodes(readTx, store.All) |
|
| 82 |
- }) |
|
| 83 |
- if err != nil {
|
|
| 84 |
- return err |
|
| 85 |
- } |
|
| 86 |
- for _, n := range nodes {
|
|
| 87 |
- g.updateNode(n) |
|
| 88 |
- } |
|
| 89 |
- |
|
| 90 |
- // Lookup global services |
|
| 91 |
- var existingServices []*api.Service |
|
| 92 |
- g.store.View(func(readTx store.ReadTx) {
|
|
| 93 |
- existingServices, err = store.FindServices(readTx, store.All) |
|
| 94 |
- }) |
|
| 95 |
- if err != nil {
|
|
| 96 |
- return err |
|
| 97 |
- } |
|
| 98 |
- |
|
| 99 |
- var reconcileServiceIDs []string |
|
| 100 |
- for _, s := range existingServices {
|
|
| 101 |
- if isGlobalService(s) {
|
|
| 102 |
- g.updateService(s) |
|
| 103 |
- reconcileServiceIDs = append(reconcileServiceIDs, s.ID) |
|
| 104 |
- } |
|
| 105 |
- } |
|
| 106 |
- g.reconcileServices(ctx, reconcileServiceIDs) |
|
| 107 |
- |
|
| 108 |
- for {
|
|
| 109 |
- select {
|
|
| 110 |
- case event := <-watcher: |
|
| 111 |
- // TODO(stevvooe): Use ctx to limit running time of operation. |
|
| 112 |
- switch v := event.(type) {
|
|
| 113 |
- case state.EventUpdateCluster: |
|
| 114 |
- g.cluster = v.Cluster |
|
| 115 |
- case state.EventCreateService: |
|
| 116 |
- if !isGlobalService(v.Service) {
|
|
| 117 |
- continue |
|
| 118 |
- } |
|
| 119 |
- g.updateService(v.Service) |
|
| 120 |
- g.reconcileServices(ctx, []string{v.Service.ID})
|
|
| 121 |
- case state.EventUpdateService: |
|
| 122 |
- if !isGlobalService(v.Service) {
|
|
| 123 |
- continue |
|
| 124 |
- } |
|
| 125 |
- g.updateService(v.Service) |
|
| 126 |
- g.reconcileServices(ctx, []string{v.Service.ID})
|
|
| 127 |
- case state.EventDeleteService: |
|
| 128 |
- if !isGlobalService(v.Service) {
|
|
| 129 |
- continue |
|
| 130 |
- } |
|
| 131 |
- deleteServiceTasks(ctx, g.store, v.Service) |
|
| 132 |
- // delete the service from service map |
|
| 133 |
- delete(g.globalServices, v.Service.ID) |
|
| 134 |
- g.restarts.ClearServiceHistory(v.Service.ID) |
|
| 135 |
- case state.EventCreateNode: |
|
| 136 |
- g.updateNode(v.Node) |
|
| 137 |
- g.reconcileOneNode(ctx, v.Node) |
|
| 138 |
- case state.EventUpdateNode: |
|
| 139 |
- g.updateNode(v.Node) |
|
| 140 |
- switch v.Node.Status.State {
|
|
| 141 |
- // NodeStatus_DISCONNECTED is a transient state, no need to make any change |
|
| 142 |
- case api.NodeStatus_DOWN: |
|
| 143 |
- g.removeTasksFromNode(ctx, v.Node) |
|
| 144 |
- case api.NodeStatus_READY: |
|
| 145 |
- // node could come back to READY from DOWN or DISCONNECT |
|
| 146 |
- g.reconcileOneNode(ctx, v.Node) |
|
| 147 |
- } |
|
| 148 |
- case state.EventDeleteNode: |
|
| 149 |
- g.removeTasksFromNode(ctx, v.Node) |
|
| 150 |
- delete(g.nodes, v.Node.ID) |
|
| 151 |
- case state.EventUpdateTask: |
|
| 152 |
- if _, exists := g.globalServices[v.Task.ServiceID]; !exists {
|
|
| 153 |
- continue |
|
| 154 |
- } |
|
| 155 |
- // global orchestrator needs to inspect when a task has terminated |
|
| 156 |
- // it should ignore tasks whose DesiredState is past running, which |
|
| 157 |
- // means the task has been processed |
|
| 158 |
- if isTaskTerminated(v.Task) {
|
|
| 159 |
- g.restartTask(ctx, v.Task.ID, v.Task.ServiceID) |
|
| 160 |
- } |
|
| 161 |
- case state.EventDeleteTask: |
|
| 162 |
- // CLI allows deleting task |
|
| 163 |
- if _, exists := g.globalServices[v.Task.ServiceID]; !exists {
|
|
| 164 |
- continue |
|
| 165 |
- } |
|
| 166 |
- g.reconcileServicesOneNode(ctx, []string{v.Task.ServiceID}, v.Task.NodeID)
|
|
| 167 |
- } |
|
| 168 |
- case <-g.stopChan: |
|
| 169 |
- return nil |
|
| 170 |
- } |
|
| 171 |
- } |
|
| 172 |
-} |
|
| 173 |
- |
|
| 174 |
-// Stop stops the orchestrator. |
|
| 175 |
-func (g *GlobalOrchestrator) Stop() {
|
|
| 176 |
- close(g.stopChan) |
|
| 177 |
- <-g.doneChan |
|
| 178 |
- g.updater.CancelAll() |
|
| 179 |
- g.restarts.CancelAll() |
|
| 180 |
-} |
|
| 181 |
- |
|
| 182 |
-func (g *GlobalOrchestrator) removeTasksFromNode(ctx context.Context, node *api.Node) {
|
|
| 183 |
- var ( |
|
| 184 |
- tasks []*api.Task |
|
| 185 |
- err error |
|
| 186 |
- ) |
|
| 187 |
- g.store.View(func(tx store.ReadTx) {
|
|
| 188 |
- tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID)) |
|
| 189 |
- }) |
|
| 190 |
- if err != nil {
|
|
| 191 |
- log.G(ctx).WithError(err).Errorf("global orchestrator: removeTasksFromNode failed finding tasks")
|
|
| 192 |
- return |
|
| 193 |
- } |
|
| 194 |
- |
|
| 195 |
- _, err = g.store.Batch(func(batch *store.Batch) error {
|
|
| 196 |
- for _, t := range tasks {
|
|
| 197 |
- // GlobalOrchestrator only removes tasks from globalServices |
|
| 198 |
- if _, exists := g.globalServices[t.ServiceID]; exists {
|
|
| 199 |
- g.removeTask(ctx, batch, t) |
|
| 200 |
- } |
|
| 201 |
- } |
|
| 202 |
- return nil |
|
| 203 |
- }) |
|
| 204 |
- if err != nil {
|
|
| 205 |
- log.G(ctx).WithError(err).Errorf("global orchestrator: removeTasksFromNode failed")
|
|
| 206 |
- } |
|
| 207 |
-} |
|
| 208 |
- |
|
| 209 |
-func (g *GlobalOrchestrator) reconcileServices(ctx context.Context, serviceIDs []string) {
|
|
| 210 |
- nodeCompleted := make(map[string]map[string]struct{})
|
|
| 211 |
- nodeTasks := make(map[string]map[string][]*api.Task) |
|
| 212 |
- |
|
| 213 |
- g.store.View(func(tx store.ReadTx) {
|
|
| 214 |
- for _, serviceID := range serviceIDs {
|
|
| 215 |
- tasks, err := store.FindTasks(tx, store.ByServiceID(serviceID)) |
|
| 216 |
- if err != nil {
|
|
| 217 |
- log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServices failed finding tasks for service %s", serviceID)
|
|
| 218 |
- continue |
|
| 219 |
- } |
|
| 220 |
- |
|
| 221 |
- // a node may have completed this service |
|
| 222 |
- nodeCompleted[serviceID] = make(map[string]struct{})
|
|
| 223 |
- // nodeID -> task list |
|
| 224 |
- nodeTasks[serviceID] = make(map[string][]*api.Task) |
|
| 225 |
- |
|
| 226 |
- for _, t := range tasks {
|
|
| 227 |
- if isTaskRunning(t) {
|
|
| 228 |
- // Collect all running instances of this service |
|
| 229 |
- nodeTasks[serviceID][t.NodeID] = append(nodeTasks[serviceID][t.NodeID], t) |
|
| 230 |
- } else {
|
|
| 231 |
- // for finished tasks, check restartPolicy |
|
| 232 |
- if isTaskCompleted(t, restartCondition(t)) {
|
|
| 233 |
- nodeCompleted[serviceID][t.NodeID] = struct{}{}
|
|
| 234 |
- } |
|
| 235 |
- } |
|
| 236 |
- } |
|
| 237 |
- } |
|
| 238 |
- }) |
|
| 239 |
- |
|
| 240 |
- _, err := g.store.Batch(func(batch *store.Batch) error {
|
|
| 241 |
- var updateTasks []slot |
|
| 242 |
- for _, serviceID := range serviceIDs {
|
|
| 243 |
- if _, exists := nodeTasks[serviceID]; !exists {
|
|
| 244 |
- continue |
|
| 245 |
- } |
|
| 246 |
- |
|
| 247 |
- service := g.globalServices[serviceID] |
|
| 248 |
- |
|
| 249 |
- for nodeID, node := range g.nodes {
|
|
| 250 |
- meetsConstraints := constraint.NodeMatches(service.constraints, node) |
|
| 251 |
- ntasks := nodeTasks[serviceID][nodeID] |
|
| 252 |
- delete(nodeTasks[serviceID], nodeID) |
|
| 253 |
- |
|
| 254 |
- // if restart policy considers this node has finished its task |
|
| 255 |
- // it should remove all running tasks |
|
| 256 |
- if _, exists := nodeCompleted[serviceID][nodeID]; exists || !meetsConstraints {
|
|
| 257 |
- g.removeTasks(ctx, batch, ntasks) |
|
| 258 |
- continue |
|
| 259 |
- } |
|
| 260 |
- |
|
| 261 |
- if node.Spec.Availability == api.NodeAvailabilityPause {
|
|
| 262 |
- // the node is paused, so we won't add or update |
|
| 263 |
- // any tasks |
|
| 264 |
- continue |
|
| 265 |
- } |
|
| 266 |
- |
|
| 267 |
- // this node needs to run 1 copy of the task |
|
| 268 |
- if len(ntasks) == 0 {
|
|
| 269 |
- g.addTask(ctx, batch, service.Service, nodeID) |
|
| 270 |
- } else {
|
|
| 271 |
- updateTasks = append(updateTasks, ntasks) |
|
| 272 |
- } |
|
| 273 |
- } |
|
| 274 |
- if len(updateTasks) > 0 {
|
|
| 275 |
- g.updater.Update(ctx, g.cluster, service.Service, updateTasks) |
|
| 276 |
- } |
|
| 277 |
- |
|
| 278 |
- // Remove any tasks assigned to nodes not found in g.nodes. |
|
| 279 |
- // These must be associated with nodes that are drained, or |
|
| 280 |
- // nodes that no longer exist. |
|
| 281 |
- for _, ntasks := range nodeTasks[serviceID] {
|
|
| 282 |
- g.removeTasks(ctx, batch, ntasks) |
|
| 283 |
- } |
|
| 284 |
- } |
|
| 285 |
- return nil |
|
| 286 |
- }) |
|
| 287 |
- if err != nil {
|
|
| 288 |
- log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServices transaction failed")
|
|
| 289 |
- } |
|
| 290 |
-} |
|
| 291 |
- |
|
| 292 |
-// updateNode updates g.nodes based on the current node value |
|
| 293 |
-func (g *GlobalOrchestrator) updateNode(node *api.Node) {
|
|
| 294 |
- if node.Spec.Availability == api.NodeAvailabilityDrain {
|
|
| 295 |
- delete(g.nodes, node.ID) |
|
| 296 |
- } else {
|
|
| 297 |
- g.nodes[node.ID] = node |
|
| 298 |
- } |
|
| 299 |
-} |
|
| 300 |
- |
|
| 301 |
-// updateService updates g.globalServices based on the current service value |
|
| 302 |
-func (g *GlobalOrchestrator) updateService(service *api.Service) {
|
|
| 303 |
- var constraints []constraint.Constraint |
|
| 304 |
- |
|
| 305 |
- if service.Spec.Task.Placement != nil && len(service.Spec.Task.Placement.Constraints) != 0 {
|
|
| 306 |
- constraints, _ = constraint.Parse(service.Spec.Task.Placement.Constraints) |
|
| 307 |
- } |
|
| 308 |
- |
|
| 309 |
- g.globalServices[service.ID] = globalService{
|
|
| 310 |
- Service: service, |
|
| 311 |
- constraints: constraints, |
|
| 312 |
- } |
|
| 313 |
-} |
|
| 314 |
- |
|
| 315 |
-// reconcileOneNode checks all global services on one node |
|
| 316 |
-func (g *GlobalOrchestrator) reconcileOneNode(ctx context.Context, node *api.Node) {
|
|
| 317 |
- if node.Spec.Availability == api.NodeAvailabilityDrain {
|
|
| 318 |
- log.G(ctx).Debugf("global orchestrator: node %s in drain state, removing tasks from it", node.ID)
|
|
| 319 |
- g.removeTasksFromNode(ctx, node) |
|
| 320 |
- return |
|
| 321 |
- } |
|
| 322 |
- |
|
| 323 |
- var serviceIDs []string |
|
| 324 |
- for id := range g.globalServices {
|
|
| 325 |
- serviceIDs = append(serviceIDs, id) |
|
| 326 |
- } |
|
| 327 |
- g.reconcileServicesOneNode(ctx, serviceIDs, node.ID) |
|
| 328 |
-} |
|
| 329 |
- |
|
| 330 |
-// reconcileServicesOneNode checks the specified services on one node |
|
| 331 |
-func (g *GlobalOrchestrator) reconcileServicesOneNode(ctx context.Context, serviceIDs []string, nodeID string) {
|
|
| 332 |
- node, exists := g.nodes[nodeID] |
|
| 333 |
- if !exists {
|
|
| 334 |
- return |
|
| 335 |
- } |
|
| 336 |
- |
|
| 337 |
- // whether each service has completed on the node |
|
| 338 |
- completed := make(map[string]bool) |
|
| 339 |
- // tasks by service |
|
| 340 |
- tasks := make(map[string][]*api.Task) |
|
| 341 |
- |
|
| 342 |
- var ( |
|
| 343 |
- tasksOnNode []*api.Task |
|
| 344 |
- err error |
|
| 345 |
- ) |
|
| 346 |
- |
|
| 347 |
- g.store.View(func(tx store.ReadTx) {
|
|
| 348 |
- tasksOnNode, err = store.FindTasks(tx, store.ByNodeID(nodeID)) |
|
| 349 |
- }) |
|
| 350 |
- if err != nil {
|
|
| 351 |
- log.G(ctx).WithError(err).Errorf("global orchestrator: reconcile failed finding tasks on node %s", nodeID)
|
|
| 352 |
- return |
|
| 353 |
- } |
|
| 354 |
- |
|
| 355 |
- for _, serviceID := range serviceIDs {
|
|
| 356 |
- for _, t := range tasksOnNode {
|
|
| 357 |
- if t.ServiceID != serviceID {
|
|
| 358 |
- continue |
|
| 359 |
- } |
|
| 360 |
- if isTaskRunning(t) {
|
|
| 361 |
- tasks[serviceID] = append(tasks[serviceID], t) |
|
| 362 |
- } else {
|
|
| 363 |
- if isTaskCompleted(t, restartCondition(t)) {
|
|
| 364 |
- completed[serviceID] = true |
|
| 365 |
- } |
|
| 366 |
- } |
|
| 367 |
- } |
|
| 368 |
- } |
|
| 369 |
- |
|
| 370 |
- _, err = g.store.Batch(func(batch *store.Batch) error {
|
|
| 371 |
- for _, serviceID := range serviceIDs {
|
|
| 372 |
- service, exists := g.globalServices[serviceID] |
|
| 373 |
- if !exists {
|
|
| 374 |
- continue |
|
| 375 |
- } |
|
| 376 |
- |
|
| 377 |
- if !constraint.NodeMatches(service.constraints, node) {
|
|
| 378 |
- continue |
|
| 379 |
- } |
|
| 380 |
- |
|
| 381 |
- // if restart policy considers this node has finished its task |
|
| 382 |
- // it should remove all running tasks |
|
| 383 |
- if completed[serviceID] {
|
|
| 384 |
- g.removeTasks(ctx, batch, tasks[serviceID]) |
|
| 385 |
- continue |
|
| 386 |
- } |
|
| 387 |
- |
|
| 388 |
- if node.Spec.Availability == api.NodeAvailabilityPause {
|
|
| 389 |
- // the node is paused, so we won't add or update tasks |
|
| 390 |
- continue |
|
| 391 |
- } |
|
| 392 |
- |
|
| 393 |
- if len(tasks) == 0 {
|
|
| 394 |
- g.addTask(ctx, batch, service.Service, nodeID) |
|
| 395 |
- } else {
|
|
| 396 |
- // If task is out of date, update it. This can happen |
|
| 397 |
- // on node reconciliation if, for example, we pause a |
|
| 398 |
- // node, update the service, and then activate the node |
|
| 399 |
- // later. |
|
| 400 |
- |
|
| 401 |
- // We don't use g.updater here for two reasons: |
|
| 402 |
- // - This is not a rolling update. Since it was not |
|
| 403 |
- // triggered directly by updating the service, it |
|
| 404 |
- // should not observe the rolling update parameters |
|
| 405 |
- // or show status in UpdateStatus. |
|
| 406 |
- // - Calling Update cancels any current rolling updates |
|
| 407 |
- // for the service, such as one triggered by service |
|
| 408 |
- // reconciliation. |
|
| 409 |
- |
|
| 410 |
- var ( |
|
| 411 |
- dirtyTasks []*api.Task |
|
| 412 |
- cleanTasks []*api.Task |
|
| 413 |
- ) |
|
| 414 |
- |
|
| 415 |
- for _, t := range tasks[serviceID] {
|
|
| 416 |
- if isTaskDirty(service.Service, t) {
|
|
| 417 |
- dirtyTasks = append(dirtyTasks, t) |
|
| 418 |
- } else {
|
|
| 419 |
- cleanTasks = append(cleanTasks, t) |
|
| 420 |
- } |
|
| 421 |
- } |
|
| 422 |
- |
|
| 423 |
- if len(cleanTasks) == 0 {
|
|
| 424 |
- g.addTask(ctx, batch, service.Service, nodeID) |
|
| 425 |
- } else {
|
|
| 426 |
- dirtyTasks = append(dirtyTasks, cleanTasks[1:]...) |
|
| 427 |
- } |
|
| 428 |
- g.removeTasks(ctx, batch, dirtyTasks) |
|
| 429 |
- } |
|
| 430 |
- } |
|
| 431 |
- return nil |
|
| 432 |
- }) |
|
| 433 |
- if err != nil {
|
|
| 434 |
- log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServiceOneNode batch failed")
|
|
| 435 |
- } |
|
| 436 |
-} |
|
| 437 |
- |
|
| 438 |
-// restartTask calls the restart supervisor's Restart function, which |
|
| 439 |
-// sets a task's desired state to shutdown and restarts it if the restart |
|
| 440 |
-// policy calls for it to be restarted. |
|
| 441 |
-func (g *GlobalOrchestrator) restartTask(ctx context.Context, taskID string, serviceID string) {
|
|
| 442 |
- err := g.store.Update(func(tx store.Tx) error {
|
|
| 443 |
- t := store.GetTask(tx, taskID) |
|
| 444 |
- if t == nil || t.DesiredState > api.TaskStateRunning {
|
|
| 445 |
- return nil |
|
| 446 |
- } |
|
| 447 |
- service := store.GetService(tx, serviceID) |
|
| 448 |
- if service == nil {
|
|
| 449 |
- return nil |
|
| 450 |
- } |
|
| 451 |
- return g.restarts.Restart(ctx, tx, g.cluster, service, *t) |
|
| 452 |
- }) |
|
| 453 |
- if err != nil {
|
|
| 454 |
- log.G(ctx).WithError(err).Errorf("global orchestrator: restartTask transaction failed")
|
|
| 455 |
- } |
|
| 456 |
-} |
|
| 457 |
- |
|
| 458 |
-func (g *GlobalOrchestrator) removeTask(ctx context.Context, batch *store.Batch, t *api.Task) {
|
|
| 459 |
- // set existing task DesiredState to TaskStateShutdown |
|
| 460 |
- // TODO(aaronl): optimistic update? |
|
| 461 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 462 |
- t = store.GetTask(tx, t.ID) |
|
| 463 |
- if t != nil && t.DesiredState < api.TaskStateShutdown {
|
|
| 464 |
- t.DesiredState = api.TaskStateShutdown |
|
| 465 |
- return store.UpdateTask(tx, t) |
|
| 466 |
- } |
|
| 467 |
- return nil |
|
| 468 |
- }) |
|
| 469 |
- if err != nil {
|
|
| 470 |
- log.G(ctx).WithError(err).Errorf("global orchestrator: removeTask failed to remove %s", t.ID)
|
|
| 471 |
- } |
|
| 472 |
-} |
|
| 473 |
- |
|
| 474 |
-func (g *GlobalOrchestrator) addTask(ctx context.Context, batch *store.Batch, service *api.Service, nodeID string) {
|
|
| 475 |
- task := newTask(g.cluster, service, 0, nodeID) |
|
| 476 |
- |
|
| 477 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 478 |
- return store.CreateTask(tx, task) |
|
| 479 |
- }) |
|
| 480 |
- if err != nil {
|
|
| 481 |
- log.G(ctx).WithError(err).Errorf("global orchestrator: failed to create task")
|
|
| 482 |
- } |
|
| 483 |
-} |
|
| 484 |
- |
|
| 485 |
-func (g *GlobalOrchestrator) removeTasks(ctx context.Context, batch *store.Batch, tasks []*api.Task) {
|
|
| 486 |
- for _, t := range tasks {
|
|
| 487 |
- g.removeTask(ctx, batch, t) |
|
| 488 |
- } |
|
| 489 |
-} |
|
| 490 |
- |
|
| 491 |
-func isTaskRunning(t *api.Task) bool {
|
|
| 492 |
- return t != nil && t.DesiredState <= api.TaskStateRunning && t.Status.State <= api.TaskStateRunning |
|
| 493 |
-} |
|
| 494 |
- |
|
| 495 |
-func isTaskCompleted(t *api.Task, restartPolicy api.RestartPolicy_RestartCondition) bool {
|
|
| 496 |
- if t == nil || isTaskRunning(t) {
|
|
| 497 |
- return false |
|
| 498 |
- } |
|
| 499 |
- return restartPolicy == api.RestartOnNone || |
|
| 500 |
- (restartPolicy == api.RestartOnFailure && t.Status.State == api.TaskStateCompleted) |
|
| 501 |
-} |
|
| 502 |
- |
|
| 503 |
-func isTaskTerminated(t *api.Task) bool {
|
|
| 504 |
- return t != nil && t.Status.State > api.TaskStateRunning |
|
| 505 |
-} |
|
| 506 |
- |
|
| 507 |
-func isGlobalService(service *api.Service) bool {
|
|
| 508 |
- if service == nil {
|
|
| 509 |
- return false |
|
| 510 |
- } |
|
| 511 |
- _, ok := service.Spec.GetMode().(*api.ServiceSpec_Global) |
|
| 512 |
- return ok |
|
| 513 |
-} |
| 514 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,508 @@ |
| 0 |
+package global |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "github.com/docker/swarmkit/api" |
|
| 4 |
+ "github.com/docker/swarmkit/log" |
|
| 5 |
+ "github.com/docker/swarmkit/manager/constraint" |
|
| 6 |
+ "github.com/docker/swarmkit/manager/orchestrator" |
|
| 7 |
+ "github.com/docker/swarmkit/manager/orchestrator/restart" |
|
| 8 |
+ "github.com/docker/swarmkit/manager/orchestrator/update" |
|
| 9 |
+ "github.com/docker/swarmkit/manager/state" |
|
| 10 |
+ "github.com/docker/swarmkit/manager/state/store" |
|
| 11 |
+ "golang.org/x/net/context" |
|
| 12 |
+) |
|
| 13 |
+ |
|
| 14 |
+type globalService struct {
|
|
| 15 |
+ *api.Service |
|
| 16 |
+ |
|
| 17 |
+ // Compiled constraints |
|
| 18 |
+ constraints []constraint.Constraint |
|
| 19 |
+} |
|
| 20 |
+ |
|
| 21 |
+// Orchestrator runs a reconciliation loop to create and destroy tasks as |
|
| 22 |
+// necessary for global services. |
|
| 23 |
+type Orchestrator struct {
|
|
| 24 |
+ store *store.MemoryStore |
|
| 25 |
+ // nodes is the set of non-drained nodes in the cluster, indexed by node ID |
|
| 26 |
+ nodes map[string]*api.Node |
|
| 27 |
+ // globalServices has all the global services in the cluster, indexed by ServiceID |
|
| 28 |
+ globalServices map[string]globalService |
|
| 29 |
+ |
|
| 30 |
+ // stopChan signals to the state machine to stop running. |
|
| 31 |
+ stopChan chan struct{}
|
|
| 32 |
+ // doneChan is closed when the state machine terminates. |
|
| 33 |
+ doneChan chan struct{}
|
|
| 34 |
+ |
|
| 35 |
+ updater *update.Supervisor |
|
| 36 |
+ restarts *restart.Supervisor |
|
| 37 |
+ |
|
| 38 |
+ cluster *api.Cluster // local instance of the cluster |
|
| 39 |
+} |
|
| 40 |
+ |
|
| 41 |
+// NewGlobalOrchestrator creates a new global Orchestrator |
|
| 42 |
+func NewGlobalOrchestrator(store *store.MemoryStore) *Orchestrator {
|
|
| 43 |
+ restartSupervisor := restart.NewSupervisor(store) |
|
| 44 |
+ updater := update.NewSupervisor(store, restartSupervisor) |
|
| 45 |
+ return &Orchestrator{
|
|
| 46 |
+ store: store, |
|
| 47 |
+ nodes: make(map[string]*api.Node), |
|
| 48 |
+ globalServices: make(map[string]globalService), |
|
| 49 |
+ stopChan: make(chan struct{}),
|
|
| 50 |
+ doneChan: make(chan struct{}),
|
|
| 51 |
+ updater: updater, |
|
| 52 |
+ restarts: restartSupervisor, |
|
| 53 |
+ } |
|
| 54 |
+} |
|
| 55 |
+ |
|
| 56 |
+// Run contains the global orchestrator event loop |
|
| 57 |
+func (g *Orchestrator) Run(ctx context.Context) error {
|
|
| 58 |
+ defer close(g.doneChan) |
|
| 59 |
+ |
|
| 60 |
+ // Watch changes to services and tasks |
|
| 61 |
+ queue := g.store.WatchQueue() |
|
| 62 |
+ watcher, cancel := queue.Watch() |
|
| 63 |
+ defer cancel() |
|
| 64 |
+ |
|
| 65 |
+ // lookup the cluster |
|
| 66 |
+ var err error |
|
| 67 |
+ g.store.View(func(readTx store.ReadTx) {
|
|
| 68 |
+ var clusters []*api.Cluster |
|
| 69 |
+ clusters, err = store.FindClusters(readTx, store.ByName("default"))
|
|
| 70 |
+ |
|
| 71 |
+ if len(clusters) != 1 {
|
|
| 72 |
+ return // just pick up the cluster when it is created. |
|
| 73 |
+ } |
|
| 74 |
+ g.cluster = clusters[0] |
|
| 75 |
+ }) |
|
| 76 |
+ if err != nil {
|
|
| 77 |
+ return err |
|
| 78 |
+ } |
|
| 79 |
+ |
|
| 80 |
+ // Get list of nodes |
|
| 81 |
+ var nodes []*api.Node |
|
| 82 |
+ g.store.View(func(readTx store.ReadTx) {
|
|
| 83 |
+ nodes, err = store.FindNodes(readTx, store.All) |
|
| 84 |
+ }) |
|
| 85 |
+ if err != nil {
|
|
| 86 |
+ return err |
|
| 87 |
+ } |
|
| 88 |
+ for _, n := range nodes {
|
|
| 89 |
+ g.updateNode(n) |
|
| 90 |
+ } |
|
| 91 |
+ |
|
| 92 |
+ // Lookup global services |
|
| 93 |
+ var existingServices []*api.Service |
|
| 94 |
+ g.store.View(func(readTx store.ReadTx) {
|
|
| 95 |
+ existingServices, err = store.FindServices(readTx, store.All) |
|
| 96 |
+ }) |
|
| 97 |
+ if err != nil {
|
|
| 98 |
+ return err |
|
| 99 |
+ } |
|
| 100 |
+ |
|
| 101 |
+ var reconcileServiceIDs []string |
|
| 102 |
+ for _, s := range existingServices {
|
|
| 103 |
+ if orchestrator.IsGlobalService(s) {
|
|
| 104 |
+ g.updateService(s) |
|
| 105 |
+ reconcileServiceIDs = append(reconcileServiceIDs, s.ID) |
|
| 106 |
+ } |
|
| 107 |
+ } |
|
| 108 |
+ g.reconcileServices(ctx, reconcileServiceIDs) |
|
| 109 |
+ |
|
| 110 |
+ for {
|
|
| 111 |
+ select {
|
|
| 112 |
+ case event := <-watcher: |
|
| 113 |
+ // TODO(stevvooe): Use ctx to limit running time of operation. |
|
| 114 |
+ switch v := event.(type) {
|
|
| 115 |
+ case state.EventUpdateCluster: |
|
| 116 |
+ g.cluster = v.Cluster |
|
| 117 |
+ case state.EventCreateService: |
|
| 118 |
+ if !orchestrator.IsGlobalService(v.Service) {
|
|
| 119 |
+ continue |
|
| 120 |
+ } |
|
| 121 |
+ g.updateService(v.Service) |
|
| 122 |
+ g.reconcileServices(ctx, []string{v.Service.ID})
|
|
| 123 |
+ case state.EventUpdateService: |
|
| 124 |
+ if !orchestrator.IsGlobalService(v.Service) {
|
|
| 125 |
+ continue |
|
| 126 |
+ } |
|
| 127 |
+ g.updateService(v.Service) |
|
| 128 |
+ g.reconcileServices(ctx, []string{v.Service.ID})
|
|
| 129 |
+ case state.EventDeleteService: |
|
| 130 |
+ if !orchestrator.IsGlobalService(v.Service) {
|
|
| 131 |
+ continue |
|
| 132 |
+ } |
|
| 133 |
+ orchestrator.DeleteServiceTasks(ctx, g.store, v.Service) |
|
| 134 |
+ // delete the service from service map |
|
| 135 |
+ delete(g.globalServices, v.Service.ID) |
|
| 136 |
+ g.restarts.ClearServiceHistory(v.Service.ID) |
|
| 137 |
+ case state.EventCreateNode: |
|
| 138 |
+ g.updateNode(v.Node) |
|
| 139 |
+ g.reconcileOneNode(ctx, v.Node) |
|
| 140 |
+ case state.EventUpdateNode: |
|
| 141 |
+ g.updateNode(v.Node) |
|
| 142 |
+ switch v.Node.Status.State {
|
|
| 143 |
+ // NodeStatus_DISCONNECTED is a transient state, no need to make any change |
|
| 144 |
+ case api.NodeStatus_DOWN: |
|
| 145 |
+ g.removeTasksFromNode(ctx, v.Node) |
|
| 146 |
+ case api.NodeStatus_READY: |
|
| 147 |
+ // node could come back to READY from DOWN or DISCONNECT |
|
| 148 |
+ g.reconcileOneNode(ctx, v.Node) |
|
| 149 |
+ } |
|
| 150 |
+ case state.EventDeleteNode: |
|
| 151 |
+ g.removeTasksFromNode(ctx, v.Node) |
|
| 152 |
+ delete(g.nodes, v.Node.ID) |
|
| 153 |
+ case state.EventUpdateTask: |
|
| 154 |
+ if _, exists := g.globalServices[v.Task.ServiceID]; !exists {
|
|
| 155 |
+ continue |
|
| 156 |
+ } |
|
| 157 |
+ // global orchestrator needs to inspect when a task has terminated |
|
| 158 |
+ // it should ignore tasks whose DesiredState is past running, which |
|
| 159 |
+ // means the task has been processed |
|
| 160 |
+ if isTaskTerminated(v.Task) {
|
|
| 161 |
+ g.restartTask(ctx, v.Task.ID, v.Task.ServiceID) |
|
| 162 |
+ } |
|
| 163 |
+ case state.EventDeleteTask: |
|
| 164 |
+ // CLI allows deleting task |
|
| 165 |
+ if _, exists := g.globalServices[v.Task.ServiceID]; !exists {
|
|
| 166 |
+ continue |
|
| 167 |
+ } |
|
| 168 |
+ g.reconcileServicesOneNode(ctx, []string{v.Task.ServiceID}, v.Task.NodeID)
|
|
| 169 |
+ } |
|
| 170 |
+ case <-g.stopChan: |
|
| 171 |
+ return nil |
|
| 172 |
+ } |
|
| 173 |
+ } |
|
| 174 |
+} |
|
| 175 |
+ |
|
| 176 |
+// Stop stops the orchestrator. |
|
| 177 |
+func (g *Orchestrator) Stop() {
|
|
| 178 |
+ close(g.stopChan) |
|
| 179 |
+ <-g.doneChan |
|
| 180 |
+ g.updater.CancelAll() |
|
| 181 |
+ g.restarts.CancelAll() |
|
| 182 |
+} |
|
| 183 |
+ |
|
| 184 |
+func (g *Orchestrator) removeTasksFromNode(ctx context.Context, node *api.Node) {
|
|
| 185 |
+ var ( |
|
| 186 |
+ tasks []*api.Task |
|
| 187 |
+ err error |
|
| 188 |
+ ) |
|
| 189 |
+ g.store.View(func(tx store.ReadTx) {
|
|
| 190 |
+ tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID)) |
|
| 191 |
+ }) |
|
| 192 |
+ if err != nil {
|
|
| 193 |
+ log.G(ctx).WithError(err).Errorf("global orchestrator: removeTasksFromNode failed finding tasks")
|
|
| 194 |
+ return |
|
| 195 |
+ } |
|
| 196 |
+ |
|
| 197 |
+ _, err = g.store.Batch(func(batch *store.Batch) error {
|
|
| 198 |
+ for _, t := range tasks {
|
|
| 199 |
+ // Global orchestrator only removes tasks from globalServices |
|
| 200 |
+ if _, exists := g.globalServices[t.ServiceID]; exists {
|
|
| 201 |
+ g.removeTask(ctx, batch, t) |
|
| 202 |
+ } |
|
| 203 |
+ } |
|
| 204 |
+ return nil |
|
| 205 |
+ }) |
|
| 206 |
+ if err != nil {
|
|
| 207 |
+ log.G(ctx).WithError(err).Errorf("global orchestrator: removeTasksFromNode failed")
|
|
| 208 |
+ } |
|
| 209 |
+} |
|
| 210 |
+ |
|
| 211 |
+func (g *Orchestrator) reconcileServices(ctx context.Context, serviceIDs []string) {
|
|
| 212 |
+ nodeCompleted := make(map[string]map[string]struct{})
|
|
| 213 |
+ nodeTasks := make(map[string]map[string][]*api.Task) |
|
| 214 |
+ |
|
| 215 |
+ g.store.View(func(tx store.ReadTx) {
|
|
| 216 |
+ for _, serviceID := range serviceIDs {
|
|
| 217 |
+ tasks, err := store.FindTasks(tx, store.ByServiceID(serviceID)) |
|
| 218 |
+ if err != nil {
|
|
| 219 |
+ log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServices failed finding tasks for service %s", serviceID)
|
|
| 220 |
+ continue |
|
| 221 |
+ } |
|
| 222 |
+ |
|
| 223 |
+ // a node may have completed this service |
|
| 224 |
+ nodeCompleted[serviceID] = make(map[string]struct{})
|
|
| 225 |
+ // nodeID -> task list |
|
| 226 |
+ nodeTasks[serviceID] = make(map[string][]*api.Task) |
|
| 227 |
+ |
|
| 228 |
+ for _, t := range tasks {
|
|
| 229 |
+ if isTaskRunning(t) {
|
|
| 230 |
+ // Collect all running instances of this service |
|
| 231 |
+ nodeTasks[serviceID][t.NodeID] = append(nodeTasks[serviceID][t.NodeID], t) |
|
| 232 |
+ } else {
|
|
| 233 |
+ // for finished tasks, check restartPolicy |
|
| 234 |
+ if isTaskCompleted(t, orchestrator.RestartCondition(t)) {
|
|
| 235 |
+ nodeCompleted[serviceID][t.NodeID] = struct{}{}
|
|
| 236 |
+ } |
|
| 237 |
+ } |
|
| 238 |
+ } |
|
| 239 |
+ } |
|
| 240 |
+ }) |
|
| 241 |
+ |
|
| 242 |
+ _, err := g.store.Batch(func(batch *store.Batch) error {
|
|
| 243 |
+ var updateTasks []orchestrator.Slot |
|
| 244 |
+ for _, serviceID := range serviceIDs {
|
|
| 245 |
+ if _, exists := nodeTasks[serviceID]; !exists {
|
|
| 246 |
+ continue |
|
| 247 |
+ } |
|
| 248 |
+ |
|
| 249 |
+ service := g.globalServices[serviceID] |
|
| 250 |
+ |
|
| 251 |
+ for nodeID, node := range g.nodes {
|
|
| 252 |
+ meetsConstraints := constraint.NodeMatches(service.constraints, node) |
|
| 253 |
+ ntasks := nodeTasks[serviceID][nodeID] |
|
| 254 |
+ delete(nodeTasks[serviceID], nodeID) |
|
| 255 |
+ |
|
| 256 |
+ // if restart policy considers this node has finished its task |
|
| 257 |
+ // it should remove all running tasks |
|
| 258 |
+ if _, exists := nodeCompleted[serviceID][nodeID]; exists || !meetsConstraints {
|
|
| 259 |
+ g.removeTasks(ctx, batch, ntasks) |
|
| 260 |
+ continue |
|
| 261 |
+ } |
|
| 262 |
+ |
|
| 263 |
+ if node.Spec.Availability == api.NodeAvailabilityPause {
|
|
| 264 |
+ // the node is paused, so we won't add or update |
|
| 265 |
+ // any tasks |
|
| 266 |
+ continue |
|
| 267 |
+ } |
|
| 268 |
+ |
|
| 269 |
+ // this node needs to run 1 copy of the task |
|
| 270 |
+ if len(ntasks) == 0 {
|
|
| 271 |
+ g.addTask(ctx, batch, service.Service, nodeID) |
|
| 272 |
+ } else {
|
|
| 273 |
+ updateTasks = append(updateTasks, ntasks) |
|
| 274 |
+ } |
|
| 275 |
+ } |
|
| 276 |
+ if len(updateTasks) > 0 {
|
|
| 277 |
+ g.updater.Update(ctx, g.cluster, service.Service, updateTasks) |
|
| 278 |
+ } |
|
| 279 |
+ |
|
| 280 |
+ // Remove any tasks assigned to nodes not found in g.nodes. |
|
| 281 |
+ // These must be associated with nodes that are drained, or |
|
| 282 |
+ // nodes that no longer exist. |
|
| 283 |
+ for _, ntasks := range nodeTasks[serviceID] {
|
|
| 284 |
+ g.removeTasks(ctx, batch, ntasks) |
|
| 285 |
+ } |
|
| 286 |
+ } |
|
| 287 |
+ return nil |
|
| 288 |
+ }) |
|
| 289 |
+ if err != nil {
|
|
| 290 |
+ log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServices transaction failed")
|
|
| 291 |
+ } |
|
| 292 |
+} |
|
| 293 |
+ |
|
| 294 |
+// updateNode updates g.nodes based on the current node value |
|
| 295 |
+func (g *Orchestrator) updateNode(node *api.Node) {
|
|
| 296 |
+ if node.Spec.Availability == api.NodeAvailabilityDrain {
|
|
| 297 |
+ delete(g.nodes, node.ID) |
|
| 298 |
+ } else {
|
|
| 299 |
+ g.nodes[node.ID] = node |
|
| 300 |
+ } |
|
| 301 |
+} |
|
| 302 |
+ |
|
| 303 |
+// updateService updates g.globalServices based on the current service value |
|
| 304 |
+func (g *Orchestrator) updateService(service *api.Service) {
|
|
| 305 |
+ var constraints []constraint.Constraint |
|
| 306 |
+ |
|
| 307 |
+ if service.Spec.Task.Placement != nil && len(service.Spec.Task.Placement.Constraints) != 0 {
|
|
| 308 |
+ constraints, _ = constraint.Parse(service.Spec.Task.Placement.Constraints) |
|
| 309 |
+ } |
|
| 310 |
+ |
|
| 311 |
+ g.globalServices[service.ID] = globalService{
|
|
| 312 |
+ Service: service, |
|
| 313 |
+ constraints: constraints, |
|
| 314 |
+ } |
|
| 315 |
+} |
|
| 316 |
+ |
|
| 317 |
+// reconcileOneNode checks all global services on one node |
|
| 318 |
+func (g *Orchestrator) reconcileOneNode(ctx context.Context, node *api.Node) {
|
|
| 319 |
+ if node.Spec.Availability == api.NodeAvailabilityDrain {
|
|
| 320 |
+ log.G(ctx).Debugf("global orchestrator: node %s in drain state, removing tasks from it", node.ID)
|
|
| 321 |
+ g.removeTasksFromNode(ctx, node) |
|
| 322 |
+ return |
|
| 323 |
+ } |
|
| 324 |
+ |
|
| 325 |
+ var serviceIDs []string |
|
| 326 |
+ for id := range g.globalServices {
|
|
| 327 |
+ serviceIDs = append(serviceIDs, id) |
|
| 328 |
+ } |
|
| 329 |
+ g.reconcileServicesOneNode(ctx, serviceIDs, node.ID) |
|
| 330 |
+} |
|
| 331 |
+ |
|
| 332 |
+// reconcileServicesOneNode checks the specified services on one node |
|
| 333 |
+func (g *Orchestrator) reconcileServicesOneNode(ctx context.Context, serviceIDs []string, nodeID string) {
|
|
| 334 |
+ node, exists := g.nodes[nodeID] |
|
| 335 |
+ if !exists {
|
|
| 336 |
+ return |
|
| 337 |
+ } |
|
| 338 |
+ |
|
| 339 |
+ // whether each service has completed on the node |
|
| 340 |
+ completed := make(map[string]bool) |
|
| 341 |
+ // tasks by service |
|
| 342 |
+ tasks := make(map[string][]*api.Task) |
|
| 343 |
+ |
|
| 344 |
+ var ( |
|
| 345 |
+ tasksOnNode []*api.Task |
|
| 346 |
+ err error |
|
| 347 |
+ ) |
|
| 348 |
+ |
|
| 349 |
+ g.store.View(func(tx store.ReadTx) {
|
|
| 350 |
+ tasksOnNode, err = store.FindTasks(tx, store.ByNodeID(nodeID)) |
|
| 351 |
+ }) |
|
| 352 |
+ if err != nil {
|
|
| 353 |
+ log.G(ctx).WithError(err).Errorf("global orchestrator: reconcile failed finding tasks on node %s", nodeID)
|
|
| 354 |
+ return |
|
| 355 |
+ } |
|
| 356 |
+ |
|
| 357 |
+ for _, serviceID := range serviceIDs {
|
|
| 358 |
+ for _, t := range tasksOnNode {
|
|
| 359 |
+ if t.ServiceID != serviceID {
|
|
| 360 |
+ continue |
|
| 361 |
+ } |
|
| 362 |
+ if isTaskRunning(t) {
|
|
| 363 |
+ tasks[serviceID] = append(tasks[serviceID], t) |
|
| 364 |
+ } else {
|
|
| 365 |
+ if isTaskCompleted(t, orchestrator.RestartCondition(t)) {
|
|
| 366 |
+ completed[serviceID] = true |
|
| 367 |
+ } |
|
| 368 |
+ } |
|
| 369 |
+ } |
|
| 370 |
+ } |
|
| 371 |
+ |
|
| 372 |
+ _, err = g.store.Batch(func(batch *store.Batch) error {
|
|
| 373 |
+ for _, serviceID := range serviceIDs {
|
|
| 374 |
+ service, exists := g.globalServices[serviceID] |
|
| 375 |
+ if !exists {
|
|
| 376 |
+ continue |
|
| 377 |
+ } |
|
| 378 |
+ |
|
| 379 |
+ if !constraint.NodeMatches(service.constraints, node) {
|
|
| 380 |
+ continue |
|
| 381 |
+ } |
|
| 382 |
+ |
|
| 383 |
+ // if restart policy considers this node has finished its task |
|
| 384 |
+ // it should remove all running tasks |
|
| 385 |
+ if completed[serviceID] {
|
|
| 386 |
+ g.removeTasks(ctx, batch, tasks[serviceID]) |
|
| 387 |
+ continue |
|
| 388 |
+ } |
|
| 389 |
+ |
|
| 390 |
+ if node.Spec.Availability == api.NodeAvailabilityPause {
|
|
| 391 |
+ // the node is paused, so we won't add or update tasks |
|
| 392 |
+ continue |
|
| 393 |
+ } |
|
| 394 |
+ |
|
| 395 |
+ if len(tasks) == 0 {
|
|
| 396 |
+ g.addTask(ctx, batch, service.Service, nodeID) |
|
| 397 |
+ } else {
|
|
| 398 |
+ // If task is out of date, update it. This can happen |
|
| 399 |
+ // on node reconciliation if, for example, we pause a |
|
| 400 |
+ // node, update the service, and then activate the node |
|
| 401 |
+ // later. |
|
| 402 |
+ |
|
| 403 |
+ // We don't use g.updater here for two reasons: |
|
| 404 |
+ // - This is not a rolling update. Since it was not |
|
| 405 |
+ // triggered directly by updating the service, it |
|
| 406 |
+ // should not observe the rolling update parameters |
|
| 407 |
+ // or show status in UpdateStatus. |
|
| 408 |
+ // - Calling Update cancels any current rolling updates |
|
| 409 |
+ // for the service, such as one triggered by service |
|
| 410 |
+ // reconciliation. |
|
| 411 |
+ |
|
| 412 |
+ var ( |
|
| 413 |
+ dirtyTasks []*api.Task |
|
| 414 |
+ cleanTasks []*api.Task |
|
| 415 |
+ ) |
|
| 416 |
+ |
|
| 417 |
+ for _, t := range tasks[serviceID] {
|
|
| 418 |
+ if orchestrator.IsTaskDirty(service.Service, t) {
|
|
| 419 |
+ dirtyTasks = append(dirtyTasks, t) |
|
| 420 |
+ } else {
|
|
| 421 |
+ cleanTasks = append(cleanTasks, t) |
|
| 422 |
+ } |
|
| 423 |
+ } |
|
| 424 |
+ |
|
| 425 |
+ if len(cleanTasks) == 0 {
|
|
| 426 |
+ g.addTask(ctx, batch, service.Service, nodeID) |
|
| 427 |
+ } else {
|
|
| 428 |
+ dirtyTasks = append(dirtyTasks, cleanTasks[1:]...) |
|
| 429 |
+ } |
|
| 430 |
+ g.removeTasks(ctx, batch, dirtyTasks) |
|
| 431 |
+ } |
|
| 432 |
+ } |
|
| 433 |
+ return nil |
|
| 434 |
+ }) |
|
| 435 |
+ if err != nil {
|
|
| 436 |
+ log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServiceOneNode batch failed")
|
|
| 437 |
+ } |
|
| 438 |
+} |
|
| 439 |
+ |
|
| 440 |
+// restartTask calls the restart supervisor's Restart function, which |
|
| 441 |
+// sets a task's desired state to shutdown and restarts it if the restart |
|
| 442 |
+// policy calls for it to be restarted. |
|
| 443 |
+func (g *Orchestrator) restartTask(ctx context.Context, taskID string, serviceID string) {
|
|
| 444 |
+ err := g.store.Update(func(tx store.Tx) error {
|
|
| 445 |
+ t := store.GetTask(tx, taskID) |
|
| 446 |
+ if t == nil || t.DesiredState > api.TaskStateRunning {
|
|
| 447 |
+ return nil |
|
| 448 |
+ } |
|
| 449 |
+ service := store.GetService(tx, serviceID) |
|
| 450 |
+ if service == nil {
|
|
| 451 |
+ return nil |
|
| 452 |
+ } |
|
| 453 |
+ return g.restarts.Restart(ctx, tx, g.cluster, service, *t) |
|
| 454 |
+ }) |
|
| 455 |
+ if err != nil {
|
|
| 456 |
+ log.G(ctx).WithError(err).Errorf("global orchestrator: restartTask transaction failed")
|
|
| 457 |
+ } |
|
| 458 |
+} |
|
| 459 |
+ |
|
| 460 |
+func (g *Orchestrator) removeTask(ctx context.Context, batch *store.Batch, t *api.Task) {
|
|
| 461 |
+ // set existing task DesiredState to TaskStateShutdown |
|
| 462 |
+ // TODO(aaronl): optimistic update? |
|
| 463 |
+ err := batch.Update(func(tx store.Tx) error {
|
|
| 464 |
+ t = store.GetTask(tx, t.ID) |
|
| 465 |
+ if t != nil && t.DesiredState < api.TaskStateShutdown {
|
|
| 466 |
+ t.DesiredState = api.TaskStateShutdown |
|
| 467 |
+ return store.UpdateTask(tx, t) |
|
| 468 |
+ } |
|
| 469 |
+ return nil |
|
| 470 |
+ }) |
|
| 471 |
+ if err != nil {
|
|
| 472 |
+ log.G(ctx).WithError(err).Errorf("global orchestrator: removeTask failed to remove %s", t.ID)
|
|
| 473 |
+ } |
|
| 474 |
+} |
|
| 475 |
+ |
|
| 476 |
+func (g *Orchestrator) addTask(ctx context.Context, batch *store.Batch, service *api.Service, nodeID string) {
|
|
| 477 |
+ task := orchestrator.NewTask(g.cluster, service, 0, nodeID) |
|
| 478 |
+ |
|
| 479 |
+ err := batch.Update(func(tx store.Tx) error {
|
|
| 480 |
+ return store.CreateTask(tx, task) |
|
| 481 |
+ }) |
|
| 482 |
+ if err != nil {
|
|
| 483 |
+ log.G(ctx).WithError(err).Errorf("global orchestrator: failed to create task")
|
|
| 484 |
+ } |
|
| 485 |
+} |
|
| 486 |
+ |
|
| 487 |
+func (g *Orchestrator) removeTasks(ctx context.Context, batch *store.Batch, tasks []*api.Task) {
|
|
| 488 |
+ for _, t := range tasks {
|
|
| 489 |
+ g.removeTask(ctx, batch, t) |
|
| 490 |
+ } |
|
| 491 |
+} |
|
| 492 |
+ |
|
| 493 |
+func isTaskRunning(t *api.Task) bool {
|
|
| 494 |
+ return t != nil && t.DesiredState <= api.TaskStateRunning && t.Status.State <= api.TaskStateRunning |
|
| 495 |
+} |
|
| 496 |
+ |
|
| 497 |
+func isTaskCompleted(t *api.Task, restartPolicy api.RestartPolicy_RestartCondition) bool {
|
|
| 498 |
+ if t == nil || isTaskRunning(t) {
|
|
| 499 |
+ return false |
|
| 500 |
+ } |
|
| 501 |
+ return restartPolicy == api.RestartOnNone || |
|
| 502 |
+ (restartPolicy == api.RestartOnFailure && t.Status.State == api.TaskStateCompleted) |
|
| 503 |
+} |
|
| 504 |
+ |
|
| 505 |
+func isTaskTerminated(t *api.Task) bool {
|
|
| 506 |
+ return t != nil && t.Status.State > api.TaskStateRunning |
|
| 507 |
+} |
| 0 | 508 |
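The new isTaskCompleted helper above decides whether a global service's task on a node is finished for good, i.e. whether the restart policy would ever bring it back on that node. The following standalone sketch illustrates that decision; the state and condition constants are simplified stand-ins for the api package types, not the vendored code itself:

package main

import "fmt"

// Simplified stand-ins for api.TaskState and api.RestartPolicy_RestartCondition.
type taskState int
type restartCondition int

const (
	stateRunning taskState = iota
	stateCompleted
	stateFailed
)

const (
	restartOnNone restartCondition = iota
	restartOnFailure
	restartOnAny
)

// completed mirrors the shape of isTaskCompleted: a task that is no longer
// running only counts as "completed" on its node if the restart policy would
// not recreate it there.
func completed(state taskState, cond restartCondition) bool {
	if state == stateRunning {
		return false
	}
	return cond == restartOnNone ||
		(cond == restartOnFailure && state == stateCompleted)
}

func main() {
	fmt.Println(completed(stateCompleted, restartOnNone))    // true: never restarted
	fmt.Println(completed(stateCompleted, restartOnFailure)) // true: clean exit, nothing to retry
	fmt.Println(completed(stateFailed, restartOnFailure))    // false: failures are retried
	fmt.Println(completed(stateFailed, restartOnAny))        // false: always restarted
}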
deleted file mode 100644 |
| ... | ... |
@@ -1,203 +0,0 @@ |
| 1 |
-package orchestrator |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "time" |
|
| 5 |
- |
|
| 6 |
- "github.com/docker/swarmkit/api" |
|
| 7 |
- "github.com/docker/swarmkit/identity" |
|
| 8 |
- "github.com/docker/swarmkit/log" |
|
| 9 |
- "github.com/docker/swarmkit/manager/state" |
|
| 10 |
- "github.com/docker/swarmkit/manager/state/store" |
|
| 11 |
- "github.com/docker/swarmkit/protobuf/ptypes" |
|
| 12 |
- "golang.org/x/net/context" |
|
| 13 |
-) |
|
| 14 |
- |
|
| 15 |
-// A ReplicatedOrchestrator runs a reconciliation loop to create and destroy |
|
| 16 |
-// tasks as necessary for the replicated services. |
|
| 17 |
-type ReplicatedOrchestrator struct {
|
|
| 18 |
- store *store.MemoryStore |
|
| 19 |
- |
|
| 20 |
- reconcileServices map[string]*api.Service |
|
| 21 |
- restartTasks map[string]struct{}
|
|
| 22 |
- |
|
| 23 |
- // stopChan signals to the state machine to stop running. |
|
| 24 |
- stopChan chan struct{}
|
|
| 25 |
- // doneChan is closed when the state machine terminates. |
|
| 26 |
- doneChan chan struct{}
|
|
| 27 |
- |
|
| 28 |
- updater *UpdateSupervisor |
|
| 29 |
- restarts *RestartSupervisor |
|
| 30 |
- |
|
| 31 |
- cluster *api.Cluster // local cluster instance |
|
| 32 |
-} |
|
| 33 |
- |
|
| 34 |
-// NewReplicatedOrchestrator creates a new ReplicatedOrchestrator. |
|
| 35 |
-func NewReplicatedOrchestrator(store *store.MemoryStore) *ReplicatedOrchestrator {
|
|
| 36 |
- restartSupervisor := NewRestartSupervisor(store) |
|
| 37 |
- updater := NewUpdateSupervisor(store, restartSupervisor) |
|
| 38 |
- return &ReplicatedOrchestrator{
|
|
| 39 |
- store: store, |
|
| 40 |
- stopChan: make(chan struct{}),
|
|
| 41 |
- doneChan: make(chan struct{}),
|
|
| 42 |
- reconcileServices: make(map[string]*api.Service), |
|
| 43 |
- restartTasks: make(map[string]struct{}),
|
|
| 44 |
- updater: updater, |
|
| 45 |
- restarts: restartSupervisor, |
|
| 46 |
- } |
|
| 47 |
-} |
|
| 48 |
- |
|
| 49 |
-// Run contains the orchestrator event loop. It runs until Stop is called. |
|
| 50 |
-func (r *ReplicatedOrchestrator) Run(ctx context.Context) error {
|
|
| 51 |
- defer close(r.doneChan) |
|
| 52 |
- |
|
| 53 |
- // Watch changes to services and tasks |
|
| 54 |
- queue := r.store.WatchQueue() |
|
| 55 |
- watcher, cancel := queue.Watch() |
|
| 56 |
- defer cancel() |
|
| 57 |
- |
|
| 58 |
- // Balance existing services and drain initial tasks attached to invalid |
|
| 59 |
- // nodes |
|
| 60 |
- var err error |
|
| 61 |
- r.store.View(func(readTx store.ReadTx) {
|
|
| 62 |
- if err = r.initTasks(ctx, readTx); err != nil {
|
|
| 63 |
- return |
|
| 64 |
- } |
|
| 65 |
- |
|
| 66 |
- if err = r.initServices(readTx); err != nil {
|
|
| 67 |
- return |
|
| 68 |
- } |
|
| 69 |
- |
|
| 70 |
- if err = r.initCluster(readTx); err != nil {
|
|
| 71 |
- return |
|
| 72 |
- } |
|
| 73 |
- }) |
|
| 74 |
- if err != nil {
|
|
| 75 |
- return err |
|
| 76 |
- } |
|
| 77 |
- |
|
| 78 |
- r.tick(ctx) |
|
| 79 |
- |
|
| 80 |
- for {
|
|
| 81 |
- select {
|
|
| 82 |
- case event := <-watcher: |
|
| 83 |
- // TODO(stevvooe): Use ctx to limit running time of operation. |
|
| 84 |
- r.handleTaskEvent(ctx, event) |
|
| 85 |
- r.handleServiceEvent(ctx, event) |
|
| 86 |
- switch v := event.(type) {
|
|
| 87 |
- case state.EventCommit: |
|
| 88 |
- r.tick(ctx) |
|
| 89 |
- case state.EventUpdateCluster: |
|
| 90 |
- r.cluster = v.Cluster |
|
| 91 |
- } |
|
| 92 |
- case <-r.stopChan: |
|
| 93 |
- return nil |
|
| 94 |
- } |
|
| 95 |
- } |
|
| 96 |
-} |
|
| 97 |
- |
|
| 98 |
-// Stop stops the orchestrator. |
|
| 99 |
-func (r *ReplicatedOrchestrator) Stop() {
|
|
| 100 |
- close(r.stopChan) |
|
| 101 |
- <-r.doneChan |
|
| 102 |
- r.updater.CancelAll() |
|
| 103 |
- r.restarts.CancelAll() |
|
| 104 |
-} |
|
| 105 |
- |
|
| 106 |
-func (r *ReplicatedOrchestrator) tick(ctx context.Context) {
|
|
| 107 |
- // tickTasks must be called first, so we respond to task-level changes |
|
| 108 |
- // before performing service reconcillation. |
|
| 109 |
- r.tickTasks(ctx) |
|
| 110 |
- r.tickServices(ctx) |
|
| 111 |
-} |
|
| 112 |
- |
|
| 113 |
-func newTask(cluster *api.Cluster, service *api.Service, slot uint64, nodeID string) *api.Task {
|
|
| 114 |
- var logDriver *api.Driver |
|
| 115 |
- if service.Spec.Task.LogDriver != nil {
|
|
| 116 |
- // use the log driver specific to the task, if we have it. |
|
| 117 |
- logDriver = service.Spec.Task.LogDriver |
|
| 118 |
- } else if cluster != nil {
|
|
| 119 |
- // pick up the cluster default, if available. |
|
| 120 |
- logDriver = cluster.Spec.TaskDefaults.LogDriver // nil is okay here. |
|
| 121 |
- } |
|
| 122 |
- |
|
| 123 |
- taskID := identity.NewID() |
|
| 124 |
- task := api.Task{
|
|
| 125 |
- ID: taskID, |
|
| 126 |
- ServiceAnnotations: service.Spec.Annotations, |
|
| 127 |
- Spec: service.Spec.Task, |
|
| 128 |
- ServiceID: service.ID, |
|
| 129 |
- Slot: slot, |
|
| 130 |
- Status: api.TaskStatus{
|
|
| 131 |
- State: api.TaskStateNew, |
|
| 132 |
- Timestamp: ptypes.MustTimestampProto(time.Now()), |
|
| 133 |
- Message: "created", |
|
| 134 |
- }, |
|
| 135 |
- Endpoint: &api.Endpoint{
|
|
| 136 |
- Spec: service.Spec.Endpoint.Copy(), |
|
| 137 |
- }, |
|
| 138 |
- DesiredState: api.TaskStateRunning, |
|
| 139 |
- LogDriver: logDriver, |
|
| 140 |
- } |
|
| 141 |
- |
|
| 142 |
- // In global mode we also set the NodeID |
|
| 143 |
- if nodeID != "" {
|
|
| 144 |
- task.NodeID = nodeID |
|
| 145 |
- } |
|
| 146 |
- |
|
| 147 |
- // Assign name based on task name schema |
|
| 148 |
- name := store.TaskName(&task) |
|
| 149 |
- task.Annotations = api.Annotations{Name: name}
|
|
| 150 |
- |
|
| 151 |
- return &task |
|
| 152 |
-} |
|
| 153 |
- |
|
| 154 |
-// isReplicatedService checks if a service is a replicated service |
|
| 155 |
-func isReplicatedService(service *api.Service) bool {
|
|
| 156 |
- // service nil validation is required as there are scenarios |
|
| 157 |
- // where service is removed from store |
|
| 158 |
- if service == nil {
|
|
| 159 |
- return false |
|
| 160 |
- } |
|
| 161 |
- _, ok := service.Spec.GetMode().(*api.ServiceSpec_Replicated) |
|
| 162 |
- return ok |
|
| 163 |
-} |
|
| 164 |
- |
|
| 165 |
-func deleteServiceTasks(ctx context.Context, s *store.MemoryStore, service *api.Service) {
|
|
| 166 |
- var ( |
|
| 167 |
- tasks []*api.Task |
|
| 168 |
- err error |
|
| 169 |
- ) |
|
| 170 |
- s.View(func(tx store.ReadTx) {
|
|
| 171 |
- tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID)) |
|
| 172 |
- }) |
|
| 173 |
- if err != nil {
|
|
| 174 |
- log.G(ctx).WithError(err).Errorf("failed to list tasks")
|
|
| 175 |
- return |
|
| 176 |
- } |
|
| 177 |
- |
|
| 178 |
- _, err = s.Batch(func(batch *store.Batch) error {
|
|
| 179 |
- for _, t := range tasks {
|
|
| 180 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 181 |
- if err := store.DeleteTask(tx, t.ID); err != nil {
|
|
| 182 |
- log.G(ctx).WithError(err).Errorf("failed to delete task")
|
|
| 183 |
- } |
|
| 184 |
- return nil |
|
| 185 |
- }) |
|
| 186 |
- if err != nil {
|
|
| 187 |
- return err |
|
| 188 |
- } |
|
| 189 |
- } |
|
| 190 |
- return nil |
|
| 191 |
- }) |
|
| 192 |
- if err != nil {
|
|
| 193 |
- log.G(ctx).WithError(err).Errorf("task search transaction failed")
|
|
| 194 |
- } |
|
| 195 |
-} |
|
| 196 |
- |
|
| 197 |
-func restartCondition(task *api.Task) api.RestartPolicy_RestartCondition {
|
|
| 198 |
- restartCondition := api.RestartOnAny |
|
| 199 |
- if task.Spec.Restart != nil {
|
|
| 200 |
- restartCondition = task.Spec.Restart.Condition |
|
| 201 |
- } |
|
| 202 |
- return restartCondition |
|
| 203 |
-} |
| 204 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,108 @@ |
| 0 |
+package replicated |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "github.com/docker/swarmkit/api" |
|
| 4 |
+ "github.com/docker/swarmkit/manager/orchestrator/restart" |
|
| 5 |
+ "github.com/docker/swarmkit/manager/orchestrator/update" |
|
| 6 |
+ "github.com/docker/swarmkit/manager/state" |
|
| 7 |
+ "github.com/docker/swarmkit/manager/state/store" |
|
| 8 |
+ "golang.org/x/net/context" |
|
| 9 |
+) |
|
| 10 |
+ |
|
| 11 |
+// An Orchestrator runs a reconciliation loop to create and destroy |
|
| 12 |
+// tasks as necessary for the replicated services. |
|
| 13 |
+type Orchestrator struct {
|
|
| 14 |
+ store *store.MemoryStore |
|
| 15 |
+ |
|
| 16 |
+ reconcileServices map[string]*api.Service |
|
| 17 |
+ restartTasks map[string]struct{}
|
|
| 18 |
+ |
|
| 19 |
+ // stopChan signals to the state machine to stop running. |
|
| 20 |
+ stopChan chan struct{}
|
|
| 21 |
+ // doneChan is closed when the state machine terminates. |
|
| 22 |
+ doneChan chan struct{}
|
|
| 23 |
+ |
|
| 24 |
+ updater *update.Supervisor |
|
| 25 |
+ restarts *restart.Supervisor |
|
| 26 |
+ |
|
| 27 |
+ cluster *api.Cluster // local cluster instance |
|
| 28 |
+} |
|
| 29 |
+ |
|
| 30 |
+// NewReplicatedOrchestrator creates a new replicated Orchestrator. |
|
| 31 |
+func NewReplicatedOrchestrator(store *store.MemoryStore) *Orchestrator {
|
|
| 32 |
+ restartSupervisor := restart.NewSupervisor(store) |
|
| 33 |
+ updater := update.NewSupervisor(store, restartSupervisor) |
|
| 34 |
+ return &Orchestrator{
|
|
| 35 |
+ store: store, |
|
| 36 |
+ stopChan: make(chan struct{}),
|
|
| 37 |
+ doneChan: make(chan struct{}),
|
|
| 38 |
+ reconcileServices: make(map[string]*api.Service), |
|
| 39 |
+ restartTasks: make(map[string]struct{}),
|
|
| 40 |
+ updater: updater, |
|
| 41 |
+ restarts: restartSupervisor, |
|
| 42 |
+ } |
|
| 43 |
+} |
|
| 44 |
+ |
|
| 45 |
+// Run contains the orchestrator event loop. It runs until Stop is called. |
|
| 46 |
+func (r *Orchestrator) Run(ctx context.Context) error {
|
|
| 47 |
+ defer close(r.doneChan) |
|
| 48 |
+ |
|
| 49 |
+ // Watch changes to services and tasks |
|
| 50 |
+ queue := r.store.WatchQueue() |
|
| 51 |
+ watcher, cancel := queue.Watch() |
|
| 52 |
+ defer cancel() |
|
| 53 |
+ |
|
| 54 |
+ // Balance existing services and drain initial tasks attached to invalid |
|
| 55 |
+ // nodes |
|
| 56 |
+ var err error |
|
| 57 |
+ r.store.View(func(readTx store.ReadTx) {
|
|
| 58 |
+ if err = r.initTasks(ctx, readTx); err != nil {
|
|
| 59 |
+ return |
|
| 60 |
+ } |
|
| 61 |
+ |
|
| 62 |
+ if err = r.initServices(readTx); err != nil {
|
|
| 63 |
+ return |
|
| 64 |
+ } |
|
| 65 |
+ |
|
| 66 |
+ if err = r.initCluster(readTx); err != nil {
|
|
| 67 |
+ return |
|
| 68 |
+ } |
|
| 69 |
+ }) |
|
| 70 |
+ if err != nil {
|
|
| 71 |
+ return err |
|
| 72 |
+ } |
|
| 73 |
+ |
|
| 74 |
+ r.tick(ctx) |
|
| 75 |
+ |
|
| 76 |
+ for {
|
|
| 77 |
+ select {
|
|
| 78 |
+ case event := <-watcher: |
|
| 79 |
+ // TODO(stevvooe): Use ctx to limit running time of operation. |
|
| 80 |
+ r.handleTaskEvent(ctx, event) |
|
| 81 |
+ r.handleServiceEvent(ctx, event) |
|
| 82 |
+ switch v := event.(type) {
|
|
| 83 |
+ case state.EventCommit: |
|
| 84 |
+ r.tick(ctx) |
|
| 85 |
+ case state.EventUpdateCluster: |
|
| 86 |
+ r.cluster = v.Cluster |
|
| 87 |
+ } |
|
| 88 |
+ case <-r.stopChan: |
|
| 89 |
+ return nil |
|
| 90 |
+ } |
|
| 91 |
+ } |
|
| 92 |
+} |
|
| 93 |
+ |
|
| 94 |
+// Stop stops the orchestrator. |
|
| 95 |
+func (r *Orchestrator) Stop() {
|
|
| 96 |
+ close(r.stopChan) |
|
| 97 |
+ <-r.doneChan |
|
| 98 |
+ r.updater.CancelAll() |
|
| 99 |
+ r.restarts.CancelAll() |
|
| 100 |
+} |
|
| 101 |
+ |
|
| 102 |
+func (r *Orchestrator) tick(ctx context.Context) {
|
|
| 103 |
+ // tickTasks must be called first, so we respond to task-level changes |
|
| 104 |
+ // before performing service reconciliation. |
|
| 105 |
+ r.tickTasks(ctx) |
|
| 106 |
+ r.tickServices(ctx) |
|
| 107 |
+} |
| 0 | 108 |
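With this change the replicated orchestrator lives in its own package. Below is a minimal usage sketch, assuming the package sits at github.com/docker/swarmkit/manager/orchestrator/replicated alongside the restart and update packages shown in its imports; it is illustrative only and not part of the diff:

package orchestratorsketch

import (
	"golang.org/x/net/context"

	"github.com/docker/swarmkit/manager/orchestrator/replicated"
	"github.com/docker/swarmkit/manager/state/store"
)

// runReplicated starts the relocated replicated orchestrator and returns a
// function that shuts it down. Run blocks until Stop is called, so it is
// driven from its own goroutine here.
func runReplicated(ctx context.Context, s *store.MemoryStore) (stop func()) {
	o := replicated.NewReplicatedOrchestrator(s)
	go o.Run(ctx)
	// Stop closes stopChan, waits for Run to exit, and cancels pending
	// updates and restarts.
	return o.Stop
}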
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,224 @@ |
| 0 |
+package replicated |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "sort" |
|
| 4 |
+ |
|
| 5 |
+ "github.com/docker/go-events" |
|
| 6 |
+ "github.com/docker/swarmkit/api" |
|
| 7 |
+ "github.com/docker/swarmkit/log" |
|
| 8 |
+ "github.com/docker/swarmkit/manager/orchestrator" |
|
| 9 |
+ "github.com/docker/swarmkit/manager/state" |
|
| 10 |
+ "github.com/docker/swarmkit/manager/state/store" |
|
| 11 |
+ "golang.org/x/net/context" |
|
| 12 |
+) |
|
| 13 |
+ |
|
| 14 |
+// This file provides service-level orchestration. It observes changes to |
|
| 15 |
+// services and creates and destroys tasks as necessary to match the service |
|
| 16 |
+// specifications. This is different from task-level orchestration, which |
|
| 17 |
+// responds to changes in individual tasks (or nodes which run them). |
|
| 18 |
+ |
|
| 19 |
+func (r *Orchestrator) initCluster(readTx store.ReadTx) error {
|
|
| 20 |
+ clusters, err := store.FindClusters(readTx, store.ByName("default"))
|
|
| 21 |
+ if err != nil {
|
|
| 22 |
+ return err |
|
| 23 |
+ } |
|
| 24 |
+ |
|
| 25 |
+ if len(clusters) != 1 {
|
|
| 26 |
+ // we'll just pick it when it is created. |
|
| 27 |
+ return nil |
|
| 28 |
+ } |
|
| 29 |
+ |
|
| 30 |
+ r.cluster = clusters[0] |
|
| 31 |
+ return nil |
|
| 32 |
+} |
|
| 33 |
+ |
|
| 34 |
+func (r *Orchestrator) initServices(readTx store.ReadTx) error {
|
|
| 35 |
+ services, err := store.FindServices(readTx, store.All) |
|
| 36 |
+ if err != nil {
|
|
| 37 |
+ return err |
|
| 38 |
+ } |
|
| 39 |
+ for _, s := range services {
|
|
| 40 |
+ if orchestrator.IsReplicatedService(s) {
|
|
| 41 |
+ r.reconcileServices[s.ID] = s |
|
| 42 |
+ } |
|
| 43 |
+ } |
|
| 44 |
+ return nil |
|
| 45 |
+} |
|
| 46 |
+ |
|
| 47 |
+func (r *Orchestrator) handleServiceEvent(ctx context.Context, event events.Event) {
|
|
| 48 |
+ switch v := event.(type) {
|
|
| 49 |
+ case state.EventDeleteService: |
|
| 50 |
+ if !orchestrator.IsReplicatedService(v.Service) {
|
|
| 51 |
+ return |
|
| 52 |
+ } |
|
| 53 |
+ orchestrator.DeleteServiceTasks(ctx, r.store, v.Service) |
|
| 54 |
+ r.restarts.ClearServiceHistory(v.Service.ID) |
|
| 55 |
+ case state.EventCreateService: |
|
| 56 |
+ if !orchestrator.IsReplicatedService(v.Service) {
|
|
| 57 |
+ return |
|
| 58 |
+ } |
|
| 59 |
+ r.reconcileServices[v.Service.ID] = v.Service |
|
| 60 |
+ case state.EventUpdateService: |
|
| 61 |
+ if !orchestrator.IsReplicatedService(v.Service) {
|
|
| 62 |
+ return |
|
| 63 |
+ } |
|
| 64 |
+ r.reconcileServices[v.Service.ID] = v.Service |
|
| 65 |
+ } |
|
| 66 |
+} |
|
| 67 |
+ |
|
| 68 |
+func (r *Orchestrator) tickServices(ctx context.Context) {
|
|
| 69 |
+ if len(r.reconcileServices) > 0 {
|
|
| 70 |
+ for _, s := range r.reconcileServices {
|
|
| 71 |
+ r.reconcile(ctx, s) |
|
| 72 |
+ } |
|
| 73 |
+ r.reconcileServices = make(map[string]*api.Service) |
|
| 74 |
+ } |
|
| 75 |
+} |
|
| 76 |
+ |
|
| 77 |
+func (r *Orchestrator) resolveService(ctx context.Context, task *api.Task) *api.Service {
|
|
| 78 |
+ if task.ServiceID == "" {
|
|
| 79 |
+ return nil |
|
| 80 |
+ } |
|
| 81 |
+ var service *api.Service |
|
| 82 |
+ r.store.View(func(tx store.ReadTx) {
|
|
| 83 |
+ service = store.GetService(tx, task.ServiceID) |
|
| 84 |
+ }) |
|
| 85 |
+ return service |
|
| 86 |
+} |
|
| 87 |
+ |
|
| 88 |
+func (r *Orchestrator) reconcile(ctx context.Context, service *api.Service) {
|
|
| 89 |
+ runningSlots, deadSlots, err := orchestrator.GetRunnableAndDeadSlots(r.store, service.ID) |
|
| 90 |
+ if err != nil {
|
|
| 91 |
+ log.G(ctx).WithError(err).Errorf("reconcile failed finding tasks")
|
|
| 92 |
+ return |
|
| 93 |
+ } |
|
| 94 |
+ |
|
| 95 |
+ numSlots := len(runningSlots) |
|
| 96 |
+ |
|
| 97 |
+ slotsSlice := make([]orchestrator.Slot, 0, numSlots) |
|
| 98 |
+ for _, slot := range runningSlots {
|
|
| 99 |
+ slotsSlice = append(slotsSlice, slot) |
|
| 100 |
+ } |
|
| 101 |
+ |
|
| 102 |
+ deploy := service.Spec.GetMode().(*api.ServiceSpec_Replicated) |
|
| 103 |
+ specifiedSlots := int(deploy.Replicated.Replicas) |
|
| 104 |
+ |
|
| 105 |
+ switch {
|
|
| 106 |
+ case specifiedSlots > numSlots: |
|
| 107 |
+ log.G(ctx).Debugf("Service %s was scaled up from %d to %d instances", service.ID, numSlots, specifiedSlots)
|
|
| 108 |
+ // Update all current tasks then add missing tasks |
|
| 109 |
+ r.updater.Update(ctx, r.cluster, service, slotsSlice) |
|
| 110 |
+ _, err = r.store.Batch(func(batch *store.Batch) error {
|
|
| 111 |
+ r.addTasks(ctx, batch, service, runningSlots, deadSlots, specifiedSlots-numSlots) |
|
| 112 |
+ r.deleteTasksMap(ctx, batch, deadSlots) |
|
| 113 |
+ return nil |
|
| 114 |
+ }) |
|
| 115 |
+ if err != nil {
|
|
| 116 |
+ log.G(ctx).WithError(err).Errorf("reconcile batch failed")
|
|
| 117 |
+ } |
|
| 118 |
+ |
|
| 119 |
+ case specifiedSlots < numSlots: |
|
| 120 |
+ // Update up to N tasks then remove the extra |
|
| 121 |
+ log.G(ctx).Debugf("Service %s was scaled down from %d to %d instances", service.ID, numSlots, specifiedSlots)
|
|
| 122 |
+ |
|
| 123 |
+ // Preferentially remove tasks on the nodes that have the most |
|
| 124 |
+ // copies of this service, to leave a more balanced result. |
|
| 125 |
+ |
|
| 126 |
+ // First sort tasks such that tasks which are currently running |
|
| 127 |
+ // (in terms of observed state) appear before non-running tasks. |
|
| 128 |
+ // This will cause us to prefer to remove non-running tasks, all |
|
| 129 |
+ // other things being equal in terms of node balance. |
|
| 130 |
+ |
|
| 131 |
+ sort.Sort(slotsByRunningState(slotsSlice)) |
|
| 132 |
+ |
|
| 133 |
+ // Assign each task an index that counts it as the nth copy of |
|
| 134 |
+ // the service on its node (1, 2, 3, ...), and sort the |
|
| 135 |
+ // tasks by this counter value. |
|
| 136 |
+ |
|
| 137 |
+ slotsByNode := make(map[string]int) |
|
| 138 |
+ slotsWithIndices := make(slotsByIndex, 0, numSlots) |
|
| 139 |
+ |
|
| 140 |
+ for _, slot := range slotsSlice {
|
|
| 141 |
+ if len(slot) == 1 && slot[0].NodeID != "" {
|
|
| 142 |
+ slotsByNode[slot[0].NodeID]++ |
|
| 143 |
+ slotsWithIndices = append(slotsWithIndices, slotWithIndex{slot: slot, index: slotsByNode[slot[0].NodeID]})
|
|
| 144 |
+ } else {
|
|
| 145 |
+ slotsWithIndices = append(slotsWithIndices, slotWithIndex{slot: slot, index: -1})
|
|
| 146 |
+ } |
|
| 147 |
+ } |
|
| 148 |
+ |
|
| 149 |
+ sort.Sort(slotsWithIndices) |
|
| 150 |
+ |
|
| 151 |
+ sortedSlots := make([]orchestrator.Slot, 0, numSlots) |
|
| 152 |
+ for _, slot := range slotsWithIndices {
|
|
| 153 |
+ sortedSlots = append(sortedSlots, slot.slot) |
|
| 154 |
+ } |
|
| 155 |
+ |
|
| 156 |
+ r.updater.Update(ctx, r.cluster, service, sortedSlots[:specifiedSlots]) |
|
| 157 |
+ _, err = r.store.Batch(func(batch *store.Batch) error {
|
|
| 158 |
+ r.deleteTasksMap(ctx, batch, deadSlots) |
|
| 159 |
+ r.deleteTasks(ctx, batch, sortedSlots[specifiedSlots:]) |
|
| 160 |
+ return nil |
|
| 161 |
+ }) |
|
| 162 |
+ if err != nil {
|
|
| 163 |
+ log.G(ctx).WithError(err).Errorf("reconcile batch failed")
|
|
| 164 |
+ } |
|
| 165 |
+ |
|
| 166 |
+ case specifiedSlots == numSlots: |
|
| 167 |
+ _, err = r.store.Batch(func(batch *store.Batch) error {
|
|
| 168 |
+ r.deleteTasksMap(ctx, batch, deadSlots) |
|
| 169 |
+ return nil |
|
| 170 |
+ }) |
|
| 171 |
+ if err != nil {
|
|
| 172 |
+ log.G(ctx).WithError(err).Errorf("reconcile batch failed")
|
|
| 173 |
+ } |
|
| 174 |
+ // Simple update, no scaling - update all tasks. |
|
| 175 |
+ r.updater.Update(ctx, r.cluster, service, slotsSlice) |
|
| 176 |
+ } |
|
| 177 |
+} |
|
| 178 |
+ |
|
| 179 |
+func (r *Orchestrator) addTasks(ctx context.Context, batch *store.Batch, service *api.Service, runningSlots map[uint64]orchestrator.Slot, deadSlots map[uint64]orchestrator.Slot, count int) {
|
|
| 180 |
+ slot := uint64(0) |
|
| 181 |
+ for i := 0; i < count; i++ {
|
|
| 182 |
+ // Find a slot number that is missing a running task |
|
| 183 |
+ for {
|
|
| 184 |
+ slot++ |
|
| 185 |
+ if _, ok := runningSlots[slot]; !ok {
|
|
| 186 |
+ break |
|
| 187 |
+ } |
|
| 188 |
+ } |
|
| 189 |
+ |
|
| 190 |
+ delete(deadSlots, slot) |
|
| 191 |
+ err := batch.Update(func(tx store.Tx) error {
|
|
| 192 |
+ return store.CreateTask(tx, orchestrator.NewTask(r.cluster, service, slot, "")) |
|
| 193 |
+ }) |
|
| 194 |
+ if err != nil {
|
|
| 195 |
+ log.G(ctx).Errorf("Failed to create task: %v", err)
|
|
| 196 |
+ } |
|
| 197 |
+ } |
|
| 198 |
+} |
|
| 199 |
+ |
|
| 200 |
+func (r *Orchestrator) deleteTasks(ctx context.Context, batch *store.Batch, slots []orchestrator.Slot) {
|
|
| 201 |
+ for _, slot := range slots {
|
|
| 202 |
+ for _, t := range slot {
|
|
| 203 |
+ r.deleteTask(ctx, batch, t) |
|
| 204 |
+ } |
|
| 205 |
+ } |
|
| 206 |
+} |
|
| 207 |
+ |
|
| 208 |
+func (r *Orchestrator) deleteTasksMap(ctx context.Context, batch *store.Batch, slots map[uint64]orchestrator.Slot) {
|
|
| 209 |
+ for _, slot := range slots {
|
|
| 210 |
+ for _, t := range slot {
|
|
| 211 |
+ r.deleteTask(ctx, batch, t) |
|
| 212 |
+ } |
|
| 213 |
+ } |
|
| 214 |
+} |
|
| 215 |
+ |
|
| 216 |
+func (r *Orchestrator) deleteTask(ctx context.Context, batch *store.Batch, t *api.Task) {
|
|
| 217 |
+ err := batch.Update(func(tx store.Tx) error {
|
|
| 218 |
+ return store.DeleteTask(tx, t.ID) |
|
| 219 |
+ }) |
|
| 220 |
+ if err != nil {
|
|
| 221 |
+ log.G(ctx).WithError(err).Errorf("deleting task %s failed", t.ID)
|
|
| 222 |
+ } |
|
| 223 |
+} |
| 0 | 224 |
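When scaling up, the addTasks method above fills the lowest slot numbers that have no running task rather than always appending new slots at the end. A standalone sketch of that gap-filling loop, with a plain bool map standing in for the runningSlots map of orchestrator.Slot (illustrative, not part of the diff):

package main

import "fmt"

// nextFreeSlots mirrors the slot-allocation loop in addTasks: starting from 1,
// take the lowest slot numbers that are not already occupied by a running task.
func nextFreeSlots(running map[uint64]bool, count int) []uint64 {
	var out []uint64
	slot := uint64(0)
	for i := 0; i < count; i++ {
		for {
			slot++
			if !running[slot] {
				break
			}
		}
		out = append(out, slot)
	}
	return out
}

func main() {
	running := map[uint64]bool{1: true, 2: true, 4: true}
	fmt.Println(nextFreeSlots(running, 3)) // [3 5 6]
}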
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,55 @@ |
| 0 |
+package replicated |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "github.com/docker/swarmkit/api" |
|
| 4 |
+ "github.com/docker/swarmkit/manager/orchestrator" |
|
| 5 |
+) |
|
| 6 |
+ |
|
| 7 |
+type slotsByRunningState []orchestrator.Slot |
|
| 8 |
+ |
|
| 9 |
+func (is slotsByRunningState) Len() int { return len(is) }
|
|
| 10 |
+func (is slotsByRunningState) Swap(i, j int) { is[i], is[j] = is[j], is[i] }
|
|
| 11 |
+ |
|
| 12 |
+func (is slotsByRunningState) Less(i, j int) bool {
|
|
| 13 |
+ iRunning := false |
|
| 14 |
+ jRunning := false |
|
| 15 |
+ |
|
| 16 |
+ for _, ii := range is[i] {
|
|
| 17 |
+ if ii.Status.State == api.TaskStateRunning {
|
|
| 18 |
+ iRunning = true |
|
| 19 |
+ break |
|
| 20 |
+ } |
|
| 21 |
+ } |
|
| 22 |
+ for _, ij := range is[j] {
|
|
| 23 |
+ if ij.Status.State == api.TaskStateRunning {
|
|
| 24 |
+ jRunning = true |
|
| 25 |
+ break |
|
| 26 |
+ } |
|
| 27 |
+ } |
|
| 28 |
+ |
|
| 29 |
+ return iRunning && !jRunning |
|
| 30 |
+} |
|
| 31 |
+ |
|
| 32 |
+type slotWithIndex struct {
|
|
| 33 |
+ slot orchestrator.Slot |
|
| 34 |
+ |
|
| 35 |
+ // index is a counter that counts this task as the nth instance of |
|
| 36 |
+ // the service on its node. This is used for sorting the tasks so that |
|
| 37 |
+ // when scaling down we leave tasks more evenly balanced. |
|
| 38 |
+ index int |
|
| 39 |
+} |
|
| 40 |
+ |
|
| 41 |
+type slotsByIndex []slotWithIndex |
|
| 42 |
+ |
|
| 43 |
+func (is slotsByIndex) Len() int { return len(is) }
|
|
| 44 |
+func (is slotsByIndex) Swap(i, j int) { is[i], is[j] = is[j], is[i] }
|
|
| 45 |
+ |
|
| 46 |
+func (is slotsByIndex) Less(i, j int) bool {
|
|
| 47 |
+ if is[i].index < 0 && is[j].index >= 0 {
|
|
| 48 |
+ return false |
|
| 49 |
+ } |
|
| 50 |
+ if is[j].index < 0 && is[i].index >= 0 {
|
|
| 51 |
+ return true |
|
| 52 |
+ } |
|
| 53 |
+ return is[i].index < is[j].index |
|
| 54 |
+} |
| 0 | 55 |
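The sorting types above drive the scale-down balancing described in reconcile: each slot is tagged with how many copies of the service its node already carries, and the highest-numbered copies sort last so they are removed first. A standalone illustration using plain strings for node IDs (not the vendored types):

package main

import (
	"fmt"
	"sort"
)

// taggedSlot mirrors slotWithIndex: index counts this task as the nth copy of
// the service on its node, and -1 marks a task with no node assignment yet.
type taggedSlot struct {
	nodeID string
	index  int
}

type byIndex []taggedSlot

func (s byIndex) Len() int      { return len(s) }
func (s byIndex) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byIndex) Less(i, j int) bool {
	// Unassigned slots sort last, matching slotsByIndex.Less.
	if s[i].index < 0 && s[j].index >= 0 {
		return false
	}
	if s[j].index < 0 && s[i].index >= 0 {
		return true
	}
	return s[i].index < s[j].index
}

func main() {
	// Three copies on nodeA and one on nodeB; scaling down to two keeps the
	// first copy per node and removes the extra copies on the crowded node.
	slots := []taggedSlot{
		{"nodeA", 1}, {"nodeA", 2}, {"nodeA", 3}, {"nodeB", 1},
	}
	sort.Sort(byIndex(slots))
	fmt.Println(slots) // the two index-1 slots come first; nodeA's extras sort last
}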
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,234 @@ |
| 0 |
+package replicated |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "time" |
|
| 4 |
+ |
|
| 5 |
+ "github.com/docker/go-events" |
|
| 6 |
+ "github.com/docker/swarmkit/api" |
|
| 7 |
+ "github.com/docker/swarmkit/log" |
|
| 8 |
+ "github.com/docker/swarmkit/manager/orchestrator" |
|
| 9 |
+ "github.com/docker/swarmkit/manager/state" |
|
| 10 |
+ "github.com/docker/swarmkit/manager/state/store" |
|
| 11 |
+ "github.com/docker/swarmkit/protobuf/ptypes" |
|
| 12 |
+ "golang.org/x/net/context" |
|
| 13 |
+) |
|
| 14 |
+ |
|
| 15 |
+// This file provides task-level orchestration. It observes changes to task |
|
| 16 |
+// and node state and kills/recreates tasks if necessary. This is distinct from |
|
| 17 |
+// service-level reconciliation, which observes changes to services and creates |
|
| 18 |
+// and/or kills tasks to match the service definition. |
|
| 19 |
+ |
|
| 20 |
+func invalidNode(n *api.Node) bool {
|
|
| 21 |
+ return n == nil || |
|
| 22 |
+ n.Status.State == api.NodeStatus_DOWN || |
|
| 23 |
+ n.Spec.Availability == api.NodeAvailabilityDrain |
|
| 24 |
+} |
|
| 25 |
+ |
|
| 26 |
+func (r *Orchestrator) initTasks(ctx context.Context, readTx store.ReadTx) error {
|
|
| 27 |
+ tasks, err := store.FindTasks(readTx, store.All) |
|
| 28 |
+ if err != nil {
|
|
| 29 |
+ return err |
|
| 30 |
+ } |
|
| 31 |
+ for _, t := range tasks {
|
|
| 32 |
+ if t.NodeID != "" {
|
|
| 33 |
+ n := store.GetNode(readTx, t.NodeID) |
|
| 34 |
+ if invalidNode(n) && t.Status.State <= api.TaskStateRunning && t.DesiredState <= api.TaskStateRunning {
|
|
| 35 |
+ r.restartTasks[t.ID] = struct{}{}
|
|
| 36 |
+ } |
|
| 37 |
+ } |
|
| 38 |
+ } |
|
| 39 |
+ |
|
| 40 |
+ _, err = r.store.Batch(func(batch *store.Batch) error {
|
|
| 41 |
+ for _, t := range tasks {
|
|
| 42 |
+ if t.ServiceID == "" {
|
|
| 43 |
+ continue |
|
| 44 |
+ } |
|
| 45 |
+ |
|
| 46 |
+ // TODO(aluzzardi): We should NOT retrieve the service here. |
|
| 47 |
+ service := store.GetService(readTx, t.ServiceID) |
|
| 48 |
+ if service == nil {
|
|
| 49 |
+ // Service was deleted |
|
| 50 |
+ err := batch.Update(func(tx store.Tx) error {
|
|
| 51 |
+ return store.DeleteTask(tx, t.ID) |
|
| 52 |
+ }) |
|
| 53 |
+ if err != nil {
|
|
| 54 |
+ log.G(ctx).WithError(err).Error("failed to set task desired state to dead")
|
|
| 55 |
+ } |
|
| 56 |
+ continue |
|
| 57 |
+ } |
|
| 58 |
+ // TODO(aluzzardi): This is shady. We should have a more generic condition. |
|
| 59 |
+ if t.DesiredState != api.TaskStateReady || !orchestrator.IsReplicatedService(service) {
|
|
| 60 |
+ continue |
|
| 61 |
+ } |
|
| 62 |
+ restartDelay := orchestrator.DefaultRestartDelay |
|
| 63 |
+ if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
|
|
| 64 |
+ var err error |
|
| 65 |
+ restartDelay, err = ptypes.Duration(t.Spec.Restart.Delay) |
|
| 66 |
+ if err != nil {
|
|
| 67 |
+ log.G(ctx).WithError(err).Error("invalid restart delay")
|
|
| 68 |
+ restartDelay = orchestrator.DefaultRestartDelay |
|
| 69 |
+ } |
|
| 70 |
+ } |
|
| 71 |
+ if restartDelay != 0 {
|
|
| 72 |
+ timestamp, err := ptypes.Timestamp(t.Status.Timestamp) |
|
| 73 |
+ if err == nil {
|
|
| 74 |
+ restartTime := timestamp.Add(restartDelay) |
|
| 75 |
+ calculatedRestartDelay := restartTime.Sub(time.Now()) |
|
| 76 |
+ if calculatedRestartDelay < restartDelay {
|
|
| 77 |
+ restartDelay = calculatedRestartDelay |
|
| 78 |
+ } |
|
| 79 |
+ if restartDelay > 0 {
|
|
| 80 |
+ _ = batch.Update(func(tx store.Tx) error {
|
|
| 81 |
+ t := store.GetTask(tx, t.ID) |
|
| 82 |
+ // TODO(aluzzardi): This is shady as well. We should have a more generic condition. |
|
| 83 |
+ if t == nil || t.DesiredState != api.TaskStateReady {
|
|
| 84 |
+ return nil |
|
| 85 |
+ } |
|
| 86 |
+ r.restarts.DelayStart(ctx, tx, nil, t.ID, restartDelay, true) |
|
| 87 |
+ return nil |
|
| 88 |
+ }) |
|
| 89 |
+ continue |
|
| 90 |
+ } |
|
| 91 |
+ } else {
|
|
| 92 |
+ log.G(ctx).WithError(err).Error("invalid status timestamp")
|
|
| 93 |
+ } |
|
| 94 |
+ } |
|
| 95 |
+ |
|
| 96 |
+ // Start now |
|
| 97 |
+ err := batch.Update(func(tx store.Tx) error {
|
|
| 98 |
+ return r.restarts.StartNow(tx, t.ID) |
|
| 99 |
+ }) |
|
| 100 |
+ if err != nil {
|
|
| 101 |
+ log.G(ctx).WithError(err).WithField("task.id", t.ID).Error("moving task out of delayed state failed")
|
|
| 102 |
+ } |
|
| 103 |
+ } |
|
| 104 |
+ return nil |
|
| 105 |
+ }) |
|
| 106 |
+ |
|
| 107 |
+ return err |
|
| 108 |
+} |
|
| 109 |
+ |
|
| 110 |
+func (r *Orchestrator) handleTaskEvent(ctx context.Context, event events.Event) {
|
|
| 111 |
+ switch v := event.(type) {
|
|
| 112 |
+ case state.EventDeleteNode: |
|
| 113 |
+ r.restartTasksByNodeID(ctx, v.Node.ID) |
|
| 114 |
+ case state.EventCreateNode: |
|
| 115 |
+ r.handleNodeChange(ctx, v.Node) |
|
| 116 |
+ case state.EventUpdateNode: |
|
| 117 |
+ r.handleNodeChange(ctx, v.Node) |
|
| 118 |
+ case state.EventDeleteTask: |
|
| 119 |
+ if v.Task.DesiredState <= api.TaskStateRunning {
|
|
| 120 |
+ service := r.resolveService(ctx, v.Task) |
|
| 121 |
+ if !orchestrator.IsReplicatedService(service) {
|
|
| 122 |
+ return |
|
| 123 |
+ } |
|
| 124 |
+ r.reconcileServices[service.ID] = service |
|
| 125 |
+ } |
|
| 126 |
+ r.restarts.Cancel(v.Task.ID) |
|
| 127 |
+ case state.EventUpdateTask: |
|
| 128 |
+ r.handleTaskChange(ctx, v.Task) |
|
| 129 |
+ case state.EventCreateTask: |
|
| 130 |
+ r.handleTaskChange(ctx, v.Task) |
|
| 131 |
+ } |
|
| 132 |
+} |
|
| 133 |
+ |
|
| 134 |
+func (r *Orchestrator) tickTasks(ctx context.Context) {
|
|
| 135 |
+ if len(r.restartTasks) > 0 {
|
|
| 136 |
+ _, err := r.store.Batch(func(batch *store.Batch) error {
|
|
| 137 |
+ for taskID := range r.restartTasks {
|
|
| 138 |
+ err := batch.Update(func(tx store.Tx) error {
|
|
| 139 |
+ // TODO(aaronl): optimistic update? |
|
| 140 |
+ t := store.GetTask(tx, taskID) |
|
| 141 |
+ if t != nil {
|
|
| 142 |
+ if t.DesiredState > api.TaskStateRunning {
|
|
| 143 |
+ return nil |
|
| 144 |
+ } |
|
| 145 |
+ |
|
| 146 |
+ service := store.GetService(tx, t.ServiceID) |
|
| 147 |
+ if !orchestrator.IsReplicatedService(service) {
|
|
| 148 |
+ return nil |
|
| 149 |
+ } |
|
| 150 |
+ |
|
| 151 |
+ // Restart task if applicable |
|
| 152 |
+ if err := r.restarts.Restart(ctx, tx, r.cluster, service, *t); err != nil {
|
|
| 153 |
+ return err |
|
| 154 |
+ } |
|
| 155 |
+ } |
|
| 156 |
+ return nil |
|
| 157 |
+ }) |
|
| 158 |
+ if err != nil {
|
|
| 159 |
+ log.G(ctx).WithError(err).Errorf("Orchestrator task reaping transaction failed")
|
|
| 160 |
+ } |
|
| 161 |
+ } |
|
| 162 |
+ return nil |
|
| 163 |
+ }) |
|
| 164 |
+ |
|
| 165 |
+ if err != nil {
|
|
| 166 |
+ log.G(ctx).WithError(err).Errorf("orchestrator task removal batch failed")
|
|
| 167 |
+ } |
|
| 168 |
+ |
|
| 169 |
+ r.restartTasks = make(map[string]struct{})
|
|
| 170 |
+ } |
|
| 171 |
+} |
|
| 172 |
+ |
|
| 173 |
+func (r *Orchestrator) restartTasksByNodeID(ctx context.Context, nodeID string) {
|
|
| 174 |
+ var err error |
|
| 175 |
+ r.store.View(func(tx store.ReadTx) {
|
|
| 176 |
+ var tasks []*api.Task |
|
| 177 |
+ tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID)) |
|
| 178 |
+ if err != nil {
|
|
| 179 |
+ return |
|
| 180 |
+ } |
|
| 181 |
+ |
|
| 182 |
+ for _, t := range tasks {
|
|
| 183 |
+ if t.DesiredState > api.TaskStateRunning {
|
|
| 184 |
+ continue |
|
| 185 |
+ } |
|
| 186 |
+ service := store.GetService(tx, t.ServiceID) |
|
| 187 |
+ if orchestrator.IsReplicatedService(service) {
|
|
| 188 |
+ r.restartTasks[t.ID] = struct{}{}
|
|
| 189 |
+ } |
|
| 190 |
+ } |
|
| 191 |
+ }) |
|
| 192 |
+ if err != nil {
|
|
| 193 |
+ log.G(ctx).WithError(err).Errorf("failed to list tasks to remove")
|
|
| 194 |
+ } |
|
| 195 |
+} |
|
| 196 |
+ |
|
| 197 |
+func (r *Orchestrator) handleNodeChange(ctx context.Context, n *api.Node) {
|
|
| 198 |
+ if !invalidNode(n) {
|
|
| 199 |
+ return |
|
| 200 |
+ } |
|
| 201 |
+ |
|
| 202 |
+ r.restartTasksByNodeID(ctx, n.ID) |
|
| 203 |
+} |
|
| 204 |
+ |
|
| 205 |
+func (r *Orchestrator) handleTaskChange(ctx context.Context, t *api.Task) {
|
|
| 206 |
+ // If we already set the desired state past TaskStateRunning, there is no |
|
| 207 |
+ // further action necessary. |
|
| 208 |
+ if t.DesiredState > api.TaskStateRunning {
|
|
| 209 |
+ return |
|
| 210 |
+ } |
|
| 211 |
+ |
|
| 212 |
+ var ( |
|
| 213 |
+ n *api.Node |
|
| 214 |
+ service *api.Service |
|
| 215 |
+ ) |
|
| 216 |
+ r.store.View(func(tx store.ReadTx) {
|
|
| 217 |
+ if t.NodeID != "" {
|
|
| 218 |
+ n = store.GetNode(tx, t.NodeID) |
|
| 219 |
+ } |
|
| 220 |
+ if t.ServiceID != "" {
|
|
| 221 |
+ service = store.GetService(tx, t.ServiceID) |
|
| 222 |
+ } |
|
| 223 |
+ }) |
|
| 224 |
+ |
|
| 225 |
+ if !orchestrator.IsReplicatedService(service) {
|
|
| 226 |
+ return |
|
| 227 |
+ } |
|
| 228 |
+ |
|
| 229 |
+ if t.Status.State > api.TaskStateRunning || |
|
| 230 |
+ (t.NodeID != "" && invalidNode(n)) {
|
|
| 231 |
+ r.restartTasks[t.ID] = struct{}{}
|
|
| 232 |
+ } |
|
| 233 |
+} |
| 0 | 234 |
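On manager startup, initTasks above re-arms restart delays for tasks left in the READY state: the remaining delay is the configured delay minus the time already elapsed since the task's status timestamp, and a non-positive remainder means the task starts immediately. A standalone sketch of that clamp (illustrative, not part of the diff):

package main

import (
	"fmt"
	"time"
)

// remainingDelay mirrors the computation in initTasks: the restart timer should
// fire restartDelay after the task's status timestamp, not restartDelay after
// the manager comes back up.
func remainingDelay(statusTimestamp time.Time, restartDelay time.Duration, now time.Time) time.Duration {
	restartTime := statusTimestamp.Add(restartDelay)
	calculated := restartTime.Sub(now)
	if calculated < restartDelay {
		restartDelay = calculated
	}
	return restartDelay // <= 0 means the delay already elapsed: start now
}

func main() {
	now := time.Now()
	// Failed 2s ago with a 5s delay: 3s of delay remain.
	fmt.Println(remainingDelay(now.Add(-2*time.Second), 5*time.Second, now))
	// Failed 10s ago: the delay already elapsed, start immediately.
	fmt.Println(remainingDelay(now.Add(-10*time.Second), 5*time.Second, now) <= 0)
}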
deleted file mode 100644 |
| ... | ... |
@@ -1,440 +0,0 @@ |
| 1 |
-package orchestrator |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "container/list" |
|
| 5 |
- "errors" |
|
| 6 |
- "sync" |
|
| 7 |
- "time" |
|
| 8 |
- |
|
| 9 |
- "github.com/docker/go-events" |
|
| 10 |
- "github.com/docker/swarmkit/api" |
|
| 11 |
- "github.com/docker/swarmkit/log" |
|
| 12 |
- "github.com/docker/swarmkit/manager/state" |
|
| 13 |
- "github.com/docker/swarmkit/manager/state/store" |
|
| 14 |
- "github.com/docker/swarmkit/protobuf/ptypes" |
|
| 15 |
- "golang.org/x/net/context" |
|
| 16 |
-) |
|
| 17 |
- |
|
| 18 |
-const defaultOldTaskTimeout = time.Minute |
|
| 19 |
-const defaultRestartDelay = 5 * time.Second |
|
| 20 |
- |
|
| 21 |
-type restartedInstance struct {
|
|
| 22 |
- timestamp time.Time |
|
| 23 |
-} |
|
| 24 |
- |
|
| 25 |
-type instanceRestartInfo struct {
|
|
| 26 |
- // counter of restarts for this instance. |
|
| 27 |
- totalRestarts uint64 |
|
| 28 |
- // Linked list of restartedInstance structs. Only used when |
|
| 29 |
- // Restart.MaxAttempts and Restart.Window are both |
|
| 30 |
- // nonzero. |
|
| 31 |
- restartedInstances *list.List |
|
| 32 |
-} |
|
| 33 |
- |
|
| 34 |
-type delayedStart struct {
|
|
| 35 |
- // cancel is called to cancel the delayed start. |
|
| 36 |
- cancel func() |
|
| 37 |
- doneCh chan struct{}
|
|
| 38 |
- |
|
| 39 |
- // waiter is set to true if the next restart is waiting for this delay |
|
| 40 |
- // to complete. |
|
| 41 |
- waiter bool |
|
| 42 |
-} |
|
| 43 |
- |
|
| 44 |
-// RestartSupervisor initiates and manages restarts. It's responsible for |
|
| 45 |
-// delaying restarts when applicable. |
|
| 46 |
-type RestartSupervisor struct {
|
|
| 47 |
- mu sync.Mutex |
|
| 48 |
- store *store.MemoryStore |
|
| 49 |
- delays map[string]*delayedStart |
|
| 50 |
- history map[instanceTuple]*instanceRestartInfo |
|
| 51 |
- historyByService map[string]map[instanceTuple]struct{}
|
|
| 52 |
- taskTimeout time.Duration |
|
| 53 |
-} |
|
| 54 |
- |
|
| 55 |
-// NewRestartSupervisor creates a new RestartSupervisor. |
|
| 56 |
-func NewRestartSupervisor(store *store.MemoryStore) *RestartSupervisor {
|
|
| 57 |
- return &RestartSupervisor{
|
|
| 58 |
- store: store, |
|
| 59 |
- delays: make(map[string]*delayedStart), |
|
| 60 |
- history: make(map[instanceTuple]*instanceRestartInfo), |
|
| 61 |
- historyByService: make(map[string]map[instanceTuple]struct{}),
|
|
| 62 |
- taskTimeout: defaultOldTaskTimeout, |
|
| 63 |
- } |
|
| 64 |
-} |
|
| 65 |
- |
|
| 66 |
-func (r *RestartSupervisor) waitRestart(ctx context.Context, oldDelay *delayedStart, cluster *api.Cluster, taskID string) {
|
|
| 67 |
- // Wait for the last restart delay to elapse. |
|
| 68 |
- select {
|
|
| 69 |
- case <-oldDelay.doneCh: |
|
| 70 |
- case <-ctx.Done(): |
|
| 71 |
- return |
|
| 72 |
- } |
|
| 73 |
- |
|
| 74 |
- // Start the next restart |
|
| 75 |
- err := r.store.Update(func(tx store.Tx) error {
|
|
| 76 |
- t := store.GetTask(tx, taskID) |
|
| 77 |
- if t == nil {
|
|
| 78 |
- return nil |
|
| 79 |
- } |
|
| 80 |
- if t.DesiredState > api.TaskStateRunning {
|
|
| 81 |
- return nil |
|
| 82 |
- } |
|
| 83 |
- service := store.GetService(tx, t.ServiceID) |
|
| 84 |
- if service == nil {
|
|
| 85 |
- return nil |
|
| 86 |
- } |
|
| 87 |
- return r.Restart(ctx, tx, cluster, service, *t) |
|
| 88 |
- }) |
|
| 89 |
- |
|
| 90 |
- if err != nil {
|
|
| 91 |
- log.G(ctx).WithError(err).Errorf("failed to restart task after waiting for previous restart")
|
|
| 92 |
- } |
|
| 93 |
-} |
|
| 94 |
- |
|
| 95 |
-// Restart initiates a new task to replace t if appropriate under the service's |
|
| 96 |
-// restart policy. |
|
| 97 |
-func (r *RestartSupervisor) Restart(ctx context.Context, tx store.Tx, cluster *api.Cluster, service *api.Service, t api.Task) error {
|
|
| 98 |
- // TODO(aluzzardi): This function should not depend on `service`. |
|
| 99 |
- |
|
| 100 |
- // Is the old task still in the process of restarting? If so, wait for |
|
| 101 |
- // its restart delay to elapse, to avoid tight restart loops (for |
|
| 102 |
- // example, when the image doesn't exist). |
|
| 103 |
- r.mu.Lock() |
|
| 104 |
- oldDelay, ok := r.delays[t.ID] |
|
| 105 |
- if ok {
|
|
| 106 |
- if !oldDelay.waiter {
|
|
| 107 |
- oldDelay.waiter = true |
|
| 108 |
- go r.waitRestart(ctx, oldDelay, cluster, t.ID) |
|
| 109 |
- } |
|
| 110 |
- r.mu.Unlock() |
|
| 111 |
- return nil |
|
| 112 |
- } |
|
| 113 |
- r.mu.Unlock() |
|
| 114 |
- |
|
| 115 |
- // Sanity check: was the task shut down already by a separate call to |
|
| 116 |
- // Restart? If so, we must avoid restarting it, because this will create |
|
| 117 |
- // an extra task. This should never happen unless there is a bug. |
|
| 118 |
- if t.DesiredState > api.TaskStateRunning {
|
|
| 119 |
- return errors.New("Restart called on task that was already shut down")
|
|
| 120 |
- } |
|
| 121 |
- |
|
| 122 |
- t.DesiredState = api.TaskStateShutdown |
|
| 123 |
- err := store.UpdateTask(tx, &t) |
|
| 124 |
- if err != nil {
|
|
| 125 |
- log.G(ctx).WithError(err).Errorf("failed to set task desired state to dead")
|
|
| 126 |
- return err |
|
| 127 |
- } |
|
| 128 |
- |
|
| 129 |
- if !r.shouldRestart(ctx, &t, service) {
|
|
| 130 |
- return nil |
|
| 131 |
- } |
|
| 132 |
- |
|
| 133 |
- var restartTask *api.Task |
|
| 134 |
- |
|
| 135 |
- if isReplicatedService(service) {
|
|
| 136 |
- restartTask = newTask(cluster, service, t.Slot, "") |
|
| 137 |
- } else if isGlobalService(service) {
|
|
| 138 |
- restartTask = newTask(cluster, service, 0, t.NodeID) |
|
| 139 |
- } else {
|
|
| 140 |
- log.G(ctx).Error("service not supported by restart supervisor")
|
|
| 141 |
- return nil |
|
| 142 |
- } |
|
| 143 |
- |
|
| 144 |
- n := store.GetNode(tx, t.NodeID) |
|
| 145 |
- |
|
| 146 |
- restartTask.DesiredState = api.TaskStateReady |
|
| 147 |
- |
|
| 148 |
- var restartDelay time.Duration |
|
| 149 |
- // Restart delay is not applied to drained nodes |
|
| 150 |
- if n == nil || n.Spec.Availability != api.NodeAvailabilityDrain {
|
|
| 151 |
- if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
|
|
| 152 |
- var err error |
|
| 153 |
- restartDelay, err = ptypes.Duration(t.Spec.Restart.Delay) |
|
| 154 |
- if err != nil {
|
|
| 155 |
- log.G(ctx).WithError(err).Error("invalid restart delay; using default")
|
|
| 156 |
- restartDelay = defaultRestartDelay |
|
| 157 |
- } |
|
| 158 |
- } else {
|
|
| 159 |
- restartDelay = defaultRestartDelay |
|
| 160 |
- } |
|
| 161 |
- } |
|
| 162 |
- |
|
| 163 |
- waitStop := true |
|
| 164 |
- |
|
| 165 |
- // Normally we wait for the old task to stop running, but we skip this |
|
| 166 |
- // if the old task is already dead or the node it's assigned to is down. |
|
| 167 |
- if (n != nil && n.Status.State == api.NodeStatus_DOWN) || t.Status.State > api.TaskStateRunning {
|
|
| 168 |
- waitStop = false |
|
| 169 |
- } |
|
| 170 |
- |
|
| 171 |
- if err := store.CreateTask(tx, restartTask); err != nil {
|
|
| 172 |
- log.G(ctx).WithError(err).WithField("task.id", restartTask.ID).Error("task create failed")
|
|
| 173 |
- return err |
|
| 174 |
- } |
|
| 175 |
- |
|
| 176 |
- r.recordRestartHistory(restartTask) |
|
| 177 |
- |
|
| 178 |
- r.DelayStart(ctx, tx, &t, restartTask.ID, restartDelay, waitStop) |
|
| 179 |
- return nil |
|
| 180 |
-} |
|
| 181 |
- |
|
| 182 |
-func (r *RestartSupervisor) shouldRestart(ctx context.Context, t *api.Task, service *api.Service) bool {
|
|
| 183 |
- // TODO(aluzzardi): This function should not depend on `service`. |
|
| 184 |
- |
|
| 185 |
- condition := restartCondition(t) |
|
| 186 |
- |
|
| 187 |
- if condition != api.RestartOnAny && |
|
| 188 |
- (condition != api.RestartOnFailure || t.Status.State == api.TaskStateCompleted) {
|
|
| 189 |
- return false |
|
| 190 |
- } |
|
| 191 |
- |
|
| 192 |
- if t.Spec.Restart == nil || t.Spec.Restart.MaxAttempts == 0 {
|
|
| 193 |
- return true |
|
| 194 |
- } |
|
| 195 |
- |
|
| 196 |
- instanceTuple := instanceTuple{
|
|
| 197 |
- instance: t.Slot, |
|
| 198 |
- serviceID: t.ServiceID, |
|
| 199 |
- } |
|
| 200 |
- |
|
| 201 |
- // Instance is not meaningful for "global" tasks, so they need to be |
|
| 202 |
- // indexed by NodeID. |
|
| 203 |
- if isGlobalService(service) {
|
|
| 204 |
- instanceTuple.nodeID = t.NodeID |
|
| 205 |
- } |
|
| 206 |
- |
|
| 207 |
- r.mu.Lock() |
|
| 208 |
- defer r.mu.Unlock() |
|
| 209 |
- |
|
| 210 |
- restartInfo := r.history[instanceTuple] |
|
| 211 |
- if restartInfo == nil {
|
|
| 212 |
- return true |
|
| 213 |
- } |
|
| 214 |
- |
|
| 215 |
- if t.Spec.Restart.Window == nil || (t.Spec.Restart.Window.Seconds == 0 && t.Spec.Restart.Window.Nanos == 0) {
|
|
| 216 |
- return restartInfo.totalRestarts < t.Spec.Restart.MaxAttempts |
|
| 217 |
- } |
|
| 218 |
- |
|
| 219 |
- if restartInfo.restartedInstances == nil {
|
|
| 220 |
- return true |
|
| 221 |
- } |
|
| 222 |
- |
|
| 223 |
- window, err := ptypes.Duration(t.Spec.Restart.Window) |
|
| 224 |
- if err != nil {
|
|
| 225 |
- log.G(ctx).WithError(err).Error("invalid restart lookback window")
|
|
| 226 |
- return restartInfo.totalRestarts < t.Spec.Restart.MaxAttempts |
|
| 227 |
- } |
|
| 228 |
- lookback := time.Now().Add(-window) |
|
| 229 |
- |
|
| 230 |
- var next *list.Element |
|
| 231 |
- for e := restartInfo.restartedInstances.Front(); e != nil; e = next {
|
|
| 232 |
- next = e.Next() |
|
| 233 |
- |
|
| 234 |
- if e.Value.(restartedInstance).timestamp.After(lookback) {
|
|
| 235 |
- break |
|
| 236 |
- } |
|
| 237 |
- restartInfo.restartedInstances.Remove(e) |
|
| 238 |
- } |
|
| 239 |
- |
|
| 240 |
- numRestarts := uint64(restartInfo.restartedInstances.Len()) |
|
| 241 |
- |
|
| 242 |
- if numRestarts == 0 {
|
|
| 243 |
- restartInfo.restartedInstances = nil |
|
| 244 |
- } |
|
| 245 |
- |
|
| 246 |
- return numRestarts < t.Spec.Restart.MaxAttempts |
|
| 247 |
-} |
|
| 248 |
- |
|
| 249 |
-func (r *RestartSupervisor) recordRestartHistory(restartTask *api.Task) {
|
|
| 250 |
- if restartTask.Spec.Restart == nil || restartTask.Spec.Restart.MaxAttempts == 0 {
|
|
| 251 |
- // No limit on the number of restarts, so no need to record |
|
| 252 |
- // history. |
|
| 253 |
- return |
|
| 254 |
- } |
|
| 255 |
- tuple := instanceTuple{
|
|
| 256 |
- instance: restartTask.Slot, |
|
| 257 |
- serviceID: restartTask.ServiceID, |
|
| 258 |
- nodeID: restartTask.NodeID, |
|
| 259 |
- } |
|
| 260 |
- |
|
| 261 |
- r.mu.Lock() |
|
| 262 |
- defer r.mu.Unlock() |
|
| 263 |
- |
|
| 264 |
- if r.history[tuple] == nil {
|
|
| 265 |
- r.history[tuple] = &instanceRestartInfo{}
|
|
| 266 |
- } |
|
| 267 |
- |
|
| 268 |
- restartInfo := r.history[tuple] |
|
| 269 |
- restartInfo.totalRestarts++ |
|
| 270 |
- |
|
| 271 |
- if r.historyByService[restartTask.ServiceID] == nil {
|
|
| 272 |
- r.historyByService[restartTask.ServiceID] = make(map[instanceTuple]struct{})
|
|
| 273 |
- } |
|
| 274 |
- r.historyByService[restartTask.ServiceID][tuple] = struct{}{}
|
|
| 275 |
- |
|
| 276 |
- if restartTask.Spec.Restart.Window != nil && (restartTask.Spec.Restart.Window.Seconds != 0 || restartTask.Spec.Restart.Window.Nanos != 0) {
|
|
| 277 |
- if restartInfo.restartedInstances == nil {
|
|
| 278 |
- restartInfo.restartedInstances = list.New() |
|
| 279 |
- } |
|
| 280 |
- |
|
| 281 |
- restartedInstance := restartedInstance{
|
|
| 282 |
- timestamp: time.Now(), |
|
| 283 |
- } |
|
| 284 |
- |
|
| 285 |
- restartInfo.restartedInstances.PushBack(restartedInstance) |
|
| 286 |
- } |
|
| 287 |
-} |
|
| 288 |
- |
|
| 289 |
-// DelayStart starts a timer that moves the task from READY to RUNNING once: |
|
| 290 |
-// - The restart delay has elapsed (if applicable) |
|
| 291 |
-// - The old task that it's replacing has stopped running (or this times out) |
|
| 292 |
-// It must be called during an Update transaction to ensure that it does not |
|
| 293 |
-// miss events. The purpose of the store.Tx argument is to avoid accidental |
|
| 294 |
-// calls outside an Update transaction. |
|
| 295 |
-func (r *RestartSupervisor) DelayStart(ctx context.Context, _ store.Tx, oldTask *api.Task, newTaskID string, delay time.Duration, waitStop bool) <-chan struct{} {
|
|
| 296 |
- ctx, cancel := context.WithCancel(context.Background()) |
|
| 297 |
- doneCh := make(chan struct{})
|
|
| 298 |
- |
|
| 299 |
- r.mu.Lock() |
|
| 300 |
- for {
|
|
| 301 |
- oldDelay, ok := r.delays[newTaskID] |
|
| 302 |
- if !ok {
|
|
| 303 |
- break |
|
| 304 |
- } |
|
| 305 |
- oldDelay.cancel() |
|
| 306 |
- r.mu.Unlock() |
|
| 307 |
- // Note that this channel read should only block for a very |
|
| 308 |
- // short time, because we cancelled the existing delay and |
|
| 309 |
- // that should cause it to stop immediately. |
|
| 310 |
- <-oldDelay.doneCh |
|
| 311 |
- r.mu.Lock() |
|
| 312 |
- } |
|
| 313 |
- r.delays[newTaskID] = &delayedStart{cancel: cancel, doneCh: doneCh}
|
|
| 314 |
- r.mu.Unlock() |
|
| 315 |
- |
|
| 316 |
- var watch chan events.Event |
|
| 317 |
- cancelWatch := func() {}
|
|
| 318 |
- |
|
| 319 |
- if waitStop && oldTask != nil {
|
|
| 320 |
- // Wait for either the old task to complete, or the old task's |
|
| 321 |
- // node to become unavailable. |
|
| 322 |
- watch, cancelWatch = state.Watch( |
|
| 323 |
- r.store.WatchQueue(), |
|
| 324 |
- state.EventUpdateTask{
|
|
| 325 |
- Task: &api.Task{ID: oldTask.ID, Status: api.TaskStatus{State: api.TaskStateRunning}},
|
|
| 326 |
- Checks: []state.TaskCheckFunc{state.TaskCheckID, state.TaskCheckStateGreaterThan},
|
|
| 327 |
- }, |
|
| 328 |
- state.EventUpdateNode{
|
|
| 329 |
- Node: &api.Node{ID: oldTask.NodeID, Status: api.NodeStatus{State: api.NodeStatus_DOWN}},
|
|
| 330 |
- Checks: []state.NodeCheckFunc{state.NodeCheckID, state.NodeCheckState},
|
|
| 331 |
- }, |
|
| 332 |
- state.EventDeleteNode{
|
|
| 333 |
- Node: &api.Node{ID: oldTask.NodeID},
|
|
| 334 |
- Checks: []state.NodeCheckFunc{state.NodeCheckID},
|
|
| 335 |
- }, |
|
| 336 |
- ) |
|
| 337 |
- } |
|
| 338 |
- |
|
| 339 |
- go func() {
|
|
| 340 |
- defer func() {
|
|
| 341 |
- cancelWatch() |
|
| 342 |
- r.mu.Lock() |
|
| 343 |
- delete(r.delays, newTaskID) |
|
| 344 |
- r.mu.Unlock() |
|
| 345 |
- close(doneCh) |
|
| 346 |
- }() |
|
| 347 |
- |
|
| 348 |
- oldTaskTimer := time.NewTimer(r.taskTimeout) |
|
| 349 |
- defer oldTaskTimer.Stop() |
|
| 350 |
- |
|
| 351 |
- // Wait for the delay to elapse, if one is specified. |
|
| 352 |
- if delay != 0 {
|
|
| 353 |
- select {
|
|
| 354 |
- case <-time.After(delay): |
|
| 355 |
- case <-ctx.Done(): |
|
| 356 |
- return |
|
| 357 |
- } |
|
| 358 |
- } |
|
| 359 |
- |
|
| 360 |
- if waitStop && oldTask != nil {
|
|
| 361 |
- select {
|
|
| 362 |
- case <-watch: |
|
| 363 |
- case <-oldTaskTimer.C: |
|
| 364 |
- case <-ctx.Done(): |
|
| 365 |
- return |
|
| 366 |
- } |
|
| 367 |
- } |
|
| 368 |
- |
|
| 369 |
- err := r.store.Update(func(tx store.Tx) error {
|
|
| 370 |
- err := r.StartNow(tx, newTaskID) |
|
| 371 |
- if err != nil {
|
|
| 372 |
- log.G(ctx).WithError(err).WithField("task.id", newTaskID).Error("moving task out of delayed state failed")
|
|
| 373 |
- } |
|
| 374 |
- return nil |
|
| 375 |
- }) |
|
| 376 |
- if err != nil {
|
|
| 377 |
- log.G(ctx).WithError(err).WithField("task.id", newTaskID).Error("task restart transaction failed")
|
|
| 378 |
- } |
|
| 379 |
- }() |
|
| 380 |
- |
|
| 381 |
- return doneCh |
|
| 382 |
-} |
|
| 383 |
- |
|
| 384 |
-// StartNow moves the task into the RUNNING state so it will proceed to start |
|
| 385 |
-// up. |
|
| 386 |
-func (r *RestartSupervisor) StartNow(tx store.Tx, taskID string) error {
|
|
| 387 |
- t := store.GetTask(tx, taskID) |
|
| 388 |
- if t == nil || t.DesiredState >= api.TaskStateRunning {
|
|
| 389 |
- return nil |
|
| 390 |
- } |
|
| 391 |
- t.DesiredState = api.TaskStateRunning |
|
| 392 |
- return store.UpdateTask(tx, t) |
|
| 393 |
-} |
|
| 394 |
- |
|
| 395 |
-// Cancel cancels a pending restart. |
|
| 396 |
-func (r *RestartSupervisor) Cancel(taskID string) {
|
|
| 397 |
- r.mu.Lock() |
|
| 398 |
- delay, ok := r.delays[taskID] |
|
| 399 |
- r.mu.Unlock() |
|
| 400 |
- |
|
| 401 |
- if !ok {
|
|
| 402 |
- return |
|
| 403 |
- } |
|
| 404 |
- |
|
| 405 |
- delay.cancel() |
|
| 406 |
- <-delay.doneCh |
|
| 407 |
-} |
|
| 408 |
- |
|
| 409 |
-// CancelAll aborts all pending restarts and waits for any instances of |
|
| 410 |
-// StartNow that have already triggered to complete. |
|
| 411 |
-func (r *RestartSupervisor) CancelAll() {
|
|
| 412 |
- var cancelled []delayedStart |
|
| 413 |
- |
|
| 414 |
- r.mu.Lock() |
|
| 415 |
- for _, delay := range r.delays {
|
|
| 416 |
- delay.cancel() |
|
| 417 |
- } |
|
| 418 |
- r.mu.Unlock() |
|
| 419 |
- |
|
| 420 |
- for _, delay := range cancelled {
|
|
| 421 |
- <-delay.doneCh |
|
| 422 |
- } |
|
| 423 |
-} |
|
| 424 |
- |
|
| 425 |
-// ClearServiceHistory forgets restart history related to a given service ID. |
|
| 426 |
-func (r *RestartSupervisor) ClearServiceHistory(serviceID string) {
|
|
| 427 |
- r.mu.Lock() |
|
| 428 |
- defer r.mu.Unlock() |
|
| 429 |
- |
|
| 430 |
- tuples := r.historyByService[serviceID] |
|
| 431 |
- if tuples == nil {
|
|
| 432 |
- return |
|
| 433 |
- } |
|
| 434 |
- |
|
| 435 |
- delete(r.historyByService, serviceID) |
|
| 436 |
- |
|
| 437 |
- for t := range tuples {
|
|
| 438 |
- delete(r.history, t) |
|
| 439 |
- } |
|
| 440 |
-} |
| 441 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,446 @@ |
| 0 |
+package restart |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "container/list" |
|
| 4 |
+ "errors" |
|
| 5 |
+ "sync" |
|
| 6 |
+ "time" |
|
| 7 |
+ |
|
| 8 |
+ "github.com/docker/go-events" |
|
| 9 |
+ "github.com/docker/swarmkit/api" |
|
| 10 |
+ "github.com/docker/swarmkit/log" |
|
| 11 |
+ "github.com/docker/swarmkit/manager/orchestrator" |
|
| 12 |
+ "github.com/docker/swarmkit/manager/state" |
|
| 13 |
+ "github.com/docker/swarmkit/manager/state/store" |
|
| 14 |
+ "github.com/docker/swarmkit/protobuf/ptypes" |
|
| 15 |
+ "golang.org/x/net/context" |
|
| 16 |
+) |
|
| 17 |
+ |
|
| 18 |
+const defaultOldTaskTimeout = time.Minute |
|
| 19 |
+ |
|
| 20 |
+type restartedInstance struct {
|
|
| 21 |
+ timestamp time.Time |
|
| 22 |
+} |
|
| 23 |
+ |
|
| 24 |
+type instanceRestartInfo struct {
|
|
| 25 |
+ // counter of restarts for this instance. |
|
| 26 |
+ totalRestarts uint64 |
|
| 27 |
+ // Linked list of restartedInstance structs. Only used when |
|
| 28 |
+ // Restart.MaxAttempts and Restart.Window are both |
|
| 29 |
+ // nonzero. |
|
| 30 |
+ restartedInstances *list.List |
|
| 31 |
+} |
|
| 32 |
+ |
|
| 33 |
+type delayedStart struct {
|
|
| 34 |
+ // cancel is called to cancel the delayed start. |
|
| 35 |
+ cancel func() |
|
| 36 |
+ doneCh chan struct{}
|
|
| 37 |
+ |
|
| 38 |
+ // waiter is set to true if the next restart is waiting for this delay |
|
| 39 |
+ // to complete. |
|
| 40 |
+ waiter bool |
|
| 41 |
+} |
|
| 42 |
+ |
|
| 43 |
+type instanceTuple struct {
|
|
| 44 |
+ instance uint64 // unset for global tasks |
|
| 45 |
+ serviceID string |
|
| 46 |
+ nodeID string // unset for replicated tasks |
|
| 47 |
+} |
|
| 48 |
+ |
|
| 49 |
+// Supervisor initiates and manages restarts. It's responsible for |
|
| 50 |
+// delaying restarts when applicable. |
|
| 51 |
+type Supervisor struct {
|
|
| 52 |
+ mu sync.Mutex |
|
| 53 |
+ store *store.MemoryStore |
|
| 54 |
+ delays map[string]*delayedStart |
|
| 55 |
+ history map[instanceTuple]*instanceRestartInfo |
|
| 56 |
+ historyByService map[string]map[instanceTuple]struct{}
|
|
| 57 |
+ TaskTimeout time.Duration |
|
| 58 |
+} |
|
| 59 |
+ |
|
| 60 |
+// NewSupervisor creates a new RestartSupervisor. |
|
| 61 |
+func NewSupervisor(store *store.MemoryStore) *Supervisor {
|
|
| 62 |
+ return &Supervisor{
|
|
| 63 |
+ store: store, |
|
| 64 |
+ delays: make(map[string]*delayedStart), |
|
| 65 |
+ history: make(map[instanceTuple]*instanceRestartInfo), |
|
| 66 |
+ historyByService: make(map[string]map[instanceTuple]struct{}),
|
|
| 67 |
+ TaskTimeout: defaultOldTaskTimeout, |
|
| 68 |
+ } |
|
| 69 |
+} |
|
| 70 |
+ |
|
| 71 |
+func (r *Supervisor) waitRestart(ctx context.Context, oldDelay *delayedStart, cluster *api.Cluster, taskID string) {
|
|
| 72 |
+ // Wait for the last restart delay to elapse. |
|
| 73 |
+ select {
|
|
| 74 |
+ case <-oldDelay.doneCh: |
|
| 75 |
+ case <-ctx.Done(): |
|
| 76 |
+ return |
|
| 77 |
+ } |
|
| 78 |
+ |
|
| 79 |
+ // Start the next restart |
|
| 80 |
+ err := r.store.Update(func(tx store.Tx) error {
|
|
| 81 |
+ t := store.GetTask(tx, taskID) |
|
| 82 |
+ if t == nil {
|
|
| 83 |
+ return nil |
|
| 84 |
+ } |
|
| 85 |
+ if t.DesiredState > api.TaskStateRunning {
|
|
| 86 |
+ return nil |
|
| 87 |
+ } |
|
| 88 |
+ service := store.GetService(tx, t.ServiceID) |
|
| 89 |
+ if service == nil {
|
|
| 90 |
+ return nil |
|
| 91 |
+ } |
|
| 92 |
+ return r.Restart(ctx, tx, cluster, service, *t) |
|
| 93 |
+ }) |
|
| 94 |
+ |
|
| 95 |
+ if err != nil {
|
|
| 96 |
+ log.G(ctx).WithError(err).Errorf("failed to restart task after waiting for previous restart")
|
|
| 97 |
+ } |
|
| 98 |
+} |
|
| 99 |
+ |
|
| 100 |
+// Restart initiates a new task to replace t if appropriate under the service's |
|
| 101 |
+// restart policy. |
|
| 102 |
+func (r *Supervisor) Restart(ctx context.Context, tx store.Tx, cluster *api.Cluster, service *api.Service, t api.Task) error {
|
|
| 103 |
+ // TODO(aluzzardi): This function should not depend on `service`. |
|
| 104 |
+ |
|
| 105 |
+ // Is the old task still in the process of restarting? If so, wait for |
|
| 106 |
+ // its restart delay to elapse, to avoid tight restart loops (for |
|
| 107 |
+ // example, when the image doesn't exist). |
|
| 108 |
+ r.mu.Lock() |
|
| 109 |
+ oldDelay, ok := r.delays[t.ID] |
|
| 110 |
+ if ok {
|
|
| 111 |
+ if !oldDelay.waiter {
|
|
| 112 |
+ oldDelay.waiter = true |
|
| 113 |
+ go r.waitRestart(ctx, oldDelay, cluster, t.ID) |
|
| 114 |
+ } |
|
| 115 |
+ r.mu.Unlock() |
|
| 116 |
+ return nil |
|
| 117 |
+ } |
|
| 118 |
+ r.mu.Unlock() |
|
| 119 |
+ |
|
| 120 |
+ // Sanity check: was the task shut down already by a separate call to |
|
| 121 |
+ // Restart? If so, we must avoid restarting it, because this will create |
|
| 122 |
+ // an extra task. This should never happen unless there is a bug. |
|
| 123 |
+ if t.DesiredState > api.TaskStateRunning {
|
|
| 124 |
+ return errors.New("Restart called on task that was already shut down")
|
|
| 125 |
+ } |
|
| 126 |
+ |
|
| 127 |
+ t.DesiredState = api.TaskStateShutdown |
|
| 128 |
+ err := store.UpdateTask(tx, &t) |
|
| 129 |
+ if err != nil {
|
|
| 130 |
+ log.G(ctx).WithError(err).Errorf("failed to set task desired state to dead")
|
|
| 131 |
+ return err |
|
| 132 |
+ } |
|
| 133 |
+ |
|
| 134 |
+ if !r.shouldRestart(ctx, &t, service) {
|
|
| 135 |
+ return nil |
|
| 136 |
+ } |
|
| 137 |
+ |
|
| 138 |
+ var restartTask *api.Task |
|
| 139 |
+ |
|
| 140 |
+ if orchestrator.IsReplicatedService(service) {
|
|
| 141 |
+ restartTask = orchestrator.NewTask(cluster, service, t.Slot, "") |
|
| 142 |
+ } else if orchestrator.IsGlobalService(service) {
|
|
| 143 |
+ restartTask = orchestrator.NewTask(cluster, service, 0, t.NodeID) |
|
| 144 |
+ } else {
|
|
| 145 |
+ log.G(ctx).Error("service not supported by restart supervisor")
|
|
| 146 |
+ return nil |
|
| 147 |
+ } |
|
| 148 |
+ |
|
| 149 |
+ n := store.GetNode(tx, t.NodeID) |
|
| 150 |
+ |
|
| 151 |
+ restartTask.DesiredState = api.TaskStateReady |
|
| 152 |
+ |
|
| 153 |
+ var restartDelay time.Duration |
|
| 154 |
+ // Restart delay is not applied to drained nodes |
|
| 155 |
+ if n == nil || n.Spec.Availability != api.NodeAvailabilityDrain {
|
|
| 156 |
+ if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
|
|
| 157 |
+ var err error |
|
| 158 |
+ restartDelay, err = ptypes.Duration(t.Spec.Restart.Delay) |
|
| 159 |
+ if err != nil {
|
|
| 160 |
+ log.G(ctx).WithError(err).Error("invalid restart delay; using default")
|
|
| 161 |
+ restartDelay = orchestrator.DefaultRestartDelay |
|
| 162 |
+ } |
|
| 163 |
+ } else {
|
|
| 164 |
+ restartDelay = orchestrator.DefaultRestartDelay |
|
| 165 |
+ } |
|
| 166 |
+ } |
|
| 167 |
+ |
|
| 168 |
+ waitStop := true |
|
| 169 |
+ |
|
| 170 |
+ // Normally we wait for the old task to stop running, but we skip this |
|
| 171 |
+ // if the old task is already dead or the node it's assigned to is down. |
|
| 172 |
+ if (n != nil && n.Status.State == api.NodeStatus_DOWN) || t.Status.State > api.TaskStateRunning {
|
|
| 173 |
+ waitStop = false |
|
| 174 |
+ } |
|
| 175 |
+ |
|
| 176 |
+ if err := store.CreateTask(tx, restartTask); err != nil {
|
|
| 177 |
+ log.G(ctx).WithError(err).WithField("task.id", restartTask.ID).Error("task create failed")
|
|
| 178 |
+ return err |
|
| 179 |
+ } |
|
| 180 |
+ |
|
| 181 |
+ r.recordRestartHistory(restartTask) |
|
| 182 |
+ |
|
| 183 |
+ r.DelayStart(ctx, tx, &t, restartTask.ID, restartDelay, waitStop) |
|
| 184 |
+ return nil |
|
| 185 |
+} |
|
| 186 |
+ |
|
| 187 |
+func (r *Supervisor) shouldRestart(ctx context.Context, t *api.Task, service *api.Service) bool {
|
|
| 188 |
+ // TODO(aluzzardi): This function should not depend on `service`. |
|
| 189 |
+ |
|
| 190 |
+ condition := orchestrator.RestartCondition(t) |
|
| 191 |
+ |
|
| 192 |
+ if condition != api.RestartOnAny && |
|
| 193 |
+ (condition != api.RestartOnFailure || t.Status.State == api.TaskStateCompleted) {
|
|
| 194 |
+ return false |
|
| 195 |
+ } |
|
| 196 |
+ |
|
| 197 |
+ if t.Spec.Restart == nil || t.Spec.Restart.MaxAttempts == 0 {
|
|
| 198 |
+ return true |
|
| 199 |
+ } |
|
| 200 |
+ |
|
| 201 |
+ instanceTuple := instanceTuple{
|
|
| 202 |
+ instance: t.Slot, |
|
| 203 |
+ serviceID: t.ServiceID, |
|
| 204 |
+ } |
|
| 205 |
+ |
|
| 206 |
+ // Instance is not meaningful for "global" tasks, so they need to be |
|
| 207 |
+ // indexed by NodeID. |
|
| 208 |
+ if orchestrator.IsGlobalService(service) {
|
|
| 209 |
+ instanceTuple.nodeID = t.NodeID |
|
| 210 |
+ } |
|
| 211 |
+ |
|
| 212 |
+ r.mu.Lock() |
|
| 213 |
+ defer r.mu.Unlock() |
|
| 214 |
+ |
|
| 215 |
+ restartInfo := r.history[instanceTuple] |
|
| 216 |
+ if restartInfo == nil {
|
|
| 217 |
+ return true |
|
| 218 |
+ } |
|
| 219 |
+ |
|
| 220 |
+ if t.Spec.Restart.Window == nil || (t.Spec.Restart.Window.Seconds == 0 && t.Spec.Restart.Window.Nanos == 0) {
|
|
| 221 |
+ return restartInfo.totalRestarts < t.Spec.Restart.MaxAttempts |
|
| 222 |
+ } |
|
| 223 |
+ |
|
| 224 |
+ if restartInfo.restartedInstances == nil {
|
|
| 225 |
+ return true |
|
| 226 |
+ } |
|
| 227 |
+ |
|
| 228 |
+ window, err := ptypes.Duration(t.Spec.Restart.Window) |
|
| 229 |
+ if err != nil {
|
|
| 230 |
+ log.G(ctx).WithError(err).Error("invalid restart lookback window")
|
|
| 231 |
+ return restartInfo.totalRestarts < t.Spec.Restart.MaxAttempts |
|
| 232 |
+ } |
|
| 233 |
+ lookback := time.Now().Add(-window) |
|
| 234 |
+ |
|
| 235 |
+ var next *list.Element |
|
| 236 |
+ for e := restartInfo.restartedInstances.Front(); e != nil; e = next {
|
|
| 237 |
+ next = e.Next() |
|
| 238 |
+ |
|
| 239 |
+ if e.Value.(restartedInstance).timestamp.After(lookback) {
|
|
| 240 |
+ break |
|
| 241 |
+ } |
|
| 242 |
+ restartInfo.restartedInstances.Remove(e) |
|
| 243 |
+ } |
|
| 244 |
+ |
|
| 245 |
+ numRestarts := uint64(restartInfo.restartedInstances.Len()) |
|
| 246 |
+ |
|
| 247 |
+ if numRestarts == 0 {
|
|
| 248 |
+ restartInfo.restartedInstances = nil |
|
| 249 |
+ } |
|
| 250 |
+ |
|
| 251 |
+ return numRestarts < t.Spec.Restart.MaxAttempts |
|
| 252 |
+} |
|
| 253 |
+ |
|
| 254 |
+func (r *Supervisor) recordRestartHistory(restartTask *api.Task) {
|
|
| 255 |
+ if restartTask.Spec.Restart == nil || restartTask.Spec.Restart.MaxAttempts == 0 {
|
|
| 256 |
+ // No limit on the number of restarts, so no need to record |
|
| 257 |
+ // history. |
|
| 258 |
+ return |
|
| 259 |
+ } |
|
| 260 |
+ tuple := instanceTuple{
|
|
| 261 |
+ instance: restartTask.Slot, |
|
| 262 |
+ serviceID: restartTask.ServiceID, |
|
| 263 |
+ nodeID: restartTask.NodeID, |
|
| 264 |
+ } |
|
| 265 |
+ |
|
| 266 |
+ r.mu.Lock() |
|
| 267 |
+ defer r.mu.Unlock() |
|
| 268 |
+ |
|
| 269 |
+ if r.history[tuple] == nil {
|
|
| 270 |
+ r.history[tuple] = &instanceRestartInfo{}
|
|
| 271 |
+ } |
|
| 272 |
+ |
|
| 273 |
+ restartInfo := r.history[tuple] |
|
| 274 |
+ restartInfo.totalRestarts++ |
|
| 275 |
+ |
|
| 276 |
+ if r.historyByService[restartTask.ServiceID] == nil {
|
|
| 277 |
+ r.historyByService[restartTask.ServiceID] = make(map[instanceTuple]struct{})
|
|
| 278 |
+ } |
|
| 279 |
+ r.historyByService[restartTask.ServiceID][tuple] = struct{}{}
|
|
| 280 |
+ |
|
| 281 |
+ if restartTask.Spec.Restart.Window != nil && (restartTask.Spec.Restart.Window.Seconds != 0 || restartTask.Spec.Restart.Window.Nanos != 0) {
|
|
| 282 |
+ if restartInfo.restartedInstances == nil {
|
|
| 283 |
+ restartInfo.restartedInstances = list.New() |
|
| 284 |
+ } |
|
| 285 |
+ |
|
| 286 |
+ restartedInstance := restartedInstance{
|
|
| 287 |
+ timestamp: time.Now(), |
|
| 288 |
+ } |
|
| 289 |
+ |
|
| 290 |
+ restartInfo.restartedInstances.PushBack(restartedInstance) |
|
| 291 |
+ } |
|
| 292 |
+} |
|
| 293 |
+ |
|
| 294 |
+// DelayStart starts a timer that moves the task from READY to RUNNING once: |
|
| 295 |
+// - The restart delay has elapsed (if applicable) |
|
| 296 |
+// - The old task that it's replacing has stopped running (or this times out) |
|
| 297 |
+// It must be called during an Update transaction to ensure that it does not |
|
| 298 |
+// miss events. The purpose of the store.Tx argument is to avoid accidental |
|
| 299 |
+// calls outside an Update transaction. |
|
| 300 |
+func (r *Supervisor) DelayStart(ctx context.Context, _ store.Tx, oldTask *api.Task, newTaskID string, delay time.Duration, waitStop bool) <-chan struct{} {
|
|
| 301 |
+ ctx, cancel := context.WithCancel(context.Background()) |
|
| 302 |
+ doneCh := make(chan struct{})
|
|
| 303 |
+ |
|
| 304 |
+ r.mu.Lock() |
|
| 305 |
+ for {
|
|
| 306 |
+ oldDelay, ok := r.delays[newTaskID] |
|
| 307 |
+ if !ok {
|
|
| 308 |
+ break |
|
| 309 |
+ } |
|
| 310 |
+ oldDelay.cancel() |
|
| 311 |
+ r.mu.Unlock() |
|
| 312 |
+ // Note that this channel read should only block for a very |
|
| 313 |
+ // short time, because we cancelled the existing delay and |
|
| 314 |
+ // that should cause it to stop immediately. |
|
| 315 |
+ <-oldDelay.doneCh |
|
| 316 |
+ r.mu.Lock() |
|
| 317 |
+ } |
|
| 318 |
+ r.delays[newTaskID] = &delayedStart{cancel: cancel, doneCh: doneCh}
|
|
| 319 |
+ r.mu.Unlock() |
|
| 320 |
+ |
|
| 321 |
+ var watch chan events.Event |
|
| 322 |
+ cancelWatch := func() {}
|
|
| 323 |
+ |
|
| 324 |
+ if waitStop && oldTask != nil {
|
|
| 325 |
+ // Wait for either the old task to complete, or the old task's |
|
| 326 |
+ // node to become unavailable. |
|
| 327 |
+ watch, cancelWatch = state.Watch( |
|
| 328 |
+ r.store.WatchQueue(), |
|
| 329 |
+ state.EventUpdateTask{
|
|
| 330 |
+ Task: &api.Task{ID: oldTask.ID, Status: api.TaskStatus{State: api.TaskStateRunning}},
|
|
| 331 |
+ Checks: []state.TaskCheckFunc{state.TaskCheckID, state.TaskCheckStateGreaterThan},
|
|
| 332 |
+ }, |
|
| 333 |
+ state.EventUpdateNode{
|
|
| 334 |
+ Node: &api.Node{ID: oldTask.NodeID, Status: api.NodeStatus{State: api.NodeStatus_DOWN}},
|
|
| 335 |
+ Checks: []state.NodeCheckFunc{state.NodeCheckID, state.NodeCheckState},
|
|
| 336 |
+ }, |
|
| 337 |
+ state.EventDeleteNode{
|
|
| 338 |
+ Node: &api.Node{ID: oldTask.NodeID},
|
|
| 339 |
+ Checks: []state.NodeCheckFunc{state.NodeCheckID},
|
|
| 340 |
+ }, |
|
| 341 |
+ ) |
|
| 342 |
+ } |
|
| 343 |
+ |
|
| 344 |
+ go func() {
|
|
| 345 |
+ defer func() {
|
|
| 346 |
+ cancelWatch() |
|
| 347 |
+ r.mu.Lock() |
|
| 348 |
+ delete(r.delays, newTaskID) |
|
| 349 |
+ r.mu.Unlock() |
|
| 350 |
+ close(doneCh) |
|
| 351 |
+ }() |
|
| 352 |
+ |
|
| 353 |
+ oldTaskTimer := time.NewTimer(r.TaskTimeout) |
|
| 354 |
+ defer oldTaskTimer.Stop() |
|
| 355 |
+ |
|
| 356 |
+ // Wait for the delay to elapse, if one is specified. |
|
| 357 |
+ if delay != 0 {
|
|
| 358 |
+ select {
|
|
| 359 |
+ case <-time.After(delay): |
|
| 360 |
+ case <-ctx.Done(): |
|
| 361 |
+ return |
|
| 362 |
+ } |
|
| 363 |
+ } |
|
| 364 |
+ |
|
| 365 |
+ if waitStop && oldTask != nil {
|
|
| 366 |
+ select {
|
|
| 367 |
+ case <-watch: |
|
| 368 |
+ case <-oldTaskTimer.C: |
|
| 369 |
+ case <-ctx.Done(): |
|
| 370 |
+ return |
|
| 371 |
+ } |
|
| 372 |
+ } |
|
| 373 |
+ |
|
| 374 |
+ err := r.store.Update(func(tx store.Tx) error {
|
|
| 375 |
+ err := r.StartNow(tx, newTaskID) |
|
| 376 |
+ if err != nil {
|
|
| 377 |
+ log.G(ctx).WithError(err).WithField("task.id", newTaskID).Error("moving task out of delayed state failed")
|
|
| 378 |
+ } |
|
| 379 |
+ return nil |
|
| 380 |
+ }) |
|
| 381 |
+ if err != nil {
|
|
| 382 |
+ log.G(ctx).WithError(err).WithField("task.id", newTaskID).Error("task restart transaction failed")
|
|
| 383 |
+ } |
|
| 384 |
+ }() |
|
| 385 |
+ |
|
| 386 |
+ return doneCh |
|
| 387 |
+} |
|
| 388 |
+ |
|
| 389 |
+// StartNow moves the task into the RUNNING state so it will proceed to start |
|
| 390 |
+// up. |
|
| 391 |
+func (r *Supervisor) StartNow(tx store.Tx, taskID string) error {
|
|
| 392 |
+ t := store.GetTask(tx, taskID) |
|
| 393 |
+ if t == nil || t.DesiredState >= api.TaskStateRunning {
|
|
| 394 |
+ return nil |
|
| 395 |
+ } |
|
| 396 |
+ t.DesiredState = api.TaskStateRunning |
|
| 397 |
+ return store.UpdateTask(tx, t) |
|
| 398 |
+} |
|
| 399 |
+ |
|
| 400 |
+// Cancel cancels a pending restart. |
|
| 401 |
+func (r *Supervisor) Cancel(taskID string) {
|
|
| 402 |
+ r.mu.Lock() |
|
| 403 |
+ delay, ok := r.delays[taskID] |
|
| 404 |
+ r.mu.Unlock() |
|
| 405 |
+ |
|
| 406 |
+ if !ok {
|
|
| 407 |
+ return |
|
| 408 |
+ } |
|
| 409 |
+ |
|
| 410 |
+ delay.cancel() |
|
| 411 |
+ <-delay.doneCh |
|
| 412 |
+} |
|
| 413 |
+ |
|
| 414 |
+// CancelAll aborts all pending restarts and waits for any instances of |
|
| 415 |
+// StartNow that have already triggered to complete. |
|
| 416 |
+func (r *Supervisor) CancelAll() {
|
|
| 417 |
+ var cancelled []delayedStart |
|
| 418 |
+ |
|
| 419 |
+ r.mu.Lock() |
|
| 420 |
+ for _, delay := range r.delays {
|
|
| 421 |
+		delay.cancel() |
+		cancelled = append(cancelled, *delay) |
|
| 422 |
+	} |
|
| 423 |
+ r.mu.Unlock() |
|
| 424 |
+ |
|
| 425 |
+ for _, delay := range cancelled {
|
|
| 426 |
+ <-delay.doneCh |
|
| 427 |
+ } |
|
| 428 |
+} |
|
| 429 |
+ |
|
| 430 |
+// ClearServiceHistory forgets restart history related to a given service ID. |
|
| 431 |
+func (r *Supervisor) ClearServiceHistory(serviceID string) {
|
|
| 432 |
+ r.mu.Lock() |
|
| 433 |
+ defer r.mu.Unlock() |
|
| 434 |
+ |
|
| 435 |
+ tuples := r.historyByService[serviceID] |
|
| 436 |
+ if tuples == nil {
|
|
| 437 |
+ return |
|
| 438 |
+ } |
|
| 439 |
+ |
|
| 440 |
+ delete(r.historyByService, serviceID) |
|
| 441 |
+ |
|
| 442 |
+ for t := range tuples {
|
|
| 443 |
+ delete(r.history, t) |
|
| 444 |
+ } |
|
| 445 |
+} |
| 0 | 446 |
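The new restart supervisor above is driven from inside store transactions. As a rough sketch of the expected calling convention (the example package, the function name, and the failed-task plumbing are illustrative assumptions, not part of the vendored code), a caller marks the failed task and requests the restart inside a single store.Update, so the shutdown of the old task and the creation of its replacement commit together:

    package example // illustrative sketch; not part of the vendored commit

    import (
    	"golang.org/x/net/context"

    	"github.com/docker/swarmkit/api"
    	"github.com/docker/swarmkit/manager/orchestrator/restart"
    	"github.com/docker/swarmkit/manager/state/store"
    )

    // restartFailedTask asks the restart supervisor to replace a task, using
    // the same calling convention as the orchestrators: everything happens in
    // one store.Update transaction.
    func restartFailedTask(ctx context.Context, s *store.MemoryStore, r *restart.Supervisor, cluster *api.Cluster, taskID string) error {
    	return s.Update(func(tx store.Tx) error {
    		t := store.GetTask(tx, taskID)
    		if t == nil || t.DesiredState > api.TaskStateRunning {
    			// Task is gone or already being shut down; nothing to do.
    			return nil
    		}
    		service := store.GetService(tx, t.ServiceID)
    		if service == nil {
    			return nil
    		}
    		return r.Restart(ctx, tx, cluster, service, *t)
    	})
    }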
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,61 @@ |
| 0 |
+package orchestrator |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "github.com/docker/swarmkit/api" |
|
| 4 |
+ "github.com/docker/swarmkit/log" |
|
| 5 |
+ "github.com/docker/swarmkit/manager/state/store" |
|
| 6 |
+ "golang.org/x/net/context" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+// IsReplicatedService checks if a service is a replicated service. |
|
| 10 |
+func IsReplicatedService(service *api.Service) bool {
|
|
| 11 |
+ // service nil validation is required as there are scenarios |
|
| 12 |
+ // where service is removed from store |
|
| 13 |
+ if service == nil {
|
|
| 14 |
+ return false |
|
| 15 |
+ } |
|
| 16 |
+ _, ok := service.Spec.GetMode().(*api.ServiceSpec_Replicated) |
|
| 17 |
+ return ok |
|
| 18 |
+} |
|
| 19 |
+ |
|
| 20 |
+// IsGlobalService checks if the service is a global service. |
|
| 21 |
+func IsGlobalService(service *api.Service) bool {
|
|
| 22 |
+ if service == nil {
|
|
| 23 |
+ return false |
|
| 24 |
+ } |
|
| 25 |
+ _, ok := service.Spec.GetMode().(*api.ServiceSpec_Global) |
|
| 26 |
+ return ok |
|
| 27 |
+} |
|
| 28 |
+ |
|
| 29 |
+// DeleteServiceTasks deletes the tasks associated with a service. |
|
| 30 |
+func DeleteServiceTasks(ctx context.Context, s *store.MemoryStore, service *api.Service) {
|
|
| 31 |
+ var ( |
|
| 32 |
+ tasks []*api.Task |
|
| 33 |
+ err error |
|
| 34 |
+ ) |
|
| 35 |
+ s.View(func(tx store.ReadTx) {
|
|
| 36 |
+ tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID)) |
|
| 37 |
+ }) |
|
| 38 |
+ if err != nil {
|
|
| 39 |
+ log.G(ctx).WithError(err).Errorf("failed to list tasks")
|
|
| 40 |
+ return |
|
| 41 |
+ } |
|
| 42 |
+ |
|
| 43 |
+ _, err = s.Batch(func(batch *store.Batch) error {
|
|
| 44 |
+ for _, t := range tasks {
|
|
| 45 |
+ err := batch.Update(func(tx store.Tx) error {
|
|
| 46 |
+ if err := store.DeleteTask(tx, t.ID); err != nil {
|
|
| 47 |
+ log.G(ctx).WithError(err).Errorf("failed to delete task")
|
|
| 48 |
+ } |
|
| 49 |
+ return nil |
|
| 50 |
+ }) |
|
| 51 |
+ if err != nil {
|
|
| 52 |
+ return err |
|
| 53 |
+ } |
|
| 54 |
+ } |
|
| 55 |
+ return nil |
|
| 56 |
+ }) |
|
| 57 |
+ if err != nil {
|
|
| 58 |
+ log.G(ctx).WithError(err).Errorf("task search transaction failed")
|
|
| 59 |
+ } |
|
| 60 |
+} |
| 0 | 61 |
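The exported helpers above are what the orchestrators call when a service disappears. A minimal sketch of that flow (the example package, the function name, and the event plumbing are assumptions for illustration):

    package example // illustrative sketch; not part of the vendored commit

    import (
    	"golang.org/x/net/context"

    	"github.com/docker/swarmkit/api"
    	"github.com/docker/swarmkit/manager/orchestrator"
    	"github.com/docker/swarmkit/manager/orchestrator/restart"
    	"github.com/docker/swarmkit/manager/state/store"
    )

    // handleServiceDelete mirrors what the replicated orchestrator does when a
    // service is deleted: remove its tasks and forget its restart history.
    func handleServiceDelete(ctx context.Context, s *store.MemoryStore, restarts *restart.Supervisor, service *api.Service) {
    	if !orchestrator.IsReplicatedService(service) {
    		return
    	}
    	orchestrator.DeleteServiceTasks(ctx, s, service)
    	restarts.ClearServiceHistory(service.ID)
    }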
deleted file mode 100644 |
| ... | ... |
@@ -1,256 +0,0 @@ |
| 1 |
-package orchestrator |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "sort" |
|
| 5 |
- |
|
| 6 |
- "github.com/docker/go-events" |
|
| 7 |
- "github.com/docker/swarmkit/api" |
|
| 8 |
- "github.com/docker/swarmkit/log" |
|
| 9 |
- "github.com/docker/swarmkit/manager/state" |
|
| 10 |
- "github.com/docker/swarmkit/manager/state/store" |
|
| 11 |
- "golang.org/x/net/context" |
|
| 12 |
-) |
|
| 13 |
- |
|
| 14 |
-// This file provides service-level orchestration. It observes changes to |
|
| 15 |
-// services and creates and destroys tasks as necessary to match the service |
|
| 16 |
-// specifications. This is different from task-level orchestration, which |
|
| 17 |
-// responds to changes in individual tasks (or nodes which run them). |
|
| 18 |
- |
|
| 19 |
-func (r *ReplicatedOrchestrator) initCluster(readTx store.ReadTx) error {
|
|
| 20 |
- clusters, err := store.FindClusters(readTx, store.ByName("default"))
|
|
| 21 |
- if err != nil {
|
|
| 22 |
- return err |
|
| 23 |
- } |
|
| 24 |
- |
|
| 25 |
- if len(clusters) != 1 {
|
|
| 26 |
- // we'll just pick it when it is created. |
|
| 27 |
- return nil |
|
| 28 |
- } |
|
| 29 |
- |
|
| 30 |
- r.cluster = clusters[0] |
|
| 31 |
- return nil |
|
| 32 |
-} |
|
| 33 |
- |
|
| 34 |
-func (r *ReplicatedOrchestrator) initServices(readTx store.ReadTx) error {
|
|
| 35 |
- services, err := store.FindServices(readTx, store.All) |
|
| 36 |
- if err != nil {
|
|
| 37 |
- return err |
|
| 38 |
- } |
|
| 39 |
- for _, s := range services {
|
|
| 40 |
- if isReplicatedService(s) {
|
|
| 41 |
- r.reconcileServices[s.ID] = s |
|
| 42 |
- } |
|
| 43 |
- } |
|
| 44 |
- return nil |
|
| 45 |
-} |
|
| 46 |
- |
|
| 47 |
-func (r *ReplicatedOrchestrator) handleServiceEvent(ctx context.Context, event events.Event) {
|
|
| 48 |
- switch v := event.(type) {
|
|
| 49 |
- case state.EventDeleteService: |
|
| 50 |
- if !isReplicatedService(v.Service) {
|
|
| 51 |
- return |
|
| 52 |
- } |
|
| 53 |
- deleteServiceTasks(ctx, r.store, v.Service) |
|
| 54 |
- r.restarts.ClearServiceHistory(v.Service.ID) |
|
| 55 |
- case state.EventCreateService: |
|
| 56 |
- if !isReplicatedService(v.Service) {
|
|
| 57 |
- return |
|
| 58 |
- } |
|
| 59 |
- r.reconcileServices[v.Service.ID] = v.Service |
|
| 60 |
- case state.EventUpdateService: |
|
| 61 |
- if !isReplicatedService(v.Service) {
|
|
| 62 |
- return |
|
| 63 |
- } |
|
| 64 |
- r.reconcileServices[v.Service.ID] = v.Service |
|
| 65 |
- } |
|
| 66 |
-} |
|
| 67 |
- |
|
| 68 |
-func (r *ReplicatedOrchestrator) tickServices(ctx context.Context) {
|
|
| 69 |
- if len(r.reconcileServices) > 0 {
|
|
| 70 |
- for _, s := range r.reconcileServices {
|
|
| 71 |
- r.reconcile(ctx, s) |
|
| 72 |
- } |
|
| 73 |
- r.reconcileServices = make(map[string]*api.Service) |
|
| 74 |
- } |
|
| 75 |
-} |
|
| 76 |
- |
|
| 77 |
-func (r *ReplicatedOrchestrator) resolveService(ctx context.Context, task *api.Task) *api.Service {
|
|
| 78 |
- if task.ServiceID == "" {
|
|
| 79 |
- return nil |
|
| 80 |
- } |
|
| 81 |
- var service *api.Service |
|
| 82 |
- r.store.View(func(tx store.ReadTx) {
|
|
| 83 |
- service = store.GetService(tx, task.ServiceID) |
|
| 84 |
- }) |
|
| 85 |
- return service |
|
| 86 |
-} |
|
| 87 |
- |
|
| 88 |
-func (r *ReplicatedOrchestrator) reconcile(ctx context.Context, service *api.Service) {
|
|
| 89 |
- runningSlots, deadSlots, err := getRunnableAndDeadSlots(r.store, service.ID) |
|
| 90 |
- if err != nil {
|
|
| 91 |
- log.G(ctx).WithError(err).Errorf("reconcile failed finding tasks")
|
|
| 92 |
- return |
|
| 93 |
- } |
|
| 94 |
- |
|
| 95 |
- numSlots := len(runningSlots) |
|
| 96 |
- |
|
| 97 |
- slotsSlice := make([]slot, 0, numSlots) |
|
| 98 |
- for _, slot := range runningSlots {
|
|
| 99 |
- slotsSlice = append(slotsSlice, slot) |
|
| 100 |
- } |
|
| 101 |
- |
|
| 102 |
- deploy := service.Spec.GetMode().(*api.ServiceSpec_Replicated) |
|
| 103 |
- specifiedSlots := int(deploy.Replicated.Replicas) |
|
| 104 |
- |
|
| 105 |
- switch {
|
|
| 106 |
- case specifiedSlots > numSlots: |
|
| 107 |
- log.G(ctx).Debugf("Service %s was scaled up from %d to %d instances", service.ID, numSlots, specifiedSlots)
|
|
| 108 |
- // Update all current tasks then add missing tasks |
|
| 109 |
- r.updater.Update(ctx, r.cluster, service, slotsSlice) |
|
| 110 |
- _, err = r.store.Batch(func(batch *store.Batch) error {
|
|
| 111 |
- r.addTasks(ctx, batch, service, runningSlots, deadSlots, specifiedSlots-numSlots) |
|
| 112 |
- r.deleteTasksMap(ctx, batch, deadSlots) |
|
| 113 |
- return nil |
|
| 114 |
- }) |
|
| 115 |
- if err != nil {
|
|
| 116 |
- log.G(ctx).WithError(err).Errorf("reconcile batch failed")
|
|
| 117 |
- } |
|
| 118 |
- |
|
| 119 |
- case specifiedSlots < numSlots: |
|
| 120 |
- // Update up to N tasks then remove the extra |
|
| 121 |
- log.G(ctx).Debugf("Service %s was scaled down from %d to %d instances", service.ID, numSlots, specifiedSlots)
|
|
| 122 |
- |
|
| 123 |
- // Preferentially remove tasks on the nodes that have the most |
|
| 124 |
- // copies of this service, to leave a more balanced result. |
|
| 125 |
- |
|
| 126 |
- // First sort tasks such that tasks which are currently running |
|
| 127 |
- // (in terms of observed state) appear before non-running tasks. |
|
| 128 |
- // This will cause us to prefer to remove non-running tasks, all |
|
| 129 |
- // other things being equal in terms of node balance. |
|
| 130 |
- |
|
| 131 |
- sort.Sort(slotsByRunningState(slotsSlice)) |
|
| 132 |
- |
|
| 133 |
- // Assign each task an index that counts it as the nth copy of |
|
| 134 |
- // of the service on its node (1, 2, 3, ...), and sort the |
|
| 135 |
- // tasks by this counter value. |
|
| 136 |
- |
|
| 137 |
- slotsByNode := make(map[string]int) |
|
| 138 |
- slotsWithIndices := make(slotsByIndex, 0, numSlots) |
|
| 139 |
- |
|
| 140 |
- for _, slot := range slotsSlice {
|
|
| 141 |
- if len(slot) == 1 && slot[0].NodeID != "" {
|
|
| 142 |
- slotsByNode[slot[0].NodeID]++ |
|
| 143 |
- slotsWithIndices = append(slotsWithIndices, slotWithIndex{slot: slot, index: slotsByNode[slot[0].NodeID]})
|
|
| 144 |
- } else {
|
|
| 145 |
- slotsWithIndices = append(slotsWithIndices, slotWithIndex{slot: slot, index: -1})
|
|
| 146 |
- } |
|
| 147 |
- } |
|
| 148 |
- |
|
| 149 |
- sort.Sort(slotsWithIndices) |
|
| 150 |
- |
|
| 151 |
- sortedSlots := make([]slot, 0, numSlots) |
|
| 152 |
- for _, slot := range slotsWithIndices {
|
|
| 153 |
- sortedSlots = append(sortedSlots, slot.slot) |
|
| 154 |
- } |
|
| 155 |
- |
|
| 156 |
- r.updater.Update(ctx, r.cluster, service, sortedSlots[:specifiedSlots]) |
|
| 157 |
- _, err = r.store.Batch(func(batch *store.Batch) error {
|
|
| 158 |
- r.deleteTasksMap(ctx, batch, deadSlots) |
|
| 159 |
- r.deleteTasks(ctx, batch, sortedSlots[specifiedSlots:]) |
|
| 160 |
- return nil |
|
| 161 |
- }) |
|
| 162 |
- if err != nil {
|
|
| 163 |
- log.G(ctx).WithError(err).Errorf("reconcile batch failed")
|
|
| 164 |
- } |
|
| 165 |
- |
|
| 166 |
- case specifiedSlots == numSlots: |
|
| 167 |
- _, err = r.store.Batch(func(batch *store.Batch) error {
|
|
| 168 |
- r.deleteTasksMap(ctx, batch, deadSlots) |
|
| 169 |
- return nil |
|
| 170 |
- }) |
|
| 171 |
- if err != nil {
|
|
| 172 |
- log.G(ctx).WithError(err).Errorf("reconcile batch failed")
|
|
| 173 |
- } |
|
| 174 |
- // Simple update, no scaling - update all tasks. |
|
| 175 |
- r.updater.Update(ctx, r.cluster, service, slotsSlice) |
|
| 176 |
- } |
|
| 177 |
-} |
|
| 178 |
- |
|
| 179 |
-func (r *ReplicatedOrchestrator) addTasks(ctx context.Context, batch *store.Batch, service *api.Service, runningSlots map[uint64]slot, deadSlots map[uint64]slot, count int) {
|
|
| 180 |
- slot := uint64(0) |
|
| 181 |
- for i := 0; i < count; i++ {
|
|
| 182 |
- // Find an slot number that is missing a running task |
|
| 183 |
- for {
|
|
| 184 |
- slot++ |
|
| 185 |
- if _, ok := runningSlots[slot]; !ok {
|
|
| 186 |
- break |
|
| 187 |
- } |
|
| 188 |
- } |
|
| 189 |
- |
|
| 190 |
- delete(deadSlots, slot) |
|
| 191 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 192 |
- return store.CreateTask(tx, newTask(r.cluster, service, slot, "")) |
|
| 193 |
- }) |
|
| 194 |
- if err != nil {
|
|
| 195 |
- log.G(ctx).Errorf("Failed to create task: %v", err)
|
|
| 196 |
- } |
|
| 197 |
- } |
|
| 198 |
-} |
|
| 199 |
- |
|
| 200 |
-func (r *ReplicatedOrchestrator) deleteTasks(ctx context.Context, batch *store.Batch, slots []slot) {
|
|
| 201 |
- for _, slot := range slots {
|
|
| 202 |
- for _, t := range slot {
|
|
| 203 |
- r.deleteTask(ctx, batch, t) |
|
| 204 |
- } |
|
| 205 |
- } |
|
| 206 |
-} |
|
| 207 |
- |
|
| 208 |
-func (r *ReplicatedOrchestrator) deleteTasksMap(ctx context.Context, batch *store.Batch, slots map[uint64]slot) {
|
|
| 209 |
- for _, slot := range slots {
|
|
| 210 |
- for _, t := range slot {
|
|
| 211 |
- r.deleteTask(ctx, batch, t) |
|
| 212 |
- } |
|
| 213 |
- } |
|
| 214 |
-} |
|
| 215 |
- |
|
| 216 |
-func (r *ReplicatedOrchestrator) deleteTask(ctx context.Context, batch *store.Batch, t *api.Task) {
|
|
| 217 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 218 |
- return store.DeleteTask(tx, t.ID) |
|
| 219 |
- }) |
|
| 220 |
- if err != nil {
|
|
| 221 |
- log.G(ctx).WithError(err).Errorf("deleting task %s failed", t.ID)
|
|
| 222 |
- } |
|
| 223 |
-} |
|
| 224 |
- |
|
| 225 |
-// getRunnableAndDeadSlots returns two maps of slots. The first contains slots |
|
| 226 |
-// that have at least one task with a desired state above NEW and lesser or |
|
| 227 |
-// equal to RUNNING. The second is for slots that only contain tasks with a |
|
| 228 |
-// desired state above RUNNING. |
|
| 229 |
-func getRunnableAndDeadSlots(s *store.MemoryStore, serviceID string) (map[uint64]slot, map[uint64]slot, error) {
|
|
| 230 |
- var ( |
|
| 231 |
- tasks []*api.Task |
|
| 232 |
- err error |
|
| 233 |
- ) |
|
| 234 |
- s.View(func(tx store.ReadTx) {
|
|
| 235 |
- tasks, err = store.FindTasks(tx, store.ByServiceID(serviceID)) |
|
| 236 |
- }) |
|
| 237 |
- if err != nil {
|
|
| 238 |
- return nil, nil, err |
|
| 239 |
- } |
|
| 240 |
- |
|
| 241 |
- runningSlots := make(map[uint64]slot) |
|
| 242 |
- for _, t := range tasks {
|
|
| 243 |
- if t.DesiredState <= api.TaskStateRunning {
|
|
| 244 |
- runningSlots[t.Slot] = append(runningSlots[t.Slot], t) |
|
| 245 |
- } |
|
| 246 |
- } |
|
| 247 |
- |
|
| 248 |
- deadSlots := make(map[uint64]slot) |
|
| 249 |
- for _, t := range tasks {
|
|
| 250 |
- if _, exists := runningSlots[t.Slot]; !exists {
|
|
| 251 |
- deadSlots[t.Slot] = append(deadSlots[t.Slot], t) |
|
| 252 |
- } |
|
| 253 |
- } |
|
| 254 |
- |
|
| 255 |
- return runningSlots, deadSlots, nil |
|
| 256 |
-} |
| ... | ... |
@@ -2,60 +2,45 @@ package orchestrator |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"github.com/docker/swarmkit/api" |
| 5 |
+ "github.com/docker/swarmkit/manager/state/store" |
|
| 5 | 6 |
) |
| 6 | 7 |
|
| 7 |
-// slot is a list of the running tasks occupying a certain slot. Generally this |
|
| 8 |
+// Slot is a list of the running tasks occupying a certain slot. Generally this |
|
| 8 | 9 |
// will only be one task, but some rolling update situations involve |
| 9 | 10 |
// temporarily having two running tasks in the same slot. Note that this use of |
| 10 | 11 |
// "slot" is more generic than the Slot number for replicated services - a node |
| 11 | 12 |
// is also considered a slot for global services. |
| 12 |
-type slot []*api.Task |
|
| 13 |
- |
|
| 14 |
-type slotsByRunningState []slot |
|
| 15 |
- |
|
| 16 |
-func (is slotsByRunningState) Len() int { return len(is) }
|
|
| 17 |
-func (is slotsByRunningState) Swap(i, j int) { is[i], is[j] = is[j], is[i] }
|
|
| 18 |
- |
|
| 19 |
-func (is slotsByRunningState) Less(i, j int) bool {
|
|
| 20 |
- iRunning := false |
|
| 21 |
- jRunning := false |
|
| 13 |
+type Slot []*api.Task |
|
| 14 |
+ |
|
| 15 |
+// GetRunnableAndDeadSlots returns two maps of slots. The first contains slots |
|
| 16 |
+// that have at least one task with a desired state above NEW and lesser or |
|
| 17 |
+// equal to RUNNING. The second is for slots that only contain tasks with a |
|
| 18 |
+// desired state above RUNNING. |
|
| 19 |
+func GetRunnableAndDeadSlots(s *store.MemoryStore, serviceID string) (map[uint64]Slot, map[uint64]Slot, error) {
|
|
| 20 |
+ var ( |
|
| 21 |
+ tasks []*api.Task |
|
| 22 |
+ err error |
|
| 23 |
+ ) |
|
| 24 |
+ s.View(func(tx store.ReadTx) {
|
|
| 25 |
+ tasks, err = store.FindTasks(tx, store.ByServiceID(serviceID)) |
|
| 26 |
+ }) |
|
| 27 |
+ if err != nil {
|
|
| 28 |
+ return nil, nil, err |
|
| 29 |
+ } |
|
| 22 | 30 |
|
| 23 |
- for _, ii := range is[i] {
|
|
| 24 |
- if ii.Status.State == api.TaskStateRunning {
|
|
| 25 |
- iRunning = true |
|
| 26 |
- break |
|
| 31 |
+ runningSlots := make(map[uint64]Slot) |
|
| 32 |
+ for _, t := range tasks {
|
|
| 33 |
+ if t.DesiredState <= api.TaskStateRunning {
|
|
| 34 |
+ runningSlots[t.Slot] = append(runningSlots[t.Slot], t) |
|
| 27 | 35 |
} |
| 28 | 36 |
} |
| 29 |
- for _, ij := range is[j] {
|
|
| 30 |
- if ij.Status.State == api.TaskStateRunning {
|
|
| 31 |
- jRunning = true |
|
| 32 |
- break |
|
| 37 |
+ |
|
| 38 |
+ deadSlots := make(map[uint64]Slot) |
|
| 39 |
+ for _, t := range tasks {
|
|
| 40 |
+ if _, exists := runningSlots[t.Slot]; !exists {
|
|
| 41 |
+ deadSlots[t.Slot] = append(deadSlots[t.Slot], t) |
|
| 33 | 42 |
} |
| 34 | 43 |
} |
| 35 | 44 |
|
| 36 |
- return iRunning && !jRunning |
|
| 37 |
-} |
|
| 38 |
- |
|
| 39 |
-type slotWithIndex struct {
|
|
| 40 |
- slot slot |
|
| 41 |
- |
|
| 42 |
- // index is a counter that counts this task as the nth instance of |
|
| 43 |
- // the service on its node. This is used for sorting the tasks so that |
|
| 44 |
- // when scaling down we leave tasks more evenly balanced. |
|
| 45 |
- index int |
|
| 46 |
-} |
|
| 47 |
- |
|
| 48 |
-type slotsByIndex []slotWithIndex |
|
| 49 |
- |
|
| 50 |
-func (is slotsByIndex) Len() int { return len(is) }
|
|
| 51 |
-func (is slotsByIndex) Swap(i, j int) { is[i], is[j] = is[j], is[i] }
|
|
| 52 |
- |
|
| 53 |
-func (is slotsByIndex) Less(i, j int) bool {
|
|
| 54 |
- if is[i].index < 0 && is[j].index >= 0 {
|
|
| 55 |
- return false |
|
| 56 |
- } |
|
| 57 |
- if is[j].index < 0 && is[i].index >= 0 {
|
|
| 58 |
- return true |
|
| 59 |
- } |
|
| 60 |
- return is[i].index < is[j].index |
|
| 45 |
+ return runningSlots, deadSlots, nil |
|
| 61 | 46 |
} |
| 62 | 47 |
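With getRunnableAndDeadSlots promoted to the exported GetRunnableAndDeadSlots, other orchestrator packages can share the same slot bookkeeping. A hedged sketch of how a reconcile pass might use it to size a replicated service (the example package and function name are illustrative, not from this diff):

    package example // illustrative sketch; not part of the vendored commit

    import (
    	"github.com/docker/swarmkit/api"
    	"github.com/docker/swarmkit/manager/orchestrator"
    	"github.com/docker/swarmkit/manager/state/store"
    )

    // missingReplicas reports how many additional slots a replicated service
    // needs, based on the slots that currently hold runnable tasks.
    func missingReplicas(s *store.MemoryStore, service *api.Service) (int, error) {
    	running, _, err := orchestrator.GetRunnableAndDeadSlots(s, service.ID)
    	if err != nil {
    		return 0, err
    	}
    	replicated, ok := service.Spec.GetMode().(*api.ServiceSpec_Replicated)
    	if !ok {
    		// Only meaningful for replicated services.
    		return 0, nil
    	}
    	missing := int(replicated.Replicated.Replicas) - len(running)
    	if missing < 0 {
    		missing = 0
    	}
    	return missing, nil
    }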
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,67 @@ |
| 0 |
+package orchestrator |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "reflect" |
|
| 4 |
+ "time" |
|
| 5 |
+ |
|
| 6 |
+ "github.com/docker/swarmkit/api" |
|
| 7 |
+ "github.com/docker/swarmkit/identity" |
|
| 8 |
+ "github.com/docker/swarmkit/protobuf/ptypes" |
|
| 9 |
+) |
|
| 10 |
+ |
|
| 11 |
+// DefaultRestartDelay is the restart delay value to use when none is |
|
| 12 |
+// specified. |
|
| 13 |
+const DefaultRestartDelay = 5 * time.Second |
|
| 14 |
+ |
|
| 15 |
+// NewTask creates a new task. |
|
| 16 |
+func NewTask(cluster *api.Cluster, service *api.Service, slot uint64, nodeID string) *api.Task {
|
|
| 17 |
+ var logDriver *api.Driver |
|
| 18 |
+ if service.Spec.Task.LogDriver != nil {
|
|
| 19 |
+ // use the log driver specific to the task, if we have it. |
|
| 20 |
+ logDriver = service.Spec.Task.LogDriver |
|
| 21 |
+ } else if cluster != nil {
|
|
| 22 |
+ // pick up the cluster default, if available. |
|
| 23 |
+ logDriver = cluster.Spec.TaskDefaults.LogDriver // nil is okay here. |
|
| 24 |
+ } |
|
| 25 |
+ |
|
| 26 |
+ taskID := identity.NewID() |
|
| 27 |
+ task := api.Task{
|
|
| 28 |
+ ID: taskID, |
|
| 29 |
+ ServiceAnnotations: service.Spec.Annotations, |
|
| 30 |
+ Spec: service.Spec.Task, |
|
| 31 |
+ ServiceID: service.ID, |
|
| 32 |
+ Slot: slot, |
|
| 33 |
+ Status: api.TaskStatus{
|
|
| 34 |
+ State: api.TaskStateNew, |
|
| 35 |
+ Timestamp: ptypes.MustTimestampProto(time.Now()), |
|
| 36 |
+ Message: "created", |
|
| 37 |
+ }, |
|
| 38 |
+ Endpoint: &api.Endpoint{
|
|
| 39 |
+ Spec: service.Spec.Endpoint.Copy(), |
|
| 40 |
+ }, |
|
| 41 |
+ DesiredState: api.TaskStateRunning, |
|
| 42 |
+ LogDriver: logDriver, |
|
| 43 |
+ } |
|
| 44 |
+ |
|
| 45 |
+ // In global mode we also set the NodeID |
|
| 46 |
+ if nodeID != "" {
|
|
| 47 |
+ task.NodeID = nodeID |
|
| 48 |
+ } |
|
| 49 |
+ |
|
| 50 |
+ return &task |
|
| 51 |
+} |
|
| 52 |
+ |
|
| 53 |
+// RestartCondition returns the restart condition to apply to this task. |
|
| 54 |
+func RestartCondition(task *api.Task) api.RestartPolicy_RestartCondition {
|
|
| 55 |
+ restartCondition := api.RestartOnAny |
|
| 56 |
+ if task.Spec.Restart != nil {
|
|
| 57 |
+ restartCondition = task.Spec.Restart.Condition |
|
| 58 |
+ } |
|
| 59 |
+ return restartCondition |
|
| 60 |
+} |
|
| 61 |
+ |
|
| 62 |
+// IsTaskDirty reports whether the task's spec or endpoint no longer matches the given service's spec. |
|
| 63 |
+func IsTaskDirty(s *api.Service, t *api.Task) bool {
|
|
| 64 |
+ return !reflect.DeepEqual(s.Spec.Task, t.Spec) || |
|
| 65 |
+ (t.Endpoint != nil && !reflect.DeepEqual(s.Spec.Endpoint, t.Endpoint.Spec)) |
|
| 66 |
+} |
| 0 | 67 |
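NewTask, RestartCondition, and IsTaskDirty are now shared entry points for the orchestrators. A small sketch of how they combine when deciding whether a task should be replaced (the example package and function are illustrative; the condition check mirrors the restart supervisor's shouldRestart):

    package example // illustrative sketch; not part of the vendored commit

    import (
    	"github.com/docker/swarmkit/api"
    	"github.com/docker/swarmkit/manager/orchestrator"
    )

    // needsReplacement reports whether a task should be replaced: either its
    // spec has drifted from the service, or it has stopped and its restart
    // condition still allows another attempt.
    func needsReplacement(service *api.Service, task *api.Task) bool {
    	if orchestrator.IsTaskDirty(service, task) {
    		return true
    	}
    	if task.Status.State <= api.TaskStateRunning {
    		// Still making progress; leave it alone.
    		return false
    	}
    	switch orchestrator.RestartCondition(task) {
    	case api.RestartOnAny:
    		return true
    	case api.RestartOnFailure:
    		return task.Status.State != api.TaskStateCompleted
    	default:
    		return false
    	}
    }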
deleted file mode 100644 |
| ... | ... |
@@ -1,209 +0,0 @@ |
| 1 |
-package orchestrator |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "sort" |
|
| 5 |
- "time" |
|
| 6 |
- |
|
| 7 |
- "github.com/docker/go-events" |
|
| 8 |
- "github.com/docker/swarmkit/api" |
|
| 9 |
- "github.com/docker/swarmkit/manager/state" |
|
| 10 |
- "github.com/docker/swarmkit/manager/state/store" |
|
| 11 |
-) |
|
| 12 |
- |
|
| 13 |
-const ( |
|
| 14 |
- // maxDirty is the size threshold for running a task pruning operation. |
|
| 15 |
- maxDirty = 1000 |
|
| 16 |
- // reaperBatchingInterval is how often to prune old tasks. |
|
| 17 |
- reaperBatchingInterval = 250 * time.Millisecond |
|
| 18 |
-) |
|
| 19 |
- |
|
| 20 |
-type instanceTuple struct {
|
|
| 21 |
- instance uint64 // unset for global tasks |
|
| 22 |
- serviceID string |
|
| 23 |
- nodeID string // unset for replicated tasks |
|
| 24 |
-} |
|
| 25 |
- |
|
| 26 |
-// A TaskReaper deletes old tasks when more than TaskHistoryRetentionLimit tasks |
|
| 27 |
-// exist for the same service/instance or service/nodeid combination. |
|
| 28 |
-type TaskReaper struct {
|
|
| 29 |
- store *store.MemoryStore |
|
| 30 |
- // taskHistory is the number of tasks to keep |
|
| 31 |
- taskHistory int64 |
|
| 32 |
- dirty map[instanceTuple]struct{}
|
|
| 33 |
- watcher chan events.Event |
|
| 34 |
- cancelWatch func() |
|
| 35 |
- stopChan chan struct{}
|
|
| 36 |
- doneChan chan struct{}
|
|
| 37 |
-} |
|
| 38 |
- |
|
| 39 |
-// NewTaskReaper creates a new TaskReaper. |
|
| 40 |
-func NewTaskReaper(store *store.MemoryStore) *TaskReaper {
|
|
| 41 |
- watcher, cancel := state.Watch(store.WatchQueue(), state.EventCreateTask{}, state.EventUpdateCluster{})
|
|
| 42 |
- |
|
| 43 |
- return &TaskReaper{
|
|
| 44 |
- store: store, |
|
| 45 |
- watcher: watcher, |
|
| 46 |
- cancelWatch: cancel, |
|
| 47 |
- dirty: make(map[instanceTuple]struct{}),
|
|
| 48 |
- stopChan: make(chan struct{}),
|
|
| 49 |
- doneChan: make(chan struct{}),
|
|
| 50 |
- } |
|
| 51 |
-} |
|
| 52 |
- |
|
| 53 |
-// Run is the TaskReaper's main loop. |
|
| 54 |
-func (tr *TaskReaper) Run() {
|
|
| 55 |
- defer close(tr.doneChan) |
|
| 56 |
- |
|
| 57 |
- tr.store.View(func(readTx store.ReadTx) {
|
|
| 58 |
- clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName)) |
|
| 59 |
- if err == nil && len(clusters) == 1 {
|
|
| 60 |
- tr.taskHistory = clusters[0].Spec.Orchestration.TaskHistoryRetentionLimit |
|
| 61 |
- } |
|
| 62 |
- }) |
|
| 63 |
- |
|
| 64 |
- timer := time.NewTimer(reaperBatchingInterval) |
|
| 65 |
- |
|
| 66 |
- for {
|
|
| 67 |
- select {
|
|
| 68 |
- case event := <-tr.watcher: |
|
| 69 |
- switch v := event.(type) {
|
|
| 70 |
- case state.EventCreateTask: |
|
| 71 |
- t := v.Task |
|
| 72 |
- tr.dirty[instanceTuple{
|
|
| 73 |
- instance: t.Slot, |
|
| 74 |
- serviceID: t.ServiceID, |
|
| 75 |
- nodeID: t.NodeID, |
|
| 76 |
- }] = struct{}{}
|
|
| 77 |
- if len(tr.dirty) > maxDirty {
|
|
| 78 |
- timer.Stop() |
|
| 79 |
- tr.tick() |
|
| 80 |
- } else {
|
|
| 81 |
- timer.Reset(reaperBatchingInterval) |
|
| 82 |
- } |
|
| 83 |
- case state.EventUpdateCluster: |
|
| 84 |
- tr.taskHistory = v.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit |
|
| 85 |
- } |
|
| 86 |
- case <-timer.C: |
|
| 87 |
- timer.Stop() |
|
| 88 |
- tr.tick() |
|
| 89 |
- case <-tr.stopChan: |
|
| 90 |
- timer.Stop() |
|
| 91 |
- return |
|
| 92 |
- } |
|
| 93 |
- } |
|
| 94 |
-} |
|
| 95 |
- |
|
| 96 |
-func (tr *TaskReaper) tick() {
|
|
| 97 |
- if len(tr.dirty) == 0 {
|
|
| 98 |
- return |
|
| 99 |
- } |
|
| 100 |
- |
|
| 101 |
- defer func() {
|
|
| 102 |
- tr.dirty = make(map[instanceTuple]struct{})
|
|
| 103 |
- }() |
|
| 104 |
- |
|
| 105 |
- var deleteTasks []string |
|
| 106 |
- |
|
| 107 |
- tr.store.View(func(tx store.ReadTx) {
|
|
| 108 |
- for dirty := range tr.dirty {
|
|
| 109 |
- service := store.GetService(tx, dirty.serviceID) |
|
| 110 |
- if service == nil {
|
|
| 111 |
- continue |
|
| 112 |
- } |
|
| 113 |
- |
|
| 114 |
- taskHistory := tr.taskHistory |
|
| 115 |
- |
|
| 116 |
- if taskHistory < 0 {
|
|
| 117 |
- continue |
|
| 118 |
- } |
|
| 119 |
- |
|
| 120 |
- var historicTasks []*api.Task |
|
| 121 |
- |
|
| 122 |
- switch service.Spec.GetMode().(type) {
|
|
| 123 |
- case *api.ServiceSpec_Replicated: |
|
| 124 |
- var err error |
|
| 125 |
- historicTasks, err = store.FindTasks(tx, store.BySlot(dirty.serviceID, dirty.instance)) |
|
| 126 |
- if err != nil {
|
|
| 127 |
- continue |
|
| 128 |
- } |
|
| 129 |
- |
|
| 130 |
- case *api.ServiceSpec_Global: |
|
| 131 |
- tasksByNode, err := store.FindTasks(tx, store.ByNodeID(dirty.nodeID)) |
|
| 132 |
- if err != nil {
|
|
| 133 |
- continue |
|
| 134 |
- } |
|
| 135 |
- |
|
| 136 |
- for _, t := range tasksByNode {
|
|
| 137 |
- if t.ServiceID == dirty.serviceID {
|
|
| 138 |
- historicTasks = append(historicTasks, t) |
|
| 139 |
- } |
|
| 140 |
- } |
|
| 141 |
- } |
|
| 142 |
- |
|
| 143 |
- if int64(len(historicTasks)) <= taskHistory {
|
|
| 144 |
- continue |
|
| 145 |
- } |
|
| 146 |
- |
|
| 147 |
- // TODO(aaronl): This could filter for non-running tasks and use quickselect |
|
| 148 |
- // instead of sorting the whole slice. |
|
| 149 |
- sort.Sort(tasksByTimestamp(historicTasks)) |
|
| 150 |
- |
|
| 151 |
- for _, t := range historicTasks {
|
|
| 152 |
- if t.DesiredState <= api.TaskStateRunning {
|
|
| 153 |
- // Don't delete running tasks |
|
| 154 |
- continue |
|
| 155 |
- } |
|
| 156 |
- |
|
| 157 |
- deleteTasks = append(deleteTasks, t.ID) |
|
| 158 |
- |
|
| 159 |
- taskHistory++ |
|
| 160 |
- if int64(len(historicTasks)) <= taskHistory {
|
|
| 161 |
- break |
|
| 162 |
- } |
|
| 163 |
- } |
|
| 164 |
- |
|
| 165 |
- } |
|
| 166 |
- }) |
|
| 167 |
- |
|
| 168 |
- if len(deleteTasks) > 0 {
|
|
| 169 |
- tr.store.Batch(func(batch *store.Batch) error {
|
|
| 170 |
- for _, taskID := range deleteTasks {
|
|
| 171 |
- batch.Update(func(tx store.Tx) error {
|
|
| 172 |
- return store.DeleteTask(tx, taskID) |
|
| 173 |
- }) |
|
| 174 |
- } |
|
| 175 |
- return nil |
|
| 176 |
- }) |
|
| 177 |
- } |
|
| 178 |
-} |
|
| 179 |
- |
|
| 180 |
-// Stop stops the TaskReaper and waits for the main loop to exit. |
|
| 181 |
-func (tr *TaskReaper) Stop() {
|
|
| 182 |
- tr.cancelWatch() |
|
| 183 |
- close(tr.stopChan) |
|
| 184 |
- <-tr.doneChan |
|
| 185 |
-} |
|
| 186 |
- |
|
| 187 |
-type tasksByTimestamp []*api.Task |
|
| 188 |
- |
|
| 189 |
-func (t tasksByTimestamp) Len() int {
|
|
| 190 |
- return len(t) |
|
| 191 |
-} |
|
| 192 |
-func (t tasksByTimestamp) Swap(i, j int) {
|
|
| 193 |
- t[i], t[j] = t[j], t[i] |
|
| 194 |
-} |
|
| 195 |
-func (t tasksByTimestamp) Less(i, j int) bool {
|
|
| 196 |
- if t[i].Status.Timestamp == nil {
|
|
| 197 |
- return true |
|
| 198 |
- } |
|
| 199 |
- if t[j].Status.Timestamp == nil {
|
|
| 200 |
- return false |
|
| 201 |
- } |
|
| 202 |
- if t[i].Status.Timestamp.Seconds < t[j].Status.Timestamp.Seconds {
|
|
| 203 |
- return true |
|
| 204 |
- } |
|
| 205 |
- if t[i].Status.Timestamp.Seconds > t[j].Status.Timestamp.Seconds {
|
|
| 206 |
- return false |
|
| 207 |
- } |
|
| 208 |
- return t[i].Status.Timestamp.Nanos < t[j].Status.Timestamp.Nanos |
|
| 209 |
-} |
| 210 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,209 @@ |
| 0 |
+package taskreaper |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "sort" |
|
| 4 |
+ "time" |
|
| 5 |
+ |
|
| 6 |
+ "github.com/docker/go-events" |
|
| 7 |
+ "github.com/docker/swarmkit/api" |
|
| 8 |
+ "github.com/docker/swarmkit/manager/state" |
|
| 9 |
+ "github.com/docker/swarmkit/manager/state/store" |
|
| 10 |
+) |
|
| 11 |
+ |
|
| 12 |
+const ( |
|
| 13 |
+ // maxDirty is the size threshold for running a task pruning operation. |
|
| 14 |
+ maxDirty = 1000 |
|
| 15 |
+ // reaperBatchingInterval is how often to prune old tasks. |
|
| 16 |
+ reaperBatchingInterval = 250 * time.Millisecond |
|
| 17 |
+) |
|
| 18 |
+ |
|
| 19 |
+type instanceTuple struct {
|
|
| 20 |
+ instance uint64 // unset for global tasks |
|
| 21 |
+ serviceID string |
|
| 22 |
+ nodeID string // unset for replicated tasks |
|
| 23 |
+} |
|
| 24 |
+ |
|
| 25 |
+// A TaskReaper deletes old tasks when more than TaskHistoryRetentionLimit tasks |
|
| 26 |
+// exist for the same service/instance or service/nodeid combination. |
|
| 27 |
+type TaskReaper struct {
|
|
| 28 |
+ store *store.MemoryStore |
|
| 29 |
+ // taskHistory is the number of tasks to keep |
|
| 30 |
+ taskHistory int64 |
|
| 31 |
+ dirty map[instanceTuple]struct{}
|
|
| 32 |
+ watcher chan events.Event |
|
| 33 |
+ cancelWatch func() |
|
| 34 |
+ stopChan chan struct{}
|
|
| 35 |
+ doneChan chan struct{}
|
|
| 36 |
+} |
|
| 37 |
+ |
|
| 38 |
+// New creates a new TaskReaper. |
|
| 39 |
+func New(store *store.MemoryStore) *TaskReaper {
|
|
| 40 |
+ watcher, cancel := state.Watch(store.WatchQueue(), state.EventCreateTask{}, state.EventUpdateCluster{})
|
|
| 41 |
+ |
|
| 42 |
+ return &TaskReaper{
|
|
| 43 |
+ store: store, |
|
| 44 |
+ watcher: watcher, |
|
| 45 |
+ cancelWatch: cancel, |
|
| 46 |
+ dirty: make(map[instanceTuple]struct{}),
|
|
| 47 |
+ stopChan: make(chan struct{}),
|
|
| 48 |
+ doneChan: make(chan struct{}),
|
|
| 49 |
+ } |
|
| 50 |
+} |
|
| 51 |
+ |
|
| 52 |
+// Run is the TaskReaper's main loop. |
|
| 53 |
+func (tr *TaskReaper) Run() {
|
|
| 54 |
+ defer close(tr.doneChan) |
|
| 55 |
+ |
|
| 56 |
+ tr.store.View(func(readTx store.ReadTx) {
|
|
| 57 |
+ clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName)) |
|
| 58 |
+ if err == nil && len(clusters) == 1 {
|
|
| 59 |
+ tr.taskHistory = clusters[0].Spec.Orchestration.TaskHistoryRetentionLimit |
|
| 60 |
+ } |
|
| 61 |
+ }) |
|
| 62 |
+ |
|
| 63 |
+ timer := time.NewTimer(reaperBatchingInterval) |
|
| 64 |
+ |
|
| 65 |
+ for {
|
|
| 66 |
+ select {
|
|
| 67 |
+ case event := <-tr.watcher: |
|
| 68 |
+ switch v := event.(type) {
|
|
| 69 |
+ case state.EventCreateTask: |
|
| 70 |
+ t := v.Task |
|
| 71 |
+ tr.dirty[instanceTuple{
|
|
| 72 |
+ instance: t.Slot, |
|
| 73 |
+ serviceID: t.ServiceID, |
|
| 74 |
+ nodeID: t.NodeID, |
|
| 75 |
+ }] = struct{}{}
|
|
| 76 |
+ if len(tr.dirty) > maxDirty {
|
|
| 77 |
+ timer.Stop() |
|
| 78 |
+ tr.tick() |
|
| 79 |
+ } else {
|
|
| 80 |
+ timer.Reset(reaperBatchingInterval) |
|
| 81 |
+ } |
|
| 82 |
+ case state.EventUpdateCluster: |
|
| 83 |
+ tr.taskHistory = v.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit |
|
| 84 |
+ } |
|
| 85 |
+ case <-timer.C: |
|
| 86 |
+ timer.Stop() |
|
| 87 |
+ tr.tick() |
|
| 88 |
+ case <-tr.stopChan: |
|
| 89 |
+ timer.Stop() |
|
| 90 |
+ return |
|
| 91 |
+ } |
|
| 92 |
+ } |
|
| 93 |
+} |
|
| 94 |
+ |
|
| 95 |
+func (tr *TaskReaper) tick() {
|
|
| 96 |
+ if len(tr.dirty) == 0 {
|
|
| 97 |
+ return |
|
| 98 |
+ } |
|
| 99 |
+ |
|
| 100 |
+ defer func() {
|
|
| 101 |
+ tr.dirty = make(map[instanceTuple]struct{})
|
|
| 102 |
+ }() |
|
| 103 |
+ |
|
| 104 |
+ var deleteTasks []string |
|
| 105 |
+ |
|
| 106 |
+ tr.store.View(func(tx store.ReadTx) {
|
|
| 107 |
+ for dirty := range tr.dirty {
|
|
| 108 |
+ service := store.GetService(tx, dirty.serviceID) |
|
| 109 |
+ if service == nil {
|
|
| 110 |
+ continue |
|
| 111 |
+ } |
|
| 112 |
+ |
|
| 113 |
+ taskHistory := tr.taskHistory |
|
| 114 |
+ |
|
| 115 |
+ if taskHistory < 0 {
|
|
| 116 |
+ continue |
|
| 117 |
+ } |
|
| 118 |
+ |
|
| 119 |
+ var historicTasks []*api.Task |
|
| 120 |
+ |
|
| 121 |
+ switch service.Spec.GetMode().(type) {
|
|
| 122 |
+ case *api.ServiceSpec_Replicated: |
|
| 123 |
+ var err error |
|
| 124 |
+ historicTasks, err = store.FindTasks(tx, store.BySlot(dirty.serviceID, dirty.instance)) |
|
| 125 |
+ if err != nil {
|
|
| 126 |
+ continue |
|
| 127 |
+ } |
|
| 128 |
+ |
|
| 129 |
+ case *api.ServiceSpec_Global: |
|
| 130 |
+ tasksByNode, err := store.FindTasks(tx, store.ByNodeID(dirty.nodeID)) |
|
| 131 |
+ if err != nil {
|
|
| 132 |
+ continue |
|
| 133 |
+ } |
|
| 134 |
+ |
|
| 135 |
+ for _, t := range tasksByNode {
|
|
| 136 |
+ if t.ServiceID == dirty.serviceID {
|
|
| 137 |
+ historicTasks = append(historicTasks, t) |
|
| 138 |
+ } |
|
| 139 |
+ } |
|
| 140 |
+ } |
|
| 141 |
+ |
|
| 142 |
+ if int64(len(historicTasks)) <= taskHistory {
|
|
| 143 |
+ continue |
|
| 144 |
+ } |
|
| 145 |
+ |
|
| 146 |
+ // TODO(aaronl): This could filter for non-running tasks and use quickselect |
|
| 147 |
+ // instead of sorting the whole slice. |
|
| 148 |
+ sort.Sort(tasksByTimestamp(historicTasks)) |
|
| 149 |
+ |
|
| 150 |
+ for _, t := range historicTasks {
|
|
| 151 |
+ if t.DesiredState <= api.TaskStateRunning {
|
|
| 152 |
+ // Don't delete running tasks |
|
| 153 |
+ continue |
|
| 154 |
+ } |
|
| 155 |
+ |
|
| 156 |
+ deleteTasks = append(deleteTasks, t.ID) |
|
| 157 |
+ |
|
| 158 |
+ taskHistory++ |
|
| 159 |
+ if int64(len(historicTasks)) <= taskHistory {
|
|
| 160 |
+ break |
|
| 161 |
+ } |
|
| 162 |
+ } |
|
| 163 |
+ |
|
| 164 |
+ } |
|
| 165 |
+ }) |
|
| 166 |
+ |
|
| 167 |
+ if len(deleteTasks) > 0 {
|
|
| 168 |
+ tr.store.Batch(func(batch *store.Batch) error {
|
|
| 169 |
+ for _, taskID := range deleteTasks {
|
|
| 170 |
+ batch.Update(func(tx store.Tx) error {
|
|
| 171 |
+ return store.DeleteTask(tx, taskID) |
|
| 172 |
+ }) |
|
| 173 |
+ } |
|
| 174 |
+ return nil |
|
| 175 |
+ }) |
|
| 176 |
+ } |
|
| 177 |
+} |
|
| 178 |
+ |
|
| 179 |
+// Stop stops the TaskReaper and waits for the main loop to exit. |
|
| 180 |
+func (tr *TaskReaper) Stop() {
|
|
| 181 |
+ tr.cancelWatch() |
|
| 182 |
+ close(tr.stopChan) |
|
| 183 |
+ <-tr.doneChan |
|
| 184 |
+} |
|
| 185 |
+ |
|
| 186 |
+type tasksByTimestamp []*api.Task |
|
| 187 |
+ |
|
| 188 |
+func (t tasksByTimestamp) Len() int {
|
|
| 189 |
+ return len(t) |
|
| 190 |
+} |
|
| 191 |
+func (t tasksByTimestamp) Swap(i, j int) {
|
|
| 192 |
+ t[i], t[j] = t[j], t[i] |
|
| 193 |
+} |
|
| 194 |
+func (t tasksByTimestamp) Less(i, j int) bool {
|
|
| 195 |
+ if t[i].Status.Timestamp == nil {
|
|
| 196 |
+ return true |
|
| 197 |
+ } |
|
| 198 |
+ if t[j].Status.Timestamp == nil {
|
|
| 199 |
+ return false |
|
| 200 |
+ } |
|
| 201 |
+ if t[i].Status.Timestamp.Seconds < t[j].Status.Timestamp.Seconds {
|
|
| 202 |
+ return true |
|
| 203 |
+ } |
|
| 204 |
+ if t[i].Status.Timestamp.Seconds > t[j].Status.Timestamp.Seconds {
|
|
| 205 |
+ return false |
|
| 206 |
+ } |
|
| 207 |
+ return t[i].Status.Timestamp.Nanos < t[j].Status.Timestamp.Nanos |
|
| 208 |
+} |
| 0 | 209 |
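The reaper moves into its own taskreaper package, with New replacing NewTaskReaper; the lifecycle is otherwise unchanged. A sketch of the expected wiring (the import path is assumed from the new package name, since the diff itself does not show it):

    package example // illustrative sketch; not part of the vendored commit

    import (
    	"github.com/docker/swarmkit/manager/orchestrator/taskreaper" // path assumed from the package name
    	"github.com/docker/swarmkit/manager/state/store"
    )

    // runTaskReaper constructs the reaper against the memory store, runs its
    // loop in a goroutine, and returns a stop function that blocks until the
    // loop has exited.
    func runTaskReaper(s *store.MemoryStore) (stop func()) {
    	tr := taskreaper.New(s)
    	go tr.Run()
    	return tr.Stop
    }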
deleted file mode 100644 |
| ... | ... |
@@ -1,233 +0,0 @@ |
| 1 |
-package orchestrator |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "time" |
|
| 5 |
- |
|
| 6 |
- "github.com/docker/go-events" |
|
| 7 |
- "github.com/docker/swarmkit/api" |
|
| 8 |
- "github.com/docker/swarmkit/log" |
|
| 9 |
- "github.com/docker/swarmkit/manager/state" |
|
| 10 |
- "github.com/docker/swarmkit/manager/state/store" |
|
| 11 |
- "github.com/docker/swarmkit/protobuf/ptypes" |
|
| 12 |
- "golang.org/x/net/context" |
|
| 13 |
-) |
|
| 14 |
- |
|
| 15 |
-// This file provides task-level orchestration. It observes changes to task |
|
| 16 |
-// and node state and kills/recreates tasks if necessary. This is distinct from |
|
| 17 |
-// service-level reconciliation, which observes changes to services and creates |
|
| 18 |
-// and/or kills tasks to match the service definition. |
|
| 19 |
- |
|
| 20 |
-func invalidNode(n *api.Node) bool {
|
|
| 21 |
- return n == nil || |
|
| 22 |
- n.Status.State == api.NodeStatus_DOWN || |
|
| 23 |
- n.Spec.Availability == api.NodeAvailabilityDrain |
|
| 24 |
-} |
|
| 25 |
- |
|
| 26 |
-func (r *ReplicatedOrchestrator) initTasks(ctx context.Context, readTx store.ReadTx) error {
|
|
| 27 |
- tasks, err := store.FindTasks(readTx, store.All) |
|
| 28 |
- if err != nil {
|
|
| 29 |
- return err |
|
| 30 |
- } |
|
| 31 |
- for _, t := range tasks {
|
|
| 32 |
- if t.NodeID != "" {
|
|
| 33 |
- n := store.GetNode(readTx, t.NodeID) |
|
| 34 |
- if invalidNode(n) && t.Status.State <= api.TaskStateRunning && t.DesiredState <= api.TaskStateRunning {
|
|
| 35 |
- r.restartTasks[t.ID] = struct{}{}
|
|
| 36 |
- } |
|
| 37 |
- } |
|
| 38 |
- } |
|
| 39 |
- |
|
| 40 |
- _, err = r.store.Batch(func(batch *store.Batch) error {
|
|
| 41 |
- for _, t := range tasks {
|
|
| 42 |
- if t.ServiceID == "" {
|
|
| 43 |
- continue |
|
| 44 |
- } |
|
| 45 |
- |
|
| 46 |
- // TODO(aluzzardi): We should NOT retrieve the service here. |
|
| 47 |
- service := store.GetService(readTx, t.ServiceID) |
|
| 48 |
- if service == nil {
|
|
| 49 |
- // Service was deleted |
|
| 50 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 51 |
- return store.DeleteTask(tx, t.ID) |
|
| 52 |
- }) |
|
| 53 |
- if err != nil {
|
|
| 54 |
- log.G(ctx).WithError(err).Error("failed to set task desired state to dead")
|
|
| 55 |
- } |
|
| 56 |
- continue |
|
| 57 |
- } |
|
| 58 |
- // TODO(aluzzardi): This is shady. We should have a more generic condition. |
|
| 59 |
- if t.DesiredState != api.TaskStateReady || !isReplicatedService(service) {
|
|
| 60 |
- continue |
|
| 61 |
- } |
|
| 62 |
- restartDelay := defaultRestartDelay |
|
| 63 |
- if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
|
|
| 64 |
- var err error |
|
| 65 |
- restartDelay, err = ptypes.Duration(t.Spec.Restart.Delay) |
|
| 66 |
- if err != nil {
|
|
| 67 |
- log.G(ctx).WithError(err).Error("invalid restart delay")
|
|
| 68 |
- restartDelay = defaultRestartDelay |
|
| 69 |
- } |
|
| 70 |
- } |
|
| 71 |
- if restartDelay != 0 {
|
|
| 72 |
- timestamp, err := ptypes.Timestamp(t.Status.Timestamp) |
|
| 73 |
- if err == nil {
|
|
| 74 |
- restartTime := timestamp.Add(restartDelay) |
|
| 75 |
- calculatedRestartDelay := restartTime.Sub(time.Now()) |
|
| 76 |
- if calculatedRestartDelay < restartDelay {
|
|
| 77 |
- restartDelay = calculatedRestartDelay |
|
| 78 |
- } |
|
| 79 |
- if restartDelay > 0 {
|
|
| 80 |
- _ = batch.Update(func(tx store.Tx) error {
|
|
| 81 |
- t := store.GetTask(tx, t.ID) |
|
| 82 |
- // TODO(aluzzardi): This is shady as well. We should have a more generic condition. |
|
| 83 |
- if t == nil || t.DesiredState != api.TaskStateReady {
|
|
| 84 |
- return nil |
|
| 85 |
- } |
|
| 86 |
- r.restarts.DelayStart(ctx, tx, nil, t.ID, restartDelay, true) |
|
| 87 |
- return nil |
|
| 88 |
- }) |
|
| 89 |
- continue |
|
| 90 |
- } |
|
| 91 |
- } else {
|
|
| 92 |
- log.G(ctx).WithError(err).Error("invalid status timestamp")
|
|
| 93 |
- } |
|
| 94 |
- } |
|
| 95 |
- |
|
| 96 |
- // Start now |
|
| 97 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 98 |
- return r.restarts.StartNow(tx, t.ID) |
|
| 99 |
- }) |
|
| 100 |
- if err != nil {
|
|
| 101 |
- log.G(ctx).WithError(err).WithField("task.id", t.ID).Error("moving task out of delayed state failed")
|
|
| 102 |
- } |
|
| 103 |
- } |
|
| 104 |
- return nil |
|
| 105 |
- }) |
|
| 106 |
- |
|
| 107 |
- return err |
|
| 108 |
-} |
|
| 109 |
- |
|
| 110 |
-func (r *ReplicatedOrchestrator) handleTaskEvent(ctx context.Context, event events.Event) {
|
|
| 111 |
- switch v := event.(type) {
|
|
| 112 |
- case state.EventDeleteNode: |
|
| 113 |
- r.restartTasksByNodeID(ctx, v.Node.ID) |
|
| 114 |
- case state.EventCreateNode: |
|
| 115 |
- r.handleNodeChange(ctx, v.Node) |
|
| 116 |
- case state.EventUpdateNode: |
|
| 117 |
- r.handleNodeChange(ctx, v.Node) |
|
| 118 |
- case state.EventDeleteTask: |
|
| 119 |
- if v.Task.DesiredState <= api.TaskStateRunning {
|
|
| 120 |
- service := r.resolveService(ctx, v.Task) |
|
| 121 |
- if !isReplicatedService(service) {
|
|
| 122 |
- return |
|
| 123 |
- } |
|
| 124 |
- r.reconcileServices[service.ID] = service |
|
| 125 |
- } |
|
| 126 |
- r.restarts.Cancel(v.Task.ID) |
|
| 127 |
- case state.EventUpdateTask: |
|
| 128 |
- r.handleTaskChange(ctx, v.Task) |
|
| 129 |
- case state.EventCreateTask: |
|
| 130 |
- r.handleTaskChange(ctx, v.Task) |
|
| 131 |
- } |
|
| 132 |
-} |
|
| 133 |
- |
|
| 134 |
-func (r *ReplicatedOrchestrator) tickTasks(ctx context.Context) {
|
|
| 135 |
- if len(r.restartTasks) > 0 {
|
|
| 136 |
- _, err := r.store.Batch(func(batch *store.Batch) error {
|
|
| 137 |
- for taskID := range r.restartTasks {
|
|
| 138 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 139 |
- // TODO(aaronl): optimistic update? |
|
| 140 |
- t := store.GetTask(tx, taskID) |
|
| 141 |
- if t != nil {
|
|
| 142 |
- if t.DesiredState > api.TaskStateRunning {
|
|
| 143 |
- return nil |
|
| 144 |
- } |
|
| 145 |
- |
|
| 146 |
- service := store.GetService(tx, t.ServiceID) |
|
| 147 |
- if !isReplicatedService(service) {
|
|
| 148 |
- return nil |
|
| 149 |
- } |
|
| 150 |
- |
|
| 151 |
- // Restart task if applicable |
|
| 152 |
- if err := r.restarts.Restart(ctx, tx, r.cluster, service, *t); err != nil {
|
|
| 153 |
- return err |
|
| 154 |
- } |
|
| 155 |
- } |
|
| 156 |
- return nil |
|
| 157 |
- }) |
|
| 158 |
- if err != nil {
|
|
| 159 |
- log.G(ctx).WithError(err).Errorf("ReplicatedOrchestrator task reaping transaction failed")
|
|
| 160 |
- } |
|
| 161 |
- } |
|
| 162 |
- return nil |
|
| 163 |
- }) |
|
| 164 |
- |
|
| 165 |
- if err != nil {
|
|
| 166 |
- log.G(ctx).WithError(err).Errorf("orchestrator task removal batch failed")
|
|
| 167 |
- } |
|
| 168 |
- |
|
| 169 |
- r.restartTasks = make(map[string]struct{})
|
|
| 170 |
- } |
|
| 171 |
-} |
|
| 172 |
- |
|
| 173 |
-func (r *ReplicatedOrchestrator) restartTasksByNodeID(ctx context.Context, nodeID string) {
|
|
| 174 |
- var err error |
|
| 175 |
- r.store.View(func(tx store.ReadTx) {
|
|
| 176 |
- var tasks []*api.Task |
|
| 177 |
- tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID)) |
|
| 178 |
- if err != nil {
|
|
| 179 |
- return |
|
| 180 |
- } |
|
| 181 |
- |
|
| 182 |
- for _, t := range tasks {
|
|
| 183 |
- if t.DesiredState > api.TaskStateRunning {
|
|
| 184 |
- continue |
|
| 185 |
- } |
|
| 186 |
- service := store.GetService(tx, t.ServiceID) |
|
| 187 |
- if isReplicatedService(service) {
|
|
| 188 |
- r.restartTasks[t.ID] = struct{}{}
|
|
| 189 |
- } |
|
| 190 |
- } |
|
| 191 |
- }) |
|
| 192 |
- if err != nil {
|
|
| 193 |
- log.G(ctx).WithError(err).Errorf("failed to list tasks to remove")
|
|
| 194 |
- } |
|
| 195 |
-} |
|
| 196 |
- |
|
| 197 |
-func (r *ReplicatedOrchestrator) handleNodeChange(ctx context.Context, n *api.Node) {
|
|
| 198 |
- if !invalidNode(n) {
|
|
| 199 |
- return |
|
| 200 |
- } |
|
| 201 |
- |
|
| 202 |
- r.restartTasksByNodeID(ctx, n.ID) |
|
| 203 |
-} |
|
| 204 |
- |
|
| 205 |
-func (r *ReplicatedOrchestrator) handleTaskChange(ctx context.Context, t *api.Task) {
|
|
| 206 |
- // If we already set the desired state past TaskStateRunning, there is no |
|
| 207 |
- // further action necessary. |
|
| 208 |
- if t.DesiredState > api.TaskStateRunning {
|
|
| 209 |
- return |
|
| 210 |
- } |
|
| 211 |
- |
|
| 212 |
- var ( |
|
| 213 |
- n *api.Node |
|
| 214 |
- service *api.Service |
|
| 215 |
- ) |
|
| 216 |
- r.store.View(func(tx store.ReadTx) {
|
|
| 217 |
- if t.NodeID != "" {
|
|
| 218 |
- n = store.GetNode(tx, t.NodeID) |
|
| 219 |
- } |
|
| 220 |
- if t.ServiceID != "" {
|
|
| 221 |
- service = store.GetService(tx, t.ServiceID) |
|
| 222 |
- } |
|
| 223 |
- }) |
|
| 224 |
- |
|
| 225 |
- if !isReplicatedService(service) {
|
|
| 226 |
- return |
|
| 227 |
- } |
|
| 228 |
- |
|
| 229 |
- if t.Status.State > api.TaskStateRunning || |
|
| 230 |
- (t.NodeID != "" && invalidNode(n)) {
|
|
| 231 |
- r.restartTasks[t.ID] = struct{}{}
|
|
| 232 |
- } |
|
| 233 |
-} |
| 234 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,613 @@ |
| 0 |
+package update |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "errors" |
|
| 4 |
+ "fmt" |
|
| 5 |
+ "reflect" |
|
| 6 |
+ "sync" |
|
| 7 |
+ "time" |
|
| 8 |
+ |
|
| 9 |
+ "golang.org/x/net/context" |
|
| 10 |
+ |
|
| 11 |
+ "github.com/docker/go-events" |
|
| 12 |
+ "github.com/docker/swarmkit/api" |
|
| 13 |
+ "github.com/docker/swarmkit/log" |
|
| 14 |
+ "github.com/docker/swarmkit/manager/orchestrator" |
|
| 15 |
+ "github.com/docker/swarmkit/manager/orchestrator/restart" |
|
| 16 |
+ "github.com/docker/swarmkit/manager/state" |
|
| 17 |
+ "github.com/docker/swarmkit/manager/state/store" |
|
| 18 |
+ "github.com/docker/swarmkit/manager/state/watch" |
|
| 19 |
+ "github.com/docker/swarmkit/protobuf/ptypes" |
|
| 20 |
+) |
|
| 21 |
+ |
|
| 22 |
+const defaultMonitor = 30 * time.Second |
|
| 23 |
+ |
|
| 24 |
+// Supervisor supervises a set of updates. It's responsible for keeping track of updates, |
|
| 25 |
+// shutting them down and replacing them. |
|
| 26 |
+type Supervisor struct {
|
|
| 27 |
+ store *store.MemoryStore |
|
| 28 |
+ restarts *restart.Supervisor |
|
| 29 |
+ updates map[string]*Updater |
|
| 30 |
+ l sync.Mutex |
|
| 31 |
+} |
|
| 32 |
+ |
|
| 33 |
+// NewSupervisor creates a new UpdateSupervisor. |
|
| 34 |
+func NewSupervisor(store *store.MemoryStore, restartSupervisor *restart.Supervisor) *Supervisor {
|
|
| 35 |
+ return &Supervisor{
|
|
| 36 |
+ store: store, |
|
| 37 |
+ updates: make(map[string]*Updater), |
|
| 38 |
+ restarts: restartSupervisor, |
|
| 39 |
+ } |
|
| 40 |
+} |
|
| 41 |
+ |
|
| 42 |
+// Update starts an Update of `slots` belonging to `service` in the background |
|
| 43 |
+// and returns immediately. Each slot contains a group of one or more tasks |
|
| 44 |
+// occupying the same slot (replicated service) or node (global service). There |
|
| 45 |
+// may be more than one task per slot in cases where an update is in progress |
|
| 46 |
+// and the new task was started before the old one was shut down. If an update |
|
| 47 |
+// for that service was already in progress, it will be cancelled before the |
|
| 48 |
+// new one starts. |
|
| 49 |
+func (u *Supervisor) Update(ctx context.Context, cluster *api.Cluster, service *api.Service, slots []orchestrator.Slot) {
|
|
| 50 |
+ u.l.Lock() |
|
| 51 |
+ defer u.l.Unlock() |
|
| 52 |
+ |
|
| 53 |
+ id := service.ID |
|
| 54 |
+ |
|
| 55 |
+ if update, ok := u.updates[id]; ok {
|
|
| 56 |
+ if reflect.DeepEqual(service.Spec, update.newService.Spec) {
|
|
| 57 |
+ // There's already an update working towards this goal. |
|
| 58 |
+ return |
|
| 59 |
+ } |
|
| 60 |
+ update.Cancel() |
|
| 61 |
+ } |
|
| 62 |
+ |
|
| 63 |
+ update := NewUpdater(u.store, u.restarts, cluster, service) |
|
| 64 |
+ u.updates[id] = update |
|
| 65 |
+ go func() {
|
|
| 66 |
+ update.Run(ctx, slots) |
|
| 67 |
+ u.l.Lock() |
|
| 68 |
+ if u.updates[id] == update {
|
|
| 69 |
+ delete(u.updates, id) |
|
| 70 |
+ } |
|
| 71 |
+ u.l.Unlock() |
|
| 72 |
+ }() |
|
| 73 |
+} |
|
| 74 |
+ |
|
| 75 |
+// CancelAll cancels all current updates. |
|
| 76 |
+func (u *Supervisor) CancelAll() {
|
|
| 77 |
+ u.l.Lock() |
|
| 78 |
+ defer u.l.Unlock() |
|
| 79 |
+ |
|
| 80 |
+ for _, update := range u.updates {
|
|
| 81 |
+ update.Cancel() |
|
| 82 |
+ } |
|
| 83 |
+} |
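
Supervisor.Update keys in-flight updates by service ID: an update toward an identical spec is left running, a superseded one is cancelled and replaced, and each goroutine removes its own map entry only if it is still the current one. A simplified sketch of that bookkeeping (the spec comparison and the Updater are stand-ins, not the swarmkit types):

```go
package main

import "sync"

type updater struct {
	spec string // stand-in for service.Spec
	done chan struct{}
}

func (u *updater) Cancel() { close(u.done) }

type supervisor struct {
	mu      sync.Mutex
	updates map[string]*updater
}

// update mirrors Supervisor.Update: dedupe identical goals, cancel superseded
// updates, and let each goroutine clean up only its own map entry.
func (s *supervisor) update(serviceID, spec string, run func(*updater)) {
	s.mu.Lock()
	defer s.mu.Unlock()

	if old, ok := s.updates[serviceID]; ok {
		if old.spec == spec {
			return // an update toward this goal is already running
		}
		old.Cancel()
	}

	u := &updater{spec: spec, done: make(chan struct{})}
	s.updates[serviceID] = u
	go func() {
		run(u)
		s.mu.Lock()
		if s.updates[serviceID] == u {
			delete(s.updates, serviceID)
		}
		s.mu.Unlock()
	}()
}

func main() {
	s := &supervisor{updates: make(map[string]*updater)}
	s.update("svc1", "v2", func(u *updater) { <-u.done })
	s.update("svc1", "v3", func(u *updater) { <-u.done }) // cancels the v2 update
}
```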
|
| 84 |
+ |
|
| 85 |
+// Updater updates a set of tasks to a new version. |
|
| 86 |
+type Updater struct {
|
|
| 87 |
+ store *store.MemoryStore |
|
| 88 |
+ watchQueue *watch.Queue |
|
| 89 |
+ restarts *restart.Supervisor |
|
| 90 |
+ |
|
| 91 |
+ cluster *api.Cluster |
|
| 92 |
+ newService *api.Service |
|
| 93 |
+ |
|
| 94 |
+ updatedTasks map[string]time.Time // task ID to creation time |
|
| 95 |
+ updatedTasksMu sync.Mutex |
|
| 96 |
+ |
|
| 97 |
+ // stopChan signals to the state machine to stop running. |
|
| 98 |
+ stopChan chan struct{}
|
|
| 99 |
+ // doneChan is closed when the state machine terminates. |
|
| 100 |
+ doneChan chan struct{}
|
|
| 101 |
+} |
|
| 102 |
+ |
|
| 103 |
+// NewUpdater creates a new Updater. |
|
| 104 |
+func NewUpdater(store *store.MemoryStore, restartSupervisor *restart.Supervisor, cluster *api.Cluster, newService *api.Service) *Updater {
|
|
| 105 |
+ return &Updater{
|
|
| 106 |
+ store: store, |
|
| 107 |
+ watchQueue: store.WatchQueue(), |
|
| 108 |
+ restarts: restartSupervisor, |
|
| 109 |
+ cluster: cluster.Copy(), |
|
| 110 |
+ newService: newService.Copy(), |
|
| 111 |
+ updatedTasks: make(map[string]time.Time), |
|
| 112 |
+ stopChan: make(chan struct{}),
|
|
| 113 |
+ doneChan: make(chan struct{}),
|
|
| 114 |
+ } |
|
| 115 |
+} |
|
| 116 |
+ |
|
| 117 |
+// Cancel cancels the current update immediately. It blocks until the cancellation is confirmed. |
|
| 118 |
+func (u *Updater) Cancel() {
|
|
| 119 |
+ close(u.stopChan) |
|
| 120 |
+ <-u.doneChan |
|
| 121 |
+} |
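
Cancel relies on the common Go pair-of-channels idiom: closing `stopChan` asks Run to exit, and Run closes `doneChan` on the way out so Cancel can block until shutdown is confirmed. A minimal, self-contained sketch of the idiom (names are illustrative, not the swarmkit types):

```go
package main

import (
	"fmt"
	"time"
)

type runner struct {
	stopChan chan struct{} // closed by Cancel to request shutdown
	doneChan chan struct{} // closed by Run when it has fully stopped
}

func newRunner() *runner {
	return &runner{stopChan: make(chan struct{}), doneChan: make(chan struct{})}
}

func (r *runner) Run() {
	defer close(r.doneChan)
	for {
		select {
		case <-r.stopChan:
			return
		case <-time.After(100 * time.Millisecond):
			fmt.Println("working")
		}
	}
}

// Cancel requests shutdown and blocks until Run has returned,
// mirroring Updater.Cancel above.
func (r *runner) Cancel() {
	close(r.stopChan)
	<-r.doneChan
}

func main() {
	r := newRunner()
	go r.Run()
	time.Sleep(250 * time.Millisecond)
	r.Cancel()
}
```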
|
| 122 |
+ |
|
| 123 |
+// Run starts the update and returns only once it's complete or cancelled. 

|
| 124 |
+func (u *Updater) Run(ctx context.Context, slots []orchestrator.Slot) {
|
|
| 125 |
+ defer close(u.doneChan) |
|
| 126 |
+ |
|
| 127 |
+ service := u.newService |
|
| 128 |
+ |
|
| 129 |
+ // If the update is in a PAUSED state, we should not do anything. |
|
| 130 |
+ if service.UpdateStatus != nil && |
|
| 131 |
+ (service.UpdateStatus.State == api.UpdateStatus_PAUSED || |
|
| 132 |
+ service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_PAUSED) {
|
|
| 133 |
+ return |
|
| 134 |
+ } |
|
| 135 |
+ |
|
| 136 |
+ var dirtySlots []orchestrator.Slot |
|
| 137 |
+ for _, slot := range slots {
|
|
| 138 |
+ if u.isSlotDirty(slot) {
|
|
| 139 |
+ dirtySlots = append(dirtySlots, slot) |
|
| 140 |
+ } |
|
| 141 |
+ } |
|
| 142 |
+ // Abort immediately if all tasks are clean. |
|
| 143 |
+ if len(dirtySlots) == 0 {
|
|
| 144 |
+ if service.UpdateStatus != nil && |
|
| 145 |
+ (service.UpdateStatus.State == api.UpdateStatus_UPDATING || |
|
| 146 |
+ service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED) {
|
|
| 147 |
+ u.completeUpdate(ctx, service.ID) |
|
| 148 |
+ } |
|
| 149 |
+ return |
|
| 150 |
+ } |
|
| 151 |
+ |
|
| 152 |
+ // If there's no update in progress, we are starting one. |
|
| 153 |
+ if service.UpdateStatus == nil {
|
|
| 154 |
+ u.startUpdate(ctx, service.ID) |
|
| 155 |
+ } |
|
| 156 |
+ |
|
| 157 |
+ parallelism := 0 |
|
| 158 |
+ if service.Spec.Update != nil {
|
|
| 159 |
+ parallelism = int(service.Spec.Update.Parallelism) |
|
| 160 |
+ } |
|
| 161 |
+ if parallelism == 0 {
|
|
| 162 |
+ // TODO(aluzzardi): We could try to optimize unlimited parallelism by performing updates in a single |
|
| 163 |
+ // goroutine using a batch transaction. |
|
| 164 |
+ parallelism = len(dirtySlots) |
|
| 165 |
+ } |
|
| 166 |
+ |
|
| 167 |
+ // Start the workers. |
|
| 168 |
+ slotQueue := make(chan orchestrator.Slot) |
|
| 169 |
+ wg := sync.WaitGroup{}
|
|
| 170 |
+ wg.Add(parallelism) |
|
| 171 |
+ for i := 0; i < parallelism; i++ {
|
|
| 172 |
+ go func() {
|
|
| 173 |
+ u.worker(ctx, slotQueue) |
|
| 174 |
+ wg.Done() |
|
| 175 |
+ }() |
|
| 176 |
+ } |
|
| 177 |
+ |
|
| 178 |
+ failureAction := api.UpdateConfig_PAUSE |
|
| 179 |
+ allowedFailureFraction := float32(0) |
|
| 180 |
+ monitoringPeriod := defaultMonitor |
|
| 181 |
+ |
|
| 182 |
+ if service.Spec.Update != nil {
|
|
| 183 |
+ failureAction = service.Spec.Update.FailureAction |
|
| 184 |
+ allowedFailureFraction = service.Spec.Update.MaxFailureRatio |
|
| 185 |
+ |
|
| 186 |
+ if service.Spec.Update.Monitor != nil {
|
|
| 187 |
+ var err error |
|
| 188 |
+ monitoringPeriod, err = ptypes.Duration(service.Spec.Update.Monitor) |
|
| 189 |
+ if err != nil {
|
|
| 190 |
+ monitoringPeriod = defaultMonitor |
|
| 191 |
+ } |
|
| 192 |
+ } |
|
| 193 |
+ } |
|
| 194 |
+ |
|
| 195 |
+ var failedTaskWatch chan events.Event |
|
| 196 |
+ |
|
| 197 |
+ if failureAction != api.UpdateConfig_CONTINUE {
|
|
| 198 |
+ var cancelWatch func() |
|
| 199 |
+ failedTaskWatch, cancelWatch = state.Watch( |
|
| 200 |
+ u.store.WatchQueue(), |
|
| 201 |
+ state.EventUpdateTask{
|
|
| 202 |
+ Task: &api.Task{ServiceID: service.ID, Status: api.TaskStatus{State: api.TaskStateRunning}},
|
|
| 203 |
+ Checks: []state.TaskCheckFunc{state.TaskCheckServiceID, state.TaskCheckStateGreaterThan},
|
|
| 204 |
+ }, |
|
| 205 |
+ ) |
|
| 206 |
+ defer cancelWatch() |
|
| 207 |
+ } |
|
| 208 |
+ |
|
| 209 |
+ stopped := false |
|
| 210 |
+ failedTasks := make(map[string]struct{})
|
|
| 211 |
+ totalFailures := 0 |
|
| 212 |
+ |
|
| 213 |
+ failureTriggersAction := func(failedTask *api.Task) bool {
|
|
| 214 |
+ // Ignore tasks we have already seen as failures. |
|
| 215 |
+ if _, found := failedTasks[failedTask.ID]; found {
|
|
| 216 |
+ return false |
|
| 217 |
+ } |
|
| 218 |
+ |
|
| 219 |
+ // If this failed/completed task is one that we |
|
| 220 |
+ // created as part of this update, we should |
|
| 221 |
+ // follow the failure action. |
|
| 222 |
+ u.updatedTasksMu.Lock() |
|
| 223 |
+ startedAt, found := u.updatedTasks[failedTask.ID] |
|
| 224 |
+ u.updatedTasksMu.Unlock() |
|
| 225 |
+ |
|
| 226 |
+ if found && (startedAt.IsZero() || time.Since(startedAt) <= monitoringPeriod) {
|
|
| 227 |
+ failedTasks[failedTask.ID] = struct{}{}
|
|
| 228 |
+ totalFailures++ |
|
| 229 |
+ if float32(totalFailures)/float32(len(dirtySlots)) > allowedFailureFraction {
|
|
| 230 |
+ switch failureAction {
|
|
| 231 |
+ case api.UpdateConfig_PAUSE: |
|
| 232 |
+ stopped = true |
|
| 233 |
+ message := fmt.Sprintf("update paused due to failure or early termination of task %s", failedTask.ID)
|
|
| 234 |
+ u.pauseUpdate(ctx, service.ID, message) |
|
| 235 |
+ return true |
|
| 236 |
+ case api.UpdateConfig_ROLLBACK: |
|
| 237 |
+ // Never roll back a rollback |
|
| 238 |
+ if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
|
|
| 239 |
+ message := fmt.Sprintf("rollback paused due to failure or early termination of task %s", failedTask.ID)
|
|
| 240 |
+ u.pauseUpdate(ctx, service.ID, message) |
|
| 241 |
+ return true |
|
| 242 |
+ } |
|
| 243 |
+ stopped = true |
|
| 244 |
+ message := fmt.Sprintf("update rolled back due to failure or early termination of task %s", failedTask.ID)
|
|
| 245 |
+ u.rollbackUpdate(ctx, service.ID, message) |
|
| 246 |
+ return true |
|
| 247 |
+ } |
|
| 248 |
+ } |
|
| 249 |
+ } |
|
| 250 |
+ |
|
| 251 |
+ return false |
|
| 252 |
+ } |
|
| 253 |
+ |
|
| 254 |
+slotsLoop: |
|
| 255 |
+ for _, slot := range dirtySlots {
|
|
| 256 |
+ retryLoop: |
|
| 257 |
+ for {
|
|
| 258 |
+ // Wait for a worker to pick up the task or abort the update, whichever comes first. |
|
| 259 |
+ select {
|
|
| 260 |
+ case <-u.stopChan: |
|
| 261 |
+ stopped = true |
|
| 262 |
+ break slotsLoop |
|
| 263 |
+ case ev := <-failedTaskWatch: |
|
| 264 |
+ if failureTriggersAction(ev.(state.EventUpdateTask).Task) {
|
|
| 265 |
+ break slotsLoop |
|
| 266 |
+ } |
|
| 267 |
+ case slotQueue <- slot: |
|
| 268 |
+ break retryLoop |
|
| 269 |
+ } |
|
| 270 |
+ } |
|
| 271 |
+ } |
|
| 272 |
+ |
|
| 273 |
+ close(slotQueue) |
|
| 274 |
+ wg.Wait() |
|
| 275 |
+ |
|
| 276 |
+ if !stopped {
|
|
| 277 |
+ // Keep watching for task failures for one more monitoringPeriod, |
|
| 278 |
+ // before declaring the update complete. |
|
| 279 |
+ doneMonitoring := time.After(monitoringPeriod) |
|
| 280 |
+ monitorLoop: |
|
| 281 |
+ for {
|
|
| 282 |
+ select {
|
|
| 283 |
+ case <-u.stopChan: |
|
| 284 |
+ stopped = true |
|
| 285 |
+ break monitorLoop |
|
| 286 |
+ case <-doneMonitoring: |
|
| 287 |
+ break monitorLoop |
|
| 288 |
+ case ev := <-failedTaskWatch: |
|
| 289 |
+ if failureTriggersAction(ev.(state.EventUpdateTask).Task) {
|
|
| 290 |
+ break monitorLoop |
|
| 291 |
+ } |
|
| 292 |
+ } |
|
| 293 |
+ } |
|
| 294 |
+ } |
|
| 295 |
+ |
|
| 296 |
+ // TODO(aaronl): Potentially roll back the service if not enough tasks |
|
| 297 |
+ // have reached RUNNING by this point. |
|
| 298 |
+ |
|
| 299 |
+ if !stopped {
|
|
| 300 |
+ u.completeUpdate(ctx, service.ID) |
|
| 301 |
+ } |
|
| 302 |
+} |
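
The failure handling in Run comes down to a small amount of arithmetic: every failed or prematurely terminated task that this updater created (and that failed within the monitoring period) increments a counter, and once totalFailures / len(dirtySlots) exceeds MaxFailureRatio the configured failure action fires, with the extra rule that a rollback is never rolled back again, only paused. A sketch of just that decision, with plain values standing in for the swarmkit API:

```go
package main

import "fmt"

type failureAction int

const (
	actionContinue failureAction = iota
	actionPause
	actionRollback
)

// decide returns the action to take after one more tracked task failure,
// mirroring failureTriggersAction: the threshold is a ratio of failures to
// dirty slots, and a failing rollback is paused, never rolled back again.
func decide(totalFailures, dirtySlots int, maxFailureRatio float32, action failureAction, inRollback bool) failureAction {
	if dirtySlots == 0 {
		return actionContinue
	}
	if float32(totalFailures)/float32(dirtySlots) <= maxFailureRatio {
		return actionContinue
	}
	if action == actionRollback && inRollback {
		return actionPause // never roll back a rollback
	}
	return action
}

func main() {
	// 2 failures out of 10 slots with a 10% tolerance triggers the action.
	fmt.Println(decide(2, 10, 0.1, actionRollback, false)) // 2 (actionRollback)
	fmt.Println(decide(2, 10, 0.1, actionRollback, true))  // 1 (actionPause)
	fmt.Println(decide(1, 10, 0.1, actionPause, false))    // 0 (1/10 == 0.1, not above the ratio)
}
```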
|
| 303 |
+ |
|
| 304 |
+func (u *Updater) worker(ctx context.Context, queue <-chan orchestrator.Slot) {
|
|
| 305 |
+ for slot := range queue {
|
|
| 306 |
+ // Do we have a task with the new spec in desired state = RUNNING? |
|
| 307 |
+ // If so, all we have to do to complete the update is remove the |
|
| 308 |
+ // other tasks. Or if we have a task with the new spec that has |
|
| 309 |
+ // desired state < RUNNING, advance it to running and remove the |
|
| 310 |
+ // other tasks. |
|
| 311 |
+ var ( |
|
| 312 |
+ runningTask *api.Task |
|
| 313 |
+ cleanTask *api.Task |
|
| 314 |
+ ) |
|
| 315 |
+ for _, t := range slot {
|
|
| 316 |
+ if !u.isTaskDirty(t) {
|
|
| 317 |
+ if t.DesiredState == api.TaskStateRunning {
|
|
| 318 |
+ runningTask = t |
|
| 319 |
+ break |
|
| 320 |
+ } |
|
| 321 |
+ if t.DesiredState < api.TaskStateRunning {
|
|
| 322 |
+ cleanTask = t |
|
| 323 |
+ } |
|
| 324 |
+ } |
|
| 325 |
+ } |
|
| 326 |
+ if runningTask != nil {
|
|
| 327 |
+ if err := u.useExistingTask(ctx, slot, runningTask); err != nil {
|
|
| 328 |
+ log.G(ctx).WithError(err).Error("update failed")
|
|
| 329 |
+ } |
|
| 330 |
+ } else if cleanTask != nil {
|
|
| 331 |
+ if err := u.useExistingTask(ctx, slot, cleanTask); err != nil {
|
|
| 332 |
+ log.G(ctx).WithError(err).Error("update failed")
|
|
| 333 |
+ } |
|
| 334 |
+ } else {
|
|
| 335 |
+ updated := orchestrator.NewTask(u.cluster, u.newService, slot[0].Slot, "") |
|
| 336 |
+ if orchestrator.IsGlobalService(u.newService) {
|
|
| 337 |
+ updated = orchestrator.NewTask(u.cluster, u.newService, slot[0].Slot, slot[0].NodeID) |
|
| 338 |
+ } |
|
| 339 |
+ updated.DesiredState = api.TaskStateReady |
|
| 340 |
+ |
|
| 341 |
+ if err := u.updateTask(ctx, slot, updated); err != nil {
|
|
| 342 |
+ log.G(ctx).WithError(err).WithField("task.id", updated.ID).Error("update failed")
|
|
| 343 |
+ } |
|
| 344 |
+ } |
|
| 345 |
+ |
|
| 346 |
+ if u.newService.Spec.Update != nil && (u.newService.Spec.Update.Delay.Seconds != 0 || u.newService.Spec.Update.Delay.Nanos != 0) {
|
|
| 347 |
+ delay, err := ptypes.Duration(&u.newService.Spec.Update.Delay) |
|
| 348 |
+ if err != nil {
|
|
| 349 |
+ log.G(ctx).WithError(err).Error("invalid update delay")
|
|
| 350 |
+ continue |
|
| 351 |
+ } |
|
| 352 |
+ select {
|
|
| 353 |
+ case <-time.After(delay): |
|
| 354 |
+ case <-u.stopChan: |
|
| 355 |
+ return |
|
| 356 |
+ } |
|
| 357 |
+ } |
|
| 358 |
+ } |
|
| 359 |
+} |
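
Run fans dirty slots out to `parallelism` worker goroutines over an unbuffered channel, then closes the channel and waits on a WaitGroup; each worker optionally sleeps for the configured update delay between slots. A minimal sketch of that fan-out shape, with the per-slot work reduced to a callback:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// fanOut mirrors the updater's worker pool: a bounded number of goroutines
// drain a channel of work items, with an optional delay between items.
func fanOut(items []string, parallelism int, delay time.Duration, handle func(string)) {
	if parallelism <= 0 {
		parallelism = len(items) // 0 means "unlimited", like Parallelism == 0
	}
	queue := make(chan string)
	var wg sync.WaitGroup
	wg.Add(parallelism)
	for i := 0; i < parallelism; i++ {
		go func() {
			defer wg.Done()
			for item := range queue {
				handle(item)
				if delay > 0 {
					time.Sleep(delay)
				}
			}
		}()
	}
	for _, item := range items {
		queue <- item
	}
	close(queue)
	wg.Wait()
}

func main() {
	fanOut([]string{"slot-1", "slot-2", "slot-3"}, 2, 10*time.Millisecond, func(s string) {
		fmt.Println("updating", s)
	})
}
```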
|
| 360 |
+ |
|
| 361 |
+func (u *Updater) updateTask(ctx context.Context, slot orchestrator.Slot, updated *api.Task) error {
|
|
| 362 |
+ // Kick off the watch before even creating the updated task. This is in order to avoid missing any event. |
|
| 363 |
+ taskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{
|
|
| 364 |
+ Task: &api.Task{ID: updated.ID},
|
|
| 365 |
+ Checks: []state.TaskCheckFunc{state.TaskCheckID},
|
|
| 366 |
+ }) |
|
| 367 |
+ defer cancel() |
|
| 368 |
+ |
|
| 369 |
+ // Create an empty entry for this task, so the updater knows a failure |
|
| 370 |
+ // should count towards the failure count. The timestamp is added |
|
| 371 |
+ // if/when the task reaches RUNNING. |
|
| 372 |
+ u.updatedTasksMu.Lock() |
|
| 373 |
+ u.updatedTasks[updated.ID] = time.Time{}
|
|
| 374 |
+ u.updatedTasksMu.Unlock() |
|
| 375 |
+ |
|
| 376 |
+ var delayStartCh <-chan struct{}
|
|
| 377 |
+ // Atomically create the updated task and bring down the old one. |
|
| 378 |
+ _, err := u.store.Batch(func(batch *store.Batch) error {
|
|
| 379 |
+ oldTask, err := u.removeOldTasks(ctx, batch, slot) |
|
| 380 |
+ if err != nil {
|
|
| 381 |
+ return err |
|
| 382 |
+ } |
|
| 383 |
+ |
|
| 384 |
+ err = batch.Update(func(tx store.Tx) error {
|
|
| 385 |
+ if err := store.CreateTask(tx, updated); err != nil {
|
|
| 386 |
+ return err |
|
| 387 |
+ } |
|
| 388 |
+ return nil |
|
| 389 |
+ }) |
|
| 390 |
+ if err != nil {
|
|
| 391 |
+ return err |
|
| 392 |
+ } |
|
| 393 |
+ |
|
| 394 |
+ delayStartCh = u.restarts.DelayStart(ctx, nil, oldTask, updated.ID, 0, true) |
|
| 395 |
+ |
|
| 396 |
+ return nil |
|
| 397 |
+ |
|
| 398 |
+ }) |
|
| 399 |
+ if err != nil {
|
|
| 400 |
+ return err |
|
| 401 |
+ } |
|
| 402 |
+ |
|
| 403 |
+ if delayStartCh != nil {
|
|
| 404 |
+ <-delayStartCh |
|
| 405 |
+ } |
|
| 406 |
+ |
|
| 407 |
+ // Wait for the new task to come up. |
|
| 408 |
+ // TODO(aluzzardi): Consider adding a timeout here. |
|
| 409 |
+ for {
|
|
| 410 |
+ select {
|
|
| 411 |
+ case e := <-taskUpdates: |
|
| 412 |
+ updated = e.(state.EventUpdateTask).Task |
|
| 413 |
+ if updated.Status.State >= api.TaskStateRunning {
|
|
| 414 |
+ u.updatedTasksMu.Lock() |
|
| 415 |
+ u.updatedTasks[updated.ID] = time.Now() |
|
| 416 |
+ u.updatedTasksMu.Unlock() |
|
| 417 |
+ return nil |
|
| 418 |
+ } |
|
| 419 |
+ case <-u.stopChan: |
|
| 420 |
+ return nil |
|
| 421 |
+ } |
|
| 422 |
+ } |
|
| 423 |
+} |
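
updateTask subscribes to task events before it creates the replacement task, so the transition to RUNNING cannot slip through between the write and the subscription. A sketch of that ordering with a plain in-process channel standing in for the store's watch queue:

```go
package main

import "fmt"

type event struct{ taskID, state string }

type bus struct{ subs []chan event }

func (b *bus) subscribe() chan event {
	ch := make(chan event, 16) // buffered so publish never blocks in this sketch
	b.subs = append(b.subs, ch)
	return ch
}

func (b *bus) publish(e event) {
	for _, ch := range b.subs {
		ch <- e
	}
}

func main() {
	b := &bus{}

	// Subscribe first, as updateTask does, so no event can be missed...
	updates := b.subscribe()

	// ...then "create" the task, which immediately produces events.
	b.publish(event{"task-new", "RUNNING"})

	// Wait for the new task to reach RUNNING.
	for e := range updates {
		if e.taskID == "task-new" && e.state == "RUNNING" {
			fmt.Println("new task is running")
			break
		}
	}
}
```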
|
| 424 |
+ |
|
| 425 |
+func (u *Updater) useExistingTask(ctx context.Context, slot orchestrator.Slot, existing *api.Task) error {
|
|
| 426 |
+ var removeTasks []*api.Task |
|
| 427 |
+ for _, t := range slot {
|
|
| 428 |
+ if t != existing {
|
|
| 429 |
+ removeTasks = append(removeTasks, t) |
|
| 430 |
+ } |
|
| 431 |
+ } |
|
| 432 |
+ if len(removeTasks) != 0 || existing.DesiredState != api.TaskStateRunning {
|
|
| 433 |
+ var delayStartCh <-chan struct{}
|
|
| 434 |
+ _, err := u.store.Batch(func(batch *store.Batch) error {
|
|
| 435 |
+ var oldTask *api.Task |
|
| 436 |
+ if len(removeTasks) != 0 {
|
|
| 437 |
+ var err error |
|
| 438 |
+ oldTask, err = u.removeOldTasks(ctx, batch, removeTasks) |
|
| 439 |
+ if err != nil {
|
|
| 440 |
+ return err |
|
| 441 |
+ } |
|
| 442 |
+ } |
|
| 443 |
+ |
|
| 444 |
+ if existing.DesiredState != api.TaskStateRunning {
|
|
| 445 |
+ delayStartCh = u.restarts.DelayStart(ctx, nil, oldTask, existing.ID, 0, true) |
|
| 446 |
+ } |
|
| 447 |
+ return nil |
|
| 448 |
+ }) |
|
| 449 |
+ if err != nil {
|
|
| 450 |
+ return err |
|
| 451 |
+ } |
|
| 452 |
+ |
|
| 453 |
+ if delayStartCh != nil {
|
|
| 454 |
+ <-delayStartCh |
|
| 455 |
+ } |
|
| 456 |
+ } |
|
| 457 |
+ |
|
| 458 |
+ return nil |
|
| 459 |
+} |
|
| 460 |
+ |
|
| 461 |
+// removeOldTasks shuts down the given tasks and returns one of the tasks that |
|
| 462 |
+// was shut down, or an error. |
|
| 463 |
+func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, removeTasks []*api.Task) (*api.Task, error) {
|
|
| 464 |
+ var ( |
|
| 465 |
+ lastErr error |
|
| 466 |
+ removedTask *api.Task |
|
| 467 |
+ ) |
|
| 468 |
+ for _, original := range removeTasks {
|
|
| 469 |
+ err := batch.Update(func(tx store.Tx) error {
|
|
| 470 |
+ t := store.GetTask(tx, original.ID) |
|
| 471 |
+ if t == nil {
|
|
| 472 |
+ return fmt.Errorf("task %s not found while trying to shut it down", original.ID)
|
|
| 473 |
+ } |
|
| 474 |
+ if t.DesiredState > api.TaskStateRunning {
|
|
| 475 |
+ return fmt.Errorf("task %s was already shut down when reached by updater", original.ID)
|
|
| 476 |
+ } |
|
| 477 |
+ t.DesiredState = api.TaskStateShutdown |
|
| 478 |
+ return store.UpdateTask(tx, t) |
|
| 479 |
+ }) |
|
| 480 |
+ if err != nil {
|
|
| 481 |
+ lastErr = err |
|
| 482 |
+ } else {
|
|
| 483 |
+ removedTask = original |
|
| 484 |
+ } |
|
| 485 |
+ } |
|
| 486 |
+ |
|
| 487 |
+ if removedTask == nil {
|
|
| 488 |
+ return nil, lastErr |
|
| 489 |
+ } |
|
| 490 |
+ return removedTask, nil |
|
| 491 |
+} |
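
removeOldTasks never deletes records; it flips DesiredState to SHUTDOWN and lets the agent wind the containers down, returning one successfully shut-down task so the restart supervisor can compute its start delay from it. A sketch of the error handling only: remember the last error, and report failure only when not a single task could be marked (store batching omitted, types simplified):

```go
package main

import "fmt"

type task struct {
	id           string
	desiredState string // "RUNNING" or "SHUTDOWN"
}

// shutDownOld mirrors removeOldTasks: mark each task for shutdown, keep the
// last error, and fail only when nothing was marked at all.
func shutDownOld(tasks []*task) (*task, error) {
	var (
		lastErr error
		removed *task
	)
	for _, t := range tasks {
		if t.desiredState != "RUNNING" {
			lastErr = fmt.Errorf("task %s was already shut down", t.id)
			continue
		}
		t.desiredState = "SHUTDOWN"
		removed = t
	}
	if removed == nil {
		return nil, lastErr
	}
	return removed, nil
}

func main() {
	old := []*task{{id: "a", desiredState: "SHUTDOWN"}, {id: "b", desiredState: "RUNNING"}}
	t, err := shutDownOld(old)
	fmt.Println(t.id, err) // b <nil>
}
```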
|
| 492 |
+ |
|
| 493 |
+func (u *Updater) isTaskDirty(t *api.Task) bool {
|
|
| 494 |
+ return orchestrator.IsTaskDirty(u.newService, t) |
|
| 495 |
+} |
|
| 496 |
+ |
|
| 497 |
+func (u *Updater) isSlotDirty(slot orchestrator.Slot) bool {
|
|
| 498 |
+ return len(slot) > 1 || (len(slot) == 1 && u.isTaskDirty(slot[0])) |
|
| 499 |
+} |
|
| 500 |
+ |
|
| 501 |
+func (u *Updater) startUpdate(ctx context.Context, serviceID string) {
|
|
| 502 |
+ err := u.store.Update(func(tx store.Tx) error {
|
|
| 503 |
+ service := store.GetService(tx, serviceID) |
|
| 504 |
+ if service == nil {
|
|
| 505 |
+ return nil |
|
| 506 |
+ } |
|
| 507 |
+ if service.UpdateStatus != nil {
|
|
| 508 |
+ return nil |
|
| 509 |
+ } |
|
| 510 |
+ |
|
| 511 |
+ service.UpdateStatus = &api.UpdateStatus{
|
|
| 512 |
+ State: api.UpdateStatus_UPDATING, |
|
| 513 |
+ Message: "update in progress", |
|
| 514 |
+ StartedAt: ptypes.MustTimestampProto(time.Now()), |
|
| 515 |
+ } |
|
| 516 |
+ |
|
| 517 |
+ return store.UpdateService(tx, service) |
|
| 518 |
+ }) |
|
| 519 |
+ |
|
| 520 |
+ if err != nil {
|
|
| 521 |
+ log.G(ctx).WithError(err).Errorf("failed to mark update of service %s in progress", serviceID)
|
|
| 522 |
+ } |
|
| 523 |
+} |
|
| 524 |
+ |
|
| 525 |
+func (u *Updater) pauseUpdate(ctx context.Context, serviceID, message string) {
|
|
| 526 |
+ log.G(ctx).Debugf("pausing update of service %s", serviceID)
|
|
| 527 |
+ |
|
| 528 |
+ err := u.store.Update(func(tx store.Tx) error {
|
|
| 529 |
+ service := store.GetService(tx, serviceID) |
|
| 530 |
+ if service == nil {
|
|
| 531 |
+ return nil |
|
| 532 |
+ } |
|
| 533 |
+ if service.UpdateStatus == nil {
|
|
| 534 |
+ // The service was updated since we started this update |
|
| 535 |
+ return nil |
|
| 536 |
+ } |
|
| 537 |
+ |
|
| 538 |
+ if service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
|
|
| 539 |
+ service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_PAUSED |
|
| 540 |
+ } else {
|
|
| 541 |
+ service.UpdateStatus.State = api.UpdateStatus_PAUSED |
|
| 542 |
+ } |
|
| 543 |
+ service.UpdateStatus.Message = message |
|
| 544 |
+ |
|
| 545 |
+ return store.UpdateService(tx, service) |
|
| 546 |
+ }) |
|
| 547 |
+ |
|
| 548 |
+ if err != nil {
|
|
| 549 |
+ log.G(ctx).WithError(err).Errorf("failed to pause update of service %s", serviceID)
|
|
| 550 |
+ } |
|
| 551 |
+} |
|
| 552 |
+ |
|
| 553 |
+func (u *Updater) rollbackUpdate(ctx context.Context, serviceID, message string) {
|
|
| 554 |
+ log.G(ctx).Debugf("starting rollback of service %s", serviceID)
|
|
| 555 |
+ |
|
| 556 |
+ var service *api.Service |
|
| 557 |
+ err := u.store.Update(func(tx store.Tx) error {
|
|
| 558 |
+ service = store.GetService(tx, serviceID) |
|
| 559 |
+ if service == nil {
|
|
| 560 |
+ return nil |
|
| 561 |
+ } |
|
| 562 |
+ if service.UpdateStatus == nil {
|
|
| 563 |
+ // The service was updated since we started this update |
|
| 564 |
+ return nil |
|
| 565 |
+ } |
|
| 566 |
+ |
|
| 567 |
+ service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_STARTED |
|
| 568 |
+ service.UpdateStatus.Message = message |
|
| 569 |
+ |
|
| 570 |
+ if service.PreviousSpec == nil {
|
|
| 571 |
+ return errors.New("cannot roll back service because no previous spec is available")
|
|
| 572 |
+ } |
|
| 573 |
+ service.Spec = *service.PreviousSpec |
|
| 574 |
+ service.PreviousSpec = nil |
|
| 575 |
+ |
|
| 576 |
+ return store.UpdateService(tx, service) |
|
| 577 |
+ }) |
|
| 578 |
+ |
|
| 579 |
+ if err != nil {
|
|
| 580 |
+ log.G(ctx).WithError(err).Errorf("failed to start rollback of service %s", serviceID)
|
|
| 581 |
+ return |
|
| 582 |
+ } |
|
| 583 |
+} |
|
| 584 |
+ |
|
| 585 |
+func (u *Updater) completeUpdate(ctx context.Context, serviceID string) {
|
|
| 586 |
+ log.G(ctx).Debugf("update of service %s complete", serviceID)
|
|
| 587 |
+ |
|
| 588 |
+ err := u.store.Update(func(tx store.Tx) error {
|
|
| 589 |
+ service := store.GetService(tx, serviceID) |
|
| 590 |
+ if service == nil {
|
|
| 591 |
+ return nil |
|
| 592 |
+ } |
|
| 593 |
+ if service.UpdateStatus == nil {
|
|
| 594 |
+ // The service was changed since we started this update |
|
| 595 |
+ return nil |
|
| 596 |
+ } |
|
| 597 |
+ if service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
|
|
| 598 |
+ service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_COMPLETED |
|
| 599 |
+ service.UpdateStatus.Message = "rollback completed" |
|
| 600 |
+ } else {
|
|
| 601 |
+ service.UpdateStatus.State = api.UpdateStatus_COMPLETED |
|
| 602 |
+ service.UpdateStatus.Message = "update completed" |
|
| 603 |
+ } |
|
| 604 |
+ service.UpdateStatus.CompletedAt = ptypes.MustTimestampProto(time.Now()) |
|
| 605 |
+ |
|
| 606 |
+ return store.UpdateService(tx, service) |
|
| 607 |
+ }) |
|
| 608 |
+ |
|
| 609 |
+ if err != nil {
|
|
| 610 |
+ log.G(ctx).WithError(err).Errorf("failed to mark update of service %s complete", serviceID)
|
|
| 611 |
+ } |
|
| 612 |
+} |
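
startUpdate, pauseUpdate, rollbackUpdate and completeUpdate together form a small state machine over service.UpdateStatus: UPDATING can end in COMPLETED, PAUSED or ROLLBACK_STARTED, and ROLLBACK_STARTED can end in ROLLBACK_COMPLETED or ROLLBACK_PAUSED. A sketch of just the transitions, with the store writes omitted:

```go
package main

import "fmt"

type updateState string

const (
	updating          updateState = "UPDATING"
	completed         updateState = "COMPLETED"
	paused            updateState = "PAUSED"
	rollbackStarted   updateState = "ROLLBACK_STARTED"
	rollbackCompleted updateState = "ROLLBACK_COMPLETED"
	rollbackPaused    updateState = "ROLLBACK_PAUSED"
)

// next applies one of the updater's terminal transitions to the current state.
func next(cur updateState, event string) updateState {
	switch event {
	case "complete":
		if cur == rollbackStarted {
			return rollbackCompleted
		}
		return completed
	case "pause":
		if cur == rollbackStarted {
			return rollbackPaused
		}
		return paused
	case "rollback":
		return rollbackStarted
	}
	return cur
}

func main() {
	s := updating
	s = next(s, "rollback") // too many failures with FailureAction == ROLLBACK
	s = next(s, "complete") // the rollback converged
	fmt.Println(s)          // ROLLBACK_COMPLETED
}
```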
| 0 | 613 |
deleted file mode 100644 |
| ... | ... |
@@ -1,616 +0,0 @@ |
| 1 |
-package orchestrator |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "errors" |
|
| 5 |
- "fmt" |
|
| 6 |
- "reflect" |
|
| 7 |
- "sync" |
|
| 8 |
- "time" |
|
| 9 |
- |
|
| 10 |
- "golang.org/x/net/context" |
|
| 11 |
- |
|
| 12 |
- "github.com/docker/go-events" |
|
| 13 |
- "github.com/docker/swarmkit/api" |
|
| 14 |
- "github.com/docker/swarmkit/log" |
|
| 15 |
- "github.com/docker/swarmkit/manager/state" |
|
| 16 |
- "github.com/docker/swarmkit/manager/state/store" |
|
| 17 |
- "github.com/docker/swarmkit/manager/state/watch" |
|
| 18 |
- "github.com/docker/swarmkit/protobuf/ptypes" |
|
| 19 |
-) |
|
| 20 |
- |
|
| 21 |
-const defaultMonitor = 30 * time.Second |
|
| 22 |
- |
|
| 23 |
-// UpdateSupervisor supervises a set of updates. It's responsible for keeping track of updates, |
|
| 24 |
-// shutting them down and replacing them. |
|
| 25 |
-type UpdateSupervisor struct {
|
|
| 26 |
- store *store.MemoryStore |
|
| 27 |
- restarts *RestartSupervisor |
|
| 28 |
- updates map[string]*Updater |
|
| 29 |
- l sync.Mutex |
|
| 30 |
-} |
|
| 31 |
- |
|
| 32 |
-// NewUpdateSupervisor creates a new UpdateSupervisor. |
|
| 33 |
-func NewUpdateSupervisor(store *store.MemoryStore, restartSupervisor *RestartSupervisor) *UpdateSupervisor {
|
|
| 34 |
- return &UpdateSupervisor{
|
|
| 35 |
- store: store, |
|
| 36 |
- updates: make(map[string]*Updater), |
|
| 37 |
- restarts: restartSupervisor, |
|
| 38 |
- } |
|
| 39 |
-} |
|
| 40 |
- |
|
| 41 |
-// Update starts an Update of `slots` belonging to `service` in the background |
|
| 42 |
-// and returns immediately. Each slot contains a group of one or more tasks |
|
| 43 |
-// occupying the same slot (replicated service) or node (global service). There |
|
| 44 |
-// may be more than one task per slot in cases where an update is in progress |
|
| 45 |
-// and the new task was started before the old one was shut down. If an update |
|
| 46 |
-// for that service was already in progress, it will be cancelled before the |
|
| 47 |
-// new one starts. |
|
| 48 |
-func (u *UpdateSupervisor) Update(ctx context.Context, cluster *api.Cluster, service *api.Service, slots []slot) {
|
|
| 49 |
- u.l.Lock() |
|
| 50 |
- defer u.l.Unlock() |
|
| 51 |
- |
|
| 52 |
- id := service.ID |
|
| 53 |
- |
|
| 54 |
- if update, ok := u.updates[id]; ok {
|
|
| 55 |
- if reflect.DeepEqual(service.Spec, update.newService.Spec) {
|
|
| 56 |
- // There's already an update working towards this goal. |
|
| 57 |
- return |
|
| 58 |
- } |
|
| 59 |
- update.Cancel() |
|
| 60 |
- } |
|
| 61 |
- |
|
| 62 |
- update := NewUpdater(u.store, u.restarts, cluster, service) |
|
| 63 |
- u.updates[id] = update |
|
| 64 |
- go func() {
|
|
| 65 |
- update.Run(ctx, slots) |
|
| 66 |
- u.l.Lock() |
|
| 67 |
- if u.updates[id] == update {
|
|
| 68 |
- delete(u.updates, id) |
|
| 69 |
- } |
|
| 70 |
- u.l.Unlock() |
|
| 71 |
- }() |
|
| 72 |
-} |
|
| 73 |
- |
|
| 74 |
-// CancelAll cancels all current updates. |
|
| 75 |
-func (u *UpdateSupervisor) CancelAll() {
|
|
| 76 |
- u.l.Lock() |
|
| 77 |
- defer u.l.Unlock() |
|
| 78 |
- |
|
| 79 |
- for _, update := range u.updates {
|
|
| 80 |
- update.Cancel() |
|
| 81 |
- } |
|
| 82 |
-} |
|
| 83 |
- |
|
| 84 |
-// Updater updates a set of tasks to a new version. |
|
| 85 |
-type Updater struct {
|
|
| 86 |
- store *store.MemoryStore |
|
| 87 |
- watchQueue *watch.Queue |
|
| 88 |
- restarts *RestartSupervisor |
|
| 89 |
- |
|
| 90 |
- cluster *api.Cluster |
|
| 91 |
- newService *api.Service |
|
| 92 |
- |
|
| 93 |
- updatedTasks map[string]time.Time // task ID to creation time |
|
| 94 |
- updatedTasksMu sync.Mutex |
|
| 95 |
- |
|
| 96 |
- // stopChan signals to the state machine to stop running. |
|
| 97 |
- stopChan chan struct{}
|
|
| 98 |
- // doneChan is closed when the state machine terminates. |
|
| 99 |
- doneChan chan struct{}
|
|
| 100 |
-} |
|
| 101 |
- |
|
| 102 |
-// NewUpdater creates a new Updater. |
|
| 103 |
-func NewUpdater(store *store.MemoryStore, restartSupervisor *RestartSupervisor, cluster *api.Cluster, newService *api.Service) *Updater {
|
|
| 104 |
- return &Updater{
|
|
| 105 |
- store: store, |
|
| 106 |
- watchQueue: store.WatchQueue(), |
|
| 107 |
- restarts: restartSupervisor, |
|
| 108 |
- cluster: cluster.Copy(), |
|
| 109 |
- newService: newService.Copy(), |
|
| 110 |
- updatedTasks: make(map[string]time.Time), |
|
| 111 |
- stopChan: make(chan struct{}),
|
|
| 112 |
- doneChan: make(chan struct{}),
|
|
| 113 |
- } |
|
| 114 |
-} |
|
| 115 |
- |
|
| 116 |
-// Cancel cancels the current update immediately. It blocks until the cancellation is confirmed. |
|
| 117 |
-func (u *Updater) Cancel() {
|
|
| 118 |
- close(u.stopChan) |
|
| 119 |
- <-u.doneChan |
|
| 120 |
-} |
|
| 121 |
- |
|
| 122 |
-// Run starts the update and returns only once its complete or cancelled. |
|
| 123 |
-func (u *Updater) Run(ctx context.Context, slots []slot) {
|
|
| 124 |
- defer close(u.doneChan) |
|
| 125 |
- |
|
| 126 |
- service := u.newService |
|
| 127 |
- |
|
| 128 |
- // If the update is in a PAUSED state, we should not do anything. |
|
| 129 |
- if service.UpdateStatus != nil && |
|
| 130 |
- (service.UpdateStatus.State == api.UpdateStatus_PAUSED || |
|
| 131 |
- service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_PAUSED) {
|
|
| 132 |
- return |
|
| 133 |
- } |
|
| 134 |
- |
|
| 135 |
- var dirtySlots []slot |
|
| 136 |
- for _, slot := range slots {
|
|
| 137 |
- if u.isSlotDirty(slot) {
|
|
| 138 |
- dirtySlots = append(dirtySlots, slot) |
|
| 139 |
- } |
|
| 140 |
- } |
|
| 141 |
- // Abort immediately if all tasks are clean. |
|
| 142 |
- if len(dirtySlots) == 0 {
|
|
| 143 |
- if service.UpdateStatus != nil && |
|
| 144 |
- (service.UpdateStatus.State == api.UpdateStatus_UPDATING || |
|
| 145 |
- service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED) {
|
|
| 146 |
- u.completeUpdate(ctx, service.ID) |
|
| 147 |
- } |
|
| 148 |
- return |
|
| 149 |
- } |
|
| 150 |
- |
|
| 151 |
- // If there's no update in progress, we are starting one. |
|
| 152 |
- if service.UpdateStatus == nil {
|
|
| 153 |
- u.startUpdate(ctx, service.ID) |
|
| 154 |
- } |
|
| 155 |
- |
|
| 156 |
- parallelism := 0 |
|
| 157 |
- if service.Spec.Update != nil {
|
|
| 158 |
- parallelism = int(service.Spec.Update.Parallelism) |
|
| 159 |
- } |
|
| 160 |
- if parallelism == 0 {
|
|
| 161 |
- // TODO(aluzzardi): We could try to optimize unlimited parallelism by performing updates in a single |
|
| 162 |
- // goroutine using a batch transaction. |
|
| 163 |
- parallelism = len(dirtySlots) |
|
| 164 |
- } |
|
| 165 |
- |
|
| 166 |
- // Start the workers. |
|
| 167 |
- slotQueue := make(chan slot) |
|
| 168 |
- wg := sync.WaitGroup{}
|
|
| 169 |
- wg.Add(parallelism) |
|
| 170 |
- for i := 0; i < parallelism; i++ {
|
|
| 171 |
- go func() {
|
|
| 172 |
- u.worker(ctx, slotQueue) |
|
| 173 |
- wg.Done() |
|
| 174 |
- }() |
|
| 175 |
- } |
|
| 176 |
- |
|
| 177 |
- failureAction := api.UpdateConfig_PAUSE |
|
| 178 |
- allowedFailureFraction := float32(0) |
|
| 179 |
- monitoringPeriod := defaultMonitor |
|
| 180 |
- |
|
| 181 |
- if service.Spec.Update != nil {
|
|
| 182 |
- failureAction = service.Spec.Update.FailureAction |
|
| 183 |
- allowedFailureFraction = service.Spec.Update.MaxFailureRatio |
|
| 184 |
- |
|
| 185 |
- if service.Spec.Update.Monitor != nil {
|
|
| 186 |
- var err error |
|
| 187 |
- monitoringPeriod, err = ptypes.Duration(service.Spec.Update.Monitor) |
|
| 188 |
- if err != nil {
|
|
| 189 |
- monitoringPeriod = defaultMonitor |
|
| 190 |
- } |
|
| 191 |
- } |
|
| 192 |
- } |
|
| 193 |
- |
|
| 194 |
- var failedTaskWatch chan events.Event |
|
| 195 |
- |
|
| 196 |
- if failureAction != api.UpdateConfig_CONTINUE {
|
|
| 197 |
- var cancelWatch func() |
|
| 198 |
- failedTaskWatch, cancelWatch = state.Watch( |
|
| 199 |
- u.store.WatchQueue(), |
|
| 200 |
- state.EventUpdateTask{
|
|
| 201 |
- Task: &api.Task{ServiceID: service.ID, Status: api.TaskStatus{State: api.TaskStateRunning}},
|
|
| 202 |
- Checks: []state.TaskCheckFunc{state.TaskCheckServiceID, state.TaskCheckStateGreaterThan},
|
|
| 203 |
- }, |
|
| 204 |
- ) |
|
| 205 |
- defer cancelWatch() |
|
| 206 |
- } |
|
| 207 |
- |
|
| 208 |
- stopped := false |
|
| 209 |
- failedTasks := make(map[string]struct{})
|
|
| 210 |
- totalFailures := 0 |
|
| 211 |
- |
|
| 212 |
- failureTriggersAction := func(failedTask *api.Task) bool {
|
|
| 213 |
- // Ignore tasks we have already seen as failures. |
|
| 214 |
- if _, found := failedTasks[failedTask.ID]; found {
|
|
| 215 |
- return false |
|
| 216 |
- } |
|
| 217 |
- |
|
| 218 |
- // If this failed/completed task is one that we |
|
| 219 |
- // created as part of this update, we should |
|
| 220 |
- // follow the failure action. |
|
| 221 |
- u.updatedTasksMu.Lock() |
|
| 222 |
- startedAt, found := u.updatedTasks[failedTask.ID] |
|
| 223 |
- u.updatedTasksMu.Unlock() |
|
| 224 |
- |
|
| 225 |
- if found && (startedAt.IsZero() || time.Since(startedAt) <= monitoringPeriod) {
|
|
| 226 |
- failedTasks[failedTask.ID] = struct{}{}
|
|
| 227 |
- totalFailures++ |
|
| 228 |
- if float32(totalFailures)/float32(len(dirtySlots)) > allowedFailureFraction {
|
|
| 229 |
- switch failureAction {
|
|
| 230 |
- case api.UpdateConfig_PAUSE: |
|
| 231 |
- stopped = true |
|
| 232 |
- message := fmt.Sprintf("update paused due to failure or early termination of task %s", failedTask.ID)
|
|
| 233 |
- u.pauseUpdate(ctx, service.ID, message) |
|
| 234 |
- return true |
|
| 235 |
- case api.UpdateConfig_ROLLBACK: |
|
| 236 |
- // Never roll back a rollback |
|
| 237 |
- if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
|
|
| 238 |
- message := fmt.Sprintf("rollback paused due to failure or early termination of task %s", failedTask.ID)
|
|
| 239 |
- u.pauseUpdate(ctx, service.ID, message) |
|
| 240 |
- return true |
|
| 241 |
- } |
|
| 242 |
- stopped = true |
|
| 243 |
- message := fmt.Sprintf("update rolled back due to failure or early termination of task %s", failedTask.ID)
|
|
| 244 |
- u.rollbackUpdate(ctx, service.ID, message) |
|
| 245 |
- return true |
|
| 246 |
- } |
|
| 247 |
- } |
|
| 248 |
- } |
|
| 249 |
- |
|
| 250 |
- return false |
|
| 251 |
- } |
|
| 252 |
- |
|
| 253 |
-slotsLoop: |
|
| 254 |
- for _, slot := range dirtySlots {
|
|
| 255 |
- retryLoop: |
|
| 256 |
- for {
|
|
| 257 |
- // Wait for a worker to pick up the task or abort the update, whichever comes first. |
|
| 258 |
- select {
|
|
| 259 |
- case <-u.stopChan: |
|
| 260 |
- stopped = true |
|
| 261 |
- break slotsLoop |
|
| 262 |
- case ev := <-failedTaskWatch: |
|
| 263 |
- if failureTriggersAction(ev.(state.EventUpdateTask).Task) {
|
|
| 264 |
- break slotsLoop |
|
| 265 |
- } |
|
| 266 |
- case slotQueue <- slot: |
|
| 267 |
- break retryLoop |
|
| 268 |
- } |
|
| 269 |
- } |
|
| 270 |
- } |
|
| 271 |
- |
|
| 272 |
- close(slotQueue) |
|
| 273 |
- wg.Wait() |
|
| 274 |
- |
|
| 275 |
- if !stopped {
|
|
| 276 |
- // Keep watching for task failures for one more monitoringPeriod, |
|
| 277 |
- // before declaring the update complete. |
|
| 278 |
- doneMonitoring := time.After(monitoringPeriod) |
|
| 279 |
- monitorLoop: |
|
| 280 |
- for {
|
|
| 281 |
- select {
|
|
| 282 |
- case <-u.stopChan: |
|
| 283 |
- stopped = true |
|
| 284 |
- break monitorLoop |
|
| 285 |
- case <-doneMonitoring: |
|
| 286 |
- break monitorLoop |
|
| 287 |
- case ev := <-failedTaskWatch: |
|
| 288 |
- if failureTriggersAction(ev.(state.EventUpdateTask).Task) {
|
|
| 289 |
- break monitorLoop |
|
| 290 |
- } |
|
| 291 |
- } |
|
| 292 |
- } |
|
| 293 |
- } |
|
| 294 |
- |
|
| 295 |
- // TODO(aaronl): Potentially roll back the service if not enough tasks |
|
| 296 |
- // have reached RUNNING by this point. |
|
| 297 |
- |
|
| 298 |
- if !stopped {
|
|
| 299 |
- u.completeUpdate(ctx, service.ID) |
|
| 300 |
- } |
|
| 301 |
-} |
|
| 302 |
- |
|
| 303 |
-func (u *Updater) worker(ctx context.Context, queue <-chan slot) {
|
|
| 304 |
- for slot := range queue {
|
|
| 305 |
- // Do we have a task with the new spec in desired state = RUNNING? |
|
| 306 |
- // If so, all we have to do to complete the update is remove the |
|
| 307 |
- // other tasks. Or if we have a task with the new spec that has |
|
| 308 |
- // desired state < RUNNING, advance it to running and remove the |
|
| 309 |
- // other tasks. |
|
| 310 |
- var ( |
|
| 311 |
- runningTask *api.Task |
|
| 312 |
- cleanTask *api.Task |
|
| 313 |
- ) |
|
| 314 |
- for _, t := range slot {
|
|
| 315 |
- if !u.isTaskDirty(t) {
|
|
| 316 |
- if t.DesiredState == api.TaskStateRunning {
|
|
| 317 |
- runningTask = t |
|
| 318 |
- break |
|
| 319 |
- } |
|
| 320 |
- if t.DesiredState < api.TaskStateRunning {
|
|
| 321 |
- cleanTask = t |
|
| 322 |
- } |
|
| 323 |
- } |
|
| 324 |
- } |
|
| 325 |
- if runningTask != nil {
|
|
| 326 |
- if err := u.useExistingTask(ctx, slot, runningTask); err != nil {
|
|
| 327 |
- log.G(ctx).WithError(err).Error("update failed")
|
|
| 328 |
- } |
|
| 329 |
- } else if cleanTask != nil {
|
|
| 330 |
- if err := u.useExistingTask(ctx, slot, cleanTask); err != nil {
|
|
| 331 |
- log.G(ctx).WithError(err).Error("update failed")
|
|
| 332 |
- } |
|
| 333 |
- } else {
|
|
| 334 |
- updated := newTask(u.cluster, u.newService, slot[0].Slot, "") |
|
| 335 |
- if isGlobalService(u.newService) {
|
|
| 336 |
- updated = newTask(u.cluster, u.newService, slot[0].Slot, slot[0].NodeID) |
|
| 337 |
- } |
|
| 338 |
- updated.DesiredState = api.TaskStateReady |
|
| 339 |
- |
|
| 340 |
- if err := u.updateTask(ctx, slot, updated); err != nil {
|
|
| 341 |
- log.G(ctx).WithError(err).WithField("task.id", updated.ID).Error("update failed")
|
|
| 342 |
- } |
|
| 343 |
- } |
|
| 344 |
- |
|
| 345 |
- if u.newService.Spec.Update != nil && (u.newService.Spec.Update.Delay.Seconds != 0 || u.newService.Spec.Update.Delay.Nanos != 0) {
|
|
| 346 |
- delay, err := ptypes.Duration(&u.newService.Spec.Update.Delay) |
|
| 347 |
- if err != nil {
|
|
| 348 |
- log.G(ctx).WithError(err).Error("invalid update delay")
|
|
| 349 |
- continue |
|
| 350 |
- } |
|
| 351 |
- select {
|
|
| 352 |
- case <-time.After(delay): |
|
| 353 |
- case <-u.stopChan: |
|
| 354 |
- return |
|
| 355 |
- } |
|
| 356 |
- } |
|
| 357 |
- } |
|
| 358 |
-} |
|
| 359 |
- |
|
| 360 |
-func (u *Updater) updateTask(ctx context.Context, slot slot, updated *api.Task) error {
|
|
| 361 |
- // Kick off the watch before even creating the updated task. This is in order to avoid missing any event. |
|
| 362 |
- taskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{
|
|
| 363 |
- Task: &api.Task{ID: updated.ID},
|
|
| 364 |
- Checks: []state.TaskCheckFunc{state.TaskCheckID},
|
|
| 365 |
- }) |
|
| 366 |
- defer cancel() |
|
| 367 |
- |
|
| 368 |
- // Create an empty entry for this task, so the updater knows a failure |
|
| 369 |
- // should count towards the failure count. The timestamp is added |
|
| 370 |
- // if/when the task reaches RUNNING. |
|
| 371 |
- u.updatedTasksMu.Lock() |
|
| 372 |
- u.updatedTasks[updated.ID] = time.Time{}
|
|
| 373 |
- u.updatedTasksMu.Unlock() |
|
| 374 |
- |
|
| 375 |
- var delayStartCh <-chan struct{}
|
|
| 376 |
- // Atomically create the updated task and bring down the old one. |
|
| 377 |
- _, err := u.store.Batch(func(batch *store.Batch) error {
|
|
| 378 |
- oldTask, err := u.removeOldTasks(ctx, batch, slot) |
|
| 379 |
- if err != nil {
|
|
| 380 |
- return err |
|
| 381 |
- } |
|
| 382 |
- |
|
| 383 |
- err = batch.Update(func(tx store.Tx) error {
|
|
| 384 |
- if err := store.CreateTask(tx, updated); err != nil {
|
|
| 385 |
- return err |
|
| 386 |
- } |
|
| 387 |
- return nil |
|
| 388 |
- }) |
|
| 389 |
- if err != nil {
|
|
| 390 |
- return err |
|
| 391 |
- } |
|
| 392 |
- |
|
| 393 |
- delayStartCh = u.restarts.DelayStart(ctx, nil, oldTask, updated.ID, 0, true) |
|
| 394 |
- |
|
| 395 |
- return nil |
|
| 396 |
- |
|
| 397 |
- }) |
|
| 398 |
- if err != nil {
|
|
| 399 |
- return err |
|
| 400 |
- } |
|
| 401 |
- |
|
| 402 |
- if delayStartCh != nil {
|
|
| 403 |
- <-delayStartCh |
|
| 404 |
- } |
|
| 405 |
- |
|
| 406 |
- // Wait for the new task to come up. |
|
| 407 |
- // TODO(aluzzardi): Consider adding a timeout here. |
|
| 408 |
- for {
|
|
| 409 |
- select {
|
|
| 410 |
- case e := <-taskUpdates: |
|
| 411 |
- updated = e.(state.EventUpdateTask).Task |
|
| 412 |
- if updated.Status.State >= api.TaskStateRunning {
|
|
| 413 |
- u.updatedTasksMu.Lock() |
|
| 414 |
- u.updatedTasks[updated.ID] = time.Now() |
|
| 415 |
- u.updatedTasksMu.Unlock() |
|
| 416 |
- return nil |
|
| 417 |
- } |
|
| 418 |
- case <-u.stopChan: |
|
| 419 |
- return nil |
|
| 420 |
- } |
|
| 421 |
- } |
|
| 422 |
-} |
|
| 423 |
- |
|
| 424 |
-func (u *Updater) useExistingTask(ctx context.Context, slot slot, existing *api.Task) error {
|
|
| 425 |
- var removeTasks []*api.Task |
|
| 426 |
- for _, t := range slot {
|
|
| 427 |
- if t != existing {
|
|
| 428 |
- removeTasks = append(removeTasks, t) |
|
| 429 |
- } |
|
| 430 |
- } |
|
| 431 |
- if len(removeTasks) != 0 || existing.DesiredState != api.TaskStateRunning {
|
|
| 432 |
- var delayStartCh <-chan struct{}
|
|
| 433 |
- _, err := u.store.Batch(func(batch *store.Batch) error {
|
|
| 434 |
- var oldTask *api.Task |
|
| 435 |
- if len(removeTasks) != 0 {
|
|
| 436 |
- var err error |
|
| 437 |
- oldTask, err = u.removeOldTasks(ctx, batch, removeTasks) |
|
| 438 |
- if err != nil {
|
|
| 439 |
- return err |
|
| 440 |
- } |
|
| 441 |
- } |
|
| 442 |
- |
|
| 443 |
- if existing.DesiredState != api.TaskStateRunning {
|
|
| 444 |
- delayStartCh = u.restarts.DelayStart(ctx, nil, oldTask, existing.ID, 0, true) |
|
| 445 |
- } |
|
| 446 |
- return nil |
|
| 447 |
- }) |
|
| 448 |
- if err != nil {
|
|
| 449 |
- return err |
|
| 450 |
- } |
|
| 451 |
- |
|
| 452 |
- if delayStartCh != nil {
|
|
| 453 |
- <-delayStartCh |
|
| 454 |
- } |
|
| 455 |
- } |
|
| 456 |
- |
|
| 457 |
- return nil |
|
| 458 |
-} |
|
| 459 |
- |
|
| 460 |
-// removeOldTasks shuts down the given tasks and returns one of the tasks that |
|
| 461 |
-// was shut down, or an error. |
|
| 462 |
-func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, removeTasks []*api.Task) (*api.Task, error) {
|
|
| 463 |
- var ( |
|
| 464 |
- lastErr error |
|
| 465 |
- removedTask *api.Task |
|
| 466 |
- ) |
|
| 467 |
- for _, original := range removeTasks {
|
|
| 468 |
- err := batch.Update(func(tx store.Tx) error {
|
|
| 469 |
- t := store.GetTask(tx, original.ID) |
|
| 470 |
- if t == nil {
|
|
| 471 |
- return fmt.Errorf("task %s not found while trying to shut it down", original.ID)
|
|
| 472 |
- } |
|
| 473 |
- if t.DesiredState > api.TaskStateRunning {
|
|
| 474 |
- return fmt.Errorf("task %s was already shut down when reached by updater", original.ID)
|
|
| 475 |
- } |
|
| 476 |
- t.DesiredState = api.TaskStateShutdown |
|
| 477 |
- return store.UpdateTask(tx, t) |
|
| 478 |
- }) |
|
| 479 |
- if err != nil {
|
|
| 480 |
- lastErr = err |
|
| 481 |
- } else {
|
|
| 482 |
- removedTask = original |
|
| 483 |
- } |
|
| 484 |
- } |
|
| 485 |
- |
|
| 486 |
- if removedTask == nil {
|
|
| 487 |
- return nil, lastErr |
|
| 488 |
- } |
|
| 489 |
- return removedTask, nil |
|
| 490 |
-} |
|
| 491 |
- |
|
| 492 |
-func isTaskDirty(s *api.Service, t *api.Task) bool {
|
|
| 493 |
- return !reflect.DeepEqual(s.Spec.Task, t.Spec) || |
|
| 494 |
- (t.Endpoint != nil && !reflect.DeepEqual(s.Spec.Endpoint, t.Endpoint.Spec)) |
|
| 495 |
-} |
|
| 496 |
- |
|
| 497 |
-func (u *Updater) isTaskDirty(t *api.Task) bool {
|
|
| 498 |
- return isTaskDirty(u.newService, t) |
|
| 499 |
-} |
|
| 500 |
- |
|
| 501 |
-func (u *Updater) isSlotDirty(slot slot) bool {
|
|
| 502 |
- return len(slot) > 1 || (len(slot) == 1 && u.isTaskDirty(slot[0])) |
|
| 503 |
-} |
|
| 504 |
- |
|
| 505 |
-func (u *Updater) startUpdate(ctx context.Context, serviceID string) {
|
|
| 506 |
- err := u.store.Update(func(tx store.Tx) error {
|
|
| 507 |
- service := store.GetService(tx, serviceID) |
|
| 508 |
- if service == nil {
|
|
| 509 |
- return nil |
|
| 510 |
- } |
|
| 511 |
- if service.UpdateStatus != nil {
|
|
| 512 |
- return nil |
|
| 513 |
- } |
|
| 514 |
- |
|
| 515 |
- service.UpdateStatus = &api.UpdateStatus{
|
|
| 516 |
- State: api.UpdateStatus_UPDATING, |
|
| 517 |
- Message: "update in progress", |
|
| 518 |
- StartedAt: ptypes.MustTimestampProto(time.Now()), |
|
| 519 |
- } |
|
| 520 |
- |
|
| 521 |
- return store.UpdateService(tx, service) |
|
| 522 |
- }) |
|
| 523 |
- |
|
| 524 |
- if err != nil {
|
|
| 525 |
- log.G(ctx).WithError(err).Errorf("failed to mark update of service %s in progress", serviceID)
|
|
| 526 |
- } |
|
| 527 |
-} |
|
| 528 |
- |
|
| 529 |
-func (u *Updater) pauseUpdate(ctx context.Context, serviceID, message string) {
|
|
| 530 |
- log.G(ctx).Debugf("pausing update of service %s", serviceID)
|
|
| 531 |
- |
|
| 532 |
- err := u.store.Update(func(tx store.Tx) error {
|
|
| 533 |
- service := store.GetService(tx, serviceID) |
|
| 534 |
- if service == nil {
|
|
| 535 |
- return nil |
|
| 536 |
- } |
|
| 537 |
- if service.UpdateStatus == nil {
|
|
| 538 |
- // The service was updated since we started this update |
|
| 539 |
- return nil |
|
| 540 |
- } |
|
| 541 |
- |
|
| 542 |
- if service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
|
|
| 543 |
- service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_PAUSED |
|
| 544 |
- } else {
|
|
| 545 |
- service.UpdateStatus.State = api.UpdateStatus_PAUSED |
|
| 546 |
- } |
|
| 547 |
- service.UpdateStatus.Message = message |
|
| 548 |
- |
|
| 549 |
- return store.UpdateService(tx, service) |
|
| 550 |
- }) |
|
| 551 |
- |
|
| 552 |
- if err != nil {
|
|
| 553 |
- log.G(ctx).WithError(err).Errorf("failed to pause update of service %s", serviceID)
|
|
| 554 |
- } |
|
| 555 |
-} |
|
| 556 |
- |
|
| 557 |
-func (u *Updater) rollbackUpdate(ctx context.Context, serviceID, message string) {
|
|
| 558 |
- log.G(ctx).Debugf("starting rollback of service %s", serviceID)
|
|
| 559 |
- |
|
| 560 |
- var service *api.Service |
|
| 561 |
- err := u.store.Update(func(tx store.Tx) error {
|
|
| 562 |
- service = store.GetService(tx, serviceID) |
|
| 563 |
- if service == nil {
|
|
| 564 |
- return nil |
|
| 565 |
- } |
|
| 566 |
- if service.UpdateStatus == nil {
|
|
| 567 |
- // The service was updated since we started this update |
|
| 568 |
- return nil |
|
| 569 |
- } |
|
| 570 |
- |
|
| 571 |
- service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_STARTED |
|
| 572 |
- service.UpdateStatus.Message = message |
|
| 573 |
- |
|
| 574 |
- if service.PreviousSpec == nil {
|
|
| 575 |
- return errors.New("cannot roll back service because no previous spec is available")
|
|
| 576 |
- } |
|
| 577 |
- service.Spec = *service.PreviousSpec |
|
| 578 |
- service.PreviousSpec = nil |
|
| 579 |
- |
|
| 580 |
- return store.UpdateService(tx, service) |
|
| 581 |
- }) |
|
| 582 |
- |
|
| 583 |
- if err != nil {
|
|
| 584 |
- log.G(ctx).WithError(err).Errorf("failed to start rollback of service %s", serviceID)
|
|
| 585 |
- return |
|
| 586 |
- } |
|
| 587 |
-} |
|
| 588 |
- |
|
| 589 |
-func (u *Updater) completeUpdate(ctx context.Context, serviceID string) {
|
|
| 590 |
- log.G(ctx).Debugf("update of service %s complete", serviceID)
|
|
| 591 |
- |
|
| 592 |
- err := u.store.Update(func(tx store.Tx) error {
|
|
| 593 |
- service := store.GetService(tx, serviceID) |
|
| 594 |
- if service == nil {
|
|
| 595 |
- return nil |
|
| 596 |
- } |
|
| 597 |
- if service.UpdateStatus == nil {
|
|
| 598 |
- // The service was changed since we started this update |
|
| 599 |
- return nil |
|
| 600 |
- } |
|
| 601 |
- if service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
|
|
| 602 |
- service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_COMPLETED |
|
| 603 |
- service.UpdateStatus.Message = "rollback completed" |
|
| 604 |
- } else {
|
|
| 605 |
- service.UpdateStatus.State = api.UpdateStatus_COMPLETED |
|
| 606 |
- service.UpdateStatus.Message = "update completed" |
|
| 607 |
- } |
|
| 608 |
- service.UpdateStatus.CompletedAt = ptypes.MustTimestampProto(time.Now()) |
|
| 609 |
- |
|
| 610 |
- return store.UpdateService(tx, service) |
|
| 611 |
- }) |
|
| 612 |
- |
|
| 613 |
- if err != nil {
|
|
| 614 |
- log.G(ctx).WithError(err).Errorf("failed to mark update of service %s complete", serviceID)
|
|
| 615 |
- } |
|
| 616 |
-} |
| ... | ... |
@@ -1,6 +1,12 @@ |
| 1 | 1 |
package scheduler |
| 2 | 2 |
|
| 3 |
-import "github.com/docker/swarmkit/api" |
|
| 3 |
+import ( |
|
| 4 |
+ "time" |
|
| 5 |
+ |
|
| 6 |
+ "github.com/docker/swarmkit/api" |
|
| 7 |
+ "github.com/docker/swarmkit/log" |
|
| 8 |
+ "golang.org/x/net/context" |
|
| 9 |
+) |
|
| 4 | 10 |
|
| 5 | 11 |
// NodeInfo contains a node and some additional metadata. |
| 6 | 12 |
type NodeInfo struct {
|
| ... | ... |
@@ -9,6 +15,13 @@ type NodeInfo struct {
|
| 9 | 9 |
DesiredRunningTasksCount int |
| 10 | 10 |
DesiredRunningTasksCountByService map[string]int |
| 11 | 11 |
AvailableResources api.Resources |
| 12 |
+ |
|
| 13 |
+ // recentFailures is a map from service ID to the timestamps of the |
|
| 14 |
+ // most recent failures the node has experienced from replicas of that |
|
| 15 |
+ // service. |
|
| 16 |
+ // TODO(aaronl): When spec versioning is supported, this should track |
|
| 17 |
+ // the version of the spec that failed. |
|
| 18 |
+ recentFailures map[string][]time.Time |
|
| 12 | 19 |
} |
| 13 | 20 |
|
| 14 | 21 |
func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api.Resources) NodeInfo {
|
| ... | ... |
@@ -17,6 +30,7 @@ func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api |
| 17 | 17 |
Tasks: make(map[string]*api.Task), |
| 18 | 18 |
DesiredRunningTasksCountByService: make(map[string]int), |
| 19 | 19 |
AvailableResources: availableResources, |
| 20 |
+ recentFailures: make(map[string][]time.Time), |
|
| 20 | 21 |
} |
| 21 | 22 |
|
| 22 | 23 |
for _, t := range tasks {
|
| ... | ... |
@@ -28,9 +42,6 @@ func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api |
| 28 | 28 |
// removeTask removes a task from nodeInfo if it's tracked there, and returns true 
| 29 | 29 |
// if nodeInfo was modified. |
| 30 | 30 |
func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool {
|
| 31 |
- if nodeInfo.Tasks == nil {
|
|
| 32 |
- return false |
|
| 33 |
- } |
|
| 34 | 31 |
oldTask, ok := nodeInfo.Tasks[t.ID] |
| 35 | 32 |
if !ok {
|
| 36 | 33 |
return false |
| ... | ... |
@@ -52,13 +63,6 @@ func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool {
|
| 52 | 52 |
// addTask adds or updates a task on nodeInfo, and returns true if nodeInfo was |
| 53 | 53 |
// modified. |
| 54 | 54 |
func (nodeInfo *NodeInfo) addTask(t *api.Task) bool {
|
| 55 |
- if nodeInfo.Tasks == nil {
|
|
| 56 |
- nodeInfo.Tasks = make(map[string]*api.Task) |
|
| 57 |
- } |
|
| 58 |
- if nodeInfo.DesiredRunningTasksCountByService == nil {
|
|
| 59 |
- nodeInfo.DesiredRunningTasksCountByService = make(map[string]int) |
|
| 60 |
- } |
|
| 61 |
- |
|
| 62 | 55 |
oldTask, ok := nodeInfo.Tasks[t.ID] |
| 63 | 56 |
if ok {
|
| 64 | 57 |
if t.DesiredState == api.TaskStateRunning && oldTask.DesiredState != api.TaskStateRunning {
|
| ... | ... |
@@ -94,3 +98,35 @@ func taskReservations(spec api.TaskSpec) (reservations api.Resources) {
|
| 94 | 94 |
} |
| 95 | 95 |
return |
| 96 | 96 |
} |
| 97 |
+ |
|
| 98 |
+// taskFailed records a task failure from a given service. |
|
| 99 |
+func (nodeInfo *NodeInfo) taskFailed(ctx context.Context, serviceID string) {
|
|
| 100 |
+ expired := 0 |
|
| 101 |
+ now := time.Now() |
|
| 102 |
+ for _, timestamp := range nodeInfo.recentFailures[serviceID] {
|
|
| 103 |
+ if now.Sub(timestamp) < monitorFailures {
|
|
| 104 |
+ break |
|
| 105 |
+ } |
|
| 106 |
+ expired++ |
|
| 107 |
+ } |
|
| 108 |
+ |
|
| 109 |
+ if len(nodeInfo.recentFailures[serviceID])-expired == maxFailures-1 {
|
|
| 110 |
+ log.G(ctx).Warnf("underweighting node %s for service %s because it experienced %d failures or rejections within %s", nodeInfo.ID, serviceID, maxFailures, monitorFailures.String())
|
|
| 111 |
+ } |
|
| 112 |
+ |
|
| 113 |
+ nodeInfo.recentFailures[serviceID] = append(nodeInfo.recentFailures[serviceID][expired:], now) |
|
| 114 |
+} |
|
| 115 |
+ |
|
| 116 |
+// countRecentFailures returns the number of times the service has failed on |
|
| 117 |
+// this node within the lookback window monitorFailures. |
|
| 118 |
+func (nodeInfo *NodeInfo) countRecentFailures(now time.Time, serviceID string) int {
|
|
| 119 |
+ recentFailureCount := len(nodeInfo.recentFailures[serviceID]) |
|
| 120 |
+ for i := recentFailureCount - 1; i >= 0; i-- {
|
|
| 121 |
+ if now.Sub(nodeInfo.recentFailures[serviceID][i]) > monitorFailures {
|
|
| 122 |
+ recentFailureCount -= i + 1 |
|
| 123 |
+ break |
|
| 124 |
+ } |
|
| 125 |
+ } |
|
| 126 |
+ |
|
| 127 |
+ return recentFailureCount |
|
| 128 |
+} |
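
taskFailed and countRecentFailures keep, per service, an ordered slice of failure timestamps: appending prunes entries older than monitorFailures, and counting walks backwards from the newest entry and stops at the first one outside the window. A self-contained sketch of the same bookkeeping; the window and threshold mirror the 5-minute / 5-failure constants added in this diff, but the types are simplified:

```go
package main

import (
	"fmt"
	"time"
)

const (
	window      = 5 * time.Minute
	maxFailures = 5
)

type failureLog struct {
	recent map[string][]time.Time // service ID -> failure timestamps, oldest first
}

// record drops expired entries and appends the new failure, like taskFailed.
func (f *failureLog) record(serviceID string, now time.Time) {
	expired := 0
	for _, ts := range f.recent[serviceID] {
		if now.Sub(ts) < window {
			break
		}
		expired++
	}
	f.recent[serviceID] = append(f.recent[serviceID][expired:], now)
}

// count returns how many recorded failures still fall inside the window,
// like countRecentFailures.
func (f *failureLog) count(serviceID string, now time.Time) int {
	n := len(f.recent[serviceID])
	for i := n - 1; i >= 0; i-- {
		if now.Sub(f.recent[serviceID][i]) > window {
			return n - (i + 1)
		}
	}
	return n
}

func main() {
	f := &failureLog{recent: make(map[string][]time.Time)}
	now := time.Now()
	f.record("svc", now.Add(-10*time.Minute)) // already outside the window
	f.record("svc", now.Add(-1*time.Minute))
	f.record("svc", now)
	fmt.Println(f.count("svc", now), f.count("svc", now) >= maxFailures) // 2 false
}
```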
| ... | ... |
@@ -3,6 +3,9 @@ package scheduler |
| 3 | 3 |
import ( |
| 4 | 4 |
"container/heap" |
| 5 | 5 |
"errors" |
| 6 |
+ "time" |
|
| 7 |
+ |
|
| 8 |
+ "github.com/docker/swarmkit/api" |
|
| 6 | 9 |
) |
| 7 | 10 |
|
| 8 | 11 |
var errNodeNotFound = errors.New("node not found in scheduler dataset")
|
| ... | ... |
@@ -27,6 +30,16 @@ func (ns *nodeSet) nodeInfo(nodeID string) (NodeInfo, error) {
|
| 27 | 27 |
// addOrUpdateNode sets the number of tasks for a given node. It adds the node |
| 28 | 28 |
// to the set if it wasn't already tracked. |
| 29 | 29 |
func (ns *nodeSet) addOrUpdateNode(n NodeInfo) {
|
| 30 |
+ if n.Tasks == nil {
|
|
| 31 |
+ n.Tasks = make(map[string]*api.Task) |
|
| 32 |
+ } |
|
| 33 |
+ if n.DesiredRunningTasksCountByService == nil {
|
|
| 34 |
+ n.DesiredRunningTasksCountByService = make(map[string]int) |
|
| 35 |
+ } |
|
| 36 |
+ if n.recentFailures == nil {
|
|
| 37 |
+ n.recentFailures = make(map[string][]time.Time) |
|
| 38 |
+ } |
|
| 39 |
+ |
|
| 30 | 40 |
ns.nodes[n.ID] = n |
| 31 | 41 |
} |
| 32 | 42 |
|
| ... | ... |
@@ -12,6 +12,16 @@ import ( |
| 12 | 12 |
"golang.org/x/net/context" |
| 13 | 13 |
) |
| 14 | 14 |
|
| 15 |
+const ( |
|
| 16 |
+ // monitorFailures is the lookback period for counting failures of |
|
| 17 |
+ // a task to determine if a node is faulty for a particular service. |
|
| 18 |
+ monitorFailures = 5 * time.Minute |
|
| 19 |
+ |
|
| 20 |
+ // maxFailures is the number of failures within monitorFailures that |
|
| 21 |
+ // triggers downweighting of a node in the sorting function. |
|
| 22 |
+ maxFailures = 5 |
|
| 23 |
+) |
|
| 24 |
+ |
|
| 15 | 25 |
type schedulingDecision struct {
|
| 16 | 26 |
old *api.Task |
| 17 | 27 |
new *api.Task |
| ... | ... |
@@ -54,9 +64,9 @@ func (s *Scheduler) setupTasksList(tx store.ReadTx) error {
|
| 54 | 54 |
|
| 55 | 55 |
tasksByNode := make(map[string]map[string]*api.Task) |
| 56 | 56 |
for _, t := range tasks {
|
| 57 |
- // Ignore all tasks that have not reached ALLOCATED |
|
| 57 |
+ // Ignore all tasks that have not reached PENDING |
|
| 58 | 58 |
// state and tasks that no longer consume resources. |
| 59 |
- if t.Status.State < api.TaskStateAllocated || t.Status.State > api.TaskStateRunning {
|
|
| 59 |
+ if t.Status.State < api.TaskStatePending || t.Status.State > api.TaskStateRunning {
|
|
| 60 | 60 |
continue |
| 61 | 61 |
} |
| 62 | 62 |
|
| ... | ... |
@@ -66,7 +76,7 @@ func (s *Scheduler) setupTasksList(tx store.ReadTx) error {
|
| 66 | 66 |
continue |
| 67 | 67 |
} |
| 68 | 68 |
// preassigned tasks need to validate resource requirement on corresponding node |
| 69 |
- if t.Status.State == api.TaskStateAllocated {
|
|
| 69 |
+ if t.Status.State == api.TaskStatePending {
|
|
| 70 | 70 |
s.preassignedTasks[t.ID] = t |
| 71 | 71 |
continue |
| 72 | 72 |
} |
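
With this change the scheduler only tracks tasks whose state lies in the [PENDING, RUNNING] range, and a task sitting exactly at PENDING is treated as preassigned: it already has a node and only needs a resource check. A small sketch of the gating, with the state ordering reduced to integers:

```go
package main

import "fmt"

// Task states in increasing order, loosely mirroring api.TaskState.
const (
	stateNew = iota
	statePending
	stateAssigned
	stateRunning
	stateShutdown
)

type task struct {
	id    string
	state int
}

// classify mirrors setupTasksList/createTask: ignore tasks outside
// [PENDING, RUNNING], and treat tasks exactly at PENDING as preassigned.
func classify(t task) string {
	switch {
	case t.state < statePending || t.state > stateRunning:
		return "ignored"
	case t.state == statePending:
		return "preassigned"
	default:
		return "tracked"
	}
}

func main() {
	fmt.Println(classify(task{id: "a", state: stateNew}))      // ignored
	fmt.Println(classify(task{id: "b", state: statePending}))  // preassigned
	fmt.Println(classify(task{id: "c", state: stateRunning}))  // tracked
	fmt.Println(classify(task{id: "d", state: stateShutdown})) // ignored
}
```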
| ... | ... |
@@ -185,9 +195,9 @@ func (s *Scheduler) enqueue(t *api.Task) {
|
| 185 | 185 |
} |
| 186 | 186 |
|
| 187 | 187 |
func (s *Scheduler) createTask(ctx context.Context, t *api.Task) int {
|
| 188 |
- // Ignore all tasks that have not reached ALLOCATED |
|
| 188 |
+ // Ignore all tasks that have not reached PENDING |
|
| 189 | 189 |
// state, and tasks that no longer consume resources. |
| 190 |
- if t.Status.State < api.TaskStateAllocated || t.Status.State > api.TaskStateRunning {
|
|
| 190 |
+ if t.Status.State < api.TaskStatePending || t.Status.State > api.TaskStateRunning {
|
|
| 191 | 191 |
return 0 |
| 192 | 192 |
} |
| 193 | 193 |
|
| ... | ... |
@@ -198,7 +208,7 @@ func (s *Scheduler) createTask(ctx context.Context, t *api.Task) int {
|
| 198 | 198 |
return 1 |
| 199 | 199 |
} |
| 200 | 200 |
|
| 201 |
- if t.Status.State == api.TaskStateAllocated {
|
|
| 201 |
+ if t.Status.State == api.TaskStatePending {
|
|
| 202 | 202 |
s.preassignedTasks[t.ID] = t |
| 203 | 203 |
// preassigned tasks do not contribute to running tasks count |
| 204 | 204 |
return 0 |
| ... | ... |
@@ -213,9 +223,9 @@ func (s *Scheduler) createTask(ctx context.Context, t *api.Task) int {
|
| 213 | 213 |
} |
| 214 | 214 |
|
| 215 | 215 |
func (s *Scheduler) updateTask(ctx context.Context, t *api.Task) int {
|
| 216 |
- // Ignore all tasks that have not reached ALLOCATED |
|
| 216 |
+ // Ignore all tasks that have not reached PENDING |
|
| 217 | 217 |
// state. |
| 218 |
- if t.Status.State < api.TaskStateAllocated {
|
|
| 218 |
+ if t.Status.State < api.TaskStatePending {
|
|
| 219 | 219 |
return 0 |
| 220 | 220 |
} |
| 221 | 221 |
|
| ... | ... |
@@ -224,8 +234,17 @@ func (s *Scheduler) updateTask(ctx context.Context, t *api.Task) int {
|
| 224 | 224 |
// Ignore all tasks that have not reached ALLOCATED |
| 225 | 225 |
// state, and tasks that no longer consume resources. |
| 226 | 226 |
if t.Status.State > api.TaskStateRunning {
|
| 227 |
- if oldTask != nil {
|
|
| 228 |
- s.deleteTask(ctx, oldTask) |
|
| 227 |
+ if oldTask == nil {
|
|
| 228 |
+ return 1 |
|
| 229 |
+ } |
|
| 230 |
+ s.deleteTask(ctx, oldTask) |
|
| 231 |
+ if t.Status.State != oldTask.Status.State && |
|
| 232 |
+ (t.Status.State == api.TaskStateFailed || t.Status.State == api.TaskStateRejected) {
|
|
| 233 |
+ nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID) |
|
| 234 |
+ if err == nil {
|
|
| 235 |
+ nodeInfo.taskFailed(ctx, t.ServiceID) |
|
| 236 |
+ s.nodeSet.updateNode(nodeInfo) |
|
| 237 |
+ } |
|
| 229 | 238 |
} |
| 230 | 239 |
return 1 |
| 231 | 240 |
} |
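
Note that the update path above only charges a failure to the node (via taskFailed and updateNode) when the state actually changed and the new state is FAILED or REJECTED, so repeated updates of an already-failed task are not double-counted. A small sketch of that guard with hypothetical local types:

```go
package main

import "fmt"

type taskState int

const (
	stateRunning taskState = iota
	stateFailed
	stateRejected
)

// isNewFailure mirrors the guard: count a failure only on a transition into a
// failure state, not when a task merely reports the same failed state again.
func isNewFailure(oldState, newState taskState) bool {
	return newState != oldState && (newState == stateFailed || newState == stateRejected)
}

func main() {
	fmt.Println(isNewFailure(stateRunning, stateFailed))   // true: fresh failure
	fmt.Println(isNewFailure(stateFailed, stateFailed))    // false: no transition
	fmt.Println(isNewFailure(stateRunning, stateRejected)) // true
}
```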
| ... | ... |
@@ -240,7 +259,7 @@ func (s *Scheduler) updateTask(ctx context.Context, t *api.Task) int {
|
| 240 | 240 |
return 1 |
| 241 | 241 |
} |
| 242 | 242 |
|
| 243 |
- if t.Status.State == api.TaskStateAllocated {
|
|
| 243 |
+ if t.Status.State == api.TaskStatePending {
|
|
| 244 | 244 |
if oldTask != nil {
|
| 245 | 245 |
s.deleteTask(ctx, oldTask) |
| 246 | 246 |
} |
| ... | ... |
@@ -481,7 +500,23 @@ func (s *Scheduler) scheduleTaskGroup(ctx context.Context, taskGroup map[string] |
| 481 | 481 |
|
| 482 | 482 |
s.pipeline.SetTask(t) |
| 483 | 483 |
|
| 484 |
+ now := time.Now() |
|
| 485 |
+ |
|
| 484 | 486 |
nodeLess := func(a *NodeInfo, b *NodeInfo) bool {
|
| 487 |
+ // If either node has at least maxFailures recent failures, |
|
| 488 |
+ // that's the deciding factor. |
|
| 489 |
+ recentFailuresA := a.countRecentFailures(now, t.ServiceID) |
|
| 490 |
+ recentFailuresB := b.countRecentFailures(now, t.ServiceID) |
|
| 491 |
+ |
|
| 492 |
+ if recentFailuresA >= maxFailures || recentFailuresB >= maxFailures {
|
|
| 493 |
+ if recentFailuresA > recentFailuresB {
|
|
| 494 |
+ return false |
|
| 495 |
+ } |
|
| 496 |
+ if recentFailuresB > recentFailuresA {
|
|
| 497 |
+ return true |
|
| 498 |
+ } |
|
| 499 |
+ } |
|
| 500 |
+ |
|
| 485 | 501 |
tasksByServiceA := a.DesiredRunningTasksCountByService[t.ServiceID] |
| 486 | 502 |
tasksByServiceB := b.DesiredRunningTasksCountByService[t.ServiceID] |
| 487 | 503 |
|
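
Putting the pieces together, the comparator first looks at recent failures: once either candidate has reached maxFailures for the service, the node with fewer failures wins outright; otherwise the existing ordering (fewer desired tasks of this service, then fewer tasks overall) applies. A standalone sketch of that two-tier ordering with a hypothetical node type:

```go
package main

import (
	"fmt"
	"sort"
)

const maxFailures = 5

type candidate struct {
	id              string
	recentFailures  int // failures of this service within the lookback window
	tasksForService int // desired running tasks of this service on the node
	totalTasks      int
}

// less mirrors the layered comparison: the failure count dominates once either
// node is at or over the threshold, then service-level spread, then load.
func less(a, b candidate) bool {
	if a.recentFailures >= maxFailures || b.recentFailures >= maxFailures {
		if a.recentFailures != b.recentFailures {
			return a.recentFailures < b.recentFailures
		}
	}
	if a.tasksForService != b.tasksForService {
		return a.tasksForService < b.tasksForService
	}
	return a.totalTasks < b.totalTasks
}

func main() {
	nodes := []candidate{
		{id: "flaky", recentFailures: 6, tasksForService: 0, totalTasks: 1},
		{id: "busy", recentFailures: 0, tasksForService: 2, totalTasks: 9},
		{id: "idle", recentFailures: 0, tasksForService: 0, totalTasks: 3},
	}
	sort.Slice(nodes, func(i, j int) bool { return less(nodes[i], nodes[j]) })
	for _, n := range nodes {
		fmt.Println(n.id)
	}
	// idle and busy sort ahead of flaky even though flaky is the least loaded
}
```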
| ... | ... |
@@ -755,6 +755,9 @@ func (n *Node) Leave(ctx context.Context, req *api.LeaveRequest) (*api.LeaveResp |
| 755 | 755 |
return nil, err |
| 756 | 756 |
} |
| 757 | 757 |
|
| 758 |
+ ctx, cancel := n.WithContext(ctx) |
|
| 759 |
+ defer cancel() |
|
| 760 |
+ |
|
| 758 | 761 |
fields := logrus.Fields{
|
| 759 | 762 |
"node.id": nodeInfo.NodeID, |
| 760 | 763 |
"method": "(*Node).Leave", |
| ... | ... |
@@ -765,20 +768,7 @@ func (n *Node) Leave(ctx context.Context, req *api.LeaveRequest) (*api.LeaveResp |
| 765 | 765 |
} |
| 766 | 766 |
log.G(ctx).WithFields(fields).Debug("")
|
| 767 | 767 |
|
| 768 |
- // can't stop the raft node while an async RPC is in progress |
|
| 769 |
- n.stopMu.RLock() |
|
| 770 |
- defer n.stopMu.RUnlock() |
|
| 771 |
- |
|
| 772 |
- if !n.IsMember() {
|
|
| 773 |
- return nil, ErrNoRaftMember |
|
| 774 |
- } |
|
| 775 |
- |
|
| 776 |
- if !n.isLeader() {
|
|
| 777 |
- return nil, ErrLostLeadership |
|
| 778 |
- } |
|
| 779 |
- |
|
| 780 |
- err = n.RemoveMember(ctx, req.Node.RaftID) |
|
| 781 |
- if err != nil {
|
|
| 768 |
+ if err := n.removeMember(ctx, req.Node.RaftID); err != nil {
|
|
| 782 | 769 |
return nil, err |
| 783 | 770 |
} |
| 784 | 771 |
|
| ... | ... |
@@ -791,12 +781,21 @@ func (n *Node) CanRemoveMember(id uint64) bool {
|
| 791 | 791 |
return n.cluster.CanRemoveMember(n.Config.ID, id) |
| 792 | 792 |
} |
| 793 | 793 |
|
| 794 |
-// RemoveMember submits a configuration change to remove a member from the raft cluster |
|
| 795 |
-// after checking if the operation would not result in a loss of quorum. |
|
| 796 |
-func (n *Node) RemoveMember(ctx context.Context, id uint64) error {
|
|
| 794 |
+func (n *Node) removeMember(ctx context.Context, id uint64) error {
|
|
| 795 |
+ // can't stop the raft node while an async RPC is in progress |
|
| 796 |
+ n.stopMu.RLock() |
|
| 797 |
+ defer n.stopMu.RUnlock() |
|
| 798 |
+ |
|
| 799 |
+ if !n.IsMember() {
|
|
| 800 |
+ return ErrNoRaftMember |
|
| 801 |
+ } |
|
| 802 |
+ |
|
| 803 |
+ if !n.isLeader() {
|
|
| 804 |
+ return ErrLostLeadership |
|
| 805 |
+ } |
|
| 806 |
+ |
|
| 797 | 807 |
n.membershipLock.Lock() |
| 798 | 808 |
defer n.membershipLock.Unlock() |
| 799 |
- |
|
| 800 | 809 |
if n.cluster.CanRemoveMember(n.Config.ID, id) {
|
| 801 | 810 |
cc := raftpb.ConfChange{
|
| 802 | 811 |
ID: id, |
| ... | ... |
@@ -804,15 +803,21 @@ func (n *Node) RemoveMember(ctx context.Context, id uint64) error {
|
| 804 | 804 |
NodeID: id, |
| 805 | 805 |
Context: []byte(""),
|
| 806 | 806 |
} |
| 807 |
- ctx, cancel := n.WithContext(ctx) |
|
| 808 | 807 |
err := n.configure(ctx, cc) |
| 809 |
- cancel() |
|
| 810 | 808 |
return err |
| 811 | 809 |
} |
| 812 | 810 |
|
| 813 | 811 |
return ErrCannotRemoveMember |
| 814 | 812 |
} |
| 815 | 813 |
|
| 814 |
+// RemoveMember submits a configuration change to remove a member from the raft cluster |
|
| 815 |
+// after checking if the operation would not result in a loss of quorum. |
|
| 816 |
+func (n *Node) RemoveMember(ctx context.Context, id uint64) error {
|
|
| 817 |
+ ctx, cancel := n.WithContext(ctx) |
|
| 818 |
+ defer cancel() |
|
| 819 |
+ return n.removeMember(ctx, id) |
|
| 820 |
+} |
|
| 821 |
+ |
|
| 816 | 822 |
// ProcessRaftMessage calls 'Step' which advances the |
| 817 | 823 |
// raft state machine with the provided message on the |
| 818 | 824 |
// receiving node |
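
The refactor splits RemoveMember into an exported wrapper that derives a cancellable, node-scoped context and an unexported removeMember that takes the locks and performs the membership and quorum checks; Leave can then call removeMember with the context it has already wrapped instead of wrapping twice. A generic sketch of that wrapper pattern; the types and names here are illustrative, not the raft package's API:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

var errNotLeader = errors.New("not the leader")

type node struct {
	mu     sync.Mutex
	leader bool
	peers  map[uint64]bool
}

// removeMember does the real work and assumes the caller has already scoped
// the context appropriately (mirrors the unexported helper in the diff).
func (n *node) removeMember(ctx context.Context, id uint64) error {
	n.mu.Lock()
	defer n.mu.Unlock()
	if !n.leader {
		return errNotLeader
	}
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	delete(n.peers, id)
	return nil
}

// RemoveMember is the exported entry point: it attaches cancellation and then
// delegates, so internal callers that already hold a scoped context can call
// removeMember directly without double-wrapping.
func (n *node) RemoveMember(ctx context.Context, id uint64) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	return n.removeMember(ctx, id)
}

func main() {
	n := &node{leader: true, peers: map[uint64]bool{1: true, 2: true}}
	fmt.Println(n.RemoveMember(context.Background(), 2), len(n.peers))
}
```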
| ... | ... |
@@ -240,7 +240,7 @@ func (s *MemoryStore) update(proposer state.Proposer, cb func(Tx) error) error {
|
| 240 | 240 |
sa, err = tx.changelistStoreActions() |
| 241 | 241 |
|
| 242 | 242 |
if err == nil {
|
| 243 |
- if sa != nil {
|
|
| 243 |
+ if len(sa) != 0 {
|
|
| 244 | 244 |
err = proposer.ProposeValue(context.Background(), sa, func() {
|
| 245 | 245 |
memDBTx.Commit() |
| 246 | 246 |
}) |
| ... | ... |
@@ -350,7 +350,7 @@ func (batch *Batch) commit() error {
|
| 350 | 350 |
sa, batch.err = batch.tx.changelistStoreActions() |
| 351 | 351 |
|
| 352 | 352 |
if batch.err == nil {
|
| 353 |
- if sa != nil {
|
|
| 353 |
+ if len(sa) != 0 {
|
|
| 354 | 354 |
batch.err = batch.store.proposer.ProposeValue(context.Background(), sa, func() {
|
| 355 | 355 |
batch.tx.memDBTx.Commit() |
| 356 | 356 |
}) |
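
Switching the guard from `sa != nil` to `len(sa) != 0` matters because a Go slice can be non-nil yet empty, and an empty change list would still be handed to ProposeValue under the old check. A quick illustration:

```go
package main

import "fmt"

func main() {
	var nilActions []string           // nil and empty
	emptyActions := make([]string, 0) // non-nil but still empty

	fmt.Println(nilActions != nil, len(nilActions) != 0)     // false false
	fmt.Println(emptyActions != nil, len(emptyActions) != 0) // true  false

	// Only the len check skips both cases; the nil check would still
	// propose an empty batch for emptyActions.
}
```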
| ... | ... |
@@ -1,11 +1,11 @@ |
| 1 | 1 |
package store |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
- "fmt" |
|
| 5 | 4 |
"strconv" |
| 6 | 5 |
"strings" |
| 7 | 6 |
|
| 8 | 7 |
"github.com/docker/swarmkit/api" |
| 8 |
+ "github.com/docker/swarmkit/api/naming" |
|
| 9 | 9 |
"github.com/docker/swarmkit/manager/state" |
| 10 | 10 |
memdb "github.com/hashicorp/go-memdb" |
| 11 | 11 |
) |
| ... | ... |
@@ -112,26 +112,6 @@ func init() {
|
| 112 | 112 |
}) |
| 113 | 113 |
} |
| 114 | 114 |
|
| 115 |
-// TaskName returns the task name from Annotations.Name, |
|
| 116 |
-// and, in case Annotations.Name is missing, fallback |
|
| 117 |
-// to construct the name from othere information. |
|
| 118 |
-func TaskName(t *api.Task) string {
|
|
| 119 |
- name := t.Annotations.Name |
|
| 120 |
- if name == "" {
|
|
| 121 |
- // If Task name is not assigned then calculated name is used like before. |
|
| 122 |
- // This might be removed in the future. |
|
| 123 |
- // We use the following scheme for Task name: |
|
| 124 |
- // Name := <ServiceAnnotations.Name>.<Slot>.<TaskID> (replicated mode) |
|
| 125 |
- // := <ServiceAnnotations.Name>.<NodeID>.<TaskID> (global mode) |
|
| 126 |
- if t.Slot != 0 {
|
|
| 127 |
- name = fmt.Sprintf("%v.%v.%v", t.ServiceAnnotations.Name, t.Slot, t.ID)
|
|
| 128 |
- } else {
|
|
| 129 |
- name = fmt.Sprintf("%v.%v.%v", t.ServiceAnnotations.Name, t.NodeID, t.ID)
|
|
| 130 |
- } |
|
| 131 |
- } |
|
| 132 |
- return name |
|
| 133 |
-} |
|
| 134 |
- |
|
| 135 | 115 |
type taskEntry struct {
|
| 136 | 116 |
*api.Task |
| 137 | 117 |
} |
| ... | ... |
@@ -245,7 +225,7 @@ func (ti taskIndexerByName) FromObject(obj interface{}) (bool, []byte, error) {
|
| 245 | 245 |
panic("unexpected type passed to FromObject")
|
| 246 | 246 |
} |
| 247 | 247 |
|
| 248 |
- name := TaskName(t.Task) |
|
| 248 |
+ name := naming.Task(t.Task) |
|
| 249 | 249 |
|
| 250 | 250 |
// Add the null character as a terminator |
| 251 | 251 |
return true, []byte(strings.ToLower(name) + "\x00"), nil |
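
The removed TaskName helper is replaced by naming.Task from the new api/naming import, so the store index and other callers derive the same display name. The scheme it encodes, sketched with a hypothetical standalone function (the real one reads these fields from api.Task):

```go
package main

import "fmt"

// taskName reproduces the documented fallback scheme:
//   <service>.<slot>.<taskID>   for replicated services
//   <service>.<nodeID>.<taskID> for global services
// preferring an explicit annotation name when one is set.
func taskName(annotation, service, taskID, nodeID string, slot uint64) string {
	if annotation != "" {
		return annotation
	}
	if slot != 0 {
		return fmt.Sprintf("%v.%v.%v", service, slot, taskID)
	}
	return fmt.Sprintf("%v.%v.%v", service, nodeID, taskID)
}

func main() {
	fmt.Println(taskName("", "web", "8pxkfm", "", 3))         // web.3.8pxkfm
	fmt.Println(taskName("", "agent", "1qa2ws", "node-1", 0)) // agent.node-1.1qa2ws
}
```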
| ... | ... |
@@ -563,12 +563,17 @@ func Watch(queue *watch.Queue, specifiers ...Event) (eventq chan events.Event, c |
| 563 | 563 |
if len(specifiers) == 0 {
|
| 564 | 564 |
return queue.Watch() |
| 565 | 565 |
} |
| 566 |
- return queue.CallbackWatch(events.MatcherFunc(func(event events.Event) bool {
|
|
| 566 |
+ return queue.CallbackWatch(Matcher(specifiers...)) |
|
| 567 |
+} |
|
| 568 |
+ |
|
| 569 |
+// Matcher returns an events.Matcher that matches the specifiers with OR logic. |
|
| 570 |
+func Matcher(specifiers ...Event) events.MatcherFunc {
|
|
| 571 |
+ return events.MatcherFunc(func(event events.Event) bool {
|
|
| 567 | 572 |
for _, s := range specifiers {
|
| 568 | 573 |
if s.matches(event) {
|
| 569 | 574 |
return true |
| 570 | 575 |
} |
| 571 | 576 |
} |
| 572 | 577 |
return false |
| 573 |
- })) |
|
| 578 |
+ }) |
|
| 574 | 579 |
} |
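
Extracting Matcher lets other packages reuse the same OR-composition of event specifiers that Watch passes to CallbackWatch. A simplified standalone sketch of that composition; plain predicate functions stand in here for the store's Event specifiers and the go-events matcher types:

```go
package main

import "fmt"

type event interface{}

type predicate func(event) bool

// anyOf returns a predicate that matches when any of the given specifiers
// matches, which is the OR logic Matcher provides to CallbackWatch.
func anyOf(specifiers ...predicate) predicate {
	return func(ev event) bool {
		for _, s := range specifiers {
			if s(ev) {
				return true
			}
		}
		return false
	}
}

func main() {
	isString := func(ev event) bool { _, ok := ev.(string); return ok }
	isInt := func(ev event) bool { _, ok := ev.(int); return ok }

	m := anyOf(isString, isInt)
	fmt.Println(m("task-updated"), m(42), m(3.14)) // true true false
}
```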
| ... | ... |
@@ -104,7 +104,7 @@ type Node struct {
|
| 104 | 104 |
err error |
| 105 | 105 |
agent *agent.Agent |
| 106 | 106 |
manager *manager.Manager |
| 107 |
- roleChangeReq chan api.NodeRole // used to send role updates from the dispatcher api on promotion/demotion |
|
| 107 |
+ notifyNodeChange chan *api.Node // used to send role updates from the dispatcher api on promotion/demotion |
|
| 108 | 108 |
} |
| 109 | 109 |
|
| 110 | 110 |
// RemoteAPIAddr returns address on which remote manager api listens. |
| ... | ... |
@@ -148,7 +148,7 @@ func New(c *Config) (*Node, error) {
|
| 148 | 148 |
closed: make(chan struct{}),
|
| 149 | 149 |
ready: make(chan struct{}),
|
| 150 | 150 |
certificateRequested: make(chan struct{}),
|
| 151 |
- roleChangeReq: make(chan api.NodeRole, 1), |
|
| 151 |
+ notifyNodeChange: make(chan *api.Node, 1), |
|
| 152 | 152 |
} |
| 153 | 153 |
n.roleCond = sync.NewCond(n.RLocker()) |
| 154 | 154 |
n.connCond = sync.NewCond(n.RLocker()) |
| ... | ... |
@@ -248,16 +248,29 @@ func (n *Node) run(ctx context.Context) (err error) {
|
| 248 | 248 |
} |
| 249 | 249 |
|
| 250 | 250 |
forceCertRenewal := make(chan struct{})
|
| 251 |
+ renewCert := func() {
|
|
| 252 |
+ select {
|
|
| 253 |
+ case forceCertRenewal <- struct{}{}:
|
|
| 254 |
+ case <-ctx.Done(): |
|
| 255 |
+ } |
|
| 256 |
+ } |
|
| 257 |
+ |
|
| 251 | 258 |
go func() {
|
| 252 | 259 |
for {
|
| 253 | 260 |
select {
|
| 254 | 261 |
case <-ctx.Done(): |
| 255 | 262 |
return |
| 256 |
- case apirole := <-n.roleChangeReq: |
|
| 263 |
+ case node := <-n.notifyNodeChange: |
|
| 264 |
+ // If the server is sending us a ForceRenewal State, renew |
|
| 265 |
+ if node.Certificate.Status.State == api.IssuanceStateRotate {
|
|
| 266 |
+ renewCert() |
|
| 267 |
+ continue |
|
| 268 |
+ } |
|
| 257 | 269 |
n.Lock() |
| 270 |
+ // If we got a role change, renew |
|
| 258 | 271 |
lastRole := n.role |
| 259 | 272 |
role := ca.WorkerRole |
| 260 |
- if apirole == api.NodeRoleManager {
|
|
| 273 |
+ if node.Spec.Role == api.NodeRoleManager {
|
|
| 261 | 274 |
role = ca.ManagerRole |
| 262 | 275 |
} |
| 263 | 276 |
if lastRole == role {
|
| ... | ... |
@@ -270,11 +283,7 @@ func (n *Node) run(ctx context.Context) (err error) {
|
| 270 | 270 |
n.roleCond.Broadcast() |
| 271 | 271 |
} |
| 272 | 272 |
n.Unlock() |
| 273 |
- select {
|
|
| 274 |
- case forceCertRenewal <- struct{}{}:
|
|
| 275 |
- case <-ctx.Done(): |
|
| 276 |
- return |
|
| 277 |
- } |
|
| 273 |
+ renewCert() |
|
| 278 | 274 |
} |
| 279 | 275 |
} |
| 280 | 276 |
}() |
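
Factoring the send into the renewCert closure keeps this loop from blocking forever on shutdown: the certificate-renewal signal is delivered only if someone is listening on forceCertRenewal, and the goroutine otherwise bails out when the node context is cancelled. A stripped-down sketch of that pattern; channel and function names are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	forceRenewal := make(chan struct{})

	// renew either hands the signal to the consumer or gives up once the
	// context is done, so the sender can never block past shutdown.
	renew := func() {
		select {
		case forceRenewal <- struct{}{}:
		case <-ctx.Done():
		}
	}

	go func() {
		select {
		case <-forceRenewal:
			fmt.Println("renewing certificate")
		case <-ctx.Done():
		}
	}()

	renew() // delivered: the consumer above is listening
	time.Sleep(10 * time.Millisecond)
	cancel()
	renew() // returns immediately: context is cancelled
	fmt.Println("done")
}
```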
| ... | ... |
@@ -380,7 +389,7 @@ func (n *Node) runAgent(ctx context.Context, db *bolt.DB, creds credentials.Tran |
| 380 | 380 |
Managers: n.remotes, |
| 381 | 381 |
Executor: n.config.Executor, |
| 382 | 382 |
DB: db, |
| 383 |
- NotifyRoleChange: n.roleChangeReq, |
|
| 383 |
+ NotifyNodeChange: n.notifyNodeChange, |
|
| 384 | 384 |
Credentials: creds, |
| 385 | 385 |
}) |
| 386 | 386 |
if err != nil {
|
| ... | ... |
@@ -1,3 +1,3 @@ |
| 1 | 1 |
package plugin |
| 2 | 2 |
|
| 3 |
-//go:generate protoc -I.:/usr/local --gogoswarm_out=import_path=github.com/docker/swarmkit/protobuf/plugin,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. plugin.proto |
|
| 3 |
+//go:generate protoc -I.:../../vendor/github.com/gogo/protobuf/protobuf --gogoswarm_out=import_path=github.com/docker/swarmkit/protobuf/plugin,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. plugin.proto |