vendor: update swarmkit for 1.12.2
Vincent Demeester authored on 2016/10/01 17:39:54
@@ -139,7 +139,7 @@ clone git github.com/docker/docker-credential-helpers v0.3.0
 clone git github.com/docker/containerd v0.2.4
 
 # cluster
-clone git github.com/docker/swarmkit de507ff6b0ee99002d56a784e095c753eab1ad61
+clone git github.com/docker/swarmkit a2abe794f7a1cfe0ed376fbc7c107ab3d6cf7705
 clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
 clone git github.com/gogo/protobuf 43a2e0b1c32252bfbbdf81f7faa7a88fb3fa4028
 clone git github.com/cloudflare/cfssl b895b0549c0ff676f92cf09ba971ae02bb41367b
...
@@ -204,7 +204,6 @@ func (a *Agent) run(ctx context.Context) {
 			delay := time.Duration(rand.Int63n(int64(backoff)))
 			session = newSession(ctx, a, delay)
 			registered = session.registered
-			sessionq = a.sessionq
 		case <-a.stopped:
 			// TODO(stevvooe): Wait on shutdown and cleanup. May need to pump
 			// this loop a few times.
...
@@ -320,7 +319,7 @@ func (a *Agent) UpdateTaskStatus(ctx context.Context, taskID string, status *api
 				err = nil // dispatcher no longer cares about this task.
 			} else {
 				log.G(ctx).WithError(err).Error("closing session after fatal error")
-				session.close()
+				session.sendError(err)
 			}
 		} else {
 			log.G(ctx).Debug("task status reported")
...
@@ -301,6 +301,16 @@ func (s *session) sendTaskStatuses(ctx context.Context, updates ...*api.UpdateTa
 	return updates[n:], nil
 }
 
+// sendError is used to send errors to the errs channel and trigger session recreation
+func (s *session) sendError(err error) {
+	select {
+	case s.errs <- err:
+	case <-s.closed:
+	}
+}
+
+// close closes the session. It should be called only in the <-session.errs
+// branch of the event loop.
 func (s *session) close() error {
 	s.closeOnce.Do(func() {
 		if s.conn != nil {
...
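The two agent hunks above change the fatal-error path so that a reporter never closes the session itself: it hands the error to the event loop via sendError, and only the <-session.errs branch of the run loop calls close before scheduling a replacement session. Below is a minimal, self-contained sketch of that send/receive contract, not the actual agent code; the session type, its field names, and the loop body are illustrative only.

package main

import (
	"errors"
	"fmt"
)

// session keeps only the two channels the pattern needs; everything else in
// the real swarmkit session is omitted here.
type session struct {
	errs   chan error
	closed chan struct{}
}

// sendError hands a fatal error to the event loop, or gives up silently if
// the session has already been closed, so senders never block forever.
func (s *session) sendError(err error) {
	select {
	case s.errs <- err:
	case <-s.closed:
	}
}

// close is called only by the event loop, in its <-s.errs branch.
func (s *session) close() {
	select {
	case <-s.closed:
	default:
		close(s.closed)
	}
}

func main() {
	s := &session{errs: make(chan error), closed: make(chan struct{})}

	// A status reporter hits a fatal error and requests session recreation.
	go s.sendError(errors.New("fatal dispatcher error"))

	// Event loop: the single place that closes the session before a new one
	// would be created with a backoff delay.
	err := <-s.errs
	fmt.Println("recreating session after:", err)
	s.close()
}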
@@ -683,26 +683,27 @@ func GetRemoteSignedCertificate(ctx context.Context, csr []byte, token string, r
 	}
 }
 
-// readCertExpiration returns the number of months left for certificate expiration
-func readCertExpiration(paths CertPaths) (time.Duration, error) {
+// readCertValidity returns the certificate issue and expiration time
+func readCertValidity(paths CertPaths) (time.Time, time.Time, error) {
+	var zeroTime time.Time
 	// Read the Cert
 	cert, err := ioutil.ReadFile(paths.Cert)
 	if err != nil {
 		log.Debugf("failed to read certificate file: %s", paths.Cert)
-		return time.Hour, err
+		return zeroTime, zeroTime, err
 	}
 
 	// Create an x509 certificate out of the contents on disk
 	certBlock, _ := pem.Decode([]byte(cert))
 	if certBlock == nil {
-		return time.Hour, fmt.Errorf("failed to decode certificate block")
+		return zeroTime, zeroTime, errors.New("failed to decode certificate block")
 	}
 	X509Cert, err := x509.ParseCertificate(certBlock.Bytes)
 	if err != nil {
-		return time.Hour, err
+		return zeroTime, zeroTime, err
 	}
 
-	return X509Cert.NotAfter.Sub(time.Now()), nil
+	return X509Cert.NotBefore, X509Cert.NotAfter, nil
 
 }
 
...
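The readCertValidity change above returns both endpoints of the validity window instead of a shrinking time-to-expiry. A small standalone sketch (hypothetical dates, not the swarmkit API) of why that matters for the renewal logic further down: the total validity period stays constant across iterations of the renewal loop, while the remaining time keeps shrinking.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical certificate issued 30 days ago with a 90-day lifetime.
	notBefore := time.Now().Add(-30 * 24 * time.Hour)
	notAfter := notBefore.Add(90 * 24 * time.Hour)

	total := notAfter.Sub(notBefore)      // 2160h, the same on every read
	remaining := notAfter.Sub(time.Now()) // ~1440h now, smaller on the next read

	fmt.Printf("total validity: %v, remaining: %v\n", total, remaining)
}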
@@ -313,8 +313,8 @@ func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string,
 
 		// Since the expiration of the certificate is managed remotely we should update our
 		// retry timer on every iteration of this loop.
-		// Retrieve the time until the certificate expires.
-		expiresIn, err := readCertExpiration(paths.Node)
+		// Retrieve the current certificate expiration information.
+		validFrom, validUntil, err := readCertValidity(paths.Node)
 		if err != nil {
 			// We failed to read the expiration, let's stick with the starting default
 			log.Errorf("failed to read the expiration of the TLS certificate in: %s", paths.Node.Cert)
...
@@ -322,12 +322,12 @@ func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string,
 		} else {
 			// If we have an expired certificate, we let's stick with the starting default in
 			// the hope that this is a temporary clock skew.
-			if expiresIn.Minutes() < 0 {
-				log.Debugf("failed to create a new client TLS config: %v", err)
-				updates <- CertificateUpdate{Err: fmt.Errorf("TLS Certificate is expired")}
+			if validUntil.Before(time.Now()) {
+				log.WithError(err).Errorf("failed to create a new client TLS config")
+				updates <- CertificateUpdate{Err: errors.New("TLS certificate is expired")}
 			} else {
 				// Random retry time between 50% and 80% of the total time to expiration
-				retry = calculateRandomExpiry(expiresIn)
+				retry = calculateRandomExpiry(validFrom, validUntil)
 			}
 		}
 
...
@@ -391,18 +391,16 @@ func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string,
 	return updates
 }
 
-// calculateRandomExpiry returns a random duration between 50% and 80% of the original
-// duration
-func calculateRandomExpiry(expiresIn time.Duration) time.Duration {
-	if expiresIn.Minutes() < 1 {
-		return time.Second
-	}
+// calculateRandomExpiry returns a random duration between 50% and 80% of the
+// original validity period
+func calculateRandomExpiry(validFrom, validUntil time.Time) time.Duration {
+	duration := validUntil.Sub(validFrom)
 
 	var randomExpiry int
 	// Our lower bound of renewal will be half of the total expiration time
-	minValidity := int(expiresIn.Minutes() * CertLowerRotationRange)
+	minValidity := int(duration.Minutes() * CertLowerRotationRange)
 	// Our upper bound of renewal will be 80% of the total expiration time
-	maxValidity := int(expiresIn.Minutes() * CertUpperRotationRange)
+	maxValidity := int(duration.Minutes() * CertUpperRotationRange)
 	// Let's select a random number of minutes between min and max, and set our retry for that
 	// Using randomly selected rotation allows us to avoid certificate thundering herds.
 	if maxValidity-minValidity < 1 {
...
@@ -411,7 +409,11 @@ func calculateRandomExpiry(expiresIn time.Duration) time.Duration {
 		randomExpiry = rand.Intn(maxValidity-minValidity) + int(minValidity)
 	}
 
-	return time.Duration(randomExpiry) * time.Minute
+	expiry := validFrom.Add(time.Duration(randomExpiry) * time.Minute).Sub(time.Now())
+	if expiry < 0 {
+		return 0
+	}
+	return expiry
 }
 
 // LoadTLSCreds loads tls credentials from the specified path and verifies that
...
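Taken together, the two calculateRandomExpiry hunks above anchor the renewal point at the certificate's issue time rather than at "now". A hedged, self-contained sketch of that math follows; the 50%/80% bounds are written as local constants here (in swarmkit they are the CertLowerRotationRange and CertUpperRotationRange package constants), and the handling of a sub-minute window is a simplification.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

const (
	lowerRotationRange = 0.5 // renew no earlier than 50% of the validity period
	upperRotationRange = 0.8 // renew no later than 80% of the validity period
)

// randomRenewalDelay picks a rotation point between 50% and 80% of the total
// validity period, measured from the issue time, and returns it as a delay
// from now (never negative).
func randomRenewalDelay(validFrom, validUntil time.Time) time.Duration {
	duration := validUntil.Sub(validFrom)

	minValidity := int(duration.Minutes() * lowerRotationRange)
	maxValidity := int(duration.Minutes() * upperRotationRange)

	randomExpiry := minValidity
	if maxValidity-minValidity >= 1 {
		// Randomizing the rotation point avoids certificate thundering herds.
		randomExpiry = rand.Intn(maxValidity-minValidity) + minValidity
	}

	expiry := validFrom.Add(time.Duration(randomExpiry) * time.Minute).Sub(time.Now())
	if expiry < 0 {
		// Already past the chosen rotation point: renew immediately.
		return 0
	}
	return expiry
}

func main() {
	// Hypothetical 90-day certificate issued 10 days ago: the rotation point
	// lands 45-72 days after issuance, i.e. roughly 35-62 days from now.
	issued := time.Now().Add(-10 * 24 * time.Hour)
	expires := issued.Add(90 * 24 * time.Hour)

	fmt.Println("renew in:", randomRenewalDelay(issued, expires))
}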
@@ -95,7 +95,7 @@ type Node struct {
 	wait                *wait
 	wal                 *wal.WAL
 	snapshotter         *snap.Snapshotter
-	restored            bool
+	campaignWhenAble    bool
 	signalledLeadership uint32
 	isMember            uint32
 	joinAddr            string
...
@@ -281,6 +281,7 @@ func (n *Node) JoinAndStart() error {
 		if n.joinAddr != "" {
 			n.Config.Logger.Warning("ignoring request to join cluster, because raft state already exists")
 		}
+		n.campaignWhenAble = true
 		n.Node = raft.RestartNode(n.Config)
 		atomic.StoreUint32(&n.isMember, 1)
 		return nil
...
@@ -424,15 +425,16 @@ func (n *Node) Run(ctx context.Context) error {
 			// If we are the only registered member after
 			// restoring from the state, campaign to be the
 			// leader.
-			if !n.restored {
-				// Node ID should be in the progress list to Campaign
-				_, ok := n.Node.Status().Progress[n.Config.ID]
-				if len(n.cluster.Members()) <= 1 && ok {
+			if n.campaignWhenAble {
+				members := n.cluster.Members()
+				if len(members) >= 1 {
+					n.campaignWhenAble = false
+				}
+				if len(members) == 1 && members[n.Config.ID] != nil {
 					if err := n.Campaign(n.Ctx); err != nil {
 						panic("raft: cannot campaign to be the leader on node restore")
 					}
 				}
-				n.restored = true
 			}
 
 			// Advance the state machine
...
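The raft hunks above replace the one-shot restored flag with campaignWhenAble: a restarted node keeps re-evaluating on each Ready until the member list has been reconstructed, and campaigns only if it is the sole registered member. The following sketch isolates that decision; the member map and IDs are placeholders, not the swarmkit membership types.

package main

import "fmt"

type member struct{ id uint64 }

// shouldCampaign reports whether a node restored from local raft state should
// campaign for leadership now, and whether the decision is final (i.e. the
// membership list has been reconstructed, so campaignWhenAble can be cleared).
func shouldCampaign(selfID uint64, members map[uint64]*member) (campaign, done bool) {
	if len(members) == 0 {
		// Membership not loaded yet: keep the flag set and check again on
		// the next Ready.
		return false, false
	}
	return len(members) == 1 && members[selfID] != nil, true
}

func main() {
	fmt.Println(shouldCampaign(1, map[uint64]*member{}))                       // false false: retry later
	fmt.Println(shouldCampaign(1, map[uint64]*member{1: {id: 1}}))             // true true: sole member is us
	fmt.Println(shouldCampaign(1, map[uint64]*member{1: {id: 1}, 2: {id: 2}})) // false true: multi-node cluster
}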
@@ -887,6 +889,7 @@ func (n *Node) registerNode(node *api.RaftMember) error {
 		}
 		return err
 	}
+
 	return nil
 }
 