Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
@@ -105,7 +105,7 @@ github.com/docker/containerd 8ef7df579710405c4bb6e0812495671002ce08e0
 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
 
 # cluster
-github.com/docker/swarmkit f420c4b9e1535170fc229db97ee8ac32374020b1
+github.com/docker/swarmkit ae29cf24355ef2106b63884d2f9b0a6406e5a144
 github.com/gogo/protobuf 8d70fb3182befc465c4a1eac8ad4d38ff49778e2
 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
 github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
@@ -2074,7 +2074,7 @@ func init() { proto.RegisterFile("ca.proto", fileDescriptorCa) }
 
 var fileDescriptorCa = []byte{
 	// 610 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x6e, 0xd3, 0x40,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x6e, 0xd3, 0x40,
 	0x10, 0xee, 0xba, 0x25, 0x6d, 0x27, 0xa1, 0x45, 0xdb, 0x56, 0x32, 0x69, 0xea, 0x54, 0xe6, 0xd0,
 	0x72, 0xc0, 0x6d, 0x03, 0x27, 0xb8, 0x90, 0x04, 0xa9, 0x8a, 0x50, 0x11, 0xda, 0x08, 0xae, 0x95,
 	0xe3, 0x2c, 0xc1, 0x8a, 0xe3, 0x35, 0xde, 0x75, 0x20, 0x37, 0x24, 0x10, 0x6f, 0x80, 0xe0, 0xc4,
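Note: in this and every regenerated *.pb.go hunk below, the only bytes that change are bytes 4-7 of the embedded gzip stream. Per RFC 1952 those hold the little-endian MTIME field; the old descriptors carried a fixed non-zero timestamp from the generator, and the regenerated ones zero it, making the generated files byte-for-byte reproducible. The neighboring bytes are untouched: 0x1f 0x8b magic, 0x08 deflate, 0x00 flags, then XFL 0x02 for best compression and OS 0xff for "unknown". A minimal standalone sketch, not the generator's code, showing how Go's compress/gzip emits exactly this deterministic header when ModTime is left at the zero time:

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
    )

    func main() {
        var buf bytes.Buffer
        zw, err := gzip.NewWriterLevel(&buf, gzip.BestCompression)
        if err != nil {
            panic(err)
        }
        // Leaving zw.ModTime at the zero time makes the writer emit
        // 0x00000000 for MTIME, so identical input always compresses
        // to identical bytes.
        zw.Write([]byte("gzipped FileDescriptorProto contents"))
        zw.Close()

        // Header: 1f 8b (magic), 08 (deflate), 00 (flags),
        // 00 00 00 00 (MTIME), 02 (XFL: best compression), ff (OS: unknown).
        fmt.Printf("% x\n", buf.Bytes()[:10])
    }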
@@ -15993,7 +15993,7 @@ func init() { proto.RegisterFile("control.proto", fileDescriptorControl) }
 
 var fileDescriptorControl = []byte{
 	// 2096 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x5a, 0x4b, 0x6f, 0x1b, 0xc9,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0x4b, 0x6f, 0x1b, 0xc9,
 	0x11, 0x36, 0x1f, 0x12, 0xa9, 0xa2, 0x44, 0x49, 0x2d, 0x39, 0x21, 0x68, 0x47, 0x32, 0xc6, 0xb1,
 	0x4d, 0x07, 0x0e, 0x95, 0xa5, 0xb3, 0x88, 0xb3, 0x41, 0x1e, 0x2b, 0xd1, 0xeb, 0x70, 0xb5, 0x2b,
 	0x1b, 0x23, 0x6b, 0x91, 0x1b, 0x41, 0x91, 0x2d, 0x65, 0x4c, 0x8a, 0xc3, 0xcc, 0x0c, 0xb5, 0x2b,
@@ -3781,7 +3781,7 @@ func init() { proto.RegisterFile("dispatcher.proto", fileDescriptorDispatcher) }
 
 var fileDescriptorDispatcher = []byte{
 	// 983 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x55, 0x4f, 0x6f, 0x1b, 0x45,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x4f, 0x6f, 0x1b, 0x45,
 	0x14, 0xf7, 0x38, 0xce, 0x26, 0x7e, 0x4e, 0x82, 0x19, 0xaa, 0xb0, 0xac, 0x54, 0xc7, 0x6c, 0x68,
 	0x14, 0xa9, 0x61, 0x53, 0xcc, 0x9f, 0x0b, 0x51, 0x20, 0x8e, 0x2d, 0xc5, 0x6a, 0x93, 0x46, 0x13,
 	0xb7, 0x3d, 0x5a, 0x6b, 0xef, 0x74, 0xb3, 0x38, 0xde, 0x59, 0x76, 0xc6, 0x2d, 0x3e, 0x20, 0x71,
@@ -700,7 +700,7 @@ func init() { proto.RegisterFile("health.proto", fileDescriptorHealth) }
 
 var fileDescriptorHealth = []byte{
 	// 287 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc,
 	0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xc9, 0x4f, 0xce, 0x4e, 0x2d,
 	0xd2, 0x2b, 0x2e, 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0x94, 0x12, 0x49, 0xcf,
 	0x4f, 0xcf, 0x07, 0x4b, 0xeb, 0x83, 0x58, 0x10, 0x95, 0x52, 0xc2, 0x05, 0x39, 0xa5, 0xe9, 0x99,
@@ -3366,7 +3366,7 @@ func init() { proto.RegisterFile("logbroker.proto", fileDescriptorLogbroker) }
 
 var fileDescriptorLogbroker = []byte{
 	// 940 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x95, 0xcf, 0x6f, 0x1b, 0x45,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0xcf, 0x6f, 0x1b, 0x45,
 	0x14, 0xc7, 0x33, 0xeb, 0xc4, 0x3f, 0x9e, 0x9b, 0xc4, 0x1d, 0xa7, 0x91, 0x65, 0xa8, 0x6d, 0x6d,
 	0xa5, 0x62, 0x45, 0xc5, 0x6e, 0x8d, 0x50, 0x91, 0x2a, 0x21, 0x6a, 0x5c, 0x21, 0x0b, 0x37, 0x41,
 	0x63, 0x47, 0x70, 0x8b, 0xd6, 0xde, 0xe9, 0xb2, 0xf2, 0x7a, 0xc7, 0xec, 0x8c, 0x13, 0x90, 0x38,
@@ -7470,7 +7470,7 @@ func init() { proto.RegisterFile("objects.proto", fileDescriptorObjects) }
 
 var fileDescriptorObjects = []byte{
 	// 1405 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x57, 0xc1, 0x6f, 0x1b, 0x45,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xc1, 0x6f, 0x1b, 0x45,
 	0x17, 0xef, 0xda, 0x1b, 0xdb, 0xfb, 0x9c, 0x58, 0xf9, 0xa6, 0xf9, 0xf2, 0x6d, 0xf3, 0x05, 0x3b,
 	0xb8, 0x02, 0x55, 0xa8, 0x72, 0x4a, 0x29, 0x28, 0x0d, 0x14, 0x6a, 0x27, 0x11, 0xb5, 0x4a, 0x69,
 	0x34, 0x2d, 0x2d, 0x37, 0x33, 0xd9, 0x9d, 0xba, 0x8b, 0xd7, 0x3b, 0xab, 0x9d, 0xb1, 0x8b, 0x6f,
@@ -3587,7 +3587,7 @@ func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) }
 
 var fileDescriptorRaft = []byte{
 	// 949 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x96, 0x4f, 0x6f, 0x1b, 0x45,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x96, 0x4f, 0x6f, 0x1b, 0x45,
 	0x18, 0xc6, 0x77, 0xed, 0xad, 0x9d, 0xbc, 0x6e, 0xe2, 0x68, 0x42, 0xc2, 0x76, 0x29, 0x8e, 0xbb,
 	0x45, 0xaa, 0x5b, 0x91, 0xb5, 0x30, 0x48, 0xa0, 0x42, 0x0f, 0xb1, 0x63, 0xc9, 0xa6, 0xad, 0x53,
 	0x6d, 0x12, 0xe8, 0x2d, 0xac, 0x77, 0x27, 0xee, 0x62, 0x7b, 0xc7, 0xcc, 0x8c, 0x1d, 0xb8, 0xa0,
@@ -1065,7 +1065,7 @@ func init() { proto.RegisterFile("resource.proto", fileDescriptorResource) }
 
 var fileDescriptorResource = []byte{
 	// 368 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x4a, 0x2d, 0xce,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x4a, 0x2d, 0xce,
 	0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xc9, 0x4f, 0xce,
 	0x4e, 0x2d, 0xd2, 0x2b, 0x2e, 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0x94, 0xe2,
 	0x2e, 0xa9, 0x2c, 0x48, 0x2d, 0x86, 0x28, 0x90, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x33, 0xf5,
@@ -1321,7 +1321,7 @@ func init() { proto.RegisterFile("snapshot.proto", fileDescriptorSnapshot) }
 
 var fileDescriptorSnapshot = []byte{
 	// 466 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x93, 0x3f, 0x6f, 0x13, 0x4d,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x3f, 0x6f, 0x13, 0x4d,
 	0x10, 0x87, 0xbd, 0xfe, 0x77, 0xce, 0x44, 0xc9, 0xfb, 0xb2, 0xa2, 0x58, 0x19, 0x38, 0x8c, 0xa1,
 	0x70, 0x75, 0x80, 0x41, 0x02, 0x21, 0x85, 0x22, 0x11, 0x05, 0x05, 0x29, 0xd6, 0x28, 0xa2, 0x3d,
 	0x9f, 0xc7, 0xce, 0x71, 0xf8, 0xd6, 0xda, 0xd9, 0x38, 0x94, 0xf0, 0xed, 0x5c, 0x52, 0x52, 0x21,
@@ -5884,7 +5884,7 @@ func init() { proto.RegisterFile("specs.proto", fileDescriptorSpecs) }
 
 var fileDescriptorSpecs = []byte{
 	// 1824 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x73, 0x1b, 0x49,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x73, 0x1b, 0x49,
 	0x15, 0xb7, 0x6c, 0x59, 0x7f, 0xde, 0xc8, 0x89, 0xd2, 0x24, 0x61, 0xac, 0xb0, 0xb2, 0xa2, 0x0d,
 	0xc1, 0xcb, 0x16, 0x72, 0x61, 0xa8, 0x25, 0x4b, 0x58, 0x40, 0xb2, 0x84, 0x63, 0x8c, 0x1d, 0x55,
 	0xdb, 0x1b, 0xc8, 0x49, 0xd5, 0x9e, 0x69, 0x4b, 0x53, 0x1e, 0x75, 0x0f, 0xdd, 0x3d, 0xda, 0xd2,
@@ -16084,7 +16084,7 @@ func init() { proto.RegisterFile("types.proto", fileDescriptorTypes) }
 
 var fileDescriptorTypes = []byte{
 	// 4658 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x5a, 0x4d, 0x6c, 0x23, 0x47,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0x4d, 0x6c, 0x23, 0x47,
 	0x76, 0x16, 0x7f, 0x45, 0x3e, 0x52, 0x52, 0x4f, 0x8d, 0x3c, 0xd6, 0xd0, 0x63, 0x49, 0x6e, 0x7b,
 	0xd6, 0x3f, 0xeb, 0xd0, 0xf3, 0x63, 0x1b, 0x63, 0x3b, 0x6b, 0x9b, 0x7f, 0x1a, 0x71, 0x47, 0x22,
 	0x89, 0x22, 0x35, 0xb3, 0x3e, 0x24, 0x8d, 0x56, 0x77, 0x89, 0x6a, 0xab, 0xd9, 0xc5, 0x74, 0x17,
@@ -4523,7 +4523,7 @@ func init() { proto.RegisterFile("watch.proto", fileDescriptorWatch) }
 
 var fileDescriptorWatch = []byte{
 	// 1155 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x96, 0xbb, 0x73, 0x1b, 0xd5,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xbb, 0x73, 0x1b, 0xd5,
 	0x17, 0xc7, 0xb5, 0x8a, 0xbc, 0x92, 0x8e, 0xac, 0xc4, 0x73, 0xed, 0x24, 0xfb, 0xd3, 0x2f, 0x48,
 	0x42, 0x0c, 0xe0, 0x21, 0x41, 0x01, 0x13, 0xc2, 0x00, 0x81, 0x19, 0x4b, 0x16, 0x23, 0x91, 0xb1,
 	0xec, 0xb9, 0xb6, 0xe3, 0x52, 0xb3, 0xde, 0x3d, 0x56, 0x16, 0xed, 0x43, 0xdc, 0x5d, 0xc9, 0x71,
@@ -3,7 +3,6 @@ package controlapi
 import (
 	"errors"
 	"reflect"
-	"strconv"
 	"strings"
 	"time"
 
@@ -433,10 +432,9 @@ func (s *Server) validateNetworks(networks []*api.NetworkAttachmentConfig) error
 		if network == nil {
 			continue
 		}
-		if network.Spec.Internal {
+		if allocator.IsIngressNetwork(network) {
 			return grpc.Errorf(codes.InvalidArgument,
-				"Service cannot be explicitly attached to %q network which is a swarm internal network",
-				network.Spec.Annotations.Name)
+				"Service cannot be explicitly attached to the ingress network %q", network.Spec.Annotations.Name)
 		}
 	}
 	return nil
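Note: allocator.IsIngressNetwork comes from the vendored swarmkit and its body is not part of this diff. Purely for orientation, here is a hypothetical predicate of roughly that shape; the label and name conventions below are assumptions, not the vendored source:

    package controlapi

    import "github.com/docker/swarmkit/api"

    // isIngressNetwork is a guess at the vendored helper's shape; the real
    // allocator.IsIngressNetwork may differ. Legacy ingress networks have
    // been identified by the fixed name "ingress" plus an internal label.
    func isIngressNetwork(nw *api.Network) bool {
        _, ok := nw.Spec.Annotations.Labels["com.docker.swarm.internal"]
        return ok && nw.Spec.Annotations.Name == "ingress"
    }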
@@ -490,18 +488,32 @@ func (s *Server) checkPortConflicts(spec *api.ServiceSpec, serviceID string) err
 		return nil
 	}
 
-	pcToString := func(pc *api.PortConfig) string {
-		port := strconv.FormatUint(uint64(pc.PublishedPort), 10)
-		return port + "/" + pc.Protocol.String()
+	type portSpec struct {
+		protocol      api.PortConfig_Protocol
+		publishedPort uint32
 	}
 
-	reqPorts := make(map[string]bool)
+	pcToStruct := func(pc *api.PortConfig) portSpec {
+		return portSpec{
+			protocol:      pc.Protocol,
+			publishedPort: pc.PublishedPort,
+		}
+	}
+
+	ingressPorts := make(map[portSpec]struct{})
+	hostModePorts := make(map[portSpec]struct{})
 	for _, pc := range spec.Endpoint.Ports {
-		if pc.PublishedPort > 0 {
-			reqPorts[pcToString(pc)] = true
+		if pc.PublishedPort == 0 {
+			continue
+		}
+		switch pc.PublishMode {
+		case api.PublishModeIngress:
+			ingressPorts[pcToStruct(pc)] = struct{}{}
+		case api.PublishModeHost:
+			hostModePorts[pcToStruct(pc)] = struct{}{}
 		}
 	}
-	if len(reqPorts) == 0 {
+	if len(ingressPorts) == 0 && len(hostModePorts) == 0 {
 		return nil
 	}
 
@@ -517,6 +529,31 @@ func (s *Server) checkPortConflicts(spec *api.ServiceSpec, serviceID string) err
 		return err
 	}
 
+	isPortInUse := func(pc *api.PortConfig, service *api.Service) error {
+		if pc.PublishedPort == 0 {
+			return nil
+		}
+
+		switch pc.PublishMode {
+		case api.PublishModeHost:
+			if _, ok := ingressPorts[pcToStruct(pc)]; ok {
+				return grpc.Errorf(codes.InvalidArgument, "port '%d' is already in use by service '%s' (%s) as a host-published port", pc.PublishedPort, service.Spec.Annotations.Name, service.ID)
+			}
+
+			// Multiple services with same port in host publish mode can
+			// coexist - this is handled by the scheduler.
+			return nil
+		case api.PublishModeIngress:
+			_, ingressConflict := ingressPorts[pcToStruct(pc)]
+			_, hostModeConflict := hostModePorts[pcToStruct(pc)]
+			if ingressConflict || hostModeConflict {
+				return grpc.Errorf(codes.InvalidArgument, "port '%d' is already in use by service '%s' (%s) as an ingress port", pc.PublishedPort, service.Spec.Annotations.Name, service.ID)
+			}
+		}
+
+		return nil
+	}
+
 	for _, service := range services {
 		// If service ID is the same (and not "") then this is an update
 		if serviceID != "" && serviceID == service.ID {
@@ -524,15 +561,15 @@ func (s *Server) checkPortConflicts(spec *api.ServiceSpec, serviceID string) err
 		}
 		if service.Spec.Endpoint != nil {
 			for _, pc := range service.Spec.Endpoint.Ports {
-				if reqPorts[pcToString(pc)] {
-					return grpc.Errorf(codes.InvalidArgument, "port '%d' is already in use by service '%s' (%s)", pc.PublishedPort, service.Spec.Annotations.Name, service.ID)
+				if err := isPortInUse(pc, service); err != nil {
+					return err
 				}
 			}
 		}
 		if service.Endpoint != nil {
 			for _, pc := range service.Endpoint.Ports {
-				if reqPorts[pcToString(pc)] {
-					return grpc.Errorf(codes.InvalidArgument, "port '%d' is already in use by service '%s' (%s)", pc.PublishedPort, service.Spec.Annotations.Name, service.ID)
+				if err := isPortInUse(pc, service); err != nil {
+					return err
 				}
 			}
 		}
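Note: the rework swaps string keys of the form "80/TCP" for a comparable struct key and tracks ingress and host publications separately. The resulting matrix: ingress vs ingress conflicts, ingress vs host conflicts in either direction, and host vs host is allowed, because the scheduler's new HostPortFilter (further down) keeps those tasks on different nodes. A self-contained sketch of the struct-keyed map idea, with illustrative names:

    package main

    import "fmt"

    // portKey stands in for the diff's portSpec: a struct whose fields are
    // all comparable can be used directly as a map key, with no string
    // encoding such as "8080/tcp" needed.
    type portKey struct {
        protocol      string // the real code uses api.PortConfig_Protocol
        publishedPort uint32
    }

    func main() {
        used := map[portKey]struct{}{
            {"tcp", 8080}: {},
        }

        _, conflict := used[portKey{"tcp", 8080}]
        fmt.Println(conflict) // true: same protocol and port

        _, conflict = used[portKey{"udp", 8080}]
        fmt.Println(conflict) // false: udp/8080 is a distinct key
    }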
@@ -529,6 +529,8 @@ func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStat
 		return nil, err
 	}
 
+	validTaskUpdates := make([]*api.UpdateTaskStatusRequest_TaskStatusUpdate, 0, len(r.Updates))
+
 	// Validate task updates
 	for _, u := range r.Updates {
 		if u.Status == nil {
@@ -541,7 +543,8 @@ func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStat
 			t = store.GetTask(tx, u.TaskID)
 		})
 		if t == nil {
-			log.WithField("task.id", u.TaskID).Warn("cannot find target task in store")
+			// Task may have been deleted
+			log.WithField("task.id", u.TaskID).Debug("cannot find target task in store")
 			continue
 		}
 
@@ -550,14 +553,13 @@ func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStat
 			log.WithField("task.id", u.TaskID).Error(err)
 			return nil, err
 		}
+
+		validTaskUpdates = append(validTaskUpdates, u)
 	}
 
 	d.taskUpdatesLock.Lock()
 	// Enqueue task updates
-	for _, u := range r.Updates {
-		if u.Status == nil {
-			continue
-		}
+	for _, u := range validTaskUpdates {
 		d.taskUpdates[u.TaskID] = u.Status
 	}
 
@@ -606,7 +608,8 @@ func (d *Dispatcher) processUpdates(ctx context.Context) {
 		logger := log.WithField("task.id", taskID)
 		task := store.GetTask(tx, taskID)
 		if task == nil {
-			logger.Errorf("task unavailable")
+			// Task may have been deleted
+			logger.Debug("cannot find target task in store")
 			return nil
 		}
 
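Note: UpdateTaskStatus now collects the survivors of the validation pass in validTaskUpdates and enqueues only those, instead of re-checking u.Status == nil while holding taskUpdatesLock; and a task missing from the store is logged at debug level rather than as a warning or error, since deletion racing with a status update is expected. A simplified standalone sketch of the filter-before-lock pattern; the real validation does more than a nil check:

    package main

    import (
        "fmt"
        "sync"
    )

    type status struct{ state string }

    type update struct {
        taskID string
        status *status
    }

    // enqueue filters invalid updates before taking the lock, so the
    // critical section only performs map writes.
    func enqueue(updates []update, queue map[string]*status, mu *sync.Mutex) {
        valid := make([]update, 0, len(updates))
        for _, u := range updates {
            if u.status == nil {
                continue // dropped during validation, never reaches the lock
            }
            valid = append(valid, u)
        }

        mu.Lock()
        defer mu.Unlock()
        for _, u := range valid {
            queue[u.taskID] = u.status
        }
    }

    func main() {
        q := make(map[string]*status)
        var mu sync.Mutex
        enqueue([]update{
            {taskID: "t1", status: &status{state: "running"}},
            {taskID: "t2"}, // no status: filtered out
        }, q, &mu)
        fmt.Println(len(q)) // 1
    }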
@@ -28,6 +28,7 @@ import (
 	"github.com/docker/swarmkit/manager/health"
 	"github.com/docker/swarmkit/manager/keymanager"
 	"github.com/docker/swarmkit/manager/logbroker"
+	"github.com/docker/swarmkit/manager/metrics"
 	"github.com/docker/swarmkit/manager/orchestrator/constraintenforcer"
 	"github.com/docker/swarmkit/manager/orchestrator/global"
 	"github.com/docker/swarmkit/manager/orchestrator/replicated"
@@ -123,6 +124,7 @@ type Config struct {
 type Manager struct {
 	config Config
 
+	collector  *metrics.Collector
 	caserver   *ca.Server
 	dispatcher *dispatcher.Dispatcher
 	logbroker  *logbroker.LogBroker
@@ -214,6 +216,7 @@ func New(config *Config) (*Manager, error) {
 
 	m := &Manager{
 		config:     *config,
+		collector:  metrics.NewCollector(raftNode.MemoryStore()),
 		caserver:   ca.NewServer(raftNode.MemoryStore(), config.SecurityConfig, config.RootCAPaths),
 		dispatcher: dispatcher.New(raftNode, dispatcher.DefaultConfig()),
 		logbroker:  logbroker.New(raftNode.MemoryStore()),
@@ -503,6 +506,13 @@ func (m *Manager) Run(parent context.Context) error {
 
 	localHealthServer.SetServingStatus("ControlAPI", api.HealthCheckResponse_SERVING)
 
+	// Start metrics collection.
+	go func(collector *metrics.Collector) {
+		if err := collector.Run(ctx); err != nil {
+			log.G(ctx).WithError(err).Error("collector failed with an error")
+		}
+	}(m.collector)
+
 	close(m.started)
 
 	go func() {
@@ -579,6 +589,7 @@ func (m *Manager) Stop(ctx context.Context, clearData bool) {
 
 	m.raftNode.Cancel()
 
+	m.collector.Stop()
 	m.dispatcher.Stop()
 	m.logbroker.Stop()
 	m.caserver.Stop()
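Note: m.collector.Stop refers to the new metrics package introduced just below; it blocks until the collector's Run loop has fully exited, using a stop/done channel pair. The pattern in isolation, illustrative rather than the vendored code:

    package main

    import "fmt"

    // loop mirrors the collector's shutdown handshake: Stop closes stopChan
    // to ask Run to exit, then blocks on doneChan until Run has returned.
    type loop struct {
        stopChan chan struct{}
        doneChan chan struct{}
    }

    func (l *loop) Run() {
        defer close(l.doneChan) // signals "fully stopped" to Stop
        for {
            select {
            // the real loop also selects on store watch events here
            case <-l.stopChan:
                return
            }
        }
    }

    func (l *loop) Stop() {
        close(l.stopChan)
        <-l.doneChan // do not return until Run is really done
    }

    func main() {
        l := &loop{stopChan: make(chan struct{}), doneChan: make(chan struct{})}
        go l.Run()
        l.Stop()
        fmt.Println("stopped cleanly")
    }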
new file mode 100644
@@ -0,0 +1,104 @@
+package metrics
+
+import (
+	"context"
+
+	"strings"
+
+	metrics "github.com/docker/go-metrics"
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/manager/state/store"
+)
+
+var (
+	ns          = metrics.NewNamespace("swarm", "manager", nil)
+	nodesMetric metrics.LabeledGauge
+)
+
+func init() {
+	nodesMetric = ns.NewLabeledGauge("nodes", "The number of nodes", "", "state")
+	for _, state := range api.NodeStatus_State_name {
+		nodesMetric.WithValues(strings.ToLower(state)).Set(0)
+	}
+	metrics.Register(ns)
+}
+
+// Collector collects swarmkit metrics
+type Collector struct {
+	store *store.MemoryStore
+
+	// stopChan signals to the state machine to stop running.
+	stopChan chan struct{}
+	// doneChan is closed when the state machine terminates.
+	doneChan chan struct{}
+}
+
+// NewCollector creates a new metrics collector
+func NewCollector(store *store.MemoryStore) *Collector {
+	return &Collector{
+		store:    store,
+		stopChan: make(chan struct{}),
+		doneChan: make(chan struct{}),
+	}
+}
+
+func (c *Collector) updateNodeState(prevNode, newNode *api.Node) {
+	// Skip updates if nothing changed.
+	if prevNode != nil && newNode != nil && prevNode.Status.State == newNode.Status.State {
+		return
+	}
+
+	if prevNode != nil {
+		nodesMetric.WithValues(strings.ToLower(prevNode.Status.State.String())).Dec(1)
+	}
+	if newNode != nil {
+		nodesMetric.WithValues(strings.ToLower(newNode.Status.State.String())).Inc(1)
+	}
+}
+
+// Run contains the collector event loop
+func (c *Collector) Run(ctx context.Context) error {
+	defer close(c.doneChan)
+
+	watcher, cancel, err := store.ViewAndWatch(c.store, func(readTx store.ReadTx) error {
+		nodes, err := store.FindNodes(readTx, store.All)
+		if err != nil {
+			return err
+		}
+		for _, node := range nodes {
+			c.updateNodeState(nil, node)
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	defer cancel()
+
+	for {
+		select {
+		case event := <-watcher:
+			switch v := event.(type) {
+			case api.EventCreateNode:
+				c.updateNodeState(nil, v.Node)
+			case api.EventUpdateNode:
+				c.updateNodeState(v.OldNode, v.Node)
+			case api.EventDeleteNode:
+				c.updateNodeState(v.Node, nil)
+			}
+		case <-c.stopChan:
+			return nil
+		}
+	}
+}
+
+// Stop stops the collector.
+func (c *Collector) Stop() {
+	close(c.stopChan)
+	<-c.doneChan
+
+	// Clean the metrics on exit.
+	for _, state := range api.NodeStatus_State_name {
+		nodesMetric.WithValues(strings.ToLower(state)).Set(0)
+	}
+}
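Note: the collector seeds the per-state gauge from a snapshot inside store.ViewAndWatch, keeps it current from node events, and zeroes every state again on Stop so a stopped manager does not keep reporting stale counts. Nothing here serves HTTP; docker/go-metrics wraps the Prometheus client, so, assuming go-metrics' Handler helper, the registered namespaces can be exposed roughly as below, yielding series such as swarm_manager_nodes{state="ready"}:

    package main

    import (
        "log"
        "net/http"

        metrics "github.com/docker/go-metrics"
    )

    func main() {
        // metrics.Handler serves everything registered via metrics.Register,
        // in the Prometheus text exposition format.
        http.Handle("/metrics", metrics.Handler())
        log.Fatal(http.ListenAndServe("127.0.0.1:9090", nil))
    }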
@@ -260,3 +260,44 @@ func (f *PlatformFilter) Explain(nodes int) string {
 	}
 	return fmt.Sprintf("unsupported platform on %d nodes", nodes)
 }
+
+// HostPortFilter checks that the node has a specific port available.
+type HostPortFilter struct {
+	t *api.Task
+}
+
+// SetTask returns true when the filter is enabled for a given task.
+func (f *HostPortFilter) SetTask(t *api.Task) bool {
+	if t.Endpoint != nil {
+		for _, port := range t.Endpoint.Ports {
+			if port.PublishMode == api.PublishModeHost && port.PublishedPort != 0 {
+				f.t = t
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// Check returns true if the task can be scheduled into the given node.
+func (f *HostPortFilter) Check(n *NodeInfo) bool {
+	for _, port := range f.t.Endpoint.Ports {
+		if port.PublishMode == api.PublishModeHost && port.PublishedPort != 0 {
+			portSpec := hostPortSpec{protocol: port.Protocol, publishedPort: port.PublishedPort}
+			if _, ok := n.usedHostPorts[portSpec]; ok {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// Explain returns an explanation of a failure.
+func (f *HostPortFilter) Explain(nodes int) string {
+	if nodes == 1 {
+		return "host-mode port already in use on 1 node"
+	}
+	return fmt.Sprintf("host-mode port already in use on %d nodes", nodes)
+}
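Note: for this filter to run it must also be wired into the scheduler's filter pipeline, a change not shown in this excerpt. An illustrative test that could live inside the scheduler package, since it reaches the unexported usedHostPorts field; api.ProtocolTCP is assumed from the swarmkit API, as the protocol enum never appears in this diff:

    package scheduler

    import (
        "testing"

        "github.com/docker/swarmkit/api"
    )

    func TestHostPortFilterConflict(t *testing.T) {
        task := &api.Task{
            Endpoint: &api.Endpoint{
                Ports: []*api.PortConfig{{
                    PublishMode:   api.PublishModeHost,
                    PublishedPort: 80,
                    Protocol:      api.ProtocolTCP, // assumed enum name
                }},
            },
        }

        var f HostPortFilter
        if !f.SetTask(task) {
            t.Fatal("filter should engage for host-published ports")
        }

        // A node already using tcp/80 in host mode must be rejected.
        node := &NodeInfo{usedHostPorts: map[hostPortSpec]struct{}{
            {protocol: api.ProtocolTCP, publishedPort: 80}: {},
        }}
        if f.Check(node) {
            t.Fatal("expected a host port conflict on this node")
        }
    }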
@@ -8,6 +8,12 @@ import (
 	"golang.org/x/net/context"
 )
 
+// hostPortSpec specifies a used host port.
+type hostPortSpec struct {
+	protocol      api.PortConfig_Protocol
+	publishedPort uint32
+}
+
 // NodeInfo contains a node and some additional metadata.
 type NodeInfo struct {
 	*api.Node
| 11 | 17 |
// NodeInfo contains a node and some additional metadata. |
| 12 | 18 |
type NodeInfo struct {
|
| 13 | 19 |
*api.Node |
| ... | ... |
@@ -15,6 +21,7 @@ type NodeInfo struct {
|
| 15 | 15 |
ActiveTasksCount int |
| 16 | 16 |
ActiveTasksCountByService map[string]int |
| 17 | 17 |
AvailableResources api.Resources |
| 18 |
+ usedHostPorts map[hostPortSpec]struct{}
|
|
| 18 | 19 |
|
| 19 | 20 |
// recentFailures is a map from service ID to the timestamps of the |
| 20 | 21 |
// most recent failures the node has experienced from replicas of that |
| ... | ... |
@@ -30,6 +37,7 @@ func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api
 		Tasks:                     make(map[string]*api.Task),
 		ActiveTasksCountByService: make(map[string]int),
 		AvailableResources:        availableResources,
+		usedHostPorts:             make(map[hostPortSpec]struct{}),
 		recentFailures:            make(map[string][]time.Time),
 	}
 
@@ -57,6 +65,15 @@ func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool {
 	nodeInfo.AvailableResources.MemoryBytes += reservations.MemoryBytes
 	nodeInfo.AvailableResources.NanoCPUs += reservations.NanoCPUs
 
+	if t.Endpoint != nil {
+		for _, port := range t.Endpoint.Ports {
+			if port.PublishMode == api.PublishModeHost && port.PublishedPort != 0 {
+				portSpec := hostPortSpec{protocol: port.Protocol, publishedPort: port.PublishedPort}
+				delete(nodeInfo.usedHostPorts, portSpec)
+			}
+		}
+	}
+
 	return true
 }
 
@@ -84,6 +101,15 @@ func (nodeInfo *NodeInfo) addTask(t *api.Task) bool {
 	nodeInfo.AvailableResources.MemoryBytes -= reservations.MemoryBytes
 	nodeInfo.AvailableResources.NanoCPUs -= reservations.NanoCPUs
 
+	if t.Endpoint != nil {
+		for _, port := range t.Endpoint.Ports {
+			if port.PublishMode == api.PublishModeHost && port.PublishedPort != 0 {
+				portSpec := hostPortSpec{protocol: port.Protocol, publishedPort: port.PublishedPort}
+				nodeInfo.usedHostPorts[portSpec] = struct{}{}
+			}
+		}
+	}
+
 	if t.DesiredState <= api.TaskStateRunning {
 		nodeInfo.ActiveTasksCount++
 		nodeInfo.ActiveTasksCountByService[t.ServiceID]++
@@ -1370,10 +1370,6 @@ func (n *Node) getLeaderConn() (*grpc.ClientConn, error) {
 // LeaderConn returns current connection to cluster leader or raftselector.ErrIsLeader
 // if current machine is leader.
 func (n *Node) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
-	if atomic.LoadUint32(&n.ticksWithNoLeader) > lostQuorumTimeout {
-		return nil, errLostQuorum
-	}
-
 	cc, err := n.getLeaderConn()
 	if err == nil {
 		return cc, nil
@@ -1381,6 +1377,10 @@ func (n *Node) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
 	if err == raftselector.ErrIsLeader {
 		return nil, err
 	}
+	if atomic.LoadUint32(&n.ticksWithNoLeader) > lostQuorumTimeout {
+		return nil, errLostQuorum
+	}
+
 	ticker := time.NewTicker(1 * time.Second)
 	defer ticker.Stop()
 	for {
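Note: moving the ticksWithNoLeader check below getLeaderConn and the ErrIsLeader short-circuit changes when errLostQuorum can surface. A node that still has a usable leader connection, or is itself the leader, now succeeds regardless of the tick counter; the lost-quorum error only pre-empts the wait-for-leader polling loop. A toy sketch of the reordering, not swarmkit code:

    package main

    import (
        "errors"
        "fmt"
    )

    var errLostQuorum = errors.New("raft cluster lost quorum")

    // leaderConn mirrors the new ordering: try for a live connection first,
    // and consult the staleness heuristic only before committing to a wait.
    func leaderConn(get func() (string, error), lostQuorum func() bool) (string, error) {
        cc, err := get()
        if err == nil {
            return cc, nil // a working connection wins, even with stale ticks
        }
        if lostQuorum() {
            return "", errLostQuorum // fail fast only if we would have to wait
        }
        // ... the real code polls on a ticker for a leader here ...
        return "", errors.New("no leader yet")
    }

    func main() {
        get := func() (string, error) { return "leader:2377", nil }
        stale := func() bool { return true }
        fmt.Println(leaderConn(get, stale)) // leader:2377 <nil>
    }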
@@ -16,6 +16,7 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/boltdb/bolt"
 	"github.com/docker/docker/pkg/plugingetter"
+	metrics "github.com/docker/go-metrics"
 	"github.com/docker/swarmkit/agent"
 	"github.com/docker/swarmkit/agent/exec"
 	"github.com/docker/swarmkit/api"
@@ -41,6 +42,9 @@ const (
 )
 
 var (
+	nodeInfo    metrics.LabeledGauge
+	nodeManager metrics.Gauge
+
 	errNodeStarted    = errors.New("node: already started")
 	errNodeNotStarted = errors.New("node: not started")
 	certDirectory     = "certificates"
@@ -49,6 +53,16 @@ var (
 	ErrInvalidUnlockKey = errors.New("node is locked, and needs a valid unlock key")
 )
 
+func init() {
+	ns := metrics.NewNamespace("swarm", "node", nil)
+	nodeInfo = ns.NewLabeledGauge("info", "Information related to the swarm", "",
+		"swarm_id",
+		"node_id",
+	)
+	nodeManager = ns.NewGauge("manager", "Whether this node is a manager or not", "")
+	metrics.Register(ns)
+}
+
 // Config provides values for a Node.
 type Config struct {
 	// Hostname is the name of host for agent instance.
@@ -346,6 +360,17 @@ func (n *Node) run(ctx context.Context) (err error) {
 	var wg sync.WaitGroup
 	wg.Add(3)
 
+	nodeInfo.WithValues(
+		securityConfig.ClientTLSCreds.Organization(),
+		securityConfig.ClientTLSCreds.NodeID(),
+	).Set(1)
+
+	if n.currentRole() == api.NodeRoleManager {
+		nodeManager.Set(1)
+	} else {
+		nodeManager.Set(0)
+	}
+
 	updates := renewer.Start(ctx)
 	go func() {
 		for certUpdate := range updates {
@@ -357,6 +382,13 @@ func (n *Node) run(ctx context.Context) (err error) {
 			n.role = certUpdate.Role
 			n.roleCond.Broadcast()
 			n.Unlock()
+
+			// Export the new role.
+			if n.currentRole() == api.NodeRoleManager {
+				nodeManager.Set(1)
+			} else {
+				nodeManager.Set(0)
+			}
 		}
 
 		wg.Done()
@@ -1144,7 +1144,7 @@ func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }
 
 var fileDescriptorPlugin = []byte{
 	// 551 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x52, 0xc1, 0x6e, 0xd3, 0x40,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x52, 0xc1, 0x6e, 0xd3, 0x40,
 	0x10, 0xad, 0xd3, 0x36, 0x4d, 0xc6, 0x69, 0x29, 0x2b, 0x54, 0xad, 0x7a, 0xb0, 0xab, 0x46, 0x42,
 	0x41, 0x42, 0xa9, 0xd4, 0x63, 0x6e, 0x94, 0x5c, 0x22, 0x01, 0x45, 0x0e, 0x12, 0x37, 0x2c, 0xd7,
 	0x3b, 0x4d, 0x96, 0x3a, 0x5e, 0x6b, 0x77, 0x4d, 0x0b, 0x27, 0x7e, 0x80, 0x0f, 0xe0, 0xca, 0xd7,
@@ -6,6 +6,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.0
 
 # metrics
 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
+github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
 
 # etcd/raft
 github.com/coreos/etcd ea5389a79f40206170582c1ea076191b8622cb8e https://github.com/aaronlehmann/etcd # for https://github.com/coreos/etcd/pull/7830