Implement the proposal from
https://github.com/docker/docker/issues/24430#issuecomment-233100121
Remove the acceptance policy and secret in favor of an automatically
generated join token that combines the secret, CA hash, and
manager/worker role into a single opaque string.
Add a docker swarm join-token subcommand to inspect and rotate the
tokens.
Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
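
For illustration, the intended workflow looks roughly like this (the address
and token values below are placeholders, not output from a real cluster):

    # On a manager: print the full join command for workers.
    $ docker swarm join-token worker
    To add a worker to this swarm, run the following command:
        docker swarm join \
        --token SWMTKN-1-<cluster-secret>-<role-secret> \
        <manager-addr>:2377

    # Print only the token itself, e.g. for scripting.
    $ docker swarm join-token -q manager

    # Invalidate the current worker token and generate a new one.
    $ docker swarm join-token --rotate worker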
deleted file mode 100644
@@ -1,32 +0,0 @@
-package node
-
-import (
-    "fmt"
-
-    "github.com/docker/docker/api/client"
-    "github.com/docker/docker/cli"
-    "github.com/docker/engine-api/types/swarm"
-    "github.com/spf13/cobra"
-)
-
-func newAcceptCommand(dockerCli *client.DockerCli) *cobra.Command {
-    return &cobra.Command{
-        Use:   "accept NODE [NODE...]",
-        Short: "Accept a node in the swarm",
-        Args:  cli.RequiresMinArgs(1),
-        RunE: func(cmd *cobra.Command, args []string) error {
-            return runAccept(dockerCli, args)
-        },
-    }
-}
-
-func runAccept(dockerCli *client.DockerCli, nodes []string) error {
-    accept := func(node *swarm.Node) error {
-        node.Spec.Membership = swarm.NodeMembershipAccepted
-        return nil
-    }
-    success := func(nodeID string) {
-        fmt.Fprintf(dockerCli.Out(), "Node %s accepted in the swarm.\n", nodeID)
-    }
-    return updateNodes(dockerCli, nodes, accept, success)
-}
@@ -16,7 +16,7 @@ import (
 )
 
 const (
-    listItemFmt = "%s\t%s\t%s\t%s\t%s\t%s\n"
+    listItemFmt = "%s\t%s\t%s\t%s\t%s\n"
 )
 
 type listOptions struct {
@@ -74,11 +74,10 @@ func printTable(out io.Writer, nodes []swarm.Node, info types.Info) {
     // Ignore flushing errors
     defer writer.Flush()
 
-    fmt.Fprintf(writer, listItemFmt, "ID", "HOSTNAME", "MEMBERSHIP", "STATUS", "AVAILABILITY", "MANAGER STATUS")
+    fmt.Fprintf(writer, listItemFmt, "ID", "HOSTNAME", "STATUS", "AVAILABILITY", "MANAGER STATUS")
     for _, node := range nodes {
         name := node.Description.Hostname
         availability := string(node.Spec.Availability)
-        membership := string(node.Spec.Membership)
 
         reachability := ""
         if node.ManagerStatus != nil {
@@ -99,7 +98,6 @@ func printTable(out io.Writer, nodes []swarm.Node, info types.Info) {
             listItemFmt,
             ID,
             name,
-            client.PrettyPrint(membership),
             client.PrettyPrint(string(node.Status.State)),
             client.PrettyPrint(availability),
             client.PrettyPrint(reachability))
@@ -12,7 +12,6 @@ import (
 type nodeOptions struct {
     annotations
     role         string
-    membership   string
     availability string
 }
 
@@ -45,14 +44,6 @@ func (opts *nodeOptions) ToNodeSpec() (swarm.NodeSpec, error) {
         return swarm.NodeSpec{}, fmt.Errorf("invalid role %q, only worker and manager are supported", opts.role)
     }
 
-    switch swarm.NodeMembership(strings.ToLower(opts.membership)) {
-    case swarm.NodeMembershipAccepted:
-        spec.Membership = swarm.NodeMembershipAccepted
-    case "":
-    default:
-        return swarm.NodeSpec{}, fmt.Errorf("invalid membership %q, only accepted is supported", opts.membership)
-    }
-
     switch swarm.NodeAvailability(strings.ToLower(opts.availability)) {
     case swarm.NodeAvailabilityActive:
         spec.Availability = swarm.NodeAvailabilityActive
@@ -27,7 +27,6 @@ func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {
 
     flags := cmd.Flags()
     flags.StringVar(&nodeOpts.role, flagRole, "", "Role of the node (worker/manager)")
-    flags.StringVar(&nodeOpts.membership, flagMembership, "", "Membership of the node (accepted/rejected)")
     flags.StringVar(&nodeOpts.availability, flagAvailability, "", "Availability of the node (active/pause/drain)")
     flags.Var(&nodeOpts.annotations.labels, flagLabelAdd, "Add or update a node label (key=value)")
     labelKeys := opts.NewListOpts(nil)
@@ -76,13 +75,6 @@ func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error {
         }
         spec.Role = swarm.NodeRole(str)
     }
-    if flags.Changed(flagMembership) {
-        str, err := flags.GetString(flagMembership)
-        if err != nil {
-            return err
-        }
-        spec.Membership = swarm.NodeMembership(str)
-    }
     if flags.Changed(flagAvailability) {
         str, err := flags.GetString(flagAvailability)
         if err != nil {
@@ -115,7 +107,6 @@ func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error {
 
 const (
     flagRole         = "role"
-    flagMembership   = "membership"
     flagAvailability = "availability"
     flagLabelAdd     = "label-add"
     flagLabelRemove  = "label-rm"
@@ -506,7 +506,7 @@ func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) {
 
     flags.StringVar(&opts.endpoint.mode, flagEndpointMode, "", "Endpoint mode (vip or dnsrr)")
 
-    flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to Swarm agents")
+    flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents")
 
     flags.StringVar(&opts.logDriver.name, flagLogDriver, "", "Logging driver for service")
     flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options")
@@ -28,14 +28,11 @@ type initOptions struct {
 func newInitCommand(dockerCli *client.DockerCli) *cobra.Command {
     opts := initOptions{
         listenAddr: NewListenAddrOption(),
-        swarmOptions: swarmOptions{
-            autoAccept: NewAutoAcceptOption(),
-        },
     }
 
     cmd := &cobra.Command{
         Use:   "init [OPTIONS]",
-        Short: "Initialize a Swarm",
+        Short: "Initialize a swarm",
         Args:  cli.NoArgs,
         RunE: func(cmd *cobra.Command, args []string) error {
             return runInit(dockerCli, cmd.Flags(), opts)
@@ -53,12 +50,6 @@ func runInit(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts initOptions
     client := dockerCli.Client()
     ctx := context.Background()
 
-    // If no secret was specified, we create a random one
-    if !flags.Changed("secret") {
-        opts.secret = generateRandomSecret()
-        fmt.Fprintf(dockerCli.Out(), "No --secret provided. Generated random secret:\n %s\n\n", opts.secret)
-    }
-
     req := swarm.InitRequest{
         ListenAddr:      opts.listenAddr.String(),
         ForceNewCluster: opts.forceNewCluster,
@@ -72,24 +63,5 @@ func runInit(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts initOptions
 
     fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID)
 
-    // Fetch CAHash and Address from the API
-    info, err := client.Info(ctx)
-    if err != nil {
-        return err
-    }
-
-    node, _, err := client.NodeInspectWithRaw(ctx, nodeID)
-    if err != nil {
-        return err
-    }
-
-    if node.ManagerStatus != nil && info.Swarm.CACertHash != "" {
-        var secretArgs string
-        if opts.secret != "" {
-            secretArgs = "--secret " + opts.secret
-        }
-        fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n docker swarm join %s \\\n --ca-hash %s \\\n %s\n", secretArgs, info.Swarm.CACertHash, node.ManagerStatus.Addr)
-    }
-
-    return nil
+    return printJoinCommand(ctx, dockerCli, nodeID, true, true)
 }
@@ -18,7 +18,7 @@ func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command {
 
     cmd := &cobra.Command{
         Use:   "inspect [OPTIONS]",
-        Short: "Inspect the Swarm",
+        Short: "Inspect the swarm",
         Args:  cli.NoArgs,
         RunE: func(cmd *cobra.Command, args []string) error {
             return runInspect(dockerCli, opts)
@@ -2,6 +2,7 @@ package swarm
 
 import (
     "fmt"
+    "strings"
 
     "github.com/docker/docker/api/client"
     "github.com/docker/docker/cli"
@@ -13,9 +14,7 @@ import (
 type joinOptions struct {
     remote     string
     listenAddr NodeAddrOption
-    manager    bool
-    secret     string
-    CACertHash string
+    token      string
 }
 
 func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command {
@@ -25,7 +24,7 @@ func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command {
 
     cmd := &cobra.Command{
         Use:   "join [OPTIONS] HOST:PORT",
-        Short: "Join a Swarm as a node and/or manager",
+        Short: "Join a swarm as a node and/or manager",
         Args:  cli.ExactArgs(1),
         RunE: func(cmd *cobra.Command, args []string) error {
             opts.remote = args[0]
@@ -35,9 +34,7 @@ func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command {
 
     flags := cmd.Flags()
     flags.Var(&opts.listenAddr, flagListenAddr, "Listen address")
-    flags.BoolVar(&opts.manager, "manager", false, "Try joining as a manager.")
-    flags.StringVar(&opts.secret, flagSecret, "", "Secret for node acceptance")
-    flags.StringVar(&opts.CACertHash, "ca-hash", "", "Hash of the Root Certificate Authority certificate used for trusted join")
+    flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm")
     return cmd
 }
 
@@ -46,20 +43,29 @@ func runJoin(dockerCli *client.DockerCli, opts joinOptions) error {
     ctx := context.Background()
 
     req := swarm.JoinRequest{
-        Manager:     opts.manager,
-        Secret:      opts.secret,
+        JoinToken:   opts.token,
         ListenAddr:  opts.listenAddr.String(),
         RemoteAddrs: []string{opts.remote},
-        CACertHash:  opts.CACertHash,
     }
     err := client.SwarmJoin(ctx, req)
     if err != nil {
         return err
     }
-    if opts.manager {
-        fmt.Fprintln(dockerCli.Out(), "This node joined a Swarm as a manager.")
+
+    info, err := client.Info(ctx)
+    if err != nil {
+        return err
+    }
+
+    _, _, err = client.NodeInspectWithRaw(ctx, info.Swarm.NodeID)
+    if err != nil {
+        // TODO(aaronl): is there a better way to do this?
+        if strings.Contains(err.Error(), "This node is not a swarm manager.") {
+            fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a worker.")
+        }
     } else {
-        fmt.Fprintln(dockerCli.Out(), "This node joined a Swarm as a worker.")
+        fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a manager.")
     }
+
     return nil
 }
new file mode 100644
@@ -0,0 +1,110 @@
+package swarm
+
+import (
+    "errors"
+    "fmt"
+
+    "github.com/spf13/cobra"
+
+    "github.com/docker/docker/api/client"
+    "github.com/docker/docker/cli"
+    "github.com/docker/engine-api/types/swarm"
+    "golang.org/x/net/context"
+)
+
+const (
+    flagRotate = "rotate"
+    flagQuiet  = "quiet"
+)
+
+func newJoinTokenCommand(dockerCli *client.DockerCli) *cobra.Command {
+    var rotate, quiet bool
+
+    cmd := &cobra.Command{
+        Use:   "join-token [-q] [--rotate] (worker|manager)",
+        Short: "Manage join tokens",
+        Args:  cli.ExactArgs(1),
+        RunE: func(cmd *cobra.Command, args []string) error {
+            if args[0] != "worker" && args[0] != "manager" {
+                return errors.New("unknown role " + args[0])
+            }
+
+            client := dockerCli.Client()
+            ctx := context.Background()
+
+            if rotate {
+                var flags swarm.UpdateFlags
+
+                swarm, err := client.SwarmInspect(ctx)
+                if err != nil {
+                    return err
+                }
+
+                if args[0] == "worker" {
+                    flags.RotateWorkerToken = true
+                } else if args[0] == "manager" {
+                    flags.RotateManagerToken = true
+                }
+
+                err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, flags)
+                if err != nil {
+                    return err
+                }
+            }
+
+            swarm, err := client.SwarmInspect(ctx)
+            if err != nil {
+                return err
+            }
+
+            if quiet {
+                if args[0] == "worker" {
+                    fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Worker)
+                } else if args[0] == "manager" {
+                    fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Manager)
+                }
+            } else {
+                info, err := client.Info(ctx)
+                if err != nil {
+                    return err
+                }
+                return printJoinCommand(ctx, dockerCli, info.Swarm.NodeID, args[0] == "worker", args[0] == "manager")
+            }
+            return nil
+        },
+    }
+
+    flags := cmd.Flags()
+    flags.BoolVar(&rotate, flagRotate, false, "Rotate join token")
+    flags.BoolVarP(&quiet, flagQuiet, "q", false, "Only display token")
+
+    return cmd
+}
+
+func printJoinCommand(ctx context.Context, dockerCli *client.DockerCli, nodeID string, worker bool, manager bool) error {
+    client := dockerCli.Client()
+
+    swarm, err := client.SwarmInspect(ctx)
+    if err != nil {
+        return err
+    }
+
+    node, _, err := client.NodeInspectWithRaw(ctx, nodeID)
+    if err != nil {
+        return err
+    }
+
+    if node.ManagerStatus != nil {
+        if worker {
+            fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n docker swarm join \\\n --token %s \\\n %s\n", swarm.JoinTokens.Worker, node.ManagerStatus.Addr)
+        }
+        if manager {
+            if worker {
+                fmt.Fprintln(dockerCli.Out())
+            }
+            fmt.Fprintf(dockerCli.Out(), "To add a manager to this swarm, run the following command:\n docker swarm join \\\n --token %s \\\n %s\n", swarm.JoinTokens.Manager, node.ManagerStatus.Addr)
+        }
+    }
+
+    return nil
+}
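The command above is a thin wrapper over two API calls: an optional swarm
update carrying rotation flags, followed by a fresh inspect to read the
(possibly regenerated) tokens. A minimal sketch of the same sequence against
the engine-api client directly, assuming "cli" is an already-constructed
client and "ctx" a context (error handling collapsed to "return err"):

    sw, err := cli.SwarmInspect(ctx)
    if err != nil {
        return err
    }
    // Rotation is requested via update flags, not by editing the spec.
    if err := cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{RotateWorkerToken: true}); err != nil {
        return err
    }
    // Re-inspect to observe the newly generated worker token.
    sw, err = cli.SwarmInspect(ctx)
    if err != nil {
        return err
    }
    fmt.Println(sw.JoinTokens.Worker)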
@@ -19,7 +19,7 @@ func newLeaveCommand(dockerCli *client.DockerCli) *cobra.Command {
 
     cmd := &cobra.Command{
         Use:   "leave [OPTIONS]",
-        Short: "Leave a Swarm",
+        Short: "Leave a swarm",
         Args:  cli.NoArgs,
         RunE: func(cmd *cobra.Command, args []string) error {
             return runLeave(dockerCli, opts)
@@ -15,29 +15,15 @@ import (
 const (
     defaultListenAddr = "0.0.0.0:2377"
 
-    worker  = "WORKER"
-    manager = "MANAGER"
-    none    = "NONE"
-
-    flagAutoAccept          = "auto-accept"
     flagCertExpiry          = "cert-expiry"
     flagDispatcherHeartbeat = "dispatcher-heartbeat"
     flagListenAddr          = "listen-addr"
-    flagSecret              = "secret"
+    flagToken               = "token"
     flagTaskHistoryLimit    = "task-history-limit"
     flagExternalCA          = "external-ca"
 )
 
-var (
-    defaultPolicies = []swarm.Policy{
-        {Role: worker, Autoaccept: true},
-        {Role: manager, Autoaccept: false},
-    }
-)
-
 type swarmOptions struct {
-    autoAccept          AutoAcceptOption
-    secret              string
     taskHistoryLimit    int64
     dispatcherHeartbeat time.Duration
     nodeCertExpiry      time.Duration
@@ -84,71 +70,6 @@ func NewListenAddrOption() NodeAddrOption {
     return NewNodeAddrOption(defaultListenAddr)
 }
 
-// AutoAcceptOption is a value type for auto-accept policy
-type AutoAcceptOption struct {
-    values map[string]struct{}
-}
-
-// String prints a string representation of this option
-func (o *AutoAcceptOption) String() string {
-    keys := []string{}
-    for key := range o.values {
-        keys = append(keys, fmt.Sprintf("%s=true", strings.ToLower(key)))
-    }
-    return strings.Join(keys, ", ")
-}
-
-// Set sets a new value on this option
-func (o *AutoAcceptOption) Set(acceptValues string) error {
-    for _, value := range strings.Split(acceptValues, ",") {
-        value = strings.ToUpper(value)
-        switch value {
-        case none, worker, manager:
-            o.values[value] = struct{}{}
-        default:
-            return fmt.Errorf("must be one / combination of %s, %s; or NONE", worker, manager)
-        }
-    }
-    // NONE must stand alone, so if any non-NONE setting exist with it, error with conflict
-    if o.isPresent(none) && len(o.values) > 1 {
-        return fmt.Errorf("value NONE cannot be specified alongside other node types")
-    }
-    return nil
-}
-
-// Type returns the type of this option
-func (o *AutoAcceptOption) Type() string {
-    return "auto-accept"
-}
-
-// Policies returns a representation of this option for the api
-func (o *AutoAcceptOption) Policies(secret *string) []swarm.Policy {
-    policies := []swarm.Policy{}
-    for _, p := range defaultPolicies {
-        if len(o.values) != 0 {
-            if _, ok := o.values[string(p.Role)]; ok {
-                p.Autoaccept = true
-            } else {
-                p.Autoaccept = false
-            }
-        }
-        p.Secret = secret
-        policies = append(policies, p)
-    }
-    return policies
-}
-
-// isPresent returns whether the key exists in the set or not
-func (o *AutoAcceptOption) isPresent(key string) bool {
-    _, c := o.values[key]
-    return c
-}
-
-// NewAutoAcceptOption returns a new auto-accept option
-func NewAutoAcceptOption() AutoAcceptOption {
-    return AutoAcceptOption{values: make(map[string]struct{})}
-}
-
 // ExternalCAOption is a Value type for parsing external CA specifications.
 type ExternalCAOption struct {
     values []*swarm.ExternalCA
@@ -239,8 +160,6 @@ func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) {
 }
 
 func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) {
-    flags.Var(&opts.autoAccept, flagAutoAccept, "Auto acceptance policy (worker, manager or none)")
-    flags.StringVar(&opts.secret, flagSecret, "", "Set secret value needed to join a cluster")
     flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 10, "Task history retention limit")
     flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period")
     flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates")
@@ -249,11 +168,6 @@ func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) {
 
 func (opts *swarmOptions) ToSpec() swarm.Spec {
     spec := swarm.Spec{}
-    if opts.secret != "" {
-        spec.AcceptancePolicy.Policies = opts.autoAccept.Policies(&opts.secret)
-    } else {
-        spec.AcceptancePolicy.Policies = opts.autoAccept.Policies(nil)
-    }
     spec.Orchestration.TaskHistoryRetentionLimit = opts.taskHistoryLimit
     spec.Dispatcher.HeartbeatPeriod = uint64(opts.dispatcherHeartbeat.Nanoseconds())
     spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry
@@ -4,7 +4,6 @@ import (
     "testing"
 
     "github.com/docker/docker/pkg/testutil/assert"
-    "github.com/docker/engine-api/types/swarm"
 )
 
 func TestNodeAddrOptionSetHostAndPort(t *testing.T) {
@@ -36,101 +35,3 @@ func TestNodeAddrOptionSetInvalidFormat(t *testing.T) {
     opt := NewListenAddrOption()
     assert.Error(t, opt.Set("http://localhost:4545"), "Invalid")
 }
-
-func TestAutoAcceptOptionSetWorker(t *testing.T) {
-    opt := NewAutoAcceptOption()
-    assert.NilError(t, opt.Set("worker"))
-    assert.Equal(t, opt.isPresent(worker), true)
-}
-
-func TestAutoAcceptOptionSetManager(t *testing.T) {
-    opt := NewAutoAcceptOption()
-    assert.NilError(t, opt.Set("manager"))
-    assert.Equal(t, opt.isPresent(manager), true)
-}
-
-func TestAutoAcceptOptionSetInvalid(t *testing.T) {
-    opt := NewAutoAcceptOption()
-    assert.Error(t, opt.Set("bogus"), "must be one / combination")
-}
-
-func TestAutoAcceptOptionSetEmpty(t *testing.T) {
-    opt := NewAutoAcceptOption()
-    assert.Error(t, opt.Set(""), "must be one / combination")
-}
-
-func TestAutoAcceptOptionSetNone(t *testing.T) {
-    opt := NewAutoAcceptOption()
-    assert.NilError(t, opt.Set("none"))
-    assert.Equal(t, opt.isPresent(manager), false)
-    assert.Equal(t, opt.isPresent(worker), false)
-}
-
-func TestAutoAcceptOptionSetTwo(t *testing.T) {
-    opt := NewAutoAcceptOption()
-    assert.NilError(t, opt.Set("worker,manager"))
-    assert.Equal(t, opt.isPresent(manager), true)
-    assert.Equal(t, opt.isPresent(worker), true)
-}
-
-func TestAutoAcceptOptionSetConflict(t *testing.T) {
-    opt := NewAutoAcceptOption()
-    assert.Error(t, opt.Set("none,manager"), "value NONE cannot be specified alongside other node types")
-
-    opt = NewAutoAcceptOption()
-    assert.Error(t, opt.Set("none,worker"), "value NONE cannot be specified alongside other node types")
-
-    opt = NewAutoAcceptOption()
-    assert.Error(t, opt.Set("worker,none,manager"), "value NONE cannot be specified alongside other node types")
-
-    opt = NewAutoAcceptOption()
-    assert.Error(t, opt.Set("worker,manager,none"), "value NONE cannot be specified alongside other node types")
-}
-
-func TestAutoAcceptOptionPoliciesDefault(t *testing.T) {
-    opt := NewAutoAcceptOption()
-    secret := "thesecret"
-
-    policies := opt.Policies(&secret)
-    assert.Equal(t, len(policies), 2)
-    assert.Equal(t, policies[0], swarm.Policy{
-        Role:       worker,
-        Autoaccept: true,
-        Secret:     &secret,
-    })
-    assert.Equal(t, policies[1], swarm.Policy{
-        Role:       manager,
-        Autoaccept: false,
-        Secret:     &secret,
-    })
-}
-
-func TestAutoAcceptOptionPoliciesWithManager(t *testing.T) {
-    opt := NewAutoAcceptOption()
-    secret := "thesecret"
-
-    assert.NilError(t, opt.Set("manager"))
-
-    policies := opt.Policies(&secret)
-    assert.Equal(t, len(policies), 2)
-    assert.Equal(t, policies[0], swarm.Policy{
-        Role:       worker,
-        Autoaccept: false,
-        Secret:     &secret,
-    })
-    assert.Equal(t, policies[1], swarm.Policy{
-        Role:       manager,
-        Autoaccept: true,
-        Secret:     &secret,
-    })
-}
-
-func TestAutoAcceptOptionString(t *testing.T) {
-    opt := NewAutoAcceptOption()
-    assert.NilError(t, opt.Set("manager"))
-    assert.NilError(t, opt.Set("worker"))
-
-    repr := opt.String()
-    assert.Contains(t, repr, "worker=true")
-    assert.Contains(t, repr, "manager=true")
-}
deleted file mode 100644
@@ -1,19 +0,0 @@
-package swarm
-
-import (
-    cryptorand "crypto/rand"
-    "fmt"
-    "math/big"
-)
-
-func generateRandomSecret() string {
-    var secretBytes [generatedSecretEntropyBytes]byte
-
-    if _, err := cryptorand.Read(secretBytes[:]); err != nil {
-        panic(fmt.Errorf("failed to read random bytes: %v", err))
-    }
-
-    var nn big.Int
-    nn.SetBytes(secretBytes[:])
-    return fmt.Sprintf("%0[1]*s", maxGeneratedSecretLength, nn.Text(generatedSecretBase))
-}
@@ -13,11 +13,11 @@ import (
 )
 
 func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {
-    opts := swarmOptions{autoAccept: NewAutoAcceptOption()}
+    opts := swarmOptions{}
 
     cmd := &cobra.Command{
         Use:   "update [OPTIONS]",
-        Short: "Update the Swarm",
+        Short: "Update the swarm",
         Args:  cli.NoArgs,
         RunE: func(cmd *cobra.Command, args []string) error {
             return runUpdate(dockerCli, cmd.Flags(), opts)
@@ -32,6 +32,8 @@ func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts swarmOptions) error {
     client := dockerCli.Client()
     ctx := context.Background()
 
+    var updateFlags swarm.UpdateFlags
+
     swarm, err := client.SwarmInspect(ctx)
     if err != nil {
         return err
@@ -42,7 +44,7 @@ func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts swarmOptions) error {
         return err
     }
 
-    err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec)
+    err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, updateFlags)
     if err != nil {
         return err
     }
@@ -55,21 +57,6 @@ func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts swarmOptions) error {
 func mergeSwarm(swarm *swarm.Swarm, flags *pflag.FlagSet) error {
     spec := &swarm.Spec
 
-    if flags.Changed(flagAutoAccept) {
-        value := flags.Lookup(flagAutoAccept).Value.(*AutoAcceptOption)
-        spec.AcceptancePolicy.Policies = value.Policies(nil)
-    }
-
-    var psecret *string
-    if flags.Changed(flagSecret) {
-        secret, _ := flags.GetString(flagSecret)
-        psecret = &secret
-    }
-
-    for i := range spec.AcceptancePolicy.Policies {
-        spec.AcceptancePolicy.Policies[i].Secret = psecret
-    }
-
     if flags.Changed(flagTaskHistoryLimit) {
         spec.Orchestration.TaskHistoryRetentionLimit, _ = flags.GetInt64(flagTaskHistoryLimit)
     }
@@ -85,7 +85,6 @@ func runInfo(dockerCli *client.DockerCli) error {
         if info.Swarm.ControlAvailable {
             fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers)
             fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes)
-            ioutils.FprintfIfNotEmpty(dockerCli.Out(), " CA Certificate Hash: %s\n", info.Swarm.CACertHash)
         }
     }
 
@@ -11,7 +11,7 @@ type Backend interface {
     Join(req types.JoinRequest) error
     Leave(force bool) error
     Inspect() (types.Swarm, error)
-    Update(uint64, types.Spec) error
+    Update(uint64, types.Spec, types.UpdateFlags) error
     GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
     GetService(string) (types.Service, error)
     CreateService(types.ServiceSpec, string) (string, error)
@@ -66,7 +66,15 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
         return fmt.Errorf("Invalid swarm version '%s': %s", rawVersion, err.Error())
     }
 
-    if err := sr.backend.Update(version, swarm); err != nil {
+    var flags types.UpdateFlags
+    if r.URL.Query().Get("rotate_worker_token") == "true" {
+        flags.RotateWorkerToken = true
+    }
+    if r.URL.Query().Get("rotate_manager_token") == "true" {
+        flags.RotateManagerToken = true
+    }
+
+    if err := sr.backend.Update(version, swarm, flags); err != nil {
         logrus.Errorf("Error configuring swarm: %v", err)
         return err
     }
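On the wire, the rotation flags become query parameters on the existing
swarm update endpoint, next to the mandatory "version" parameter. A
hypothetical request against a local daemon socket (the version number and
the spec.json payload are made up for illustration) might look like:

    $ curl --unix-socket /var/run/docker.sock \
        -X POST \
        -H "Content-Type: application/json" \
        -d @spec.json \
        "http://localhost/swarm/update?version=42&rotate_worker_token=true"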
@@ -1087,7 +1087,7 @@ __docker_service_subcommand() {
         "($help)--name=[Service name]:name: "
         "($help)*--network=[Network attachments]:network: "
         "($help)*"{-p=,--publish=}"[Publish a port as a node port]:port: "
-        "($help)--registry-auth[Send registry authentication details to Swarm agents]"
+        "($help)--registry-auth[Send registry authentication details to swarm agents]"
         "($help)--replicas=[Number of tasks]:replicas: "
         "($help)--reserve-cpu=[Reserve CPUs]:value: "
         "($help)--reserve-memory=[Reserve Memory]:value: "
@@ -1185,11 +1185,11 @@ __docker_service_subcommand() {
 __docker_swarm_commands() {
     local -a _docker_swarm_subcommands
     _docker_swarm_subcommands=(
-        "init:Initialize a Swarm"
-        "inspect:Inspect the Swarm"
-        "join:Join a Swarm as a node and/or manager"
-        "leave:Leave a Swarm"
-        "update:Update the Swarm"
+        "init:Initialize a swarm"
+        "inspect:Inspect the swarm"
+        "join:Join a swarm as a node and/or manager"
+        "leave:Leave a swarm"
+        "update:Update the swarm"
     )
     _describe -t docker-swarm-commands "docker swarm command" _docker_swarm_subcommands
 }
@@ -13,7 +13,6 @@ import (
     "google.golang.org/grpc"
 
     "github.com/Sirupsen/logrus"
-    "github.com/docker/distribution/digest"
     "github.com/docker/docker/daemon/cluster/convert"
     executorpkg "github.com/docker/docker/daemon/cluster/executor"
     "github.com/docker/docker/daemon/cluster/executor/container"
@@ -42,16 +41,16 @@ const (
 )
 
 // ErrNoSwarm is returned on leaving a cluster that was never initialized
-var ErrNoSwarm = fmt.Errorf("This node is not part of Swarm")
+var ErrNoSwarm = fmt.Errorf("This node is not part of swarm")
 
 // ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated
-var ErrSwarmExists = fmt.Errorf("This node is already part of a Swarm cluster. Use \"docker swarm leave\" to leave this cluster and join another one.")
+var ErrSwarmExists = fmt.Errorf("This node is already part of a swarm cluster. Use \"docker swarm leave\" to leave this cluster and join another one.")
 
 // ErrPendingSwarmExists is returned on initialize or join request for a cluster that is already processing a similar request but has not succeeded yet.
 var ErrPendingSwarmExists = fmt.Errorf("This node is processing an existing join request that has not succeeded yet. Use \"docker swarm leave\" to cancel the current request.")
 
 // ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached.
-var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. Attempt to join the cluster will continue in the background. Use \"docker info\" command to see the current Swarm status of your node.")
+var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. Attempt to join the cluster will continue in the background. Use \"docker info\" command to see the current swarm status of your node.")
 
 // defaultSpec contains some sane defaults if cluster options are missing on init
 var defaultSpec = types.Spec{
@@ -127,7 +126,7 @@ func New(config Config) (*Cluster, error) {
         return nil, err
     }
 
-    n, err := c.startNewNode(false, st.ListenAddr, "", "", "", false)
+    n, err := c.startNewNode(false, st.ListenAddr, "", "")
     if err != nil {
         return nil, err
     }
@@ -196,7 +195,7 @@ func (c *Cluster) reconnectOnFailure(n *node) {
             return
         }
         var err error
-        n, err = c.startNewNode(false, c.listenAddr, c.getRemoteAddress(), "", "", false)
+        n, err = c.startNewNode(false, c.listenAddr, c.getRemoteAddress(), "")
         if err != nil {
             c.err = err
             close(n.done)
@@ -205,7 +204,7 @@ func (c *Cluster) reconnectOnFailure(n *node) {
     }
 }
 
-func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, secret, cahash string, ismanager bool) (*node, error) {
+func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, joinToken string) (*node, error) {
     if err := c.config.Backend.IsSwarmCompatible(); err != nil {
         return nil, err
     }
@@ -219,12 +218,10 @@ func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, secret, cahash string, ismanager bool) (*node, error) {
         ListenRemoteAPI: listenAddr,
         JoinAddr:        joinAddr,
         StateDir:        c.root,
-        CAHash:          cahash,
-        Secret:          secret,
+        JoinToken:       joinToken,
         Executor:        container.NewExecutor(c.config.Backend),
         HeartbeatTick:   1,
         ElectionTick:    3,
-        IsManager:       ismanager,
     })
     if err != nil {
         return nil, err
@@ -291,7 +288,7 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
     if node := c.node; node != nil {
         if !req.ForceNewCluster {
             c.Unlock()
-            return "", errSwarmExists(node)
+            return "", ErrSwarmExists
         }
         if err := c.stopNode(); err != nil {
             c.Unlock()
@@ -305,7 +302,7 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
     }
 
     // todo: check current state existing
-    n, err := c.startNewNode(req.ForceNewCluster, req.ListenAddr, "", "", "", false)
+    n, err := c.startNewNode(req.ForceNewCluster, req.ListenAddr, "", "")
     if err != nil {
         c.Unlock()
         return "", err
@@ -336,40 +333,32 @@ func (c *Cluster) Join(req types.JoinRequest) error {
     c.Lock()
     if node := c.node; node != nil {
         c.Unlock()
-        return errSwarmExists(node)
+        return ErrSwarmExists
     }
     if err := validateAndSanitizeJoinRequest(&req); err != nil {
         c.Unlock()
         return err
     }
     // todo: check current state existing
-    n, err := c.startNewNode(false, req.ListenAddr, req.RemoteAddrs[0], req.Secret, req.CACertHash, req.Manager)
+    n, err := c.startNewNode(false, req.ListenAddr, req.RemoteAddrs[0], req.JoinToken)
     if err != nil {
         c.Unlock()
         return err
     }
     c.Unlock()
 
-    certificateRequested := n.CertificateRequested()
-    for {
-        select {
-        case <-certificateRequested:
-            if n.NodeMembership() == swarmapi.NodeMembershipPending {
-                return fmt.Errorf("Your node is in the process of joining the cluster but needs to be accepted by existing cluster member.\nTo accept this node into cluster run \"docker node accept %v\" in an existing cluster manager. Use \"docker info\" command to see the current Swarm status of your node.", n.NodeID())
-            }
-            certificateRequested = nil
-        case <-time.After(swarmConnectTimeout):
-            // attempt to connect will continue in background, also reconnecting
-            go c.reconnectOnFailure(n)
-            return ErrSwarmJoinTimeoutReached
-        case <-n.Ready():
-            go c.reconnectOnFailure(n)
-            return nil
-        case <-n.done:
-            c.RLock()
-            defer c.RUnlock()
-            return c.err
-        }
+    select {
+    case <-time.After(swarmConnectTimeout):
+        // attempt to connect will continue in background, also reconnecting
+        go c.reconnectOnFailure(n)
+        return ErrSwarmJoinTimeoutReached
+    case <-n.Ready():
+        go c.reconnectOnFailure(n)
+        return nil
+    case <-n.done:
+        c.RLock()
+        defer c.RUnlock()
+        return c.err
     }
 }
 
@@ -489,7 +478,7 @@ func (c *Cluster) Inspect() (types.Swarm, error) {
 }
 
 // Update updates configuration of a managed swarm cluster.
-func (c *Cluster) Update(version uint64, spec types.Spec) error {
+func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
     c.RLock()
     defer c.RUnlock()
 
@@ -505,7 +494,7 @@ func (c *Cluster) Update(version uint64, spec types.Spec) error {
         return err
     }
 
-    swarmSpec, err := convert.SwarmSpecToGRPCandMerge(spec, &swarm.Spec)
+    swarmSpec, err := convert.SwarmSpecToGRPC(spec)
     if err != nil {
         return err
     }
@@ -518,6 +507,10 @@ func (c *Cluster) Update(version uint64, spec types.Spec) error {
             ClusterVersion: &swarmapi.Version{
                 Index: version,
             },
+            Rotation: swarmapi.JoinTokenRotation{
+                RotateWorkerToken:  flags.RotateWorkerToken,
+                RotateManagerToken: flags.RotateManagerToken,
+            },
         },
     )
     return err
@@ -611,10 +604,6 @@ func (c *Cluster) Info() types.Info {
             }
         }
     }
-
-    if swarm, err := getSwarm(ctx, c.client); err == nil && swarm != nil {
-        info.CACertHash = swarm.RootCA.CACertHash
-    }
 }
 
 if c.node != nil {
@@ -636,12 +625,12 @@ func (c *Cluster) isActiveManager() bool {
 // Call with read lock.
 func (c *Cluster) errNoManager() error {
     if c.node == nil {
-        return fmt.Errorf("This node is not a Swarm manager. Use \"docker swarm init\" or \"docker swarm join --manager\" to connect this node to Swarm and try again.")
+        return fmt.Errorf("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join --manager\" to connect this node to swarm and try again.")
     }
     if c.node.Manager() != nil {
-        return fmt.Errorf("This node is not a Swarm manager. Manager is being prepared or has trouble connecting to the cluster.")
+        return fmt.Errorf("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.")
     }
-    return fmt.Errorf("This node is not a Swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.")
+    return fmt.Errorf("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.")
 }
 
 // GetServices returns all services of a managed swarm cluster.
@@ -1219,11 +1208,6 @@ func validateAndSanitizeJoinRequest(req *types.JoinRequest) error {
             return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err)
         }
     }
-    if req.CACertHash != "" {
-        if _, err := digest.ParseDigest(req.CACertHash); err != nil {
-            return fmt.Errorf("invalid CACertHash %q, %v", req.CACertHash, err)
-        }
-    }
     return nil
 }
 
@@ -1238,13 +1222,6 @@ func validateAddr(addr string) (string, error) {
     return strings.TrimPrefix(newaddr, "tcp://"), nil
 }
 
-func errSwarmExists(node *node) error {
-    if node.NodeMembership() != swarmapi.NodeMembershipAccepted {
-        return ErrPendingSwarmExists
-    }
-    return ErrSwarmExists
-}
-
 func initClusterSpec(node *node, spec types.Spec) error {
     ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
     for conn := range node.ListenControlSocket(ctx) {
@@ -1269,7 +1246,7 @@ func initClusterSpec(node *node, spec types.Spec) error {
         cluster = lcr.Clusters[0]
         break
     }
-    newspec, err := convert.SwarmSpecToGRPCandMerge(spec, &cluster.Spec)
+    newspec, err := convert.SwarmSpecToGRPC(spec)
     if err != nil {
         return fmt.Errorf("error updating cluster settings: %v", err)
     }
@@ -15,7 +15,6 @@ func NodeFromGRPC(n swarmapi.Node) types.Node {
     ID: n.ID,
     Spec: types.NodeSpec{
         Role:         types.NodeRole(strings.ToLower(n.Spec.Role.String())),
-        Membership:   types.NodeMembership(strings.ToLower(n.Spec.Membership.String())),
         Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())),
     },
     Status: types.NodeStatus{
@@ -79,12 +78,6 @@ func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) {
         return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role)
     }
 
-    if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(string(s.Membership))]; ok {
-        spec.Membership = swarmapi.NodeSpec_Membership(membership)
-    } else {
-        return swarmapi.NodeSpec{}, fmt.Errorf("invalid Membership: %q", s.Membership)
-    }
-
     if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok {
         spec.Availability = swarmapi.NodeSpec_Availability(availability)
     } else {
@@ -5,8 +5,6 @@ import (
     "strings"
     "time"
 
-    "golang.org/x/crypto/bcrypt"
-
     types "github.com/docker/engine-api/types/swarm"
     swarmapi "github.com/docker/swarmkit/api"
     "github.com/docker/swarmkit/protobuf/ptypes"
@@ -28,6 +26,10 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
             ElectionTick:  c.Spec.Raft.ElectionTick,
         },
     },
+    JoinTokens: types.JoinTokens{
+        Worker:  c.RootCA.JoinTokens.Worker,
+        Manager: c.RootCA.JoinTokens.Manager,
+    },
 }
 
 heartbeatPeriod, _ := ptypes.Duration(c.Spec.Dispatcher.HeartbeatPeriod)
@@ -52,23 +54,11 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
     swarm.Spec.Name = c.Spec.Annotations.Name
     swarm.Spec.Labels = c.Spec.Annotations.Labels
 
-    for _, policy := range c.Spec.AcceptancePolicy.Policies {
-        p := types.Policy{
-            Role:       types.NodeRole(strings.ToLower(policy.Role.String())),
-            Autoaccept: policy.Autoaccept,
-        }
-        if policy.Secret != nil {
-            secret := string(policy.Secret.Data)
-            p.Secret = &secret
-        }
-        swarm.Spec.AcceptancePolicy.Policies = append(swarm.Spec.AcceptancePolicy.Policies, p)
-    }
-
     return swarm
 }
 
-// SwarmSpecToGRPCandMerge converts a Spec to a grpc ClusterSpec and merge AcceptancePolicy from an existing grpc ClusterSpec if provided.
-func SwarmSpecToGRPCandMerge(s types.Spec, existingSpec *swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) {
+// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec.
+func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) {
     spec := swarmapi.ClusterSpec{
         Annotations: swarmapi.Annotations{
             Name: s.Name,
@@ -104,63 +94,5 @@ func SwarmSpecToGRPCandMerge(s types.Spec, existingSpec *swarmapi.ClusterSpec) (
         })
     }
 
-    if err := SwarmSpecUpdateAcceptancePolicy(&spec, s.AcceptancePolicy, existingSpec); err != nil {
-        return swarmapi.ClusterSpec{}, err
-    }
-
     return spec, nil
 }
-
-// SwarmSpecUpdateAcceptancePolicy updates a grpc ClusterSpec using AcceptancePolicy.
-func SwarmSpecUpdateAcceptancePolicy(spec *swarmapi.ClusterSpec, acceptancePolicy types.AcceptancePolicy, oldSpec *swarmapi.ClusterSpec) error {
-    spec.AcceptancePolicy.Policies = nil
-    hashs := make(map[string][]byte)
-
-    for _, p := range acceptancePolicy.Policies {
-        role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(p.Role))]
-        if !ok {
-            return fmt.Errorf("invalid Role: %q", p.Role)
-        }
-
-        policy := &swarmapi.AcceptancePolicy_RoleAdmissionPolicy{
-            Role:       swarmapi.NodeRole(role),
-            Autoaccept: p.Autoaccept,
-        }
-
-        if p.Secret != nil {
-            if *p.Secret == "" { // if provided secret is empty, it means erase previous secret.
-                policy.Secret = nil
-            } else { // if provided secret is not empty, we generate a new one.
-                hashPwd, ok := hashs[*p.Secret]
-                if !ok {
-                    hashPwd, _ = bcrypt.GenerateFromPassword([]byte(*p.Secret), 0)
-                    hashs[*p.Secret] = hashPwd
-                }
-                policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_Secret{
-                    Data: hashPwd,
-                    Alg:  "bcrypt",
-                }
-            }
-        } else if oldSecret := getOldSecret(oldSpec, policy.Role); oldSecret != nil { // else use the old one.
-            policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_Secret{
-                Data: oldSecret.Data,
-                Alg:  oldSecret.Alg,
-            }
-        }
-
-        spec.AcceptancePolicy.Policies = append(spec.AcceptancePolicy.Policies, policy)
-    }
-    return nil
-}
-
-func getOldSecret(oldSpec *swarmapi.ClusterSpec, role swarmapi.NodeRole) *swarmapi.AcceptancePolicy_RoleAdmissionPolicy_Secret {
-    if oldSpec == nil {
-        return nil
-    }
-    for _, p := range oldSpec.AcceptancePolicy.Policies {
-        if p.Role == role {
-            return p.Secret
-        }
-    }
-    return nil
-}
@@ -3351,7 +3351,6 @@ List nodes |
| 3351 | 3351 |
"UpdatedAt": "2016-06-07T20:31:11.999868824Z", |
| 3352 | 3352 |
"Spec": {
|
| 3353 | 3353 |
"Role": "MANAGER", |
| 3354 |
- "Membership": "ACCEPTED", |
|
| 3355 | 3354 |
"Availability": "ACTIVE" |
| 3356 | 3355 |
}, |
| 3357 | 3356 |
"Description": {
|
| ... | ... |
@@ -3481,7 +3480,6 @@ Return low-level information on the node `id` |
| 3481 | 3481 |
"UpdatedAt": "2016-06-07T20:31:11.999868824Z", |
| 3482 | 3482 |
"Spec": {
|
| 3483 | 3483 |
"Role": "MANAGER", |
| 3484 |
- "Membership": "ACCEPTED", |
|
| 3485 | 3484 |
"Availability": "ACTIVE" |
| 3486 | 3485 |
}, |
| 3487 | 3486 |
"Description": {
|
| ... | ... |
@@ -3595,18 +3593,6 @@ Initialize a new Swarm |
| 3595 | 3595 |
"ListenAddr": "0.0.0.0:4500", |
| 3596 | 3596 |
"ForceNewCluster": false, |
| 3597 | 3597 |
"Spec": {
|
| 3598 |
- "AcceptancePolicy": {
|
|
| 3599 |
- "Policies": [ |
|
| 3600 |
- {
|
|
| 3601 |
- "Role": "MANAGER", |
|
| 3602 |
- "Autoaccept": false |
|
| 3603 |
- }, |
|
| 3604 |
- {
|
|
| 3605 |
- "Role": "WORKER", |
|
| 3606 |
- "Autoaccept": true |
|
| 3607 |
- } |
|
| 3608 |
- ] |
|
| 3609 |
- }, |
|
| 3610 | 3598 |
"Orchestration": {},
|
| 3611 | 3599 |
"Raft": {},
|
| 3612 | 3600 |
"Dispatcher": {},
|
| ... | ... |
@@ -3676,9 +3662,7 @@ Join an existing new Swarm |
| 3676 | 3676 |
{
|
| 3677 | 3677 |
"ListenAddr": "0.0.0.0:4500", |
| 3678 | 3678 |
"RemoteAddrs": ["node1:4500"], |
| 3679 |
- "Secret": "", |
|
| 3680 |
- "CACertHash": "", |
|
| 3681 |
- "Manager": false |
|
| 3679 |
+ "JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" |
|
| 3682 | 3680 |
} |
| 3683 | 3681 |
|
| 3684 | 3682 |
**Example response**: |
| ... | ... |
@@ -3698,9 +3682,7 @@ JSON Parameters: |
| 3698 | 3698 |
- **ListenAddr** – Listen address used for inter-manager communication if the node gets promoted to |
| 3699 | 3699 |
manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). |
| 3700 | 3700 |
- **RemoteAddr** – Address of any manager node already participating in the Swarm to join. |
| 3701 |
-- **Secret** – Secret token for joining this Swarm. |
|
| 3702 |
-- **CACertHash** – Optional hash of the root CA to avoid relying on trust on first use. |
|
| 3703 |
-- **Manager** – Directly join as a manager (only for a Swarm configured to autoaccept managers). |
|
| 3701 |
+- **JoinToken** – Secret token for joining this Swarm. |
|
| 3704 | 3702 |
|
| 3705 | 3703 |
### Leave a Swarm |
| 3706 | 3704 |
|
| ... | ... |
@@ -3741,18 +3723,6 @@ Update a Swarm |
| 3741 | 3741 |
|
| 3742 | 3742 |
{
|
| 3743 | 3743 |
"Name": "default", |
| 3744 |
- "AcceptancePolicy": {
|
|
| 3745 |
- "Policies": [ |
|
| 3746 |
- {
|
|
| 3747 |
- "Role": "WORKER", |
|
| 3748 |
- "Autoaccept": false |
|
| 3749 |
- }, |
|
| 3750 |
- {
|
|
| 3751 |
- "Role": "MANAGER", |
|
| 3752 |
- "Autoaccept": false |
|
| 3753 |
- } |
|
| 3754 |
- ] |
|
| 3755 |
- }, |
|
| 3756 | 3744 |
"Orchestration": {
|
| 3757 | 3745 |
"TaskHistoryRetentionLimit": 10 |
| 3758 | 3746 |
}, |
| ... | ... |
@@ -3767,6 +3737,10 @@ Update a Swarm |
| 3767 | 3767 |
}, |
| 3768 | 3768 |
"CAConfig": {
|
| 3769 | 3769 |
"NodeCertExpiry": 7776000000000000 |
| 3770 |
+ }, |
|
| 3771 |
+ "JoinTokens": {
|
|
| 3772 |
+ "Worker": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx", |
|
| 3773 |
+ "Manager": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" |
|
| 3770 | 3774 |
} |
| 3771 | 3775 |
} |
| 3772 | 3776 |
|
| ... | ... |
@@ -3777,6 +3751,13 @@ Update a Swarm |
| 3777 | 3777 |
Content-Length: 0 |
| 3778 | 3778 |
Content-Type: text/plain; charset=utf-8 |
| 3779 | 3779 |
|
| 3780 |
+**Query parameters**: |
|
| 3781 |
+ |
|
| 3782 |
+- **version** – The version number of the swarm object being updated. This is |
|
| 3783 |
+ required to avoid conflicting writes. |
|
| 3784 |
+- **rotate_worker_token** - Set to `true` to rotate the worker join token. |
|
| 3785 |
+- **rotate_manager_token** - Set to `true` to rotate the manager join token. |
|
| 3786 |
+ |
|
| 3780 | 3787 |
**Status codes**: |
| 3781 | 3788 |
|
| 3782 | 3789 |
- **200** – no error |
| ... | ... |
@@ -3785,11 +3766,6 @@ Update a Swarm |
| 3785 | 3785 |
|
| 3786 | 3786 |
JSON Parameters: |
| 3787 | 3787 |
|
| 3788 |
-- **Policies** – An array of acceptance policies. |
|
| 3789 |
- - **Role** – The role that policy applies to (`MANAGER` or `WORKER`) |
|
| 3790 |
- - **Autoaccept** – A boolean indicating whether nodes joining for that role should be |
|
| 3791 |
- automatically accepted in the Swarm. |
|
| 3792 |
- - **Secret** – An optional secret to provide for nodes to join the Swarm. |
|
| 3793 | 3788 |
- **Orchestration** – Configuration settings for the orchestration aspects of the Swarm. |
| 3794 | 3789 |
- **TaskHistoryRetentionLimit** – Maximum number of tasks history stored. |
| 3795 | 3790 |
- **Raft** – Raft related configuration. |
| ... | ... |
@@ -3811,6 +3787,9 @@ JSON Parameters: |
| 3811 | 3811 |
- **URL** - URL where certificate signing requests should be sent. |
| 3812 | 3812 |
- **Options** - An object with key/value pairs that are interpreted |
| 3813 | 3813 |
as protocol-specific options for the external CA driver. |
| 3814 |
+- **JoinTokens** - Tokens that can be used by other nodes to join the Swarm. |
|
| 3815 |
+ - **Worker** - Token to use for joining as a worker. |
|
| 3816 |
+ - **Manager** - Token to use for joining as a manager. |
|
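The current token values can be read back from the swarm object without rotating them; a short sketch (assumes curl 7.40+ and jq):

```bash
# Print the worker and manager join tokens from GET /swarm.
curl -s --unix-socket /var/run/docker.sock http://localhost/swarm | jq .JoinTokens
```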
| 3814 | 3817 |
|
| 3815 | 3818 |
## 3.8 Services |
| 3816 | 3819 |
|
| ... | ... |
@@ -4292,6 +4271,10 @@ Update the service `id`. |
| 4292 | 4292 |
of: `"Ports": { "<port>/<tcp|udp>: {}" }`
|
| 4293 | 4293 |
- **VirtualIPs** |
| 4294 | 4294 |
|
| 4295 |
+**Query parameters**: |
|
| 4296 |
+ |
|
| 4297 |
+- **version** – The version number of the service object being updated. This is |
|
| 4298 |
+ required to avoid conflicting writes. |
|
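As with swarm updates, the version comes from a prior inspect; a hedged sketch (the service name `redis` and the local spec file are assumptions for the example):

```bash
# Read the service's current version, then post the modified spec back with it.
VERSION=$(curl -s --unix-socket /var/run/docker.sock http://localhost/services/redis | jq .Version.Index)
curl -s --unix-socket /var/run/docker.sock \
  -X POST -H "Content-Type: application/json" --data-binary @service-spec.json \
  "http://localhost/services/redis/update?version=$VERSION"
```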
| 4295 | 4299 |
|
| 4296 | 4300 |
**Status codes**: |
| 4297 | 4301 |
|
| ... | ... |
@@ -3352,7 +3352,6 @@ List nodes |
| 3352 | 3352 |
"UpdatedAt": "2016-06-07T20:31:11.999868824Z", |
| 3353 | 3353 |
"Spec": {
|
| 3354 | 3354 |
"Role": "MANAGER", |
| 3355 |
- "Membership": "ACCEPTED", |
|
| 3356 | 3355 |
"Availability": "ACTIVE" |
| 3357 | 3356 |
}, |
| 3358 | 3357 |
"Description": {
|
| ... | ... |
@@ -3482,7 +3481,6 @@ Return low-level information on the node `id` |
| 3482 | 3482 |
"UpdatedAt": "2016-06-07T20:31:11.999868824Z", |
| 3483 | 3483 |
"Spec": {
|
| 3484 | 3484 |
"Role": "MANAGER", |
| 3485 |
- "Membership": "ACCEPTED", |
|
| 3486 | 3485 |
"Availability": "ACTIVE" |
| 3487 | 3486 |
}, |
| 3488 | 3487 |
"Description": {
|
| ... | ... |
@@ -3596,18 +3594,6 @@ Initialize a new Swarm |
| 3596 | 3596 |
"ListenAddr": "0.0.0.0:4500", |
| 3597 | 3597 |
"ForceNewCluster": false, |
| 3598 | 3598 |
"Spec": {
|
| 3599 |
- "AcceptancePolicy": {
|
|
| 3600 |
- "Policies": [ |
|
| 3601 |
- {
|
|
| 3602 |
- "Role": "MANAGER", |
|
| 3603 |
- "Autoaccept": false |
|
| 3604 |
- }, |
|
| 3605 |
- {
|
|
| 3606 |
- "Role": "WORKER", |
|
| 3607 |
- "Autoaccept": true |
|
| 3608 |
- } |
|
| 3609 |
- ] |
|
| 3610 |
- }, |
|
| 3611 | 3599 |
"Orchestration": {},
|
| 3612 | 3600 |
"Raft": {},
|
| 3613 | 3601 |
"Dispatcher": {},
|
| ... | ... |
@@ -3677,9 +3663,7 @@ Join an existing Swarm |
| 3677 | 3677 |
{
|
| 3678 | 3678 |
"ListenAddr": "0.0.0.0:4500", |
| 3679 | 3679 |
"RemoteAddrs": ["node1:4500"], |
| 3680 |
- "Secret": "", |
|
| 3681 |
- "CACertHash": "", |
|
| 3682 |
- "Manager": false |
|
| 3680 |
+ "JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" |
|
| 3683 | 3681 |
} |
| 3684 | 3682 |
|
| 3685 | 3683 |
**Example response**: |
| ... | ... |
@@ -3699,9 +3683,7 @@ JSON Parameters: |
| 3699 | 3699 |
- **ListenAddr** – Listen address used for inter-manager communication if the node gets promoted to |
| 3700 | 3700 |
manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). |
| 3701 | 3701 |
- **RemoteAddr** – Address of any manager node already participating in the Swarm to join. |
| 3702 |
-- **Secret** – Secret token for joining this Swarm. |
|
| 3703 |
-- **CACertHash** – Optional hash of the root CA to avoid relying on trust on first use. |
|
| 3704 |
-- **Manager** – Directly join as a manager (only for a Swarm configured to autoaccept managers). |
|
| 3702 |
+- **JoinToken** – Secret token for joining this Swarm. |
|
| 3705 | 3703 |
|
| 3706 | 3704 |
### Leave a Swarm |
| 3707 | 3705 |
|
| ... | ... |
@@ -3742,18 +3724,6 @@ Update a Swarm |
| 3742 | 3742 |
|
| 3743 | 3743 |
{
|
| 3744 | 3744 |
"Name": "default", |
| 3745 |
- "AcceptancePolicy": {
|
|
| 3746 |
- "Policies": [ |
|
| 3747 |
- {
|
|
| 3748 |
- "Role": "WORKER", |
|
| 3749 |
- "Autoaccept": false |
|
| 3750 |
- }, |
|
| 3751 |
- {
|
|
| 3752 |
- "Role": "MANAGER", |
|
| 3753 |
- "Autoaccept": false |
|
| 3754 |
- } |
|
| 3755 |
- ] |
|
| 3756 |
- }, |
|
| 3757 | 3745 |
"Orchestration": {
|
| 3758 | 3746 |
"TaskHistoryRetentionLimit": 10 |
| 3759 | 3747 |
}, |
| ... | ... |
@@ -3768,6 +3738,10 @@ Update a Swarm |
| 3768 | 3768 |
}, |
| 3769 | 3769 |
"CAConfig": {
|
| 3770 | 3770 |
"NodeCertExpiry": 7776000000000000 |
| 3771 |
+ }, |
|
| 3772 |
+ "JoinTokens": {
|
|
| 3773 |
+ "Worker": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx", |
|
| 3774 |
+ "Manager": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" |
|
| 3771 | 3775 |
} |
| 3772 | 3776 |
} |
| 3773 | 3777 |
|
| ... | ... |
@@ -3778,6 +3752,13 @@ Update a Swarm |
| 3778 | 3778 |
Content-Length: 0 |
| 3779 | 3779 |
Content-Type: text/plain; charset=utf-8 |
| 3780 | 3780 |
|
| 3781 |
+**Query parameters**: |
|
| 3782 |
+ |
|
| 3783 |
+- **version** – The version number of the swarm object being updated. This is |
|
| 3784 |
+ required to avoid conflicting writes. |
|
| 3785 |
+- **rotate_worker_token** - Set to `true` to rotate the worker join token. |
|
| 3786 |
+- **rotate_manager_token** - Set to `true` to rotate the manager join token. |
|
| 3787 |
+ |
|
| 3781 | 3788 |
**Status codes**: |
| 3782 | 3789 |
|
| 3783 | 3790 |
- **200** – no error |
| ... | ... |
@@ -3786,11 +3767,6 @@ Update a Swarm |
| 3786 | 3786 |
|
| 3787 | 3787 |
JSON Parameters: |
| 3788 | 3788 |
|
| 3789 |
-- **Policies** – An array of acceptance policies. |
|
| 3790 |
- - **Role** – The role that policy applies to (`MANAGER` or `WORKER`) |
|
| 3791 |
- - **Autoaccept** – A boolean indicating whether nodes joining for that role should be |
|
| 3792 |
- automatically accepted in the Swarm. |
|
| 3793 |
- - **Secret** – An optional secret to provide for nodes to join the Swarm. |
|
| 3794 | 3789 |
- **Orchestration** – Configuration settings for the orchestration aspects of the Swarm. |
| 3795 | 3790 |
- **TaskHistoryRetentionLimit** – Maximum number of tasks history stored. |
| 3796 | 3791 |
- **Raft** – Raft related configuration. |
| ... | ... |
@@ -3812,6 +3788,9 @@ JSON Parameters: |
| 3812 | 3812 |
- **URL** - URL where certificate signing requests should be sent. |
| 3813 | 3813 |
- **Options** - An object with key/value pairs that are interpreted |
| 3814 | 3814 |
as protocol-specific options for the external CA driver. |
| 3815 |
+- **JoinTokens** - Tokens that can be used by other nodes to join the Swarm. |
|
| 3816 |
+ - **Worker** - Token to use for joining as a worker. |
|
| 3817 |
+ - **Manager** - Token to use for joining as a manager. |
|
| 3815 | 3818 |
|
| 3816 | 3819 |
## 3.8 Services |
| 3817 | 3820 |
|
| ... | ... |
@@ -4293,6 +4272,10 @@ Update the service `id`. |
| 4293 | 4293 |
of: `"Ports": { "<port>/<tcp|udp>: {}" }`
|
| 4294 | 4294 |
- **VirtualIPs** |
| 4295 | 4295 |
|
| 4296 |
+**Query parameters**: |
|
| 4297 |
+ |
|
| 4298 |
+- **version** – The version number of the service object being updated. This is |
|
| 4299 |
+ required to avoid conflicting writes. |
|
| 4296 | 4300 |
|
| 4297 | 4301 |
**Status codes**: |
| 4298 | 4302 |
|
| ... | ... |
@@ -19,7 +19,7 @@ Create and update a stack from a Distributed Application Bundle (DAB) |
| 19 | 19 |
Options: |
| 20 | 20 |
--file string Path to a Distributed Application Bundle file (Default: STACK.dab) |
| 21 | 21 |
--help Print usage |
| 22 |
- --registry-auth Send registry authentication details to Swarm agents |
|
| 22 |
+ --registry-auth Send registry authentication details to swarm agents |
|
| 23 | 23 |
``` |
| 24 | 24 |
|
| 25 | 25 |
Create and update a stack from a `dab` file. This command has to be |
| ... | ... |
@@ -111,7 +111,6 @@ read the [`dockerd`](dockerd.md) reference page. |
| 111 | 111 |
|
| 112 | 112 |
| Command | Description | |
| 113 | 113 |
|:--------|:-------------------------------------------------------------------| |
| 114 |
-| [node accept](node_accept.md) | Accept a node into the swarm | |
|
| 115 | 114 |
| [node promote](node_promote.md) | Promote a node that is pending a promotion to manager | |
| 116 | 115 |
| [node demote](node_demote.md) | Demotes an existing manager so that it is no longer a manager | |
| 117 | 116 |
| [node inspect](node_inspect.md) | Inspect a node in the swarm | |
| ... | ... |
@@ -124,10 +123,11 @@ read the [`dockerd`](dockerd.md) reference page. |
| 124 | 124 |
|
| 125 | 125 |
| Command | Description | |
| 126 | 126 |
|:--------|:-------------------------------------------------------------------| |
| 127 |
-| [swarm init](swarm_init.md) | Initialize a Swarm | |
|
| 128 |
-| [swarm join](swarm_join.md) | Join a Swarm as a manager node or worker node | |
|
| 127 |
+| [swarm init](swarm_init.md) | Initialize a swarm | |
|
| 128 |
+| [swarm join](swarm_join.md) | Join a swarm as a manager node or worker node | |
|
| 129 | 129 |
| [swarm leave](swarm_leave.md) | Remove the current node from the swarm | |
| 130 | 130 |
| [swarm update](swarm_update.md) | Update attributes of a swarm | |
| 131 |
+| [swarm join-token](swarm_join_token.md) | Display or rotate join tokens | |
|
| 131 | 132 |
|
| 132 | 133 |
### Swarm service commands |
| 133 | 134 |
|
| ... | ... |
@@ -38,7 +38,7 @@ available on the volume where `/var/lib/docker` is mounted. |
| 38 | 38 |
## Display Docker system information |
| 39 | 39 |
|
| 40 | 40 |
Here is a sample output for a daemon running on Ubuntu, using the overlay |
| 41 |
-storage driver and a node that is part of a 2 node Swarm cluster: |
|
| 41 |
+storage driver and a node that is part of a 2 node swarm cluster: |
|
| 42 | 42 |
|
| 43 | 43 |
$ docker -D info |
| 44 | 44 |
Containers: 14 |
| 45 | 45 |
deleted file mode 100644 |
| ... | ... |
@@ -1,32 +0,0 @@ |
| 1 |
-<!--[metadata]> |
|
| 2 |
-+++ |
|
| 3 |
-title = "node accept" |
|
| 4 |
-description = "The node accept command description and usage" |
|
| 5 |
-keywords = ["node, accept"] |
|
| 6 |
-[menu.main] |
|
| 7 |
-parent = "smn_cli" |
|
| 8 |
-+++ |
|
| 9 |
-<![end-metadata]--> |
|
| 10 |
- |
|
| 11 |
-# node accept |
|
| 12 |
- |
|
| 13 |
-```markdown |
|
| 14 |
-Usage: docker node accept NODE [NODE...] |
|
| 15 |
- |
|
| 16 |
-Accept a node in the swarm |
|
| 17 |
- |
|
| 18 |
-Options: |
|
| 19 |
- --help Print usage |
|
| 20 |
-``` |
|
| 21 |
- |
|
| 22 |
-Accept a node into the swarm. This command targets a docker engine that is a manager in the swarm cluster. |
|
| 23 |
- |
|
| 24 |
- |
|
| 25 |
-```bash |
|
| 26 |
-$ docker node accept <node name> |
|
| 27 |
-``` |
|
| 28 |
- |
|
| 29 |
-## Related information |
|
| 30 |
- |
|
| 31 |
-* [node promote](node_promote.md) |
|
| 32 |
-* [node demote](node_demote.md) |
| ... | ... |
@@ -30,10 +30,10 @@ Lists all the nodes that the Docker Swarm manager knows about. You can filter us |
| 30 | 30 |
Example output: |
| 31 | 31 |
|
| 32 | 32 |
$ docker node ls |
| 33 |
- ID HOSTNAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER |
|
| 34 |
- 1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Accepted Ready Active |
|
| 35 |
- 38ciaotwjuritcdtn9npbnkuz swarm-worker1 Accepted Ready Active |
|
| 36 |
- e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Accepted Ready Active Reachable Yes |
|
| 33 |
+ ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS |
|
| 34 |
+ 1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active |
|
| 35 |
+ 38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active |
|
| 36 |
+ e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader |
|
| 37 | 37 |
|
| 38 | 38 |
|
| 39 | 39 |
## Filtering |
| ... | ... |
@@ -54,16 +54,16 @@ The `name` filter matches on all or part of a node name. |
| 54 | 54 |
The following filter matches the node with a name equal to the `swarm-manager1` string. |
| 55 | 55 |
|
| 56 | 56 |
$ docker node ls -f name=swarm-manager1 |
| 57 |
- ID HOSTNAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER |
|
| 58 |
- e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Accepted Ready Active Reachable Yes |
|
| 57 |
+ ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS |
|
| 58 |
+ e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader |
|
| 59 | 59 |
|
| 60 | 60 |
### id |
| 61 | 61 |
|
| 62 | 62 |
The `id` filter matches all or part of a node's id. |
| 63 | 63 |
|
| 64 | 64 |
$ docker node ls -f id=1 |
| 65 |
- ID HOSTNAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER |
|
| 66 |
- 1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Accepted Ready Active |
|
| 65 |
+ ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS |
|
| 66 |
+ 1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active |
|
| 67 | 67 |
|
| 68 | 68 |
|
| 69 | 69 |
#### label |
| ... | ... |
@@ -75,8 +75,8 @@ The following filter matches nodes with the `usage` label regardless of its valu |
| 75 | 75 |
|
| 76 | 76 |
```bash |
| 77 | 77 |
$ docker node ls -f "label=foo" |
| 78 |
-ID HOSTNAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER |
|
| 79 |
-1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Accepted Ready Active |
|
| 78 |
+ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS |
|
| 79 |
+1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active |
|
| 80 | 80 |
``` |
| 81 | 81 |
|
| 82 | 82 |
|
| ... | ... |
@@ -23,14 +23,13 @@ Options: |
| 23 | 23 |
--help Print usage |
| 24 | 24 |
``` |
| 25 | 25 |
|
| 26 |
-Removes specified nodes from a swarm. Rejects nodes with `Pending` |
|
| 27 |
-membership from the swarm. |
|
| 26 |
+Removes specified nodes from a swarm. |
|
| 28 | 27 |
|
| 29 | 28 |
|
| 30 | 29 |
Example output: |
| 31 | 30 |
|
| 32 | 31 |
$ docker node rm swarm-node-02 |
| 33 |
- Node swarm-node-02 removed from Swarm |
|
| 32 |
+ Node swarm-node-02 removed from swarm |
|
| 34 | 33 |
|
| 35 | 34 |
|
| 36 | 35 |
## Related information |
| ... | ... |
@@ -21,7 +21,6 @@ Options: |
| 21 | 21 |
--help Print usage |
| 22 | 22 |
--label-add value Add or update a node label (key=value) (default []) |
| 23 | 23 |
--label-rm value Remove a node label if exists (default []) |
| 24 |
- --membership string Membership of the node (accepted/rejected) |
|
| 25 | 24 |
--role string Role of the node (worker/manager) |
| 26 | 25 |
``` |
| 27 | 26 |
|
| ... | ... |
@@ -31,7 +31,7 @@ Options: |
| 31 | 31 |
--name string Service name |
| 32 | 32 |
--network value Network attachments (default []) |
| 33 | 33 |
-p, --publish value Publish a port as a node port (default []) |
| 34 |
- --registry-auth Send registry authentication details to Swarm agents |
|
| 34 |
+ --registry-auth Send registry authentication details to swarm agents |
|
| 35 | 35 |
--replicas value Number of tasks (default none) |
| 36 | 36 |
--reserve-cpu value Reserve CPUs (default 0.000) |
| 37 | 37 |
--reserve-memory value Reserve Memory (default 0 B) |
| ... | ... |
@@ -38,7 +38,7 @@ Options: |
| 38 | 38 |
--network-rm value Remove a network by name (default []) |
| 39 | 39 |
--publish-add value Add or update a published port (default []) |
| 40 | 40 |
--publish-rm value Remove a published port by its target port (default []) |
| 41 |
- --registry-auth Send registry authentication details to Swarm agents |
|
| 41 |
+ --registry-auth Send registry authentication details to swarm agents |
|
| 42 | 42 |
--replicas value Number of tasks (default none) |
| 43 | 43 |
--reserve-cpu value Reserve CPUs (default 0.000) |
| 44 | 44 |
--reserve-memory value Reserve Memory (default 0 B) |
| ... | ... |
@@ -14,74 +14,43 @@ parent = "smn_cli" |
| 14 | 14 |
```markdown |
| 15 | 15 |
Usage: docker swarm init [OPTIONS] |
| 16 | 16 |
|
| 17 |
-Initialize a Swarm |
|
| 17 |
+Initialize a swarm |
|
| 18 | 18 |
|
| 19 | 19 |
Options: |
| 20 |
- --auto-accept value Auto acceptance policy (default worker) |
|
| 21 | 20 |
--cert-expiry duration Validity period for node certificates (default 2160h0m0s) |
| 22 | 21 |
--dispatcher-heartbeat duration Dispatcher heartbeat period (default 5s) |
| 23 | 22 |
--external-ca value Specifications of one or more certificate signing endpoints |
| 24 | 23 |
--force-new-cluster Force create a new cluster from current state. |
| 25 | 24 |
--help Print usage |
| 26 | 25 |
--listen-addr value Listen address (default 0.0.0.0:2377) |
| 27 |
- --secret string Set secret value needed to accept nodes into cluster |
|
| 28 | 26 |
--task-history-limit int Task history retention limit (default 10) |
| 29 | 27 |
``` |
| 30 | 28 |
|
| 31 |
-Initialize a Swarm cluster. The docker engine targeted by this command becomes a manager |
|
| 32 |
-in the newly created one node Swarm cluster. |
|
| 29 |
+Initialize a swarm cluster. The docker engine targeted by this command becomes a manager |
|
| 30 |
+in the newly created one node swarm cluster. |
|
| 33 | 31 |
|
| 34 | 32 |
|
| 35 | 33 |
```bash |
| 36 | 34 |
$ docker swarm init --listen-addr 192.168.99.121:2377 |
| 37 |
-No --secret provided. Generated random secret: |
|
| 38 |
- 4ao565v9jsuogtq5t8s379ulb |
|
| 39 |
- |
|
| 40 |
-Swarm initialized: current node (1ujecd0j9n3ro9i6628smdmth) is now a manager. |
|
| 35 |
+Swarm initialized: current node (bvz81updecsj6wjz393c09vti) is now a manager. |
|
| 41 | 36 |
|
| 42 | 37 |
To add a worker to this swarm, run the following command: |
| 43 |
- docker swarm join --secret 4ao565v9jsuogtq5t8s379ulb \ |
|
| 44 |
- --ca-hash sha256:07ce22bd1a7619f2adc0d63bd110479a170e7c4e69df05b67a1aa2705c88ef09 \ |
|
| 45 |
- 192.168.99.121:2377 |
|
| 46 |
-$ docker node ls |
|
| 47 |
-ID HOSTNAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER |
|
| 48 |
-1ujecd0j9n3ro9i6628smdmth * manager1 Accepted Ready Active Reachable Yes |
|
| 49 |
-``` |
|
| 50 |
- |
|
| 51 |
-If a secret for joining new nodes is not provided with `--secret`, `docker swarm init` will |
|
| 52 |
-generate a random one and print it to the terminal (as seen in the example above). To initialize |
|
| 53 |
-a swarm with no secret, use `--secret ""`. |
|
| 54 |
- |
|
| 55 |
-### `--auto-accept value` |
|
| 56 |
- |
|
| 57 |
-This flag controls node acceptance into the cluster. By default, `worker` nodes are |
|
| 58 |
-automatically accepted by the cluster. This can be changed by specifying what kinds of nodes |
|
| 59 |
-can be auto-accepted into the cluster. If auto-accept is not turned on, then |
|
| 60 |
-[node accept](node_accept.md) can be used to explicitly accept a node into the cluster. |
|
| 61 |
- |
|
| 62 |
-For example, the following initializes a cluster with auto-acceptance of workers, but not managers |
|
| 63 |
- |
|
| 64 |
- |
|
| 65 |
-```bash |
|
| 66 |
-$ docker swarm init --listen-addr 192.168.99.121:2377 --auto-accept worker |
|
| 38 |
+ docker swarm join \ |
|
| 39 |
+ --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ |
|
| 40 |
+ 172.17.0.2:2377 |
|
| 41 |
+ |
|
| 42 |
+To add a manager to this swarm, run the following command: |
|
| 43 |
+ docker swarm join \ |
|
| 44 |
+ --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \ |
|
| 45 |
+ 172.17.0.2:2377 |
|
| 67 | 46 |
``` |
| 68 | 47 |
|
| 69 |
-It is possible to pass a comma-separated list of node types. The following initializes a cluster |
|
| 70 |
-with auto-acceptance of both `worker` and `manager` nodes |
|
| 48 |
+`docker swarm init` generates two random tokens, a worker token and a manager token. When you join |
|
| 49 |
+a new node to the swarm, the node joins as a worker or manager node based upon the token you pass |
|
| 50 |
+to [swarm join](swarm_join.md). |
|
| 71 | 51 |
|
| 72 |
-```bash |
|
| 73 |
-$ docker swarm init --listen-addr 192.168.99.121:2377 --auto-accept worker,manager |
|
| 74 |
-``` |
|
| 75 |
- |
|
| 76 |
-To disable auto acceptance, use the `none` option. Note that this option cannot |
|
| 77 |
-be combined with other values. When disabling auto acceptance, nodes must be |
|
| 78 |
-manually accepted or rejected using `docker node accept` or `docker node rm`. |
|
| 79 |
- |
|
| 80 |
-The following example enables swarm mode with auto acceptance disabled: |
|
| 81 |
- |
|
| 82 |
-```bash |
|
| 83 |
-$ docker swarm init --listen-addr 192.168.99.121:2377 --auto-accept none |
|
| 84 |
-``` |
|
| 52 |
+After you create the swarm, you can display or rotate the join tokens using |
|
| 53 |
+[swarm join-token](swarm_join_token.md). |
|
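For instance, a provisioning script might capture just the worker token and use it from a second engine (a sketch; the remote daemon address is an assumption):

```bash
# Grab only the worker token on the manager, then join another engine with it.
WORKER_TOKEN=$(docker swarm join-token -q worker)
docker -H tcp://192.168.99.122:2375 swarm join --token "$WORKER_TOKEN" 192.168.99.121:2377
```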
| 85 | 54 |
|
| 86 | 55 |
### `--cert-expiry` |
| 87 | 56 |
|
| ... | ... |
@@ -105,11 +74,7 @@ This flag forces an existing node that was part of a quorum that was lost to res |
| 105 | 105 |
|
| 106 | 106 |
### `--listen-addr value` |
| 107 | 107 |
|
| 108 |
-The node listens for inbound Swarm manager traffic on this IP:PORT |
|
| 109 |
- |
|
| 110 |
-### `--secret string` |
|
| 111 |
- |
|
| 112 |
-Secret value needed to accept nodes into the Swarm |
|
| 108 |
+The node listens for inbound swarm manager traffic on this IP:PORT |
|
| 113 | 109 |
|
| 114 | 110 |
### `--task-history-limit` |
| 115 | 111 |
|
| ... | ... |
@@ -120,5 +85,5 @@ This flag sets up task history retention limit. |
| 120 | 120 |
* [swarm join](swarm_join.md) |
| 121 | 121 |
* [swarm leave](swarm_leave.md) |
| 122 | 122 |
* [swarm update](swarm_update.md) |
| 123 |
-* [node accept](node_accept.md) |
|
| 123 |
+* [swarm join-token](swarm_join_token.md) |
|
| 124 | 124 |
* [node rm](node_rm.md) |
| ... | ... |
@@ -14,55 +14,54 @@ parent = "smn_cli" |
| 14 | 14 |
```markdown |
| 15 | 15 |
Usage: docker swarm join [OPTIONS] HOST:PORT |
| 16 | 16 |
|
| 17 |
-Join a Swarm as a node and/or manager |
|
| 17 |
+Join a swarm as a node and/or manager |
|
| 18 | 18 |
|
| 19 | 19 |
Options: |
| 20 |
- --ca-hash string Hash of the Root Certificate Authority certificate used for trusted join |
|
| 21 | 20 |
--help Print usage |
| 22 | 21 |
--listen-addr value Listen address (default 0.0.0.0:2377) |
| 23 |
- --manager Try joining as a manager. |
|
| 24 |
- --secret string Secret for node acceptance |
|
| 22 |
+ --token string Token for entry into the swarm |
|
| 25 | 23 |
``` |
| 26 | 24 |
|
| 27 |
-Join a node to a Swarm cluster. If the `--manager` flag is specified, the docker engine |
|
| 28 |
-targeted by this command becomes a `manager`. If it is not specified, it becomes a `worker`. |
|
| 25 |
+Join a node to a swarm. The node joins as a manager node or worker node based upon the token you |
|
| 26 |
+pass with the `--token` flag. If you pass a manager token, the node joins as a manager. If you |
|
| 27 |
+pass a worker token, the node joins as a worker. |
|
| 29 | 28 |
|
| 30 | 29 |
### Join a node to swarm as a manager |
| 31 | 30 |
|
| 31 |
+The example below demonstrates joining a manager node using a manager token. |
|
| 32 |
+ |
|
| 32 | 33 |
```bash |
| 33 |
-$ docker swarm join --secret 4ao565v9jsuogtq5t8s379ulb --manager --listen-addr 192.168.99.122:2377 192.168.99.121:2377 |
|
| 34 |
-This node joined a Swarm as a manager. |
|
| 34 |
+$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 --listen-addr 192.168.99.122:2377 192.168.99.121:2377 |
|
| 35 |
+This node joined a swarm as a manager. |
|
| 35 | 36 |
$ docker node ls |
| 36 |
-ID HOSTNAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER |
|
| 37 |
-dkp8vy1dq1kxleu9g4u78tlag * manager2 Accepted Ready Active Reachable |
|
| 38 |
-dvfxp4zseq4s0rih1selh0d20 manager1 Accepted Ready Active Reachable Yes |
|
| 37 |
+ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS |
|
| 38 |
+dkp8vy1dq1kxleu9g4u78tlag * manager2 Ready Active Reachable |
|
| 39 |
+dvfxp4zseq4s0rih1selh0d20 manager1 Ready Active Leader |
|
| 39 | 40 |
``` |
| 40 | 41 |
|
| 42 |
+A swarm should have three to seven managers at most, because a majority of managers must be |
|
| 43 |
+available for the swarm to function. Nodes that are not meant to participate in this management |
|
| 44 |
+quorum should join as workers instead. Managers should be stable hosts with static IP addresses. |
|
| 45 |
+ |
|
| 41 | 46 |
### Join a node to swarm as a worker |
| 42 | 47 |
|
| 48 |
+The example below demonstrates joining a worker node using a worker token. |
|
| 49 |
+ |
|
| 43 | 50 |
```bash |
| 44 |
-$ docker swarm join --secret 4ao565v9jsuogtq5t8s379ulb --listen-addr 192.168.99.123:2377 192.168.99.121:2377 |
|
| 45 |
-This node joined a Swarm as a worker. |
|
| 51 |
+$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx --listen-addr 192.168.99.123:2377 192.168.99.121:2377 |
|
| 52 |
+This node joined a swarm as a worker. |
|
| 46 | 53 |
$ docker node ls |
| 47 |
-ID HOSTNAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER |
|
| 48 |
-7ln70fl22uw2dvjn2ft53m3q5 worker2 Accepted Ready Active |
|
| 49 |
-dkp8vy1dq1kxleu9g4u78tlag worker1 Accepted Ready Active Reachable |
|
| 50 |
-dvfxp4zseq4s0rih1selh0d20 * manager1 Accepted Ready Active Reachable Yes |
|
| 54 |
+ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS |
|
| 55 |
+7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active |
|
| 56 |
+dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active Reachable |
|
| 57 |
+dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader |
|
| 51 | 58 |
``` |
| 52 | 59 |
|
| 53 |
-### `--ca-hash` |
|
| 54 |
- |
|
| 55 |
-Hash of the Root Certificate Authority certificate used for trusted join. |
|
| 56 |
- |
|
| 57 | 60 |
### `--listen-addr value` |
| 58 | 61 |
|
| 59 |
-The node listens for inbound Swarm manager traffic on this IP:PORT |
|
| 60 |
- |
|
| 61 |
-### `--manager` |
|
| 62 |
- |
|
| 63 |
-Joins the node as a manager |
|
| 62 |
+The node listens for inbound swarm manager traffic on this IP:PORT |
|
| 64 | 63 |
|
| 65 |
-### `--secret string` |
|
| 64 |
+### `--token string` |
|
| 66 | 65 |
|
| 67 | 66 |
Secret value required for nodes to join the swarm |
| 68 | 67 |
|
| 69 | 68 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,76 @@ |
| 0 |
+<!--[metadata]> |
|
| 1 |
+title = "swarm join-token" |
|
| 2 |
+description = "The swarm join-token command description and usage" |
|
| 3 |
+keywords = ["swarm, join-token"] |
|
| 4 |
+advisory = "rc" |
|
| 5 |
+[menu.main] |
|
| 6 |
+parent = "smn_cli" |
|
| 7 |
+<![end-metadata]--> |
|
| 8 |
+ |
|
| 9 |
+# swarm join-token |
|
| 10 |
+ |
|
| 11 |
+```markdown |
|
| 12 |
+Usage: docker swarm join-token [--rotate] (worker|manager) |
|
| 13 |
+ |
|
| 14 |
+Manage join tokens |
|
| 15 |
+ |
|
| 16 |
+Options: |
|
| 17 |
+ --help Print usage |
|
| 18 |
+ -q, --quiet Only display token |
|
| 19 |
+ --rotate Rotate join token |
|
| 20 |
+``` |
|
| 21 |
+ |
|
| 22 |
+Join tokens are secrets that determine whether a node joins the swarm as a manager node |
|
| 23 |
+or a worker node. You pass the token using the `--token` flag when you run |
|
| 24 |
+[swarm join](swarm_join.md). You can view the current tokens or rotate them using |
|
| 25 |
+`swarm join-token`. |
|
| 26 |
+ |
|
| 27 |
+When run with a single `worker` or `manager` argument, it prints a command for joining a new |
|
| 28 |
+node to the swarm, including the necessary token: |
|
| 29 |
+ |
|
| 30 |
+```bash |
|
| 31 |
+$ docker swarm join-token worker |
|
| 32 |
+To add a worker to this swarm, run the following command: |
|
| 33 |
+ docker swarm join \ |
|
| 34 |
+ --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ |
|
| 35 |
+ 172.17.0.2:2377 |
|
| 36 |
+ |
|
| 37 |
+$ docker swarm join-token manager |
|
| 38 |
+To add a manager to this swarm, run the following command: |
|
| 39 |
+ docker swarm join \ |
|
| 40 |
+ --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \ |
|
| 41 |
+ 172.17.0.2:2377 |
|
| 42 |
+``` |
|
| 43 |
+ |
|
| 44 |
+Use the `--rotate` flag to generate a new join token for the specified role: |
|
| 45 |
+ |
|
| 46 |
+```bash |
|
| 47 |
+$ docker swarm join-token --rotate worker |
|
| 48 |
+To add a worker to this swarm, run the following command: |
|
| 49 |
+ docker swarm join \ |
|
| 50 |
+ --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t \ |
|
| 51 |
+ 172.17.0.2:2377 |
|
| 52 |
+``` |
|
| 53 |
+ |
|
| 54 |
+After using `--rotate`, only the new token will be valid for joining with the specified role. |
|
| 55 |
+ |
|
| 56 |
+The `-q` (or `--quiet`) flag only prints the token: |
|
| 57 |
+ |
|
| 58 |
+```bash |
|
| 59 |
+$ docker swarm join-token -q worker |
|
| 60 |
+SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t |
|
| 61 |
+``` |
|
| 62 |
+ |
|
| 63 |
+### `--rotate` |
|
| 64 |
+ |
|
| 65 |
+Replace the join token for the specified role with a new token and print it. |
|
| 66 |
+ |
|
| 67 |
+### `--quiet` |
|
| 68 |
+ |
|
| 69 |
+Only print the token. Do not print a complete command for joining. |
|
| 70 |
+ |
|
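The two flags can be combined; assuming they compose as their descriptions suggest, a script can rotate and capture the new worker token in one step:

```bash
# Rotate the worker token and keep only the new value.
NEW_TOKEN=$(docker swarm join-token --rotate -q worker)
echo "New worker join token: $NEW_TOKEN"
```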
| 71 |
+## Related information |
|
| 72 |
+ |
|
| 73 |
+* [swarm join](swarm_join.md) |
| ... | ... |
@@ -14,7 +14,7 @@ parent = "smn_cli" |
| 14 | 14 |
```markdown |
| 15 | 15 |
Usage: docker swarm leave [OPTIONS] |
| 16 | 16 |
|
| 17 |
-Leave a Swarm |
|
| 17 |
+Leave a swarm |
|
| 18 | 18 |
|
| 19 | 19 |
Options: |
| 20 | 20 |
--force Force leave ignoring warnings. |
| ... | ... |
@@ -26,10 +26,10 @@ This command causes the node to leave the swarm. |
| 26 | 26 |
On a manager node: |
| 27 | 27 |
```bash |
| 28 | 28 |
$ docker node ls |
| 29 |
-ID HOSTNAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER |
|
| 30 |
-7ln70fl22uw2dvjn2ft53m3q5 worker2 Accepted Ready Active |
|
| 31 |
-dkp8vy1dq1kxleu9g4u78tlag worker1 Accepted Ready Active Reachable |
|
| 32 |
-dvfxp4zseq4s0rih1selh0d20 * manager1 Accepted Ready Active Reachable Yes |
|
| 29 |
+ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS |
|
| 30 |
+7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active |
|
| 31 |
+dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active Reachable |
|
| 32 |
+dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader |
|
| 33 | 33 |
``` |
| 34 | 34 |
|
| 35 | 35 |
On a worker node: |
| ... | ... |
@@ -41,10 +41,10 @@ Node left the default swarm. |
| 41 | 41 |
On a manager node: |
| 42 | 42 |
```bash |
| 43 | 43 |
$ docker node ls |
| 44 |
-ID HOSTNAME MEMBERSHIP STATUS AVAILABILITY MANAGER STATUS LEADER |
|
| 45 |
-7ln70fl22uw2dvjn2ft53m3q5 worker2 Accepted Down Active |
|
| 46 |
-dkp8vy1dq1kxleu9g4u78tlag worker1 Accepted Ready Active Reachable |
|
| 47 |
-dvfxp4zseq4s0rih1selh0d20 * manager1 Accepted Ready Active Reachable Yes |
|
| 44 |
+ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS |
|
| 45 |
+7ln70fl22uw2dvjn2ft53m3q5 worker2 Down Active |
|
| 46 |
+dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active Reachable |
|
| 47 |
+dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader |
|
| 48 | 48 |
``` |
| 49 | 49 |
|
| 50 | 50 |
## Related information |
| ... | ... |
@@ -14,23 +14,21 @@ parent = "smn_cli" |
| 14 | 14 |
```markdown |
| 15 | 15 |
Usage: docker swarm update [OPTIONS] |
| 16 | 16 |
|
| 17 |
-Update the Swarm |
|
| 17 |
+Update the swarm |
|
| 18 | 18 |
|
| 19 | 19 |
Options: |
| 20 |
- --auto-accept value Auto acceptance policy (worker, manager or none) |
|
| 21 | 20 |
--cert-expiry duration Validity period for node certificates (default 2160h0m0s) |
| 22 | 21 |
--dispatcher-heartbeat duration Dispatcher heartbeat period (default 5s) |
| 23 | 22 |
--external-ca value Specifications of one or more certificate signing endpoints |
| 24 | 23 |
--help Print usage |
| 25 |
- --secret string Set secret value needed to accept nodes into cluster |
|
| 26 | 24 |
--task-history-limit int Task history retention limit (default 10) |
| 27 | 25 |
``` |
| 28 | 26 |
|
| 29 |
-Updates a Swarm cluster with new parameter values. This command must target a manager node. |
|
| 27 |
+Updates a swarm cluster with new parameter values. This command must target a manager node. |
|
| 30 | 28 |
|
| 31 | 29 |
|
| 32 | 30 |
```bash |
| 33 |
-$ docker swarm update --auto-accept manager |
|
| 31 |
+$ docker swarm update --cert-expiry 4000h0m0s |
|
| 34 | 32 |
``` |
| 35 | 33 |
|
| 36 | 34 |
## Related information |
| ... | ... |
@@ -216,15 +216,17 @@ func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *Swarm |
| 216 | 216 |
|
| 217 | 217 |
if joinSwarm == true {
|
| 218 | 218 |
if len(s.daemons) > 0 {
|
| 219 |
+ tokens := s.daemons[0].joinTokens(c) |
|
| 220 |
+ token := tokens.Worker |
|
| 221 |
+ if manager {
|
|
| 222 |
+ token = tokens.Manager |
|
| 223 |
+ } |
|
| 219 | 224 |
c.Assert(d.Join(swarm.JoinRequest{
|
| 220 | 225 |
RemoteAddrs: []string{s.daemons[0].listenAddr},
|
| 221 |
- Manager: manager}), check.IsNil) |
|
| 222 |
- } else {
|
|
| 223 |
- c.Assert(d.Init(swarm.InitRequest{
|
|
| 224 |
- Spec: swarm.Spec{
|
|
| 225 |
- AcceptancePolicy: autoAcceptPolicy, |
|
| 226 |
- }, |
|
| 226 |
+ JoinToken: token, |
|
| 227 | 227 |
}), check.IsNil) |
| 228 |
+ } else {
|
|
| 229 |
+ c.Assert(d.Init(swarm.InitRequest{}), check.IsNil)
|
|
| 228 | 230 |
} |
| 229 | 231 |
} |
| 230 | 232 |
|
| ... | ... |
@@ -22,14 +22,6 @@ type SwarmDaemon struct {
|
| 22 | 22 |
listenAddr string |
| 23 | 23 |
} |
| 24 | 24 |
|
| 25 |
-// default policy in tests is allow-all |
|
| 26 |
-var autoAcceptPolicy = swarm.AcceptancePolicy{
|
|
| 27 |
- Policies: []swarm.Policy{
|
|
| 28 |
- {Role: swarm.NodeRoleWorker, Autoaccept: true},
|
|
| 29 |
- {Role: swarm.NodeRoleManager, Autoaccept: true},
|
|
| 30 |
- }, |
|
| 31 |
-} |
|
| 32 |
- |
|
| 33 | 25 |
// Init initializes a new swarm cluster. |
| 34 | 26 |
func (d *SwarmDaemon) Init(req swarm.InitRequest) error {
|
| 35 | 27 |
if req.ListenAddr == "" {
|
| ... | ... |
@@ -271,6 +263,28 @@ func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) {
|
| 271 | 271 |
c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
| 272 | 272 |
} |
| 273 | 273 |
|
| 274 |
+// rotateTokens fetches the current swarm object and rotates both join tokens via /swarm/update. |
+func (d *SwarmDaemon) rotateTokens(c *check.C) {
|
|
| 275 |
+ var sw swarm.Swarm |
|
| 276 |
+ status, out, err := d.SockRequest("GET", "/swarm", nil)
|
|
| 277 |
+ c.Assert(err, checker.IsNil) |
|
| 278 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 279 |
+ c.Assert(json.Unmarshal(out, &sw), checker.IsNil) |
|
| 280 |
+ |
|
| 281 |
+ url := fmt.Sprintf("/swarm/update?version=%d&rotate_worker_token=true&rotate_manager_token=true", sw.Version.Index)
|
|
| 282 |
+ status, out, err = d.SockRequest("POST", url, sw.Spec)
|
|
| 283 |
+ c.Assert(err, checker.IsNil) |
|
| 284 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 285 |
+} |
|
| 286 |
+ |
|
| 287 |
+// joinTokens returns the swarm's current worker and manager join tokens. |
+func (d *SwarmDaemon) joinTokens(c *check.C) swarm.JoinTokens {
|
|
| 288 |
+ var sw swarm.Swarm |
|
| 289 |
+ status, out, err := d.SockRequest("GET", "/swarm", nil)
|
|
| 290 |
+ c.Assert(err, checker.IsNil) |
|
| 291 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 292 |
+ c.Assert(json.Unmarshal(out, &sw), checker.IsNil) |
|
| 293 |
+ return sw.JoinTokens |
|
| 294 |
+} |
|
| 295 |
+ |
|
| 274 | 296 |
func (d *SwarmDaemon) checkLocalNodeState(c *check.C) (interface{}, check.CommentInterface) {
|
| 275 | 297 |
info, err := d.info() |
| 276 | 298 |
c.Assert(err, checker.IsNil) |
| ... | ... |
@@ -43,7 +43,7 @@ func (s *DockerSwarmSuite) TestApiSwarmInit(c *check.C) {
|
| 43 | 43 |
c.Assert(info.ControlAvailable, checker.False) |
| 44 | 44 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) |
| 45 | 45 |
|
| 46 |
- c.Assert(d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
|
| 46 |
+ c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.joinTokens(c).Worker, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
|
| 47 | 47 |
|
| 48 | 48 |
info, err = d2.info() |
| 49 | 49 |
c.Assert(err, checker.IsNil) |
| ... | ... |
@@ -72,89 +72,29 @@ func (s *DockerSwarmSuite) TestApiSwarmInit(c *check.C) {
|
| 72 | 72 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) |
| 73 | 73 |
} |
| 74 | 74 |
|
| 75 |
-func (s *DockerSwarmSuite) TestApiSwarmManualAcceptance(c *check.C) {
|
|
| 76 |
- testRequires(c, Network) |
|
| 77 |
- s.testAPISwarmManualAcceptance(c, "") |
|
| 78 |
-} |
|
| 79 |
-func (s *DockerSwarmSuite) TestApiSwarmManualAcceptanceSecret(c *check.C) {
|
|
| 80 |
- testRequires(c, Network) |
|
| 81 |
- s.testAPISwarmManualAcceptance(c, "foobaz") |
|
| 82 |
-} |
|
| 83 |
- |
|
| 84 |
-func (s *DockerSwarmSuite) testAPISwarmManualAcceptance(c *check.C, secret string) {
|
|
| 85 |
- d1 := s.AddDaemon(c, false, false) |
|
| 86 |
- c.Assert(d1.Init(swarm.InitRequest{
|
|
| 87 |
- Spec: swarm.Spec{
|
|
| 88 |
- AcceptancePolicy: swarm.AcceptancePolicy{
|
|
| 89 |
- Policies: []swarm.Policy{
|
|
| 90 |
- {Role: swarm.NodeRoleWorker, Secret: &secret},
|
|
| 91 |
- {Role: swarm.NodeRoleManager, Secret: &secret},
|
|
| 92 |
- }, |
|
| 93 |
- }, |
|
| 94 |
- }, |
|
| 95 |
- }), checker.IsNil) |
|
| 96 |
- |
|
| 97 |
- d2 := s.AddDaemon(c, false, false) |
|
| 98 |
- err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
|
|
| 99 |
- c.Assert(err, checker.NotNil) |
|
| 100 |
- if secret == "" {
|
|
| 101 |
- c.Assert(err.Error(), checker.Contains, "needs to be accepted") |
|
| 102 |
- info, err := d2.info() |
|
| 103 |
- c.Assert(err, checker.IsNil) |
|
| 104 |
- c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending) |
|
| 105 |
- c.Assert(d2.Leave(false), checker.IsNil) |
|
| 106 |
- info, err = d2.info() |
|
| 107 |
- c.Assert(err, checker.IsNil) |
|
| 108 |
- c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) |
|
| 109 |
- } else {
|
|
| 110 |
- c.Assert(err.Error(), checker.Contains, "valid secret token is necessary") |
|
| 111 |
- info, err := d2.info() |
|
| 112 |
- c.Assert(err, checker.IsNil) |
|
| 113 |
- c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) |
|
| 114 |
- } |
|
| 115 |
- d3 := s.AddDaemon(c, false, false) |
|
| 116 |
- c.Assert(d3.Join(swarm.JoinRequest{Secret: secret, RemoteAddrs: []string{d1.listenAddr}}), checker.NotNil)
|
|
| 117 |
- info, err := d3.info() |
|
| 118 |
- c.Assert(err, checker.IsNil) |
|
| 119 |
- c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending) |
|
| 120 |
- c.Assert(len(info.NodeID), checker.GreaterThan, 5) |
|
| 121 |
- d1.updateNode(c, info.NodeID, func(n *swarm.Node) {
|
|
| 122 |
- n.Spec.Membership = swarm.NodeMembershipAccepted |
|
| 123 |
- }) |
|
| 124 |
- waitAndAssert(c, defaultReconciliationTimeout, d3.checkLocalNodeState, checker.Equals, swarm.LocalNodeStateActive) |
|
| 125 |
-} |
|
| 126 |
- |
|
| 127 |
-func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
|
|
| 75 |
+func (s *DockerSwarmSuite) TestApiSwarmJoinToken(c *check.C) {
|
|
| 128 | 76 |
testRequires(c, Network) |
| 129 | 77 |
d1 := s.AddDaemon(c, false, false) |
| 130 |
- secret := "foobar" |
|
| 131 |
- c.Assert(d1.Init(swarm.InitRequest{
|
|
| 132 |
- Spec: swarm.Spec{
|
|
| 133 |
- AcceptancePolicy: swarm.AcceptancePolicy{
|
|
| 134 |
- Policies: []swarm.Policy{
|
|
| 135 |
- {Role: swarm.NodeRoleWorker, Autoaccept: true, Secret: &secret},
|
|
| 136 |
- {Role: swarm.NodeRoleManager, Secret: &secret},
|
|
| 137 |
- }, |
|
| 138 |
- }, |
|
| 139 |
- }, |
|
| 140 |
- }), checker.IsNil) |
|
| 78 |
+ c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
|
|
| 141 | 79 |
|
| 142 | 80 |
d2 := s.AddDaemon(c, false, false) |
| 143 | 81 |
err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
|
| 144 | 82 |
c.Assert(err, checker.NotNil) |
| 145 |
- c.Assert(err.Error(), checker.Contains, "secret token is necessary") |
|
| 83 |
+ c.Assert(err.Error(), checker.Contains, "join token is necessary") |
|
| 146 | 84 |
info, err := d2.info() |
| 147 | 85 |
c.Assert(err, checker.IsNil) |
| 148 | 86 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) |
| 149 | 87 |
|
| 150 |
- err = d2.Join(swarm.JoinRequest{Secret: "foobaz", RemoteAddrs: []string{d1.listenAddr}})
|
|
| 88 |
+ err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.listenAddr}})
|
|
| 151 | 89 |
c.Assert(err, checker.NotNil) |
| 152 |
- c.Assert(err.Error(), checker.Contains, "secret token is necessary") |
|
| 90 |
+ c.Assert(err.Error(), checker.Contains, "join token is necessary") |
|
| 153 | 91 |
info, err = d2.info() |
| 154 | 92 |
c.Assert(err, checker.IsNil) |
| 155 | 93 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) |
| 156 | 94 |
|
| 157 |
- c.Assert(d2.Join(swarm.JoinRequest{Secret: "foobar", RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
|
| 95 |
+ workerToken := d1.joinTokens(c).Worker |
|
| 96 |
+ |
|
| 97 |
+ c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
|
| 158 | 98 |
info, err = d2.info() |
| 159 | 99 |
c.Assert(err, checker.IsNil) |
| 160 | 100 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) |
| ... | ... |
@@ -163,22 +103,19 @@ func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
|
| 163 | 163 |
c.Assert(err, checker.IsNil) |
| 164 | 164 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) |
| 165 | 165 |
|
| 166 |
- // change secret |
|
| 167 |
- d1.updateSwarm(c, func(s *swarm.Spec) {
|
|
| 168 |
- for i := range s.AcceptancePolicy.Policies {
|
|
| 169 |
- p := "foobaz" |
|
| 170 |
- s.AcceptancePolicy.Policies[i].Secret = &p |
|
| 171 |
- } |
|
| 172 |
- }) |
|
| 166 |
+ // change tokens |
|
| 167 |
+ d1.rotateTokens(c) |
|
| 173 | 168 |
|
| 174 |
- err = d2.Join(swarm.JoinRequest{Secret: "foobar", RemoteAddrs: []string{d1.listenAddr}})
|
|
| 169 |
+ err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}})
|
|
| 175 | 170 |
c.Assert(err, checker.NotNil) |
| 176 |
- c.Assert(err.Error(), checker.Contains, "secret token is necessary") |
|
| 171 |
+ c.Assert(err.Error(), checker.Contains, "join token is necessary") |
|
| 177 | 172 |
info, err = d2.info() |
| 178 | 173 |
c.Assert(err, checker.IsNil) |
| 179 | 174 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) |
| 180 | 175 |
|
| 181 |
- c.Assert(d2.Join(swarm.JoinRequest{Secret: "foobaz", RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
|
| 176 |
+ workerToken = d1.joinTokens(c).Worker |
|
| 177 |
+ |
|
| 178 |
+ c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
|
| 182 | 179 |
info, err = d2.info() |
| 183 | 180 |
c.Assert(err, checker.IsNil) |
| 184 | 181 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) |
| ... | ... |
@@ -187,41 +124,17 @@ func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
|
| 187 | 187 |
c.Assert(err, checker.IsNil) |
| 188 | 188 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) |
| 189 | 189 |
|
| 190 |
- // change policy, don't change secret |
|
| 191 |
- d1.updateSwarm(c, func(s *swarm.Spec) {
|
|
| 192 |
- for i, p := range s.AcceptancePolicy.Policies {
|
|
| 193 |
- if p.Role == swarm.NodeRoleManager {
|
|
| 194 |
- s.AcceptancePolicy.Policies[i].Autoaccept = false |
|
| 195 |
- } |
|
| 196 |
- s.AcceptancePolicy.Policies[i].Secret = nil |
|
| 197 |
- } |
|
| 198 |
- }) |
|
| 190 |
+ // change spec, don't change tokens |
|
| 191 |
+ d1.updateSwarm(c, func(s *swarm.Spec) {})
|
|
| 199 | 192 |
|
| 200 | 193 |
err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
|
| 201 | 194 |
c.Assert(err, checker.NotNil) |
| 202 |
- c.Assert(err.Error(), checker.Contains, "secret token is necessary") |
|
| 203 |
- info, err = d2.info() |
|
| 204 |
- c.Assert(err, checker.IsNil) |
|
| 205 |
- c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) |
|
| 206 |
- |
|
| 207 |
- c.Assert(d2.Join(swarm.JoinRequest{Secret: "foobaz", RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
|
| 208 |
- info, err = d2.info() |
|
| 209 |
- c.Assert(err, checker.IsNil) |
|
| 210 |
- c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) |
|
| 211 |
- c.Assert(d2.Leave(false), checker.IsNil) |
|
| 195 |
+ c.Assert(err.Error(), checker.Contains, "join token is necessary") |
|
| 212 | 196 |
info, err = d2.info() |
| 213 | 197 |
c.Assert(err, checker.IsNil) |
| 214 | 198 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) |
| 215 | 199 |
|
| 216 |
- // clear secret |
|
| 217 |
- d1.updateSwarm(c, func(s *swarm.Spec) {
|
|
| 218 |
- for i := range s.AcceptancePolicy.Policies {
|
|
| 219 |
- p := "" |
|
| 220 |
- s.AcceptancePolicy.Policies[i].Secret = &p |
|
| 221 |
- } |
|
| 222 |
- }) |
|
| 223 |
- |
|
| 224 |
- c.Assert(d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
|
| 200 |
+ c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
|
| 225 | 201 |
info, err = d2.info() |
| 226 | 202 |
c.Assert(err, checker.IsNil) |
| 227 | 203 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) |
| ... | ... |
@@ -229,34 +142,24 @@ func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
|
| 229 | 229 |
info, err = d2.info() |
| 230 | 230 |
c.Assert(err, checker.IsNil) |
| 231 | 231 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) |
| 232 |
- |
|
| 233 | 232 |
} |
| 234 | 233 |
|
| 235 | 234 |
func (s *DockerSwarmSuite) TestApiSwarmCAHash(c *check.C) {
|
| 236 | 235 |
testRequires(c, Network) |
| 237 | 236 |
d1 := s.AddDaemon(c, true, true) |
| 238 | 237 |
d2 := s.AddDaemon(c, false, false) |
| 239 |
- err := d2.Join(swarm.JoinRequest{CACertHash: "foobar", RemoteAddrs: []string{d1.listenAddr}})
|
|
| 238 |
+ splitToken := strings.Split(d1.joinTokens(c).Worker, "-") |
|
| 239 |
+ splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e" |
|
| 240 |
+ replacementToken := strings.Join(splitToken, "-") |
|
| 241 |
+ err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.listenAddr}})
|
|
| 240 | 242 |
c.Assert(err, checker.NotNil) |
| 241 |
- c.Assert(err.Error(), checker.Contains, "invalid checksum digest format") |
|
| 242 |
- |
|
| 243 |
- c.Assert(len(d1.CACertHash), checker.GreaterThan, 0) |
|
| 244 |
- c.Assert(d2.Join(swarm.JoinRequest{CACertHash: d1.CACertHash, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
|
|
| 243 |
+ c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint") |
|
| 245 | 244 |
} |
| 246 | 245 |
|
| 247 | 246 |
func (s *DockerSwarmSuite) TestApiSwarmPromoteDemote(c *check.C) {
|
| 248 | 247 |
testRequires(c, Network) |
| 249 | 248 |
d1 := s.AddDaemon(c, false, false) |
| 250 |
- c.Assert(d1.Init(swarm.InitRequest{
|
|
| 251 |
- Spec: swarm.Spec{
|
|
| 252 |
- AcceptancePolicy: swarm.AcceptancePolicy{
|
|
| 253 |
- Policies: []swarm.Policy{
|
|
| 254 |
- {Role: swarm.NodeRoleWorker, Autoaccept: true},
|
|
| 255 |
- {Role: swarm.NodeRoleManager},
|
|
| 256 |
- }, |
|
| 257 |
- }, |
|
| 258 |
- }, |
|
| 259 |
- }), checker.IsNil) |
|
| 249 |
+ c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
|
|
| 260 | 250 |
d2 := s.AddDaemon(c, true, false) |
| 261 | 251 |
|
| 262 | 252 |
info, err := d2.info() |
| ... | ... |
@@ -838,9 +741,7 @@ func (s *DockerSwarmSuite) TestApiSwarmForceNewCluster(c *check.C) {
|
| 838 | 838 |
|
| 839 | 839 |
c.Assert(d1.Init(swarm.InitRequest{
|
| 840 | 840 |
ForceNewCluster: true, |
| 841 |
- Spec: swarm.Spec{
|
|
| 842 |
- AcceptancePolicy: autoAcceptPolicy, |
|
| 843 |
- }, |
|
| 841 |
+ Spec: swarm.Spec{},
|
|
| 844 | 842 |
}), checker.IsNil) |
| 845 | 843 |
|
| 846 | 844 |
waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) |
| ... | ... |
@@ -937,7 +838,6 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount |
| 937 | 937 |
for _, n := range d.listNodes(c) {
|
| 938 | 938 |
c.Assert(n.Status.State, checker.Equals, swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID))
|
| 939 | 939 |
c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID))
|
| 940 |
- c.Assert(n.Spec.Membership, checker.Equals, swarm.NodeMembershipAccepted, check.Commentf("membership of node %s, reported by %s", n.ID, d.Info.NodeID))
|
|
| 941 | 940 |
if n.Spec.Role == swarm.NodeRoleManager {
|
| 942 | 941 |
c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.Info.NodeID))
|
| 943 | 942 |
if n.ManagerStatus.Leader {
|
| ... | ... |
@@ -25,50 +25,13 @@ func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) {
|
| 25 | 25 |
return sw[0].Spec |
| 26 | 26 |
} |
| 27 | 27 |
|
| 28 |
- out, err := d.Cmd("swarm", "update", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", "--auto-accept", "manager", "--auto-accept", "worker", "--secret", "foo")
|
|
| 28 |
+ out, err := d.Cmd("swarm", "update", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s")
|
|
| 29 | 29 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
|
| 30 | 30 |
|
| 31 | 31 |
spec := getSpec() |
| 32 | 32 |
c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) |
| 33 | 33 |
c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(11*time.Second)) |
| 34 | 34 |
|
| 35 |
- c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2) |
|
| 36 |
- |
|
| 37 |
- for _, p := range spec.AcceptancePolicy.Policies {
|
|
| 38 |
- c.Assert(p.Autoaccept, checker.Equals, true) |
|
| 39 |
- c.Assert(p.Secret, checker.NotNil) |
|
| 40 |
- c.Assert(*p.Secret, checker.Not(checker.Equals), "") |
|
| 41 |
- } |
|
| 42 |
- |
|
| 43 |
- out, err = d.Cmd("swarm", "update", "--auto-accept", "none")
|
|
| 44 |
- c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
|
|
| 45 |
- |
|
| 46 |
- spec = getSpec() |
|
| 47 |
- c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) |
|
| 48 |
- c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(11*time.Second)) |
|
| 49 |
- |
|
| 50 |
- c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2) |
|
| 51 |
- |
|
| 52 |
- for _, p := range spec.AcceptancePolicy.Policies {
|
|
| 53 |
- c.Assert(p.Autoaccept, checker.Equals, false) |
|
| 54 |
- // secret is still set |
|
| 55 |
- c.Assert(p.Secret, checker.NotNil) |
|
| 56 |
- c.Assert(*p.Secret, checker.Not(checker.Equals), "") |
|
| 57 |
- } |
|
| 58 |
- |
|
| 59 |
- out, err = d.Cmd("swarm", "update", "--auto-accept", "manager", "--secret", "")
|
|
| 60 |
- c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
|
|
| 61 |
- |
|
| 62 |
- spec = getSpec() |
|
| 63 |
- |
|
| 64 |
- c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2) |
|
| 65 |
- |
|
| 66 |
- for _, p := range spec.AcceptancePolicy.Policies {
|
|
| 67 |
- c.Assert(p.Autoaccept, checker.Equals, p.Role == swarm.NodeRoleManager) |
|
| 68 |
- // secret has been removed |
|
| 69 |
- c.Assert(p.Secret, checker.IsNil) |
|
| 70 |
- } |
|
| 71 |
- |
|
| 72 | 35 |
// setting anything under 30m for cert-expiry is not allowed |
| 73 | 36 |
out, err = d.Cmd("swarm", "update", "--cert-expiry", "15m")
|
| 74 | 37 |
c.Assert(err, checker.NotNil) |
| ... | ... |
@@ -89,37 +52,21 @@ func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) {
|
| 89 | 89 |
return sw[0].Spec |
| 90 | 90 |
} |
| 91 | 91 |
|
| 92 |
- out, err := d.Cmd("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", "--auto-accept", "manager", "--auto-accept", "worker", "--secret", "foo")
|
|
| 92 |
+ out, err := d.Cmd("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s")
|
|
| 93 | 93 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
|
| 94 | 94 |
|
| 95 | 95 |
spec := getSpec() |
| 96 | 96 |
c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) |
| 97 | 97 |
c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(11*time.Second)) |
| 98 | 98 |
|
| 99 |
- c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2) |
|
| 100 |
- |
|
| 101 |
- for _, p := range spec.AcceptancePolicy.Policies {
|
|
| 102 |
- c.Assert(p.Autoaccept, checker.Equals, true) |
|
| 103 |
- c.Assert(p.Secret, checker.NotNil) |
|
| 104 |
- c.Assert(*p.Secret, checker.Not(checker.Equals), "") |
|
| 105 |
- } |
|
| 106 |
- |
|
| 107 | 99 |
c.Assert(d.Leave(true), checker.IsNil) |
| 108 | 100 |
|
| 109 |
- out, err = d.Cmd("swarm", "init", "--auto-accept", "none", "--secret", "")
|
|
| 101 |
+ out, err = d.Cmd("swarm", "init")
|
|
| 110 | 102 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
|
| 111 | 103 |
|
| 112 | 104 |
spec = getSpec() |
| 113 | 105 |
c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 90*24*time.Hour) |
| 114 | 106 |
c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(5*time.Second)) |
| 115 |
- |
|
| 116 |
- c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2) |
|
| 117 |
- |
|
| 118 |
- for _, p := range spec.AcceptancePolicy.Policies {
|
|
| 119 |
- c.Assert(p.Autoaccept, checker.Equals, false) |
|
| 120 |
- c.Assert(p.Secret, checker.IsNil) |
|
| 121 |
- } |
|
| 122 |
- |
|
| 123 | 107 |
} |
| 124 | 108 |
|
| 125 | 109 |
func (s *DockerSwarmSuite) TestSwarmInitIPv6(c *check.C) {
|