
Merge pull request #33996 from thaJeztah/un-fork-etcd

Un-fork coreos/etcd - bump to v3.2.1

Sebastiaan van Stijn authored on 2017/07/11 01:14:22
Showing 30 changed files
... ...
@@ -42,7 +42,8 @@ github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969
 github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
 github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
-github.com/coreos/etcd ea5389a79f40206170582c1ea076191b8622cb8e https://github.com/aaronlehmann/etcd # for https://github.com/coreos/etcd/pull/7830
+github.com/coreos/etcd v3.2.1
+github.com/coreos/go-semver v0.2.0
 github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
 github.com/hashicorp/consul v0.5.2
 github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
... ...
@@ -11,7 +11,7 @@
 
 ![etcd Logo](logos/etcd-horizontal-color.png)
 
-etcd is a distributed, consistent key-value store for shared configuration and service discovery, with a focus on being:
+etcd is a distributed reliable key-value store for the most critical data of a distributed system, with a focus on being:
 
 * *Simple*: well-defined, user-facing API (gRPC)
 * *Secure*: automatic TLS with optional client cert authentication
... ...
@@ -37,13 +37,11 @@ See [etcdctl][etcdctl] for a simple command line client.
 
 ### Getting etcd
 
-The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, AppC (ACI), and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
+The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, [rkt][rkt], and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
 
-For those wanting to try the very latest version, you can [build the latest version of etcd][dl-build] from the `master` branch.
-You will first need [*Go*](https://golang.org/) installed on your machine (version 1.6+ is required).
-All development occurs on `master`, including new features and bug fixes.
-Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
+For those wanting to try the very latest version, [build the latest version of etcd][dl-build] from the `master` branch. This first needs [*Go*](https://golang.org/) installed (version 1.8+ is required). All development occurs on `master`, including new features and bug fixes. Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
 
+[rkt]: https://github.com/rkt/rkt/releases/
 [github-release]: https://github.com/coreos/etcd/releases/
 [branch-management]: ./Documentation/branch_management.md
 [dl-build]: ./Documentation/dl_build.md#build-the-latest-version
... ...
@@ -75,9 +73,9 @@ That's it! etcd is now running and serving client requests. For more
 
 ### etcd TCP ports
 
-The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication. 
+The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication.
 
-[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
+[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
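As an editorial aside for readers of this PR (not part of the diff): a minimal sketch of talking to the default client port 2379 with the vendored v2 `client` package. It assumes an etcd instance is already listening locally on 127.0.0.1:2379.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	// Assumes an etcd member serving client requests on the default port 2379.
	c, err := client.New(client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		HeaderTimeoutPerRequest: 3 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)
	// Write and read back a key through the v2 keys API.
	if _, err := kapi.Set(context.Background(), "/foo", "bar", nil); err != nil {
		log.Fatal(err)
	}
	resp, err := kapi.Get(context.Background(), "/foo", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Node.Key, "=", resp.Node.Value)
}
```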
 
 ### Running a local etcd cluster
 
... ...
@@ -95,7 +93,7 @@ Every cluster member and proxy accepts key value reads and key value writes.
 
 ### Running etcd on Kubernetes
 
-If you want to run etcd cluster on Kubernetes, try [etcd operator](https://github.com/coreos/etcd-operator).
+To run an etcd cluster on Kubernetes, try [etcd operator](https://github.com/coreos/etcd-operator).
 
 ### Next steps
 
... ...
@@ -105,7 +103,7 @@ Now it's time to dig into the full etcd API and other guides.
 - Explore the full gRPC [API][api].
 - Set up a [multi-machine cluster][clustering].
 - Learn the [config format, env variables and flags][configuration].
-- Find [language bindings and tools][libraries-and-tools].
+- Find [language bindings and tools][integrations].
 - Use TLS to [secure an etcd cluster][security].
 - [Tune etcd][tuning].
 
... ...
@@ -113,7 +111,7 @@ Now it's time to dig into the full etcd API and other guides.
 [api]: ./Documentation/dev-guide/api_reference_v3.md
 [clustering]: ./Documentation/op-guide/clustering.md
 [configuration]: ./Documentation/op-guide/configuration.md
-[libraries-and-tools]: ./Documentation/libraries-and-tools.md
+[integrations]: ./Documentation/integrations.md
 [security]: ./Documentation/op-guide/security.md
 [tuning]: ./Documentation/tuning.md
 
... ...
@@ -130,10 +128,8 @@ See [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the co
 
 ## Reporting bugs
 
-See [reporting bugs](Documentation/reporting_bugs.md) for details about reporting any issue you may encounter.
+See [reporting bugs](Documentation/reporting_bugs.md) for details about reporting any issues.
 
 ### License
 
 etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
-
-
... ...
@@ -15,6 +15,7 @@
 package client
 
 import (
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io/ioutil"
... ...
@@ -27,6 +28,8 @@ import (
 	"sync"
 	"time"
 
+	"github.com/coreos/etcd/version"
+
 	"golang.org/x/net/context"
 )
 
... ...
@@ -201,6 +204,9 @@ type Client interface {
 	// returned
 	SetEndpoints(eps []string) error
 
+	// GetVersion retrieves the current etcd server and cluster version
+	GetVersion(ctx context.Context) (*version.Versions, error)
+
 	httpClient
 }
 
... ...
@@ -477,6 +483,33 @@ func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration
 	}
 }
 
+func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
+	act := &getAction{Prefix: "/version"}
+
+	resp, body, err := c.Do(ctx, act)
+	if err != nil {
+		return nil, err
+	}
+
+	switch resp.StatusCode {
+	case http.StatusOK:
+		if len(body) == 0 {
+			return nil, ErrEmptyBody
+		}
+		var vresp version.Versions
+		if err := json.Unmarshal(body, &vresp); err != nil {
+			return nil, ErrInvalidJSON
+		}
+		return &vresp, nil
+	default:
+		var etcdErr Error
+		if err := json.Unmarshal(body, &etcdErr); err != nil {
+			return nil, ErrInvalidJSON
+		}
+		return nil, etcdErr
+	}
+}
+
 type roundTripResponse struct {
 	resp *http.Response
 	err  error
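A brief sketch (not part of the diff) of how a caller might use the newly exposed `Client.GetVersion`, which hits the `/version` HTTP path shown above; the endpoint is an assumption.

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	// Endpoint is an assumption; nil Transport falls back to the client's default.
	c, err := client.New(client.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	v, err := c.GetVersion(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("etcdserver %s, etcdcluster %s\n", v.Server, v.Cluster)
}
```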
... ...
@@ -14,8 +14,27 @@
 
 package client
 
+import (
+	"github.com/coreos/etcd/pkg/srv"
+)
+
 // Discoverer is an interface that wraps the Discover method.
 type Discoverer interface {
 	// Discover looks up the etcd servers for the domain.
 	Discover(domain string) ([]string, error)
 }
+
+type srvDiscover struct{}
+
+// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
+func NewSRVDiscover() Discoverer {
+	return &srvDiscover{}
+}
+
+func (d *srvDiscover) Discover(domain string) ([]string, error) {
+	srvs, err := srv.GetClient("etcd-client", domain)
+	if err != nil {
+		return nil, err
+	}
+	return srvs.Endpoints, nil
+}
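For illustration only, a sketch of resolving client endpoints through the rewritten Discoverer; the domain name below is a placeholder, not anything from the diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
)

func main() {
	// "example.com" is a placeholder domain expected to publish
	// _etcd-client._tcp (and optionally _etcd-client-ssl._tcp) SRV records.
	d := client.NewSRVDiscover()
	endpoints, err := d.Discover("example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("discovered endpoints:", endpoints)
}
```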
deleted file mode 100644
... ...
@@ -1,65 +0,0 @@
1
-// Copyright 2015 The etcd Authors
2
-//
3
-// Licensed under the Apache License, Version 2.0 (the "License");
4
-// you may not use this file except in compliance with the License.
5
-// You may obtain a copy of the License at
6
-//
7
-//     http://www.apache.org/licenses/LICENSE-2.0
8
-//
9
-// Unless required by applicable law or agreed to in writing, software
10
-// distributed under the License is distributed on an "AS IS" BASIS,
11
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
-// See the License for the specific language governing permissions and
13
-// limitations under the License.
14
-
15
-package client
16
-
17
-import (
18
-	"fmt"
19
-	"net"
20
-	"net/url"
21
-)
22
-
23
-var (
24
-	// indirection for testing
25
-	lookupSRV = net.LookupSRV
26
-)
27
-
28
-type srvDiscover struct{}
29
-
30
-// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
31
-func NewSRVDiscover() Discoverer {
32
-	return &srvDiscover{}
33
-}
34
-
35
-// Discover looks up the etcd servers for the domain.
36
-func (d *srvDiscover) Discover(domain string) ([]string, error) {
37
-	var urls []*url.URL
38
-
39
-	updateURLs := func(service, scheme string) error {
40
-		_, addrs, err := lookupSRV(service, "tcp", domain)
41
-		if err != nil {
42
-			return err
43
-		}
44
-		for _, srv := range addrs {
45
-			urls = append(urls, &url.URL{
46
-				Scheme: scheme,
47
-				Host:   net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
48
-			})
49
-		}
50
-		return nil
51
-	}
52
-
53
-	errHTTPS := updateURLs("etcd-client-ssl", "https")
54
-	errHTTP := updateURLs("etcd-client", "http")
55
-
56
-	if errHTTPS != nil && errHTTP != nil {
57
-		return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
58
-	}
59
-
60
-	endpoints := make([]string, len(urls))
61
-	for i := range urls {
62
-		endpoints[i] = urls[i].String()
63
-	}
64
-	return endpoints, nil
65
-}
... ...
@@ -17,9 +17,10 @@ package fileutil
17 17
 
18 18
 import (
19 19
 	"fmt"
20
+	"io"
20 21
 	"io/ioutil"
21 22
 	"os"
22
-	"path"
23
+	"path/filepath"
23 24
 	"sort"
24 25
 
25 26
 	"github.com/coreos/pkg/capnslog"
... ...
@@ -39,7 +40,7 @@ var (
39 39
 // IsDirWriteable checks if dir is writable by writing and removing a file
40 40
 // to dir. It returns nil if dir is writable.
41 41
 func IsDirWriteable(dir string) error {
42
-	f := path.Join(dir, ".touch")
42
+	f := filepath.Join(dir, ".touch")
43 43
 	if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
44 44
 		return err
45 45
 	}
... ...
@@ -101,11 +102,11 @@ func Exist(name string) bool {
 // shorten the length of the file.
 func ZeroToEnd(f *os.File) error {
 	// TODO: support FALLOC_FL_ZERO_RANGE
-	off, err := f.Seek(0, os.SEEK_CUR)
+	off, err := f.Seek(0, io.SeekCurrent)
 	if err != nil {
 		return err
 	}
-	lenf, lerr := f.Seek(0, os.SEEK_END)
+	lenf, lerr := f.Seek(0, io.SeekEnd)
 	if lerr != nil {
 		return lerr
 	}
... ...
@@ -116,6 +117,6 @@ func ZeroToEnd(f *os.File) error {
 	if err = Preallocate(f, lenf, true); err != nil {
 		return err
 	}
-	_, err = f.Seek(off, os.SEEK_SET)
+	_, err = f.Seek(off, io.SeekStart)
 	return err
 }
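These hunks swap the deprecated `os.SEEK_*` constants for the `io.Seek*` whence values introduced in Go 1.7. A standalone sketch of the same pattern (the file being opened is arbitrary, chosen only so the example runs anywhere):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// Open the running binary itself just to have a real file to seek in.
	f, err := os.Open(os.Args[0])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// io.SeekEnd replaces os.SEEK_END: seek to the end to learn the size...
	size, err := f.Seek(0, io.SeekEnd)
	if err != nil {
		log.Fatal(err)
	}
	// ...and io.SeekStart replaces os.SEEK_SET to rewind for reading.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		log.Fatal(err)
	}
	fmt.Println("file size:", size)
}
```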
... ...
@@ -17,6 +17,7 @@
17 17
 package fileutil
18 18
 
19 19
 import (
20
+	"io"
20 21
 	"os"
21 22
 	"syscall"
22 23
 )
... ...
@@ -36,7 +37,7 @@ const (
36 36
 var (
37 37
 	wrlck = syscall.Flock_t{
38 38
 		Type:   syscall.F_WRLCK,
39
-		Whence: int16(os.SEEK_SET),
39
+		Whence: int16(io.SeekStart),
40 40
 		Start:  0,
41 41
 		Len:    0,
42 42
 	}
... ...
@@ -14,7 +14,10 @@
14 14
 
15 15
 package fileutil
16 16
 
17
-import "os"
17
+import (
18
+	"io"
19
+	"os"
20
+)
18 21
 
19 22
 // Preallocate tries to allocate the space for given
20 23
 // file. This operation is only supported on linux by a
... ...
@@ -22,6 +25,10 @@ import "os"
22 22
 // If the operation is unsupported, no error will be returned.
23 23
 // Otherwise, the error encountered will be returned.
24 24
 func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
25
+	if sizeInBytes == 0 {
26
+		// fallocate will return EINVAL if length is 0; skip
27
+		return nil
28
+	}
25 29
 	if extendFile {
26 30
 		return preallocExtend(f, sizeInBytes)
27 31
 	}
... ...
@@ -29,15 +36,15 @@ func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
29 29
 }
30 30
 
31 31
 func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
32
-	curOff, err := f.Seek(0, os.SEEK_CUR)
32
+	curOff, err := f.Seek(0, io.SeekCurrent)
33 33
 	if err != nil {
34 34
 		return err
35 35
 	}
36
-	size, err := f.Seek(sizeInBytes, os.SEEK_END)
36
+	size, err := f.Seek(sizeInBytes, io.SeekEnd)
37 37
 	if err != nil {
38 38
 		return err
39 39
 	}
40
-	if _, err = f.Seek(curOff, os.SEEK_SET); err != nil {
40
+	if _, err = f.Seek(curOff, io.SeekStart); err != nil {
41 41
 		return err
42 42
 	}
43 43
 	if sizeInBytes > size {
... ...
@@ -16,7 +16,7 @@ package fileutil
16 16
 
17 17
 import (
18 18
 	"os"
19
-	"path"
19
+	"path/filepath"
20 20
 	"sort"
21 21
 	"strings"
22 22
 	"time"
... ...
@@ -45,7 +45,7 @@ func purgeFile(dirname string, suffix string, max uint, interval time.Duration,
45 45
 			sort.Strings(newfnames)
46 46
 			fnames = newfnames
47 47
 			for len(newfnames) > int(max) {
48
-				f := path.Join(dirname, newfnames[0])
48
+				f := filepath.Join(dirname, newfnames[0])
49 49
 				l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
50 50
 				if err != nil {
51 51
 					break
... ...
@@ -32,8 +32,8 @@ const (
 // a node member ID.
 //
 // The initial id is in this format:
-// High order byte is memberID, next 5 bytes are from timestamp,
-// and low order 2 bytes are 0s.
+// High order 2 bytes are from memberID, next 5 bytes are from timestamp,
+// and low order one byte is a counter.
 // | prefix   | suffix              |
 // | 2 bytes  | 5 bytes   | 1 byte  |
 // | memberID | timestamp | cnt     |
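To make the revised comment concrete, a hedged sketch of packing a value with that 2-byte prefix / 5-byte timestamp / 1-byte counter layout. This only illustrates the documented bit layout; it is not the actual idutil generator code.

```go
package main

import (
	"fmt"
	"time"
)

// composeID packs a member ID prefix, a millisecond timestamp and a counter
// into one uint64 using the 2-byte / 5-byte / 1-byte layout described above.
func composeID(memberID uint16, ts time.Time, cnt uint8) uint64 {
	suffix := uint64(ts.UnixNano()/int64(time.Millisecond))<<8 | uint64(cnt)
	// Keep only the low 6 bytes of the suffix so the member prefix stays intact.
	suffix &= (uint64(1) << 48) - 1
	return uint64(memberID)<<48 | suffix
}

func main() {
	fmt.Printf("%#016x\n", composeID(0x1a2b, time.Now(), 7))
}
```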
new file mode 100644
... ...
@@ -0,0 +1,140 @@
0
+// Copyright 2015 The etcd Authors
1
+//
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+//     http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+// Package srv looks up DNS SRV records.
15
+package srv
16
+
17
+import (
18
+	"fmt"
19
+	"net"
20
+	"net/url"
21
+	"strings"
22
+
23
+	"github.com/coreos/etcd/pkg/types"
24
+)
25
+
26
+var (
27
+	// indirection for testing
28
+	lookupSRV      = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict
29
+	resolveTCPAddr = net.ResolveTCPAddr
30
+)
31
+
32
+// GetCluster gets the cluster information via DNS discovery.
33
+// Also sees each entry as a separate instance.
34
+func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) {
35
+	tempName := int(0)
36
+	tcp2ap := make(map[string]url.URL)
37
+
38
+	// First, resolve the apurls
39
+	for _, url := range apurls {
40
+		tcpAddr, err := resolveTCPAddr("tcp", url.Host)
41
+		if err != nil {
42
+			return nil, err
43
+		}
44
+		tcp2ap[tcpAddr.String()] = url
45
+	}
46
+
47
+	stringParts := []string{}
48
+	updateNodeMap := func(service, scheme string) error {
49
+		_, addrs, err := lookupSRV(service, "tcp", dns)
50
+		if err != nil {
51
+			return err
52
+		}
53
+		for _, srv := range addrs {
54
+			port := fmt.Sprintf("%d", srv.Port)
55
+			host := net.JoinHostPort(srv.Target, port)
56
+			tcpAddr, terr := resolveTCPAddr("tcp", host)
57
+			if terr != nil {
58
+				err = terr
59
+				continue
60
+			}
61
+			n := ""
62
+			url, ok := tcp2ap[tcpAddr.String()]
63
+			if ok {
64
+				n = name
65
+			}
66
+			if n == "" {
67
+				n = fmt.Sprintf("%d", tempName)
68
+				tempName++
69
+			}
70
+			// SRV records have a trailing dot but URL shouldn't.
71
+			shortHost := strings.TrimSuffix(srv.Target, ".")
72
+			urlHost := net.JoinHostPort(shortHost, port)
73
+			stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
74
+			if ok && url.Scheme != scheme {
75
+				err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
76
+			}
77
+		}
78
+		if len(stringParts) == 0 {
79
+			return err
80
+		}
81
+		return nil
82
+	}
83
+
84
+	failCount := 0
85
+	err := updateNodeMap(service+"-ssl", "https")
86
+	srvErr := make([]string, 2)
87
+	if err != nil {
88
+		srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err)
89
+		failCount++
90
+	}
91
+	err = updateNodeMap(service, "http")
92
+	if err != nil {
93
+		srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err)
94
+		failCount++
95
+	}
96
+	if failCount == 2 {
97
+		return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1])
98
+	}
99
+	return stringParts, nil
100
+}
101
+
102
+type SRVClients struct {
103
+	Endpoints []string
104
+	SRVs      []*net.SRV
105
+}
106
+
107
+// GetClient looks up the client endpoints for a service and domain.
108
+func GetClient(service, domain string) (*SRVClients, error) {
109
+	var urls []*url.URL
110
+	var srvs []*net.SRV
111
+
112
+	updateURLs := func(service, scheme string) error {
113
+		_, addrs, err := lookupSRV(service, "tcp", domain)
114
+		if err != nil {
115
+			return err
116
+		}
117
+		for _, srv := range addrs {
118
+			urls = append(urls, &url.URL{
119
+				Scheme: scheme,
120
+				Host:   net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
121
+			})
122
+		}
123
+		srvs = append(srvs, addrs...)
124
+		return nil
125
+	}
126
+
127
+	errHTTPS := updateURLs(service+"-ssl", "https")
128
+	errHTTP := updateURLs(service, "http")
129
+
130
+	if errHTTPS != nil && errHTTP != nil {
131
+		return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
132
+	}
133
+
134
+	endpoints := make([]string, len(urls))
135
+	for i := range urls {
136
+		endpoints[i] = urls[i].String()
137
+	}
138
+	return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
139
+}
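A short usage sketch for the new `pkg/srv` package introduced above; the discovery domain, member name, and peer URL are hypothetical and only illustrate the two entry points.

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/pkg/srv"
	"github.com/coreos/etcd/pkg/types"
)

func main() {
	// Hypothetical advertised peer URL and discovery domain.
	apurls, err := types.NewURLs([]string{"http://10.0.0.1:2380"})
	if err != nil {
		log.Fatal(err)
	}
	// Bootstrap peers from _etcd-server._tcp.example.com SRV records.
	peers, err := srv.GetCluster("etcd-server", "infra0", "example.com", apurls)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("initial cluster:", peers)

	// Client endpoints from _etcd-client._tcp.example.com (and the -ssl variant).
	clients, err := srv.GetClient("etcd-client", "example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("client endpoints:", clients.Endpoints)
}
```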
... ...
@@ -13,9 +13,7 @@ To keep the codebase small as well as provide flexibility, the library only impl
13 13
 
14 14
 In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine.  The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
15 15
 
16
-A simple example application, _raftexample_, is also available to help illustrate
17
-how to use this package in practice:
18
-https://github.com/coreos/etcd/tree/master/contrib/raftexample
16
+A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample
19 17
 
20 18
 # Features
21 19
 
... ...
@@ -51,11 +49,11 @@ This raft implementation also includes a few optional enhancements:
51 51
 - [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store
52 52
 - [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
53 53
 - [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
54
+- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks
54 55
 
55 56
 ## Usage
56 57
 
57
-The primary object in raft is a Node. You either start a Node from scratch
58
-using raft.StartNode or start a Node from some initial state using raft.RestartNode.
58
+The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode.
59 59
 
60 60
 To start a three-node cluster
61 61
 ```go
... ...
@@ -73,7 +71,7 @@ To start a three-node cluster
73 73
   n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
74 74
 ```
75 75
 
76
-You can start a single node cluster, like so:
76
+Start a single node cluster, like so:
77 77
 ```go
78 78
   // Create storage and config as shown above.
79 79
   // Set peer list to itself, so this node can become the leader of this single-node cluster.
... ...
@@ -81,7 +79,7 @@ You can start a single node cluster, like so:
81 81
   n := raft.StartNode(c, peers)
82 82
 ```
83 83
 
84
-To allow a new node to join this cluster, do not pass in any peers. First, you need add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, you can start the node with empty peer list, like so:
84
+To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so:
85 85
 ```go
86 86
   // Create storage and config as shown above.
87 87
   n := raft.StartNode(c, nil)
... ...
@@ -110,46 +108,21 @@ To restart a node from previous state:
110 110
   n := raft.RestartNode(c)
111 111
 ```
112 112
 
113
-Now that you are holding onto a Node you have a few responsibilities:
114
-
115
-First, you must read from the Node.Ready() channel and process the updates
116
-it contains. These steps may be performed in parallel, except as noted in step
117
-2.
118
-
119
-1. Write HardState, Entries, and Snapshot to persistent storage if they are
120
-not empty. Note that when writing an Entry with Index i, any
121
-previously-persisted entries with Index >= i must be discarded.
122
-
123
-2. Send all Messages to the nodes named in the To field. It is important that
124
-no messages be sent until the latest HardState has been persisted to disk,
125
-and all Entries written by any previous Ready batch (Messages may be sent while
126
-entries from the same batch are being persisted). To reduce the I/O latency, an
127
-optimization can be applied to make leader write to disk in parallel with its
128
-followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
129
-MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
130
-large). Note: Marshalling messages is not thread-safe; it is important that you
131
-make sure that no new entries are persisted while marshalling.
132
-The easiest way to achieve this is to serialise the messages directly inside
133
-your main raft loop.
134
-
135
-3. Apply Snapshot (if any) and CommittedEntries to the state machine.
136
-If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
137
-to apply it to the node. The configuration change may be cancelled at this point
138
-by setting the NodeID field to zero before calling ApplyConfChange
139
-(but ApplyConfChange must be called one way or the other, and the decision to cancel
140
-must be based solely on the state machine and not external information such as
141
-the observed health of the node).
142
-
143
-4. Call Node.Advance() to signal readiness for the next batch of updates.
144
-This may be done at any time after step 1, although all updates must be processed
145
-in the order they were returned by Ready.
146
-
147
-Second, all persisted log entries must be made available via an
148
-implementation of the Storage interface. The provided MemoryStorage
149
-type can be used for this (if you repopulate its state upon a
150
-restart), or you can supply your own disk-backed implementation.
151
-
152
-Third, when you receive a message from another node, pass it to Node.Step:
113
+After creating a Node, the user has a few responsibilities:
114
+
115
+First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2.
116
+
117
+1. Write HardState, Entries, and Snapshot to persistent storage if they are not empty. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
118
+
119
+2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop.
120
+
121
+3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node).
122
+
123
+4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready.
124
+
125
+Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied.
126
+
127
+Third, after receiving a message from another node, pass it to Node.Step:
153 128
 
154 129
 ```go
155 130
 	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
... ...
@@ -157,10 +130,7 @@ Third, when you receive a message from another node, pass it to Node.Step:
157 157
 	}
158 158
 ```
159 159
 
160
-Finally, you need to call `Node.Tick()` at regular intervals (probably
161
-via a `time.Ticker`). Raft has two important timeouts: heartbeat and the
162
-election timeout. However, internally to the raft package time is
163
-represented by an abstract "tick".
160
+Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick".
164 161
 
165 162
 The total state machine handling loop will look something like this:
166 163
 
... ...
@@ -190,16 +160,13 @@ The total state machine handling loop will look something like this:
190 190
   }
191 191
 ```
192 192
 
193
-To propose changes to the state machine from your node take your application
194
-data, serialize it into a byte slice and call:
193
+To propose changes to the state machine from the node to take application data, serialize it into a byte slice and call:
195 194
 
196 195
 ```go
197 196
 	n.Propose(ctx, data)
198 197
 ```
199 198
 
200
-If the proposal is committed, data will appear in committed entries with type
201
-raftpb.EntryNormal. There is no guarantee that a proposed command will be
202
-committed; you may have to re-propose after a timeout.
199
+If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. 
203 200
 
204 201
 To add or remove node in a cluster, build ConfChange struct 'cc' and call:
205 202
 
... ...
@@ -207,8 +174,7 @@ To add or remove node in a cluster, build ConfChange struct 'cc' and call:
207 207
 	n.ProposeConfChange(ctx, cc)
208 208
 ```
209 209
 
210
-After config change is committed, some committed entry with type
211
-raftpb.EntryConfChange will be returned. You must apply it to node through:
210
+After config change is committed, some committed entry with type raftpb.EntryConfChange will be returned. This must be applied to node through:
212 211
 
213 212
 ```go
214 213
 	var cc raftpb.ConfChange
... ...
@@ -223,25 +189,8 @@ may be reused. Node IDs must be non-zero.
223 223
 
224 224
 ## Implementation notes
225 225
 
226
-This implementation is up to date with the final Raft thesis
227
-(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
228
-implementation of the membership change protocol differs somewhat from
229
-that described in chapter 4. The key invariant that membership changes
230
-happen one node at a time is preserved, but in our implementation the
231
-membership change takes effect when its entry is applied, not when it
232
-is added to the log (so the entry is committed under the old
233
-membership instead of the new). This is equivalent in terms of safety,
234
-since the old and new configurations are guaranteed to overlap.
235
-
236
-To ensure that we do not attempt to commit two membership changes at
237
-once by matching log positions (which would be unsafe since they
238
-should have different quorum requirements), we simply disallow any
239
-proposed membership change while any uncommitted change appears in
240
-the leader's log.
241
-
242
-This approach introduces a problem when you try to remove a member
243
-from a two-member cluster: If one of the members dies before the
244
-other one receives the commit of the confchange entry, then the member
245
-cannot be removed any more since the cluster cannot make progress.
246
-For this reason it is highly recommended to use three or more nodes in
247
-every cluster.
226
+This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap.
227
+
228
+To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log.
229
+
230
+This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster.
... ...
@@ -85,6 +85,26 @@ func (u *unstable) stableTo(i, t uint64) {
 	if gt == t && i >= u.offset {
 		u.entries = u.entries[i+1-u.offset:]
 		u.offset = i + 1
+		u.shrinkEntriesArray()
+	}
+}
+
+// shrinkEntriesArray discards the underlying array used by the entries slice
+// if most of it isn't being used. This avoids holding references to a bunch of
+// potentially large entries that aren't needed anymore. Simply clearing the
+// entries wouldn't be safe because clients might still be using them.
+func (u *unstable) shrinkEntriesArray() {
+	// We replace the array if we're using less than half of the space in
+	// it. This number is fairly arbitrary, chosen as an attempt to balance
+	// memory usage vs number of allocations. It could probably be improved
+	// with some focused tuning.
+	const lenMultiple = 2
+	if len(u.entries) == 0 {
+		u.entries = nil
+	} else if len(u.entries)*lenMultiple < cap(u.entries) {
+		newEntries := make([]pb.Entry, len(u.entries))
+		copy(newEntries, u.entries)
+		u.entries = newEntries
 	}
 }
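The shrink logic above is a general Go pattern: copying the live elements into a fresh slice releases the large backing array once fewer than half its slots are in use. A standalone sketch of the same idea, independent of raft:

```go
package main

import "fmt"

// shrink reallocates s when it occupies less than half of its backing array,
// letting the garbage collector reclaim the rest.
func shrink(s []int) []int {
	const lenMultiple = 2
	if len(s) == 0 {
		return nil
	}
	if len(s)*lenMultiple < cap(s) {
		out := make([]int, len(s))
		copy(out, s)
		return out
	}
	return s
}

func main() {
	big := make([]int, 1000)
	live := big[:10] // len 10, but still pins the 1000-element backing array
	live = shrink(live)
	fmt.Println(len(live), cap(live)) // 10 10: the big array can now be collected
}
```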
 
... ...
@@ -83,6 +83,10 @@ type Ready struct {
83 83
 	// If it contains a MsgSnap message, the application MUST report back to raft
84 84
 	// when the snapshot has been received or has failed by calling ReportSnapshot.
85 85
 	Messages []pb.Message
86
+
87
+	// MustSync indicates whether the HardState and Entries must be synchronously
88
+	// written to disk or if an asynchronous write is permissible.
89
+	MustSync bool
86 90
 }
87 91
 
88 92
 func isHardStateEqual(a, b pb.HardState) bool {
... ...
@@ -517,5 +521,17 @@ func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
 	if len(r.readStates) != 0 {
 		rd.ReadStates = r.readStates
 	}
+	rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries))
 	return rd
 }
+
+// MustSync returns true if the hard state and count of Raft entries indicate
+// that a synchronous write to persistent storage is required.
+func MustSync(st, prevst pb.HardState, entsnum int) bool {
+	// Persistent state on all servers:
+	// (Updated on stable storage before responding to RPCs)
+	// currentTerm
+	// votedFor
+	// log entries[]
+	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
+}
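A small sketch of what the newly exported `raft.MustSync` reports to a WAL writer; the HardState values are hand-built purely for illustration.

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
)

func main() {
	prev := raftpb.HardState{Term: 5, Vote: 2, Commit: 10}

	// Only the commit index moved and no entries were appended: no fsync needed.
	fmt.Println(raft.MustSync(raftpb.HardState{Term: 5, Vote: 2, Commit: 12}, prev, 0)) // false

	// A term change (or a new vote, or appended entries) must hit stable storage first.
	fmt.Println(raft.MustSync(raftpb.HardState{Term: 6, Vote: 2, Commit: 12}, prev, 1)) // true
}
```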
... ...
@@ -823,6 +823,11 @@ func stepLeader(r *raft, m pb.Message) {
823 823
 		return
824 824
 	case pb.MsgReadIndex:
825 825
 		if r.quorum() > 1 {
826
+			if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term {
827
+				// Reject read only request when this leader has not committed any log entry at its term.
828
+				return
829
+			}
830
+
826 831
 			// thinking: use an interally defined context instead of the user given context.
827 832
 			// We can express this in terms of the term and index instead of a user-supplied value.
828 833
 			// This would allow multiple reads to piggyback on the same message.
... ...
@@ -1847,7 +1847,7 @@ func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) }
1847 1847
 
1848 1848
 var fileDescriptorRaft = []byte{
1849 1849
 	// 790 bytes of a gzipped FileDescriptorProto
1850
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46,
1850
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46,
1851 1851
 	0x10, 0x16, 0x29, 0xea, 0x6f, 0x28, 0xcb, 0xab, 0xb5, 0x5a, 0x2c, 0x0c, 0x43, 0x55, 0x85, 0x1e,
1852 1852
 	0x04, 0x17, 0x76, 0x5b, 0x1d, 0x7a, 0xe8, 0xcd, 0x96, 0x0a, 0x58, 0x40, 0x65, 0xb8, 0xb2, 0xdc,
1853 1853
 	0x43, 0x83, 0x20, 0x58, 0x8b, 0x2b, 0x4a, 0x89, 0xc9, 0x25, 0x96, 0x2b, 0xc7, 0xbe, 0x04, 0x79,
... ...
@@ -100,7 +100,7 @@ func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
100 100
 	if found {
101 101
 		ro.readIndexQueue = ro.readIndexQueue[i:]
102 102
 		for _, rs := range rss {
103
-			delete(ro.pendingReadIndex, string(rs.req.Context))
103
+			delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
104 104
 		}
105 105
 		return rss
106 106
 	}
... ...
@@ -15,15 +15,18 @@
15 15
 package snap
16 16
 
17 17
 import (
18
+	"errors"
18 19
 	"fmt"
19 20
 	"io"
20 21
 	"io/ioutil"
21 22
 	"os"
22
-	"path"
23
+	"path/filepath"
23 24
 
24 25
 	"github.com/coreos/etcd/pkg/fileutil"
25 26
 )
26 27
 
28
+var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist")
29
+
27 30
 // SaveDBFrom saves snapshot of the database from the given reader. It
28 31
 // guarantees the save operation is atomic.
29 32
 func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) {
... ...
@@ -41,7 +44,7 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) {
41 41
 		os.Remove(f.Name())
42 42
 		return n, err
43 43
 	}
44
-	fn := path.Join(s.dir, fmt.Sprintf("%016x.snap.db", id))
44
+	fn := s.dbFilePath(id)
45 45
 	if fileutil.Exist(fn) {
46 46
 		os.Remove(f.Name())
47 47
 		return n, nil
... ...
@@ -60,15 +63,15 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) {
60 60
 // DBFilePath returns the file path for the snapshot of the database with
61 61
 // given id. If the snapshot does not exist, it returns error.
62 62
 func (s *Snapshotter) DBFilePath(id uint64) (string, error) {
63
-	fns, err := fileutil.ReadDir(s.dir)
64
-	if err != nil {
63
+	if _, err := fileutil.ReadDir(s.dir); err != nil {
65 64
 		return "", err
66 65
 	}
67
-	wfn := fmt.Sprintf("%016x.snap.db", id)
68
-	for _, fn := range fns {
69
-		if fn == wfn {
70
-			return path.Join(s.dir, fn), nil
71
-		}
66
+	if fn := s.dbFilePath(id); fileutil.Exist(fn) {
67
+		return fn, nil
72 68
 	}
73
-	return "", fmt.Errorf("snap: snapshot file doesn't exist")
69
+	return "", ErrNoDBSnapshot
70
+}
71
+
72
+func (s *Snapshotter) dbFilePath(id uint64) string {
73
+	return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id))
74 74
 }
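Because the missing-snapshot case is now the sentinel `ErrNoDBSnapshot` instead of a formatted string, callers can compare against it directly. A hedged sketch (the snapshot directory and id below are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/snap"
)

func main() {
	// Placeholder snapshot directory and snapshot id.
	s := snap.New("/var/lib/etcd/member/snap")
	path, err := s.DBFilePath(0x1234)
	switch err {
	case nil:
		fmt.Println("snapshot db at", path)
	case snap.ErrNoDBSnapshot:
		fmt.Println("no snapshot db for this id yet")
	default:
		log.Fatal(err)
	}
}
```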
... ...
@@ -342,7 +342,7 @@ func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) }
342 342
 
343 343
 var fileDescriptorSnap = []byte{
344 344
 	// 126 bytes of a gzipped FileDescriptorProto
345
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,
345
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,
346 346
 	0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3,
347 347
 	0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c,
348 348
 	0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb,
... ...
@@ -21,7 +21,7 @@ import (
21 21
 	"hash/crc32"
22 22
 	"io/ioutil"
23 23
 	"os"
24
-	"path"
24
+	"path/filepath"
25 25
 	"sort"
26 26
 	"strings"
27 27
 	"time"
... ...
@@ -84,13 +84,13 @@ func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error {
84 84
 		marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second))
85 85
 	}
86 86
 
87
-	err = pioutil.WriteAndSyncFile(path.Join(s.dir, fname), d, 0666)
87
+	err = pioutil.WriteAndSyncFile(filepath.Join(s.dir, fname), d, 0666)
88 88
 	if err == nil {
89 89
 		saveDurations.Observe(float64(time.Since(start)) / float64(time.Second))
90 90
 	} else {
91
-		err1 := os.Remove(path.Join(s.dir, fname))
91
+		err1 := os.Remove(filepath.Join(s.dir, fname))
92 92
 		if err1 != nil {
93
-			plog.Errorf("failed to remove broken snapshot file %s", path.Join(s.dir, fname))
93
+			plog.Errorf("failed to remove broken snapshot file %s", filepath.Join(s.dir, fname))
94 94
 		}
95 95
 	}
96 96
 	return err
... ...
@@ -114,7 +114,7 @@ func (s *Snapshotter) Load() (*raftpb.Snapshot, error) {
114 114
 }
115 115
 
116 116
 func loadSnap(dir, name string) (*raftpb.Snapshot, error) {
117
-	fpath := path.Join(dir, name)
117
+	fpath := filepath.Join(dir, name)
118 118
 	snap, err := Read(fpath)
119 119
 	if err != nil {
120 120
 		renameBroken(fpath)
121 121
new file mode 100644
... ...
@@ -0,0 +1,56 @@
0
+// Copyright 2015 The etcd Authors
1
+//
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+//     http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+// Package version implements etcd version parsing and contains latest version
15
+// information.
16
+package version
17
+
18
+import (
19
+	"fmt"
20
+	"strings"
21
+
22
+	"github.com/coreos/go-semver/semver"
23
+)
24
+
25
+var (
26
+	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
27
+	MinClusterVersion = "3.0.0"
28
+	Version           = "3.2.1"
29
+	APIVersion        = "unknown"
30
+
31
+	// Git SHA Value will be set during build
32
+	GitSHA = "Not provided (use ./build instead of go build)"
33
+)
34
+
35
+func init() {
36
+	ver, err := semver.NewVersion(Version)
37
+	if err == nil {
38
+		APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
39
+	}
40
+}
41
+
42
+type Versions struct {
43
+	Server  string `json:"etcdserver"`
44
+	Cluster string `json:"etcdcluster"`
45
+	// TODO: raft state machine version
46
+}
47
+
48
+// Cluster only keeps the major.minor.
49
+func Cluster(v string) string {
50
+	vs := strings.Split(v, ".")
51
+	if len(vs) <= 2 {
52
+		return v
53
+	}
54
+	return fmt.Sprintf("%s.%s", vs[0], vs[1])
55
+}
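A tiny usage sketch for the new vendored version package:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/version"
)

func main() {
	// Cluster trims a full version down to major.minor.
	fmt.Println(version.Cluster("3.2.1")) // 3.2
	fmt.Println(version.Cluster(version.Version), version.APIVersion)
}
```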
... ...
@@ -52,7 +52,7 @@ func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder {
52 52
 
53 53
 // newFileEncoder creates a new encoder with current file offset for the page writer.
54 54
 func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) {
55
-	offset, err := f.Seek(0, os.SEEK_CUR)
55
+	offset, err := f.Seek(0, io.SeekCurrent)
56 56
 	if err != nil {
57 57
 		return nil, err
58 58
 	}
... ...
@@ -17,7 +17,7 @@ package wal
17 17
 import (
18 18
 	"fmt"
19 19
 	"os"
20
-	"path"
20
+	"path/filepath"
21 21
 
22 22
 	"github.com/coreos/etcd/pkg/fileutil"
23 23
 )
... ...
@@ -65,7 +65,7 @@ func (fp *filePipeline) Close() error {
65 65
 
66 66
 func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) {
67 67
 	// count % 2 so this file isn't the same as the one last published
68
-	fpath := path.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2))
68
+	fpath := filepath.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2))
69 69
 	if f, err = fileutil.LockFile(fpath, os.O_CREATE|os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
70 70
 		return nil, err
71 71
 	}
... ...
@@ -17,7 +17,7 @@ package wal
17 17
 import (
18 18
 	"io"
19 19
 	"os"
20
-	"path"
20
+	"path/filepath"
21 21
 
22 22
 	"github.com/coreos/etcd/pkg/fileutil"
23 23
 	"github.com/coreos/etcd/wal/walpb"
... ...
@@ -62,7 +62,7 @@ func Repair(dirpath string) bool {
62 62
 			}
63 63
 			defer bf.Close()
64 64
 
65
-			if _, err = f.Seek(0, os.SEEK_SET); err != nil {
65
+			if _, err = f.Seek(0, io.SeekStart); err != nil {
66 66
 				plog.Errorf("could not repair %v, failed to read file", f.Name())
67 67
 				return false
68 68
 			}
... ...
@@ -94,6 +94,6 @@ func openLast(dirpath string) (*fileutil.LockedFile, error) {
94 94
 	if err != nil {
95 95
 		return nil, err
96 96
 	}
97
-	last := path.Join(dirpath, names[len(names)-1])
97
+	last := filepath.Join(dirpath, names[len(names)-1])
98 98
 	return fileutil.LockFile(last, os.O_RDWR, fileutil.PrivateFileMode)
99 99
 }
... ...
@@ -21,7 +21,7 @@ import (
21 21
 	"hash/crc32"
22 22
 	"io"
23 23
 	"os"
24
-	"path"
24
+	"path/filepath"
25 25
 	"sync"
26 26
 	"time"
27 27
 
... ...
@@ -97,7 +97,7 @@ func Create(dirpath string, metadata []byte) (*WAL, error) {
97 97
 	}
98 98
 
99 99
 	// keep temporary wal directory so WAL initialization appears atomic
100
-	tmpdirpath := path.Clean(dirpath) + ".tmp"
100
+	tmpdirpath := filepath.Clean(dirpath) + ".tmp"
101 101
 	if fileutil.Exist(tmpdirpath) {
102 102
 		if err := os.RemoveAll(tmpdirpath); err != nil {
103 103
 			return nil, err
... ...
@@ -107,12 +107,12 @@ func Create(dirpath string, metadata []byte) (*WAL, error) {
107 107
 		return nil, err
108 108
 	}
109 109
 
110
-	p := path.Join(tmpdirpath, walName(0, 0))
110
+	p := filepath.Join(tmpdirpath, walName(0, 0))
111 111
 	f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
112 112
 	if err != nil {
113 113
 		return nil, err
114 114
 	}
115
-	if _, err = f.Seek(0, os.SEEK_END); err != nil {
115
+	if _, err = f.Seek(0, io.SeekEnd); err != nil {
116 116
 		return nil, err
117 117
 	}
118 118
 	if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
... ...
@@ -143,7 +143,7 @@ func Create(dirpath string, metadata []byte) (*WAL, error) {
143 143
 	}
144 144
 
145 145
 	// directory was renamed; sync parent dir to persist rename
146
-	pdir, perr := fileutil.OpenDir(path.Dir(w.dir))
146
+	pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir))
147 147
 	if perr != nil {
148 148
 		return nil, perr
149 149
 	}
... ...
@@ -196,7 +196,7 @@ func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error)
196 196
 	rs := make([]io.Reader, 0)
197 197
 	ls := make([]*fileutil.LockedFile, 0)
198 198
 	for _, name := range names[nameIndex:] {
199
-		p := path.Join(dirpath, name)
199
+		p := filepath.Join(dirpath, name)
200 200
 		if write {
201 201
 			l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode)
202 202
 			if err != nil {
... ...
@@ -232,7 +232,7 @@ func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error)
232 232
 		// write reuses the file descriptors from read; don't close so
233 233
 		// WAL can append without dropping the file lock
234 234
 		w.readClose = nil
235
-		if _, _, err := parseWalName(path.Base(w.tail().Name())); err != nil {
235
+		if _, _, err := parseWalName(filepath.Base(w.tail().Name())); err != nil {
236 236
 			closer()
237 237
 			return nil, err
238 238
 		}
... ...
@@ -322,7 +322,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.
322 322
 		// not all, will cause CRC errors on WAL open. Since the records
323 323
 		// were never fully synced to disk in the first place, it's safe
324 324
 		// to zero them out to avoid any CRC errors from new writes.
325
-		if _, err = w.tail().Seek(w.decoder.lastOffset(), os.SEEK_SET); err != nil {
325
+		if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil {
326 326
 			return nil, state, nil, err
327 327
 		}
328 328
 		if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
... ...
@@ -361,7 +361,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.
361 361
 // Then cut atomically rename temp wal file to a wal file.
362 362
 func (w *WAL) cut() error {
363 363
 	// close old wal file; truncate to avoid wasting space if an early cut
364
-	off, serr := w.tail().Seek(0, os.SEEK_CUR)
364
+	off, serr := w.tail().Seek(0, io.SeekCurrent)
365 365
 	if serr != nil {
366 366
 		return serr
367 367
 	}
... ...
@@ -372,7 +372,7 @@ func (w *WAL) cut() error {
372 372
 		return err
373 373
 	}
374 374
 
375
-	fpath := path.Join(w.dir, walName(w.seq()+1, w.enti+1))
375
+	fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
376 376
 
377 377
 	// create a temp wal file with name sequence + 1, or truncate the existing one
378 378
 	newTail, err := w.fp.Open()
... ...
@@ -401,7 +401,7 @@ func (w *WAL) cut() error {
401 401
 		return err
402 402
 	}
403 403
 
404
-	off, err = w.tail().Seek(0, os.SEEK_CUR)
404
+	off, err = w.tail().Seek(0, io.SeekCurrent)
405 405
 	if err != nil {
406 406
 		return err
407 407
 	}
... ...
@@ -418,7 +418,7 @@ func (w *WAL) cut() error {
418 418
 	if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
419 419
 		return err
420 420
 	}
421
-	if _, err = newTail.Seek(off, os.SEEK_SET); err != nil {
421
+	if _, err = newTail.Seek(off, io.SeekStart); err != nil {
422 422
 		return err
423 423
 	}
424 424
 
... ...
@@ -464,7 +464,7 @@ func (w *WAL) ReleaseLockTo(index uint64) error {
464 464
 	found := false
465 465
 
466 466
 	for i, l := range w.locks {
467
-		_, lockIndex, err := parseWalName(path.Base(l.Name()))
467
+		_, lockIndex, err := parseWalName(filepath.Base(l.Name()))
468 468
 		if err != nil {
469 469
 			return err
470 470
 		}
... ...
@@ -552,7 +552,7 @@ func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
552 552
 		return nil
553 553
 	}
554 554
 
555
-	mustSync := mustSync(st, w.state, len(ents))
555
+	mustSync := raft.MustSync(st, w.state, len(ents))
556 556
 
557 557
 	// TODO(xiangli): no more reference operator
558 558
 	for i := range ents {
... ...
@@ -564,7 +564,7 @@ func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
564 564
 		return err
565 565
 	}
566 566
 
567
-	curOff, err := w.tail().Seek(0, os.SEEK_CUR)
567
+	curOff, err := w.tail().Seek(0, io.SeekCurrent)
568 568
 	if err != nil {
569 569
 		return err
570 570
 	}
... ...
@@ -611,22 +611,13 @@ func (w *WAL) seq() uint64 {
611 611
 	if t == nil {
612 612
 		return 0
613 613
 	}
614
-	seq, _, err := parseWalName(path.Base(t.Name()))
614
+	seq, _, err := parseWalName(filepath.Base(t.Name()))
615 615
 	if err != nil {
616 616
 		plog.Fatalf("bad wal name %s (%v)", t.Name(), err)
617 617
 	}
618 618
 	return seq
619 619
 }
620 620
 
621
-func mustSync(st, prevst raftpb.HardState, entsnum int) bool {
622
-	// Persistent state on all servers:
623
-	// (Updated on stable storage before responding to RPCs)
624
-	// currentTerm
625
-	// votedFor
626
-	// log entries[]
627
-	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
628
-}
629
-
630 621
 func closeAll(rcs ...io.ReadCloser) error {
631 622
 	for _, f := range rcs {
632 623
 		if err := f.Close(); err != nil {
... ...
@@ -506,7 +506,7 @@ func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) }
506 506
 
507 507
 var fileDescriptorRecord = []byte{
508 508
 	// 186 bytes of a gzipped FileDescriptorProto
509
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce,
509
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce,
510 510
 	0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92,
511 511
 	0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6,
512 512
 	0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d,
513 513
new file mode 100644
... ...
@@ -0,0 +1,202 @@
0
+
1
+                                 Apache License
2
+                           Version 2.0, January 2004
3
+                        http://www.apache.org/licenses/
4
+
5
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+   1. Definitions.
8
+
9
+      "License" shall mean the terms and conditions for use, reproduction,
10
+      and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+      "Licensor" shall mean the copyright owner or entity authorized by
13
+      the copyright owner that is granting the License.
14
+
15
+      "Legal Entity" shall mean the union of the acting entity and all
16
+      other entities that control, are controlled by, or are under common
17
+      control with that entity. For the purposes of this definition,
18
+      "control" means (i) the power, direct or indirect, to cause the
19
+      direction or management of such entity, whether by contract or
20
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+      outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+      "You" (or "Your") shall mean an individual or Legal Entity
24
+      exercising permissions granted by this License.
25
+
26
+      "Source" form shall mean the preferred form for making modifications,
27
+      including but not limited to software source code, documentation
28
+      source, and configuration files.
29
+
30
+      "Object" form shall mean any form resulting from mechanical
31
+      transformation or translation of a Source form, including but
32
+      not limited to compiled object code, generated documentation,
33
+      and conversions to other media types.
34
+
35
+      "Work" shall mean the work of authorship, whether in Source or
36
+      Object form, made available under the License, as indicated by a
37
+      copyright notice that is included in or attached to the work
38
+      (an example is provided in the Appendix below).
39
+
40
+      "Derivative Works" shall mean any work, whether in Source or Object
41
+      form, that is based on (or derived from) the Work and for which the
42
+      editorial revisions, annotations, elaborations, or other modifications
43
+      represent, as a whole, an original work of authorship. For the purposes
44
+      of this License, Derivative Works shall not include works that remain
45
+      separable from, or merely link (or bind by name) to the interfaces of,
46
+      the Work and Derivative Works thereof.
47
+
48
+      "Contribution" shall mean any work of authorship, including
49
+      the original version of the Work and any modifications or additions
50
+      to that Work or Derivative Works thereof, that is intentionally
51
+      submitted to Licensor for inclusion in the Work by the copyright owner
52
+      or by an individual or Legal Entity authorized to submit on behalf of
53
+      the copyright owner. For the purposes of this definition, "submitted"
54
+      means any form of electronic, verbal, or written communication sent
55
+      to the Licensor or its representatives, including but not limited to
56
+      communication on electronic mailing lists, source code control systems,
57
+      and issue tracking systems that are managed by, or on behalf of, the
58
+      Licensor for the purpose of discussing and improving the Work, but
59
+      excluding communication that is conspicuously marked or otherwise
60
+      designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+      "Contributor" shall mean Licensor and any individual or Legal Entity
63
+      on behalf of whom a Contribution has been received by Licensor and
64
+      subsequently incorporated within the Work.
65
+
66
+   2. Grant of Copyright License. Subject to the terms and conditions of
67
+      this License, each Contributor hereby grants to You a perpetual,
68
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+      copyright license to reproduce, prepare Derivative Works of,
70
+      publicly display, publicly perform, sublicense, and distribute the
71
+      Work and such Derivative Works in Source or Object form.
72
+
73
+   3. Grant of Patent License. Subject to the terms and conditions of
74
+      this License, each Contributor hereby grants to You a perpetual,
75
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+      (except as stated in this section) patent license to make, have made,
77
+      use, offer to sell, sell, import, and otherwise transfer the Work,
78
+      where such license applies only to those patent claims licensable
79
+      by such Contributor that are necessarily infringed by their
80
+      Contribution(s) alone or by combination of their Contribution(s)
81
+      with the Work to which such Contribution(s) was submitted. If You
82
+      institute patent litigation against any entity (including a
83
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+      or a Contribution incorporated within the Work constitutes direct
85
+      or contributory patent infringement, then any patent licenses
86
+      granted to You under this License for that Work shall terminate
87
+      as of the date such litigation is filed.
88
+
89
+   4. Redistribution. You may reproduce and distribute copies of the
90
+      Work or Derivative Works thereof in any medium, with or without
91
+      modifications, and in Source or Object form, provided that You
92
+      meet the following conditions:
93
+
94
+      (a) You must give any other recipients of the Work or
95
+          Derivative Works a copy of this License; and
96
+
97
+      (b) You must cause any modified files to carry prominent notices
98
+          stating that You changed the files; and
99
+
100
+      (c) You must retain, in the Source form of any Derivative Works
101
+          that You distribute, all copyright, patent, trademark, and
102
+          attribution notices from the Source form of the Work,
103
+          excluding those notices that do not pertain to any part of
104
+          the Derivative Works; and
105
+
106
+      (d) If the Work includes a "NOTICE" text file as part of its
107
+          distribution, then any Derivative Works that You distribute must
108
+          include a readable copy of the attribution notices contained
109
+          within such NOTICE file, excluding those notices that do not
110
+          pertain to any part of the Derivative Works, in at least one
111
+          of the following places: within a NOTICE text file distributed
112
+          as part of the Derivative Works; within the Source form or
113
+          documentation, if provided along with the Derivative Works; or,
114
+          within a display generated by the Derivative Works, if and
115
+          wherever such third-party notices normally appear. The contents
116
+          of the NOTICE file are for informational purposes only and
117
+          do not modify the License. You may add Your own attribution
118
+          notices within Derivative Works that You distribute, alongside
119
+          or as an addendum to the NOTICE text from the Work, provided
120
+          that such additional attribution notices cannot be construed
121
+          as modifying the License.
122
+
123
+      You may add Your own copyright statement to Your modifications and
124
+      may provide additional or different license terms and conditions
125
+      for use, reproduction, or distribution of Your modifications, or
126
+      for any such Derivative Works as a whole, provided Your use,
127
+      reproduction, and distribution of the Work otherwise complies with
128
+      the conditions stated in this License.
129
+
130
+   5. Submission of Contributions. Unless You explicitly state otherwise,
131
+      any Contribution intentionally submitted for inclusion in the Work
132
+      by You to the Licensor shall be under the terms and conditions of
133
+      this License, without any additional terms or conditions.
134
+      Notwithstanding the above, nothing herein shall supersede or modify
135
+      the terms of any separate license agreement you may have executed
136
+      with Licensor regarding such Contributions.
137
+
138
+   6. Trademarks. This License does not grant permission to use the trade
139
+      names, trademarks, service marks, or product names of the Licensor,
140
+      except as required for reasonable and customary use in describing the
141
+      origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+   7. Disclaimer of Warranty. Unless required by applicable law or
144
+      agreed to in writing, Licensor provides the Work (and each
145
+      Contributor provides its Contributions) on an "AS IS" BASIS,
146
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+      implied, including, without limitation, any warranties or conditions
148
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+      PARTICULAR PURPOSE. You are solely responsible for determining the
150
+      appropriateness of using or redistributing the Work and assume any
151
+      risks associated with Your exercise of permissions under this License.
152
+
153
+   8. Limitation of Liability. In no event and under no legal theory,
154
+      whether in tort (including negligence), contract, or otherwise,
155
+      unless required by applicable law (such as deliberate and grossly
156
+      negligent acts) or agreed to in writing, shall any Contributor be
157
+      liable to You for damages, including any direct, indirect, special,
158
+      incidental, or consequential damages of any character arising as a
159
+      result of this License or out of the use or inability to use the
160
+      Work (including but not limited to damages for loss of goodwill,
161
+      work stoppage, computer failure or malfunction, or any and all
162
+      other commercial damages or losses), even if such Contributor
163
+      has been advised of the possibility of such damages.
164
+
165
+   9. Accepting Warranty or Additional Liability. While redistributing
166
+      the Work or Derivative Works thereof, You may choose to offer,
167
+      and charge a fee for, acceptance of support, warranty, indemnity,
168
+      or other liability obligations and/or rights consistent with this
169
+      License. However, in accepting such obligations, You may act only
170
+      on Your own behalf and on Your sole responsibility, not on behalf
171
+      of any other Contributor, and only if You agree to indemnify,
172
+      defend, and hold each Contributor harmless for any liability
173
+      incurred by, or claims asserted against, such Contributor by reason
174
+      of your accepting any such warranty or additional liability.
175
+
176
+   END OF TERMS AND CONDITIONS
177
+
178
+   APPENDIX: How to apply the Apache License to your work.
179
+
180
+      To apply the Apache License to your work, attach the following
181
+      boilerplate notice, with the fields enclosed by brackets "[]"
182
+      replaced with your own identifying information. (Don't include
183
+      the brackets!)  The text should be enclosed in the appropriate
184
+      comment syntax for the file format. We also recommend that a
185
+      file or class name and description of purpose be included on the
186
+      same "printed page" as the copyright notice for easier
187
+      identification within third-party archives.
188
+
189
+   Copyright [yyyy] [name of copyright owner]
190
+
191
+   Licensed under the Apache License, Version 2.0 (the "License");
192
+   you may not use this file except in compliance with the License.
193
+   You may obtain a copy of the License at
194
+
195
+       http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+   Unless required by applicable law or agreed to in writing, software
198
+   distributed under the License is distributed on an "AS IS" BASIS,
199
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+   See the License for the specific language governing permissions and
201
+   limitations under the License.
0 202
new file mode 100644
... ...
@@ -0,0 +1,28 @@
0
+# go-semver - Semantic Versioning Library
1
+
2
+[![Build Status](https://travis-ci.org/coreos/go-semver.svg?branch=master)](https://travis-ci.org/coreos/go-semver)
3
+[![GoDoc](https://godoc.org/github.com/coreos/go-semver/semver?status.svg)](https://godoc.org/github.com/coreos/go-semver/semver)
4
+
5
+go-semver is a [semantic versioning][semver] library for Go. It lets you parse
6
+and compare two semantic version strings.
7
+
8
+[semver]: http://semver.org/
9
+
10
+## Usage
11
+
12
+```go
13
+vA := semver.New("1.2.3")
14
+vB := semver.New("3.2.1")
15
+
16
+fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB))
17
+```
18
+
19
+## Example Application
20
+
21
+```
22
+$ go run example.go 1.2.3 3.2.1
23
+1.2.3 < 3.2.1 == true
24
+
25
+$ go run example.go 5.2.3 3.2.1
26
+5.2.3 < 3.2.1 == false
27
+```
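
For reference, a minimal sketch (not part of the vendored README) of the error-returning constructor and three-way comparison the package also exposes, `NewVersion` and `Compare`, both defined in the vendored `semver.go` below; the import path `github.com/coreos/go-semver/semver` is the one referenced by the GoDoc badge above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-semver/semver"
)

func main() {
	// NewVersion reports parse errors instead of panicking like New.
	a, err := semver.NewVersion("1.2.3")
	if err != nil {
		log.Fatal(err)
	}
	b, err := semver.NewVersion("1.2.10")
	if err != nil {
		log.Fatal(err)
	}

	// Compare returns -1, 0, or +1.
	fmt.Println(a.Compare(*b)) // -1, because patch 3 < patch 10
}
```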
0 28
new file mode 100644
... ...
@@ -0,0 +1,268 @@
0
+// Copyright 2013-2015 CoreOS, Inc.
1
+//
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+//     http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+// Semantic Versions http://semver.org
15
+package semver
16
+
17
+import (
18
+	"bytes"
19
+	"errors"
20
+	"fmt"
21
+	"strconv"
22
+	"strings"
23
+)
24
+
25
+type Version struct {
26
+	Major      int64
27
+	Minor      int64
28
+	Patch      int64
29
+	PreRelease PreRelease
30
+	Metadata   string
31
+}
32
+
33
+type PreRelease string
34
+
35
+func splitOff(input *string, delim string) (val string) {
36
+	parts := strings.SplitN(*input, delim, 2)
37
+
38
+	if len(parts) == 2 {
39
+		*input = parts[0]
40
+		val = parts[1]
41
+	}
42
+
43
+	return val
44
+}
45
+
46
+func New(version string) *Version {
47
+	return Must(NewVersion(version))
48
+}
49
+
50
+func NewVersion(version string) (*Version, error) {
51
+	v := Version{}
52
+
53
+	if err := v.Set(version); err != nil {
54
+		return nil, err
55
+	}
56
+
57
+	return &v, nil
58
+}
59
+
60
+// Must is a helper for wrapping NewVersion and will panic if err is not nil.
61
+func Must(v *Version, err error) *Version {
62
+	if err != nil {
63
+		panic(err)
64
+	}
65
+	return v
66
+}
67
+
68
+// Set parses and updates v from the given version string. Implements flag.Value
69
+func (v *Version) Set(version string) error {
70
+	metadata := splitOff(&version, "+")
71
+	preRelease := PreRelease(splitOff(&version, "-"))
72
+	dotParts := strings.SplitN(version, ".", 3)
73
+
74
+	if len(dotParts) != 3 {
75
+		return fmt.Errorf("%s is not in dotted-tri format", version)
76
+	}
77
+
78
+	parsed := make([]int64, 3, 3)
79
+
80
+	for i, v := range dotParts[:3] {
81
+		val, err := strconv.ParseInt(v, 10, 64)
82
+		parsed[i] = val
83
+		if err != nil {
84
+			return err
85
+		}
86
+	}
87
+
88
+	v.Metadata = metadata
89
+	v.PreRelease = preRelease
90
+	v.Major = parsed[0]
91
+	v.Minor = parsed[1]
92
+	v.Patch = parsed[2]
93
+	return nil
94
+}
95
+
96
+func (v Version) String() string {
97
+	var buffer bytes.Buffer
98
+
99
+	fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
100
+
101
+	if v.PreRelease != "" {
102
+		fmt.Fprintf(&buffer, "-%s", v.PreRelease)
103
+	}
104
+
105
+	if v.Metadata != "" {
106
+		fmt.Fprintf(&buffer, "+%s", v.Metadata)
107
+	}
108
+
109
+	return buffer.String()
110
+}
111
+
112
+func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
113
+	var data string
114
+	if err := unmarshal(&data); err != nil {
115
+		return err
116
+	}
117
+	return v.Set(data)
118
+}
119
+
120
+func (v Version) MarshalJSON() ([]byte, error) {
121
+	return []byte(`"` + v.String() + `"`), nil
122
+}
123
+
124
+func (v *Version) UnmarshalJSON(data []byte) error {
125
+	l := len(data)
126
+	if l == 0 || string(data) == `""` {
127
+		return nil
128
+	}
129
+	if l < 2 || data[0] != '"' || data[l-1] != '"' {
130
+		return errors.New("invalid semver string")
131
+	}
132
+	return v.Set(string(data[1 : l-1]))
133
+}
134
+
135
+// Compare tests if v is less than, equal to, or greater than versionB,
136
+// returning -1, 0, or +1 respectively.
137
+func (v Version) Compare(versionB Version) int {
138
+	if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
139
+		return cmp
140
+	}
141
+	return preReleaseCompare(v, versionB)
142
+}
143
+
144
+// Equal tests if v is equal to versionB.
145
+func (v Version) Equal(versionB Version) bool {
146
+	return v.Compare(versionB) == 0
147
+}
148
+
149
+// LessThan tests if v is less than versionB.
150
+func (v Version) LessThan(versionB Version) bool {
151
+	return v.Compare(versionB) < 0
152
+}
153
+
154
+// Slice converts the comparable parts of the semver into a slice of integers.
155
+func (v Version) Slice() []int64 {
156
+	return []int64{v.Major, v.Minor, v.Patch}
157
+}
158
+
159
+func (p PreRelease) Slice() []string {
160
+	preRelease := string(p)
161
+	return strings.Split(preRelease, ".")
162
+}
163
+
164
+func preReleaseCompare(versionA Version, versionB Version) int {
165
+	a := versionA.PreRelease
166
+	b := versionB.PreRelease
167
+
168
+	/* If two versions are otherwise equal, the one without a
169
+	 * PreRelease is the greater of the two. */
170
+	if len(a) == 0 && (len(b) > 0) {
171
+		return 1
172
+	} else if len(b) == 0 && (len(a) > 0) {
173
+		return -1
174
+	}
175
+
176
+	// If there is a prerelease, check and compare each part.
177
+	return recursivePreReleaseCompare(a.Slice(), b.Slice())
178
+}
179
+
180
+func recursiveCompare(versionA []int64, versionB []int64) int {
181
+	if len(versionA) == 0 {
182
+		return 0
183
+	}
184
+
185
+	a := versionA[0]
186
+	b := versionB[0]
187
+
188
+	if a > b {
189
+		return 1
190
+	} else if a < b {
191
+		return -1
192
+	}
193
+
194
+	return recursiveCompare(versionA[1:], versionB[1:])
195
+}
196
+
197
+func recursivePreReleaseCompare(versionA []string, versionB []string) int {
198
+	// A larger set of pre-release fields has a higher precedence than a smaller set,
199
+	// if all of the preceding identifiers are equal.
200
+	if len(versionA) == 0 {
201
+		if len(versionB) > 0 {
202
+			return -1
203
+		}
204
+		return 0
205
+	} else if len(versionB) == 0 {
206
+		// We're longer than versionB so return 1.
207
+		return 1
208
+	}
209
+
210
+	a := versionA[0]
211
+	b := versionB[0]
212
+
213
+	aInt := false
214
+	bInt := false
215
+
216
+	aI, err := strconv.Atoi(versionA[0])
217
+	if err == nil {
218
+		aInt = true
219
+	}
220
+
221
+	bI, err := strconv.Atoi(versionB[0])
222
+	if err == nil {
223
+		bInt = true
224
+	}
225
+
226
+	// Handle Integer Comparison
227
+	if aInt && bInt {
228
+		if aI > bI {
229
+			return 1
230
+		} else if aI < bI {
231
+			return -1
232
+		}
233
+	}
234
+
235
+	// Handle String Comparison
236
+	if a > b {
237
+		return 1
238
+	} else if a < b {
239
+		return -1
240
+	}
241
+
242
+	return recursivePreReleaseCompare(versionA[1:], versionB[1:])
243
+}
244
+
245
+// BumpMajor increments the Major field by 1 and resets all other fields to their default values
246
+func (v *Version) BumpMajor() {
247
+	v.Major += 1
248
+	v.Minor = 0
249
+	v.Patch = 0
250
+	v.PreRelease = PreRelease("")
251
+	v.Metadata = ""
252
+}
253
+
254
+// BumpMinor increments the Minor field by 1 and resets the Patch, PreRelease and Metadata fields to their default values
255
+func (v *Version) BumpMinor() {
256
+	v.Minor += 1
257
+	v.Patch = 0
258
+	v.PreRelease = PreRelease("")
259
+	v.Metadata = ""
260
+}
261
+
262
+// BumpPatch increments the Patch field by 1 and resets the PreRelease and Metadata fields to their default values
263
+func (v *Version) BumpPatch() {
264
+	v.Patch += 1
265
+	v.PreRelease = PreRelease("")
266
+	v.Metadata = ""
267
+}
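
As an illustration of the pre-release precedence rules implemented by `preReleaseCompare` and `recursivePreReleaseCompare` above, a hedged sketch (assuming the `github.com/coreos/go-semver/semver` import path from the README): a version with a pre-release sorts below the corresponding release, and numeric pre-release identifiers are compared as integers rather than strings.

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	release := semver.New("1.2.3")
	alpha2 := semver.New("1.2.3-alpha.2")
	alpha10 := semver.New("1.2.3-alpha.10")

	// A pre-release sorts below the plain release of the same version.
	fmt.Println(alpha2.LessThan(*release)) // true

	// Numeric identifiers compare as integers, so alpha.2 < alpha.10.
	fmt.Println(alpha2.LessThan(*alpha10)) // true
}
```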
0 268
new file mode 100644
... ...
@@ -0,0 +1,38 @@
0
+// Copyright 2013-2015 CoreOS, Inc.
1
+//
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+//     http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package semver
15
+
16
+import (
17
+	"sort"
18
+)
19
+
20
+type Versions []*Version
21
+
22
+func (s Versions) Len() int {
23
+	return len(s)
24
+}
25
+
26
+func (s Versions) Swap(i, j int) {
27
+	s[i], s[j] = s[j], s[i]
28
+}
29
+
30
+func (s Versions) Less(i, j int) bool {
31
+	return s[i].LessThan(*s[j])
32
+}
33
+
34
+// Sort sorts the given slice of Version
35
+func Sort(versions []*Version) {
36
+	sort.Sort(Versions(versions))
37
+}
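
Finally, a short usage sketch for the `Sort` helper defined above (again assuming the `github.com/coreos/go-semver/semver` import path); `Sort` orders the slice in place via `Version.LessThan`, so pre-releases come before their corresponding releases.

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	versions := []*semver.Version{
		semver.New("3.2.1"),
		semver.New("1.2.3"),
		semver.New("1.2.3-alpha.1"),
	}

	// Sort uses the sort.Interface implementation on the Versions type.
	semver.Sort(versions)

	for _, v := range versions {
		fmt.Println(v) // prints 1.2.3-alpha.1, 1.2.3, 3.2.1 in order
	}
}
```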