@@ -19,7 +19,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
 	follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
 	since := cmd.String([]string{"-since"}, "", "Show logs since timestamp")
 	times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
-	tail := cmd.String([]string{"-tail"}, "latest", "Number of lines to show from the end of the logs")
+	tail := cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs")
 	cmd.Require(flag.Exact, 1)

 	cmd.ParseFlags(args, true)
@@ -629,6 +629,17 @@ func (s *Server) getContainersLogs(version version.Version, w http.ResponseWrite
 		closeNotifier = notifier.CloseNotify()
 	}

+	c, err := s.daemon.Get(vars["name"])
+	if err != nil {
+		return err
+	}
+
+	outStream := ioutils.NewWriteFlusher(w)
+	// write an empty chunk of data (this is to ensure that the
+	// HTTP Response is sent immediatly, even if the container has
+	// not yet produced any data)
+	outStream.Write(nil)
+
 	logsConfig := &daemon.ContainerLogsConfig{
 		Follow:     boolValue(r, "follow"),
 		Timestamps: boolValue(r, "timestamps"),
@@ -636,11 +647,11 @@ func (s *Server) getContainersLogs(version version.Version, w http.ResponseWrite
 		Tail:       r.Form.Get("tail"),
 		UseStdout:  stdout,
 		UseStderr:  stderr,
-		OutStream:  ioutils.NewWriteFlusher(w),
+		OutStream:  outStream,
 		Stop:       closeNotifier,
 	}

-	if err := s.daemon.ContainerLogs(vars["name"], logsConfig); err != nil {
+	if err := s.daemon.ContainerLogs(c, logsConfig); err != nil {
 		fmt.Fprintf(w, "Error running logs job: %s\n", err)
 	}
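Note on the added `outStream.Write(nil)`: it depends on the write-and-flush behaviour of `ioutils.NewWriteFlusher`, where every write, even an empty one, is flushed to the HTTP client, so the response headers go out before the container has produced any log data. A minimal sketch of that idea (illustrative only; the real helper lives in `pkg/ioutils` and its exact code is not part of this diff; `io` and `net/http` are assumed to be imported):

```go
// writeFlusher is a toy version of the wrapper assumed above: every Write,
// including a zero-length one, is pushed to the client immediately.
type writeFlusher struct {
	w io.Writer    // the underlying http.ResponseWriter
	f http.Flusher // the same ResponseWriter, seen as a Flusher
}

func (wf *writeFlusher) Write(b []byte) (int, error) {
	n, err := wf.w.Write(b)
	wf.f.Flush() // force headers and any buffered body out now
	return n, err
}
```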
@@ -25,7 +25,6 @@ import (
 	"github.com/docker/docker/pkg/broadcastwriter"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/jsonlog"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/nat"
 	"github.com/docker/docker/pkg/promise"
@@ -325,25 +324,13 @@ func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser {

 func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser {
 	reader, writer := io.Pipe()
-	streamConfig.stdout.AddWriter(writer, "")
+	streamConfig.stdout.AddWriter(writer)
 	return ioutils.NewBufReader(reader)
 }

 func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser {
 	reader, writer := io.Pipe()
-	streamConfig.stderr.AddWriter(writer, "")
-	return ioutils.NewBufReader(reader)
-}
-
-func (streamConfig *StreamConfig) StdoutLogPipe() io.ReadCloser {
-	reader, writer := io.Pipe()
-	streamConfig.stdout.AddWriter(writer, "stdout")
-	return ioutils.NewBufReader(reader)
-}
-
-func (streamConfig *StreamConfig) StderrLogPipe() io.ReadCloser {
-	reader, writer := io.Pipe()
-	streamConfig.stderr.AddWriter(writer, "stderr")
+	streamConfig.stderr.AddWriter(writer)
 	return ioutils.NewBufReader(reader)
 }

@@ -715,6 +702,9 @@ func (container *Container) getLogConfig() runconfig.LogConfig {
 }

 func (container *Container) getLogger() (logger.Logger, error) {
+	if container.logDriver != nil && container.IsRunning() {
+		return container.logDriver, nil
+	}
 	cfg := container.getLogConfig()
 	if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil {
 		return nil, err
@@ -888,36 +878,33 @@ func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writ
 }

 func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
-
 	if logs {
 		logDriver, err := c.getLogger()
 		if err != nil {
-			logrus.Errorf("Error obtaining the logger %v", err)
 			return err
 		}
-		if _, ok := logDriver.(logger.Reader); !ok {
-			logrus.Errorf("cannot read logs for [%s] driver", logDriver.Name())
-		} else {
-			if cLog, err := logDriver.(logger.Reader).ReadLog(); err != nil {
-				logrus.Errorf("Error reading logs %v", err)
-			} else {
-				dec := json.NewDecoder(cLog)
-				for {
-					l := &jsonlog.JSONLog{}
-
-					if err := dec.Decode(l); err == io.EOF {
-						break
-					} else if err != nil {
-						logrus.Errorf("Error streaming logs: %s", err)
-						break
-					}
-					if l.Stream == "stdout" && stdout != nil {
-						io.WriteString(stdout, l.Log)
-					}
-					if l.Stream == "stderr" && stderr != nil {
-						io.WriteString(stderr, l.Log)
-					}
+		cLog, ok := logDriver.(logger.LogReader)
+		if !ok {
+			return logger.ErrReadLogsNotSupported
+		}
+		logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1})
+
+	LogLoop:
+		for {
+			select {
+			case msg, ok := <-logs.Msg:
+				if !ok {
+					break LogLoop
+				}
+				if msg.Source == "stdout" && stdout != nil {
+					stdout.Write(msg.Line)
+				}
+				if msg.Source == "stderr" && stderr != nil {
+					stderr.Write(msg.Line)
 				}
+			case err := <-logs.Err:
+				logrus.Errorf("Error streaming logs: %v", err)
+				break LogLoop
 			}
 		}
 	}
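The rewritten loop relies on a labeled break: inside a `select`, a bare `break` only leaves the `select`, so `break LogLoop` is what actually terminates the surrounding `for`. A tiny self-contained illustration of the construct:

```go
// drain collects values until ch is closed or done fires; the LogLoop-style
// label lets break exit the for loop rather than just the select.
func drain(ch <-chan int, done <-chan struct{}) []int {
	var out []int
Loop:
	for {
		select {
		case v, ok := <-ch:
			if !ok {
				break Loop // channel closed
			}
			out = append(out, v)
		case <-done:
			break Loop
		}
	}
	return out
}
```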
@@ -27,6 +27,7 @@ type Context struct {
 	LogPath             string
 }

+// Hostname returns the hostname from the underlying OS
 func (ctx *Context) Hostname() (string, error) {
 	hostname, err := os.Hostname()
 	if err != nil {
@@ -35,6 +36,7 @@ func (ctx *Context) Hostname() (string, error) {
 	return hostname, nil
 }

+// Command returns the command that the container being logged was started with
 func (ctx *Context) Command() string {
 	terms := []string{ctx.ContainerEntrypoint}
 	for _, arg := range ctx.ContainerArgs {
@@ -2,32 +2,42 @@ package jsonfilelog

 import (
 	"bytes"
+	"encoding/json"
 	"fmt"
 	"io"
 	"os"
 	"strconv"
 	"sync"
+	"time"
+
+	"gopkg.in/fsnotify.v1"

 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/pubsub"
+	"github.com/docker/docker/pkg/tailfile"
 	"github.com/docker/docker/pkg/timeutils"
 	"github.com/docker/docker/pkg/units"
 )

 const (
-	Name = "json-file"
+	Name               = "json-file"
+	maxJSONDecodeRetry = 10
 )

 // JSONFileLogger is Logger implementation for default docker logging:
 // JSON objects to file
 type JSONFileLogger struct {
-	buf      *bytes.Buffer
-	f        *os.File   // store for closing
-	mu       sync.Mutex // protects buffer
-	capacity int64      //maximum size of each file
-	n        int        //maximum number of files
-	ctx      logger.Context
+	buf          *bytes.Buffer
+	f            *os.File   // store for closing
+	mu           sync.Mutex // protects buffer
+	capacity     int64      //maximum size of each file
+	n            int        //maximum number of files
+	ctx          logger.Context
+	readers      map[*logger.LogWatcher]struct{} // stores the active log followers
+	notifyRotate *pubsub.Publisher
 }

 func init() {
@@ -64,11 +74,13 @@ func New(ctx logger.Context) (logger.Logger, error) {
 		}
 	}
 	return &JSONFileLogger{
-		f:        log,
-		buf:      bytes.NewBuffer(nil),
-		ctx:      ctx,
-		capacity: capval,
-		n:        maxFiles,
+		f:            log,
+		buf:          bytes.NewBuffer(nil),
+		ctx:          ctx,
+		capacity:     capval,
+		n:            maxFiles,
+		readers:      make(map[*logger.LogWatcher]struct{}),
+		notifyRotate: pubsub.NewPublisher(0, 1),
 	}, nil
 }

@@ -111,6 +123,7 @@ func writeLog(l *JSONFileLogger) (int64, error) {
 			return -1, err
 		}
 		l.f = file
+		l.notifyRotate.Publish(struct{}{})
 	}
 	return writeToBuf(l)
 }
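The `notifyRotate.Publish(struct{}{})` call broadcasts "the log file was just rotated" to every follower that subscribed through `pkg/pubsub`, so they can reopen the file (see `followLogs` later in this file). A channel-only sketch of that publish/subscribe pattern, not the actual `pkg/pubsub` code (`sync` is assumed to be imported):

```go
// rotateNotifier fans a rotation signal out to all subscribed followers.
type rotateNotifier struct {
	mu   sync.Mutex
	subs map[chan struct{}]struct{}
}

func (n *rotateNotifier) subscribe() chan struct{} {
	ch := make(chan struct{}, 1)
	n.mu.Lock()
	n.subs[ch] = struct{}{}
	n.mu.Unlock()
	return ch
}

func (n *rotateNotifier) publish() {
	n.mu.Lock()
	for ch := range n.subs {
		select {
		case ch <- struct{}{}: // wake the follower
		default: // follower already has an unconsumed notification
		}
	}
	n.mu.Unlock()
}
```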
@@ -148,11 +161,11 @@ func backup(old, curr string) error {
 		}
 	}
 	if _, err := os.Stat(curr); os.IsNotExist(err) {
-		if f, err := os.Create(curr); err != nil {
+		f, err := os.Create(curr)
+		if err != nil {
 			return err
-		} else {
-			f.Close()
 		}
+		f.Close()
 	}
 	return os.Rename(curr, old)
 }
@@ -169,31 +182,200 @@ func ValidateLogOpt(cfg map[string]string) error {
 	return nil
 }

-func (l *JSONFileLogger) ReadLog(args ...string) (io.Reader, error) {
-	pth := l.ctx.LogPath
-	if len(args) > 0 {
-		//check if args[0] is an integer index
-		index, err := strconv.ParseInt(args[0], 0, 0)
-		if err != nil {
-			return nil, err
-		}
-		if index > 0 {
-			pth = pth + "." + args[0]
-		}
-	}
-	return os.Open(pth)
-}
-
 func (l *JSONFileLogger) LogPath() string {
 	return l.ctx.LogPath
 }

-// Close closes underlying file
+// Close closes underlying file and signals all readers to stop
 func (l *JSONFileLogger) Close() error {
-	return l.f.Close()
+	l.mu.Lock()
+	err := l.f.Close()
+	for r := range l.readers {
+		r.Close()
+		delete(l.readers, r)
+	}
+	l.mu.Unlock()
+	return err
 }

 // Name returns name of this logger
 func (l *JSONFileLogger) Name() string {
 	return Name
 }
+
+func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
+	l.Reset()
+	if err := dec.Decode(l); err != nil {
+		return nil, err
+	}
+	msg := &logger.Message{
+		Source:    l.Stream,
+		Timestamp: l.Created,
+		Line:      []byte(l.Log),
+	}
+	return msg, nil
+}
+
+// Reads from the log file
+func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
+	logWatcher := logger.NewLogWatcher()
+
+	go l.readLogs(logWatcher, config)
+	return logWatcher
+}
+
+func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
+	defer close(logWatcher.Msg)
+
+	pth := l.ctx.LogPath
+	var files []io.ReadSeeker
+	for i := l.n; i > 1; i-- {
+		f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
+		if err != nil {
+			if !os.IsNotExist(err) {
+				logWatcher.Err <- err
+				break
+			}
+			continue
+		}
+		defer f.Close()
+		files = append(files, f)
+	}
+
+	latestFile, err := os.Open(pth)
+	if err != nil {
+		logWatcher.Err <- err
+		return
+	}
+	defer latestFile.Close()
+
+	files = append(files, latestFile)
+	tailer := ioutils.MultiReadSeeker(files...)
+
+	if config.Tail != 0 {
+		tailFile(tailer, logWatcher, config.Tail, config.Since)
+	}
+
+	if !config.Follow {
+		return
+	}
+	if config.Tail == 0 {
+		latestFile.Seek(0, os.SEEK_END)
+	}
+
+	l.mu.Lock()
+	l.readers[logWatcher] = struct{}{}
+	l.mu.Unlock()
+
+	notifyRotate := l.notifyRotate.Subscribe()
+	followLogs(latestFile, logWatcher, notifyRotate, config.Since)
+
+	l.mu.Lock()
+	delete(l.readers, logWatcher)
+	l.mu.Unlock()
+
+	l.notifyRotate.Evict(notifyRotate)
+}
+
+func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
+	var rdr io.Reader = f
+	if tail > 0 {
+		ls, err := tailfile.TailFile(f, tail)
+		if err != nil {
+			logWatcher.Err <- err
+			return
+		}
+		rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
+	}
+	dec := json.NewDecoder(rdr)
+	l := &jsonlog.JSONLog{}
+	for {
+		msg, err := decodeLogLine(dec, l)
+		if err != nil {
+			if err != io.EOF {
+				logWatcher.Err <- err
+			}
+			return
+		}
+		if !since.IsZero() && msg.Timestamp.Before(since) {
+			continue
+		}
+		logWatcher.Msg <- msg
+	}
+}
+
+func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
+	dec := json.NewDecoder(f)
+	l := &jsonlog.JSONLog{}
+	fileWatcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		logWatcher.Err <- err
+		return
+	}
+	defer fileWatcher.Close()
+	if err := fileWatcher.Add(f.Name()); err != nil {
+		logWatcher.Err <- err
+		return
+	}
+
+	var retries int
+	for {
+		msg, err := decodeLogLine(dec, l)
+		if err != nil {
+			if err != io.EOF {
+				// try again because this shouldn't happen
+				if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
+					dec = json.NewDecoder(f)
+					retries += 1
+					continue
+				}
+				logWatcher.Err <- err
+				return
+			}
+
+			select {
+			case <-fileWatcher.Events:
+				dec = json.NewDecoder(f)
+				continue
+			case <-fileWatcher.Errors:
+				logWatcher.Err <- err
+				return
+			case <-logWatcher.WatchClose():
+				return
+			case <-notifyRotate:
+				fileWatcher.Remove(f.Name())
+
+				f, err = os.Open(f.Name())
+				if err != nil {
+					logWatcher.Err <- err
+					return
+				}
+				if err := fileWatcher.Add(f.Name()); err != nil {
+					logWatcher.Err <- err
+				}
+				dec = json.NewDecoder(f)
+				continue
+			}
+		}
+
+		retries = 0 // reset retries since we've succeeded
+		if !since.IsZero() && msg.Timestamp.Before(since) {
+			continue
+		}
+		select {
+		case logWatcher.Msg <- msg:
+		case <-logWatcher.WatchClose():
+			logWatcher.Msg <- msg
+			for {
+				msg, err := decodeLogLine(dec, l)
+				if err != nil {
+					return
+				}
+				if !since.IsZero() && msg.Timestamp.Before(since) {
+					continue
+				}
+				logWatcher.Msg <- msg
+			}
+		}
+	}
+}
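For reference, this is how a caller is expected to consume the new reader (a sketch built only from the API added in this diff; `fmt` and the `logger` package are assumed to be imported):

```go
// followContainerLogs tails the last 10 lines and then follows the log,
// draining the LogWatcher until the driver closes it, an error occurs,
// or the caller asks to stop.
func followContainerLogs(l logger.Logger, stop <-chan struct{}) error {
	reader, ok := l.(logger.LogReader)
	if !ok {
		return logger.ErrReadLogsNotSupported
	}
	w := reader.ReadLogs(logger.ReadConfig{Tail: 10, Follow: true})
	for {
		select {
		case msg, ok := <-w.Msg:
			if !ok { // driver closed the stream
				return nil
			}
			fmt.Printf("%s %s: %s", msg.Timestamp.Format(logger.TimeFormat), msg.Source, msg.Line)
		case err := <-w.Err:
			return err
		case <-stop:
			w.Close() // tell the driver to stop reading/following
			return nil
		}
	}
}
```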
| ... | ... |
@@ -2,11 +2,19 @@ package logger

 import (
 	"errors"
-	"io"
 	"time"
+
+	"github.com/docker/docker/pkg/timeutils"
 )

-var ReadLogsNotSupported = errors.New("configured logging reader does not support reading")
+// ErrReadLogsNotSupported is returned when the logger does not support reading logs
+var ErrReadLogsNotSupported = errors.New("configured logging reader does not support reading")
+
+const (
+	// TimeFormat is the time format used for timestamps sent to log readers
+	TimeFormat           = timeutils.RFC3339NanoFixed
+	logWatcherBufferSize = 4096
+)

 // Message is datastructure that represents record from some container
 type Message struct {
@@ -16,14 +24,51 @@ type Message struct {
 	Timestamp time.Time
 }

-// Logger is interface for docker logging drivers
+// Logger is the interface for docker logging drivers
 type Logger interface {
 	Log(*Message) error
 	Name() string
 	Close() error
 }

-//Reader is an interface for docker logging drivers that support reading
-type Reader interface {
-	ReadLog(args ...string) (io.Reader, error)
+// ReadConfig is the configuration passed into ReadLogs
+type ReadConfig struct {
+	Since  time.Time
+	Tail   int
+	Follow bool
+}
+
+// LogReader is the interface for reading log messages for loggers that support reading
+type LogReader interface {
+	// Read logs from underlying logging backend
+	ReadLogs(ReadConfig) *LogWatcher
+}
+
+// LogWatcher is used when consuming logs read from the LogReader interface
+type LogWatcher struct {
+	// For sending log messages to a reader
+	Msg chan *Message
+	// For sending error messages that occur while while reading logs
+	Err           chan error
+	closeNotifier chan struct{}
+}
+
+// NewLogWatcher returns a new LogWatcher.
+func NewLogWatcher() *LogWatcher {
+	return &LogWatcher{
+		Msg:           make(chan *Message, logWatcherBufferSize),
+		Err:           make(chan error, 1),
+		closeNotifier: make(chan struct{}),
+	}
+}
+
+// Close notifies the underlying log reader to stop
+func (w *LogWatcher) Close() {
+	close(w.closeNotifier)
+}
+
+// WatchClose returns a channel receiver that receives notification when the watcher has been closed
+// This should only be called from one goroutine
+func (w *LogWatcher) WatchClose() <-chan struct{} {
+	return w.closeNotifier
 }
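On the driver side, any logger that wants `docker logs` support now just implements `LogReader`. A hypothetical, minimal in-package driver showing the contract (buffer on `Log`, replay through a `LogWatcher` on `ReadLogs`; `Tail`/`Follow` handling is omitted and `sync` is assumed to be imported):

```go
// memoryLogger is a toy LogReader implementation used only for illustration.
type memoryLogger struct {
	mu   sync.Mutex
	msgs []*Message
}

func (m *memoryLogger) Log(msg *Message) error {
	m.mu.Lock()
	m.msgs = append(m.msgs, msg)
	m.mu.Unlock()
	return nil
}

func (m *memoryLogger) Name() string { return "memory" }
func (m *memoryLogger) Close() error { return nil }

func (m *memoryLogger) ReadLogs(cfg ReadConfig) *LogWatcher {
	w := NewLogWatcher()
	go func() {
		defer close(w.Msg)
		m.mu.Lock()
		msgs := append([]*Message(nil), m.msgs...) // snapshot under the lock
		m.mu.Unlock()
		for _, msg := range msgs {
			if !cfg.Since.IsZero() && msg.Timestamp.Before(cfg.Since) {
				continue // honour the Since filter
			}
			select {
			case w.Msg <- msg:
			case <-w.WatchClose(): // consumer went away
				return
			}
		}
	}()
	return w
}
```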
| ... | ... |
@@ -1,23 +1,14 @@
 package daemon

 import (
-	"bytes"
-	"encoding/json"
 	"fmt"
 	"io"
-	"net"
-	"os"
 	"strconv"
-	"syscall"
 	"time"

 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/logger"
-	"github.com/docker/docker/daemon/logger/jsonfilelog"
-	"github.com/docker/docker/pkg/jsonlog"
 	"github.com/docker/docker/pkg/stdcopy"
-	"github.com/docker/docker/pkg/tailfile"
-	"github.com/docker/docker/pkg/timeutils"
 )

 type ContainerLogsConfig struct {
| ... | ... |
@@ -29,209 +20,64 @@ type ContainerLogsConfig struct {
|
| 29 | 29 |
Stop <-chan bool |
| 30 | 30 |
} |
| 31 | 31 |
|
| 32 |
-func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) error {
|
|
| 33 |
- var ( |
|
| 34 |
- lines = -1 |
|
| 35 |
- format string |
|
| 36 |
- ) |
|
| 32 |
+func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsConfig) error {
|
|
| 37 | 33 |
if !(config.UseStdout || config.UseStderr) {
|
| 38 | 34 |
return fmt.Errorf("You must choose at least one stream")
|
| 39 | 35 |
} |
| 40 |
- if config.Timestamps {
|
|
| 41 |
- format = timeutils.RFC3339NanoFixed |
|
| 42 |
- } |
|
| 43 |
- if config.Tail == "" {
|
|
| 44 |
- config.Tail = "latest" |
|
| 45 |
- } |
|
| 46 |
- |
|
| 47 |
- container, err := daemon.Get(name) |
|
| 48 |
- if err != nil {
|
|
| 49 |
- return err |
|
| 50 |
- } |
|
| 51 | 36 |
|
| 52 |
- var ( |
|
| 53 |
- outStream = config.OutStream |
|
| 54 |
- errStream io.Writer |
|
| 55 |
- ) |
|
| 37 |
+ outStream := config.OutStream |
|
| 38 |
+ errStream := outStream |
|
| 56 | 39 |
if !container.Config.Tty {
|
| 57 | 40 |
errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) |
| 58 | 41 |
outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) |
| 59 |
- } else {
|
|
| 60 |
- errStream = outStream |
|
| 61 |
- } |
|
| 62 |
- |
|
| 63 |
- if container.LogDriverType() != jsonfilelog.Name {
|
|
| 64 |
- return fmt.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver")
|
|
| 65 |
- } |
|
| 66 |
- |
|
| 67 |
- maxFile := 1 |
|
| 68 |
- container.readHostConfig() |
|
| 69 |
- cfg := container.getLogConfig() |
|
| 70 |
- conf := cfg.Config |
|
| 71 |
- if val, ok := conf["max-file"]; ok {
|
|
| 72 |
- var err error |
|
| 73 |
- maxFile, err = strconv.Atoi(val) |
|
| 74 |
- if err != nil {
|
|
| 75 |
- return fmt.Errorf("Error reading max-file value: %s", err)
|
|
| 76 |
- } |
|
| 77 | 42 |
} |
| 78 | 43 |
|
| 79 |
- logDriver, err := container.getLogger() |
|
| 44 |
+ cLog, err := container.getLogger() |
|
| 80 | 45 |
if err != nil {
|
| 81 | 46 |
return err |
| 82 | 47 |
} |
| 83 |
- _, ok := logDriver.(logger.Reader) |
|
| 48 |
+ logReader, ok := cLog.(logger.LogReader) |
|
| 84 | 49 |
if !ok {
|
| 85 |
- logrus.Errorf("Cannot read logs of the [%s] driver", logDriver.Name())
|
|
| 86 |
- } else {
|
|
| 87 |
- // json-file driver |
|
| 88 |
- if config.Tail != "all" && config.Tail != "latest" {
|
|
| 89 |
- var err error |
|
| 90 |
- lines, err = strconv.Atoi(config.Tail) |
|
| 91 |
- if err != nil {
|
|
| 92 |
- logrus.Errorf("Failed to parse tail %s, error: %v, show all logs", config.Tail, err)
|
|
| 93 |
- lines = -1 |
|
| 94 |
- } |
|
| 95 |
- } |
|
| 96 |
- |
|
| 97 |
- if lines != 0 {
|
|
| 98 |
- n := maxFile |
|
| 99 |
- if config.Tail == "latest" && config.Since.IsZero() {
|
|
| 100 |
- n = 1 |
|
| 101 |
- } |
|
| 102 |
- before := false |
|
| 103 |
- for i := n; i > 0; i-- {
|
|
| 104 |
- if before {
|
|
| 105 |
- break |
|
| 106 |
- } |
|
| 107 |
- cLog, err := getReader(logDriver, i, n, lines) |
|
| 108 |
- if err != nil {
|
|
| 109 |
- logrus.Debugf("Error reading %d log file: %v", i-1, err)
|
|
| 110 |
- continue |
|
| 111 |
- } |
|
| 112 |
- //if lines are specified, then iterate only once |
|
| 113 |
- if lines > 0 {
|
|
| 114 |
- i = 1 |
|
| 115 |
- } else { // if lines are not specified, cLog is a file, It needs to be closed
|
|
| 116 |
- defer cLog.(*os.File).Close() |
|
| 117 |
- } |
|
| 118 |
- dec := json.NewDecoder(cLog) |
|
| 119 |
- l := &jsonlog.JSONLog{}
|
|
| 120 |
- for {
|
|
| 121 |
- l.Reset() |
|
| 122 |
- if err := dec.Decode(l); err == io.EOF {
|
|
| 123 |
- break |
|
| 124 |
- } else if err != nil {
|
|
| 125 |
- logrus.Errorf("Error streaming logs: %s", err)
|
|
| 126 |
- break |
|
| 127 |
- } |
|
| 128 |
- logLine := l.Log |
|
| 129 |
- if !config.Since.IsZero() && l.Created.Before(config.Since) {
|
|
| 130 |
- continue |
|
| 131 |
- } |
|
| 132 |
- if config.Timestamps {
|
|
| 133 |
- // format can be "" or time format, so here can't be error |
|
| 134 |
- logLine, _ = l.Format(format) |
|
| 135 |
- } |
|
| 136 |
- if l.Stream == "stdout" && config.UseStdout {
|
|
| 137 |
- io.WriteString(outStream, logLine) |
|
| 138 |
- } |
|
| 139 |
- if l.Stream == "stderr" && config.UseStderr {
|
|
| 140 |
- io.WriteString(errStream, logLine) |
|
| 141 |
- } |
|
| 142 |
- } |
|
| 143 |
- } |
|
| 144 |
- } |
|
| 50 |
+ return logger.ErrReadLogsNotSupported |
|
| 145 | 51 |
} |
| 146 | 52 |
|
| 147 |
- if config.Follow && container.IsRunning() {
|
|
| 148 |
- chErrStderr := make(chan error) |
|
| 149 |
- chErrStdout := make(chan error) |
|
| 150 |
- var stdoutPipe, stderrPipe io.ReadCloser |
|
| 151 |
- |
|
| 152 |
- // write an empty chunk of data (this is to ensure that the |
|
| 153 |
- // HTTP Response is sent immediatly, even if the container has |
|
| 154 |
- // not yet produced any data) |
|
| 155 |
- outStream.Write(nil) |
|
| 53 |
+ follow := config.Follow && container.IsRunning() |
|
| 54 |
+ tailLines, err := strconv.Atoi(config.Tail) |
|
| 55 |
+ if err != nil {
|
|
| 56 |
+ tailLines = -1 |
|
| 57 |
+ } |
|
| 156 | 58 |
|
| 157 |
- if config.UseStdout {
|
|
| 158 |
- stdoutPipe = container.StdoutLogPipe() |
|
| 159 |
- go func() {
|
|
| 160 |
- logrus.Debug("logs: stdout stream begin")
|
|
| 161 |
- chErrStdout <- jsonlog.WriteLog(stdoutPipe, outStream, format, config.Since) |
|
| 162 |
- logrus.Debug("logs: stdout stream end")
|
|
| 163 |
- }() |
|
| 164 |
- } |
|
| 165 |
- if config.UseStderr {
|
|
| 166 |
- stderrPipe = container.StderrLogPipe() |
|
| 167 |
- go func() {
|
|
| 168 |
- logrus.Debug("logs: stderr stream begin")
|
|
| 169 |
- chErrStderr <- jsonlog.WriteLog(stderrPipe, errStream, format, config.Since) |
|
| 170 |
- logrus.Debug("logs: stderr stream end")
|
|
| 171 |
- }() |
|
| 172 |
- } |
|
| 59 |
+ logrus.Debug("logs: begin stream")
|
|
| 60 |
+ readConfig := logger.ReadConfig{
|
|
| 61 |
+ Since: config.Since, |
|
| 62 |
+ Tail: tailLines, |
|
| 63 |
+ Follow: follow, |
|
| 64 |
+ } |
|
| 65 |
+ logs := logReader.ReadLogs(readConfig) |
|
| 173 | 66 |
|
| 67 |
+ for {
|
|
| 174 | 68 |
select {
|
| 175 |
- case err = <-chErrStderr: |
|
| 176 |
- if stdoutPipe != nil {
|
|
| 177 |
- stdoutPipe.Close() |
|
| 178 |
- <-chErrStdout |
|
| 179 |
- } |
|
| 180 |
- case err = <-chErrStdout: |
|
| 181 |
- if stderrPipe != nil {
|
|
| 182 |
- stderrPipe.Close() |
|
| 183 |
- <-chErrStderr |
|
| 184 |
- } |
|
| 69 |
+ case err := <-logs.Err: |
|
| 70 |
+ logrus.Errorf("Error streaming logs: %v", err)
|
|
| 71 |
+ return nil |
|
| 185 | 72 |
case <-config.Stop: |
| 186 |
- if stdoutPipe != nil {
|
|
| 187 |
- stdoutPipe.Close() |
|
| 188 |
- <-chErrStdout |
|
| 73 |
+ logs.Close() |
|
| 74 |
+ return nil |
|
| 75 |
+ case msg, ok := <-logs.Msg: |
|
| 76 |
+ if !ok {
|
|
| 77 |
+ logrus.Debugf("logs: end stream")
|
|
| 78 |
+ return nil |
|
| 189 | 79 |
} |
| 190 |
- if stderrPipe != nil {
|
|
| 191 |
- stderrPipe.Close() |
|
| 192 |
- <-chErrStderr |
|
| 80 |
+ logLine := msg.Line |
|
| 81 |
+ if config.Timestamps {
|
|
| 82 |
+ logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) |
|
| 193 | 83 |
} |
| 194 |
- return nil |
|
| 195 |
- } |
|
| 196 |
- |
|
| 197 |
- if err != nil && err != io.EOF && err != io.ErrClosedPipe {
|
|
| 198 |
- if e, ok := err.(*net.OpError); ok && e.Err != syscall.EPIPE {
|
|
| 199 |
- logrus.Errorf("error streaming logs: %v", err)
|
|
| 84 |
+ if msg.Source == "stdout" && config.UseStdout {
|
|
| 85 |
+ outStream.Write(logLine) |
|
| 86 |
+ } |
|
| 87 |
+ if msg.Source == "stderr" && config.UseStderr {
|
|
| 88 |
+ errStream.Write(logLine) |
|
| 200 | 89 |
} |
| 201 | 90 |
} |
| 202 | 91 |
} |
| 203 |
- return nil |
|
| 204 |
-} |
|
| 205 |
- |
|
| 206 |
-func getReader(logDriver logger.Logger, fileIndex, maxFiles, lines int) (io.Reader, error) {
|
|
| 207 |
- if lines <= 0 {
|
|
| 208 |
- index := strconv.Itoa(fileIndex - 1) |
|
| 209 |
- cLog, err := logDriver.(logger.Reader).ReadLog(index) |
|
| 210 |
- return cLog, err |
|
| 211 |
- } |
|
| 212 |
- buf := bytes.NewBuffer([]byte{})
|
|
| 213 |
- remaining := lines |
|
| 214 |
- for i := 0; i < maxFiles; i++ {
|
|
| 215 |
- index := strconv.Itoa(i) |
|
| 216 |
- cLog, err := logDriver.(logger.Reader).ReadLog(index) |
|
| 217 |
- if err != nil {
|
|
| 218 |
- return buf, err |
|
| 219 |
- } |
|
| 220 |
- f := cLog.(*os.File) |
|
| 221 |
- ls, err := tailfile.TailFile(f, remaining) |
|
| 222 |
- if err != nil {
|
|
| 223 |
- return buf, err |
|
| 224 |
- } |
|
| 225 |
- tmp := bytes.NewBuffer([]byte{})
|
|
| 226 |
- for _, l := range ls {
|
|
| 227 |
- fmt.Fprintf(tmp, "%s\n", l) |
|
| 228 |
- } |
|
| 229 |
- tmp.ReadFrom(buf) |
|
| 230 |
- buf = tmp |
|
| 231 |
- if len(ls) == remaining {
|
|
| 232 |
- return buf, nil |
|
| 233 |
- } |
|
| 234 |
- remaining = remaining - len(ls) |
|
| 235 |
- } |
|
| 236 |
- return buf, nil |
|
| 237 | 92 |
} |
| ... | ... |
@@ -12,7 +12,10 @@ import ( |
| 12 | 12 |
"github.com/docker/docker/runconfig" |
| 13 | 13 |
) |
| 14 | 14 |
|
| 15 |
-const defaultTimeIncrement = 100 |
|
| 15 |
+const ( |
|
| 16 |
+ defaultTimeIncrement = 100 |
|
| 17 |
+ loggerCloseTimeout = 10 * time.Second |
|
| 18 |
+) |
|
| 16 | 19 |
|
| 17 | 20 |
// containerMonitor monitors the execution of a container's main process. |
| 18 | 21 |
// If a restart policy is specified for the container the monitor will ensure that the |
| ... | ... |
@@ -310,7 +313,7 @@ func (m *containerMonitor) resetContainer(lock bool) {
|
| 310 | 310 |
close(exit) |
| 311 | 311 |
}() |
| 312 | 312 |
select {
|
| 313 |
- case <-time.After(1 * time.Second): |
|
| 313 |
+ case <-time.After(loggerCloseTimeout): |
|
| 314 | 314 |
logrus.Warnf("Logger didn't exit in time: logs may be truncated")
|
| 315 | 315 |
case <-exit: |
| 316 | 316 |
} |
| ... | ... |
@@ -29,7 +29,7 @@ The `docker logs --follow` command will continue streaming the new output from |
| 29 | 29 |
the container's `STDOUT` and `STDERR`. |
| 30 | 30 |
|
| 31 | 31 |
Passing a negative number or a non-integer to `--tail` is invalid and the |
| 32 |
-value is set to `latest` in that case. |
|
| 32 |
+value is set to `all` in that case. |
|
| 33 | 33 |
|
| 34 | 34 |
The `docker logs --timestamp` commands will add an RFC3339Nano |
| 35 | 35 |
timestamp, for example `2014-09-16T06:17:46.000000000Z`, to each |
| ... | ... |
@@ -50,4 +50,7 @@ clone git github.com/fluent/fluent-logger-golang v1.0.0 |
| 50 | 50 |
clone git github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa |
| 51 | 51 |
clone git github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c |
| 52 | 52 |
|
| 53 |
+# fsnotify |
|
| 54 |
+clone git gopkg.in/fsnotify.v1 v1.2.0 |
|
| 55 |
+ |
|
| 53 | 56 |
clean |
| ... | ... |
@@ -250,13 +250,9 @@ func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) {
|
| 250 | 250 |
}() |
| 251 | 251 |
|
| 252 | 252 |
logCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID) |
| 253 |
- |
|
| 254 | 253 |
stdout, err := logCmd.StdoutPipe() |
| 255 | 254 |
c.Assert(err, check.IsNil) |
| 256 |
- |
|
| 257 |
- if err := logCmd.Start(); err != nil {
|
|
| 258 |
- c.Fatal(err) |
|
| 259 |
- } |
|
| 255 |
+ c.Assert(logCmd.Start(), check.IsNil) |
|
| 260 | 256 |
|
| 261 | 257 |
// First read slowly |
| 262 | 258 |
bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) |
| ... | ... |
@@ -1,33 +1,20 @@ |
| 1 | 1 |
package broadcastwriter |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
- "bytes" |
|
| 5 | 4 |
"io" |
| 6 | 5 |
"sync" |
| 7 |
- "time" |
|
| 8 |
- |
|
| 9 |
- "github.com/Sirupsen/logrus" |
|
| 10 |
- "github.com/docker/docker/pkg/jsonlog" |
|
| 11 |
- "github.com/docker/docker/pkg/timeutils" |
|
| 12 | 6 |
) |
| 13 | 7 |
|
| 14 | 8 |
// BroadcastWriter accumulate multiple io.WriteCloser by stream. |
| 15 | 9 |
type BroadcastWriter struct {
|
| 16 | 10 |
sync.Mutex |
| 17 |
- buf *bytes.Buffer |
|
| 18 |
- jsLogBuf *bytes.Buffer |
|
| 19 |
- streams map[string](map[io.WriteCloser]struct{})
|
|
| 11 |
+ writers map[io.WriteCloser]struct{}
|
|
| 20 | 12 |
} |
| 21 | 13 |
|
| 22 |
-// AddWriter adds new io.WriteCloser for stream. |
|
| 23 |
-// If stream is "", then all writes proceed as is. Otherwise every line from |
|
| 24 |
-// input will be packed to serialized jsonlog.JSONLog. |
|
| 25 |
-func (w *BroadcastWriter) AddWriter(writer io.WriteCloser, stream string) {
|
|
| 14 |
+// AddWriter adds new io.WriteCloser. |
|
| 15 |
+func (w *BroadcastWriter) AddWriter(writer io.WriteCloser) {
|
|
| 26 | 16 |
w.Lock() |
| 27 |
- if _, ok := w.streams[stream]; !ok {
|
|
| 28 |
- w.streams[stream] = make(map[io.WriteCloser]struct{})
|
|
| 29 |
- } |
|
| 30 |
- w.streams[stream][writer] = struct{}{}
|
|
| 17 |
+ w.writers[writer] = struct{}{}
|
|
| 31 | 18 |
w.Unlock() |
| 32 | 19 |
} |
| 33 | 20 |
|
| ... | ... |
@@ -35,67 +22,12 @@ func (w *BroadcastWriter) AddWriter(writer io.WriteCloser, stream string) {
|
| 35 | 35 |
// this call. |
| 36 | 36 |
func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
|
| 37 | 37 |
w.Lock() |
| 38 |
- if writers, ok := w.streams[""]; ok {
|
|
| 39 |
- for sw := range writers {
|
|
| 40 |
- if n, err := sw.Write(p); err != nil || n != len(p) {
|
|
| 41 |
- // On error, evict the writer |
|
| 42 |
- delete(writers, sw) |
|
| 43 |
- } |
|
| 44 |
- } |
|
| 45 |
- if len(w.streams) == 1 {
|
|
| 46 |
- if w.buf.Len() >= 4096 {
|
|
| 47 |
- w.buf.Reset() |
|
| 48 |
- } else {
|
|
| 49 |
- w.buf.Write(p) |
|
| 50 |
- } |
|
| 51 |
- w.Unlock() |
|
| 52 |
- return len(p), nil |
|
| 38 |
+ for sw := range w.writers {
|
|
| 39 |
+ if n, err := sw.Write(p); err != nil || n != len(p) {
|
|
| 40 |
+ // On error, evict the writer |
|
| 41 |
+ delete(w.writers, sw) |
|
| 53 | 42 |
} |
| 54 | 43 |
} |
| 55 |
- if w.jsLogBuf == nil {
|
|
| 56 |
- w.jsLogBuf = new(bytes.Buffer) |
|
| 57 |
- w.jsLogBuf.Grow(1024) |
|
| 58 |
- } |
|
| 59 |
- var timestamp string |
|
| 60 |
- created := time.Now().UTC() |
|
| 61 |
- w.buf.Write(p) |
|
| 62 |
- for {
|
|
| 63 |
- if n := w.buf.Len(); n == 0 {
|
|
| 64 |
- break |
|
| 65 |
- } |
|
| 66 |
- i := bytes.IndexByte(w.buf.Bytes(), '\n') |
|
| 67 |
- if i < 0 {
|
|
| 68 |
- break |
|
| 69 |
- } |
|
| 70 |
- lineBytes := w.buf.Next(i + 1) |
|
| 71 |
- if timestamp == "" {
|
|
| 72 |
- timestamp, err = timeutils.FastMarshalJSON(created) |
|
| 73 |
- if err != nil {
|
|
| 74 |
- continue |
|
| 75 |
- } |
|
| 76 |
- } |
|
| 77 |
- |
|
| 78 |
- for stream, writers := range w.streams {
|
|
| 79 |
- if stream == "" {
|
|
| 80 |
- continue |
|
| 81 |
- } |
|
| 82 |
- jsonLog := jsonlog.JSONLogBytes{Log: lineBytes, Stream: stream, Created: timestamp}
|
|
| 83 |
- err = jsonLog.MarshalJSONBuf(w.jsLogBuf) |
|
| 84 |
- if err != nil {
|
|
| 85 |
- logrus.Errorf("Error making JSON log line: %s", err)
|
|
| 86 |
- continue |
|
| 87 |
- } |
|
| 88 |
- w.jsLogBuf.WriteByte('\n')
|
|
| 89 |
- b := w.jsLogBuf.Bytes() |
|
| 90 |
- for sw := range writers {
|
|
| 91 |
- if _, err := sw.Write(b); err != nil {
|
|
| 92 |
- delete(writers, sw) |
|
| 93 |
- } |
|
| 94 |
- } |
|
| 95 |
- } |
|
| 96 |
- w.jsLogBuf.Reset() |
|
| 97 |
- } |
|
| 98 |
- w.jsLogBuf.Reset() |
|
| 99 | 44 |
w.Unlock() |
| 100 | 45 |
return len(p), nil |
| 101 | 46 |
} |
| ... | ... |
@@ -104,19 +36,16 @@ func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
|
| 104 | 104 |
// will be saved. |
| 105 | 105 |
func (w *BroadcastWriter) Clean() error {
|
| 106 | 106 |
w.Lock() |
| 107 |
- for _, writers := range w.streams {
|
|
| 108 |
- for w := range writers {
|
|
| 109 |
- w.Close() |
|
| 110 |
- } |
|
| 107 |
+ for w := range w.writers {
|
|
| 108 |
+ w.Close() |
|
| 111 | 109 |
} |
| 112 |
- w.streams = make(map[string](map[io.WriteCloser]struct{}))
|
|
| 110 |
+ w.writers = make(map[io.WriteCloser]struct{})
|
|
| 113 | 111 |
w.Unlock() |
| 114 | 112 |
return nil |
| 115 | 113 |
} |
| 116 | 114 |
|
| 117 | 115 |
func New() *BroadcastWriter {
|
| 118 | 116 |
return &BroadcastWriter{
|
| 119 |
- streams: make(map[string](map[io.WriteCloser]struct{})),
|
|
| 120 |
- buf: bytes.NewBuffer(nil), |
|
| 117 |
+ writers: make(map[io.WriteCloser]struct{}),
|
|
| 121 | 118 |
} |
| 122 | 119 |
} |
| ... | ... |
@@ -32,9 +32,9 @@ func TestBroadcastWriter(t *testing.T) {
|
| 32 | 32 |
|
| 33 | 33 |
// Test 1: Both bufferA and bufferB should contain "foo" |
| 34 | 34 |
bufferA := &dummyWriter{}
|
| 35 |
- writer.AddWriter(bufferA, "") |
|
| 35 |
+ writer.AddWriter(bufferA) |
|
| 36 | 36 |
bufferB := &dummyWriter{}
|
| 37 |
- writer.AddWriter(bufferB, "") |
|
| 37 |
+ writer.AddWriter(bufferB) |
|
| 38 | 38 |
writer.Write([]byte("foo"))
|
| 39 | 39 |
|
| 40 | 40 |
if bufferA.String() != "foo" {
|
| ... | ... |
@@ -48,7 +48,7 @@ func TestBroadcastWriter(t *testing.T) {
|
| 48 | 48 |
// Test2: bufferA and bufferB should contain "foobar", |
| 49 | 49 |
// while bufferC should only contain "bar" |
| 50 | 50 |
bufferC := &dummyWriter{}
|
| 51 |
- writer.AddWriter(bufferC, "") |
|
| 51 |
+ writer.AddWriter(bufferC) |
|
| 52 | 52 |
writer.Write([]byte("bar"))
|
| 53 | 53 |
|
| 54 | 54 |
if bufferA.String() != "foobar" {
|
| ... | ... |
@@ -100,7 +100,7 @@ func TestRaceBroadcastWriter(t *testing.T) {
|
| 100 | 100 |
writer := New() |
| 101 | 101 |
c := make(chan bool) |
| 102 | 102 |
go func() {
|
| 103 |
- writer.AddWriter(devNullCloser(0), "") |
|
| 103 |
+ writer.AddWriter(devNullCloser(0)) |
|
| 104 | 104 |
c <- true |
| 105 | 105 |
}() |
| 106 | 106 |
writer.Write([]byte("hello"))
|
| ... | ... |
@@ -111,9 +111,9 @@ func BenchmarkBroadcastWriter(b *testing.B) {
|
| 111 | 111 |
writer := New() |
| 112 | 112 |
setUpWriter := func() {
|
| 113 | 113 |
for i := 0; i < 100; i++ {
|
| 114 |
- writer.AddWriter(devNullCloser(0), "stdout") |
|
| 115 |
- writer.AddWriter(devNullCloser(0), "stderr") |
|
| 116 |
- writer.AddWriter(devNullCloser(0), "") |
|
| 114 |
+ writer.AddWriter(devNullCloser(0)) |
|
| 115 |
+ writer.AddWriter(devNullCloser(0)) |
|
| 116 |
+ writer.AddWriter(devNullCloser(0)) |
|
| 117 | 117 |
} |
| 118 | 118 |
} |
| 119 | 119 |
testLine := "Line that thinks that it is log line from docker" |
| ... | ... |
@@ -142,33 +142,3 @@ func BenchmarkBroadcastWriter(b *testing.B) {
|
| 142 | 142 |
b.StartTimer() |
| 143 | 143 |
} |
| 144 | 144 |
} |
| 145 |
- |
|
| 146 |
-func BenchmarkBroadcastWriterWithoutStdoutStderr(b *testing.B) {
|
|
| 147 |
- writer := New() |
|
| 148 |
- setUpWriter := func() {
|
|
| 149 |
- for i := 0; i < 100; i++ {
|
|
| 150 |
- writer.AddWriter(devNullCloser(0), "") |
|
| 151 |
- } |
|
| 152 |
- } |
|
| 153 |
- testLine := "Line that thinks that it is log line from docker" |
|
| 154 |
- var buf bytes.Buffer |
|
| 155 |
- for i := 0; i < 100; i++ {
|
|
| 156 |
- buf.Write([]byte(testLine + "\n")) |
|
| 157 |
- } |
|
| 158 |
- // line without eol |
|
| 159 |
- buf.Write([]byte(testLine)) |
|
| 160 |
- testText := buf.Bytes() |
|
| 161 |
- b.SetBytes(int64(5 * len(testText))) |
|
| 162 |
- b.ResetTimer() |
|
| 163 |
- for i := 0; i < b.N; i++ {
|
|
| 164 |
- setUpWriter() |
|
| 165 |
- |
|
| 166 |
- for j := 0; j < 5; j++ {
|
|
| 167 |
- if _, err := writer.Write(testText); err != nil {
|
|
| 168 |
- b.Fatal(err) |
|
| 169 |
- } |
|
| 170 |
- } |
|
| 171 |
- |
|
| 172 |
- writer.Clean() |
|
| 173 |
- } |
|
| 174 |
-} |
| 175 | 145 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,226 @@ |
| 0 |
+package ioutils |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "fmt" |
|
| 5 |
+ "io" |
|
| 6 |
+ "os" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+type pos struct {
|
|
| 10 |
+ idx int |
|
| 11 |
+ offset int64 |
|
| 12 |
+} |
|
| 13 |
+ |
|
| 14 |
+type multiReadSeeker struct {
|
|
| 15 |
+ readers []io.ReadSeeker |
|
| 16 |
+ pos *pos |
|
| 17 |
+ posIdx map[io.ReadSeeker]int |
|
| 18 |
+} |
|
| 19 |
+ |
|
| 20 |
+func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
|
| 21 |
+ var tmpOffset int64 |
|
| 22 |
+ switch whence {
|
|
| 23 |
+ case os.SEEK_SET: |
|
| 24 |
+ for i, rdr := range r.readers {
|
|
| 25 |
+ // get size of the current reader |
|
| 26 |
+ s, err := rdr.Seek(0, os.SEEK_END) |
|
| 27 |
+ if err != nil {
|
|
| 28 |
+ return -1, err |
|
| 29 |
+ } |
|
| 30 |
+ |
|
| 31 |
+ if offset > tmpOffset+s {
|
|
| 32 |
+ if i == len(r.readers)-1 {
|
|
| 33 |
+ rdrOffset := s + (offset - tmpOffset) |
|
| 34 |
+ if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
|
|
| 35 |
+ return -1, err |
|
| 36 |
+ } |
|
| 37 |
+ r.pos = &pos{i, rdrOffset}
|
|
| 38 |
+ return offset, nil |
|
| 39 |
+ } |
|
| 40 |
+ |
|
| 41 |
+ tmpOffset += s |
|
| 42 |
+ continue |
|
| 43 |
+ } |
|
| 44 |
+ |
|
| 45 |
+ rdrOffset := offset - tmpOffset |
|
| 46 |
+ idx := i |
|
| 47 |
+ |
|
| 48 |
+ rdr.Seek(rdrOffset, os.SEEK_SET) |
|
| 49 |
+ // make sure all following readers are at 0 |
|
| 50 |
+ for _, rdr := range r.readers[i+1:] {
|
|
| 51 |
+ rdr.Seek(0, os.SEEK_SET) |
|
| 52 |
+ } |
|
| 53 |
+ |
|
| 54 |
+ if rdrOffset == s && i != len(r.readers)-1 {
|
|
| 55 |
+ idx += 1 |
|
| 56 |
+ rdrOffset = 0 |
|
| 57 |
+ } |
|
| 58 |
+ r.pos = &pos{idx, rdrOffset}
|
|
| 59 |
+ return offset, nil |
|
| 60 |
+ } |
|
| 61 |
+ case os.SEEK_END: |
|
| 62 |
+ for _, rdr := range r.readers {
|
|
| 63 |
+ s, err := rdr.Seek(0, os.SEEK_END) |
|
| 64 |
+ if err != nil {
|
|
| 65 |
+ return -1, err |
|
| 66 |
+ } |
|
| 67 |
+ tmpOffset += s |
|
| 68 |
+ } |
|
| 69 |
+ r.Seek(tmpOffset+offset, os.SEEK_SET) |
|
| 70 |
+ return tmpOffset + offset, nil |
|
| 71 |
+ case os.SEEK_CUR: |
|
| 72 |
+ if r.pos == nil {
|
|
| 73 |
+ return r.Seek(offset, os.SEEK_SET) |
|
| 74 |
+ } |
|
| 75 |
+ // Just return the current offset |
|
| 76 |
+ if offset == 0 {
|
|
| 77 |
+ return r.getCurOffset() |
|
| 78 |
+ } |
|
| 79 |
+ |
|
| 80 |
+ curOffset, err := r.getCurOffset() |
|
| 81 |
+ if err != nil {
|
|
| 82 |
+ return -1, err |
|
| 83 |
+ } |
|
| 84 |
+ rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) |
|
| 85 |
+ if err != nil {
|
|
| 86 |
+ return -1, err |
|
| 87 |
+ } |
|
| 88 |
+ |
|
| 89 |
+ r.pos = &pos{r.posIdx[rdr], rdrOffset}
|
|
| 90 |
+ return curOffset + offset, nil |
|
| 91 |
+ default: |
|
| 92 |
+ return -1, fmt.Errorf("Invalid whence: %d", whence)
|
|
| 93 |
+ } |
|
| 94 |
+ |
|
| 95 |
+ return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
|
|
| 96 |
+} |
|
| 97 |
+ |
|
| 98 |
+func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
|
|
| 99 |
+ var rdr io.ReadSeeker |
|
| 100 |
+ var rdrOffset int64 |
|
| 101 |
+ |
|
| 102 |
+ for i, rdr := range r.readers {
|
|
| 103 |
+ offsetTo, err := r.getOffsetToReader(rdr) |
|
| 104 |
+ if err != nil {
|
|
| 105 |
+ return nil, -1, err |
|
| 106 |
+ } |
|
| 107 |
+ if offsetTo > offset {
|
|
| 108 |
+ rdr = r.readers[i-1] |
|
| 109 |
+ rdrOffset = offsetTo - offset |
|
| 110 |
+ break |
|
| 111 |
+ } |
|
| 112 |
+ |
|
| 113 |
+ if rdr == r.readers[len(r.readers)-1] {
|
|
| 114 |
+ rdrOffset = offsetTo + offset |
|
| 115 |
+ break |
|
| 116 |
+ } |
|
| 117 |
+ } |
|
| 118 |
+ |
|
| 119 |
+ return rdr, rdrOffset, nil |
|
| 120 |
+} |
|
| 121 |
+ |
|
| 122 |
+func (r *multiReadSeeker) getCurOffset() (int64, error) {
|
|
| 123 |
+ var totalSize int64 |
|
| 124 |
+ for _, rdr := range r.readers[:r.pos.idx+1] {
|
|
| 125 |
+ if r.posIdx[rdr] == r.pos.idx {
|
|
| 126 |
+ totalSize += r.pos.offset |
|
| 127 |
+ break |
|
| 128 |
+ } |
|
| 129 |
+ |
|
| 130 |
+ size, err := getReadSeekerSize(rdr) |
|
| 131 |
+ if err != nil {
|
|
| 132 |
+ return -1, fmt.Errorf("error getting seeker size: %v", err)
|
|
| 133 |
+ } |
|
| 134 |
+ totalSize += size |
|
| 135 |
+ } |
|
| 136 |
+ return totalSize, nil |
|
| 137 |
+} |
|
| 138 |
+ |
|
| 139 |
+func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
|
|
| 140 |
+ var offset int64 |
|
| 141 |
+ for _, r := range r.readers {
|
|
| 142 |
+ if r == rdr {
|
|
| 143 |
+ break |
|
| 144 |
+ } |
|
| 145 |
+ |
|
| 146 |
+ size, err := getReadSeekerSize(rdr) |
|
| 147 |
+ if err != nil {
|
|
| 148 |
+ return -1, err |
|
| 149 |
+ } |
|
| 150 |
+ offset += size |
|
| 151 |
+ } |
|
| 152 |
+ return offset, nil |
|
| 153 |
+} |
|
| 154 |
+ |
|
| 155 |
+func (r *multiReadSeeker) Read(b []byte) (int, error) {
|
|
| 156 |
+ if r.pos == nil {
|
|
| 157 |
+ r.pos = &pos{0, 0}
|
|
| 158 |
+ } |
|
| 159 |
+ |
|
| 160 |
+ bCap := int64(cap(b)) |
|
| 161 |
+ buf := bytes.NewBuffer(nil) |
|
| 162 |
+ var rdr io.ReadSeeker |
|
| 163 |
+ |
|
| 164 |
+ for _, rdr = range r.readers[r.pos.idx:] {
|
|
| 165 |
+ readBytes, err := io.CopyN(buf, rdr, bCap) |
|
| 166 |
+ if err != nil && err != io.EOF {
|
|
| 167 |
+ return -1, err |
|
| 168 |
+ } |
|
| 169 |
+ bCap -= readBytes |
|
| 170 |
+ |
|
| 171 |
+ if bCap == 0 {
|
|
| 172 |
+ break |
|
| 173 |
+ } |
|
| 174 |
+ } |
|
| 175 |
+ |
|
| 176 |
+ rdrPos, err := rdr.Seek(0, os.SEEK_CUR) |
|
| 177 |
+ if err != nil {
|
|
| 178 |
+ return -1, err |
|
| 179 |
+ } |
|
| 180 |
+ r.pos = &pos{r.posIdx[rdr], rdrPos}
|
|
| 181 |
+ return buf.Read(b) |
|
| 182 |
+} |
|
| 183 |
+ |
|
| 184 |
+func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
|
|
| 185 |
+ // save the current position |
|
| 186 |
+ pos, err := rdr.Seek(0, os.SEEK_CUR) |
|
| 187 |
+ if err != nil {
|
|
| 188 |
+ return -1, err |
|
| 189 |
+ } |
|
| 190 |
+ |
|
| 191 |
+ // get the size |
|
| 192 |
+ size, err := rdr.Seek(0, os.SEEK_END) |
|
| 193 |
+ if err != nil {
|
|
| 194 |
+ return -1, err |
|
| 195 |
+ } |
|
| 196 |
+ |
|
| 197 |
+ // reset the position |
|
| 198 |
+ if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
|
|
| 199 |
+ return -1, err |
|
| 200 |
+ } |
|
| 201 |
+ return size, nil |
|
| 202 |
+} |
|
| 203 |
+ |
|
| 204 |
+// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided |
|
| 205 |
+// input readseekers. After calling this method the initial position is set to the |
|
| 206 |
+// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances |
|
| 207 |
+// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. |
|
| 208 |
+// Seek can be used over the sum of lengths of all readseekers. |
|
| 209 |
+// |
|
| 210 |
+// When a MultiReadSeeker is used, no Read and Seek operations should be made on |
|
| 211 |
+// its ReadSeeker components. Also, users should make no assumption on the state |
|
| 212 |
+// of individual readseekers while the MultiReadSeeker is used. |
|
| 213 |
+func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
|
|
| 214 |
+ if len(readers) == 1 {
|
|
| 215 |
+ return readers[0] |
|
| 216 |
+ } |
|
| 217 |
+ idx := make(map[io.ReadSeeker]int) |
|
| 218 |
+ for i, rdr := range readers {
|
|
| 219 |
+ idx[rdr] = i |
|
| 220 |
+ } |
|
| 221 |
+ return &multiReadSeeker{
|
|
| 222 |
+ readers: readers, |
|
| 223 |
+ posIdx: idx, |
|
| 224 |
+ } |
|
| 225 |
+} |
| 0 | 226 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,149 @@ |
| 0 |
+package ioutils |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "fmt" |
|
| 5 |
+ "io" |
|
| 6 |
+ "io/ioutil" |
|
| 7 |
+ "os" |
|
| 8 |
+ "strings" |
|
| 9 |
+ "testing" |
|
| 10 |
+) |
|
| 11 |
+ |
|
| 12 |
+func TestMultiReadSeekerReadAll(t *testing.T) {
|
|
| 13 |
+ str := "hello world" |
|
| 14 |
+ s1 := strings.NewReader(str + " 1") |
|
| 15 |
+ s2 := strings.NewReader(str + " 2") |
|
| 16 |
+ s3 := strings.NewReader(str + " 3") |
|
| 17 |
+ mr := MultiReadSeeker(s1, s2, s3) |
|
| 18 |
+ |
|
| 19 |
+ expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) |
|
| 20 |
+ |
|
| 21 |
+ b, err := ioutil.ReadAll(mr) |
|
| 22 |
+ if err != nil {
|
|
| 23 |
+ t.Fatal(err) |
|
| 24 |
+ } |
|
| 25 |
+ |
|
| 26 |
+ expected := "hello world 1hello world 2hello world 3" |
|
| 27 |
+ if string(b) != expected {
|
|
| 28 |
+ t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected)
|
|
| 29 |
+ } |
|
| 30 |
+ |
|
| 31 |
+ size, err := mr.Seek(0, os.SEEK_END) |
|
| 32 |
+ if err != nil {
|
|
| 33 |
+ t.Fatal(err) |
|
| 34 |
+ } |
|
| 35 |
+ if size != expectedSize {
|
|
| 36 |
+ t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize)
|
|
| 37 |
+ } |
|
| 38 |
+ |
|
| 39 |
+ // Reset the position and read again |
|
| 40 |
+ pos, err := mr.Seek(0, os.SEEK_SET) |
|
| 41 |
+ if err != nil {
|
|
| 42 |
+ t.Fatal(err) |
|
| 43 |
+ } |
|
| 44 |
+ if pos != 0 {
|
|
| 45 |
+ t.Fatalf("expected position to be set to 0, got %d", pos)
|
|
| 46 |
+ } |
|
| 47 |
+ |
|
| 48 |
+ b, err = ioutil.ReadAll(mr) |
|
| 49 |
+ if err != nil {
|
|
| 50 |
+ t.Fatal(err) |
|
| 51 |
+ } |
|
| 52 |
+ |
|
| 53 |
+ if string(b) != expected {
|
|
| 54 |
+ t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected)
|
|
| 55 |
+ } |
|
| 56 |
+} |
|
| 57 |
+ |
|
| 58 |
+func TestMultiReadSeekerReadEach(t *testing.T) {
|
|
| 59 |
+ str := "hello world" |
|
| 60 |
+ s1 := strings.NewReader(str + " 1") |
|
| 61 |
+ s2 := strings.NewReader(str + " 2") |
|
| 62 |
+ s3 := strings.NewReader(str + " 3") |
|
| 63 |
+ mr := MultiReadSeeker(s1, s2, s3) |
|
| 64 |
+ |
|
| 65 |
+ var totalBytes int64 |
|
| 66 |
+ for i, s := range []*strings.Reader{s1, s2, s3} {
|
|
| 67 |
+ sLen := int64(s.Len()) |
|
| 68 |
+ buf := make([]byte, s.Len()) |
|
| 69 |
+ expected := []byte(fmt.Sprintf("%s %d", str, i+1))
|
|
| 70 |
+ |
|
| 71 |
+ if _, err := mr.Read(buf); err != nil && err != io.EOF {
|
|
| 72 |
+ t.Fatal(err) |
|
| 73 |
+ } |
|
| 74 |
+ |
|
| 75 |
+ if !bytes.Equal(buf, expected) {
|
|
| 76 |
+ t.Fatalf("expected %q to be %q", string(buf), string(expected))
|
|
| 77 |
+ } |
|
| 78 |
+ |
|
| 79 |
+ pos, err := mr.Seek(0, os.SEEK_CUR) |
|
| 80 |
+ if err != nil {
|
|
| 81 |
+ t.Fatalf("iteration: %d, error: %v", i+1, err)
|
|
| 82 |
+ } |
|
| 83 |
+ |
|
| 84 |
+ // check that the total bytes read is the current position of the seeker |
|
| 85 |
+ totalBytes += sLen |
|
| 86 |
+ if pos != totalBytes {
|
|
| 87 |
+ t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1)
|
|
| 88 |
+ } |
|
| 89 |
+ |
|
| 90 |
+ // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well |
|
| 91 |
+ newPos, err := mr.Seek(pos, os.SEEK_SET) |
|
| 92 |
+ if err != nil {
|
|
| 93 |
+ t.Fatal(err) |
|
| 94 |
+ } |
|
| 95 |
+ if newPos != pos {
|
|
| 96 |
+ t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos)
|
|
| 97 |
+ } |
|
| 98 |
+ } |
|
| 99 |
+} |
|
| 100 |
+ |
|
| 101 |
+func TestMultiReadSeekerReadSpanningChunks(t *testing.T) {
|
|
| 102 |
+ str := "hello world" |
|
| 103 |
+ s1 := strings.NewReader(str + " 1") |
|
| 104 |
+ s2 := strings.NewReader(str + " 2") |
|
| 105 |
+ s3 := strings.NewReader(str + " 3") |
|
| 106 |
+ mr := MultiReadSeeker(s1, s2, s3) |
|
| 107 |
+ |
|
| 108 |
+ buf := make([]byte, s1.Len()+3) |
|
| 109 |
+ _, err := mr.Read(buf) |
|
| 110 |
+ if err != nil {
|
|
| 111 |
+ t.Fatal(err) |
|
| 112 |
+ } |
|
| 113 |
+ |
|
| 114 |
+ // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string |
|
| 115 |
+ expected := "hello world 1hel" |
|
| 116 |
+ if string(buf) != expected {
|
|
| 117 |
+ t.Fatalf("expected %s to be %s", string(buf), expected)
|
|
| 118 |
+ } |
|
| 119 |
+} |
|
| 120 |
+ |
|
| 121 |
+func TestMultiReadSeekerNegativeSeek(t *testing.T) {
|
|
| 122 |
+ str := "hello world" |
|
| 123 |
+ s1 := strings.NewReader(str + " 1") |
|
| 124 |
+ s2 := strings.NewReader(str + " 2") |
|
| 125 |
+ s3 := strings.NewReader(str + " 3") |
|
| 126 |
+ mr := MultiReadSeeker(s1, s2, s3) |
|
| 127 |
+ |
|
| 128 |
+ s1Len := s1.Len() |
|
| 129 |
+ s2Len := s2.Len() |
|
| 130 |
+ s3Len := s3.Len() |
|
| 131 |
+ |
|
| 132 |
+ s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) |
|
| 133 |
+ if err != nil {
|
|
| 134 |
+ t.Fatal(err) |
|
| 135 |
+ } |
|
| 136 |
+ if s != int64(s1Len+s2Len) {
|
|
| 137 |
+ t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len())
|
|
| 138 |
+ } |
|
| 139 |
+ |
|
| 140 |
+ buf := make([]byte, s3Len) |
|
| 141 |
+ if _, err := mr.Read(buf); err != nil && err != io.EOF {
|
|
| 142 |
+ t.Fatal(err) |
|
| 143 |
+ } |
|
| 144 |
+ expected := fmt.Sprintf("%s %d", str, 3)
|
|
| 145 |
+ if string(buf) != fmt.Sprintf("%s %d", str, 3) {
|
|
| 146 |
+ t.Fatalf("expected %q to be %q", string(buf), expected)
|
|
| 147 |
+ } |
|
| 148 |
+} |
| ... | ... |
@@ -3,7 +3,6 @@ package jsonlog |
| 3 | 3 |
import ( |
| 4 | 4 |
"encoding/json" |
| 5 | 5 |
"fmt" |
| 6 |
- "io" |
|
| 7 | 6 |
"time" |
| 8 | 7 |
) |
| 9 | 8 |
|
| ... | ... |
@@ -29,28 +28,3 @@ func (jl *JSONLog) Reset() {
|
| 29 | 29 |
jl.Stream = "" |
| 30 | 30 |
jl.Created = time.Time{}
|
| 31 | 31 |
} |
| 32 |
- |
|
| 33 |
-func WriteLog(src io.Reader, dst io.Writer, format string, since time.Time) error {
|
|
| 34 |
- dec := json.NewDecoder(src) |
|
| 35 |
- l := &JSONLog{}
|
|
| 36 |
- for {
|
|
| 37 |
- l.Reset() |
|
| 38 |
- if err := dec.Decode(l); err != nil {
|
|
| 39 |
- if err == io.EOF {
|
|
| 40 |
- return nil |
|
| 41 |
- } |
|
| 42 |
- return err |
|
| 43 |
- } |
|
| 44 |
- if !since.IsZero() && l.Created.Before(since) {
|
|
| 45 |
- continue |
|
| 46 |
- } |
|
| 47 |
- |
|
| 48 |
- line, err := l.Format(format) |
|
| 49 |
- if err != nil {
|
|
| 50 |
- return err |
|
| 51 |
- } |
|
| 52 |
- if _, err := io.WriteString(dst, line); err != nil {
|
|
| 53 |
- return err |
|
| 54 |
- } |
|
| 55 |
- } |
|
| 56 |
-} |
| 57 | 32 |
deleted file mode 100644 |
| ... | ... |
@@ -1,157 +0,0 @@ |
| 1 |
-package jsonlog |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "bytes" |
|
| 5 |
- "encoding/json" |
|
| 6 |
- "io/ioutil" |
|
| 7 |
- "regexp" |
|
| 8 |
- "strconv" |
|
| 9 |
- "strings" |
|
| 10 |
- "testing" |
|
| 11 |
- "time" |
|
| 12 |
- |
|
| 13 |
- "github.com/docker/docker/pkg/timeutils" |
|
| 14 |
-) |
|
| 15 |
- |
|
| 16 |
-// Invalid json should return an error |
|
| 17 |
-func TestWriteLogWithInvalidJSON(t *testing.T) {
|
|
| 18 |
- json := strings.NewReader("Invalid json")
|
|
| 19 |
- w := bytes.NewBuffer(nil) |
|
| 20 |
- if err := WriteLog(json, w, "json", time.Time{}); err == nil {
|
|
| 21 |
- t.Fatalf("Expected an error, got [%v]", w.String())
|
|
| 22 |
- } |
|
| 23 |
-} |
|
| 24 |
- |
|
| 25 |
-// Any format is valid, it will just print it |
|
| 26 |
-func TestWriteLogWithInvalidFormat(t *testing.T) {
|
|
| 27 |
- testLine := "Line that thinks that it is log line from docker\n" |
|
| 28 |
- var buf bytes.Buffer |
|
| 29 |
- e := json.NewEncoder(&buf) |
|
| 30 |
- for i := 0; i < 35; i++ {
|
|
| 31 |
- e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()})
|
|
| 32 |
- } |
|
| 33 |
- w := bytes.NewBuffer(nil) |
|
| 34 |
- if err := WriteLog(&buf, w, "invalid format", time.Time{}); err != nil {
|
|
| 35 |
- t.Fatal(err) |
|
| 36 |
- } |
|
| 37 |
- res := w.String() |
|
| 38 |
- t.Logf("Result of WriteLog: %q", res)
|
|
| 39 |
- lines := strings.Split(strings.TrimSpace(res), "\n") |
|
| 40 |
- expression := "^invalid format Line that thinks that it is log line from docker$" |
|
| 41 |
- logRe := regexp.MustCompile(expression) |
|
| 42 |
- expectedLines := 35 |
|
| 43 |
- if len(lines) != expectedLines {
|
|
| 44 |
- t.Fatalf("Must be %v lines but got %d", expectedLines, len(lines))
|
|
| 45 |
- } |
|
| 46 |
- for _, l := range lines {
|
|
| 47 |
- if !logRe.MatchString(l) {
|
|
| 48 |
- t.Fatalf("Log line not in expected format [%v]: %q", expression, l)
|
|
| 49 |
- } |
|
| 50 |
- } |
|
| 51 |
-} |
|
| 52 |
- |
|
| 53 |
-// Having multiple Log/Stream element |
|
| 54 |
-func TestWriteLogWithMultipleStreamLog(t *testing.T) {
|
|
| 55 |
- testLine := "Line that thinks that it is log line from docker\n" |
|
| 56 |
- var buf bytes.Buffer |
|
| 57 |
- e := json.NewEncoder(&buf) |
|
| 58 |
- for i := 0; i < 35; i++ {
|
|
| 59 |
- e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()})
|
|
| 60 |
- } |
|
| 61 |
- w := bytes.NewBuffer(nil) |
|
| 62 |
- if err := WriteLog(&buf, w, "invalid format", time.Time{}); err != nil {
|
|
| 63 |
- t.Fatal(err) |
|
| 64 |
- } |
|
| 65 |
- res := w.String() |
|
| 66 |
- t.Logf("Result of WriteLog: %q", res)
|
|
| 67 |
- lines := strings.Split(strings.TrimSpace(res), "\n") |
|
| 68 |
- expression := "^invalid format Line that thinks that it is log line from docker$" |
|
| 69 |
- logRe := regexp.MustCompile(expression) |
|
| 70 |
- expectedLines := 35 |
|
| 71 |
- if len(lines) != expectedLines {
|
|
| 72 |
- t.Fatalf("Must be %v lines but got %d", expectedLines, len(lines))
|
|
| 73 |
- } |
|
| 74 |
- for _, l := range lines {
|
|
| 75 |
- if !logRe.MatchString(l) {
|
|
| 76 |
- t.Fatalf("Log line not in expected format [%v]: %q", expression, l)
|
|
| 77 |
- } |
|
| 78 |
- } |
|
| 79 |
-} |
|
| 80 |
- |
|
| 81 |
-// Write log with since after created, it won't print anything |
|
| 82 |
-func TestWriteLogWithDate(t *testing.T) {
|
|
| 83 |
- created, _ := time.Parse("YYYY-MM-dd", "2015-01-01")
|
|
| 84 |
- var buf bytes.Buffer |
|
| 85 |
- testLine := "Line that thinks that it is log line from docker\n" |
|
| 86 |
- jsonLog := JSONLog{Log: testLine, Stream: "stdout", Created: created}
|
|
| 87 |
- if err := json.NewEncoder(&buf).Encode(jsonLog); err != nil {
|
|
| 88 |
- t.Fatal(err) |
|
| 89 |
- } |
|
| 90 |
- w := bytes.NewBuffer(nil) |
|
| 91 |
- if err := WriteLog(&buf, w, "json", time.Now()); err != nil {
|
|
| 92 |
- t.Fatal(err) |
|
| 93 |
- } |
|
| 94 |
- res := w.String() |
|
| 95 |
- if res != "" {
|
|
| 96 |
- t.Fatalf("Expected empty log, got [%v]", res)
|
|
| 97 |
- } |
|
| 98 |
-} |
|
| 99 |
- |
|
| 100 |
-// Happy path :) |
|
| 101 |
-func TestWriteLog(t *testing.T) {
|
|
| 102 |
- testLine := "Line that thinks that it is log line from docker\n" |
|
| 103 |
- format := timeutils.RFC3339NanoFixed |
|
| 104 |
- logs := map[string][]string{
|
|
| 105 |
- "": {"35", "^Line that thinks that it is log line from docker$"},
|
|
| 106 |
- "json": {"1", `^{\"log\":\"Line that thinks that it is log line from docker\\n\",\"stream\":\"stdout\",\"time\":.{30,}\"}$`},
|
|
| 107 |
- // 30+ symbols, five more can come from system timezone |
|
| 108 |
- format: {"35", `.{30,} Line that thinks that it is log line from docker`},
|
|
| 109 |
- } |
|
| 110 |
- for givenFormat, expressionAndLines := range logs {
|
|
| 111 |
- expectedLines, _ := strconv.Atoi(expressionAndLines[0]) |
|
| 112 |
- expression := expressionAndLines[1] |
|
| 113 |
- var buf bytes.Buffer |
|
| 114 |
- e := json.NewEncoder(&buf) |
|
| 115 |
- for i := 0; i < 35; i++ {
|
|
| 116 |
- e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()})
|
|
| 117 |
- } |
|
| 118 |
- w := bytes.NewBuffer(nil) |
|
| 119 |
- if err := WriteLog(&buf, w, givenFormat, time.Time{}); err != nil {
|
|
| 120 |
- t.Fatal(err) |
|
| 121 |
- } |
|
| 122 |
- res := w.String() |
|
| 123 |
- t.Logf("Result of WriteLog: %q", res)
|
|
| 124 |
- lines := strings.Split(strings.TrimSpace(res), "\n") |
|
| 125 |
- if len(lines) != expectedLines {
|
|
| 126 |
- t.Fatalf("Must be %v lines but got %d", expectedLines, len(lines))
|
|
| 127 |
- } |
|
| 128 |
- logRe := regexp.MustCompile(expression) |
|
| 129 |
- for _, l := range lines {
|
|
| 130 |
- if !logRe.MatchString(l) {
|
|
| 131 |
- t.Fatalf("Log line not in expected format [%v]: %q", expression, l)
|
|
| 132 |
- } |
|
| 133 |
- } |
|
| 134 |
- } |
|
| 135 |
-} |
|
| 136 |
- |
|
| 137 |
-func BenchmarkWriteLog(b *testing.B) {
|
|
| 138 |
- var buf bytes.Buffer |
|
| 139 |
- e := json.NewEncoder(&buf) |
|
| 140 |
- testLine := "Line that thinks that it is log line from docker\n" |
|
| 141 |
- for i := 0; i < 30; i++ {
|
|
| 142 |
- e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()})
|
|
| 143 |
- } |
|
| 144 |
- r := bytes.NewReader(buf.Bytes()) |
|
| 145 |
- w := ioutil.Discard |
|
| 146 |
- format := timeutils.RFC3339NanoFixed |
|
| 147 |
- b.SetBytes(int64(r.Len())) |
|
| 148 |
- b.ResetTimer() |
|
| 149 |
- for i := 0; i < b.N; i++ {
|
|
| 150 |
- if err := WriteLog(r, w, format, time.Time{}); err != nil {
|
|
| 151 |
- b.Fatal(err) |
|
| 152 |
- } |
|
| 153 |
- b.StopTimer() |
|
| 154 |
- r.Seek(0, 0) |
|
| 155 |
- b.StartTimer() |
|
| 156 |
- } |
|
| 157 |
-} |
| ... | ... |
@@ -3,6 +3,7 @@ package tailfile |
| 3 | 3 |
import ( |
| 4 | 4 |
"bytes" |
| 5 | 5 |
"errors" |
| 6 |
+ "io" |
|
| 6 | 7 |
"os" |
| 7 | 8 |
) |
| 8 | 9 |
|
| ... | ... |
@@ -12,7 +13,7 @@ var eol = []byte("\n")
|
| 12 | 12 |
var ErrNonPositiveLinesNumber = errors.New("Lines number must be positive")
|
| 13 | 13 |
|
| 14 | 14 |
//TailFile returns last n lines of file f |
| 15 |
-func TailFile(f *os.File, n int) ([][]byte, error) {
|
|
| 15 |
+func TailFile(f io.ReadSeeker, n int) ([][]byte, error) {
|
|
| 16 | 16 |
if n <= 0 {
|
| 17 | 17 |
return nil, ErrNonPositiveLinesNumber |
| 18 | 18 |
} |
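For reviewers, a minimal sketch of how the widened `io.ReadSeeker` signature might be exercised; it assumes the package lives at `github.com/docker/docker/pkg/tailfile` and uses a hypothetical log path, neither of which is stated in this hunk:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/tailfile" // assumed import path
)

func main() {
	// Any io.ReadSeeker is now accepted, not just *os.File.
	f, err := os.Open("/var/log/app.log") // hypothetical log file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	lines, err := tailfile.TailFile(f, 10) // last 10 lines
	if err != nil {
		panic(err)
	}
	for _, line := range lines {
		fmt.Println(string(line))
	}
}
```

Since TailFile only needs Read and Seek, the same call also works against an in-memory reader (for example a strings.Reader) in tests.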
| 0 | 6 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,15 @@ |
| 0 |
+sudo: false |
|
| 1 |
+language: go |
|
| 2 |
+ |
|
| 3 |
+go: |
|
| 4 |
+ - 1.4.1 |
|
| 5 |
+ |
|
| 6 |
+before_script: |
|
| 7 |
+ - FIXED=$(go fmt ./... | wc -l); if [ $FIXED -gt 0 ]; then echo "gofmt - $FIXED file(s) not formatted correctly, please run gofmt to fix this." && exit 1; fi |
|
| 8 |
+ |
|
| 9 |
+os: |
|
| 10 |
+ - linux |
|
| 11 |
+ - osx |
|
| 12 |
+ |
|
| 13 |
+notifications: |
|
| 14 |
+ email: false |
| 0 | 15 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,34 @@ |
| 0 |
+# Names should be added to this file as |
|
| 1 |
+# Name or Organization <email address> |
|
| 2 |
+# The email address is not required for organizations. |
|
| 3 |
+ |
|
| 4 |
+# You can update this list using the following command: |
|
| 5 |
+# |
|
| 6 |
+# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
|
|
| 7 |
+ |
|
| 8 |
+# Please keep the list sorted. |
|
| 9 |
+ |
|
| 10 |
+Adrien Bustany <adrien@bustany.org> |
|
| 11 |
+Caleb Spare <cespare@gmail.com> |
|
| 12 |
+Case Nelson <case@teammating.com> |
|
| 13 |
+Chris Howey <howeyc@gmail.com> <chris@howey.me> |
|
| 14 |
+Christoffer Buchholz <christoffer.buchholz@gmail.com> |
|
| 15 |
+Dave Cheney <dave@cheney.net> |
|
| 16 |
+Francisco Souza <f@souza.cc> |
|
| 17 |
+Hari haran <hariharan.uno@gmail.com> |
|
| 18 |
+John C Barstow |
|
| 19 |
+Kelvin Fo <vmirage@gmail.com> |
|
| 20 |
+Matt Layher <mdlayher@gmail.com> |
|
| 21 |
+Nathan Youngman <git@nathany.com> |
|
| 22 |
+Paul Hammond <paul@paulhammond.org> |
|
| 23 |
+Pieter Droogendijk <pieter@binky.org.uk> |
|
| 24 |
+Pursuit92 <JoshChase@techpursuit.net> |
|
| 25 |
+Rob Figueiredo <robfig@gmail.com> |
|
| 26 |
+Soge Zhang <zhssoge@gmail.com> |
|
| 27 |
+Tilak Sharma <tilaks@google.com> |
|
| 28 |
+Travis Cline <travis.cline@gmail.com> |
|
| 29 |
+Tudor Golubenco <tudor.g@gmail.com> |
|
| 30 |
+Yukang <moorekang@gmail.com> |
|
| 31 |
+bronze1man <bronze1man@gmail.com> |
|
| 32 |
+debrando <denis.brandolini@gmail.com> |
|
| 33 |
+henrikedwards <henrik.edwards@gmail.com> |
| 0 | 34 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,263 @@ |
| 0 |
+# Changelog |
|
| 1 |
+ |
|
| 2 |
+## v1.2.0 / 2015-02-08 |
|
| 3 |
+ |
|
| 4 |
+* inotify: use epoll to wake up readEvents [#66](https://github.com/go-fsnotify/fsnotify/pull/66) (thanks @PieterD) |
|
| 5 |
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/go-fsnotify/fsnotify/pull/63) (thanks @PieterD) |
|
| 6 |
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/go-fsnotify/fsnotify/issues/59) |
|
| 7 |
+ |
|
| 8 |
+## v1.1.1 / 2015-02-05 |
|
| 9 |
+ |
|
| 10 |
+* inotify: Retry read on EINTR [#61](https://github.com/go-fsnotify/fsnotify/issues/61) (thanks @PieterD) |
|
| 11 |
+ |
|
| 12 |
+## v1.1.0 / 2014-12-12 |
|
| 13 |
+ |
|
| 14 |
+* kqueue: rework internals [#43](https://github.com/go-fsnotify/fsnotify/pull/43) |
|
| 15 |
+ * add low-level functions |
|
| 16 |
+ * only need to store flags on directories |
|
| 17 |
+ * less mutexes [#13](https://github.com/go-fsnotify/fsnotify/issues/13) |
|
| 18 |
+ * done can be an unbuffered channel |
|
| 19 |
+ * remove calls to os.NewSyscallError |
|
| 20 |
+* More efficient string concatenation for Event.String() [#52](https://github.com/go-fsnotify/fsnotify/pull/52) (thanks @mdlayher) |
|
| 21 |
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/go-fsnotify/fsnotify/issues/48) |
|
| 22 |
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51) |
|
| 23 |
+ |
|
| 24 |
+## v1.0.4 / 2014-09-07 |
|
| 25 |
+ |
|
| 26 |
+* kqueue: add dragonfly to the build tags. |
|
| 27 |
+* Rename source code files, rearrange code so exported APIs are at the top. |
|
| 28 |
+* Add done channel to example code. [#37](https://github.com/go-fsnotify/fsnotify/pull/37) (thanks @chenyukang) |
|
| 29 |
+ |
|
| 30 |
+## v1.0.3 / 2014-08-19 |
|
| 31 |
+ |
|
| 32 |
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/go-fsnotify/fsnotify/issues/36) |
|
| 33 |
+ |
|
| 34 |
+## v1.0.2 / 2014-08-17 |
|
| 35 |
+ |
|
| 36 |
+* [Fix] Missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso) |
|
| 37 |
+* [Fix] Make ./path and path equivalent. (thanks @zhsso) |
|
| 38 |
+ |
|
| 39 |
+## v1.0.0 / 2014-08-15 |
|
| 40 |
+ |
|
| 41 |
+* [API] Remove AddWatch on Windows, use Add. |
|
| 42 |
+* Improve documentation for exported identifiers. [#30](https://github.com/go-fsnotify/fsnotify/issues/30) |
|
| 43 |
+* Minor updates based on feedback from golint. |
|
| 44 |
+ |
|
| 45 |
+## dev / 2014-07-09 |
|
| 46 |
+ |
|
| 47 |
+* Moved to [github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify). |
|
| 48 |
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) |
|
| 49 |
+ |
|
| 50 |
+## dev / 2014-07-04 |
|
| 51 |
+ |
|
| 52 |
+* kqueue: fix incorrect mutex used in Close() |
|
| 53 |
+* Update example to demonstrate usage of Op. |
|
| 54 |
+ |
|
| 55 |
+## dev / 2014-06-28 |
|
| 56 |
+ |
|
| 57 |
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/go-fsnotify/fsnotify/issues/4) |
|
| 58 |
+* Fix for String() method on Event (thanks Alex Brainman) |
|
| 59 |
+* Don't build on Plan 9 or Solaris (thanks @4ad) |
|
| 60 |
+ |
|
| 61 |
+## dev / 2014-06-21 |
|
| 62 |
+ |
|
| 63 |
+* Events channel of type Event rather than *Event. |
|
| 64 |
+* [internal] use syscall constants directly for inotify and kqueue. |
|
| 65 |
+* [internal] kqueue: rename events to kevents and fileEvent to event. |
|
| 66 |
+ |
|
| 67 |
+## dev / 2014-06-19 |
|
| 68 |
+ |
|
| 69 |
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). |
|
| 70 |
+* [internal] remove cookie from Event struct (unused). |
|
| 71 |
+* [internal] Event struct has the same definition across every OS. |
|
| 72 |
+* [internal] remove internal watch and removeWatch methods. |
|
| 73 |
+ |
|
| 74 |
+## dev / 2014-06-12 |
|
| 75 |
+ |
|
| 76 |
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). |
|
| 77 |
+* [API] Pluralized channel names: Events and Errors. |
|
| 78 |
+* [API] Renamed FileEvent struct to Event. |
|
| 79 |
+* [API] Op constants replace methods like IsCreate(). |
|
| 80 |
+ |
|
| 81 |
+## dev / 2014-06-12 |
|
| 82 |
+ |
|
| 83 |
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) |
|
| 84 |
+ |
|
| 85 |
+## dev / 2014-05-23 |
|
| 86 |
+ |
|
| 87 |
+* [API] Remove current implementation of WatchFlags. |
|
| 88 |
+ * current implementation doesn't take advantage of OS for efficiency |
|
| 89 |
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes |
|
| 90 |
+ * no tests for the current implementation |
|
| 91 |
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) |
|
| 92 |
+ |
|
| 93 |
+## v0.9.3 / 2014-12-31 |
|
| 94 |
+ |
|
| 95 |
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51) |
|
| 96 |
+ |
|
| 97 |
+## v0.9.2 / 2014-08-17 |
|
| 98 |
+ |
|
| 99 |
+* [Backport] Fix missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso) |
|
| 100 |
+ |
|
| 101 |
+## v0.9.1 / 2014-06-12 |
|
| 102 |
+ |
|
| 103 |
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) |
|
| 104 |
+ |
|
| 105 |
+## v0.9.0 / 2014-01-17 |
|
| 106 |
+ |
|
| 107 |
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) |
|
| 108 |
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) |
|
| 109 |
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. |
|
| 110 |
+ |
|
| 111 |
+## v0.8.12 / 2013-11-13 |
|
| 112 |
+ |
|
| 113 |
+* [API] Remove FD_SET and friends from Linux adapter |
|
| 114 |
+ |
|
| 115 |
+## v0.8.11 / 2013-11-02 |
|
| 116 |
+ |
|
| 117 |
+* [Doc] Add Changelog [#72][] (thanks @nathany) |
|
| 118 |
+* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond) |
|
| 119 |
+ |
|
| 120 |
+## v0.8.10 / 2013-10-19 |
|
| 121 |
+ |
|
| 122 |
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) |
|
| 123 |
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) |
|
| 124 |
+* [Doc] specify OS-specific limits in README (thanks @debrando) |
|
| 125 |
+ |
|
| 126 |
+## v0.8.9 / 2013-09-08 |
|
| 127 |
+ |
|
| 128 |
+* [Doc] Contributing (thanks @nathany) |
|
| 129 |
+* [Doc] update package path in example code [#63][] (thanks @paulhammond) |
|
| 130 |
+* [Doc] GoCI badge in README (Linux only) [#60][] |
|
| 131 |
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) |
|
| 132 |
+ |
|
| 133 |
+## v0.8.8 / 2013-06-17 |
|
| 134 |
+ |
|
| 135 |
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) |
|
| 136 |
+ |
|
| 137 |
+## v0.8.7 / 2013-06-03 |
|
| 138 |
+ |
|
| 139 |
+* [API] Make syscall flags internal |
|
| 140 |
+* [Fix] inotify: ignore event changes |
|
| 141 |
+* [Fix] race in symlink test [#45][] (reported by @srid) |
|
| 142 |
+* [Fix] tests on Windows |
|
| 143 |
+* lower case error messages |
|
| 144 |
+ |
|
| 145 |
+## v0.8.6 / 2013-05-23 |
|
| 146 |
+ |
|
| 147 |
+* kqueue: Use EVT_ONLY flag on Darwin |
|
| 148 |
+* [Doc] Update README with full example |
|
| 149 |
+ |
|
| 150 |
+## v0.8.5 / 2013-05-09 |
|
| 151 |
+ |
|
| 152 |
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) |
|
| 153 |
+ |
|
| 154 |
+## v0.8.4 / 2013-04-07 |
|
| 155 |
+ |
|
| 156 |
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) |
|
| 157 |
+ |
|
| 158 |
+## v0.8.3 / 2013-03-13 |
|
| 159 |
+ |
|
| 160 |
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin) 
|
| 161 |
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) |
|
| 162 |
+ |
|
| 163 |
+## v0.8.2 / 2013-02-07 |
|
| 164 |
+ |
|
| 165 |
+* [Doc] add Authors |
|
| 166 |
+* [Fix] fix data races for map access [#29][] (thanks @fsouza) |
|
| 167 |
+ |
|
| 168 |
+## v0.8.1 / 2013-01-09 |
|
| 169 |
+ |
|
| 170 |
+* [Fix] Windows path separators |
|
| 171 |
+* [Doc] BSD License |
|
| 172 |
+ |
|
| 173 |
+## v0.8.0 / 2012-11-09 |
|
| 174 |
+ |
|
| 175 |
+* kqueue: directory watching improvements (thanks @vmirage) |
|
| 176 |
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) |
|
| 177 |
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) |
|
| 178 |
+ |
|
| 179 |
+## v0.7.4 / 2012-10-09 |
|
| 180 |
+ |
|
| 181 |
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) |
|
| 182 |
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) |
|
| 183 |
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) |
|
| 184 |
+* [Fix] kqueue: modify after recreation of file |
|
| 185 |
+ |
|
| 186 |
+## v0.7.3 / 2012-09-27 |
|
| 187 |
+ |
|
| 188 |
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) |
|
| 189 |
+* [Fix] kqueue: no longer get duplicate CREATE events |
|
| 190 |
+ |
|
| 191 |
+## v0.7.2 / 2012-09-01 |
|
| 192 |
+ |
|
| 193 |
+* kqueue: events for created directories |
|
| 194 |
+ |
|
| 195 |
+## v0.7.1 / 2012-07-14 |
|
| 196 |
+ |
|
| 197 |
+* [Fix] for renaming files |
|
| 198 |
+ |
|
| 199 |
+## v0.7.0 / 2012-07-02 |
|
| 200 |
+ |
|
| 201 |
+* [Feature] FSNotify flags |
|
| 202 |
+* [Fix] inotify: Added file name back to event path |
|
| 203 |
+ |
|
| 204 |
+## v0.6.0 / 2012-06-06 |
|
| 205 |
+ |
|
| 206 |
+* kqueue: watch files after directory created (thanks @tmc) |
|
| 207 |
+ |
|
| 208 |
+## v0.5.1 / 2012-05-22 |
|
| 209 |
+ |
|
| 210 |
+* [Fix] inotify: remove all watches before Close() |
|
| 211 |
+ |
|
| 212 |
+## v0.5.0 / 2012-05-03 |
|
| 213 |
+ |
|
| 214 |
+* [API] kqueue: return errors during watch instead of sending over channel |
|
| 215 |
+* kqueue: match symlink behavior on Linux |
|
| 216 |
+* inotify: add `DELETE_SELF` (requested by @taralx) |
|
| 217 |
+* [Fix] kqueue: handle EINTR (reported by @robfig) |
|
| 218 |
+* [Doc] Godoc example [#1][] (thanks @davecheney) |
|
| 219 |
+ |
|
| 220 |
+## v0.4.0 / 2012-03-30 |
|
| 221 |
+ |
|
| 222 |
+* Go 1 released: build with go tool |
|
| 223 |
+* [Feature] Windows support using winfsnotify |
|
| 224 |
+* Windows does not have attribute change notifications |
|
| 225 |
+* Roll attribute notifications into IsModify |
|
| 226 |
+ |
|
| 227 |
+## v0.3.0 / 2012-02-19 |
|
| 228 |
+ |
|
| 229 |
+* kqueue: add files when watch directory |
|
| 230 |
+ |
|
| 231 |
+## v0.2.0 / 2011-12-30 |
|
| 232 |
+ |
|
| 233 |
+* update to latest Go weekly code |
|
| 234 |
+ |
|
| 235 |
+## v0.1.0 / 2011-10-19 |
|
| 236 |
+ |
|
| 237 |
+* kqueue: add watch on file creation to match inotify |
|
| 238 |
+* kqueue: create file event |
|
| 239 |
+* inotify: ignore `IN_IGNORED` events |
|
| 240 |
+* event String() |
|
| 241 |
+* linux: common FileEvent functions |
|
| 242 |
+* initial commit |
|
| 243 |
+ |
|
| 244 |
+[#79]: https://github.com/howeyc/fsnotify/pull/79 |
|
| 245 |
+[#77]: https://github.com/howeyc/fsnotify/pull/77 |
|
| 246 |
+[#72]: https://github.com/howeyc/fsnotify/issues/72 |
|
| 247 |
+[#71]: https://github.com/howeyc/fsnotify/issues/71 |
|
| 248 |
+[#70]: https://github.com/howeyc/fsnotify/issues/70 |
|
| 249 |
+[#63]: https://github.com/howeyc/fsnotify/issues/63 |
|
| 250 |
+[#62]: https://github.com/howeyc/fsnotify/issues/62 |
|
| 251 |
+[#60]: https://github.com/howeyc/fsnotify/issues/60 |
|
| 252 |
+[#59]: https://github.com/howeyc/fsnotify/issues/59 |
|
| 253 |
+[#49]: https://github.com/howeyc/fsnotify/issues/49 |
|
| 254 |
+[#45]: https://github.com/howeyc/fsnotify/issues/45 |
|
| 255 |
+[#40]: https://github.com/howeyc/fsnotify/issues/40 |
|
| 256 |
+[#36]: https://github.com/howeyc/fsnotify/issues/36 |
|
| 257 |
+[#33]: https://github.com/howeyc/fsnotify/issues/33 |
|
| 258 |
+[#29]: https://github.com/howeyc/fsnotify/issues/29 |
|
| 259 |
+[#25]: https://github.com/howeyc/fsnotify/issues/25 |
|
| 260 |
+[#24]: https://github.com/howeyc/fsnotify/issues/24 |
|
| 261 |
+[#21]: https://github.com/howeyc/fsnotify/issues/21 |
|
| 262 |
+ |
| 0 | 263 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,77 @@ |
| 0 |
+# Contributing |
|
| 1 |
+ |
|
| 2 |
+## Issues |
|
| 3 |
+ |
|
| 4 |
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues). |
|
| 5 |
+* Please indicate the platform you are using fsnotify on. |
|
| 6 |
+* A code example to reproduce the problem is appreciated. |
|
| 7 |
+ |
|
| 8 |
+## Pull Requests |
|
| 9 |
+ |
|
| 10 |
+### Contributor License Agreement |
|
| 11 |
+ |
|
| 12 |
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/go-fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). |
|
| 13 |
+ |
|
| 14 |
+Please indicate that you have signed the CLA in your pull request. |
|
| 15 |
+ |
|
| 16 |
+### How fsnotify is Developed |
|
| 17 |
+ |
|
| 18 |
+* Development is done on feature branches. |
|
| 19 |
+* Tests are run on BSD, Linux, OS X and Windows. |
|
| 20 |
+* Pull requests are reviewed and [applied to master][am] using [hub][]. |
|
| 21 |
+ * Maintainers may modify or squash commits rather than asking contributors to. |
|
| 22 |
+* To issue a new release, the maintainers will: |
|
| 23 |
+ * Update the CHANGELOG |
|
| 24 |
+ * Tag a version, which will become available through gopkg.in. |
|
| 25 |
+ |
|
| 26 |
+### How to Fork |
|
| 27 |
+ |
|
| 28 |
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy. |
|
| 29 |
+ |
|
| 30 |
+1. Install from GitHub (`go get -u github.com/go-fsnotify/fsnotify`) |
|
| 31 |
+2. Create your feature branch (`git checkout -b my-new-feature`) |
|
| 32 |
+3. Ensure everything works and the tests pass (see below) |
|
| 33 |
+4. Commit your changes (`git commit -am 'Add some feature'`) |
|
| 34 |
+ |
|
| 35 |
+Contribute upstream: |
|
| 36 |
+ |
|
| 37 |
+1. Fork fsnotify on GitHub |
|
| 38 |
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) |
|
| 39 |
+3. Push to the branch (`git push fork my-new-feature`) |
|
| 40 |
+4. Create a new Pull Request on GitHub |
|
| 41 |
+ |
|
| 42 |
+This workflow is [thoroughly explained by Katrina Owen](https://blog.splice.com/contributing-open-source-git-repositories-go/). |
|
| 43 |
+ |
|
| 44 |
+### Testing |
|
| 45 |
+ |
|
| 46 |
+fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows. |
|
| 47 |
+ |
|
| 48 |
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. |
|
| 49 |
+ |
|
| 50 |
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. |
|
| 51 |
+ |
|
| 52 |
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) |
|
| 53 |
+* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. |
|
| 54 |
+* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) |
|
| 55 |
+* Once set up, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd go-fsnotify/fsnotify; go test'`. 
|
| 56 |
+* When you're done, you will want to halt or destroy the Vagrant boxes. |
|
| 57 |
+ |
|
| 58 |
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. |
|
| 59 |
+ |
|
| 60 |
+Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). |
|
| 61 |
+ |
|
| 62 |
+### Maintainers |
|
| 63 |
+ |
|
| 64 |
+Help maintaining fsnotify is welcome. To be a maintainer: |
|
| 65 |
+ |
|
| 66 |
+* Submit a pull request and sign the CLA as above. |
|
| 67 |
+* You must be able to run the test suite on Mac, Windows, Linux and BSD. |
|
| 68 |
+ |
|
| 69 |
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. |
|
| 70 |
+ |
|
| 71 |
+All code changes should be internal pull requests. |
|
| 72 |
+ |
|
| 73 |
+Releases are tagged using [Semantic Versioning](http://semver.org/). |
|
| 74 |
+ |
|
| 75 |
+[hub]: https://github.com/github/hub |
|
| 76 |
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs |
| 0 | 77 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,28 @@ |
| 0 |
+Copyright (c) 2012 The Go Authors. All rights reserved. |
|
| 1 |
+Copyright (c) 2012 fsnotify Authors. All rights reserved. |
|
| 2 |
+ |
|
| 3 |
+Redistribution and use in source and binary forms, with or without |
|
| 4 |
+modification, are permitted provided that the following conditions are |
|
| 5 |
+met: |
|
| 6 |
+ |
|
| 7 |
+ * Redistributions of source code must retain the above copyright |
|
| 8 |
+notice, this list of conditions and the following disclaimer. |
|
| 9 |
+ * Redistributions in binary form must reproduce the above |
|
| 10 |
+copyright notice, this list of conditions and the following disclaimer |
|
| 11 |
+in the documentation and/or other materials provided with the |
|
| 12 |
+distribution. |
|
| 13 |
+ * Neither the name of Google Inc. nor the names of its |
|
| 14 |
+contributors may be used to endorse or promote products derived from |
|
| 15 |
+this software without specific prior written permission. |
|
| 16 |
+ |
|
| 17 |
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
|
| 18 |
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
|
| 19 |
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
|
| 20 |
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
|
| 21 |
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|
| 22 |
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|
| 23 |
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
|
| 24 |
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
|
| 25 |
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
|
| 26 |
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
|
| 27 |
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 1 | 29 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,59 @@ |
| 0 |
+# File system notifications for Go |
|
| 1 |
+ |
|
| 2 |
+[Coverage](http://gocover.io/github.com/go-fsnotify/fsnotify) [GoDoc](https://godoc.org/gopkg.in/fsnotify.v1) 
|
| 3 |
+ |
|
| 4 |
+Go 1.3+ required. |
|
| 5 |
+ |
|
| 6 |
+Cross platform: Windows, Linux, BSD and OS X. |
|
| 7 |
+ |
|
| 8 |
+|Adapter |OS |Status | |
|
| 9 |
+|----------|----------|----------| |
|
| 10 |
+|inotify |Linux, Android\*|Supported [Build Status](https://travis-ci.org/go-fsnotify/fsnotify)| 
|
| 11 |
+|kqueue |BSD, OS X, iOS\*|Supported [Build Status](https://circleci.com/gh/go-fsnotify/fsnotify)| 
|
| 12 |
+|ReadDirectoryChangesW|Windows|Supported [Build Status](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| 
|
| 13 |
+|FSEvents |OS X |[Planned](https://github.com/go-fsnotify/fsnotify/issues/11)| |
|
| 14 |
+|FEN |Solaris 11 |[Planned](https://github.com/go-fsnotify/fsnotify/issues/12)| |
|
| 15 |
+|fanotify |Linux 2.6.37+ | | |
|
| 16 |
+|USN Journals |Windows |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/53)| |
|
| 17 |
+|Polling |*All* |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/9)| |
|
| 18 |
+ |
|
| 19 |
+\* Android and iOS are untested. |
|
| 20 |
+ |
|
| 21 |
+Please see [the documentation](https://godoc.org/gopkg.in/fsnotify.v1) for usage. Consult the [Wiki](https://github.com/go-fsnotify/fsnotify/wiki) for the FAQ and further information. |
|
| 22 |
+ |
|
| 23 |
+## API stability |
|
| 24 |
+ |
|
| 25 |
+Two major versions of fsnotify exist. |
|
| 26 |
+ |
|
| 27 |
+**[fsnotify.v0](https://gopkg.in/fsnotify.v0)** is API-compatible with [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify). Bugfixes *may* be backported, but I recommend upgrading to v1. |
|
| 28 |
+ |
|
| 29 |
+```go |
|
| 30 |
+import "gopkg.in/fsnotify.v0" |
|
| 31 |
+``` |
|
| 32 |
+ |
|
| 33 |
+\* Refer to the package as fsnotify (without the .v0 suffix). |
|
| 34 |
+ |
|
| 35 |
+**[fsnotify.v1](https://gopkg.in/fsnotify.v1)** provides [a new API](https://godoc.org/gopkg.in/fsnotify.v1) based on [this design document](http://goo.gl/MrYxyA). You can import v1 with: |
|
| 36 |
+ |
|
| 37 |
+```go |
|
| 38 |
+import "gopkg.in/fsnotify.v1" |
|
| 39 |
+``` |
|
| 40 |
+ |
|
| 41 |
+Further API changes are [planned](https://github.com/go-fsnotify/fsnotify/milestones), but a new major revision will be tagged, so you can depend on the v1 API. |
|
| 42 |
+ |
|
| 43 |
+**Master** may have unreleased changes. Use it to test the very latest code or when [contributing][], but don't expect it to remain API-compatible: |
|
| 44 |
+ |
|
| 45 |
+```go |
|
| 46 |
+import "github.com/go-fsnotify/fsnotify" |
|
| 47 |
+``` |
|
| 48 |
+ |
|
| 49 |
+## Contributing |
|
| 50 |
+ |
|
| 51 |
+Please refer to [CONTRIBUTING][] before opening an issue or pull request. |
|
| 52 |
+ |
|
| 53 |
+## Example |
|
| 54 |
+ |
|
| 55 |
+See [example_test.go](https://github.com/go-fsnotify/fsnotify/blob/master/example_test.go). |
|
| 56 |
+ |
|
| 57 |
+ |
|
| 58 |
+[contributing]: https://github.com/go-fsnotify/fsnotify/blob/master/CONTRIBUTING.md |
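The Example section above defers to example_test.go; as a hedged sketch of the v1 API being vendored here (NewWatcher, Add, the Events/Errors channels, and the Op bitmask), a typical event loop looks roughly like the following, with `/tmp/watched` standing in for a real path:

```go
package main

import (
	"log"

	"gopkg.in/fsnotify.v1" // v1 import path per the README above
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch a single directory or file (non-recursively).
	if err := watcher.Add("/tmp/watched"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event := <-watcher.Events:
			// Op is a bitmask; test individual operations with &.
			if event.Op&fsnotify.Write == fsnotify.Write {
				log.Println("modified:", event.Name)
			}
		case err := <-watcher.Errors:
			log.Println("error:", err)
		}
	}
}
```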
| 0 | 59 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,26 @@ |
| 0 |
+## OS X build (CircleCI iOS beta) |
|
| 1 |
+ |
|
| 2 |
+# Pretend like it's an Xcode project, at least to get it running. |
|
| 3 |
+machine: |
|
| 4 |
+ environment: |
|
| 5 |
+ XCODE_WORKSPACE: NotUsed.xcworkspace |
|
| 6 |
+ XCODE_SCHEME: NotUsed |
|
| 7 |
+ # This is where the go project is actually checked out to: |
|
| 8 |
+ CIRCLE_BUILD_DIR: $HOME/.go_project/src/github.com/go-fsnotify/fsnotify |
|
| 9 |
+ |
|
| 10 |
+dependencies: |
|
| 11 |
+ pre: |
|
| 12 |
+ - brew upgrade go |
|
| 13 |
+ |
|
| 14 |
+test: |
|
| 15 |
+ override: |
|
| 16 |
+ - go test ./... |
|
| 17 |
+ |
|
| 18 |
+# Idealized future config, eventually with cross-platform build matrix :-) |
|
| 19 |
+ |
|
| 20 |
+# machine: |
|
| 21 |
+# go: |
|
| 22 |
+# version: 1.4 |
|
| 23 |
+# os: |
|
| 24 |
+# - osx |
|
| 25 |
+# - linux |
| 0 | 26 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,62 @@ |
| 0 |
+// Copyright 2012 The Go Authors. All rights reserved. |
|
| 1 |
+// Use of this source code is governed by a BSD-style |
|
| 2 |
+// license that can be found in the LICENSE file. |
|
| 3 |
+ |
|
| 4 |
+// +build !plan9,!solaris |
|
| 5 |
+ |
|
| 6 |
+// Package fsnotify provides a platform-independent interface for file system notifications. |
|
| 7 |
+package fsnotify |
|
| 8 |
+ |
|
| 9 |
+import ( |
|
| 10 |
+ "bytes" |
|
| 11 |
+ "fmt" |
|
| 12 |
+) |
|
| 13 |
+ |
|
| 14 |
+// Event represents a single file system notification. |
|
| 15 |
+type Event struct {
|
|
| 16 |
+ Name string // Relative path to the file or directory. |
|
| 17 |
+ Op Op // File operation that triggered the event. |
|
| 18 |
+} |
|
| 19 |
+ |
|
| 20 |
+// Op describes a set of file operations. |
|
| 21 |
+type Op uint32 |
|
| 22 |
+ |
|
| 23 |
+// These are the generalized file operations that can trigger a notification. |
|
| 24 |
+const ( |
|
| 25 |
+ Create Op = 1 << iota |
|
| 26 |
+ Write |
|
| 27 |
+ Remove |
|
| 28 |
+ Rename |
|
| 29 |
+ Chmod |
|
| 30 |
+) |
|
| 31 |
+ |
|
| 32 |
+// String returns a string representation of the event in the form |
|
| 33 |
+// "file: REMOVE|WRITE|..." |
|
| 34 |
+func (e Event) String() string {
|
|
| 35 |
+ // Use a buffer for efficient string concatenation |
|
| 36 |
+ var buffer bytes.Buffer |
|
| 37 |
+ |
|
| 38 |
+ if e.Op&Create == Create {
|
|
| 39 |
+ buffer.WriteString("|CREATE")
|
|
| 40 |
+ } |
|
| 41 |
+ if e.Op&Remove == Remove {
|
|
| 42 |
+ buffer.WriteString("|REMOVE")
|
|
| 43 |
+ } |
|
| 44 |
+ if e.Op&Write == Write {
|
|
| 45 |
+ buffer.WriteString("|WRITE")
|
|
| 46 |
+ } |
|
| 47 |
+ if e.Op&Rename == Rename {
|
|
| 48 |
+ buffer.WriteString("|RENAME")
|
|
| 49 |
+ } |
|
| 50 |
+ if e.Op&Chmod == Chmod {
|
|
| 51 |
+ buffer.WriteString("|CHMOD")
|
|
| 52 |
+ } |
|
| 53 |
+ |
|
| 54 |
+ // If buffer remains empty, return no event names |
|
| 55 |
+ if buffer.Len() == 0 {
|
|
| 56 |
+ return fmt.Sprintf("%q: ", e.Name)
|
|
| 57 |
+ } |
|
| 58 |
+ |
|
| 59 |
+ // Return a list of event names, with leading pipe character stripped |
|
| 60 |
+ return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:])
|
|
| 61 |
+} |
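A small illustrative snippet (not part of the diff) of how the Op bitmask and the String method above compose; the file name is arbitrary:

```go
package main

import (
	"fmt"

	"gopkg.in/fsnotify.v1" // import path per the README above
)

func main() {
	e := fsnotify.Event{Name: "file.txt", Op: fsnotify.Create | fsnotify.Write}

	// Individual operations are tested with a bitwise AND.
	fmt.Println(e.Op&fsnotify.Write == fsnotify.Write) // true

	// String joins the set operation names: "file.txt": CREATE|WRITE
	fmt.Println(e)
}
```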
| 0 | 62 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,306 @@ |
| 0 |
+// Copyright 2010 The Go Authors. All rights reserved. |
|
| 1 |
+// Use of this source code is governed by a BSD-style |
|
| 2 |
+// license that can be found in the LICENSE file. |
|
| 3 |
+ |
|
| 4 |
+// +build linux |
|
| 5 |
+ |
|
| 6 |
+package fsnotify |
|
| 7 |
+ |
|
| 8 |
+import ( |
|
| 9 |
+ "errors" |
|
| 10 |
+ "fmt" |
|
| 11 |
+ "io" |
|
| 12 |
+ "os" |
|
| 13 |
+ "path/filepath" |
|
| 14 |
+ "strings" |
|
| 15 |
+ "sync" |
|
| 16 |
+ "syscall" |
|
| 17 |
+ "unsafe" |
|
| 18 |
+) |
|
| 19 |
+ |
|
| 20 |
+// Watcher watches a set of files, delivering events to a channel. |
|
| 21 |
+type Watcher struct {
|
|
| 22 |
+ Events chan Event |
|
| 23 |
+ Errors chan error |
|
| 24 |
+ mu sync.Mutex // Map access |
|
| 25 |
+ fd int |
|
| 26 |
+ poller *fdPoller |
|
| 27 |
+ watches map[string]*watch // Map of inotify watches (key: path) |
|
| 28 |
+ paths map[int]string // Map of watched paths (key: watch descriptor) |
|
| 29 |
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
|
| 30 |
+ doneResp chan struct{} // Channel to respond to Close
|
|
| 31 |
+} |
|
| 32 |
+ |
|
| 33 |
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. |
|
| 34 |
+func NewWatcher() (*Watcher, error) {
|
|
| 35 |
+ // Create inotify fd |
|
| 36 |
+ fd, errno := syscall.InotifyInit() |
|
| 37 |
+ if fd == -1 {
|
|
| 38 |
+ return nil, errno |
|
| 39 |
+ } |
|
| 40 |
+ // Create epoll |
|
| 41 |
+ poller, err := newFdPoller(fd) |
|
| 42 |
+ if err != nil {
|
|
| 43 |
+ syscall.Close(fd) |
|
| 44 |
+ return nil, err |
|
| 45 |
+ } |
|
| 46 |
+ w := &Watcher{
|
|
| 47 |
+ fd: fd, |
|
| 48 |
+ poller: poller, |
|
| 49 |
+ watches: make(map[string]*watch), |
|
| 50 |
+ paths: make(map[int]string), |
|
| 51 |
+ Events: make(chan Event), |
|
| 52 |
+ Errors: make(chan error), |
|
| 53 |
+ done: make(chan struct{}),
|
|
| 54 |
+ doneResp: make(chan struct{}),
|
|
| 55 |
+ } |
|
| 56 |
+ |
|
| 57 |
+ go w.readEvents() |
|
| 58 |
+ return w, nil |
|
| 59 |
+} |
|
| 60 |
+ |
|
| 61 |
+func (w *Watcher) isClosed() bool {
|
|
| 62 |
+ select {
|
|
| 63 |
+ case <-w.done: |
|
| 64 |
+ return true |
|
| 65 |
+ default: |
|
| 66 |
+ return false |
|
| 67 |
+ } |
|
| 68 |
+} |
|
| 69 |
+ |
|
| 70 |
+// Close removes all watches and closes the events channel. |
|
| 71 |
+func (w *Watcher) Close() error {
|
|
| 72 |
+ if w.isClosed() {
|
|
| 73 |
+ return nil |
|
| 74 |
+ } |
|
| 75 |
+ |
|
| 76 |
+ // Send 'close' signal to goroutine, and set the Watcher to closed. |
|
| 77 |
+ close(w.done) |
|
| 78 |
+ |
|
| 79 |
+ // Wake up goroutine |
|
| 80 |
+ w.poller.wake() |
|
| 81 |
+ |
|
| 82 |
+ // Wait for goroutine to close |
|
| 83 |
+ <-w.doneResp |
|
| 84 |
+ |
|
| 85 |
+ return nil |
|
| 86 |
+} |
|
| 87 |
+ |
|
| 88 |
+// Add starts watching the named file or directory (non-recursively). |
|
| 89 |
+func (w *Watcher) Add(name string) error {
|
|
| 90 |
+ name = filepath.Clean(name) |
|
| 91 |
+ if w.isClosed() {
|
|
| 92 |
+ return errors.New("inotify instance already closed")
|
|
| 93 |
+ } |
|
| 94 |
+ |
|
| 95 |
+ const agnosticEvents = syscall.IN_MOVED_TO | syscall.IN_MOVED_FROM | |
|
| 96 |
+ syscall.IN_CREATE | syscall.IN_ATTRIB | syscall.IN_MODIFY | |
|
| 97 |
+ syscall.IN_MOVE_SELF | syscall.IN_DELETE | syscall.IN_DELETE_SELF |
|
| 98 |
+ |
|
| 99 |
+ var flags uint32 = agnosticEvents |
|
| 100 |
+ |
|
| 101 |
+ w.mu.Lock() |
|
| 102 |
+ watchEntry, found := w.watches[name] |
|
| 103 |
+ w.mu.Unlock() |
|
| 104 |
+ if found {
|
|
| 105 |
+ watchEntry.flags |= flags |
|
| 106 |
+ flags |= syscall.IN_MASK_ADD |
|
| 107 |
+ } |
|
| 108 |
+ wd, errno := syscall.InotifyAddWatch(w.fd, name, flags) |
|
| 109 |
+ if wd == -1 {
|
|
| 110 |
+ return errno |
|
| 111 |
+ } |
|
| 112 |
+ |
|
| 113 |
+ w.mu.Lock() |
|
| 114 |
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
|
| 115 |
+ w.paths[wd] = name |
|
| 116 |
+ w.mu.Unlock() |
|
| 117 |
+ |
|
| 118 |
+ return nil |
|
| 119 |
+} |
|
| 120 |
+ |
|
| 121 |
+// Remove stops watching the named file or directory (non-recursively). |
|
| 122 |
+func (w *Watcher) Remove(name string) error {
|
|
| 123 |
+ name = filepath.Clean(name) |
|
| 124 |
+ |
|
| 125 |
+ // Fetch the watch. |
|
| 126 |
+ w.mu.Lock() |
|
| 127 |
+ defer w.mu.Unlock() |
|
| 128 |
+ watch, ok := w.watches[name] |
|
| 129 |
+ |
|
| 130 |
+ // Remove it from inotify. |
|
| 131 |
+ if !ok {
|
|
| 132 |
+ return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
|
|
| 133 |
+ } |
|
| 134 |
+ // inotify_rm_watch will return EINVAL if the file has been deleted; |
|
| 135 |
+ // the inotify will already have been removed. |
|
| 136 |
+ // That means we can safely delete it from our watches, whatever inotify_rm_watch does. |
|
| 137 |
+ delete(w.watches, name) |
|
| 138 |
+ success, errno := syscall.InotifyRmWatch(w.fd, watch.wd) |
|
| 139 |
+ if success == -1 {
|
|
| 140 |
+ // TODO: Perhaps it's not helpful to return an error here in every case. |
|
| 141 |
+ // the only two possible errors are: |
|
| 142 |
+ // EBADF, which happens when w.fd is not a valid file descriptor of any kind. |
|
| 143 |
+ // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. |
|
| 144 |
+ // Watch descriptors are invalidated when they are removed explicitly or implicitly; |
|
| 145 |
+ // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. |
|
| 146 |
+ return errno |
|
| 147 |
+ } |
|
| 148 |
+ return nil |
|
| 149 |
+} |
|
| 150 |
+ |
|
| 151 |
+type watch struct {
|
|
| 152 |
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) |
|
| 153 |
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) |
|
| 154 |
+} |
|
| 155 |
+ |
|
| 156 |
+// readEvents reads from the inotify file descriptor, converts the |
|
| 157 |
+// received events into Event objects and sends them via the Events channel |
|
| 158 |
+func (w *Watcher) readEvents() {
|
|
| 159 |
+ var ( |
|
| 160 |
+ buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events |
|
| 161 |
+ n int // Number of bytes read with read() |
|
| 162 |
+ errno error // Syscall errno |
|
| 163 |
+ ok bool // For poller.wait |
|
| 164 |
+ ) |
|
| 165 |
+ |
|
| 166 |
+ defer close(w.doneResp) |
|
| 167 |
+ defer close(w.Errors) |
|
| 168 |
+ defer close(w.Events) |
|
| 169 |
+ defer syscall.Close(w.fd) |
|
| 170 |
+ defer w.poller.close() |
|
| 171 |
+ |
|
| 172 |
+ for {
|
|
| 173 |
+ // See if we have been closed. |
|
| 174 |
+ if w.isClosed() {
|
|
| 175 |
+ return |
|
| 176 |
+ } |
|
| 177 |
+ |
|
| 178 |
+ ok, errno = w.poller.wait() |
|
| 179 |
+ if errno != nil {
|
|
| 180 |
+ select {
|
|
| 181 |
+ case w.Errors <- errno: |
|
| 182 |
+ case <-w.done: |
|
| 183 |
+ return |
|
| 184 |
+ } |
|
| 185 |
+ continue |
|
| 186 |
+ } |
|
| 187 |
+ |
|
| 188 |
+ if !ok {
|
|
| 189 |
+ continue |
|
| 190 |
+ } |
|
| 191 |
+ |
|
| 192 |
+ n, errno = syscall.Read(w.fd, buf[:]) |
|
| 193 |
+ // If a signal interrupted execution, see if we've been asked to close, and try again. |
|
| 194 |
+ // http://man7.org/linux/man-pages/man7/signal.7.html : |
|
| 195 |
+ // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" |
|
| 196 |
+ if errno == syscall.EINTR {
|
|
| 197 |
+ continue |
|
| 198 |
+ } |
|
| 199 |
+ |
|
| 200 |
+ // syscall.Read might have been woken up by Close. If so, we're done. |
|
| 201 |
+ if w.isClosed() {
|
|
| 202 |
+ return |
|
| 203 |
+ } |
|
| 204 |
+ |
|
| 205 |
+ if n < syscall.SizeofInotifyEvent {
|
|
| 206 |
+ var err error |
|
| 207 |
+ if n == 0 {
|
|
| 208 |
+ // If EOF is received. This should really never happen. |
|
| 209 |
+ err = io.EOF |
|
| 210 |
+ } else if n < 0 {
|
|
| 211 |
+ // If an error occurred while reading. 
|
| 212 |
+ err = errno |
|
| 213 |
+ } else {
|
|
| 214 |
+ // Read was too short. |
|
| 215 |
+ err = errors.New("notify: short read in readEvents()")
|
|
| 216 |
+ } |
|
| 217 |
+ select {
|
|
| 218 |
+ case w.Errors <- err: |
|
| 219 |
+ case <-w.done: |
|
| 220 |
+ return |
|
| 221 |
+ } |
|
| 222 |
+ continue |
|
| 223 |
+ } |
|
| 224 |
+ |
|
| 225 |
+ var offset uint32 |
|
| 226 |
+ // We don't know how many events we just read into the buffer |
|
| 227 |
+ // While the offset points to at least one whole event... |
|
| 228 |
+ for offset <= uint32(n-syscall.SizeofInotifyEvent) {
|
|
| 229 |
+ // Point "raw" to the event in the buffer |
|
| 230 |
+ raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset])) |
|
| 231 |
+ |
|
| 232 |
+ mask := uint32(raw.Mask) |
|
| 233 |
+ nameLen := uint32(raw.Len) |
|
| 234 |
+ // If the event happened to the watched directory or the watched file, the kernel |
|
| 235 |
+ // doesn't append the filename to the event, but we would like to always fill the |
|
| 236 |
+ // the "Name" field with a valid filename. We retrieve the path of the watch from |
|
| 237 |
+ // the "paths" map. |
|
| 238 |
+ w.mu.Lock() |
|
| 239 |
+ name := w.paths[int(raw.Wd)] |
|
| 240 |
+ w.mu.Unlock() |
|
| 241 |
+ if nameLen > 0 {
|
|
| 242 |
+ // Point "bytes" at the first byte of the filename |
|
| 243 |
+ bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent])) |
|
| 244 |
+ // The filename is padded with NULL bytes. TrimRight() gets rid of those. |
|
| 245 |
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") |
|
| 246 |
+ } |
|
| 247 |
+ |
|
| 248 |
+ event := newEvent(name, mask) |
|
| 249 |
+ |
|
| 250 |
+ // Send the events that are not ignored on the events channel |
|
| 251 |
+ if !event.ignoreLinux(mask) {
|
|
| 252 |
+ select {
|
|
| 253 |
+ case w.Events <- event: |
|
| 254 |
+ case <-w.done: |
|
| 255 |
+ return |
|
| 256 |
+ } |
|
| 257 |
+ } |
|
| 258 |
+ |
|
| 259 |
+ // Move to the next event in the buffer |
|
| 260 |
+ offset += syscall.SizeofInotifyEvent + nameLen |
|
| 261 |
+ } |
|
| 262 |
+ } |
|
| 263 |
+} |
|
| 264 |
+ |
|
| 265 |
+// Certain types of events can be "ignored" and not sent over the Events |
|
| 266 |
+// channel. Such as events marked ignore by the kernel, or MODIFY events |
|
| 267 |
+// against files that do not exist. |
|
| 268 |
+func (e *Event) ignoreLinux(mask uint32) bool {
|
|
| 269 |
+ // Ignore anything the inotify API says to ignore |
|
| 270 |
+ if mask&syscall.IN_IGNORED == syscall.IN_IGNORED {
|
|
| 271 |
+ return true |
|
| 272 |
+ } |
|
| 273 |
+ |
|
| 274 |
+ // If the event is not a DELETE or RENAME, the file must exist. |
|
| 275 |
+ // Otherwise the event is ignored. |
|
| 276 |
+ // *Note*: this was put in place because it was seen that a MODIFY |
|
| 277 |
+ // event was sent after the DELETE. This ignores that MODIFY and |
|
| 278 |
+ // assumes a DELETE will come or has come if the file doesn't exist. |
|
| 279 |
+ if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
|
|
| 280 |
+ _, statErr := os.Lstat(e.Name) |
|
| 281 |
+ return os.IsNotExist(statErr) |
|
| 282 |
+ } |
|
| 283 |
+ return false |
|
| 284 |
+} |
|
| 285 |
+ |
|
| 286 |
+// newEvent returns a platform-independent Event based on an inotify mask. 
|
| 287 |
+func newEvent(name string, mask uint32) Event {
|
|
| 288 |
+ e := Event{Name: name}
|
|
| 289 |
+ if mask&syscall.IN_CREATE == syscall.IN_CREATE || mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO {
|
|
| 290 |
+ e.Op |= Create |
|
| 291 |
+ } |
|
| 292 |
+ if mask&syscall.IN_DELETE_SELF == syscall.IN_DELETE_SELF || mask&syscall.IN_DELETE == syscall.IN_DELETE {
|
|
| 293 |
+ e.Op |= Remove |
|
| 294 |
+ } |
|
| 295 |
+ if mask&syscall.IN_MODIFY == syscall.IN_MODIFY {
|
|
| 296 |
+ e.Op |= Write |
|
| 297 |
+ } |
|
| 298 |
+ if mask&syscall.IN_MOVE_SELF == syscall.IN_MOVE_SELF || mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM {
|
|
| 299 |
+ e.Op |= Rename |
|
| 300 |
+ } |
|
| 301 |
+ if mask&syscall.IN_ATTRIB == syscall.IN_ATTRIB {
|
|
| 302 |
+ e.Op |= Chmod |
|
| 303 |
+ } |
|
| 304 |
+ return e |
|
| 305 |
+} |
| 0 | 306 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,186 @@ |
| 0 |
+// Copyright 2015 The Go Authors. All rights reserved. |
|
| 1 |
+// Use of this source code is governed by a BSD-style |
|
| 2 |
+// license that can be found in the LICENSE file. |
|
| 3 |
+ |
|
| 4 |
+// +build linux |
|
| 5 |
+ |
|
| 6 |
+package fsnotify |
|
| 7 |
+ |
|
| 8 |
+import ( |
|
| 9 |
+ "errors" |
|
| 10 |
+ "syscall" |
|
| 11 |
+) |
|
| 12 |
+ |
|
| 13 |
+type fdPoller struct {
|
|
| 14 |
+ fd int // File descriptor (as returned by the inotify_init() syscall) |
|
| 15 |
+ epfd int // Epoll file descriptor |
|
| 16 |
+ pipe [2]int // Pipe for waking up |
|
| 17 |
+} |
|
| 18 |
+ |
|
| 19 |
+func emptyPoller(fd int) *fdPoller {
|
|
| 20 |
+ poller := new(fdPoller) |
|
| 21 |
+ poller.fd = fd |
|
| 22 |
+ poller.epfd = -1 |
|
| 23 |
+ poller.pipe[0] = -1 |
|
| 24 |
+ poller.pipe[1] = -1 |
|
| 25 |
+ return poller |
|
| 26 |
+} |
|
| 27 |
+ |
|
| 28 |
+// Create a new inotify poller. |
|
| 29 |
+// This creates an inotify handler, and an epoll handler. |
|
| 30 |
+func newFdPoller(fd int) (*fdPoller, error) {
|
|
| 31 |
+ var errno error |
|
| 32 |
+ poller := emptyPoller(fd) |
|
| 33 |
+ defer func() {
|
|
| 34 |
+ if errno != nil {
|
|
| 35 |
+ poller.close() |
|
| 36 |
+ } |
|
| 37 |
+ }() |
|
| 38 |
+ poller.fd = fd |
|
| 39 |
+ |
|
| 40 |
+ // Create epoll fd |
|
| 41 |
+ poller.epfd, errno = syscall.EpollCreate(1) |
|
| 42 |
+ if poller.epfd == -1 {
|
|
| 43 |
+ return nil, errno |
|
| 44 |
+ } |
|
| 45 |
+ // Create pipe; pipe[0] is the read end, pipe[1] the write end. |
|
| 46 |
+ errno = syscall.Pipe2(poller.pipe[:], syscall.O_NONBLOCK) |
|
| 47 |
+ if errno != nil {
|
|
| 48 |
+ return nil, errno |
|
| 49 |
+ } |
|
| 50 |
+ |
|
| 51 |
+ // Register inotify fd with epoll |
|
| 52 |
+ event := syscall.EpollEvent{
|
|
| 53 |
+ Fd: int32(poller.fd), |
|
| 54 |
+ Events: syscall.EPOLLIN, |
|
| 55 |
+ } |
|
| 56 |
+ errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.fd, &event) |
|
| 57 |
+ if errno != nil {
|
|
| 58 |
+ return nil, errno |
|
| 59 |
+ } |
|
| 60 |
+ |
|
| 61 |
+ // Register pipe fd with epoll |
|
| 62 |
+ event = syscall.EpollEvent{
|
|
| 63 |
+ Fd: int32(poller.pipe[0]), |
|
| 64 |
+ Events: syscall.EPOLLIN, |
|
| 65 |
+ } |
|
| 66 |
+ errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.pipe[0], &event) |
|
| 67 |
+ if errno != nil {
|
|
| 68 |
+ return nil, errno |
|
| 69 |
+ } |
|
| 70 |
+ |
|
| 71 |
+ return poller, nil |
|
| 72 |
+} |
|
| 73 |
+ |
|
| 74 |
+// Wait using epoll. |
|
| 75 |
+// Returns true if something is ready to be read, |
|
| 76 |
+// false if there is not. |
|
| 77 |
+func (poller *fdPoller) wait() (bool, error) {
|
|
| 78 |
+ // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. |
|
| 79 |
+ // I don't know whether epoll_wait returns the number of events returned, |
|
| 80 |
+ // or the total number of events ready. |
|
| 81 |
+ // I decided to catch both by making the buffer one larger than the maximum. |
|
| 82 |
+ events := make([]syscall.EpollEvent, 7) |
|
| 83 |
+ for {
|
|
| 84 |
+ n, errno := syscall.EpollWait(poller.epfd, events, -1) |
|
| 85 |
+ if n == -1 {
|
|
| 86 |
+ if errno == syscall.EINTR {
|
|
| 87 |
+ continue |
|
| 88 |
+ } |
|
| 89 |
+ return false, errno |
|
| 90 |
+ } |
|
| 91 |
+ if n == 0 {
|
|
| 92 |
+ // If there are no events, try again. |
|
| 93 |
+ continue |
|
| 94 |
+ } |
|
| 95 |
+ if n > 6 {
|
|
| 96 |
+ // This should never happen. More events were returned than should be possible. |
|
| 97 |
+ return false, errors.New("epoll_wait returned more events than I know what to do with")
|
|
| 98 |
+ } |
|
| 99 |
+ ready := events[:n] |
|
| 100 |
+ epollhup := false |
|
| 101 |
+ epollerr := false |
|
| 102 |
+ epollin := false |
|
| 103 |
+ for _, event := range ready {
|
|
| 104 |
+ if event.Fd == int32(poller.fd) {
|
|
| 105 |
+ if event.Events&syscall.EPOLLHUP != 0 {
|
|
| 106 |
+ // This should not happen, but if it does, treat it as a wakeup. |
|
| 107 |
+ epollhup = true |
|
| 108 |
+ } |
|
| 109 |
+ if event.Events&syscall.EPOLLERR != 0 {
|
|
| 110 |
+ // If an error is waiting on the file descriptor, we should pretend |
|
| 111 |
+ // something is ready to read, and let syscall.Read pick up the error. |
|
| 112 |
+ epollerr = true |
|
| 113 |
+ } |
|
| 114 |
+ if event.Events&syscall.EPOLLIN != 0 {
|
|
| 115 |
+ // There is data to read. |
|
| 116 |
+ epollin = true |
|
| 117 |
+ } |
|
| 118 |
+ } |
|
| 119 |
+ if event.Fd == int32(poller.pipe[0]) {
|
|
| 120 |
+ if event.Events&syscall.EPOLLHUP != 0 {
|
|
| 121 |
+ // Write pipe descriptor was closed, by us. This means we're closing down the |
|
| 122 |
+ // watcher, and we should wake up. |
|
| 123 |
+ } |
|
| 124 |
+ if event.Events&syscall.EPOLLERR != 0 {
|
|
| 125 |
+ // If an error is waiting on the pipe file descriptor. |
|
| 126 |
+ // This is an absolute mystery, and should never ever happen. |
|
| 127 |
+ return false, errors.New("Error on the pipe descriptor.")
|
|
| 128 |
+ } |
|
| 129 |
+ if event.Events&syscall.EPOLLIN != 0 {
|
|
| 130 |
+ // This is a regular wakeup, so we have to clear the buffer. |
|
| 131 |
+ err := poller.clearWake() |
|
| 132 |
+ if err != nil {
|
|
| 133 |
+ return false, err |
|
| 134 |
+ } |
|
| 135 |
+ } |
|
| 136 |
+ } |
|
| 137 |
+ } |
|
| 138 |
+ |
|
| 139 |
+ if epollhup || epollerr || epollin {
|
|
| 140 |
+ return true, nil |
|
| 141 |
+ } |
|
| 142 |
+ return false, nil |
|
| 143 |
+ } |
|
| 144 |
+} |
|
| 145 |
+ |
|
| 146 |
+// Close the write end of the poller. |
|
| 147 |
+func (poller *fdPoller) wake() error {
|
|
| 148 |
+ buf := make([]byte, 1) |
|
| 149 |
+ n, errno := syscall.Write(poller.pipe[1], buf) |
|
| 150 |
+ if n == -1 {
|
|
| 151 |
+ if errno == syscall.EAGAIN {
|
|
| 152 |
+ // Buffer is full, poller will wake. |
|
| 153 |
+ return nil |
|
| 154 |
+ } |
|
| 155 |
+ return errno |
|
| 156 |
+ } |
|
| 157 |
+ return nil |
|
| 158 |
+} |
|
| 159 |
+ |
|
| 160 |
+func (poller *fdPoller) clearWake() error {
|
|
| 161 |
+ // You have to be woken up a LOT in order to get to 100! |
|
| 162 |
+ buf := make([]byte, 100) |
|
| 163 |
+ n, errno := syscall.Read(poller.pipe[0], buf) |
|
| 164 |
+ if n == -1 {
|
|
| 165 |
+ if errno == syscall.EAGAIN {
|
|
| 166 |
+ // Buffer is empty, someone else cleared our wake. |
|
| 167 |
+ return nil |
|
| 168 |
+ } |
|
| 169 |
+ return errno |
|
| 170 |
+ } |
|
| 171 |
+ return nil |
|
| 172 |
+} |
|
| 173 |
+ |
|
| 174 |
+// Close all poller file descriptors, but not the one passed to it. |
|
| 175 |
+func (poller *fdPoller) close() {
|
|
| 176 |
+ if poller.pipe[1] != -1 {
|
|
| 177 |
+ syscall.Close(poller.pipe[1]) |
|
| 178 |
+ } |
|
| 179 |
+ if poller.pipe[0] != -1 {
|
|
| 180 |
+ syscall.Close(poller.pipe[0]) |
|
| 181 |
+ } |
|
| 182 |
+ if poller.epfd != -1 {
|
|
| 183 |
+ syscall.Close(poller.epfd) |
|
| 184 |
+ } |
|
| 185 |
+} |
| 0 | 186 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,463 @@ |
| 0 |
+// Copyright 2010 The Go Authors. All rights reserved. |
|
| 1 |
+// Use of this source code is governed by a BSD-style |
|
| 2 |
+// license that can be found in the LICENSE file. |
|
| 3 |
+ |
|
| 4 |
+// +build freebsd openbsd netbsd dragonfly darwin |
|
| 5 |
+ |
|
| 6 |
+package fsnotify |
|
| 7 |
+ |
|
| 8 |
+import ( |
|
| 9 |
+ "errors" |
|
| 10 |
+ "fmt" |
|
| 11 |
+ "io/ioutil" |
|
| 12 |
+ "os" |
|
| 13 |
+ "path/filepath" |
|
| 14 |
+ "sync" |
|
| 15 |
+ "syscall" |
|
| 16 |
+ "time" |
|
| 17 |
+) |
|
| 18 |
+ |
|
| 19 |
+// Watcher watches a set of files, delivering events to a channel. |
|
| 20 |
+type Watcher struct {
|
|
| 21 |
+ Events chan Event |
|
| 22 |
+ Errors chan error |
|
| 23 |
+ done chan bool // Channel for sending a "quit message" to the reader goroutine |
|
| 24 |
+ |
|
| 25 |
+ kq int // File descriptor (as returned by the kqueue() syscall). |
|
| 26 |
+ |
|
| 27 |
+ mu sync.Mutex // Protects access to watcher data |
|
| 28 |
+ watches map[string]int // Map of watched file descriptors (key: path). |
|
| 29 |
+ externalWatches map[string]bool // Map of watches added by user of the library. |
|
| 30 |
+ dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. |
|
| 31 |
+ paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. |
|
| 32 |
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). |
|
| 33 |
+ isClosed bool // Set to true when Close() is first called |
|
| 34 |
+} |
|
| 35 |
+ |
|
| 36 |
+type pathInfo struct {
|
|
| 37 |
+ name string |
|
| 38 |
+ isDir bool |
|
| 39 |
+} |
|
| 40 |
+ |
|
| 41 |
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. |
|
| 42 |
+func NewWatcher() (*Watcher, error) {
|
|
| 43 |
+ kq, err := kqueue() |
|
| 44 |
+ if err != nil {
|
|
| 45 |
+ return nil, err |
|
| 46 |
+ } |
|
| 47 |
+ |
|
| 48 |
+ w := &Watcher{
|
|
| 49 |
+ kq: kq, |
|
| 50 |
+ watches: make(map[string]int), |
|
| 51 |
+ dirFlags: make(map[string]uint32), |
|
| 52 |
+ paths: make(map[int]pathInfo), |
|
| 53 |
+ fileExists: make(map[string]bool), |
|
| 54 |
+ externalWatches: make(map[string]bool), |
|
| 55 |
+ Events: make(chan Event), |
|
| 56 |
+ Errors: make(chan error), |
|
| 57 |
+ done: make(chan bool), |
|
| 58 |
+ } |
|
| 59 |
+ |
|
| 60 |
+ go w.readEvents() |
|
| 61 |
+ return w, nil |
|
| 62 |
+} |
|
| 63 |
+ |
|
| 64 |
+// Close removes all watches and closes the events channel. |
|
| 65 |
+func (w *Watcher) Close() error {
|
|
| 66 |
+ w.mu.Lock() |
|
| 67 |
+ if w.isClosed {
|
|
| 68 |
+ w.mu.Unlock() |
|
| 69 |
+ return nil |
|
| 70 |
+ } |
|
| 71 |
+ w.isClosed = true |
|
| 72 |
+ w.mu.Unlock() |
|
| 73 |
+ |
|
| 74 |
+ w.mu.Lock() |
|
| 75 |
+ ws := w.watches |
|
| 76 |
+ w.mu.Unlock() |
|
| 77 |
+ |
|
| 78 |
+ var err error |
|
| 79 |
+ for name := range ws {
|
|
| 80 |
+ if e := w.Remove(name); e != nil && err == nil {
|
|
| 81 |
+ err = e |
|
| 82 |
+ } |
|
| 83 |
+ } |
|
| 84 |
+ |
|
| 85 |
+ // Send "quit" message to the reader goroutine: |
|
| 86 |
+ w.done <- true |
|
| 87 |
+ |
|
| 88 |
+ return nil |
|
| 89 |
+} |
|
| 90 |
+ |
|
| 91 |
+// Add starts watching the named file or directory (non-recursively). |
|
| 92 |
+func (w *Watcher) Add(name string) error {
|
|
| 93 |
+ w.mu.Lock() |
|
| 94 |
+ w.externalWatches[name] = true |
|
| 95 |
+ w.mu.Unlock() |
|
| 96 |
+ return w.addWatch(name, noteAllEvents) |
|
| 97 |
+} |
|
| 98 |
+ |
|
| 99 |
+// Remove stops watching the named file or directory (non-recursively). 
|
| 100 |
+func (w *Watcher) Remove(name string) error {
|
|
| 101 |
+ name = filepath.Clean(name) |
|
| 102 |
+ w.mu.Lock() |
|
| 103 |
+ watchfd, ok := w.watches[name] |
|
| 104 |
+ w.mu.Unlock() |
|
| 105 |
+ if !ok {
|
|
| 106 |
+ return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
|
|
| 107 |
+ } |
|
| 108 |
+ |
|
| 109 |
+ const registerRemove = syscall.EV_DELETE |
|
| 110 |
+ if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
|
|
| 111 |
+ return err |
|
| 112 |
+ } |
|
| 113 |
+ |
|
| 114 |
+ syscall.Close(watchfd) |
|
| 115 |
+ |
|
| 116 |
+ w.mu.Lock() |
|
| 117 |
+ isDir := w.paths[watchfd].isDir |
|
| 118 |
+ delete(w.watches, name) |
|
| 119 |
+ delete(w.paths, watchfd) |
|
| 120 |
+ delete(w.dirFlags, name) |
|
| 121 |
+ w.mu.Unlock() |
|
| 122 |
+ |
|
| 123 |
+ // Find all watched paths that are in this directory that are not external. |
|
| 124 |
+ if isDir {
|
|
| 125 |
+ var pathsToRemove []string |
|
| 126 |
+ w.mu.Lock() |
|
| 127 |
+ for _, path := range w.paths {
|
|
| 128 |
+ wdir, _ := filepath.Split(path.name) |
|
| 129 |
+ if filepath.Clean(wdir) == name {
|
|
| 130 |
+ if !w.externalWatches[path.name] {
|
|
| 131 |
+ pathsToRemove = append(pathsToRemove, path.name) |
|
| 132 |
+ } |
|
| 133 |
+ } |
|
| 134 |
+ } |
|
| 135 |
+ w.mu.Unlock() |
|
| 136 |
+ for _, name := range pathsToRemove {
|
|
| 137 |
+ // Since these are internal, not much sense in propagating error |
|
| 138 |
+ // to the user, as that will just confuse them with an error about |
|
| 139 |
+ // a path they did not explicitly watch themselves. |
|
| 140 |
+ w.Remove(name) |
|
| 141 |
+ } |
|
| 142 |
+ } |
|
| 143 |
+ |
|
| 144 |
+ return nil |
|
| 145 |
+} |
|
| 146 |
+ |
|
| 147 |
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) |
|
| 148 |
+const noteAllEvents = syscall.NOTE_DELETE | syscall.NOTE_WRITE | syscall.NOTE_ATTRIB | syscall.NOTE_RENAME |
|
| 149 |
+ |
|
| 150 |
+// keventWaitTime to block on each read from kevent |
|
| 151 |
+var keventWaitTime = durationToTimespec(100 * time.Millisecond) |
|
| 152 |
+ |
|
| 153 |
+// addWatch adds name to the watched file set. |
|
| 154 |
+// The flags are interpreted as described in kevent(2). |
|
| 155 |
+func (w *Watcher) addWatch(name string, flags uint32) error {
|
|
| 156 |
+ var isDir bool |
|
| 157 |
+ // Make ./name and name equivalent |
|
| 158 |
+ name = filepath.Clean(name) |
|
| 159 |
+ |
|
| 160 |
+ w.mu.Lock() |
|
| 161 |
+ if w.isClosed {
|
|
| 162 |
+ w.mu.Unlock() |
|
| 163 |
+ return errors.New("kevent instance already closed")
|
|
| 164 |
+ } |
|
| 165 |
+ watchfd, alreadyWatching := w.watches[name] |
|
| 166 |
+ // We already have a watch, but we can still override flags. |
|
| 167 |
+ if alreadyWatching {
|
|
| 168 |
+ isDir = w.paths[watchfd].isDir |
|
| 169 |
+ } |
|
| 170 |
+ w.mu.Unlock() |
|
| 171 |
+ |
|
| 172 |
+ if !alreadyWatching {
|
|
| 173 |
+ fi, err := os.Lstat(name) |
|
| 174 |
+ if err != nil {
|
|
| 175 |
+ return err |
|
| 176 |
+ } |
|
| 177 |
+ |
|
| 178 |
+ // Don't watch sockets. |
|
| 179 |
+ if fi.Mode()&os.ModeSocket == os.ModeSocket {
|
|
| 180 |
+ return nil |
|
| 181 |
+ } |
|
| 182 |
+ |
|
| 183 |
+ // Follow Symlinks |
|
| 184 |
+ // Unfortunately, Linux can add bogus symlinks to watch list without |
|
| 185 |
+ // issue, and Windows can't do symlinks period (AFAIK). To maintain |
|
| 186 |
+ // consistency, we will act like everything is fine. There will simply |
|
| 187 |
+ // be no file events for broken symlinks. |
|
| 188 |
+ // Hence the returns of nil on errors. |
|
| 189 |
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
|
| 190 |
+ name, err = filepath.EvalSymlinks(name) |
|
| 191 |
+ if err != nil {
|
|
| 192 |
+ return nil |
|
| 193 |
+ } |
|
| 194 |
+ |
|
| 195 |
+ fi, err = os.Lstat(name) |
|
| 196 |
+ if err != nil {
|
|
| 197 |
+ return nil |
|
| 198 |
+ } |
|
| 199 |
+ } |
|
| 200 |
+ |
|
| 201 |
+ watchfd, err = syscall.Open(name, openMode, 0700) |
|
| 202 |
+ if watchfd == -1 {
|
|
| 203 |
+ return err |
|
| 204 |
+ } |
|
| 205 |
+ |
|
| 206 |
+ isDir = fi.IsDir() |
|
| 207 |
+ } |
|
| 208 |
+ |
|
| 209 |
+ const registerAdd = syscall.EV_ADD | syscall.EV_CLEAR | syscall.EV_ENABLE |
|
| 210 |
+ if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
|
|
| 211 |
+ syscall.Close(watchfd) |
|
| 212 |
+ return err |
|
| 213 |
+ } |
|
| 214 |
+ |
|
| 215 |
+ if !alreadyWatching {
|
|
| 216 |
+ w.mu.Lock() |
|
| 217 |
+ w.watches[name] = watchfd |
|
| 218 |
+ w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
|
|
| 219 |
+ w.mu.Unlock() |
|
| 220 |
+ } |
|
| 221 |
+ |
|
| 222 |
+ if isDir {
|
|
| 223 |
+ // Watch the directory if it has not been watched before, |
|
| 224 |
+ // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) |
|
| 225 |
+ w.mu.Lock() |
|
| 226 |
+ watchDir := (flags&syscall.NOTE_WRITE) == syscall.NOTE_WRITE && |
|
| 227 |
+ (!alreadyWatching || (w.dirFlags[name]&syscall.NOTE_WRITE) != syscall.NOTE_WRITE) |
|
| 228 |
+ // Store flags so this watch can be updated later |
|
| 229 |
+ w.dirFlags[name] = flags |
|
| 230 |
+ w.mu.Unlock() |
|
| 231 |
+ |
|
| 232 |
+ if watchDir {
|
|
| 233 |
+ if err := w.watchDirectoryFiles(name); err != nil {
|
|
| 234 |
+ return err |
|
| 235 |
+ } |
|
| 236 |
+ } |
|
| 237 |
+ } |
|
| 238 |
+ return nil |
|
| 239 |
+} |
|
| 240 |
+ |
|
| 241 |
+// readEvents reads from kqueue and converts the received kevents into |
|
| 242 |
+// Event values that it sends down the Events channel. |
|
| 243 |
+func (w *Watcher) readEvents() {
|
|
| 244 |
+ eventBuffer := make([]syscall.Kevent_t, 10) |
|
| 245 |
+ |
|
| 246 |
+ for {
|
|
| 247 |
+ // See if there is a message on the "done" channel |
|
| 248 |
+ select {
|
|
| 249 |
+ case <-w.done: |
|
| 250 |
+ err := syscall.Close(w.kq) |
|
| 251 |
+ if err != nil {
|
|
| 252 |
+ w.Errors <- err |
|
| 253 |
+ } |
|
| 254 |
+ close(w.Events) |
|
| 255 |
+ close(w.Errors) |
|
| 256 |
+ return |
|
| 257 |
+ default: |
|
| 258 |
+ } |
|
| 259 |
+ |
|
| 260 |
+ // Get new events |
|
| 261 |
+ kevents, err := read(w.kq, eventBuffer, &keventWaitTime) |
|
| 262 |
+ // EINTR is okay, the syscall was interrupted before timeout expired. |
|
| 263 |
+ if err != nil && err != syscall.EINTR {
|
|
| 264 |
+ w.Errors <- err |
|
| 265 |
+ continue |
|
| 266 |
+ } |
|
| 267 |
+ |
|
| 268 |
+ // Flush the events we received to the Events channel |
|
| 269 |
+ for len(kevents) > 0 {
|
|
| 270 |
+ kevent := &kevents[0] |
|
| 271 |
+ watchfd := int(kevent.Ident) |
|
| 272 |
+ mask := uint32(kevent.Fflags) |
|
| 273 |
+ w.mu.Lock() |
|
| 274 |
+ path := w.paths[watchfd] |
|
| 275 |
+ w.mu.Unlock() |
|
| 276 |
+ event := newEvent(path.name, mask) |
|
| 277 |
+ |
|
| 278 |
+ if path.isDir && !(event.Op&Remove == Remove) {
|
|
| 279 |
+ // Double check to make sure the directory exists. This can happen when |
|
| 280 |
+ // we do an rm -fr on a recursively watched folder and we receive a |
|
| 281 |
+ // modification event first but the folder has been deleted and later |
|
| 282 |
+ // receive the delete event |
|
| 283 |
+ if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
|
| 284 |
+ // mark it as a delete event |
|
| 285 |
+ event.Op |= Remove |
|
| 286 |
+ } |
|
| 287 |
+ } |
|
| 288 |
+ |
|
| 289 |
+ if event.Op&Rename == Rename || event.Op&Remove == Remove {
|
|
| 290 |
+ w.Remove(event.Name) |
|
| 291 |
+ w.mu.Lock() |
|
| 292 |
+ delete(w.fileExists, event.Name) |
|
| 293 |
+ w.mu.Unlock() |
|
| 294 |
+ } |
|
| 295 |
+ |
|
| 296 |
+ if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
|
| 297 |
+ w.sendDirectoryChangeEvents(event.Name) |
|
| 298 |
+ } else {
|
|
| 299 |
+ // Send the event on the Events channel |
|
| 300 |
+ w.Events <- event |
|
| 301 |
+ } |
|
| 302 |
+ |
|
| 303 |
+ if event.Op&Remove == Remove {
|
|
| 304 |
+ // Look for a file that may have overwritten this. |
|
| 305 |
+ // For example, mv f1 f2 will delete f2, then create f2. |
|
| 306 |
+ fileDir, _ := filepath.Split(event.Name) |
|
| 307 |
+ fileDir = filepath.Clean(fileDir) |
|
| 308 |
+ w.mu.Lock() |
|
| 309 |
+ _, found := w.watches[fileDir] |
|
| 310 |
+ w.mu.Unlock() |
|
| 311 |
+ if found {
|
|
| 312 |
+ // make sure the directory exists before we watch for changes. When we |
|
| 313 |
+ // do a recursive watch and perform rm -fr, the parent directory might |
|
| 314 |
+ // have gone missing, ignore the missing directory and let the |
|
| 315 |
+ // upcoming delete event remove the watch from the parent directory. |
|
| 316 |
+ if _, err := os.Lstat(fileDir); err == nil {
|
|
| 317 |
+ w.sendDirectoryChangeEvents(fileDir) |
|
| 318 |
+ // FIXME: should this be for events on files or just isDir? |
|
| 319 |
+ } |
|
| 320 |
+ } |
|
| 321 |
+ } |
|
| 322 |
+ |
|
| 323 |
+ // Move to next event |
|
| 324 |
+ kevents = kevents[1:] |
|
| 325 |
+ } |
|
| 326 |
+ } |
|
| 327 |
+} |
|
| 328 |
+ |
|
| 329 |
+// newEvent returns a platform-independent Event based on kqueue Fflags. |
|
| 330 |
+func newEvent(name string, mask uint32) Event {
|
|
| 331 |
+ e := Event{Name: name}
|
|
| 332 |
+ if mask&syscall.NOTE_DELETE == syscall.NOTE_DELETE {
|
|
| 333 |
+ e.Op |= Remove |
|
| 334 |
+ } |
|
| 335 |
+ if mask&syscall.NOTE_WRITE == syscall.NOTE_WRITE {
|
|
| 336 |
+ e.Op |= Write |
|
| 337 |
+ } |
|
| 338 |
+ if mask&syscall.NOTE_RENAME == syscall.NOTE_RENAME {
|
|
| 339 |
+ e.Op |= Rename |
|
| 340 |
+ } |
|
| 341 |
+ if mask&syscall.NOTE_ATTRIB == syscall.NOTE_ATTRIB {
|
|
| 342 |
+ e.Op |= Chmod |
|
| 343 |
+ } |
|
| 344 |
+ return e |
|
| 345 |
+} |
|
| 346 |
+ |
|
| 347 |
+func newCreateEvent(name string) Event {
|
|
| 348 |
+ return Event{Name: name, Op: Create}
|
|
| 349 |
+} |
|
| 350 |
+ |
|
| 351 |
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory |
|
| 352 |
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
|
| 353 |
+ // Get all files |
|
| 354 |
+ files, err := ioutil.ReadDir(dirPath) |
|
| 355 |
+ if err != nil {
|
|
| 356 |
+ return err |
|
| 357 |
+ } |
|
| 358 |
+ |
|
| 359 |
+ for _, fileInfo := range files {
|
|
| 360 |
+ filePath := filepath.Join(dirPath, fileInfo.Name()) |
|
| 361 |
+ if err := w.internalWatch(filePath, fileInfo); err != nil {
|
|
| 362 |
+ return err |
|
| 363 |
+ } |
|
| 364 |
+ |
|
| 365 |
+ w.mu.Lock() |
|
| 366 |
+ w.fileExists[filePath] = true |
|
| 367 |
+ w.mu.Unlock() |
|
| 368 |
+ } |
|
| 369 |
+ |
|
| 370 |
+ return nil |
|
| 371 |
+} |
|
| 372 |
+ |
|
| 373 |
+// sendDirectoryChangeEvents searches the directory for newly created files |
|
| 374 |
+// and sends them over the event channel. This functionality is to have |
|
| 375 |
+// the BSD version of fsnotify match Linux inotify which provides a |
|
| 376 |
+// create event for files created in a watched directory. |
|
| 377 |
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
|
| 378 |
+ // Get all files |
|
| 379 |
+ files, err := ioutil.ReadDir(dirPath) |
|
| 380 |
+ if err != nil {
|
|
| 381 |
+ w.Errors <- err |
|
| 382 |
+ } |
|
| 383 |
+ |
|
| 384 |
+ // Search for new files |
|
| 385 |
+ for _, fileInfo := range files {
|
|
| 386 |
+ filePath := filepath.Join(dirPath, fileInfo.Name()) |
|
| 387 |
+ w.mu.Lock() |
|
| 388 |
+ _, doesExist := w.fileExists[filePath] |
|
| 389 |
+ w.mu.Unlock() |
|
| 390 |
+ if !doesExist {
|
|
| 391 |
+ // Send create event |
|
| 392 |
+ w.Events <- newCreateEvent(filePath) |
|
| 393 |
+ } |
|
| 394 |
+ |
|
| 395 |
+ // like watchDirectoryFiles (but without doing another ReadDir) |
|
| 396 |
+ if err := w.internalWatch(filePath, fileInfo); err != nil {
|
|
| 397 |
+ return |
|
| 398 |
+ } |
|
| 399 |
+ |
|
| 400 |
+ w.mu.Lock() |
|
| 401 |
+ w.fileExists[filePath] = true |
|
| 402 |
+ w.mu.Unlock() |
|
| 403 |
+ } |
|
| 404 |
+} |
|
| 405 |
+ |
|
| 406 |
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) error {
|
|
| 407 |
+ if fileInfo.IsDir() {
|
|
| 408 |
+ // mimic Linux providing delete events for subdirectories |
|
| 409 |
+ // but preserve the flags used if currently watching subdirectory |
|
| 410 |
+ w.mu.Lock() |
|
| 411 |
+ flags := w.dirFlags[name] |
|
| 412 |
+ w.mu.Unlock() |
|
| 413 |
+ |
|
| 414 |
+ flags |= syscall.NOTE_DELETE |
|
| 415 |
+ return w.addWatch(name, flags) |
|
| 416 |
+ } |
|
| 417 |
+ |
|
| 418 |
+ // watch file to mimic Linux inotify |
|
| 419 |
+ return w.addWatch(name, noteAllEvents) |
|
| 420 |
+} |
|
| 421 |
+ |
|
| 422 |
+// kqueue creates a new kernel event queue and returns a descriptor. |
|
| 423 |
+func kqueue() (kq int, err error) {
|
|
| 424 |
+ kq, err = syscall.Kqueue() |
|
| 425 |
+ if kq == -1 {
|
|
| 426 |
+ return kq, err |
|
| 427 |
+ } |
|
| 428 |
+ return kq, nil |
|
| 429 |
+} |
|
| 430 |
+ |
|
| 431 |
+// register events with the queue |
|
| 432 |
+func register(kq int, fds []int, flags int, fflags uint32) error {
|
|
| 433 |
+ changes := make([]syscall.Kevent_t, len(fds)) |
|
| 434 |
+ |
|
| 435 |
+ for i, fd := range fds {
|
|
| 436 |
+ // SetKevent converts int to the platform-specific types: |
|
| 437 |
+ syscall.SetKevent(&changes[i], fd, syscall.EVFILT_VNODE, flags) |
|
| 438 |
+ changes[i].Fflags = fflags |
|
| 439 |
+ } |
|
| 440 |
+ |
|
| 441 |
+ // register the events |
|
| 442 |
+ success, err := syscall.Kevent(kq, changes, nil, nil) |
|
| 443 |
+ if success == -1 {
|
|
| 444 |
+ return err |
|
| 445 |
+ } |
|
| 446 |
+ return nil |
|
| 447 |
+} |
|
| 448 |
+ |
|
| 449 |
+// read retrieves pending events, or waits until an event occurs. |
|
| 450 |
+// A timeout of nil blocks indefinitely, while 0 polls the queue. |
|
| 451 |
+func read(kq int, events []syscall.Kevent_t, timeout *syscall.Timespec) ([]syscall.Kevent_t, error) {
|
|
| 452 |
+ n, err := syscall.Kevent(kq, nil, events, timeout) |
|
| 453 |
+ if err != nil {
|
|
| 454 |
+ return nil, err |
|
| 455 |
+ } |
|
| 456 |
+ return events[0:n], nil |
|
| 457 |
+} |
|
| 458 |
+ |
|
| 459 |
+// durationToTimespec prepares a timeout value |
|
| 460 |
+func durationToTimespec(d time.Duration) syscall.Timespec {
|
|
| 461 |
+ return syscall.NsecToTimespec(d.Nanoseconds()) |
|
| 462 |
+} |
| 0 | 463 |
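The kqueue-backed Watcher added above is driven entirely through its exported surface: NewWatcher, Add, Remove, Close, and the Events/Errors channels. The following is a minimal usage sketch, not part of this diff; the import path is an assumption and depends on where the package is vendored.

package main

import (
	"log"

	"github.com/go-fsnotify/fsnotify" // assumed import path; adjust to the vendored location
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch a directory non-recursively; on BSD/Darwin this opens a kqueue
	// descriptor for the directory and, via watchDirectoryFiles, one for
	// each file inside it.
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			if ev.Op&fsnotify.Write == fsnotify.Write {
				log.Println("modified:", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}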
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,11 @@ |
| 0 |
+// Copyright 2013 The Go Authors. All rights reserved. |
|
| 1 |
+// Use of this source code is governed by a BSD-style |
|
| 2 |
+// license that can be found in the LICENSE file. |
|
| 3 |
+ |
|
| 4 |
+// +build freebsd openbsd netbsd dragonfly |
|
| 5 |
+ |
|
| 6 |
+package fsnotify |
|
| 7 |
+ |
|
| 8 |
+import "syscall" |
|
| 9 |
+ |
|
| 10 |
+const openMode = syscall.O_NONBLOCK | syscall.O_RDONLY |
| 0 | 11 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,12 @@ |
| 0 |
+// Copyright 2013 The Go Authors. All rights reserved. |
|
| 1 |
+// Use of this source code is governed by a BSD-style |
|
| 2 |
+// license that can be found in the LICENSE file. |
|
| 3 |
+ |
|
| 4 |
+// +build darwin |
|
| 5 |
+ |
|
| 6 |
+package fsnotify |
|
| 7 |
+ |
|
| 8 |
+import "syscall" |
|
| 9 |
+ |
|
| 10 |
+// note: this constant is not defined on BSD |
|
| 11 |
+const openMode = syscall.O_EVTONLY |
| 0 | 12 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,561 @@ |
| 0 |
+// Copyright 2011 The Go Authors. All rights reserved. |
|
| 1 |
+// Use of this source code is governed by a BSD-style |
|
| 2 |
+// license that can be found in the LICENSE file. |
|
| 3 |
+ |
|
| 4 |
+// +build windows |
|
| 5 |
+ |
|
| 6 |
+package fsnotify |
|
| 7 |
+ |
|
| 8 |
+import ( |
|
| 9 |
+ "errors" |
|
| 10 |
+ "fmt" |
|
| 11 |
+ "os" |
|
| 12 |
+ "path/filepath" |
|
| 13 |
+ "runtime" |
|
| 14 |
+ "sync" |
|
| 15 |
+ "syscall" |
|
| 16 |
+ "unsafe" |
|
| 17 |
+) |
|
| 18 |
+ |
|
| 19 |
+// Watcher watches a set of files, delivering events to a channel. |
|
| 20 |
+type Watcher struct {
|
|
| 21 |
+ Events chan Event |
|
| 22 |
+ Errors chan error |
|
| 23 |
+ isClosed bool // Set to true when Close() is first called |
|
| 24 |
+ mu sync.Mutex // Map access |
|
| 25 |
+ port syscall.Handle // Handle to completion port |
|
| 26 |
+ watches watchMap // Map of watches (key: i-number) |
|
| 27 |
+ input chan *input // Inputs to the reader are sent on this channel |
|
| 28 |
+ quit chan chan<- error |
|
| 29 |
+} |
|
| 30 |
+ |
|
| 31 |
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. |
|
| 32 |
+func NewWatcher() (*Watcher, error) {
|
|
| 33 |
+ port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) |
|
| 34 |
+ if e != nil {
|
|
| 35 |
+ return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
|
| 36 |
+ } |
|
| 37 |
+ w := &Watcher{
|
|
| 38 |
+ port: port, |
|
| 39 |
+ watches: make(watchMap), |
|
| 40 |
+ input: make(chan *input, 1), |
|
| 41 |
+ Events: make(chan Event, 50), |
|
| 42 |
+ Errors: make(chan error), |
|
| 43 |
+ quit: make(chan chan<- error, 1), |
|
| 44 |
+ } |
|
| 45 |
+ go w.readEvents() |
|
| 46 |
+ return w, nil |
|
| 47 |
+} |
|
| 48 |
+ |
|
| 49 |
+// Close removes all watches and closes the events channel. |
|
| 50 |
+func (w *Watcher) Close() error {
|
|
| 51 |
+ if w.isClosed {
|
|
| 52 |
+ return nil |
|
| 53 |
+ } |
|
| 54 |
+ w.isClosed = true |
|
| 55 |
+ |
|
| 56 |
+ // Send "quit" message to the reader goroutine |
|
| 57 |
+ ch := make(chan error) |
|
| 58 |
+ w.quit <- ch |
|
| 59 |
+ if err := w.wakeupReader(); err != nil {
|
|
| 60 |
+ return err |
|
| 61 |
+ } |
|
| 62 |
+ return <-ch |
|
| 63 |
+} |
|
| 64 |
+ |
|
| 65 |
+// Add starts watching the named file or directory (non-recursively). |
|
| 66 |
+func (w *Watcher) Add(name string) error {
|
|
| 67 |
+ if w.isClosed {
|
|
| 68 |
+ return errors.New("watcher already closed")
|
|
| 69 |
+ } |
|
| 70 |
+ in := &input{
|
|
| 71 |
+ op: opAddWatch, |
|
| 72 |
+ path: filepath.Clean(name), |
|
| 73 |
+ flags: sys_FS_ALL_EVENTS, |
|
| 74 |
+ reply: make(chan error), |
|
| 75 |
+ } |
|
| 76 |
+ w.input <- in |
|
| 77 |
+ if err := w.wakeupReader(); err != nil {
|
|
| 78 |
+ return err |
|
| 79 |
+ } |
|
| 80 |
+ return <-in.reply |
|
| 81 |
+} |
|
| 82 |
+ |
|
| 83 |
+// Remove stops watching the named file or directory (non-recursively). |
|
| 84 |
+func (w *Watcher) Remove(name string) error {
|
|
| 85 |
+ in := &input{
|
|
| 86 |
+ op: opRemoveWatch, |
|
| 87 |
+ path: filepath.Clean(name), |
|
| 88 |
+ reply: make(chan error), |
|
| 89 |
+ } |
|
| 90 |
+ w.input <- in |
|
| 91 |
+ if err := w.wakeupReader(); err != nil {
|
|
| 92 |
+ return err |
|
| 93 |
+ } |
|
| 94 |
+ return <-in.reply |
|
| 95 |
+} |
|
| 96 |
+ |
|
| 97 |
+const ( |
|
| 98 |
+ // Options for AddWatch |
|
| 99 |
+ sys_FS_ONESHOT = 0x80000000 |
|
| 100 |
+ sys_FS_ONLYDIR = 0x1000000 |
|
| 101 |
+ |
|
| 102 |
+ // Events |
|
| 103 |
+ sys_FS_ACCESS = 0x1 |
|
| 104 |
+ sys_FS_ALL_EVENTS = 0xfff |
|
| 105 |
+ sys_FS_ATTRIB = 0x4 |
|
| 106 |
+ sys_FS_CLOSE = 0x18 |
|
| 107 |
+ sys_FS_CREATE = 0x100 |
|
| 108 |
+ sys_FS_DELETE = 0x200 |
|
| 109 |
+ sys_FS_DELETE_SELF = 0x400 |
|
| 110 |
+ sys_FS_MODIFY = 0x2 |
|
| 111 |
+ sys_FS_MOVE = 0xc0 |
|
| 112 |
+ sys_FS_MOVED_FROM = 0x40 |
|
| 113 |
+ sys_FS_MOVED_TO = 0x80 |
|
| 114 |
+ sys_FS_MOVE_SELF = 0x800 |
|
| 115 |
+ |
|
| 116 |
+ // Special events |
|
| 117 |
+ sys_FS_IGNORED = 0x8000 |
|
| 118 |
+ sys_FS_Q_OVERFLOW = 0x4000 |
|
| 119 |
+) |
|
| 120 |
+ |
|
| 121 |
+func newEvent(name string, mask uint32) Event {
|
|
| 122 |
+ e := Event{Name: name}
|
|
| 123 |
+ if mask&sys_FS_CREATE == sys_FS_CREATE || mask&sys_FS_MOVED_TO == sys_FS_MOVED_TO {
|
|
| 124 |
+ e.Op |= Create |
|
| 125 |
+ } |
|
| 126 |
+ if mask&sys_FS_DELETE == sys_FS_DELETE || mask&sys_FS_DELETE_SELF == sys_FS_DELETE_SELF {
|
|
| 127 |
+ e.Op |= Remove |
|
| 128 |
+ } |
|
| 129 |
+ if mask&sys_FS_MODIFY == sys_FS_MODIFY {
|
|
| 130 |
+ e.Op |= Write |
|
| 131 |
+ } |
|
| 132 |
+ if mask&sys_FS_MOVE == sys_FS_MOVE || mask&sys_FS_MOVE_SELF == sys_FS_MOVE_SELF || mask&sys_FS_MOVED_FROM == sys_FS_MOVED_FROM {
|
|
| 133 |
+ e.Op |= Rename |
|
| 134 |
+ } |
|
| 135 |
+ if mask&sys_FS_ATTRIB == sys_FS_ATTRIB {
|
|
| 136 |
+ e.Op |= Chmod |
|
| 137 |
+ } |
|
| 138 |
+ return e |
|
| 139 |
+} |
|
| 140 |
+ |
|
| 141 |
+const ( |
|
| 142 |
+ opAddWatch = iota |
|
| 143 |
+ opRemoveWatch |
|
| 144 |
+) |
|
| 145 |
+ |
|
| 146 |
+const ( |
|
| 147 |
+ provisional uint64 = 1 << (32 + iota) |
|
| 148 |
+) |
|
| 149 |
+ |
|
| 150 |
+type input struct {
|
|
| 151 |
+ op int |
|
| 152 |
+ path string |
|
| 153 |
+ flags uint32 |
|
| 154 |
+ reply chan error |
|
| 155 |
+} |
|
| 156 |
+ |
|
| 157 |
+type inode struct {
|
|
| 158 |
+ handle syscall.Handle |
|
| 159 |
+ volume uint32 |
|
| 160 |
+ index uint64 |
|
| 161 |
+} |
|
| 162 |
+ |
|
| 163 |
+type watch struct {
|
|
| 164 |
+ ov syscall.Overlapped |
|
| 165 |
+ ino *inode // i-number |
|
| 166 |
+ path string // Directory path |
|
| 167 |
+ mask uint64 // Directory itself is being watched with these notify flags |
|
| 168 |
+ names map[string]uint64 // Map of names being watched and their notify flags |
|
| 169 |
+ rename string // Remembers the old name while renaming a file |
|
| 170 |
+ buf [4096]byte |
|
| 171 |
+} |
|
| 172 |
+ |
|
| 173 |
+type indexMap map[uint64]*watch |
|
| 174 |
+type watchMap map[uint32]indexMap |
|
| 175 |
+ |
|
| 176 |
+func (w *Watcher) wakeupReader() error {
|
|
| 177 |
+ e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) |
|
| 178 |
+ if e != nil {
|
|
| 179 |
+ return os.NewSyscallError("PostQueuedCompletionStatus", e)
|
|
| 180 |
+ } |
|
| 181 |
+ return nil |
|
| 182 |
+} |
|
| 183 |
+ |
|
| 184 |
+func getDir(pathname string) (dir string, err error) {
|
|
| 185 |
+ attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) |
|
| 186 |
+ if e != nil {
|
|
| 187 |
+ return "", os.NewSyscallError("GetFileAttributes", e)
|
|
| 188 |
+ } |
|
| 189 |
+ if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
|
| 190 |
+ dir = pathname |
|
| 191 |
+ } else {
|
|
| 192 |
+ dir, _ = filepath.Split(pathname) |
|
| 193 |
+ dir = filepath.Clean(dir) |
|
| 194 |
+ } |
|
| 195 |
+ return |
|
| 196 |
+} |
|
| 197 |
+ |
|
| 198 |
+func getIno(path string) (ino *inode, err error) {
|
|
| 199 |
+ h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), |
|
| 200 |
+ syscall.FILE_LIST_DIRECTORY, |
|
| 201 |
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, |
|
| 202 |
+ nil, syscall.OPEN_EXISTING, |
|
| 203 |
+ syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) |
|
| 204 |
+ if e != nil {
|
|
| 205 |
+ return nil, os.NewSyscallError("CreateFile", e)
|
|
| 206 |
+ } |
|
| 207 |
+ var fi syscall.ByHandleFileInformation |
|
| 208 |
+ if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
|
|
| 209 |
+ syscall.CloseHandle(h) |
|
| 210 |
+ return nil, os.NewSyscallError("GetFileInformationByHandle", e)
|
|
| 211 |
+ } |
|
| 212 |
+ ino = &inode{
|
|
| 213 |
+ handle: h, |
|
| 214 |
+ volume: fi.VolumeSerialNumber, |
|
| 215 |
+ index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), |
|
| 216 |
+ } |
|
| 217 |
+ return ino, nil |
|
| 218 |
+} |
|
| 219 |
+ |
|
| 220 |
+// Must run within the I/O thread. |
|
| 221 |
+func (m watchMap) get(ino *inode) *watch {
|
|
| 222 |
+ if i := m[ino.volume]; i != nil {
|
|
| 223 |
+ return i[ino.index] |
|
| 224 |
+ } |
|
| 225 |
+ return nil |
|
| 226 |
+} |
|
| 227 |
+ |
|
| 228 |
+// Must run within the I/O thread. |
|
| 229 |
+func (m watchMap) set(ino *inode, watch *watch) {
|
|
| 230 |
+ i := m[ino.volume] |
|
| 231 |
+ if i == nil {
|
|
| 232 |
+ i = make(indexMap) |
|
| 233 |
+ m[ino.volume] = i |
|
| 234 |
+ } |
|
| 235 |
+ i[ino.index] = watch |
|
| 236 |
+} |
|
| 237 |
+ |
|
| 238 |
+// Must run within the I/O thread. |
|
| 239 |
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
|
| 240 |
+ dir, err := getDir(pathname) |
|
| 241 |
+ if err != nil {
|
|
| 242 |
+ return err |
|
| 243 |
+ } |
|
| 244 |
+ if flags&sys_FS_ONLYDIR != 0 && pathname != dir {
|
|
| 245 |
+ return nil |
|
| 246 |
+ } |
|
| 247 |
+ ino, err := getIno(dir) |
|
| 248 |
+ if err != nil {
|
|
| 249 |
+ return err |
|
| 250 |
+ } |
|
| 251 |
+ w.mu.Lock() |
|
| 252 |
+ watchEntry := w.watches.get(ino) |
|
| 253 |
+ w.mu.Unlock() |
|
| 254 |
+ if watchEntry == nil {
|
|
| 255 |
+ if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
|
|
| 256 |
+ syscall.CloseHandle(ino.handle) |
|
| 257 |
+ return os.NewSyscallError("CreateIoCompletionPort", e)
|
|
| 258 |
+ } |
|
| 259 |
+ watchEntry = &watch{
|
|
| 260 |
+ ino: ino, |
|
| 261 |
+ path: dir, |
|
| 262 |
+ names: make(map[string]uint64), |
|
| 263 |
+ } |
|
| 264 |
+ w.mu.Lock() |
|
| 265 |
+ w.watches.set(ino, watchEntry) |
|
| 266 |
+ w.mu.Unlock() |
|
| 267 |
+ flags |= provisional |
|
| 268 |
+ } else {
|
|
| 269 |
+ syscall.CloseHandle(ino.handle) |
|
| 270 |
+ } |
|
| 271 |
+ if pathname == dir {
|
|
| 272 |
+ watchEntry.mask |= flags |
|
| 273 |
+ } else {
|
|
| 274 |
+ watchEntry.names[filepath.Base(pathname)] |= flags |
|
| 275 |
+ } |
|
| 276 |
+ if err = w.startRead(watchEntry); err != nil {
|
|
| 277 |
+ return err |
|
| 278 |
+ } |
|
| 279 |
+ if pathname == dir {
|
|
| 280 |
+ watchEntry.mask &= ^provisional |
|
| 281 |
+ } else {
|
|
| 282 |
+ watchEntry.names[filepath.Base(pathname)] &= ^provisional |
|
| 283 |
+ } |
|
| 284 |
+ return nil |
|
| 285 |
+} |
|
| 286 |
+ |
|
| 287 |
+// Must run within the I/O thread. |
|
| 288 |
+func (w *Watcher) remWatch(pathname string) error {
|
|
| 289 |
+ dir, err := getDir(pathname) |
|
| 290 |
+ if err != nil {
|
|
| 291 |
+ return err |
|
| 292 |
+ } |
|
| 293 |
+ ino, err := getIno(dir) |
|
| 294 |
+ if err != nil {
|
|
| 295 |
+ return err |
|
| 296 |
+ } |
|
| 297 |
+ w.mu.Lock() |
|
| 298 |
+ watch := w.watches.get(ino) |
|
| 299 |
+ w.mu.Unlock() |
|
| 300 |
+ if watch == nil {
|
|
| 301 |
+ return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
|
|
| 302 |
+ } |
|
| 303 |
+ if pathname == dir {
|
|
| 304 |
+ w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED) |
|
| 305 |
+ watch.mask = 0 |
|
| 306 |
+ } else {
|
|
| 307 |
+ name := filepath.Base(pathname) |
|
| 308 |
+ w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED) |
|
| 309 |
+ delete(watch.names, name) |
|
| 310 |
+ } |
|
| 311 |
+ return w.startRead(watch) |
|
| 312 |
+} |
|
| 313 |
+ |
|
| 314 |
+// Must run within the I/O thread. |
|
| 315 |
+func (w *Watcher) deleteWatch(watch *watch) {
|
|
| 316 |
+ for name, mask := range watch.names {
|
|
| 317 |
+ if mask&provisional == 0 {
|
|
| 318 |
+ w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED) |
|
| 319 |
+ } |
|
| 320 |
+ delete(watch.names, name) |
|
| 321 |
+ } |
|
| 322 |
+ if watch.mask != 0 {
|
|
| 323 |
+ if watch.mask&provisional == 0 {
|
|
| 324 |
+ w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED) |
|
| 325 |
+ } |
|
| 326 |
+ watch.mask = 0 |
|
| 327 |
+ } |
|
| 328 |
+} |
|
| 329 |
+ |
|
| 330 |
+// Must run within the I/O thread. |
|
| 331 |
+func (w *Watcher) startRead(watch *watch) error {
|
|
| 332 |
+ if e := syscall.CancelIo(watch.ino.handle); e != nil {
|
|
| 333 |
+ w.Errors <- os.NewSyscallError("CancelIo", e)
|
|
| 334 |
+ w.deleteWatch(watch) |
|
| 335 |
+ } |
|
| 336 |
+ mask := toWindowsFlags(watch.mask) |
|
| 337 |
+ for _, m := range watch.names {
|
|
| 338 |
+ mask |= toWindowsFlags(m) |
|
| 339 |
+ } |
|
| 340 |
+ if mask == 0 {
|
|
| 341 |
+ if e := syscall.CloseHandle(watch.ino.handle); e != nil {
|
|
| 342 |
+ w.Errors <- os.NewSyscallError("CloseHandle", e)
|
|
| 343 |
+ } |
|
| 344 |
+ w.mu.Lock() |
|
| 345 |
+ delete(w.watches[watch.ino.volume], watch.ino.index) |
|
| 346 |
+ w.mu.Unlock() |
|
| 347 |
+ return nil |
|
| 348 |
+ } |
|
| 349 |
+ e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], |
|
| 350 |
+ uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) |
|
| 351 |
+ if e != nil {
|
|
| 352 |
+ err := os.NewSyscallError("ReadDirectoryChanges", e)
|
|
| 353 |
+ if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
|
| 354 |
+ // Watched directory was probably removed |
|
| 355 |
+ if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) {
|
|
| 356 |
+ if watch.mask&sys_FS_ONESHOT != 0 {
|
|
| 357 |
+ watch.mask = 0 |
|
| 358 |
+ } |
|
| 359 |
+ } |
|
| 360 |
+ err = nil |
|
| 361 |
+ } |
|
| 362 |
+ w.deleteWatch(watch) |
|
| 363 |
+ w.startRead(watch) |
|
| 364 |
+ return err |
|
| 365 |
+ } |
|
| 366 |
+ return nil |
|
| 367 |
+} |
|
| 368 |
+ |
|
| 369 |
+// readEvents reads from the I/O completion port, converts the |
|
| 370 |
+// received events into Event objects and sends them via the Events channel. |
|
| 371 |
+// Entry point to the I/O thread. |
|
| 372 |
+func (w *Watcher) readEvents() {
|
|
| 373 |
+ var ( |
|
| 374 |
+ n, key uint32 |
|
| 375 |
+ ov *syscall.Overlapped |
|
| 376 |
+ ) |
|
| 377 |
+ runtime.LockOSThread() |
|
| 378 |
+ |
|
| 379 |
+ for {
|
|
| 380 |
+ e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) |
|
| 381 |
+ watch := (*watch)(unsafe.Pointer(ov)) |
|
| 382 |
+ |
|
| 383 |
+ if watch == nil {
|
|
| 384 |
+ select {
|
|
| 385 |
+ case ch := <-w.quit: |
|
| 386 |
+ w.mu.Lock() |
|
| 387 |
+ var indexes []indexMap |
|
| 388 |
+ for _, index := range w.watches {
|
|
| 389 |
+ indexes = append(indexes, index) |
|
| 390 |
+ } |
|
| 391 |
+ w.mu.Unlock() |
|
| 392 |
+ for _, index := range indexes {
|
|
| 393 |
+ for _, watch := range index {
|
|
| 394 |
+ w.deleteWatch(watch) |
|
| 395 |
+ w.startRead(watch) |
|
| 396 |
+ } |
|
| 397 |
+ } |
|
| 398 |
+ var err error |
|
| 399 |
+ if e := syscall.CloseHandle(w.port); e != nil {
|
|
| 400 |
+ err = os.NewSyscallError("CloseHandle", e)
|
|
| 401 |
+ } |
|
| 402 |
+ close(w.Events) |
|
| 403 |
+ close(w.Errors) |
|
| 404 |
+ ch <- err |
|
| 405 |
+ return |
|
| 406 |
+ case in := <-w.input: |
|
| 407 |
+ switch in.op {
|
|
| 408 |
+ case opAddWatch: |
|
| 409 |
+ in.reply <- w.addWatch(in.path, uint64(in.flags)) |
|
| 410 |
+ case opRemoveWatch: |
|
| 411 |
+ in.reply <- w.remWatch(in.path) |
|
| 412 |
+ } |
|
| 413 |
+ default: |
|
| 414 |
+ } |
|
| 415 |
+ continue |
|
| 416 |
+ } |
|
| 417 |
+ |
|
| 418 |
+ switch e {
|
|
| 419 |
+ case syscall.ERROR_MORE_DATA: |
|
| 420 |
+ if watch == nil {
|
|
| 421 |
+ w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
|
| 422 |
+ } else {
|
|
| 423 |
+ // The i/o succeeded but the buffer is full. |
|
| 424 |
+ // In theory we should be building up a full packet. |
|
| 425 |
+ // In practice we can get away with just carrying on. |
|
| 426 |
+ n = uint32(unsafe.Sizeof(watch.buf)) |
|
| 427 |
+ } |
|
| 428 |
+ case syscall.ERROR_ACCESS_DENIED: |
|
| 429 |
+ // Watched directory was probably removed |
|
| 430 |
+ w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) |
|
| 431 |
+ w.deleteWatch(watch) |
|
| 432 |
+ w.startRead(watch) |
|
| 433 |
+ continue |
|
| 434 |
+ case syscall.ERROR_OPERATION_ABORTED: |
|
| 435 |
+ // CancelIo was called on this handle |
|
| 436 |
+ continue |
|
| 437 |
+ default: |
|
| 438 |
+ w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
|
| 439 |
+ continue |
|
| 440 |
+ case nil: |
|
| 441 |
+ } |
|
| 442 |
+ |
|
| 443 |
+ var offset uint32 |
|
| 444 |
+ for {
|
|
| 445 |
+ if n == 0 {
|
|
| 446 |
+ w.Events <- newEvent("", sys_FS_Q_OVERFLOW)
|
|
| 447 |
+ w.Errors <- errors.New("short read in readEvents()")
|
|
| 448 |
+ break |
|
| 449 |
+ } |
|
| 450 |
+ |
|
| 451 |
+ // Point "raw" to the event in the buffer |
|
| 452 |
+ raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) |
|
| 453 |
+ buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) |
|
| 454 |
+ name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) |
|
| 455 |
+ fullname := watch.path + "\\" + name |
|
| 456 |
+ |
|
| 457 |
+ var mask uint64 |
|
| 458 |
+ switch raw.Action {
|
|
| 459 |
+ case syscall.FILE_ACTION_REMOVED: |
|
| 460 |
+ mask = sys_FS_DELETE_SELF |
|
| 461 |
+ case syscall.FILE_ACTION_MODIFIED: |
|
| 462 |
+ mask = sys_FS_MODIFY |
|
| 463 |
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME: |
|
| 464 |
+ watch.rename = name |
|
| 465 |
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME: |
|
| 466 |
+ if watch.names[watch.rename] != 0 {
|
|
| 467 |
+ watch.names[name] |= watch.names[watch.rename] |
|
| 468 |
+ delete(watch.names, watch.rename) |
|
| 469 |
+ mask = sys_FS_MOVE_SELF |
|
| 470 |
+ } |
|
| 471 |
+ } |
|
| 472 |
+ |
|
| 473 |
+ sendNameEvent := func() {
|
|
| 474 |
+ if w.sendEvent(fullname, watch.names[name]&mask) {
|
|
| 475 |
+ if watch.names[name]&sys_FS_ONESHOT != 0 {
|
|
| 476 |
+ delete(watch.names, name) |
|
| 477 |
+ } |
|
| 478 |
+ } |
|
| 479 |
+ } |
|
| 480 |
+ if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
|
| 481 |
+ sendNameEvent() |
|
| 482 |
+ } |
|
| 483 |
+ if raw.Action == syscall.FILE_ACTION_REMOVED {
|
|
| 484 |
+ w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED) |
|
| 485 |
+ delete(watch.names, name) |
|
| 486 |
+ } |
|
| 487 |
+ if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
|
| 488 |
+ if watch.mask&sys_FS_ONESHOT != 0 {
|
|
| 489 |
+ watch.mask = 0 |
|
| 490 |
+ } |
|
| 491 |
+ } |
|
| 492 |
+ if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
|
| 493 |
+ fullname = watch.path + "\\" + watch.rename |
|
| 494 |
+ sendNameEvent() |
|
| 495 |
+ } |
|
| 496 |
+ |
|
| 497 |
+ // Move to the next event in the buffer |
|
| 498 |
+ if raw.NextEntryOffset == 0 {
|
|
| 499 |
+ break |
|
| 500 |
+ } |
|
| 501 |
+ offset += raw.NextEntryOffset |
|
| 502 |
+ |
|
| 503 |
+ // Error! |
|
| 504 |
+ if offset >= n {
|
|
| 505 |
+ w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
|
| 506 |
+ break |
|
| 507 |
+ } |
|
| 508 |
+ } |
|
| 509 |
+ |
|
| 510 |
+ if err := w.startRead(watch); err != nil {
|
|
| 511 |
+ w.Errors <- err |
|
| 512 |
+ } |
|
| 513 |
+ } |
|
| 514 |
+} |
|
| 515 |
+ |
|
| 516 |
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
|
| 517 |
+ if mask == 0 {
|
|
| 518 |
+ return false |
|
| 519 |
+ } |
|
| 520 |
+ event := newEvent(name, uint32(mask)) |
|
| 521 |
+ select {
|
|
| 522 |
+ case ch := <-w.quit: |
|
| 523 |
+ w.quit <- ch |
|
| 524 |
+ case w.Events <- event: |
|
| 525 |
+ } |
|
| 526 |
+ return true |
|
| 527 |
+} |
|
| 528 |
+ |
|
| 529 |
+func toWindowsFlags(mask uint64) uint32 {
|
|
| 530 |
+ var m uint32 |
|
| 531 |
+ if mask&sys_FS_ACCESS != 0 {
|
|
| 532 |
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS |
|
| 533 |
+ } |
|
| 534 |
+ if mask&sys_FS_MODIFY != 0 {
|
|
| 535 |
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE |
|
| 536 |
+ } |
|
| 537 |
+ if mask&sys_FS_ATTRIB != 0 {
|
|
| 538 |
+ m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES |
|
| 539 |
+ } |
|
| 540 |
+ if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 {
|
|
| 541 |
+ m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME |
|
| 542 |
+ } |
|
| 543 |
+ return m |
|
| 544 |
+} |
|
| 545 |
+ |
|
| 546 |
+func toFSnotifyFlags(action uint32) uint64 {
|
|
| 547 |
+ switch action {
|
|
| 548 |
+ case syscall.FILE_ACTION_ADDED: |
|
| 549 |
+ return sys_FS_CREATE |
|
| 550 |
+ case syscall.FILE_ACTION_REMOVED: |
|
| 551 |
+ return sys_FS_DELETE |
|
| 552 |
+ case syscall.FILE_ACTION_MODIFIED: |
|
| 553 |
+ return sys_FS_MODIFY |
|
| 554 |
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME: |
|
| 555 |
+ return sys_FS_MOVED_FROM |
|
| 556 |
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME: |
|
| 557 |
+ return sys_FS_MOVED_TO |
|
| 558 |
+ } |
|
| 559 |
+ return 0 |
|
| 560 |
+} |
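Each backend in this vendored package converts its OS-specific notification mask into the same portable Op bitmask through a newEvent helper (kqueue Fflags in the earlier file, FILE_ACTION_* values mapped via toFSnotifyFlags here), so callers only ever test Op bits. The sketch below shows that consumer-side check; it is illustrative only, and the import path is the same assumption as in the earlier example.

package main

import (
	"fmt"

	"github.com/go-fsnotify/fsnotify" // assumed import path; adjust to the vendored location
)

// describe reports the first Op bit that is set. An Event may carry several
// bits at once, so real consumers usually test each bit they care about.
func describe(ev fsnotify.Event) string {
	switch {
	case ev.Op&fsnotify.Create == fsnotify.Create:
		return "created: " + ev.Name
	case ev.Op&fsnotify.Remove == fsnotify.Remove:
		return "removed: " + ev.Name
	case ev.Op&fsnotify.Rename == fsnotify.Rename:
		return "renamed: " + ev.Name
	case ev.Op&fsnotify.Write == fsnotify.Write:
		return "written: " + ev.Name
	case ev.Op&fsnotify.Chmod == fsnotify.Chmod:
		return "chmod: " + ev.Name
	}
	return "no op bits set for " + ev.Name
}

func main() {
	fmt.Println(describe(fsnotify.Event{Name: "example.txt", Op: fsnotify.Create}))
}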