
Fix log readers can block writes indefinitely

Before this patch, a log reader could block all log writes (and other
operations) indefinitely simply by opening the log stream and never
consuming all the messages.

This happened because we protect the read stream from corruption by
ensuring there are no new writes while the log stream is consumed (and
caught up with the live entries).

We can get around this because log files are append-only: reads can be
limited to the section of the file that had already been written when
the log stream was first requested.

Now log writes are blocked only until all the files are opened, rather
than until the stream has been fully sent to the client.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>

Brian Goff authored on 2017/07/01 02:13:32
Showing 2 changed files
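To illustrate the core idea before the diff (a minimal sketch in plain Go, not code from this patch; snapshotReader is a hypothetical name): because the log file is append-only, a reader can record the file's size while holding the lock and wrap the descriptor in an io.SectionReader. Writes that land afterwards fall outside the bounded section, so the writer never has to wait for the reader to drain.

package main

import (
	"fmt"
	"io"
	"os"
)

// snapshotReader bounds reads to the bytes present in f right now;
// the patch's newSectionReader (below) does essentially the same thing.
func snapshotReader(f *os.File) (*io.SectionReader, error) {
	size, err := f.Seek(0, io.SeekEnd) // append-only file: size only grows
	if err != nil {
		return nil, err
	}
	return io.NewSectionReader(f, 0, size), nil
}

func main() {
	f, _ := os.CreateTemp("", "logdemo")
	defer os.Remove(f.Name())

	f.WriteString("line 1\n")
	r, _ := snapshotReader(f) // snapshot taken while the lock is held

	f.WriteString("line 2\n") // a later write; the writer was not blocked

	b, _ := io.ReadAll(r)
	fmt.Printf("%q\n", b) // "line 1\n" — the later write is outside the section
}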
daemon/logger/jsonfilelog/jsonfilelog.go
@@ -7,6 +7,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"io"
 	"strconv"
 	"sync"
 
@@ -15,6 +16,7 @@ import (
 	"github.com/docker/docker/daemon/logger/loggerutils"
 	"github.com/docker/docker/pkg/jsonlog"
 	"github.com/docker/go-units"
+	"github.com/pkg/errors"
 )
 
 // Name is the name of the file that the jsonlogger logs to.
@@ -22,12 +24,13 @@ const Name = "json-file"
 
 // JSONFileLogger is Logger implementation for default Docker logging.
 type JSONFileLogger struct {
-	buf     *bytes.Buffer
+	extra []byte // json-encoded extra attributes
+
+	mu      sync.RWMutex
+	buf     *bytes.Buffer // avoids allocating a new buffer on each call to `Log()`
+	closed  bool
 	writer  *loggerutils.RotateFileWriter
-	mu      sync.Mutex
 	readers map[*logger.LogWatcher]struct{} // stores the active log followers
-	extra   []byte                          // json-encoded extra attributes
-	closed  bool
 }
 
 func init() {
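The struct change above swaps sync.Mutex for sync.RWMutex, which is what lets several log readers open their file descriptors concurrently while only writers take the exclusive lock. A generic sketch of that reader/writer split, with hypothetical names (not patch code):

package demo

import "sync"

type guarded struct {
	mu   sync.RWMutex
	data []byte
}

// Writer path: exclusive, like Log().
func (g *guarded) write(p []byte) {
	g.mu.Lock()
	g.data = append(g.data, p...)
	g.mu.Unlock()
}

// Reader path: shared, like readLogs() while it opens files.
// Any number of these can run at once without blocking each other.
func (g *guarded) snapshot() []byte {
	g.mu.RLock()
	defer g.mu.RUnlock()
	return append([]byte(nil), g.data...)
}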
@@ -90,33 +93,45 @@ func New(info logger.Info) (logger.Logger, error) {
 
 // Log converts logger.Message to jsonlog.JSONLog and serializes it to file.
 func (l *JSONFileLogger) Log(msg *logger.Message) error {
+	l.mu.Lock()
+	err := writeMessageBuf(l.writer, msg, l.extra, l.buf)
+	l.buf.Reset()
+	l.mu.Unlock()
+	return err
+}
+
+func writeMessageBuf(w io.Writer, m *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error {
+	if err := marshalMessage(m, extra, buf); err != nil {
+		logger.PutMessage(m)
+		return err
+	}
+	logger.PutMessage(m)
+	if _, err := w.Write(buf.Bytes()); err != nil {
+		return errors.Wrap(err, "error writing log entry")
+	}
+	return nil
+}
+
+func marshalMessage(msg *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error {
 	timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp)
 	if err != nil {
 		return err
 	}
-	l.mu.Lock()
-	logline := msg.Line
+	logLine := msg.Line
 	if !msg.Partial {
-		logline = append(msg.Line, '\n')
+		logLine = append(msg.Line, '\n')
 	}
 	err = (&jsonlog.JSONLogs{
-		Log:      logline,
+		Log:      logLine,
 		Stream:   msg.Source,
 		Created:  timestamp,
-		RawAttrs: l.extra,
-	}).MarshalJSONBuf(l.buf)
-	logger.PutMessage(msg)
+		RawAttrs: extra,
+	}).MarshalJSONBuf(buf)
 	if err != nil {
-		l.mu.Unlock()
-		return err
+		return errors.Wrap(err, "error writing log message to buffer")
 	}
-
-	l.buf.WriteByte('\n')
-	_, err = l.writer.Write(l.buf.Bytes())
-	l.buf.Reset()
-	l.mu.Unlock()
-
-	return err
+	err = buf.WriteByte('\n')
+	return errors.Wrap(err, "error finalizing log buffer")
 }
 
 // ValidateLogOpt looks for json specific log options max-file & max-size.
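Note how the rewritten Log() narrows the critical section: the shared buffer is only touched under the lock and is always reset afterwards, while the marshalling helpers take their inputs explicitly and hold no lock at all. A minimal sketch of the serialize-under-lock pattern, with hypothetical names (not patch code):

package demo

import (
	"bytes"
	"io"
	"sync"
)

type bufferedWriter struct {
	mu  sync.Mutex
	buf bytes.Buffer
	w   io.Writer
}

func (b *bufferedWriter) writeRecord(rec []byte) error {
	b.mu.Lock()
	defer b.mu.Unlock()
	defer b.buf.Reset() // reuse the buffer on every call, even on error

	b.buf.Write(rec)
	b.buf.WriteByte('\n')
	_, err := b.w.Write(b.buf.Bytes())
	return err
}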
daemon/logger/jsonfilelog/read.go
@@ -3,7 +3,6 @@ package jsonfilelog
 import (
 	"bytes"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -18,6 +17,7 @@ import (
 	"github.com/docker/docker/pkg/filenotify"
 	"github.com/docker/docker/pkg/jsonlog"
 	"github.com/docker/docker/pkg/tailfile"
+	"github.com/pkg/errors"
 )
 
 const maxJSONDecodeRetry = 20000
@@ -48,10 +48,11 @@ func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
 func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
 	defer close(logWatcher.Msg)
 
-	// lock so the read stream doesn't get corrupted due to rotations or other log data written while we read
+	// lock so the read stream doesn't get corrupted due to rotations or other log data written while we open these files
 	// This will block writes!!!
-	l.mu.Lock()
+	l.mu.RLock()
 
+	// TODO it would be nice to move a lot of this reader implementation to the rotate logger object
 	pth := l.writer.LogPath()
 	var files []io.ReadSeeker
 	for i := l.writer.MaxFiles(); i > 1; i-- {
@@ -59,25 +60,36 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
 		if err != nil {
 			if !os.IsNotExist(err) {
 				logWatcher.Err <- err
-				break
+				l.mu.RUnlock()
+				return
 			}
 			continue
 		}
 		defer f.Close()
-
 		files = append(files, f)
 	}
 
 	latestFile, err := os.Open(pth)
 	if err != nil {
-		logWatcher.Err <- err
-		l.mu.Unlock()
+		logWatcher.Err <- errors.Wrap(err, "error opening latest log file")
+		l.mu.RUnlock()
 		return
 	}
 	defer latestFile.Close()
 
+	latestChunk, err := newSectionReader(latestFile)
+
+	// Now we have the reader sectioned, all fd's opened, we can unlock.
+	// New writes/rotates will not affect seeking through these files
+	l.mu.RUnlock()
+
+	if err != nil {
+		logWatcher.Err <- err
+		return
+	}
+
 	if config.Tail != 0 {
-		tailer := multireader.MultiReadSeeker(append(files, latestFile)...)
+		tailer := multireader.MultiReadSeeker(append(files, latestChunk)...)
 		tailFile(tailer, logWatcher, config.Tail, config.Since)
 	}
 
@@ -89,19 +101,14 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
 	}
 
 	if !config.Follow || l.closed {
-		l.mu.Unlock()
 		return
 	}
 
-	if config.Tail >= 0 {
-		latestFile.Seek(0, os.SEEK_END)
-	}
-
 	notifyRotate := l.writer.NotifyRotate()
 	defer l.writer.NotifyRotateEvict(notifyRotate)
 
+	l.mu.Lock()
 	l.readers[logWatcher] = struct{}{}
-
 	l.mu.Unlock()
 
 	followLogs(latestFile, logWatcher, notifyRotate, config.Since)
@@ -111,6 +118,16 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
 	l.mu.Unlock()
 }
 
+func newSectionReader(f *os.File) (*io.SectionReader, error) {
+	// seek to the end to get the size
+	// we'll leave this at the end of the file since section reader does not advance the reader
+	size, err := f.Seek(0, os.SEEK_END)
+	if err != nil {
+		return nil, errors.Wrap(err, "error getting current file size")
+	}
+	return io.NewSectionReader(f, 0, size), nil
+}
+
 func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
 	var rdr io.Reader
 	rdr = f
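One subtlety behind the comment in newSectionReader: io.SectionReader reads through ReadAt with its own internal offset, so it never moves the underlying *os.File's seek position. Leaving latestFile seeked to the end is therefore harmless for the follower, which reuses the same descriptor. A quick standard-library illustration (not patch code):

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	f, _ := os.CreateTemp("", "sectiondemo")
	defer os.Remove(f.Name())
	f.WriteString("hello world") // f's offset is now 11

	sr := io.NewSectionReader(f, 0, 5) // bounded view over bytes [0, 5)

	buf := make([]byte, 5)
	sr.Read(buf) // served via ReadAt; f's offset is untouched

	pos, _ := f.Seek(0, io.SeekCurrent)
	fmt.Println(string(buf), pos) // "hello 11"
}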