Signed-off-by: Yanqiang Miao <miao.yanqiang@zte.com.cn>
'docker logs' can read from compressed files
Signed-off-by: Yanqiang Miao <miao.yanqiang@zte.com.cn>
Add Metadata to the gzip header, optimize 'readlog'
Signed-off-by: Yanqiang Miao <miao.yanqiang@zte.com.cn>
| ... | ... |
@@ -49,6 +49,9 @@ func New(info logger.Info) (logger.Logger, error) {
|
| 49 | 49 |
if err != nil {
|
| 50 | 50 |
return nil, err |
| 51 | 51 |
} |
| 52 |
+ if capval <= 0 {
|
|
| 53 |
+ return nil, fmt.Errorf("max-size should be a positive number")
|
|
| 54 |
+ } |
|
| 52 | 55 |
} |
| 53 | 56 |
var maxFiles = 1 |
| 54 | 57 |
if maxFileString, ok := info.Config["max-file"]; ok {
|
| ... | ... |
@@ -62,6 +65,18 @@ func New(info logger.Info) (logger.Logger, error) {
|
| 62 | 62 |
} |
| 63 | 63 |
} |
| 64 | 64 |
|
| 65 |
+ var compress bool |
|
| 66 |
+ if compressString, ok := info.Config["compress"]; ok {
|
|
| 67 |
+ var err error |
|
| 68 |
+ compress, err = strconv.ParseBool(compressString) |
|
| 69 |
+ if err != nil {
|
|
| 70 |
+ return nil, err |
|
| 71 |
+ } |
|
| 72 |
+ if compress && (maxFiles == 1 || capval == -1) {
|
|
| 73 |
+ return nil, fmt.Errorf("compress cannot be true when max-file is less than 2 or max-size is not set")
|
|
| 74 |
+ } |
|
| 75 |
+ } |
|
| 76 |
+ |
|
| 65 | 77 |
attrs, err := info.ExtraAttributes(nil) |
| 66 | 78 |
if err != nil {
|
| 67 | 79 |
return nil, err |
| ... | ... |
@@ -95,7 +110,7 @@ func New(info logger.Info) (logger.Logger, error) {
|
| 95 | 95 |
return b, nil |
| 96 | 96 |
} |
| 97 | 97 |
|
| 98 |
- writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, marshalFunc, decodeFunc, 0640) |
|
| 98 |
+ writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, compress, marshalFunc, decodeFunc, 0640) |
|
| 99 | 99 |
if err != nil {
|
| 100 | 100 |
return nil, err |
| 101 | 101 |
} |
| ... | ... |
@@ -139,6 +154,7 @@ func ValidateLogOpt(cfg map[string]string) error {
|
| 139 | 139 |
switch key {
|
| 140 | 140 |
case "max-file": |
| 141 | 141 |
case "max-size": |
| 142 |
+ case "compress": |
|
| 142 | 143 |
case "labels": |
| 143 | 144 |
case "env": |
| 144 | 145 |
case "env-regex": |
| ... | ... |
@@ -2,6 +2,7 @@ package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelo |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"bytes" |
| 5 |
+ "compress/gzip" |
|
| 5 | 6 |
"encoding/json" |
| 6 | 7 |
"io/ioutil" |
| 7 | 8 |
"os" |
| ... | ... |
@@ -142,7 +143,7 @@ func TestJSONFileLoggerWithOpts(t *testing.T) {
|
| 142 | 142 |
} |
| 143 | 143 |
defer os.RemoveAll(tmp) |
| 144 | 144 |
filename := filepath.Join(tmp, "container.log") |
| 145 |
- config := map[string]string{"max-file": "2", "max-size": "1k"}
|
|
| 145 |
+ config := map[string]string{"max-file": "3", "max-size": "1k", "compress": "true"}
|
|
| 146 | 146 |
l, err := New(logger.Info{
|
| 147 | 147 |
ContainerID: cid, |
| 148 | 148 |
LogPath: filename, |
| ... | ... |
@@ -152,21 +153,55 @@ func TestJSONFileLoggerWithOpts(t *testing.T) {
|
| 152 | 152 |
t.Fatal(err) |
| 153 | 153 |
} |
| 154 | 154 |
defer l.Close() |
| 155 |
- for i := 0; i < 20; i++ {
|
|
| 155 |
+ for i := 0; i < 36; i++ {
|
|
| 156 | 156 |
if err := l.Log(&logger.Message{Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil {
|
| 157 | 157 |
t.Fatal(err) |
| 158 | 158 |
} |
| 159 | 159 |
} |
| 160 |
+ |
|
| 160 | 161 |
res, err := ioutil.ReadFile(filename) |
| 161 | 162 |
if err != nil {
|
| 162 | 163 |
t.Fatal(err) |
| 163 | 164 |
} |
| 165 |
+ |
|
| 164 | 166 |
penUlt, err := ioutil.ReadFile(filename + ".1") |
| 165 | 167 |
if err != nil {
|
| 168 |
+ if !os.IsNotExist(err) {
|
|
| 169 |
+ t.Fatal(err) |
|
| 170 |
+ } |
|
| 171 |
+ |
|
| 172 |
+ file, err := os.Open(filename + ".1.gz") |
|
| 173 |
+ defer file.Close() |
|
| 174 |
+ if err != nil {
|
|
| 175 |
+ t.Fatal(err) |
|
| 176 |
+ } |
|
| 177 |
+ zipReader, err := gzip.NewReader(file) |
|
| 178 |
+ defer zipReader.Close() |
|
| 179 |
+ if err != nil {
|
|
| 180 |
+ t.Fatal(err) |
|
| 181 |
+ } |
|
| 182 |
+ penUlt, err = ioutil.ReadAll(zipReader) |
|
| 183 |
+ if err != nil {
|
|
| 184 |
+ t.Fatal(err) |
|
| 185 |
+ } |
|
| 186 |
+ } |
|
| 187 |
+ |
|
| 188 |
+ file, err := os.Open(filename + ".2.gz") |
|
| 189 |
+ defer file.Close() |
|
| 190 |
+ if err != nil {
|
|
| 191 |
+ t.Fatal(err) |
|
| 192 |
+ } |
|
| 193 |
+ zipReader, err := gzip.NewReader(file) |
|
| 194 |
+ defer zipReader.Close() |
|
| 195 |
+ if err != nil {
|
|
| 196 |
+ t.Fatal(err) |
|
| 197 |
+ } |
|
| 198 |
+ antepenult, err := ioutil.ReadAll(zipReader) |
|
| 199 |
+ if err != nil {
|
|
| 166 | 200 |
t.Fatal(err) |
| 167 | 201 |
} |
| 168 | 202 |
|
| 169 |
- expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 203 |
+ expectedAntepenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 170 | 204 |
{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
| 171 | 205 |
{"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
| 172 | 206 |
{"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
| ... | ... |
@@ -183,10 +218,27 @@ func TestJSONFileLoggerWithOpts(t *testing.T) {
|
| 183 | 183 |
{"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
| 184 | 184 |
{"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
| 185 | 185 |
` |
| 186 |
- expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 186 |
+ expectedPenultimate := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 187 | 187 |
{"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
| 188 | 188 |
{"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
| 189 | 189 |
{"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
| 190 |
+{"log":"line20\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 191 |
+{"log":"line21\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 192 |
+{"log":"line22\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 193 |
+{"log":"line23\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 194 |
+{"log":"line24\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 195 |
+{"log":"line25\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 196 |
+{"log":"line26\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 197 |
+{"log":"line27\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 198 |
+{"log":"line28\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 199 |
+{"log":"line29\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 200 |
+{"log":"line30\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 201 |
+{"log":"line31\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 202 |
+` |
|
| 203 |
+ expected := `{"log":"line32\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 204 |
+{"log":"line33\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 205 |
+{"log":"line34\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 206 |
+{"log":"line35\n","stream":"src1","time":"0001-01-01T00:00:00Z"}
|
|
| 190 | 207 |
` |
| 191 | 208 |
|
| 192 | 209 |
if string(res) != expected {
|
| ... | ... |
@@ -195,7 +247,9 @@ func TestJSONFileLoggerWithOpts(t *testing.T) {
|
| 195 | 195 |
if string(penUlt) != expectedPenultimate {
|
| 196 | 196 |
t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate)
|
| 197 | 197 |
} |
| 198 |
- |
|
| 198 |
+ if string(antepenult) != expectedAntepenultimate {
|
|
| 199 |
+ t.Fatalf("Wrong log content: %q, expected %q", antepenult, expectedAntepenultimate)
|
|
| 200 |
+ } |
|
| 199 | 201 |
} |
| 200 | 202 |
|
| 201 | 203 |
func TestJSONFileLoggerWithLabelsEnv(t *testing.T) {
|
| ... | ... |
@@ -2,17 +2,21 @@ package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutil |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"bytes" |
| 5 |
+ "compress/gzip" |
|
| 5 | 6 |
"context" |
| 7 |
+ "encoding/json" |
|
| 6 | 8 |
"fmt" |
| 7 | 9 |
"io" |
| 8 | 10 |
"os" |
| 9 | 11 |
"strconv" |
| 12 |
+ "strings" |
|
| 10 | 13 |
"sync" |
| 11 | 14 |
"time" |
| 12 | 15 |
|
| 13 | 16 |
"github.com/docker/docker/daemon/logger" |
| 14 | 17 |
"github.com/docker/docker/daemon/logger/loggerutils/multireader" |
| 15 | 18 |
"github.com/docker/docker/pkg/filenotify" |
| 19 |
+ "github.com/docker/docker/pkg/pools" |
|
| 16 | 20 |
"github.com/docker/docker/pkg/pubsub" |
| 17 | 21 |
"github.com/docker/docker/pkg/tailfile" |
| 18 | 22 |
"github.com/fsnotify/fsnotify" |
| ... | ... |
@@ -20,24 +24,81 @@ import ( |
| 20 | 20 |
"github.com/sirupsen/logrus" |
| 21 | 21 |
) |
| 22 | 22 |
|
| 23 |
+const tmpLogfileSuffix = ".tmp" |
|
| 24 |
+ |
|
| 25 |
+// rotateFileMetadata is metadata stored in the gzip header of the compressed log file |
|
| 26 |
+type rotateFileMetadata struct {
|
|
| 27 |
+ LastTime time.Time `json:"lastTime,omitempty"` |
|
| 28 |
+} |
|
| 29 |
+ |
|
| 30 |
+// refCounter counts references to open log files |
|
| 31 |
+type refCounter struct {
|
|
| 32 |
+ mu sync.Mutex |
|
| 33 |
+ counter map[string]int |
|
| 34 |
+} |
|
| 35 |
+ |
|
| 36 |
+// GetReference increases the reference counter for the specified logfile |
|
| 37 |
+func (rc *refCounter) GetReference(fileName string, openRefFile func(fileName string, exists bool) (*os.File, error)) (*os.File, error) {
|
|
| 38 |
+ rc.mu.Lock() |
|
| 39 |
+ defer rc.mu.Unlock() |
|
| 40 |
+ |
|
| 41 |
+ var ( |
|
| 42 |
+ file *os.File |
|
| 43 |
+ err error |
|
| 44 |
+ ) |
|
| 45 |
+ _, ok := rc.counter[fileName] |
|
| 46 |
+ file, err = openRefFile(fileName, ok) |
|
| 47 |
+ if err != nil {
|
|
| 48 |
+ return nil, err |
|
| 49 |
+ } |
|
| 50 |
+ |
|
| 51 |
+ if ok {
|
|
| 52 |
+ rc.counter[fileName]++ |
|
| 53 |
+ } else if file != nil {
|
|
| 54 |
+ rc.counter[file.Name()] = 1 |
|
| 55 |
+ } |
|
| 56 |
+ |
|
| 57 |
+ return file, nil |
|
| 58 |
+} |
|
| 59 |
+ |
|
| 60 |
+// Dereference reduces the reference counter for the specified logfile |
|
| 61 |
+func (rc *refCounter) Dereference(fileName string) error {
|
|
| 62 |
+ rc.mu.Lock() |
|
| 63 |
+ defer rc.mu.Unlock() |
|
| 64 |
+ |
|
| 65 |
+ rc.counter[fileName]-- |
|
| 66 |
+ if rc.counter[fileName] <= 0 {
|
|
| 67 |
+ delete(rc.counter, fileName) |
|
| 68 |
+ err := os.Remove(fileName) |
|
| 69 |
+ if err != nil {
|
|
| 70 |
+ return err |
|
| 71 |
+ } |
|
| 72 |
+ } |
|
| 73 |
+ return nil |
|
| 74 |
+} |
|
| 75 |
+ |
|
| 23 | 76 |
// LogFile is Logger implementation for default Docker logging. |
| 24 | 77 |
type LogFile struct {
|
| 25 |
- f *os.File // store for closing |
|
| 26 |
- closed bool |
|
| 27 |
- mu sync.RWMutex |
|
| 28 |
- capacity int64 //maximum size of each file |
|
| 29 |
- currentSize int64 // current size of the latest file |
|
| 30 |
- maxFiles int //maximum number of files |
|
| 31 |
- notifyRotate *pubsub.Publisher |
|
| 32 |
- marshal logger.MarshalFunc |
|
| 33 |
- createDecoder makeDecoderFunc |
|
| 34 |
- perms os.FileMode |
|
| 78 |
+ mu sync.RWMutex // protects the logfile access |
|
| 79 |
+ f *os.File // store for closing |
|
| 80 |
+ closed bool |
|
| 81 |
+ rotateMu sync.Mutex // blocks the next rotation until the current rotation is completed |
|
| 82 |
+ capacity int64 // maximum size of each file |
|
| 83 |
+ currentSize int64 // current size of the latest file |
|
| 84 |
+ maxFiles int // maximum number of files |
|
| 85 |
+ compress bool // whether old versions of log files are compressed |
|
| 86 |
+ lastTimestamp time.Time // timestamp of the last log |
|
| 87 |
+ filesRefCounter refCounter // keep reference-counted of decompressed files |
|
| 88 |
+ notifyRotate *pubsub.Publisher |
|
| 89 |
+ marshal logger.MarshalFunc |
|
| 90 |
+ createDecoder makeDecoderFunc |
|
| 91 |
+ perms os.FileMode |
|
| 35 | 92 |
} |
| 36 | 93 |
|
| 37 | 94 |
type makeDecoderFunc func(rdr io.Reader) func() (*logger.Message, error) |
| 38 | 95 |
|
| 39 | 96 |
//NewLogFile creates new LogFile |
| 40 |
-func NewLogFile(logPath string, capacity int64, maxFiles int, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc, perms os.FileMode) (*LogFile, error) {
|
|
| 97 |
+func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc, perms os.FileMode) (*LogFile, error) {
|
|
| 41 | 98 |
log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, perms) |
| 42 | 99 |
if err != nil {
|
| 43 | 100 |
return nil, err |
| ... | ... |
@@ -49,14 +110,16 @@ func NewLogFile(logPath string, capacity int64, maxFiles int, marshaller logger. |
| 49 | 49 |
} |
| 50 | 50 |
|
| 51 | 51 |
return &LogFile{
|
| 52 |
- f: log, |
|
| 53 |
- capacity: capacity, |
|
| 54 |
- currentSize: size, |
|
| 55 |
- maxFiles: maxFiles, |
|
| 56 |
- notifyRotate: pubsub.NewPublisher(0, 1), |
|
| 57 |
- marshal: marshaller, |
|
| 58 |
- createDecoder: decodeFunc, |
|
| 59 |
- perms: perms, |
|
| 52 |
+ f: log, |
|
| 53 |
+ capacity: capacity, |
|
| 54 |
+ currentSize: size, |
|
| 55 |
+ maxFiles: maxFiles, |
|
| 56 |
+ compress: compress, |
|
| 57 |
+ filesRefCounter: refCounter{counter: make(map[string]int)},
|
|
| 58 |
+ notifyRotate: pubsub.NewPublisher(0, 1), |
|
| 59 |
+ marshal: marshaller, |
|
| 60 |
+ createDecoder: decodeFunc, |
|
| 61 |
+ perms: perms, |
|
| 60 | 62 |
}, nil |
| 61 | 63 |
} |
| 62 | 64 |
|
| ... | ... |
@@ -84,6 +147,7 @@ func (w *LogFile) WriteLogEntry(msg *logger.Message) error {
|
| 84 | 84 |
n, err := w.f.Write(b) |
| 85 | 85 |
if err == nil {
|
| 86 | 86 |
w.currentSize += int64(n) |
| 87 |
+ w.lastTimestamp = msg.Timestamp |
|
| 87 | 88 |
} |
| 88 | 89 |
w.mu.Unlock() |
| 89 | 90 |
return err |
| ... | ... |
@@ -95,43 +159,108 @@ func (w *LogFile) checkCapacityAndRotate() error {
|
| 95 | 95 |
} |
| 96 | 96 |
|
| 97 | 97 |
if w.currentSize >= w.capacity {
|
| 98 |
- name := w.f.Name() |
|
| 98 |
+ w.rotateMu.Lock() |
|
| 99 |
+ fname := w.f.Name() |
|
| 99 | 100 |
if err := w.f.Close(); err != nil {
|
| 101 |
+ w.rotateMu.Unlock() |
|
| 100 | 102 |
return errors.Wrap(err, "error closing file") |
| 101 | 103 |
} |
| 102 |
- if err := rotate(name, w.maxFiles); err != nil {
|
|
| 104 |
+ if err := rotate(fname, w.maxFiles, w.compress); err != nil {
|
|
| 105 |
+ w.rotateMu.Unlock() |
|
| 103 | 106 |
return err |
| 104 | 107 |
} |
| 105 |
- file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, w.perms) |
|
| 108 |
+ file, err := os.OpenFile(fname, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, w.perms) |
|
| 106 | 109 |
if err != nil {
|
| 110 |
+ w.rotateMu.Unlock() |
|
| 107 | 111 |
return err |
| 108 | 112 |
} |
| 109 | 113 |
w.f = file |
| 110 | 114 |
w.currentSize = 0 |
| 111 | 115 |
w.notifyRotate.Publish(struct{}{})
|
| 116 |
+ |
|
| 117 |
+ if w.maxFiles <= 1 || !w.compress {
|
|
| 118 |
+ w.rotateMu.Unlock() |
|
| 119 |
+ return nil |
|
| 120 |
+ } |
|
| 121 |
+ |
|
| 122 |
+ go func() {
|
|
| 123 |
+ compressFile(fname+".1", w.lastTimestamp) |
|
| 124 |
+ w.rotateMu.Unlock() |
|
| 125 |
+ }() |
|
| 112 | 126 |
} |
| 113 | 127 |
|
| 114 | 128 |
return nil |
| 115 | 129 |
} |
| 116 | 130 |
|
| 117 |
-func rotate(name string, maxFiles int) error {
|
|
| 131 |
+func rotate(name string, maxFiles int, compress bool) error {
|
|
| 118 | 132 |
if maxFiles < 2 {
|
| 119 | 133 |
return nil |
| 120 | 134 |
} |
| 135 |
+ |
|
| 136 |
+ var extension string |
|
| 137 |
+ if compress {
|
|
| 138 |
+ extension = ".gz" |
|
| 139 |
+ } |
|
| 121 | 140 |
for i := maxFiles - 1; i > 1; i-- {
|
| 122 |
- toPath := name + "." + strconv.Itoa(i) |
|
| 123 |
- fromPath := name + "." + strconv.Itoa(i-1) |
|
| 141 |
+ toPath := name + "." + strconv.Itoa(i) + extension |
|
| 142 |
+ fromPath := name + "." + strconv.Itoa(i-1) + extension |
|
| 124 | 143 |
if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) {
|
| 125 |
- return errors.Wrap(err, "error rotating old log entries") |
|
| 144 |
+ return err |
|
| 126 | 145 |
} |
| 127 | 146 |
} |
| 128 | 147 |
|
| 129 | 148 |
if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
|
| 130 |
- return errors.Wrap(err, "error rotating current log") |
|
| 149 |
+ return err |
|
| 131 | 150 |
} |
| 151 |
+ |
|
| 132 | 152 |
return nil |
| 133 | 153 |
} |
| 134 | 154 |
|
| 155 |
+func compressFile(fileName string, lastTimestamp time.Time) {
|
|
| 156 |
+ file, err := os.Open(fileName) |
|
| 157 |
+ if err != nil {
|
|
| 158 |
+ logrus.Errorf("Failed to open log file: %v", err)
|
|
| 159 |
+ return |
|
| 160 |
+ } |
|
| 161 |
+ defer func() {
|
|
| 162 |
+ file.Close() |
|
| 163 |
+ err := os.Remove(fileName) |
|
| 164 |
+ if err != nil {
|
|
| 165 |
+ logrus.Errorf("Failed to remove source log file: %v", err)
|
|
| 166 |
+ } |
|
| 167 |
+ }() |
|
| 168 |
+ |
|
| 169 |
+ outFile, err := os.OpenFile(fileName+".gz", os.O_CREATE|os.O_RDWR, 0640) |
|
| 170 |
+ if err != nil {
|
|
| 171 |
+ logrus.Errorf("Failed to open or create gzip log file: %v", err)
|
|
| 172 |
+ return |
|
| 173 |
+ } |
|
| 174 |
+ defer func() {
|
|
| 175 |
+ outFile.Close() |
|
| 176 |
+ if err != nil {
|
|
| 177 |
+ os.Remove(fileName + ".gz") |
|
| 178 |
+ } |
|
| 179 |
+ }() |
|
| 180 |
+ |
|
| 181 |
+ compressWriter := gzip.NewWriter(outFile) |
|
| 182 |
+ defer compressWriter.Close() |
|
| 183 |
+ |
|
| 184 |
+ // Add the last log entry timestamp to the gzip header |
|
| 185 |
+ extra := rotateFileMetadata{}
|
|
| 186 |
+ extra.LastTime = lastTimestamp |
|
| 187 |
+ compressWriter.Header.Extra, err = json.Marshal(&extra) |
|
| 188 |
+ if err != nil {
|
|
| 189 |
+ // Here log the error only and don't return since this is just an optimization. |
|
| 190 |
+ logrus.Warningf("Failed to marshal JSON: %v", err)
|
|
| 191 |
+ } |
|
| 192 |
+ |
|
| 193 |
+ _, err = pools.Copy(compressWriter, file) |
|
| 194 |
+ if err != nil {
|
|
| 195 |
+ logrus.WithError(err).WithField("module", "container.logs").WithField("file", fileName).Error("Error compressing log file")
|
|
| 196 |
+ return |
|
| 197 |
+ } |
|
| 198 |
+} |
|
| 199 |
+ |
|
| 135 | 200 |
// MaxFiles return maximum number of files |
| 136 | 201 |
func (w *LogFile) MaxFiles() int {
|
| 137 | 202 |
return w.maxFiles |
| ... | ... |
@@ -154,18 +283,6 @@ func (w *LogFile) Close() error {
|
| 154 | 154 |
// ReadLogs decodes entries from log files and sends them the passed in watcher |
| 155 | 155 |
func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) {
|
| 156 | 156 |
w.mu.RLock() |
| 157 |
- files, err := w.openRotatedFiles() |
|
| 158 |
- if err != nil {
|
|
| 159 |
- w.mu.RUnlock() |
|
| 160 |
- watcher.Err <- err |
|
| 161 |
- return |
|
| 162 |
- } |
|
| 163 |
- defer func() {
|
|
| 164 |
- for _, f := range files {
|
|
| 165 |
- f.Close() |
|
| 166 |
- } |
|
| 167 |
- }() |
|
| 168 |
- |
|
| 169 | 157 |
currentFile, err := os.Open(w.f.Name()) |
| 170 | 158 |
if err != nil {
|
| 171 | 159 |
w.mu.RUnlock() |
| ... | ... |
@@ -175,14 +292,20 @@ func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) |
| 175 | 175 |
defer currentFile.Close() |
| 176 | 176 |
|
| 177 | 177 |
currentChunk, err := newSectionReader(currentFile) |
| 178 |
- w.mu.RUnlock() |
|
| 179 |
- |
|
| 180 | 178 |
if err != nil {
|
| 179 |
+ w.mu.RUnlock() |
|
| 181 | 180 |
watcher.Err <- err |
| 182 | 181 |
return |
| 183 | 182 |
} |
| 184 | 183 |
|
| 185 | 184 |
if config.Tail != 0 {
|
| 185 |
+ files, err := w.openRotatedFiles(config) |
|
| 186 |
+ if err != nil {
|
|
| 187 |
+ w.mu.RUnlock() |
|
| 188 |
+ watcher.Err <- err |
|
| 189 |
+ return |
|
| 190 |
+ } |
|
| 191 |
+ w.mu.RUnlock() |
|
| 186 | 192 |
seekers := make([]io.ReadSeeker, 0, len(files)+1) |
| 187 | 193 |
for _, f := range files {
|
| 188 | 194 |
seekers = append(seekers, f) |
| ... | ... |
@@ -193,9 +316,20 @@ func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) |
| 193 | 193 |
if len(seekers) > 0 {
|
| 194 | 194 |
tailFile(multireader.MultiReadSeeker(seekers...), watcher, w.createDecoder, config) |
| 195 | 195 |
} |
| 196 |
+ for _, f := range files {
|
|
| 197 |
+ f.Close() |
|
| 198 |
+ fileName := f.Name() |
|
| 199 |
+ if strings.HasSuffix(fileName, tmpLogfileSuffix) {
|
|
| 200 |
+ err := w.filesRefCounter.Dereference(fileName) |
|
| 201 |
+ if err != nil {
|
|
| 202 |
+ logrus.Errorf("Failed to dereference the log file %q: %v", fileName, err)
|
|
| 203 |
+ } |
|
| 204 |
+ } |
|
| 205 |
+ } |
|
| 206 |
+ |
|
| 207 |
+ w.mu.RLock() |
|
| 196 | 208 |
} |
| 197 | 209 |
|
| 198 |
- w.mu.RLock() |
|
| 199 | 210 |
if !config.Follow || w.closed {
|
| 200 | 211 |
w.mu.RUnlock() |
| 201 | 212 |
return |
| ... | ... |
@@ -207,13 +341,22 @@ func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) |
| 207 | 207 |
followLogs(currentFile, watcher, notifyRotate, w.createDecoder, config.Since, config.Until) |
| 208 | 208 |
} |
| 209 | 209 |
|
| 210 |
-func (w *LogFile) openRotatedFiles() (files []*os.File, err error) {
|
|
| 210 |
+func (w *LogFile) openRotatedFiles(config logger.ReadConfig) (files []*os.File, err error) {
|
|
| 211 |
+ w.rotateMu.Lock() |
|
| 212 |
+ defer w.rotateMu.Unlock() |
|
| 213 |
+ |
|
| 211 | 214 |
defer func() {
|
| 212 | 215 |
if err == nil {
|
| 213 | 216 |
return |
| 214 | 217 |
} |
| 215 | 218 |
for _, f := range files {
|
| 216 | 219 |
f.Close() |
| 220 |
+ if strings.HasSuffix(f.Name(), tmpLogfileSuffix) {
|
|
| 221 |
+ err := os.Remove(f.Name()) |
|
| 222 |
+ if err != nil && !os.IsNotExist(err) {
|
|
| 223 |
+ logrus.Warningf("Failed to remove the logfile %q: %v", f.Name(), err)
|
|
| 224 |
+ } |
|
| 225 |
+ } |
|
| 217 | 226 |
} |
| 218 | 227 |
}() |
| 219 | 228 |
|
| ... | ... |
@@ -223,6 +366,28 @@ func (w *LogFile) openRotatedFiles() (files []*os.File, err error) {
|
| 223 | 223 |
if !os.IsNotExist(err) {
|
| 224 | 224 |
return nil, err |
| 225 | 225 |
} |
| 226 |
+ |
|
| 227 |
+ fileName := fmt.Sprintf("%s.%d.gz", w.f.Name(), i-1)
|
|
| 228 |
+ decompressedFileName := fileName + tmpLogfileSuffix |
|
| 229 |
+ tmpFile, err := w.filesRefCounter.GetReference(decompressedFileName, func(refFileName string, exists bool) (*os.File, error) {
|
|
| 230 |
+ if exists {
|
|
| 231 |
+ return os.Open(refFileName) |
|
| 232 |
+ } |
|
| 233 |
+ return decompressfile(fileName, refFileName, config.Since) |
|
| 234 |
+ }) |
|
| 235 |
+ |
|
| 236 |
+ if err != nil {
|
|
| 237 |
+ if !os.IsNotExist(err) {
|
|
| 238 |
+ return nil, err |
|
| 239 |
+ } |
|
| 240 |
+ continue |
|
| 241 |
+ } |
|
| 242 |
+ if tmpFile == nil {
|
|
| 243 |
+ // The log before `config.Since` does not need to read |
|
| 244 |
+ break |
|
| 245 |
+ } |
|
| 246 |
+ |
|
| 247 |
+ files = append(files, tmpFile) |
|
| 226 | 248 |
continue |
| 227 | 249 |
} |
| 228 | 250 |
files = append(files, f) |
| ... | ... |
@@ -231,6 +396,44 @@ func (w *LogFile) openRotatedFiles() (files []*os.File, err error) {
|
| 231 | 231 |
return files, nil |
| 232 | 232 |
} |
| 233 | 233 |
|
| 234 |
+func decompressfile(fileName, destFileName string, since time.Time) (*os.File, error) {
|
|
| 235 |
+ cf, err := os.Open(fileName) |
|
| 236 |
+ if err != nil {
|
|
| 237 |
+ return nil, err |
|
| 238 |
+ } |
|
| 239 |
+ defer cf.Close() |
|
| 240 |
+ |
|
| 241 |
+ rc, err := gzip.NewReader(cf) |
|
| 242 |
+ if err != nil {
|
|
| 243 |
+ return nil, err |
|
| 244 |
+ } |
|
| 245 |
+ defer rc.Close() |
|
| 246 |
+ |
|
| 247 |
+ // Extract the last log entry timestamp from the gzip header |
|
| 248 |
+ extra := &rotateFileMetadata{}
|
|
| 249 |
+ err = json.Unmarshal(rc.Header.Extra, extra) |
|
| 250 |
+ if err == nil && extra.LastTime.Before(since) {
|
|
| 251 |
+ return nil, nil |
|
| 252 |
+ } |
|
| 253 |
+ |
|
| 254 |
+ rs, err := os.OpenFile(destFileName, os.O_CREATE|os.O_RDWR, 0640) |
|
| 255 |
+ if err != nil {
|
|
| 256 |
+ return nil, err |
|
| 257 |
+ } |
|
| 258 |
+ |
|
| 259 |
+ _, err = pools.Copy(rs, rc) |
|
| 260 |
+ if err != nil {
|
|
| 261 |
+ rs.Close() |
|
| 262 |
+ rErr := os.Remove(rs.Name()) |
|
| 263 |
+ if rErr != nil && !os.IsNotExist(rErr) {
|
|
| 264 |
+ logrus.Errorf("Failed to remove the logfile %q: %v", rs.Name(), rErr)
|
|
| 265 |
+ } |
|
| 266 |
+ return nil, err |
|
| 267 |
+ } |
|
| 268 |
+ |
|
| 269 |
+ return rs, nil |
|
| 270 |
+} |
|
| 271 |
+ |
|
| 234 | 272 |
func newSectionReader(f *os.File) (*io.SectionReader, error) {
|
| 235 | 273 |
// seek to the end to get the size |
| 236 | 274 |
// we'll leave this at the end of the file since section reader does not advance the reader |