
Fix logrus formatting

This fix cleans up logrus formatting by removing the `f` suffix from
`logrus.[Error|Warn|Debug|Fatal|Panic|Info]f` calls when no format
string is present.

Fixes #23459.
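
For illustration, a minimal sketch of the distinction (not taken from the diff below; the import path and messages are assumptions, and the exact mangled output depends on how `fmt` reports a missing operand):

```go
package main

import (
	"github.com/sirupsen/logrus" // canonical import path today; Docker vendored a differently-cased path at the time
)

func main() {
	logrus.SetLevel(logrus.DebugLevel)

	// A constant message with nothing to interpolate: the non-formatting
	// variant is the right call.
	logrus.Debug("Registering routers")

	// Debugf is only needed when there really is a format string.
	logrus.Debugf("Registering %d routers", 4)

	// Passing a bare message through Debugf is harmless until the message
	// happens to contain '%'; fmt then treats it as a verb with a missing
	// operand and mangles the output.
	logrus.Debugf("upload 100% complete") // logs something like "upload 100%!c(MISSING)omplete"
	logrus.Debug("upload 100% complete")  // logs the message verbatim
}
```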

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>

Yong Tang authored on 2016/06/12 05:16:55
Showing 19 changed files
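
The 19 files below were presumably updated by hand. As an aside, a rough go/ast sweep along the following lines could locate the same call pattern; this is a sketch, not part of the commit, and the "name ends in f with exactly one argument" heuristic is an assumption that can over- or under-report (for example, messages that intentionally use `%%`):

```go
// findbarelogf: list logrus.*f(...) calls that pass only a bare message.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"os"
	"strings"
)

func main() {
	fset := token.NewFileSet()
	for _, path := range os.Args[1:] {
		file, err := parser.ParseFile(fset, path, nil, 0)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			continue
		}
		ast.Inspect(file, func(n ast.Node) bool {
			call, ok := n.(*ast.CallExpr)
			if !ok {
				return true
			}
			sel, ok := call.Fun.(*ast.SelectorExpr)
			if !ok {
				return true
			}
			pkg, ok := sel.X.(*ast.Ident)
			if !ok || pkg.Name != "logrus" {
				return true
			}
			// Flag formatting variants called with a single argument,
			// i.e. a format string but nothing to format.
			if strings.HasSuffix(sel.Sel.Name, "f") && len(call.Args) == 1 {
				fmt.Printf("%s: logrus.%s called without format arguments\n",
					fset.Position(call.Pos()), sel.Sel.Name)
			}
			return true
		})
	}
}
```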
... ...
@@ -46,7 +46,7 @@ func (cli *DockerCli) HoldHijackedConnection(ctx context.Context, tty bool, inpu
 				_, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader)
 			}
 
-			logrus.Debugf("[hijack] End of stdout")
+			logrus.Debug("[hijack] End of stdout")
 			receiveStdout <- err
 		}()
 	}
... ...
@@ -62,7 +62,7 @@ func (cli *DockerCli) HoldHijackedConnection(ctx context.Context, tty bool, inpu
 					cli.restoreTerminal(inputStream)
 				})
 			}
-			logrus.Debugf("[hijack] End of stdin")
+			logrus.Debug("[hijack] End of stdin")
 		}
 
 		if err := resp.CloseWrite(); err != nil {
... ...
@@ -163,7 +163,7 @@ func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) {
 func (s *Server) createMux() *mux.Router {
 	m := mux.NewRouter()
 
-	logrus.Debugf("Registering routers")
+	logrus.Debug("Registering routers")
 	for _, apiRouter := range s.routers {
 		for _, r := range apiRouter.Routes() {
 			f := s.makeHTTPHandler(r.Handler())
... ...
@@ -284,12 +284,12 @@ func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.S
 	// Wait for initialization to complete.
 	failed := <-h.tosvc
 	if failed {
-		logrus.Debugf("Aborting service start due to failure during initializtion")
+		logrus.Debug("Aborting service start due to failure during initializtion")
 		return true, 1
 	}
 
 	s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)}
-	logrus.Debugf("Service running")
+	logrus.Debug("Service running")
 Loop:
 	for {
 		select {
... ...
@@ -393,7 +393,7 @@ func AttachStreams(ctx context.Context, streamConfig *runconfig.StreamConfig, op
 		if stdin == nil || !openStdin {
 			return
 		}
-		logrus.Debugf("attach: stdin: begin")
+		logrus.Debug("attach: stdin: begin")
 
 		var err error
 		if tty {
... ...
@@ -419,7 +419,7 @@ func AttachStreams(ctx context.Context, streamConfig *runconfig.StreamConfig, op
 				cStderr.Close()
 			}
 		}
-		logrus.Debugf("attach: stdin: end")
+		logrus.Debug("attach: stdin: end")
 		wg.Done()
 	}()
 
... ...
@@ -28,7 +28,7 @@ func (s *Health) String() string {
 // it returns nil.
 func (s *Health) OpenMonitorChannel() chan struct{} {
 	if s.stop == nil {
-		logrus.Debugf("OpenMonitorChannel")
+		logrus.Debug("OpenMonitorChannel")
 		s.stop = make(chan struct{})
 		return s.stop
 	}
... ...
@@ -38,12 +38,12 @@ func (s *Health) OpenMonitorChannel() chan struct{} {
 // CloseMonitorChannel closes any existing monitor channel.
 func (s *Health) CloseMonitorChannel() {
 	if s.stop != nil {
-		logrus.Debugf("CloseMonitorChannel: waiting for probe to stop")
+		logrus.Debug("CloseMonitorChannel: waiting for probe to stop")
 		// This channel does not buffer. Once the write succeeds, the monitor
 		// has read the stop request and will not make any further updates
 		// to c.State.Health.
 		s.stop <- struct{}{}
 		s.stop = nil
-		logrus.Debugf("CloseMonitorChannel done")
+		logrus.Debug("CloseMonitorChannel done")
 	}
 }
... ...
@@ -114,7 +114,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, stdin io.ReadClose
 			r, w := io.Pipe()
 			go func() {
 				defer w.Close()
-				defer logrus.Debugf("Closing buffered stdin pipe")
+				defer logrus.Debug("Closing buffered stdin pipe")
 				io.Copy(w, stdin)
 			}()
 			stdinPipe = r
... ...
@@ -175,7 +175,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
 		r, w := io.Pipe()
 		go func() {
 			defer w.Close()
-			defer logrus.Debugf("Closing buffered stdin pipe")
+			defer logrus.Debug("Closing buffered stdin pipe")
 			pools.Copy(w, stdin)
 		}()
 		cStdin = r
... ...
@@ -699,7 +699,7 @@ func (devices *DeviceSet) startDeviceDeletionWorker() {
 		return
 	}
 
-	logrus.Debugf("devmapper: Worker to cleanup deleted devices started")
+	logrus.Debug("devmapper: Worker to cleanup deleted devices started")
 	for range devices.deletionWorkerTicker.C {
 		devices.cleanupDeletedDevices()
 	}
... ...
@@ -1002,7 +1002,7 @@ func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error {
 }
 
 func (devices *DeviceSet) createBaseImage() error {
-	logrus.Debugf("devmapper: Initializing base device-mapper thin volume")
+	logrus.Debug("devmapper: Initializing base device-mapper thin volume")
 
 	// Create initial device
 	info, err := devices.createRegisterDevice("")
... ...
@@ -1010,7 +1010,7 @@ func (devices *DeviceSet) createBaseImage() error {
 		return err
 	}
 
-	logrus.Debugf("devmapper: Creating filesystem on base device-mapper thin volume")
+	logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume")
 
 	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
 		return err
... ...
@@ -1188,7 +1188,7 @@ func (devices *DeviceSet) setupBaseImage() error {
 			return nil
 		}
 
-		logrus.Debugf("devmapper: Removing uninitialized base image")
+		logrus.Debug("devmapper: Removing uninitialized base image")
 		// If previous base device is in deferred delete state,
 		// that needs to be cleaned up first. So don't try
 		// deferred deletion.
... ...
@@ -1455,7 +1455,7 @@ func (devices *DeviceSet) refreshTransaction(DeviceID int) error {
 
 func (devices *DeviceSet) closeTransaction() error {
 	if err := devices.updatePoolTransactionID(); err != nil {
-		logrus.Debugf("devmapper: Failed to close Transaction")
+		logrus.Debug("devmapper: Failed to close Transaction")
 		return err
 	}
 	return nil
... ...
@@ -1644,7 +1644,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 		if !devicemapper.LibraryDeferredRemovalSupport {
 			return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it")
 		}
-		logrus.Debugf("devmapper: Deferred removal support enabled.")
+		logrus.Debug("devmapper: Deferred removal support enabled.")
 		devices.deferredRemove = true
 	}
 
... ...
@@ -1652,7 +1652,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 		if !devices.deferredRemove {
 			return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter")
 		}
-		logrus.Debugf("devmapper: Deferred deletion support enabled.")
+		logrus.Debug("devmapper: Deferred deletion support enabled.")
 		devices.deferredDelete = true
 	}
 
... ...
@@ -1716,7 +1716,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 
 	// If the pool doesn't exist, create it
 	if !poolExists && devices.thinPoolDevice == "" {
-		logrus.Debugf("devmapper: Pool doesn't exist. Creating it.")
+		logrus.Debug("devmapper: Pool doesn't exist. Creating it.")
 
 		var (
 			dataFile     *os.File
... ...
@@ -2044,8 +2044,8 @@ func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
 }
 
 func (devices *DeviceSet) deactivatePool() error {
-	logrus.Debugf("devmapper: deactivatePool()")
-	defer logrus.Debugf("devmapper: deactivatePool END")
+	logrus.Debug("devmapper: deactivatePool()")
+	defer logrus.Debug("devmapper: deactivatePool END")
 	devname := devices.getPoolDevName()
 
 	devinfo, err := devicemapper.GetInfo(devname)
... ...
@@ -2304,7 +2304,7 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
 	if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
 		return err
 	}
-	logrus.Debugf("devmapper: Unmount done")
+	logrus.Debug("devmapper: Unmount done")
 
 	if err := devices.deactivateDevice(info); err != nil {
 		return err
... ...
@@ -132,7 +132,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (s
 	options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
 		GIDMaps: gdw.gidMaps}
 	start := time.Now().UTC()
-	logrus.Debugf("Start untar layer")
+	logrus.Debug("Start untar layer")
 	if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
 		return
 	}
... ...
@@ -154,10 +154,10 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
 	for {
 		select {
 		case <-stop:
-			logrus.Debugf("Stop healthcheck monitoring (received while idle)")
+			logrus.Debug("Stop healthcheck monitoring (received while idle)")
 			return
 		case <-time.After(probeInterval):
-			logrus.Debugf("Running health check...")
+			logrus.Debug("Running health check...")
 			startTime := time.Now()
 			ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout)
 			results := make(chan *types.HealthcheckResult)
... ...
@@ -180,7 +180,7 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
 			}()
 			select {
 			case <-stop:
-				logrus.Debugf("Stop healthcheck monitoring (received while probing)")
+				logrus.Debug("Stop healthcheck monitoring (received while probing)")
 				// Stop timeout and kill probe, but don't wait for probe to exit.
 				cancelProbe()
 				return
... ...
@@ -189,7 +189,7 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
 				// Stop timeout
 				cancelProbe()
 			case <-ctx.Done():
-				logrus.Debugf("Health check taking too long")
+				logrus.Debug("Health check taking too long")
 				handleProbeResult(d, c, &types.HealthcheckResult{
 					ExitCode: -1,
 					Output:   fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout),
... ...
@@ -85,7 +85,7 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c
 			return nil
 		case msg, ok := <-logs.Msg:
 			if !ok {
-				logrus.Debugf("logs: end stream")
+				logrus.Debug("logs: end stream")
 				logs.Close()
 				return nil
 			}
... ...
@@ -89,7 +89,7 @@ func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) erro
 		return err
 	}
 
-	logrus.Debugf("Retrieving the tag list")
+	logrus.Debug("Retrieving the tag list")
 	var tagsList map[string]string
 	if !isTagged {
 		tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo)
... ...
@@ -208,7 +208,7 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre
 		size = 0
 	} else {
 		if size != 0 && offset > size {
-			logrus.Debugf("Partial download is larger than full blob. Starting over")
+			logrus.Debug("Partial download is larger than full blob. Starting over")
 			offset = 0
 			if err := ld.truncateDownloadFile(); err != nil {
 				return nil, 0, xfer.DoNotRetry{Err: err}
... ...
@@ -130,7 +130,7 @@ func DetectCompression(source []byte) Compression {
 		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
 	} {
 		if len(source) < len(m) {
-			logrus.Debugf("Len too short")
+			logrus.Debug("Len too short")
 			continue
 		}
 		if bytes.Compare(m, source[:len(m)]) == 0 {
... ...
@@ -408,7 +408,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 		}
 
 	case tar.TypeXGlobalHeader:
-		logrus.Debugf("PAX Global Extended Headers found and ignored")
+		logrus.Debug("PAX Global Extended Headers found and ignored")
 		return nil
 
 	default:
... ...
@@ -155,7 +155,7 @@ func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) {
 func (rm *responseModifier) CloseNotify() <-chan bool {
 	closeNotifier, ok := rm.rw.(http.CloseNotifier)
 	if !ok {
-		logrus.Errorf("Internal response writer doesn't support the CloseNotifier interface")
+		logrus.Error("Internal response writer doesn't support the CloseNotifier interface")
 		return nil
 	}
 	return closeNotifier.CloseNotify()
... ...
@@ -165,7 +165,7 @@ func (rm *responseModifier) CloseNotify() <-chan bool {
 func (rm *responseModifier) Flush() {
 	flusher, ok := rm.rw.(http.Flusher)
 	if !ok {
-		logrus.Errorf("Internal response writer doesn't support the Flusher interface")
+		logrus.Error("Internal response writer doesn't support the Flusher interface")
 		return
 	}
 
... ...
@@ -279,7 +279,7 @@ func LogInit(logger DevmapperLogger) {
 // SetDevDir sets the dev folder for the device mapper library (usually /dev).
 func SetDevDir(dir string) error {
 	if res := DmSetDevDir(dir); res != 1 {
-		logrus.Debugf("devicemapper: Error dm_set_dev_dir")
+		logrus.Debug("devicemapper: Error dm_set_dev_dir")
 		return ErrSetDevDir
 	}
 	return nil
... ...
@@ -47,7 +47,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
 		fi, err := os.Stat(target)
 		if err != nil {
 			if os.IsNotExist(err) {
-				logrus.Errorf("There are no more loopback devices available.")
+				logrus.Error("There are no more loopback devices available.")
 			}
 			return nil, ErrAttachLoopbackDevice
 		}
... ...
@@ -127,7 +127,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
 
 		// If the call failed, then free the loopback device
 		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
-			logrus.Errorf("Error while cleaning up the loopback device")
+			logrus.Error("Error while cleaning up the loopback device")
 		}
 		loopFile.Close()
 		return nil, ErrAttachLoopbackDevice
... ...
@@ -49,11 +49,11 @@ func Trap(cleanup func()) {
 						}
 					} else {
 						// 3 SIGTERM/INT signals received; force exit without cleanup
-						logrus.Infof("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
+						logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
 					}
 				case syscall.SIGQUIT:
 					DumpStacks()
-					logrus.Infof("Forcing docker daemon shutdown without cleanup on SIGQUIT")
+					logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT")
 				}
 				//for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal #
 				os.Exit(128 + int(sig.(syscall.Signal)))
... ...
@@ -302,10 +302,10 @@ func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io
 	}
 
 	if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
-		logrus.Debugf("server supports resume")
+		logrus.Debug("server supports resume")
 		return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil
 	}
-	logrus.Debugf("server doesn't support resume")
+	logrus.Debug("server doesn't support resume")
 	return res.Body, nil
 }