Docker-DCO-1.1-Signed-off-by: Josiah Kiehl <josiah@capoferro.net> (github: capoferro)
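
The hunks below replace the old utils.Debugf/utils.Errorf helpers and the standard library "log" package with the leveled logger in github.com/docker/docker/pkg/log. As a minimal, hypothetical sketch of the call pattern the patch converges on — only Debugf, Infof, Errorf and Fatalf, the functions that actually appear in the hunks, are assumed; the surrounding program and the placeholder values ("example", "unix", "/var/run/docker.sock") are illustrative and not part of the patch:

package main

import (
	"errors"

	"github.com/docker/docker/pkg/log"
)

func main() {
	// Debug-level message, replacing calls such as utils.Debugf.
	log.Debugf("probing cache for image %s", "example")

	// Info-level message, replacing standard-library log.Printf/Println calls.
	log.Infof("Listening for HTTP on %s (%s)", "unix", "/var/run/docker.sock")

	// Error-level message, replacing utils.Errorf.
	if err := errors.New("no such container"); err != nil {
		log.Errorf("Error running container: %s", err)
	}
}
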
@@ -28,6 +28,7 @@ import (
"github.com/docker/docker/engine"
"github.com/docker/docker/nat"
"github.com/docker/docker/opts"
+ "github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/filters"
"github.com/docker/docker/pkg/signal"
@@ -433,11 +434,11 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
out := engine.NewOutput()
remoteVersion, err := out.AddEnv()
if err != nil {
- utils.Errorf("Error reading remote version: %s\n", err)
+ log.Errorf("Error reading remote version: %s\n", err)
return err
}
if _, err := out.Write(body); err != nil {
- utils.Errorf("Error reading remote version: %s\n", err)
+ log.Errorf("Error reading remote version: %s\n", err)
return err
}
out.Close()
@@ -473,7 +474,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
}

if _, err := out.Write(body); err != nil {
- utils.Errorf("Error reading remote info: %s\n", err)
+ log.Errorf("Error reading remote info: %s\n", err)
return err
}
out.Close()
@@ -597,10 +598,10 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
}
}
if sig == "" {
- utils.Errorf("Unsupported signal: %d. Discarding.", s)
+ log.Errorf("Unsupported signal: %d. Discarding.", s)
}
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil {
- utils.Debugf("Error sending signal: %s", err)
+ log.Debugf("Error sending signal: %s", err)
}
}
}()
@@ -690,7 +691,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
if *openStdin || *attach {
if tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
- utils.Errorf("Error monitoring TTY size: %s\n", err)
+ log.Errorf("Error monitoring TTY size: %s\n", err)
}
}
return <-cErr
@@ -1827,7 +1828,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {

if tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
- utils.Debugf("Error monitoring TTY size: %s", err)
+ log.Debugf("Error monitoring TTY size: %s", err)
}
}

@@ -2098,9 +2099,9 @@ func (cli *DockerCli) CmdRun(args ...string) error {

// Block the return until the chan gets closed
defer func() {
- utils.Debugf("End of CmdRun(), Waiting for hijack to finish.")
+ log.Debugf("End of CmdRun(), Waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
- utils.Errorf("Hijack did not finish (chan still open)")
+ log.Errorf("Hijack did not finish (chan still open)")
}
}()

@@ -2146,7 +2147,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
}
case err := <-errCh:
if err != nil {
- utils.Debugf("Error hijack: %s", err)
+ log.Debugf("Error hijack: %s", err)
return err
}
}
@@ -2158,13 +2159,13 @@ func (cli *DockerCli) CmdRun(args ...string) error {

if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal {
if err := cli.monitorTtySize(runResult.Get("Id")); err != nil {
- utils.Errorf("Error monitoring TTY size: %s\n", err)
+ log.Errorf("Error monitoring TTY size: %s\n", err)
}
}

if errCh != nil {
if err := <-errCh; err != nil {
- utils.Debugf("Error hijack: %s", err)
+ log.Debugf("Error hijack: %s", err)
return err
}
}
@@ -13,6 +13,7 @@ import (

"github.com/docker/docker/api"
"github.com/docker/docker/dockerversion"
+ "github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/utils"
)
@@ -93,7 +94,7 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
} else {
_, err = utils.StdCopy(stdout, stderr, br)
}
- utils.Debugf("[hijack] End of stdout")
+ log.Debugf("[hijack] End of stdout")
return err
})
}
@@ -101,15 +102,15 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
sendStdin := utils.Go(func() error {
if in != nil {
io.Copy(rwc, in)
- utils.Debugf("[hijack] End of stdin")
+ log.Debugf("[hijack] End of stdin")
}
if tcpc, ok := rwc.(*net.TCPConn); ok {
if err := tcpc.CloseWrite(); err != nil {
- utils.Debugf("Couldn't send EOF: %s\n", err)
+ log.Debugf("Couldn't send EOF: %s\n", err)
}
} else if unixc, ok := rwc.(*net.UnixConn); ok {
if err := unixc.CloseWrite(); err != nil {
- utils.Debugf("Couldn't send EOF: %s\n", err)
+ log.Debugf("Couldn't send EOF: %s\n", err)
}
}
// Discard errors due to pipe interruption
@@ -118,14 +119,14 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea

if stdout != nil || stderr != nil {
if err := <-receiveStdout; err != nil {
- utils.Debugf("Error receiveStdout: %s", err)
+ log.Debugf("Error receiveStdout: %s", err)
return err
}
}

if !cli.isTerminal {
if err := <-sendStdin; err != nil {
- utils.Debugf("Error sendStdin: %s", err)
+ log.Debugf("Error sendStdin: %s", err)
return err
}
}
@@ -20,6 +20,7 @@ import (
"github.com/docker/docker/api"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/engine"
+ "github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
@@ -165,7 +166,7 @@ func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in
} else {
_, err = utils.StdCopy(stdout, stderr, resp.Body)
}
- utils.Debugf("[stream] End of stdout")
+ log.Debugf("[stream] End of stdout")
return err
}
return nil
@@ -180,7 +181,7 @@ func (cli *DockerCli) resizeTty(id string) {
v.Set("h", strconv.Itoa(height))
v.Set("w", strconv.Itoa(width))
if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
- utils.Debugf("Error resize: %s", err)
+ log.Debugf("Error resize: %s", err)
}
}

@@ -237,7 +238,7 @@ func (cli *DockerCli) getTtySize() (int, int) {
}
ws, err := term.GetWinsize(cli.terminalFd)
if err != nil {
- utils.Debugf("Error getting size: %s", err)
+ log.Debugf("Error getting size: %s", err)
if ws == nil {
return 0, 0
}
@@ -6,9 +6,9 @@ import (
"strings"

"github.com/docker/docker/engine"
+ "github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/version"
- "github.com/docker/docker/utils"
)

const (
@@ -43,7 +43,7 @@ func DisplayablePorts(ports *engine.Table) string {
func MatchesContentType(contentType, expectedType string) bool {
mimetype, _, err := mime.ParseMediaType(contentType)
if err != nil {
- utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
+ log.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
}
return err == nil && mimetype == expectedType
}
@@ -11,7 +11,6 @@ import (
"fmt"
"io"
"io/ioutil"
- "log"
"net"
"net/http"
"net/http/pprof"
@@ -21,17 +20,18 @@ import (
"syscall"

"code.google.com/p/go.net/websocket"
+ "github.com/docker/libcontainer/user"
+ "github.com/gorilla/mux"

"github.com/docker/docker/api"
"github.com/docker/docker/engine"
"github.com/docker/docker/pkg/listenbuffer"
+ "github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/systemd"
"github.com/docker/docker/pkg/version"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
- "github.com/docker/libcontainer/user"
- "github.com/gorilla/mux"
)

var (
@@ -88,7 +88,7 @@ func httpError(w http.ResponseWriter, err error) {
}

if err != nil {
- utils.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
+ log.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
http.Error(w, err.Error(), statusCode)
}
}
@@ -439,7 +439,7 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit
stdoutBuffer = bytes.NewBuffer(nil)
)
if err := config.Decode(r.Body); err != nil {
- utils.Errorf("%s", err)
+ log.Errorf("%s", err)
}

if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") {
@@ -878,7 +878,7 @@ func wsContainersAttach(eng *engine.Engine, version version.Version, w http.Resp
job.Stdout.Add(ws)
job.Stderr.Set(ws)
if err := job.Run(); err != nil {
- utils.Errorf("Error attaching websocket: %s", err)
+ log.Errorf("Error attaching websocket: %s", err)
}
})
h.ServeHTTP(w, r)
@@ -1005,7 +1005,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp
job := eng.Job("container_copy", vars["name"], copyData.Get("Resource"))
job.Stdout.Add(w)
if err := job.Run(); err != nil {
- utils.Errorf("%s", err.Error())
+ log.Errorf("%s", err.Error())
if strings.Contains(err.Error(), "No such container") {
w.WriteHeader(http.StatusNotFound)
} else if strings.Contains(err.Error(), "no such file or directory") {
@@ -1033,16 +1033,16 @@ func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r
func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// log the request
- utils.Debugf("Calling %s %s", localMethod, localRoute)
+ log.Debugf("Calling %s %s", localMethod, localRoute)

if logging {
- log.Println(r.Method, r.RequestURI)
+ log.Infof("%s %s\n", r.Method, r.RequestURI)
}

if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
- utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
+ log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
}
}
version := version.Version(mux.Vars(r)["version"])
@@ -1059,7 +1059,7 @@ func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, local
}

if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
- utils.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
+ log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
httpError(w, err)
}
}
@@ -1148,7 +1148,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st

for method, routes := range m {
for route, fct := range routes {
- utils.Debugf("Registering %s, %s", method, route)
+ log.Debugf("Registering %s, %s", method, route)
// NOTE: scope issue, make sure the variables are local and won't be changed
localRoute := route
localFct := fct
@@ -1238,7 +1238,7 @@ func changeGroup(addr string, nameOrGid string) error {
return err
}

- utils.Debugf("%s group found. gid: %d", nameOrGid, gid)
+ log.Debugf("%s group found. gid: %d", nameOrGid, gid)
return os.Chown(addr, 0, gid)
}

@@ -1309,7 +1309,7 @@ func ListenAndServe(proto, addr string, job *engine.Job) error {
switch proto {
case "tcp":
if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
- log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
+ log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
}
case "unix":
socketGroup := job.Getenv("SocketGroup")
@@ -1317,7 +1317,7 @@ func ListenAndServe(proto, addr string, job *engine.Job) error {
if err := changeGroup(addr, socketGroup); err != nil {
if socketGroup == "docker" {
// if the user hasn't explicitly specified the group ownership, don't fail on errors.
- utils.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error())
+ log.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error())
} else {
return err
}
@@ -1352,7 +1352,7 @@ func ServeApi(job *engine.Job) engine.Status {
return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
}
go func() {
- log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
+ log.Infof("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
}()
}
@@ -16,9 +16,11 @@ import (
"strings"
"syscall"

+ "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+ "github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/utils"
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)

type (
@@ -61,7 +63,7 @@ func DetectCompression(source []byte) Compression {
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
} {
if len(source) < len(m) {
- utils.Debugf("Len too short")
+ log.Debugf("Len too short")
continue
}
if bytes.Compare(m, source[:len(m)]) == 0 {
@@ -83,7 +85,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
if err != nil {
return nil, err
}
- utils.Debugf("[tar autodetect] n: %v", bs)
+ log.Debugf("[tar autodetect] n: %v", bs)

compression := DetectCompression(bs)

@@ -252,7 +254,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}

case tar.TypeXGlobalHeader:
- utils.Debugf("PAX Global Extended Headers found and ignored")
+ log.Debugf("PAX Global Extended Headers found and ignored")
return nil

default:
@@ -340,7 +342,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
for _, include := range options.Includes {
filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
if err != nil {
- utils.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err)
+ log.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err)
return nil
}

@@ -351,7 +353,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)

skip, err := utils.Matches(relFilePath, options.Excludes)
if err != nil {
- utils.Debugf("Error matching %s\n", relFilePath, err)
+ log.Debugf("Error matching %s\n", relFilePath, err)
return err
}

@@ -363,7 +365,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}

if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil {
- utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err)
+ log.Debugf("Can't add file %s to tar: %s\n", srcPath, err)
}
return nil
})
@@ -371,13 +373,13 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)

// Make sure to check the error on Close.
if err := tw.Close(); err != nil {
- utils.Debugf("Can't close tar writer: %s\n", err)
+ log.Debugf("Can't close tar writer: %s\n", err)
}
if err := compressWriter.Close(); err != nil {
- utils.Debugf("Can't close compress writer: %s\n", err)
+ log.Debugf("Can't close compress writer: %s\n", err)
}
if err := pipeWriter.Close(); err != nil {
- utils.Debugf("Can't close pipe writer: %s\n", err)
+ log.Debugf("Can't close pipe writer: %s\n", err)
}
}()

@@ -489,7 +491,7 @@ loop:
// the output of one piped into the other. If either Tar or Untar fails,
// TarUntar aborts and returns the error.
func TarUntar(src string, dst string) error {
- utils.Debugf("TarUntar(%s %s)", src, dst)
+ log.Debugf("TarUntar(%s %s)", src, dst)
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
if err != nil {
return err
@@ -526,11 +528,11 @@ func CopyWithTar(src, dst string) error {
return CopyFileWithTar(src, dst)
}
// Create dst, copy src's content into it
- utils.Debugf("Creating dest directory: %s", dst)
+ log.Debugf("Creating dest directory: %s", dst)
if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
return err
}
- utils.Debugf("Calling TarUntar(%s, %s)", src, dst)
+ log.Debugf("Calling TarUntar(%s, %s)", src, dst)
return TarUntar(src, dst)
}

@@ -541,7 +543,7 @@ func CopyWithTar(src, dst string) error {
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
func CopyFileWithTar(src, dst string) (err error) {
- utils.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+ log.Debugf("CopyFileWithTar(%s, %s)", src, dst)
srcSt, err := os.Stat(src)
if err != nil {
return err
@@ -11,9 +11,10 @@ import (
"syscall"
"time"

- "github.com/docker/docker/pkg/system"
- "github.com/docker/docker/utils"
"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+
+ "github.com/docker/docker/pkg/log"
+ "github.com/docker/docker/pkg/system"
)

type ChangeType int
@@ -363,19 +364,19 @@ func ExportChanges(dir string, changes []Change) (Archive, error) {
ChangeTime: timestamp,
}
if err := tw.WriteHeader(hdr); err != nil {
- utils.Debugf("Can't write whiteout header: %s\n", err)
+ log.Debugf("Can't write whiteout header: %s\n", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil {
- utils.Debugf("Can't add file %s to tar: %s\n", path, err)
+ log.Debugf("Can't add file %s to tar: %s\n", path, err)
}
}
}

// Make sure to check the error on Close.
if err := tw.Close(); err != nil {
- utils.Debugf("Can't close layer: %s\n", err)
+ log.Debugf("Can't close layer: %s\n", err)
}
writer.Close()
}()
@@ -9,6 +9,7 @@ import (

"github.com/docker/docker/engine"
"github.com/docker/docker/pkg/jsonlog"
+ "github.com/docker/docker/pkg/log"
"github.com/docker/docker/utils"
)

@@ -36,25 +37,25 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
cLog, err := container.ReadLog("json")
if err != nil && os.IsNotExist(err) {
// Legacy logs
- utils.Debugf("Old logs format")
+ log.Debugf("Old logs format")
if stdout {
cLog, err := container.ReadLog("stdout")
if err != nil {
- utils.Errorf("Error reading logs (stdout): %s", err)
+ log.Errorf("Error reading logs (stdout): %s", err)
} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
- utils.Errorf("Error streaming logs (stdout): %s", err)
+ log.Errorf("Error streaming logs (stdout): %s", err)
}
}
if stderr {
cLog, err := container.ReadLog("stderr")
if err != nil {
- utils.Errorf("Error reading logs (stderr): %s", err)
+ log.Errorf("Error reading logs (stderr): %s", err)
} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
- utils.Errorf("Error streaming logs (stderr): %s", err)
+ log.Errorf("Error streaming logs (stderr): %s", err)
}
}
} else if err != nil {
- utils.Errorf("Error reading logs (json): %s", err)
+ log.Errorf("Error reading logs (json): %s", err)
} else {
dec := json.NewDecoder(cLog)
for {
@@ -63,7 +64,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
if err := dec.Decode(l); err == io.EOF {
break
} else if err != nil {
- utils.Errorf("Error streaming logs: %s", err)
+ log.Errorf("Error streaming logs: %s", err)
break
}
if l.Stream == "stdout" && stdout {
@@ -88,7 +89,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
r, w := io.Pipe()
go func() {
defer w.Close()
- defer utils.Debugf("Closing buffered stdin pipe")
+ defer log.Debugf("Closing buffered stdin pipe")
io.Copy(w, job.Stdin)
}()
cStdin = r
@@ -131,8 +132,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
errors <- err
} else {
go func() {
- utils.Debugf("attach: stdin: begin")
- defer utils.Debugf("attach: stdin: end")
+ log.Debugf("attach: stdin: begin")
+ defer log.Debugf("attach: stdin: end")
// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
if container.Config.StdinOnce && !container.Config.Tty {
defer cStdin.Close()
@@ -155,7 +156,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
err = nil
}
if err != nil {
- utils.Errorf("attach: stdin: %s", err)
+ log.Errorf("attach: stdin: %s", err)
}
errors <- err
}()
@@ -168,8 +169,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
} else {
cStdout = p
go func() {
- utils.Debugf("attach: stdout: begin")
- defer utils.Debugf("attach: stdout: end")
+ log.Debugf("attach: stdout: begin")
+ defer log.Debugf("attach: stdout: end")
// If we are in StdinOnce mode, then close stdin
if container.Config.StdinOnce && stdin != nil {
defer stdin.Close()
@@ -182,7 +183,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
err = nil
}
if err != nil {
- utils.Errorf("attach: stdout: %s", err)
+ log.Errorf("attach: stdout: %s", err)
}
errors <- err
}()
@@ -193,7 +194,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
defer stdinCloser.Close()
}
if cStdout, err := container.StdoutPipe(); err != nil {
- utils.Errorf("attach: stdout pipe: %s", err)
+ log.Errorf("attach: stdout pipe: %s", err)
} else {
io.Copy(&utils.NopWriter{}, cStdout)
}
@@ -206,8 +207,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
} else {
cStderr = p
go func() {
- utils.Debugf("attach: stderr: begin")
- defer utils.Debugf("attach: stderr: end")
+ log.Debugf("attach: stderr: begin")
+ defer log.Debugf("attach: stderr: end")
// If we are in StdinOnce mode, then close stdin
if container.Config.StdinOnce && stdin != nil {
defer stdin.Close()
@@ -220,7 +221,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
err = nil
}
if err != nil {
- utils.Errorf("attach: stderr: %s", err)
+ log.Errorf("attach: stderr: %s", err)
}
errors <- err
}()
@@ -232,7 +233,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
}

if cStderr, err := container.StderrPipe(); err != nil {
- utils.Errorf("attach: stdout pipe: %s", err)
+ log.Errorf("attach: stdout pipe: %s", err)
} else {
io.Copy(&utils.NopWriter{}, cStderr)
}
@@ -252,14 +253,14 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
// FIXME: how to clean up the stdin goroutine without the unwanted side effect
// of closing the passed stdin? Add an intermediary io.Pipe?
for i := 0; i < nJobs; i += 1 {
- utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
+ log.Debugf("attach: waiting for job %d/%d", i+1, nJobs)
if err := <-errors; err != nil {
- utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
+ log.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err)
return err
}
- utils.Debugf("attach: job %d completed successfully", i+1)
+ log.Debugf("attach: job %d completed successfully", i+1)
}
- utils.Debugf("attach: all jobs completed successfully")
+ log.Debugf("attach: all jobs completed successfully")
return nil
})
}
@@ -23,6 +23,7 @@ import (
"github.com/docker/docker/archive"
"github.com/docker/docker/engine"
"github.com/docker/docker/nat"
+ "github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/system"
@@ -262,11 +263,11 @@ func (b *buildFile) probeCache() (bool, error) {
return false, err
} else if cache != nil {
fmt.Fprintf(b.outStream, " ---> Using cache\n")
- utils.Debugf("[BUILDER] Use cached version")
+ log.Debugf("[BUILDER] Use cached version")
b.image = cache.ID
return true, nil
} else {
- utils.Debugf("[BUILDER] Cache miss")
+ log.Debugf("[BUILDER] Cache miss")
}
}
return false, nil
@@ -288,7 +289,7 @@ func (b *buildFile) CmdRun(args string) error {

defer func(cmd []string) { b.config.Cmd = cmd }(cmd)

- utils.Debugf("Command to be executed: %v", b.config.Cmd)
+ log.Debugf("Command to be executed: %v", b.config.Cmd)

hit, err := b.probeCache()
if err != nil {
@@ -378,7 +379,7 @@ func (b *buildFile) CmdEnv(args string) error {
func (b *buildFile) buildCmdFromJson(args string) []string {
var cmd []string
if err := json.Unmarshal([]byte(args), &cmd); err != nil {
- utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
+ log.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
cmd = []string{"/bin/sh", "-c", args}
}
return cmd
@@ -551,7 +552,7 @@ func (b *buildFile) addContext(container *Container, orig, dest string, decompre
if err := archive.UntarPath(origPath, tarDest); err == nil {
return nil
} else if err != io.EOF {
- utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
+ log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
}
}

@@ -6,7 +6,6 @@ import (
"fmt"
"io"
"io/ioutil"
- "log"
"os"
"path"
"path/filepath"
@@ -15,6 +14,9 @@ import (
"syscall"
"time"

+ "github.com/docker/libcontainer/devices"
+ "github.com/docker/libcontainer/label"
+
"github.com/docker/docker/archive"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/graphdriver"
@@ -23,13 +25,12 @@ import (
"github.com/docker/docker/links"
"github.com/docker/docker/nat"
"github.com/docker/docker/pkg/broadcastwriter"
+ "github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/networkfs/etchosts"
"github.com/docker/docker/pkg/networkfs/resolvconf"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
- "github.com/docker/libcontainer/devices"
- "github.com/docker/libcontainer/label"
)

const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
@@ -171,7 +172,7 @@ func (container *Container) WriteHostConfig() error {
func (container *Container) LogEvent(action string) {
d := container.daemon
if err := d.eng.Job("log", action, container.ID, d.Repositories().ImageName(container.Image)).Run(); err != nil {
- utils.Errorf("Error logging event %s for %s: %s", action, container.ID, err)
+ log.Errorf("Error logging event %s for %s: %s", action, container.ID, err)
}
}

@@ -503,7 +504,7 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
exitCode, err = container.daemon.Run(container, pipes, callback)
if err != nil {
- utils.Errorf("Error running container: %s", err)
+ log.Errorf("Error running container: %s", err)
}
container.State.SetStopped(exitCode)

@@ -519,7 +520,7 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
// This will cause it to be restarted when the engine is restarted.
if container.daemon != nil && container.daemon.eng != nil && !container.daemon.eng.IsShutdown() {
if err := container.toDisk(); err != nil {
- utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
+ log.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
}
}
return err
@@ -536,28 +537,28 @@ func (container *Container) cleanup() {
}
if container.Config.OpenStdin {
if err := container.stdin.Close(); err != nil {
- utils.Errorf("%s: Error close stdin: %s", container.ID, err)
+ log.Errorf("%s: Error close stdin: %s", container.ID, err)
}
}
if err := container.stdout.Clean(); err != nil {
- utils.Errorf("%s: Error close stdout: %s", container.ID, err)
+ log.Errorf("%s: Error close stdout: %s", container.ID, err)
}
if err := container.stderr.Clean(); err != nil {
- utils.Errorf("%s: Error close stderr: %s", container.ID, err)
+ log.Errorf("%s: Error close stderr: %s", container.ID, err)
}
if container.command != nil && container.command.Terminal != nil {
if err := container.command.Terminal.Close(); err != nil {
- utils.Errorf("%s: Error closing terminal: %s", container.ID, err)
+ log.Errorf("%s: Error closing terminal: %s", container.ID, err)
}
}

if err := container.Unmount(); err != nil {
- log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
+ log.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
}
}

func (container *Container) KillSig(sig int) error {
- utils.Debugf("Sending %d to %s", sig, container.ID)
+ log.Debugf("Sending %d to %s", sig, container.ID)
container.Lock()
defer container.Unlock()

@@ -606,7 +607,7 @@ func (container *Container) Kill() error {
if _, err := container.State.WaitStop(10 * time.Second); err != nil {
// Ensure that we don't kill ourselves
if pid := container.State.GetPid(); pid != 0 {
- log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
+ log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
if err := syscall.Kill(pid, 9); err != nil {
return err
}
@@ -624,7 +625,7 @@ func (container *Container) Stop(seconds int) error {

// 1. Send a SIGTERM
if err := container.KillSig(15); err != nil {
- log.Print("Failed to send SIGTERM to the process, force killing")
+ log.Infof("Failed to send SIGTERM to the process, force killing")
if err := container.KillSig(9); err != nil {
return err
}
@@ -632,7 +633,7 @@ func (container *Container) Stop(seconds int) error {

// 2. Wait for the process to exit on its own
if _, err := container.State.WaitStop(time.Duration(seconds) * time.Second); err != nil {
- log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
+ log.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
// 3. If it doesn't, then send SIGKILL
if err := container.Kill(); err != nil {
container.State.WaitStop(-1 * time.Second)
@@ -761,7 +762,7 @@ func (container *Container) GetSize() (int64, int64) {
)

if err := container.Mount(); err != nil {
- utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
+ log.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
return sizeRw, sizeRootfs
}
defer container.Unmount()
@@ -769,7 +770,7 @@ func (container *Container) GetSize() (int64, int64) {
if differ, ok := container.daemon.driver.(graphdriver.Differ); ok {
sizeRw, err = differ.DiffSize(container.ID)
if err != nil {
- utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
+ log.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
// FIXME: GetSize should return an error. Not changing it now in case
// there is a side-effect.
sizeRw = -1
@@ -866,7 +867,7 @@ func (container *Container) DisableLink(name string) {
if link, exists := container.activeLinks[name]; exists {
link.Disable()
} else {
- utils.Debugf("Could not find active link for %s", name)
+ log.Debugf("Could not find active link for %s", name)
}
}
}
@@ -978,15 +979,15 @@ func (container *Container) initializeNetworking() error {
// Make sure the config is compatible with the current kernel
func (container *Container) verifyDaemonSettings() {
if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
- log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
+ log.Infof("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
container.Config.Memory = 0
}
if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit {
- log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
+ log.Infof("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
container.Config.MemorySwap = -1
}
if container.daemon.sysInfo.IPv4ForwardingDisabled {
- log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
+ log.Infof("WARNING: IPv4 forwarding is disabled. Networking will not work")
}
}

@@ -1123,7 +1124,7 @@ func (container *Container) waitForStart() error {
}
container.State.SetRunning(command.Pid())
if err := container.toDisk(); err != nil {
- utils.Debugf("%s", err)
+ log.Debugf("%s", err)
}
close(waitStart)
}
@@ -4,7 +4,6 @@ import (
"fmt"
"io"
"io/ioutil"
- "log"
"os"
"path"
"regexp"
@@ -13,6 +12,8 @@ import (
"sync"
"time"

+ "github.com/docker/libcontainer/label"
+
"github.com/docker/docker/archive"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/execdriver/execdrivers"
@@ -27,6 +28,7 @@ import (
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/graphdb"
+ "github.com/docker/docker/pkg/log"
"github.com/docker/docker/pkg/namesgenerator"
"github.com/docker/docker/pkg/networkfs/resolvconf"
"github.com/docker/docker/pkg/parsers"
@@ -35,7 +37,6 @@ import (
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
- "github.com/docker/libcontainer/label"
)

// Set the max depth to the aufs default that most
@@ -217,7 +218,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
// if so, then we need to restart monitor and init a new lock
// If the container is supposed to be running, make sure of it
if container.State.IsRunning() {
- utils.Debugf("killing old running container %s", container.ID)
+ log.Debugf("killing old running container %s", container.ID)

existingPid := container.State.Pid
container.State.SetStopped(0)
@@ -234,23 +235,23 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
var err error
cmd.Process, err = os.FindProcess(existingPid)
if err != nil {
- utils.Debugf("cannot find existing process for %d", existingPid)
+ log.Debugf("cannot find existing process for %d", existingPid)
}
daemon.execDriver.Terminate(cmd)
}

if err := container.Unmount(); err != nil {
- utils.Debugf("unmount error %s", err)
+ log.Debugf("unmount error %s", err)
}
if err := container.ToDisk(); err != nil {
- utils.Debugf("saving stopped state to disk %s", err)
+ log.Debugf("saving stopped state to disk %s", err)
}

info := daemon.execDriver.Info(container.ID)
if !info.IsRunning() {
- utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
+ log.Debugf("Container %s was supposed to be running but is not.", container.ID)

- utils.Debugf("Marking as stopped")
+ log.Debugf("Marking as stopped")

container.State.SetStopped(-127)
if err := container.ToDisk(); err != nil {
@@ -258,7 +259,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
}

if daemon.config.AutoRestart {
- utils.Debugf("Marking as restarting")
+ log.Debugf("Marking as restarting")

if containersToStart != nil {
*containersToStart = append(*containersToStart, container)
@@ -278,7 +279,7 @@ func (daemon *Daemon) ensureName(container *Container) error {
container.Name = name

if err := container.ToDisk(); err != nil {
- utils.Debugf("Error saving container name %s", err)
+ log.Debugf("Error saving container name %s", err)
}
}
return nil
@@ -302,7 +303,7 @@ func (daemon *Daemon) restore() error {
)

if !debug {
- fmt.Printf("Loading containers: ")
+ log.Infof("Loading containers: ")
}
dir, err := ioutil.ReadDir(daemon.repository)
if err != nil {
@@ -316,16 +317,16 @@ func (daemon *Daemon) restore() error {
fmt.Print(".")
}
if err != nil {
- utils.Errorf("Failed to load container %v: %v", id, err)
+ log.Errorf("Failed to load container %v: %v", id, err)
continue
}

// Ignore the container if it does not support the current driver being used by the graph
if container.Driver == "" && currentDriver == "aufs" || container.Driver == currentDriver {
- utils.Debugf("Loaded container %v", container.ID)
+ log.Debugf("Loaded container %v", container.ID)
containers[container.ID] = container
} else {
- utils.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
+ log.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}

@@ -337,7 +338,7 @@ func (daemon *Daemon) restore() error {
e := entities[p]
if container, ok := containers[e.ID()]; ok {
if err := daemon.register(container, false, &containersToStart); err != nil {
- utils.Debugf("Failed to register container %s: %s", container.ID, err)
+ log.Debugf("Failed to register container %s: %s", container.ID, err)
}
delete(containers, e.ID())
}
@@ -349,22 +350,22 @@ func (daemon *Daemon) restore() error {
// Try to set the default name for a container if it exists prior to links
container.Name, err = daemon.generateNewName(container.ID)
if err != nil {
- utils.Debugf("Setting default id - %s", err)
+ log.Debugf("Setting default id - %s", err)
}
if err := daemon.register(container, false, &containersToStart); err != nil {
- utils.Debugf("Failed to register container %s: %s", container.ID, err)
+ log.Debugf("Failed to register container %s: %s", container.ID, err)
}
}

for _, container := range containersToStart {
- utils.Debugf("Starting container %d", container.ID)
+ log.Debugf("Starting container %d", container.ID)
if err := container.Start(); err != nil {
- utils.Debugf("Failed to start container %s: %s", container.ID, err)
+ log.Debugf("Failed to start container %s: %s", container.ID, err)
}
}

if !debug {
- fmt.Printf(": done.\n")
+ log.Infof(": done.\n")
}

return nil
@@ -707,7 +708,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
log.Fatalf("The Docker daemon needs to be run as root")
}
if err := checkKernelAndArch(); err != nil {
- log.Fatal(err)
+ log.Fatalf(err.Error())
}

// set up the TempDir to use a canonical path
@@ -748,7 +749,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
if err != nil {
return nil, err
}
- utils.Debugf("Using graph driver %s", driver)
+ log.Debugf("Using graph driver %s", driver)

// As Docker on btrfs and SELinux are incompatible at present, error on both being enabled
if config.EnableSelinuxSupport && driver.String() == "btrfs" {
@@ -766,7 +767,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
return nil, err
}

- utils.Debugf("Creating images graph")
+ log.Debugf("Creating images graph")
g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver)
if err != nil {
return nil, err
@@ -778,12 +779,12 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
if err != nil {
return nil, err
}
- utils.Debugf("Creating volumes graph")
+ log.Debugf("Creating volumes graph")
volumes, err := graph.NewGraph(path.Join(config.Root, "volumes"), volumesDriver)
if err != nil {
return nil, err
}
- utils.Debugf("Creating repository list")
+ log.Debugf("Creating repository list")
repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g)
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
@@ -862,18 +863,18 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
eng.OnShutdown(func() {
// FIXME: if these cleanup steps can be called concurrently, register
// them as separate handlers to speed up total shutdown time
- // FIXME: use engine logging instead of utils.Errorf
+ // FIXME: use engine logging instead of log.Errorf
if err := daemon.shutdown(); err != nil {
- utils.Errorf("daemon.shutdown(): %s", err)
+ log.Errorf("daemon.shutdown(): %s", err)
}
if err := portallocator.ReleaseAll(); err != nil {
- utils.Errorf("portallocator.ReleaseAll(): %s", err)
+ log.Errorf("portallocator.ReleaseAll(): %s", err)
}
if err := daemon.driver.Cleanup(); err != nil {
- utils.Errorf("daemon.driver.Cleanup(): %s", err.Error())
+ log.Errorf("daemon.driver.Cleanup(): %s", err.Error())
}
if err := daemon.containerGraph.Close(); err != nil {
- utils.Errorf("daemon.containerGraph.Close(): %s", err.Error())
+ log.Errorf("daemon.containerGraph.Close(): %s", err.Error())
}
})

@@ -882,20 +883,20 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)

func (daemon *Daemon) shutdown() error {
group := sync.WaitGroup{}
- utils.Debugf("starting clean shutdown of all containers...")
+ log.Debugf("starting clean shutdown of all containers...")
for _, container := range daemon.List() {
c := container
if c.State.IsRunning() {
- utils.Debugf("stopping %s", c.ID)
+ log.Debugf("stopping %s", c.ID)
group.Add(1)

go func() {
defer group.Done()
if err := c.KillSig(15); err != nil {
- utils.Debugf("kill 15 error for %s - %s", c.ID, err)
+ log.Debugf("kill 15 error for %s - %s", c.ID, err)
}
c.State.WaitStop(-1 * time.Second)
- utils.Debugf("container stopped %s", c.ID)
+ log.Debugf("container stopped %s", c.ID)
}()
}
}
@@ -1056,7 +1057,7 @@ func (daemon *Daemon) checkLocaldns() error {
return err
}
if len(daemon.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
- log.Printf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns)
+ log.Infof("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns)
daemon.config.Dns = DefaultDns
}
return nil
@@ -1107,11 +1108,11 @@ func checkKernelAndArch() error {
// the circumstances of pre-3.8 crashes are clearer.
// For details see http://github.com/docker/docker/issues/407
if k, err := kernel.GetKernelVersion(); err != nil {
- log.Printf("WARNING: %s\n", err)
+ log.Infof("WARNING: %s\n", err)
} else {
if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
- log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
+ log.Infof("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
}
}
}
@@ -6,14 +6,14 @@ import ( |
| 6 | 6 |
"github.com/docker/docker/daemon/graphdriver" |
| 7 | 7 |
"github.com/docker/docker/daemon/graphdriver/aufs" |
| 8 | 8 |
"github.com/docker/docker/graph" |
| 9 |
- "github.com/docker/docker/utils" |
|
| 9 |
+ "github.com/docker/docker/pkg/log" |
|
| 10 | 10 |
) |
| 11 | 11 |
|
| 12 | 12 |
// Given the graphdriver ad, if it is aufs, then migrate it. |
| 13 | 13 |
// If aufs driver is not built, this func is a noop. |
| 14 | 14 |
func migrateIfAufs(driver graphdriver.Driver, root string) error {
|
| 15 | 15 |
if ad, ok := driver.(*aufs.Driver); ok {
|
| 16 |
- utils.Debugf("Migrating existing containers")
|
|
| 16 |
+ log.Debugf("Migrating existing containers")
|
|
| 17 | 17 |
if err := ad.Migrate(root, graph.SetupInitLayer); err != nil {
|
| 18 | 18 |
return err |
| 19 | 19 |
} |
| ... | ... |
@@ -2,14 +2,13 @@ package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"fmt" |
| 5 |
- "log" |
|
| 6 | 5 |
"os" |
| 7 | 6 |
"path" |
| 8 | 7 |
"path/filepath" |
| 9 | 8 |
"strings" |
| 10 | 9 |
|
| 11 | 10 |
"github.com/docker/docker/engine" |
| 12 |
- "github.com/docker/docker/utils" |
|
| 11 |
+ "github.com/docker/docker/pkg/log" |
|
| 13 | 12 |
) |
| 14 | 13 |
|
| 15 | 14 |
// FIXME: rename to ContainerRemove for consistency with the CLI command. |
| ... | ... |
@@ -118,7 +117,7 @@ func (daemon *Daemon) ContainerDestroy(job *engine.Job) engine.Status {
|
| 118 | 118 |
for volumeId := range volumes {
|
| 119 | 119 |
// If the requested volu |
| 120 | 120 |
if c, exists := usedVolumes[volumeId]; exists {
|
| 121 |
- log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
|
|
| 121 |
+ log.Infof("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
|
|
| 122 | 122 |
continue |
| 123 | 123 |
} |
| 124 | 124 |
if err := daemon.Volumes().Delete(volumeId); err != nil {
|
| ... | ... |
@@ -153,7 +152,7 @@ func (daemon *Daemon) Destroy(container *Container) error {
|
| 153 | 153 |
daemon.containers.Delete(container.ID) |
| 154 | 154 |
|
| 155 | 155 |
if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
|
| 156 |
- utils.Debugf("Unable to remove container from link graph: %s", err)
|
|
| 156 |
+ log.Debugf("Unable to remove container from link graph: %s", err)
|
|
| 157 | 157 |
} |
| 158 | 158 |
|
| 159 | 159 |
if err := daemon.driver.Remove(container.ID); err != nil {
|
| ... | ... |
@@ -14,13 +14,15 @@ import ( |
| 14 | 14 |
"syscall" |
| 15 | 15 |
"time" |
| 16 | 16 |
|
| 17 |
+ "github.com/kr/pty" |
|
| 18 |
+ |
|
| 17 | 19 |
"github.com/docker/docker/daemon/execdriver" |
| 20 |
+ "github.com/docker/docker/pkg/log" |
|
| 18 | 21 |
"github.com/docker/docker/pkg/term" |
| 19 | 22 |
"github.com/docker/docker/utils" |
| 20 | 23 |
"github.com/docker/libcontainer/cgroups" |
| 21 | 24 |
"github.com/docker/libcontainer/label" |
| 22 | 25 |
"github.com/docker/libcontainer/mount/nodes" |
| 23 |
- "github.com/kr/pty" |
|
| 24 | 26 |
) |
| 25 | 27 |
|
| 26 | 28 |
const DriverName = "lxc" |
| ... | ... |
@@ -318,7 +320,7 @@ func (i *info) IsRunning() bool {
|
| 318 | 318 |
|
| 319 | 319 |
output, err := i.driver.getInfo(i.ID) |
| 320 | 320 |
if err != nil {
|
| 321 |
- utils.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
|
|
| 321 |
+ log.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
|
|
| 322 | 322 |
return false |
| 323 | 323 |
} |
| 324 | 324 |
if strings.Contains(string(output), "RUNNING") {
|
| ... | ... |
@@ -32,6 +32,7 @@ import ( |
| 32 | 32 |
|
| 33 | 33 |
"github.com/docker/docker/archive" |
| 34 | 34 |
"github.com/docker/docker/daemon/graphdriver" |
| 35 |
+ "github.com/docker/docker/pkg/log" |
|
| 35 | 36 |
mountpk "github.com/docker/docker/pkg/mount" |
| 36 | 37 |
"github.com/docker/docker/utils" |
| 37 | 38 |
"github.com/docker/libcontainer/label" |
| ... | ... |
@@ -209,7 +210,7 @@ func (a *Driver) Remove(id string) error {
|
| 209 | 209 |
defer a.Unlock() |
| 210 | 210 |
|
| 211 | 211 |
if a.active[id] != 0 {
|
| 212 |
- utils.Errorf("Warning: removing active id %s\n", id)
|
|
| 212 |
+ log.Errorf("Warning: removing active id %s\n", id)
|
|
| 213 | 213 |
} |
| 214 | 214 |
|
| 215 | 215 |
// Make sure the dir is umounted first |
| ... | ... |
@@ -378,7 +379,7 @@ func (a *Driver) Cleanup() error {
|
| 378 | 378 |
|
| 379 | 379 |
for _, id := range ids {
|
| 380 | 380 |
if err := a.unmount(id); err != nil {
|
| 381 |
- utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
|
|
| 381 |
+ log.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
|
|
| 382 | 382 |
} |
| 383 | 383 |
} |
| 384 | 384 |
|
| ... | ... |
@@ -1,14 +1,15 @@ |
| 1 | 1 |
package aufs |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
- "github.com/docker/docker/utils" |
|
| 5 | 4 |
"os/exec" |
| 6 | 5 |
"syscall" |
| 6 |
+ |
|
| 7 |
+ "github.com/docker/docker/pkg/log" |
|
| 7 | 8 |
) |
| 8 | 9 |
|
| 9 | 10 |
func Unmount(target string) error {
|
| 10 | 11 |
if err := exec.Command("auplink", target, "flush").Run(); err != nil {
|
| 11 |
- utils.Errorf("[warning]: couldn't run auplink before unmount: %s", err)
|
|
| 12 |
+ log.Errorf("[warning]: couldn't run auplink before unmount: %s", err)
|
|
| 12 | 13 |
} |
| 13 | 14 |
if err := syscall.Unmount(target, 0); err != nil {
|
| 14 | 15 |
return err |
| ... | ... |
@@ -7,7 +7,7 @@ import ( |
| 7 | 7 |
"os" |
| 8 | 8 |
"syscall" |
| 9 | 9 |
|
| 10 |
- "github.com/docker/docker/utils" |
|
| 10 |
+ "github.com/docker/docker/pkg/log" |
|
| 11 | 11 |
) |
| 12 | 12 |
|
| 13 | 13 |
func stringToLoopName(src string) [LoNameSize]uint8 {
|
| ... | ... |
@@ -39,20 +39,20 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil |
| 39 | 39 |
fi, err := os.Stat(target) |
| 40 | 40 |
if err != nil {
|
| 41 | 41 |
if os.IsNotExist(err) {
|
| 42 |
- utils.Errorf("There are no more loopback devices available.")
|
|
| 42 |
+ log.Errorf("There are no more loopback devices available.")
|
|
| 43 | 43 |
} |
| 44 | 44 |
return nil, ErrAttachLoopbackDevice |
| 45 | 45 |
} |
| 46 | 46 |
|
| 47 | 47 |
if fi.Mode()&os.ModeDevice != os.ModeDevice {
|
| 48 |
- utils.Errorf("Loopback device %s is not a block device.", target)
|
|
| 48 |
+ log.Errorf("Loopback device %s is not a block device.", target)
|
|
| 49 | 49 |
continue |
| 50 | 50 |
} |
| 51 | 51 |
|
| 52 | 52 |
// OpenFile adds O_CLOEXEC |
| 53 | 53 |
loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) |
| 54 | 54 |
if err != nil {
|
| 55 |
- utils.Errorf("Error openning loopback device: %s", err)
|
|
| 55 |
+ log.Errorf("Error openning loopback device: %s", err)
|
|
| 56 | 56 |
return nil, ErrAttachLoopbackDevice |
| 57 | 57 |
} |
| 58 | 58 |
|
| ... | ... |
@@ -62,7 +62,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil |
| 62 | 62 |
|
| 63 | 63 |
// If the error is EBUSY, then try the next loopback |
| 64 | 64 |
if err != syscall.EBUSY {
|
| 65 |
- utils.Errorf("Cannot set up loopback device %s: %s", target, err)
|
|
| 65 |
+ log.Errorf("Cannot set up loopback device %s: %s", target, err)
|
|
| 66 | 66 |
return nil, ErrAttachLoopbackDevice |
| 67 | 67 |
} |
| 68 | 68 |
|
| ... | ... |
@@ -75,7 +75,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil |
| 75 | 75 |
|
| 76 | 76 |
// This can't happen, but let's be sure |
| 77 | 77 |
if loopFile == nil {
|
| 78 |
- utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
|
|
| 78 |
+ log.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
|
|
| 79 | 79 |
return nil, ErrAttachLoopbackDevice |
| 80 | 80 |
} |
| 81 | 81 |
|
| ... | ... |
@@ -91,13 +91,13 @@ func attachLoopDevice(sparseName string) (loop *os.File, err error) {
|
| 91 | 91 |
// loopback from index 0. |
| 92 | 92 |
startIndex, err := getNextFreeLoopbackIndex() |
| 93 | 93 |
if err != nil {
|
| 94 |
- utils.Debugf("Error retrieving the next available loopback: %s", err)
|
|
| 94 |
+ log.Debugf("Error retrieving the next available loopback: %s", err)
|
|
| 95 | 95 |
} |
| 96 | 96 |
|
| 97 | 97 |
// OpenFile adds O_CLOEXEC |
| 98 | 98 |
sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) |
| 99 | 99 |
if err != nil {
|
| 100 |
- utils.Errorf("Error openning sparse file %s: %s", sparseName, err)
|
|
| 100 |
+ log.Errorf("Error openning sparse file %s: %s", sparseName, err)
|
|
| 101 | 101 |
return nil, ErrAttachLoopbackDevice |
| 102 | 102 |
} |
| 103 | 103 |
defer sparseFile.Close() |
| ... | ... |
@@ -115,11 +115,11 @@ func attachLoopDevice(sparseName string) (loop *os.File, err error) {
|
| 115 | 115 |
} |
| 116 | 116 |
|
| 117 | 117 |
if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
|
| 118 |
- utils.Errorf("Cannot set up loopback device info: %s", err)
|
|
| 118 |
+ log.Errorf("Cannot set up loopback device info: %s", err)
|
|
| 119 | 119 |
|
| 120 | 120 |
// If the call failed, then free the loopback device |
| 121 | 121 |
if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
|
| 122 |
- utils.Errorf("Error while cleaning up the loopback device")
|
|
| 122 |
+ log.Errorf("Error while cleaning up the loopback device")
|
|
| 123 | 123 |
} |
| 124 | 124 |
loopFile.Close() |
| 125 | 125 |
return nil, ErrAttachLoopbackDevice |
| ... | ... |
@@ -19,9 +19,9 @@ import ( |
| 19 | 19 |
"time" |
| 20 | 20 |
|
| 21 | 21 |
"github.com/docker/docker/daemon/graphdriver" |
| 22 |
+ "github.com/docker/docker/pkg/log" |
|
| 22 | 23 |
"github.com/docker/docker/pkg/parsers" |
| 23 | 24 |
"github.com/docker/docker/pkg/units" |
| 24 |
- "github.com/docker/docker/utils" |
|
| 25 | 25 |
"github.com/docker/libcontainer/label" |
| 26 | 26 |
) |
| 27 | 27 |
|
| ... | ... |
@@ -174,7 +174,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
|
| 174 | 174 |
if !os.IsNotExist(err) {
|
| 175 | 175 |
return "", err |
| 176 | 176 |
} |
| 177 |
- utils.Debugf("Creating loopback file %s for device-manage use", filename)
|
|
| 177 |
+ log.Debugf("Creating loopback file %s for device-manage use", filename)
|
|
| 178 | 178 |
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) |
| 179 | 179 |
if err != nil {
|
| 180 | 180 |
return "", err |
| ... | ... |
@@ -252,7 +252,7 @@ func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) {
|
| 252 | 252 |
} |
| 253 | 253 |
|
| 254 | 254 |
func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) {
|
| 255 |
- utils.Debugf("registerDevice(%v, %v)", id, hash)
|
|
| 255 |
+ log.Debugf("registerDevice(%v, %v)", id, hash)
|
|
| 256 | 256 |
info := &DevInfo{
|
| 257 | 257 |
Hash: hash, |
| 258 | 258 |
DeviceId: id, |
| ... | ... |
@@ -278,7 +278,7 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev |
| 278 | 278 |
} |
| 279 | 279 |
|
| 280 | 280 |
func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error {
|
| 281 |
- utils.Debugf("activateDeviceIfNeeded(%v)", info.Hash)
|
|
| 281 |
+ log.Debugf("activateDeviceIfNeeded(%v)", info.Hash)
|
|
| 282 | 282 |
|
| 283 | 283 |
if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 {
|
| 284 | 284 |
return nil |
| ... | ... |
@@ -385,13 +385,13 @@ func (devices *DeviceSet) setupBaseImage() error {
|
| 385 | 385 |
} |
| 386 | 386 |
|
| 387 | 387 |
if oldInfo != nil && !oldInfo.Initialized {
|
| 388 |
- utils.Debugf("Removing uninitialized base image")
|
|
| 388 |
+ log.Debugf("Removing uninitialized base image")
|
|
| 389 | 389 |
if err := devices.deleteDevice(oldInfo); err != nil {
|
| 390 | 390 |
return err |
| 391 | 391 |
} |
| 392 | 392 |
} |
| 393 | 393 |
|
| 394 |
- utils.Debugf("Initializing base device-manager snapshot")
|
|
| 394 |
+ log.Debugf("Initializing base device-manager snapshot")
|
|
| 395 | 395 |
|
| 396 | 396 |
id := devices.nextDeviceId |
| 397 | 397 |
|
| ... | ... |
@@ -403,14 +403,14 @@ func (devices *DeviceSet) setupBaseImage() error {
|
| 403 | 403 |
// Ids are 24bit, so wrap around |
| 404 | 404 |
devices.nextDeviceId = (id + 1) & 0xffffff |
| 405 | 405 |
|
| 406 |
- utils.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize)
|
|
| 406 |
+ log.Debugf("Registering base device (id %v) with FS size %v", id, devices.baseFsSize)
|
|
| 407 | 407 |
info, err := devices.registerDevice(id, "", devices.baseFsSize) |
| 408 | 408 |
if err != nil {
|
| 409 | 409 |
_ = deleteDevice(devices.getPoolDevName(), id) |
| 410 | 410 |
return err |
| 411 | 411 |
} |
| 412 | 412 |
|
| 413 |
- utils.Debugf("Creating filesystem on base device-manager snapshot")
|
|
| 413 |
+ log.Debugf("Creating filesystem on base device-manager snapshot")
|
|
| 414 | 414 |
|
| 415 | 415 |
if err = devices.activateDeviceIfNeeded(info); err != nil {
|
| 416 | 416 |
return err |
| ... | ... |
@@ -448,7 +448,7 @@ func (devices *DeviceSet) log(level int, file string, line int, dmError int, mes |
| 448 | 448 |
return // Ignore _LOG_DEBUG |
| 449 | 449 |
} |
| 450 | 450 |
|
| 451 |
- utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
|
|
| 451 |
+ log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
|
|
| 452 | 452 |
} |
| 453 | 453 |
|
| 454 | 454 |
func major(device uint64) uint64 {
|
| ... | ... |
@@ -552,13 +552,13 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
| 552 | 552 |
// - The target of this device is at major <maj> and minor <min> |
| 553 | 553 |
// - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself. |
| 554 | 554 |
devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
|
| 555 |
- utils.Debugf("Generated prefix: %s", devices.devicePrefix)
|
|
| 555 |
+ log.Debugf("Generated prefix: %s", devices.devicePrefix)
|
|
| 556 | 556 |
|
| 557 | 557 |
// Check for the existence of the device <prefix>-pool |
| 558 |
- utils.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
|
|
| 558 |
+ log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
|
|
| 559 | 559 |
info, err := getInfo(devices.getPoolName()) |
| 560 | 560 |
if info == nil {
|
| 561 |
- utils.Debugf("Error device getInfo: %s", err)
|
|
| 561 |
+ log.Debugf("Error device getInfo: %s", err)
|
|
| 562 | 562 |
return err |
| 563 | 563 |
} |
| 564 | 564 |
|
| ... | ... |
@@ -574,7 +574,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
| 574 | 574 |
|
| 575 | 575 |
// If the pool doesn't exist, create it |
| 576 | 576 |
if info.Exists == 0 {
|
| 577 |
- utils.Debugf("Pool doesn't exist. Creating it.")
|
|
| 577 |
+ log.Debugf("Pool doesn't exist. Creating it.")
|
|
| 578 | 578 |
|
| 579 | 579 |
var ( |
| 580 | 580 |
dataFile *os.File |
| ... | ... |
@@ -596,7 +596,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
| 596 | 596 |
|
| 597 | 597 |
data, err := devices.ensureImage("data", devices.dataLoopbackSize)
|
| 598 | 598 |
if err != nil {
|
| 599 |
- utils.Debugf("Error device ensureImage (data): %s\n", err)
|
|
| 599 |
+ log.Debugf("Error device ensureImage (data): %s\n", err)
|
|
| 600 | 600 |
return err |
| 601 | 601 |
} |
| 602 | 602 |
|
| ... | ... |
@@ -627,7 +627,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
| 627 | 627 |
|
| 628 | 628 |
metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
|
| 629 | 629 |
if err != nil {
|
| 630 |
- utils.Debugf("Error device ensureImage (metadata): %s\n", err)
|
|
| 630 |
+ log.Debugf("Error device ensureImage (metadata): %s\n", err)
|
|
| 631 | 631 |
return err |
| 632 | 632 |
} |
| 633 | 633 |
|
| ... | ... |
@@ -659,7 +659,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
| 659 | 659 |
// Setup the base image |
| 660 | 660 |
if doInit {
|
| 661 | 661 |
if err := devices.setupBaseImage(); err != nil {
|
| 662 |
- utils.Debugf("Error device setupBaseImage: %s\n", err)
|
|
| 662 |
+ log.Debugf("Error device setupBaseImage: %s\n", err)
|
|
| 663 | 663 |
return err |
| 664 | 664 |
} |
| 665 | 665 |
} |
| ... | ... |
@@ -686,7 +686,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
|
| 686 | 686 |
deviceId := devices.nextDeviceId |
| 687 | 687 |
|
| 688 | 688 |
if err := createSnapDevice(devices.getPoolDevName(), &deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil {
|
| 689 |
- utils.Debugf("Error creating snap device: %s\n", err)
|
|
| 689 |
+ log.Debugf("Error creating snap device: %s\n", err)
|
|
| 690 | 690 |
return err |
| 691 | 691 |
} |
| 692 | 692 |
|
| ... | ... |
@@ -695,7 +695,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
|
| 695 | 695 |
|
| 696 | 696 |
if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil {
|
| 697 | 697 |
deleteDevice(devices.getPoolDevName(), deviceId) |
| 698 |
- utils.Debugf("Error registering device: %s\n", err)
|
|
| 698 |
+ log.Debugf("Error registering device: %s\n", err)
|
|
| 699 | 699 |
return err |
| 700 | 700 |
} |
| 701 | 701 |
return nil |
| ... | ... |
@@ -708,7 +708,7 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
|
| 708 | 708 |
// manually |
| 709 | 709 |
if err := devices.activateDeviceIfNeeded(info); err == nil {
|
| 710 | 710 |
if err := BlockDeviceDiscard(info.DevName()); err != nil {
|
| 711 |
- utils.Debugf("Error discarding block on device: %s (ignoring)\n", err)
|
|
| 711 |
+ log.Debugf("Error discarding block on device: %s (ignoring)\n", err)
|
|
| 712 | 712 |
} |
| 713 | 713 |
} |
| 714 | 714 |
} |
| ... | ... |
@@ -716,13 +716,13 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
|
| 716 | 716 |
devinfo, _ := getInfo(info.Name()) |
| 717 | 717 |
if devinfo != nil && devinfo.Exists != 0 {
|
| 718 | 718 |
if err := devices.removeDeviceAndWait(info.Name()); err != nil {
|
| 719 |
- utils.Debugf("Error removing device: %s\n", err)
|
|
| 719 |
+ log.Debugf("Error removing device: %s\n", err)
|
|
| 720 | 720 |
return err |
| 721 | 721 |
} |
| 722 | 722 |
} |
| 723 | 723 |
|
| 724 | 724 |
if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil {
|
| 725 |
- utils.Debugf("Error deleting device: %s\n", err)
|
|
| 725 |
+ log.Debugf("Error deleting device: %s\n", err)
|
|
| 726 | 726 |
return err |
| 727 | 727 |
} |
| 728 | 728 |
|
| ... | ... |
@@ -735,7 +735,7 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
|
| 735 | 735 |
devices.devicesLock.Lock() |
| 736 | 736 |
devices.Devices[info.Hash] = info |
| 737 | 737 |
devices.devicesLock.Unlock() |
| 738 |
- utils.Debugf("Error removing meta data: %s\n", err)
|
|
| 738 |
+ log.Debugf("Error removing meta data: %s\n", err)
|
|
| 739 | 739 |
return err |
| 740 | 740 |
} |
| 741 | 741 |
|
| ... | ... |
@@ -758,8 +758,8 @@ func (devices *DeviceSet) DeleteDevice(hash string) error {
|
| 758 | 758 |
} |
| 759 | 759 |
|
| 760 | 760 |
func (devices *DeviceSet) deactivatePool() error {
|
| 761 |
- utils.Debugf("[devmapper] deactivatePool()")
|
|
| 762 |
- defer utils.Debugf("[devmapper] deactivatePool END")
|
|
| 761 |
+ log.Debugf("[devmapper] deactivatePool()")
|
|
| 762 |
+ defer log.Debugf("[devmapper] deactivatePool END")
|
|
| 763 | 763 |
devname := devices.getPoolDevName() |
| 764 | 764 |
devinfo, err := getInfo(devname) |
| 765 | 765 |
if err != nil {
|
| ... | ... |
@@ -773,13 +773,13 @@ func (devices *DeviceSet) deactivatePool() error {
|
| 773 | 773 |
} |
| 774 | 774 |
|
| 775 | 775 |
func (devices *DeviceSet) deactivateDevice(info *DevInfo) error {
|
| 776 |
- utils.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
|
|
| 777 |
- defer utils.Debugf("[devmapper] deactivateDevice END")
|
|
| 776 |
+ log.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
|
|
| 777 |
+ defer log.Debugf("[devmapper] deactivateDevice END")
|
|
| 778 | 778 |
|
| 779 | 779 |
// Wait for the unmount to be effective, |
| 780 | 780 |
// by watching the value of Info.OpenCount for the device |
| 781 | 781 |
if err := devices.waitClose(info); err != nil {
|
| 782 |
- utils.Errorf("Warning: error waiting for device %s to close: %s\n", info.Hash, err)
|
|
| 782 |
+ log.Errorf("Warning: error waiting for device %s to close: %s\n", info.Hash, err)
|
|
| 783 | 783 |
} |
| 784 | 784 |
|
| 785 | 785 |
devinfo, err := getInfo(info.Name()) |
| ... | ... |
@@ -829,8 +829,8 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error {
|
| 829 | 829 |
// a) the device registered at <device_set_prefix>-<hash> is removed, |
| 830 | 830 |
// or b) the 10 second timeout expires. |
| 831 | 831 |
func (devices *DeviceSet) waitRemove(devname string) error {
|
| 832 |
- utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
|
|
| 833 |
- defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
|
|
| 832 |
+ log.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
|
|
| 833 |
+ defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
|
|
| 834 | 834 |
i := 0 |
| 835 | 835 |
for ; i < 1000; i += 1 {
|
| 836 | 836 |
devinfo, err := getInfo(devname) |
| ... | ... |
@@ -840,7 +840,7 @@ func (devices *DeviceSet) waitRemove(devname string) error {
|
| 840 | 840 |
return nil |
| 841 | 841 |
} |
| 842 | 842 |
if i%100 == 0 {
|
| 843 |
- utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
|
|
| 843 |
+ log.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
|
|
| 844 | 844 |
} |
| 845 | 845 |
if devinfo.Exists == 0 {
|
| 846 | 846 |
break |
| ... | ... |
@@ -867,7 +867,7 @@ func (devices *DeviceSet) waitClose(info *DevInfo) error {
|
| 867 | 867 |
return err |
| 868 | 868 |
} |
| 869 | 869 |
if i%100 == 0 {
|
| 870 |
- utils.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount)
|
|
| 870 |
+ log.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount)
|
|
| 871 | 871 |
} |
| 872 | 872 |
if devinfo.OpenCount == 0 {
|
| 873 | 873 |
break |
| ... | ... |
@@ -884,9 +884,9 @@ func (devices *DeviceSet) waitClose(info *DevInfo) error {
|
| 884 | 884 |
|
| 885 | 885 |
func (devices *DeviceSet) Shutdown() error {
|
| 886 | 886 |
|
| 887 |
- utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
|
|
| 888 |
- utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
|
|
| 889 |
- defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
|
|
| 887 |
+ log.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
|
|
| 888 |
+ log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
|
|
| 889 |
+ defer log.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
|
|
| 890 | 890 |
|
| 891 | 891 |
var devs []*DevInfo |
| 892 | 892 |
|
| ... | ... |
@@ -903,12 +903,12 @@ func (devices *DeviceSet) Shutdown() error {
|
| 903 | 903 |
// container. This means it'll go away from the global scope directly, |
| 904 | 904 |
// and the device will be released when that container dies. |
| 905 | 905 |
if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
|
| 906 |
- utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err)
|
|
| 906 |
+ log.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err)
|
|
| 907 | 907 |
} |
| 908 | 908 |
|
| 909 | 909 |
devices.Lock() |
| 910 | 910 |
if err := devices.deactivateDevice(info); err != nil {
|
| 911 |
- utils.Debugf("Shutdown deactivate %s , error: %s\n", info.Hash, err)
|
|
| 911 |
+ log.Debugf("Shutdown deactivate %s , error: %s\n", info.Hash, err)
|
|
| 912 | 912 |
} |
| 913 | 913 |
devices.Unlock() |
| 914 | 914 |
} |
| ... | ... |
@@ -920,7 +920,7 @@ func (devices *DeviceSet) Shutdown() error {
|
| 920 | 920 |
info.lock.Lock() |
| 921 | 921 |
devices.Lock() |
| 922 | 922 |
if err := devices.deactivateDevice(info); err != nil {
|
| 923 |
- utils.Debugf("Shutdown deactivate base , error: %s\n", err)
|
|
| 923 |
+ log.Debugf("Shutdown deactivate base , error: %s\n", err)
|
|
| 924 | 924 |
} |
| 925 | 925 |
devices.Unlock() |
| 926 | 926 |
info.lock.Unlock() |
| ... | ... |
@@ -928,7 +928,7 @@ func (devices *DeviceSet) Shutdown() error {
|
| 928 | 928 |
|
| 929 | 929 |
devices.Lock() |
| 930 | 930 |
if err := devices.deactivatePool(); err != nil {
|
| 931 |
- utils.Debugf("Shutdown deactivate pool , error: %s\n", err)
|
|
| 931 |
+ log.Debugf("Shutdown deactivate pool , error: %s\n", err)
|
|
| 932 | 932 |
} |
| 933 | 933 |
devices.Unlock() |
| 934 | 934 |
|
| ... | ... |
@@ -992,8 +992,8 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
|
| 992 | 992 |
} |
| 993 | 993 |
|
| 994 | 994 |
func (devices *DeviceSet) UnmountDevice(hash string) error {
|
| 995 |
- utils.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
|
|
| 996 |
- defer utils.Debugf("[devmapper] UnmountDevice END")
|
|
| 995 |
+ log.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
|
|
| 996 |
+ defer log.Debugf("[devmapper] UnmountDevice END")
|
|
| 997 | 997 |
|
| 998 | 998 |
info, err := devices.lookupDevice(hash) |
| 999 | 999 |
if err != nil {
|
| ... | ... |
@@ -1015,11 +1015,11 @@ func (devices *DeviceSet) UnmountDevice(hash string) error {
|
| 1015 | 1015 |
return nil |
| 1016 | 1016 |
} |
| 1017 | 1017 |
|
| 1018 |
- utils.Debugf("[devmapper] Unmount(%s)", info.mountPath)
|
|
| 1018 |
+ log.Debugf("[devmapper] Unmount(%s)", info.mountPath)
|
|
| 1019 | 1019 |
if err := syscall.Unmount(info.mountPath, 0); err != nil {
|
| 1020 | 1020 |
return err |
| 1021 | 1021 |
} |
| 1022 |
- utils.Debugf("[devmapper] Unmount done")
|
|
| 1022 |
+ log.Debugf("[devmapper] Unmount done")
|
|
| 1023 | 1023 |
|
| 1024 | 1024 |
if err := devices.deactivateDevice(info); err != nil {
|
| 1025 | 1025 |
return err |
| ... | ... |
@@ -9,7 +9,7 @@ import ( |
| 9 | 9 |
"runtime" |
| 10 | 10 |
"syscall" |
| 11 | 11 |
|
| 12 |
- "github.com/docker/docker/utils" |
|
| 12 |
+ "github.com/docker/docker/pkg/log" |
|
| 13 | 13 |
) |
| 14 | 14 |
|
| 15 | 15 |
type DevmapperLogger interface {
|
| ... | ... |
@@ -198,7 +198,7 @@ func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64, |
| 198 | 198 |
func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
|
| 199 | 199 |
loopInfo, err := ioctlLoopGetStatus64(file.Fd()) |
| 200 | 200 |
if err != nil {
|
| 201 |
- utils.Errorf("Error get loopback backing file: %s\n", err)
|
|
| 201 |
+ log.Errorf("Error get loopback backing file: %s\n", err)
|
|
| 202 | 202 |
return 0, 0, ErrGetLoopbackBackingFile |
| 203 | 203 |
} |
| 204 | 204 |
return loopInfo.loDevice, loopInfo.loInode, nil |
| ... | ... |
@@ -206,7 +206,7 @@ func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
|
| 206 | 206 |
|
| 207 | 207 |
func LoopbackSetCapacity(file *os.File) error {
|
| 208 | 208 |
if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
|
| 209 |
- utils.Errorf("Error loopbackSetCapacity: %s", err)
|
|
| 209 |
+ log.Errorf("Error loopbackSetCapacity: %s", err)
|
|
| 210 | 210 |
return ErrLoopbackSetCapacity |
| 211 | 211 |
} |
| 212 | 212 |
return nil |
| ... | ... |
@@ -246,7 +246,7 @@ func FindLoopDeviceFor(file *os.File) *os.File {
|
| 246 | 246 |
|
| 247 | 247 |
func UdevWait(cookie uint) error {
|
| 248 | 248 |
if res := DmUdevWait(cookie); res != 1 {
|
| 249 |
- utils.Debugf("Failed to wait on udev cookie %d", cookie)
|
|
| 249 |
+ log.Debugf("Failed to wait on udev cookie %d", cookie)
|
|
| 250 | 250 |
return ErrUdevWait |
| 251 | 251 |
} |
| 252 | 252 |
return nil |
| ... | ... |
@@ -265,7 +265,7 @@ func logInit(logger DevmapperLogger) {
|
| 265 | 265 |
|
| 266 | 266 |
func SetDevDir(dir string) error {
|
| 267 | 267 |
if res := DmSetDevDir(dir); res != 1 {
|
| 268 |
- utils.Debugf("Error dm_set_dev_dir")
|
|
| 268 |
+ log.Debugf("Error dm_set_dev_dir")
|
|
| 269 | 269 |
return ErrSetDevDir |
| 270 | 270 |
} |
| 271 | 271 |
return nil |
| ... | ... |
@@ -286,7 +286,7 @@ func RemoveDevice(name string) error {
|
| 286 | 286 |
return ErrCreateRemoveTask |
| 287 | 287 |
} |
| 288 | 288 |
if err := task.SetName(name); err != nil {
|
| 289 |
- utils.Debugf("Can't set task name %s", name)
|
|
| 289 |
+ log.Debugf("Can't set task name %s", name)
|
|
| 290 | 290 |
return err |
| 291 | 291 |
} |
| 292 | 292 |
if err := task.Run(); err != nil {
|
| ... | ... |
@@ -298,7 +298,7 @@ func RemoveDevice(name string) error {
|
| 298 | 298 |
func GetBlockDeviceSize(file *os.File) (uint64, error) {
|
| 299 | 299 |
size, err := ioctlBlkGetSize64(file.Fd()) |
| 300 | 300 |
if err != nil {
|
| 301 |
- utils.Errorf("Error getblockdevicesize: %s", err)
|
|
| 301 |
+ log.Errorf("Error getblockdevicesize: %s", err)
|
|
| 302 | 302 |
return 0, ErrGetBlockSize |
| 303 | 303 |
} |
| 304 | 304 |
return uint64(size), nil |
| ... | ... |
@@ -417,21 +417,21 @@ func getDriverVersion() (string, error) {
|
| 417 | 417 |
func getStatus(name string) (uint64, uint64, string, string, error) {
|
| 418 | 418 |
task, err := createTask(DeviceStatus, name) |
| 419 | 419 |
if task == nil {
|
| 420 |
- utils.Debugf("getStatus: Error createTask: %s", err)
|
|
| 420 |
+ log.Debugf("getStatus: Error createTask: %s", err)
|
|
| 421 | 421 |
return 0, 0, "", "", err |
| 422 | 422 |
} |
| 423 | 423 |
if err := task.Run(); err != nil {
|
| 424 |
- utils.Debugf("getStatus: Error Run: %s", err)
|
|
| 424 |
+ log.Debugf("getStatus: Error Run: %s", err)
|
|
| 425 | 425 |
return 0, 0, "", "", err |
| 426 | 426 |
} |
| 427 | 427 |
|
| 428 | 428 |
devinfo, err := task.GetInfo() |
| 429 | 429 |
if err != nil {
|
| 430 |
- utils.Debugf("getStatus: Error GetInfo: %s", err)
|
|
| 430 |
+ log.Debugf("getStatus: Error GetInfo: %s", err)
|
|
| 431 | 431 |
return 0, 0, "", "", err |
| 432 | 432 |
} |
| 433 | 433 |
if devinfo.Exists == 0 {
|
| 434 |
- utils.Debugf("getStatus: Non existing device %s", name)
|
|
| 434 |
+ log.Debugf("getStatus: Non existing device %s", name)
|
|
| 435 | 435 |
return 0, 0, "", "", fmt.Errorf("Non existing device %s", name)
|
| 436 | 436 |
} |
| 437 | 437 |
|
| ... | ... |
@@ -491,7 +491,7 @@ func resumeDevice(name string) error {
|
| 491 | 491 |
} |
| 492 | 492 |
|
| 493 | 493 |
func createDevice(poolName string, deviceId *int) error {
|
| 494 |
- utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId)
|
|
| 494 |
+ log.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, *deviceId)
|
|
| 495 | 495 |
|
| 496 | 496 |
for {
|
| 497 | 497 |
task, err := createTask(DeviceTargetMsg, poolName) |
| ... | ... |
@@ -542,8 +542,8 @@ func deleteDevice(poolName string, deviceId int) error {
|
| 542 | 542 |
} |
| 543 | 543 |
|
| 544 | 544 |
func removeDevice(name string) error {
|
| 545 |
- utils.Debugf("[devmapper] removeDevice START")
|
|
| 546 |
- defer utils.Debugf("[devmapper] removeDevice END")
|
|
| 545 |
+ log.Debugf("[devmapper] removeDevice START")
|
|
| 546 |
+ defer log.Debugf("[devmapper] removeDevice END")
|
|
| 547 | 547 |
task, err := createTask(DeviceRemove, name) |
| 548 | 548 |
if task == nil {
|
| 549 | 549 |
return err |
| ... | ... |
@@ -9,8 +9,8 @@ import ( |
| 9 | 9 |
"path" |
| 10 | 10 |
|
| 11 | 11 |
"github.com/docker/docker/daemon/graphdriver" |
| 12 |
+ "github.com/docker/docker/pkg/log" |
|
| 12 | 13 |
"github.com/docker/docker/pkg/mount" |
| 13 |
- "github.com/docker/docker/utils" |
|
| 14 | 14 |
) |
| 15 | 15 |
|
| 16 | 16 |
func init() {
|
| ... | ... |
@@ -138,7 +138,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
|
| 138 | 138 |
|
| 139 | 139 |
func (d *Driver) Put(id string) {
|
| 140 | 140 |
if err := d.DeviceSet.UnmountDevice(id); err != nil {
|
| 141 |
- utils.Errorf("Warning: error unmounting device %s: %s\n", id, err)
|
|
| 141 |
+ log.Errorf("Warning: error unmounting device %s: %s\n", id, err)
|
|
| 142 | 142 |
} |
| 143 | 143 |
} |
| 144 | 144 |
|
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
|
| 7 | 7 |
"github.com/docker/docker/dockerversion" |
| 8 | 8 |
"github.com/docker/docker/engine" |
| 9 |
+ "github.com/docker/docker/pkg/log" |
|
| 9 | 10 |
"github.com/docker/docker/pkg/parsers/kernel" |
| 10 | 11 |
"github.com/docker/docker/pkg/parsers/operatingsystem" |
| 11 | 12 |
"github.com/docker/docker/registry" |
| ... | ... |
@@ -30,7 +31,7 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
|
| 30 | 30 |
operatingSystem = s |
| 31 | 31 |
} |
| 32 | 32 |
if inContainer, err := operatingsystem.IsContainerized(); err != nil {
|
| 33 |
- utils.Errorf("Could not determine if daemon is containerized: %v", err)
|
|
| 33 |
+ log.Errorf("Could not determine if daemon is containerized: %v", err)
|
|
| 34 | 34 |
operatingSystem += " (error determining if containerized)" |
| 35 | 35 |
} else if inContainer {
|
| 36 | 36 |
operatingSystem += " (containerized)" |
| ... | ... |
@@ -9,11 +9,11 @@ import ( |
| 9 | 9 |
"strconv" |
| 10 | 10 |
"time" |
| 11 | 11 |
|
| 12 |
+ "github.com/docker/docker/pkg/log" |
|
| 12 | 13 |
"github.com/docker/docker/pkg/tailfile" |
| 13 | 14 |
|
| 14 | 15 |
"github.com/docker/docker/engine" |
| 15 | 16 |
"github.com/docker/docker/pkg/jsonlog" |
| 16 |
- "github.com/docker/docker/utils" |
|
| 17 | 17 |
) |
| 18 | 18 |
|
| 19 | 19 |
func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
|
| ... | ... |
@@ -47,31 +47,31 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
|
| 47 | 47 |
cLog, err := container.ReadLog("json")
|
| 48 | 48 |
if err != nil && os.IsNotExist(err) {
|
| 49 | 49 |
// Legacy logs |
| 50 |
- utils.Debugf("Old logs format")
|
|
| 50 |
+ log.Debugf("Old logs format")
|
|
| 51 | 51 |
if stdout {
|
| 52 | 52 |
cLog, err := container.ReadLog("stdout")
|
| 53 | 53 |
if err != nil {
|
| 54 |
- utils.Errorf("Error reading logs (stdout): %s", err)
|
|
| 54 |
+ log.Errorf("Error reading logs (stdout): %s", err)
|
|
| 55 | 55 |
} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
|
| 56 |
- utils.Errorf("Error streaming logs (stdout): %s", err)
|
|
| 56 |
+ log.Errorf("Error streaming logs (stdout): %s", err)
|
|
| 57 | 57 |
} |
| 58 | 58 |
} |
| 59 | 59 |
if stderr {
|
| 60 | 60 |
cLog, err := container.ReadLog("stderr")
|
| 61 | 61 |
if err != nil {
|
| 62 |
- utils.Errorf("Error reading logs (stderr): %s", err)
|
|
| 62 |
+ log.Errorf("Error reading logs (stderr): %s", err)
|
|
| 63 | 63 |
} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
|
| 64 |
- utils.Errorf("Error streaming logs (stderr): %s", err)
|
|
| 64 |
+ log.Errorf("Error streaming logs (stderr): %s", err)
|
|
| 65 | 65 |
} |
| 66 | 66 |
} |
| 67 | 67 |
} else if err != nil {
|
| 68 |
- utils.Errorf("Error reading logs (json): %s", err)
|
|
| 68 |
+ log.Errorf("Error reading logs (json): %s", err)
|
|
| 69 | 69 |
} else {
|
| 70 | 70 |
if tail != "all" {
|
| 71 | 71 |
var err error |
| 72 | 72 |
lines, err = strconv.Atoi(tail) |
| 73 | 73 |
if err != nil {
|
| 74 |
- utils.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err)
|
|
| 74 |
+ log.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err)
|
|
| 75 | 75 |
lines = -1 |
| 76 | 76 |
} |
| 77 | 77 |
} |
| ... | ... |
@@ -95,7 +95,7 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
|
| 95 | 95 |
if err := dec.Decode(l); err == io.EOF {
|
| 96 | 96 |
break |
| 97 | 97 |
} else if err != nil {
|
| 98 |
- utils.Errorf("Error streaming logs: %s", err)
|
|
| 98 |
+ log.Errorf("Error streaming logs: %s", err)
|
|
| 99 | 99 |
break |
| 100 | 100 |
} |
| 101 | 101 |
logLine := l.Log |
| ... | ... |
@@ -127,7 +127,7 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
|
| 127 | 127 |
} |
| 128 | 128 |
err := <-errors |
| 129 | 129 |
if err != nil {
|
| 130 |
- utils.Errorf("%s", err)
|
|
| 130 |
+ log.Errorf("%s", err)
|
|
| 131 | 131 |
} |
| 132 | 132 |
} |
| 133 | 133 |
return engine.StatusOK |
| ... | ... |
@@ -3,7 +3,6 @@ package bridge |
| 3 | 3 |
import ( |
| 4 | 4 |
"fmt" |
| 5 | 5 |
"io/ioutil" |
| 6 |
- "log" |
|
| 7 | 6 |
"net" |
| 8 | 7 |
"strings" |
| 9 | 8 |
"sync" |
| ... | ... |
@@ -14,9 +13,9 @@ import ( |
| 14 | 14 |
"github.com/docker/docker/daemon/networkdriver/portmapper" |
| 15 | 15 |
"github.com/docker/docker/engine" |
| 16 | 16 |
"github.com/docker/docker/pkg/iptables" |
| 17 |
+ "github.com/docker/docker/pkg/log" |
|
| 17 | 18 |
"github.com/docker/docker/pkg/networkfs/resolvconf" |
| 18 | 19 |
"github.com/docker/docker/pkg/parsers/kernel" |
| 19 |
- "github.com/docker/docker/utils" |
|
| 20 | 20 |
"github.com/docker/libcontainer/netlink" |
| 21 | 21 |
) |
| 22 | 22 |
|
| ... | ... |
@@ -197,7 +196,7 @@ func setupIPTables(addr net.Addr, icc bool) error {
|
| 197 | 197 |
iptables.Raw(append([]string{"-D"}, acceptArgs...)...)
|
| 198 | 198 |
|
| 199 | 199 |
if !iptables.Exists(dropArgs...) {
|
| 200 |
- utils.Debugf("Disable inter-container communication")
|
|
| 200 |
+ log.Debugf("Disable inter-container communication")
|
|
| 201 | 201 |
if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil {
|
| 202 | 202 |
return fmt.Errorf("Unable to prevent intercontainer communication: %s", err)
|
| 203 | 203 |
} else if len(output) != 0 {
|
| ... | ... |
@@ -208,7 +207,7 @@ func setupIPTables(addr net.Addr, icc bool) error {
|
| 208 | 208 |
iptables.Raw(append([]string{"-D"}, dropArgs...)...)
|
| 209 | 209 |
|
| 210 | 210 |
if !iptables.Exists(acceptArgs...) {
|
| 211 |
- utils.Debugf("Enable inter-container communication")
|
|
| 211 |
+ log.Debugf("Enable inter-container communication")
|
|
| 212 | 212 |
if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil {
|
| 213 | 213 |
return fmt.Errorf("Unable to allow intercontainer communication: %s", err)
|
| 214 | 214 |
} else if len(output) != 0 {
|
| ... | ... |
@@ -272,7 +271,7 @@ func createBridge(bridgeIP string) error {
|
| 272 | 272 |
ifaceAddr = addr |
| 273 | 273 |
break |
| 274 | 274 |
} else {
|
| 275 |
- utils.Debugf("%s %s", addr, err)
|
|
| 275 |
+ log.Debugf("%s %s", addr, err)
|
|
| 276 | 276 |
} |
| 277 | 277 |
} |
| 278 | 278 |
} |
| ... | ... |
@@ -281,7 +280,7 @@ func createBridge(bridgeIP string) error {
|
| 281 | 281 |
if ifaceAddr == "" {
|
| 282 | 282 |
return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface)
|
| 283 | 283 |
} |
| 284 |
- utils.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr)
|
|
| 284 |
+ log.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr)
|
|
| 285 | 285 |
|
| 286 | 286 |
if err := createBridgeIface(bridgeIface); err != nil {
|
| 287 | 287 |
return err |
| ... | ... |
@@ -311,7 +310,7 @@ func createBridgeIface(name string) error {
|
| 311 | 311 |
// only set the bridge's mac address if the kernel version is > 3.3 |
| 312 | 312 |
// before that it was not supported |
| 313 | 313 |
setBridgeMacAddr := err == nil && (kv.Kernel >= 3 && kv.Major >= 3) |
| 314 |
- utils.Debugf("setting bridge mac address = %v", setBridgeMacAddr)
|
|
| 314 |
+ log.Debugf("setting bridge mac address = %v", setBridgeMacAddr)
|
|
| 315 | 315 |
return netlink.CreateBridge(name, setBridgeMacAddr) |
| 316 | 316 |
} |
| 317 | 317 |
|
| ... | ... |
@@ -364,12 +363,12 @@ func Release(job *engine.Job) engine.Status {
|
| 364 | 364 |
|
| 365 | 365 |
for _, nat := range containerInterface.PortMappings {
|
| 366 | 366 |
if err := portmapper.Unmap(nat); err != nil {
|
| 367 |
- log.Printf("Unable to unmap port %s: %s", nat, err)
|
|
| 367 |
+ log.Infof("Unable to unmap port %s: %s", nat, err)
|
|
| 368 | 368 |
} |
| 369 | 369 |
} |
| 370 | 370 |
|
| 371 | 371 |
if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil {
|
| 372 |
- log.Printf("Unable to release ip %s\n", err)
|
|
| 372 |
+ log.Infof("Unable to release ip %s\n", err)
|
|
| 373 | 373 |
} |
| 374 | 374 |
return engine.StatusOK |
| 375 | 375 |
} |
| ... | ... |
@@ -9,8 +9,8 @@ import ( |
| 9 | 9 |
|
| 10 | 10 |
"github.com/docker/docker/archive" |
| 11 | 11 |
"github.com/docker/docker/engine" |
| 12 |
+ "github.com/docker/docker/pkg/log" |
|
| 12 | 13 |
"github.com/docker/docker/pkg/parsers" |
| 13 |
- "github.com/docker/docker/utils" |
|
| 14 | 14 |
) |
| 15 | 15 |
|
| 16 | 16 |
// CmdImageExport exports all images with the given tag. All versions |
| ... | ... |
@@ -30,7 +30,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
|
| 30 | 30 |
} |
| 31 | 31 |
defer os.RemoveAll(tempdir) |
| 32 | 32 |
|
| 33 |
- utils.Debugf("Serializing %s", name)
|
|
| 33 |
+ log.Debugf("Serializing %s", name)
|
|
| 34 | 34 |
|
| 35 | 35 |
rootRepoMap := map[string]Repository{}
|
| 36 | 36 |
rootRepo, err := s.Get(name) |
| ... | ... |
@@ -77,7 +77,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
|
| 77 | 77 |
return job.Error(err) |
| 78 | 78 |
} |
| 79 | 79 |
} else {
|
| 80 |
- utils.Debugf("There were no repositories to write")
|
|
| 80 |
+ log.Debugf("There were no repositories to write")
|
|
| 81 | 81 |
} |
| 82 | 82 |
|
| 83 | 83 |
fs, err := archive.Tar(tempdir, archive.Uncompressed) |
| ... | ... |
@@ -89,7 +89,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
|
| 89 | 89 |
if _, err := io.Copy(job.Stdout, fs); err != nil {
|
| 90 | 90 |
return job.Error(err) |
| 91 | 91 |
} |
| 92 |
- utils.Debugf("End Serializing %s", name)
|
|
| 92 |
+ log.Debugf("End Serializing %s", name)
|
|
| 93 | 93 |
return engine.StatusOK |
| 94 | 94 |
} |
| 95 | 95 |
|
| ... | ... |
@@ -16,6 +16,7 @@ import ( |
| 16 | 16 |
"github.com/docker/docker/daemon/graphdriver" |
| 17 | 17 |
"github.com/docker/docker/dockerversion" |
| 18 | 18 |
"github.com/docker/docker/image" |
| 19 |
+ "github.com/docker/docker/pkg/log" |
|
| 19 | 20 |
"github.com/docker/docker/pkg/truncindex" |
| 20 | 21 |
"github.com/docker/docker/runconfig" |
| 21 | 22 |
"github.com/docker/docker/utils" |
| ... | ... |
@@ -64,7 +65,7 @@ func (graph *Graph) restore() error {
|
| 64 | 64 |
} |
| 65 | 65 |
} |
| 66 | 66 |
graph.idIndex = truncindex.NewTruncIndex(ids) |
| 67 |
- utils.Debugf("Restored %d elements", len(dir))
|
|
| 67 |
+ log.Debugf("Restored %d elements", len(dir))
|
|
| 68 | 68 |
return nil |
| 69 | 69 |
} |
| 70 | 70 |
|
| ... | ... |
@@ -10,7 +10,7 @@ import ( |
| 10 | 10 |
"github.com/docker/docker/archive" |
| 11 | 11 |
"github.com/docker/docker/engine" |
| 12 | 12 |
"github.com/docker/docker/image" |
| 13 |
- "github.com/docker/docker/utils" |
|
| 13 |
+ "github.com/docker/docker/pkg/log" |
|
| 14 | 14 |
) |
| 15 | 15 |
|
| 16 | 16 |
// Loads a set of images into the repository. This is the complementary of ImageExport. |
| ... | ... |
@@ -93,22 +93,22 @@ func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
|
| 93 | 93 |
|
| 94 | 94 |
func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error {
|
| 95 | 95 |
if err := eng.Job("image_get", address).Run(); err != nil {
|
| 96 |
- utils.Debugf("Loading %s", address)
|
|
| 96 |
+ log.Debugf("Loading %s", address)
|
|
| 97 | 97 |
|
| 98 | 98 |
imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) |
| 99 | 99 |
if err != nil {
|
| 100 |
- utils.Debugf("Error reading json", err)
|
|
| 100 |
+ log.Debugf("Error reading json", err)
|
|
| 101 | 101 |
return err |
| 102 | 102 |
} |
| 103 | 103 |
|
| 104 | 104 |
layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar")) |
| 105 | 105 |
if err != nil {
|
| 106 |
- utils.Debugf("Error reading embedded tar", err)
|
|
| 106 |
+ log.Debugf("Error reading embedded tar", err)
|
|
| 107 | 107 |
return err |
| 108 | 108 |
} |
| 109 | 109 |
img, err := image.NewImgJSON(imageJson) |
| 110 | 110 |
if err != nil {
|
| 111 |
- utils.Debugf("Error unmarshalling json", err)
|
|
| 111 |
+ log.Debugf("Error unmarshalling json", err)
|
|
| 112 | 112 |
return err |
| 113 | 113 |
} |
| 114 | 114 |
if img.Parent != "" {
|
| ... | ... |
@@ -122,7 +122,7 @@ func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string |
| 122 | 122 |
return err |
| 123 | 123 |
} |
| 124 | 124 |
} |
| 125 |
- utils.Debugf("Completed processing %s", address)
|
|
| 125 |
+ log.Debugf("Completed processing %s", address)
|
|
| 126 | 126 |
|
| 127 | 127 |
return nil |
| 128 | 128 |
} |
| ... | ... |
@@ -10,6 +10,7 @@ import ( |
| 10 | 10 |
|
| 11 | 11 |
"github.com/docker/docker/engine" |
| 12 | 12 |
"github.com/docker/docker/image" |
| 13 |
+ "github.com/docker/docker/pkg/log" |
|
| 13 | 14 |
"github.com/docker/docker/registry" |
| 14 | 15 |
"github.com/docker/docker/utils" |
| 15 | 16 |
) |
| ... | ... |
@@ -85,10 +86,10 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName, |
| 85 | 85 |
} |
| 86 | 86 |
} |
| 87 | 87 |
|
| 88 |
- utils.Debugf("Retrieving the tag list")
|
|
| 88 |
+ log.Debugf("Retrieving the tag list")
|
|
| 89 | 89 |
tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens) |
| 90 | 90 |
if err != nil {
|
| 91 |
- utils.Errorf("%v", err)
|
|
| 91 |
+ log.Errorf("%v", err)
|
|
| 92 | 92 |
return err |
| 93 | 93 |
} |
| 94 | 94 |
|
| ... | ... |
@@ -100,7 +101,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName, |
| 100 | 100 |
} |
| 101 | 101 |
} |
| 102 | 102 |
|
| 103 |
- utils.Debugf("Registering tags")
|
|
| 103 |
+ log.Debugf("Registering tags")
|
|
| 104 | 104 |
// If no tag has been specified, pull them all |
| 105 | 105 |
if askedTag == "" {
|
| 106 | 106 |
for tag, id := range tagsList {
|
| ... | ... |
@@ -119,7 +120,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName, |
| 119 | 119 |
for _, image := range repoData.ImgList {
|
| 120 | 120 |
downloadImage := func(img *registry.ImgData) {
|
| 121 | 121 |
if askedTag != "" && img.Tag != askedTag {
|
| 122 |
- utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
|
|
| 122 |
+ log.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
|
|
| 123 | 123 |
if parallel {
|
| 124 | 124 |
errors <- nil |
| 125 | 125 |
} |
| ... | ... |
@@ -127,7 +128,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName, |
| 127 | 127 |
} |
| 128 | 128 |
|
| 129 | 129 |
if img.Tag == "" {
|
| 130 |
- utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
|
|
| 130 |
+ log.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
|
|
| 131 | 131 |
if parallel {
|
| 132 | 132 |
errors <- nil |
| 133 | 133 |
} |
| ... | ... |
@@ -141,7 +142,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, localName, |
| 141 | 141 |
<-c |
| 142 | 142 |
out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) |
| 143 | 143 |
} else {
|
| 144 |
- utils.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
|
|
| 144 |
+ log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
|
|
| 145 | 145 |
} |
| 146 | 146 |
if parallel {
|
| 147 | 147 |
errors <- nil |
| ... | ... |
@@ -224,7 +225,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint |
| 224 | 224 |
|
| 225 | 225 |
// ensure no two downloads of the same layer happen at the same time |
| 226 | 226 |
if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
|
| 227 |
- utils.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
|
|
| 227 |
+ log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
|
|
| 228 | 228 |
<-c |
| 229 | 229 |
} |
| 230 | 230 |
defer s.poolRemove("pull", "layer:"+id)
|
| ... | ... |
@@ -9,6 +9,7 @@ import ( |
| 9 | 9 |
|
| 10 | 10 |
"github.com/docker/docker/archive" |
| 11 | 11 |
"github.com/docker/docker/engine" |
| 12 |
+ "github.com/docker/docker/pkg/log" |
|
| 12 | 13 |
"github.com/docker/docker/registry" |
| 13 | 14 |
"github.com/docker/docker/utils" |
| 14 | 15 |
) |
| ... | ... |
@@ -54,15 +55,15 @@ func (s *TagStore) getImageList(localRepo map[string]string, requestedTag string |
| 54 | 54 |
if len(imageList) == 0 {
|
| 55 | 55 |
return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
|
| 56 | 56 |
} |
| 57 |
- utils.Debugf("Image list: %v", imageList)
|
|
| 58 |
- utils.Debugf("Tags by image: %v", tagsByImage)
|
|
| 57 |
+ log.Debugf("Image list: %v", imageList)
|
|
| 58 |
+ log.Debugf("Tags by image: %v", tagsByImage)
|
|
| 59 | 59 |
|
| 60 | 60 |
return imageList, tagsByImage, nil |
| 61 | 61 |
} |
| 62 | 62 |
|
| 63 | 63 |
func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
|
| 64 | 64 |
out = utils.NewWriteFlusher(out) |
| 65 |
- utils.Debugf("Local repo: %s", localRepo)
|
|
| 65 |
+ log.Debugf("Local repo: %s", localRepo)
|
|
| 66 | 66 |
imgList, tagsByImage, err := s.getImageList(localRepo, tag) |
| 67 | 67 |
if err != nil {
|
| 68 | 68 |
return err |
| ... | ... |
@@ -96,9 +97,9 @@ func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, localName, |
| 96 | 96 |
} |
| 97 | 97 |
} |
| 98 | 98 |
|
| 99 |
- utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo)
|
|
| 99 |
+ log.Debugf("Preparing to push %s with the following images and tags\n", localRepo)
|
|
| 100 | 100 |
for _, data := range imageIndex {
|
| 101 |
- utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag)
|
|
| 101 |
+ log.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag)
|
|
| 102 | 102 |
} |
| 103 | 103 |
|
| 104 | 104 |
// Register all the images in a repository with the registry |
| ... | ... |
@@ -170,7 +171,7 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, remote, imgID, |
| 170 | 170 |
defer os.RemoveAll(layerData.Name()) |
| 171 | 171 |
|
| 172 | 172 |
// Send the layer |
| 173 |
- utils.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
|
|
| 173 |
+ log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
|
|
| 174 | 174 |
|
| 175 | 175 |
checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) |
| 176 | 176 |
if err != nil {
|
| ... | ... |
@@ -6,7 +6,7 @@ import ( |
| 6 | 6 |
|
| 7 | 7 |
"github.com/docker/docker/engine" |
| 8 | 8 |
"github.com/docker/docker/image" |
| 9 |
- "github.com/docker/docker/utils" |
|
| 9 |
+ "github.com/docker/docker/pkg/log" |
|
| 10 | 10 |
) |
| 11 | 11 |
|
| 12 | 12 |
func (s *TagStore) Install(eng *engine.Engine) error {
|
| ... | ... |
@@ -173,7 +173,7 @@ func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status {
|
| 173 | 173 |
if written, err := io.Copy(job.Stdout, fs); err != nil {
|
| 174 | 174 |
return job.Error(err) |
| 175 | 175 |
} else {
|
| 176 |
- utils.Debugf("rendered layer for %s of [%d] size", image.ID, written)
|
|
| 176 |
+ log.Debugf("rendered layer for %s of [%d] size", image.ID, written)
|
|
| 177 | 177 |
} |
| 178 | 178 |
|
| 179 | 179 |
return engine.StatusOK |
| ... | ... |
@@ -3,15 +3,17 @@ package image |
| 3 | 3 |
import ( |
| 4 | 4 |
"encoding/json" |
| 5 | 5 |
"fmt" |
| 6 |
- "github.com/docker/docker/archive" |
|
| 7 |
- "github.com/docker/docker/daemon/graphdriver" |
|
| 8 |
- "github.com/docker/docker/runconfig" |
|
| 9 |
- "github.com/docker/docker/utils" |
|
| 10 | 6 |
"io/ioutil" |
| 11 | 7 |
"os" |
| 12 | 8 |
"path" |
| 13 | 9 |
"strconv" |
| 14 | 10 |
"time" |
| 11 |
+ |
|
| 12 |
+ "github.com/docker/docker/archive" |
|
| 13 |
+ "github.com/docker/docker/daemon/graphdriver" |
|
| 14 |
+ "github.com/docker/docker/pkg/log" |
|
| 15 |
+ "github.com/docker/docker/runconfig" |
|
| 16 |
+ "github.com/docker/docker/utils" |
|
| 15 | 17 |
) |
| 16 | 18 |
|
| 17 | 19 |
type Image struct {
|
| ... | ... |
@@ -87,11 +89,11 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, ro |
| 87 | 87 |
} |
| 88 | 88 |
} else {
|
| 89 | 89 |
start := time.Now().UTC() |
| 90 |
- utils.Debugf("Start untar layer")
|
|
| 90 |
+ log.Debugf("Start untar layer")
|
|
| 91 | 91 |
if err := archive.ApplyLayer(layer, layerData); err != nil {
|
| 92 | 92 |
return err |
| 93 | 93 |
} |
| 94 |
- utils.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
|
|
| 94 |
+ log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
|
|
| 95 | 95 |
|
| 96 | 96 |
if img.Parent == "" {
|
| 97 | 97 |
if size, err = utils.TreeSize(layer); err != nil {
|
| ... | ... |
@@ -299,7 +301,7 @@ func (img *Image) Depth() (int, error) {
|
| 299 | 299 |
func NewImgJSON(src []byte) (*Image, error) {
|
| 300 | 300 |
ret := &Image{}
|
| 301 | 301 |
|
| 302 |
- utils.Debugf("Json string: {%s}", src)
|
|
| 302 |
+ log.Debugf("Json string: {%s}", src)
|
|
| 303 | 303 |
// FIXME: Is there a cleaner way to "purify" the input json? |
| 304 | 304 |
if err := json.Unmarshal(src, ret); err != nil {
|
| 305 | 305 |
return nil, err |
| ... | ... |
@@ -13,6 +13,7 @@ import ( |
| 13 | 13 |
|
| 14 | 14 |
"github.com/docker/docker/api/client" |
| 15 | 15 |
"github.com/docker/docker/daemon" |
| 16 |
+ "github.com/docker/docker/pkg/log" |
|
| 16 | 17 |
"github.com/docker/docker/pkg/term" |
| 17 | 18 |
"github.com/docker/docker/utils" |
| 18 | 19 |
) |
| ... | ... |
@@ -174,7 +175,7 @@ func TestRunDisconnectTty(t *testing.T) {
|
| 174 | 174 |
// We're simulating a disconnect so the return value doesn't matter. What matters is the |
| 175 | 175 |
// fact that CmdRun returns. |
| 176 | 176 |
if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil {
|
| 177 |
- utils.Debugf("Error CmdRun: %s", err)
|
|
| 177 |
+ log.Debugf("Error CmdRun: %s", err)
|
|
| 178 | 178 |
} |
| 179 | 179 |
}() |
| 180 | 180 |
|
| ... | ... |
@@ -414,7 +415,7 @@ func TestAttachDisconnect(t *testing.T) {
|
| 414 | 414 |
go func() {
|
| 415 | 415 |
// Start a process in daemon mode |
| 416 | 416 |
if err := cli.CmdRun("-d", "-i", unitTestImageID, "/bin/cat"); err != nil {
|
| 417 |
- utils.Debugf("Error CmdRun: %s", err)
|
|
| 417 |
+ log.Debugf("Error CmdRun: %s", err)
|
|
| 418 | 418 |
} |
| 419 | 419 |
}() |
| 420 | 420 |
|
| ... | ... |
@@ -4,7 +4,7 @@ import ( |
| 4 | 4 |
"bytes" |
| 5 | 5 |
"fmt" |
| 6 | 6 |
"io" |
| 7 |
- "log" |
|
| 7 |
+ std_log "log" |
|
| 8 | 8 |
"net" |
| 9 | 9 |
"net/url" |
| 10 | 10 |
"os" |
| ... | ... |
@@ -20,6 +20,7 @@ import ( |
| 20 | 20 |
"github.com/docker/docker/engine" |
| 21 | 21 |
"github.com/docker/docker/image" |
| 22 | 22 |
"github.com/docker/docker/nat" |
| 23 |
+ "github.com/docker/docker/pkg/log" |
|
| 23 | 24 |
"github.com/docker/docker/reexec" |
| 24 | 25 |
"github.com/docker/docker/runconfig" |
| 25 | 26 |
"github.com/docker/docker/utils" |
| ... | ... |
@@ -99,7 +100,7 @@ func init() {
|
| 99 | 99 |
} |
| 100 | 100 |
|
| 101 | 101 |
if uid := syscall.Geteuid(); uid != 0 {
|
| 102 |
- log.Fatal("docker tests need to be run as root")
|
|
| 102 |
+ log.Fatalf("docker tests need to be run as root")
|
|
| 103 | 103 |
} |
| 104 | 104 |
|
| 105 | 105 |
// Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary) |
| ... | ... |
@@ -133,7 +134,7 @@ func init() {
|
| 133 | 133 |
} |
| 134 | 134 |
|
| 135 | 135 |
func setupBaseImage() {
|
| 136 |
- eng := newTestEngine(log.New(os.Stderr, "", 0), false, unitTestStoreBase) |
|
| 136 |
+ eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase) |
|
| 137 | 137 |
job := eng.Job("image_inspect", unitTestImageName)
|
| 138 | 138 |
img, _ := job.Stdout.AddEnv() |
| 139 | 139 |
// If the unit test is not found, try to download it. |
| ... | ... |
@@ -149,17 +150,17 @@ func setupBaseImage() {
|
| 149 | 149 |
|
| 150 | 150 |
func spawnGlobalDaemon() {
|
| 151 | 151 |
if globalDaemon != nil {
|
| 152 |
- utils.Debugf("Global daemon already exists. Skipping.")
|
|
| 152 |
+ log.Debugf("Global daemon already exists. Skipping.")
|
|
| 153 | 153 |
return |
| 154 | 154 |
} |
| 155 |
- t := log.New(os.Stderr, "", 0) |
|
| 155 |
+ t := std_log.New(os.Stderr, "", 0) |
|
| 156 | 156 |
eng := NewTestEngine(t) |
| 157 | 157 |
globalEngine = eng |
| 158 | 158 |
globalDaemon = mkDaemonFromEngine(eng, t) |
| 159 | 159 |
|
| 160 | 160 |
// Spawn a Daemon |
| 161 | 161 |
go func() {
|
| 162 |
- utils.Debugf("Spawning global daemon for integration tests")
|
|
| 162 |
+ log.Debugf("Spawning global daemon for integration tests")
|
|
| 163 | 163 |
listenURL := &url.URL{
|
| 164 | 164 |
Scheme: testDaemonProto, |
| 165 | 165 |
Host: testDaemonAddr, |
| ... | ... |
@@ -197,7 +198,7 @@ func spawnRogueHttpsDaemon() {
|
| 197 | 197 |
} |
| 198 | 198 |
|
| 199 | 199 |
func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine {
|
| 200 |
- t := log.New(os.Stderr, "", 0) |
|
| 200 |
+ t := std_log.New(os.Stderr, "", 0) |
|
| 201 | 201 |
root, err := newTestDirectory(unitTestStoreBase) |
| 202 | 202 |
if err != nil {
|
| 203 | 203 |
t.Fatal(err) |
| ... | ... |
@@ -209,7 +210,7 @@ func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine {
|
| 209 | 209 |
|
| 210 | 210 |
// Spawn a Daemon |
| 211 | 211 |
go func() {
|
| 212 |
- utils.Debugf("Spawning https daemon for integration tests")
|
|
| 212 |
+ log.Debugf("Spawning https daemon for integration tests")
|
|
| 213 | 213 |
listenURL := &url.URL{
|
| 214 | 214 |
Scheme: testDaemonHttpsProto, |
| 215 | 215 |
Host: addr, |
| ... | ... |
@@ -4,11 +4,11 @@ import ( |
| 4 | 4 |
"bytes" |
| 5 | 5 |
"encoding/json" |
| 6 | 6 |
"io" |
| 7 |
- "log" |
|
| 8 | 7 |
"sync" |
| 9 | 8 |
"time" |
| 10 | 9 |
|
| 11 | 10 |
"github.com/docker/docker/pkg/jsonlog" |
| 11 |
+ "github.com/docker/docker/pkg/log" |
|
| 12 | 12 |
) |
| 13 | 13 |
|
| 14 | 14 |
// BroadcastWriter accumulate multiple io.WriteCloser by stream. |
| ... | ... |
@@ -56,7 +56,7 @@ func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
|
| 56 | 56 |
} |
| 57 | 57 |
b, err := json.Marshal(jsonlog.JSONLog{Log: line, Stream: stream, Created: created})
|
| 58 | 58 |
if err != nil {
|
| 59 |
- log.Printf("Error making JSON log line: %s", err)
|
|
| 59 |
+ log.Errorf("Error making JSON log line: %s", err)
|
|
| 60 | 60 |
continue |
| 61 | 61 |
} |
| 62 | 62 |
b = append(b, '\n') |
| ... | ... |
@@ -3,9 +3,10 @@ package httputils |
| 3 | 3 |
import ( |
| 4 | 4 |
"fmt" |
| 5 | 5 |
"io" |
| 6 |
- "log" |
|
| 7 | 6 |
"net/http" |
| 8 | 7 |
"time" |
| 8 |
+ |
|
| 9 |
+ "github.com/docker/docker/pkg/log" |
|
| 9 | 10 |
) |
| 10 | 11 |
|
| 11 | 12 |
type resumableRequestReader struct {
|
| ... | ... |
@@ -71,7 +72,7 @@ func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
|
| 71 | 71 |
r.cleanUpResponse() |
| 72 | 72 |
} |
| 73 | 73 |
if err != nil && err != io.EOF {
|
| 74 |
- log.Printf("encountered error during pull and clearing it before resume: %s", err)
|
|
| 74 |
+ log.Infof("encountered error during pull and clearing it before resume: %s", err)
|
|
| 75 | 75 |
err = nil |
| 76 | 76 |
} |
| 77 | 77 |
return n, err |
| 78 | 78 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,77 @@ |
| 0 |
+package log |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "io" |
|
| 5 |
+ "os" |
|
| 6 |
+ "runtime" |
|
| 7 |
+ "strings" |
|
| 8 |
+) |
|
| 9 |
+ |
|
| 10 |
+type priority int |
|
| 11 |
+ |
|
| 12 |
+const ( |
|
| 13 |
+ errorFormat = "[%s] %s:%d %s\n" |
|
| 14 |
+ logFormat = "[%s] %s\n" |
|
| 15 |
+ |
|
| 16 |
+ fatal priority = iota |
|
| 17 |
+ error |
|
| 18 |
+ info |
|
| 19 |
+ debug |
|
| 20 |
+) |
|
| 21 |
+ |
|
| 22 |
+func (p priority) String() string {
|
|
| 23 |
+ switch p {
|
|
| 24 |
+ case fatal: |
|
| 25 |
+ return "fatal" |
|
| 26 |
+ case error: |
|
| 27 |
+ return "error" |
|
| 28 |
+ case info: |
|
| 29 |
+ return "info" |
|
| 30 |
+ case debug: |
|
| 31 |
+ return "debug" |
|
| 32 |
+ } |
|
| 33 |
+ |
|
| 34 |
+ return "" |
|
| 35 |
+} |
|
| 36 |
+ |
|
| 37 |
+// Debug function, if the debug flag is set, then display. Do nothing otherwise |
|
| 38 |
+// If Docker is in daemon mode, also send the debug info on the socket |
|
| 39 |
+func Debugf(format string, a ...interface{}) {
|
|
| 40 |
+ if os.Getenv("DEBUG") != "" {
|
|
| 41 |
+ logf(os.Stderr, debug, format, a...) |
|
| 42 |
+ } |
|
| 43 |
+} |
|
| 44 |
+ |
|
| 45 |
+func Infof(format string, a ...interface{}) {
|
|
| 46 |
+ logf(os.Stdout, info, format, a...) |
|
| 47 |
+} |
|
| 48 |
+ |
|
| 49 |
+func Errorf(format string, a ...interface{}) {
|
|
| 50 |
+ logf(os.Stderr, error, format, a...) |
|
| 51 |
+} |
|
| 52 |
+ |
|
| 53 |
+func Fatalf(format string, a ...interface{}) {
|
|
| 54 |
+ logf(os.Stderr, fatal, format, a...) |
|
| 55 |
+ os.Exit(1) |
|
| 56 |
+} |
|
| 57 |
+ |
|
| 58 |
+func logf(stream io.Writer, level priority, format string, a ...interface{}) {
|
|
| 59 |
+ var prefix string |
|
| 60 |
+ |
|
| 61 |
+ if level <= error || level == debug {
|
|
| 62 |
+ // Retrieve the stack infos |
|
| 63 |
+ _, file, line, ok := runtime.Caller(2) |
|
| 64 |
+ if !ok {
|
|
| 65 |
+ file = "<unknown>" |
|
| 66 |
+ line = -1 |
|
| 67 |
+ } else {
|
|
| 68 |
+ file = file[strings.LastIndex(file, "/")+1:] |
|
| 69 |
+ } |
|
| 70 |
+ prefix = fmt.Sprintf(errorFormat, level.String(), file, line, format) |
|
| 71 |
+ } else {
|
|
| 72 |
+ prefix = fmt.Sprintf(logFormat, level.String(), format) |
|
| 73 |
+ } |
|
| 74 |
+ |
|
| 75 |
+ fmt.Fprintf(stream, prefix, a...) |
|
| 76 |
+} |
| 0 | 77 |
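For reference, a minimal usage sketch of the new pkg/log package added above (this example is not part of the diff; it assumes only the exported Debugf, Infof, Errorf and Fatalf functions and the DEBUG environment check shown in log.go):

package main

import (
    "os"

    "github.com/docker/docker/pkg/log"
)

func main() {
    // Infof always writes to stdout with an "[info]" prefix and no file:line.
    log.Infof("starting up, pid %d", os.Getpid())

    // Debugf is a no-op unless the DEBUG environment variable is non-empty;
    // when enabled it writes to stderr with a "[debug] file:line" prefix.
    os.Setenv("DEBUG", "1")
    log.Debugf("debug output is now enabled")

    // Errorf writes to stderr with an "[error] file:line" prefix.
    log.Errorf("something recoverable went wrong: %v", os.ErrNotExist)

    // Fatalf logs like Errorf (tagged "[fatal]") and then calls os.Exit(1).
    // log.Fatalf("unrecoverable error")
}

Note that fatal, error, info and debug are declared with iota after two string constants in the same const block, so their numeric values start at 2 rather than 0; only their relative order matters, since logf checks level <= error || level == debug to decide whether to include the caller's file and line.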
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,37 @@ |
| 0 |
+package log |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "regexp" |
|
| 5 |
+ |
|
| 6 |
+ "testing" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+func TestLogFatalf(t *testing.T) {
|
|
| 10 |
+ var output *bytes.Buffer |
|
| 11 |
+ |
|
| 12 |
+ tests := []struct {
|
|
| 13 |
+ Level priority |
|
| 14 |
+ Format string |
|
| 15 |
+ Values []interface{}
|
|
| 16 |
+ ExpectedPattern string |
|
| 17 |
+ }{
|
|
| 18 |
+ {fatal, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[fatal\\] testing.go:\\d+ 1 \\+ 1 = 2"},
|
|
| 19 |
+ {error, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[error\\] testing.go:\\d+ 1 \\+ 1 = 2"},
|
|
| 20 |
+ {info, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[info\\] 1 \\+ 1 = 2"},
|
|
| 21 |
+ {debug, "%d + %d = %d", []interface{}{1, 1, 2}, "\\[debug\\] testing.go:\\d+ 1 \\+ 1 = 2"},
|
|
| 22 |
+ } |
|
| 23 |
+ |
|
| 24 |
+ for i, test := range tests {
|
|
| 25 |
+ output = &bytes.Buffer{}
|
|
| 26 |
+ logf(output, test.Level, test.Format, test.Values...) |
|
| 27 |
+ |
|
| 28 |
+ expected := regexp.MustCompile(test.ExpectedPattern) |
|
| 29 |
+ if !expected.MatchString(output.String()) {
|
|
| 30 |
+ t.Errorf("[%d] Log output does not match expected pattern:\n\tExpected: %s\n\tOutput: %s",
|
|
| 31 |
+ i, |
|
| 32 |
+ expected.String(), |
|
| 33 |
+ output.String()) |
|
| 34 |
+ } |
|
| 35 |
+ } |
|
| 36 |
+} |
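A note on the expected patterns above (an observation, not part of the diff): logf looks up its caller with runtime.Caller(2), which skips logf itself and its immediate caller. When invoked through Debugf or Errorf that lands on the user's call site, but the test calls logf directly, so the frame two levels up is the testing package's runner; this is why the fatal, error and debug cases expect testing.go:\d+ rather than the test file's own name.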
| ... | ... |
@@ -7,12 +7,13 @@ import ( |
| 7 | 7 |
"encoding/hex" |
| 8 | 8 |
"hash" |
| 9 | 9 |
"io" |
| 10 |
- "log" |
|
| 11 | 10 |
"sort" |
| 12 | 11 |
"strconv" |
| 13 | 12 |
"strings" |
| 14 | 13 |
|
| 15 | 14 |
"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" |
| 15 |
+ |
|
| 16 |
+ "github.com/docker/docker/pkg/log" |
|
| 16 | 17 |
) |
| 17 | 18 |
|
| 18 | 19 |
type TarSum struct {
|
| ... | ... |
@@ -170,11 +171,11 @@ func (ts *TarSum) Sum(extra []byte) string {
|
| 170 | 170 |
h.Write(extra) |
| 171 | 171 |
} |
| 172 | 172 |
for _, sum := range sums {
|
| 173 |
- log.Printf("-->%s<--", sum)
|
|
| 173 |
+ log.Infof("-->%s<--", sum)
|
|
| 174 | 174 |
h.Write([]byte(sum)) |
| 175 | 175 |
} |
| 176 | 176 |
checksum := "tarsum+sha256:" + hex.EncodeToString(h.Sum(nil)) |
| 177 |
- log.Printf("checksum processed: %s", checksum)
|
|
| 177 |
+ log.Infof("checksum processed: %s", checksum)
|
|
| 178 | 178 |
return checksum |
| 179 | 179 |
} |
| 180 | 180 |
|
| ... | ... |
@@ -15,6 +15,7 @@ import ( |
| 15 | 15 |
"strings" |
| 16 | 16 |
"time" |
| 17 | 17 |
|
| 18 |
+ "github.com/docker/docker/pkg/log" |
|
| 18 | 19 |
"github.com/docker/docker/utils" |
| 19 | 20 |
) |
| 20 | 21 |
|
| ... | ... |
@@ -186,17 +187,17 @@ func pingRegistryEndpoint(endpoint string) (RegistryInfo, error) {
|
| 186 | 186 |
Standalone: true, |
| 187 | 187 |
} |
| 188 | 188 |
if err := json.Unmarshal(jsonString, &info); err != nil {
|
| 189 |
- utils.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err)
|
|
| 189 |
+ log.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err)
|
|
| 190 | 190 |
// don't stop here. Just assume sane defaults |
| 191 | 191 |
} |
| 192 | 192 |
if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" {
|
| 193 |
- utils.Debugf("Registry version header: '%s'", hdr)
|
|
| 193 |
+ log.Debugf("Registry version header: '%s'", hdr)
|
|
| 194 | 194 |
info.Version = hdr |
| 195 | 195 |
} |
| 196 |
- utils.Debugf("RegistryInfo.Version: %q", info.Version)
|
|
| 196 |
+ log.Debugf("RegistryInfo.Version: %q", info.Version)
|
|
| 197 | 197 |
|
| 198 | 198 |
standalone := resp.Header.Get("X-Docker-Registry-Standalone")
|
| 199 |
- utils.Debugf("Registry standalone header: '%s'", standalone)
|
|
| 199 |
+ log.Debugf("Registry standalone header: '%s'", standalone)
|
|
| 200 | 200 |
// Accepted values are "true" (case-insensitive) and "1". |
| 201 | 201 |
if strings.EqualFold(standalone, "true") || standalone == "1" {
|
| 202 | 202 |
info.Standalone = true |
| ... | ... |
@@ -204,7 +205,7 @@ func pingRegistryEndpoint(endpoint string) (RegistryInfo, error) {
|
| 204 | 204 |
// there is a header set, and it is not "true" or "1", so assume fails |
| 205 | 205 |
info.Standalone = false |
| 206 | 206 |
} |
| 207 |
- utils.Debugf("RegistryInfo.Standalone: %q", info.Standalone)
|
|
| 207 |
+ log.Debugf("RegistryInfo.Standalone: %q", info.Standalone)
|
|
| 208 | 208 |
return info, nil |
| 209 | 209 |
} |
| 210 | 210 |
|
| ... | ... |
@@ -274,7 +275,7 @@ func ExpandAndVerifyRegistryUrl(hostname string) (string, error) {
|
| 274 | 274 |
} |
| 275 | 275 |
endpoint := fmt.Sprintf("https://%s/v1/", hostname)
|
| 276 | 276 |
if _, err := pingRegistryEndpoint(endpoint); err != nil {
|
| 277 |
- utils.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err)
|
|
| 277 |
+ log.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err)
|
|
| 278 | 278 |
endpoint = fmt.Sprintf("http://%s/v1/", hostname)
|
| 279 | 279 |
if _, err = pingRegistryEndpoint(endpoint); err != nil {
|
| 280 | 280 |
//TODO: triggering highland build can be done there without "failing" |
| ... | ... |
@@ -3,8 +3,6 @@ package registry |
| 3 | 3 |
import ( |
| 4 | 4 |
"encoding/json" |
| 5 | 5 |
"fmt" |
| 6 |
- "github.com/docker/docker/utils" |
|
| 7 |
- "github.com/gorilla/mux" |
|
| 8 | 6 |
"io" |
| 9 | 7 |
"io/ioutil" |
| 10 | 8 |
"net/http" |
| ... | ... |
@@ -14,6 +12,10 @@ import ( |
| 14 | 14 |
"strings" |
| 15 | 15 |
"testing" |
| 16 | 16 |
"time" |
| 17 |
+ |
|
| 18 |
+ "github.com/gorilla/mux" |
|
| 19 |
+ |
|
| 20 |
+ "github.com/docker/docker/pkg/log" |
|
| 17 | 21 |
) |
| 18 | 22 |
|
| 19 | 23 |
var ( |
| ... | ... |
@@ -96,7 +98,7 @@ func init() {
|
| 96 | 96 |
|
| 97 | 97 |
func handlerAccessLog(handler http.Handler) http.Handler {
|
| 98 | 98 |
logHandler := func(w http.ResponseWriter, r *http.Request) {
|
| 99 |
- utils.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL)
|
|
| 99 |
+ log.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL)
|
|
| 100 | 100 |
handler.ServeHTTP(w, r) |
| 101 | 101 |
} |
| 102 | 102 |
return http.HandlerFunc(logHandler) |
| ... | ... |
@@ -4,7 +4,7 @@ import ( |
| 4 | 4 |
"strings" |
| 5 | 5 |
|
| 6 | 6 |
"github.com/docker/docker/nat" |
| 7 |
- "github.com/docker/docker/utils" |
|
| 7 |
+ "github.com/docker/docker/pkg/log" |
|
| 8 | 8 |
) |
| 9 | 9 |
|
| 10 | 10 |
func Merge(userConf, imageConf *Config) error {
|
| ... | ... |
@@ -50,7 +50,7 @@ func Merge(userConf, imageConf *Config) error {
|
| 50 | 50 |
} |
| 51 | 51 |
if len(imageConf.PortSpecs) > 0 {
|
| 52 | 52 |
// FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia. |
| 53 |
- utils.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", "))
|
|
| 53 |
+ log.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", "))
|
|
| 54 | 54 |
if userConf.ExposedPorts == nil {
|
| 55 | 55 |
userConf.ExposedPorts = make(nat.PortSet) |
| 56 | 56 |
} |
| ... | ... |
@@ -226,7 +226,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf |
| 226 | 226 |
// parse the '-e' and '--env' after, to allow override |
| 227 | 227 |
envVariables = append(envVariables, flEnv.GetAll()...) |
| 228 | 228 |
// boo, there's no debug output for docker run |
| 229 |
- //utils.Debugf("Environment variables for the container: %#v", envVariables)
|
|
| 229 |
+ //log.Debugf("Environment variables for the container: %#v", envVariables)
|
|
| 230 | 230 |
|
| 231 | 231 |
netMode, err := parseNetMode(*flNetMode) |
| 232 | 232 |
if err != nil {
|
| ... | ... |
@@ -4,6 +4,8 @@ import ( |
| 4 | 4 |
"io" |
| 5 | 5 |
"net/http" |
| 6 | 6 |
"strings" |
| 7 |
+ |
|
| 8 |
+ "github.com/docker/docker/pkg/log" |
|
| 7 | 9 |
) |
| 8 | 10 |
|
| 9 | 11 |
// VersionInfo is used to model entities which has a version. |
| ... | ... |
@@ -157,6 +159,6 @@ func (h *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d |
| 157 | 157 |
return nil, err |
| 158 | 158 |
} |
| 159 | 159 |
} |
| 160 |
- Debugf("%v -- HEADERS: %v", req.URL, req.Header)
|
|
| 160 |
+ log.Debugf("%v -- HEADERS: %v", req.URL, req.Header)
|
|
| 161 | 161 |
return req, err |
| 162 | 162 |
} |
| ... | ... |
@@ -4,6 +4,8 @@ import ( |
| 4 | 4 |
"encoding/binary" |
| 5 | 5 |
"errors" |
| 6 | 6 |
"io" |
| 7 |
+ |
|
| 8 |
+ "github.com/docker/docker/pkg/log" |
|
| 7 | 9 |
) |
| 8 | 10 |
|
| 9 | 11 |
const ( |
| ... | ... |
@@ -85,13 +87,13 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) |
| 85 | 85 |
nr += nr2 |
| 86 | 86 |
if er == io.EOF {
|
| 87 | 87 |
if nr < StdWriterPrefixLen {
|
| 88 |
- Debugf("Corrupted prefix: %v", buf[:nr])
|
|
| 88 |
+ log.Debugf("Corrupted prefix: %v", buf[:nr])
|
|
| 89 | 89 |
return written, nil |
| 90 | 90 |
} |
| 91 | 91 |
break |
| 92 | 92 |
} |
| 93 | 93 |
if er != nil {
|
| 94 |
- Debugf("Error reading header: %s", er)
|
|
| 94 |
+ log.Debugf("Error reading header: %s", er)
|
|
| 95 | 95 |
return 0, er |
| 96 | 96 |
} |
| 97 | 97 |
} |
| ... | ... |
@@ -107,18 +109,18 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) |
| 107 | 107 |
// Write on stderr |
| 108 | 108 |
out = dsterr |
| 109 | 109 |
default: |
| 110 |
- Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
|
|
| 110 |
+ log.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
|
|
| 111 | 111 |
return 0, ErrInvalidStdHeader |
| 112 | 112 |
} |
| 113 | 113 |
|
| 114 | 114 |
// Retrieve the size of the frame |
| 115 | 115 |
frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4])) |
| 116 |
- Debugf("framesize: %d", frameSize)
|
|
| 116 |
+ log.Debugf("framesize: %d", frameSize)
|
|
| 117 | 117 |
|
| 118 | 118 |
// Check if the buffer is big enough to read the frame. |
| 119 | 119 |
// Extend it if necessary. |
| 120 | 120 |
if frameSize+StdWriterPrefixLen > bufLen {
|
| 121 |
- Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
|
|
| 121 |
+ log.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
|
|
| 122 | 122 |
buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...) |
| 123 | 123 |
bufLen = len(buf) |
| 124 | 124 |
} |
| ... | ... |
@@ -130,13 +132,13 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) |
| 130 | 130 |
nr += nr2 |
| 131 | 131 |
if er == io.EOF {
|
| 132 | 132 |
if nr < frameSize+StdWriterPrefixLen {
|
| 133 |
- Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
|
|
| 133 |
+ log.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
|
|
| 134 | 134 |
return written, nil |
| 135 | 135 |
} |
| 136 | 136 |
break |
| 137 | 137 |
} |
| 138 | 138 |
if er != nil {
|
| 139 |
- Debugf("Error reading frame: %s", er)
|
|
| 139 |
+ log.Debugf("Error reading frame: %s", er)
|
|
| 140 | 140 |
return 0, er |
| 141 | 141 |
} |
| 142 | 142 |
} |
| ... | ... |
@@ -144,12 +146,12 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) |
| 144 | 144 |
// Write the retrieved frame (without header) |
| 145 | 145 |
nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen]) |
| 146 | 146 |
if ew != nil {
|
| 147 |
- Debugf("Error writing frame: %s", ew)
|
|
| 147 |
+ log.Debugf("Error writing frame: %s", ew)
|
|
| 148 | 148 |
return 0, ew |
| 149 | 149 |
} |
| 150 | 150 |
// If the frame has not been fully written: error |
| 151 | 151 |
if nw != frameSize {
|
| 152 |
- Debugf("Error Short Write: (%d on %d)", nw, frameSize)
|
|
| 152 |
+ log.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
|
|
| 153 | 153 |
return 0, io.ErrShortWrite |
| 154 | 154 |
} |
| 155 | 155 |
written += int64(nw) |
| ... | ... |
@@ -20,6 +20,7 @@ import ( |
| 20 | 20 |
"syscall" |
| 21 | 21 |
|
| 22 | 22 |
"github.com/docker/docker/dockerversion" |
| 23 |
+ "github.com/docker/docker/pkg/log" |
|
| 23 | 24 |
) |
| 24 | 25 |
|
| 25 | 26 |
type KeyValuePair struct {
|
| ... | ... |
@@ -54,31 +55,6 @@ func Download(url string) (resp *http.Response, err error) {
|
| 54 | 54 |
return resp, nil |
| 55 | 55 |
} |
| 56 | 56 |
|
| 57 |
-func logf(level string, format string, a ...interface{}) {
|
|
| 58 |
- // Retrieve the stack infos |
|
| 59 |
- _, file, line, ok := runtime.Caller(2) |
|
| 60 |
- if !ok {
|
|
| 61 |
- file = "<unknown>" |
|
| 62 |
- line = -1 |
|
| 63 |
- } else {
|
|
| 64 |
- file = file[strings.LastIndex(file, "/")+1:] |
|
| 65 |
- } |
|
| 66 |
- |
|
| 67 |
- fmt.Fprintf(os.Stderr, fmt.Sprintf("[%s] %s:%d %s\n", level, file, line, format), a...)
|
|
| 68 |
-} |
|
| 69 |
- |
|
| 70 |
-// Debug function, if the debug flag is set, then display. Do nothing otherwise |
|
| 71 |
-// If Docker is in damon mode, also send the debug info on the socket |
|
| 72 |
-func Debugf(format string, a ...interface{}) {
|
|
| 73 |
- if os.Getenv("DEBUG") != "" {
|
|
| 74 |
- logf("debug", format, a...)
|
|
| 75 |
- } |
|
| 76 |
-} |
|
| 77 |
- |
|
| 78 |
-func Errorf(format string, a ...interface{}) {
|
|
| 79 |
- logf("error", format, a...)
|
|
| 80 |
-} |
|
| 81 |
- |
|
| 82 | 57 |
func Trunc(s string, maxlen int) string {
|
| 83 | 58 |
if len(s) <= maxlen {
|
| 84 | 59 |
return s |
| ... | ... |
@@ -264,7 +240,7 @@ func (r *bufReader) Close() error {
|
| 264 | 264 |
|
| 265 | 265 |
func GetTotalUsedFds() int {
|
| 266 | 266 |
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
|
| 267 |
- Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
|
|
| 267 |
+ log.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
|
|
| 268 | 268 |
} else {
|
| 269 | 269 |
return len(fds) |
| 270 | 270 |
} |
| ... | ... |
@@ -705,15 +681,15 @@ func Matches(relFilePath string, patterns []string) (bool, error) {
|
| 705 | 705 |
for _, exclude := range patterns {
|
| 706 | 706 |
matched, err := filepath.Match(exclude, relFilePath) |
| 707 | 707 |
if err != nil {
|
| 708 |
- Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
|
|
| 708 |
+ log.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
|
|
| 709 | 709 |
return false, err |
| 710 | 710 |
} |
| 711 | 711 |
if matched {
|
| 712 | 712 |
if filepath.Clean(relFilePath) == "." {
|
| 713 |
- Errorf("Can't exclude whole path, excluding pattern: %s", exclude)
|
|
| 713 |
+ log.Errorf("Can't exclude whole path, excluding pattern: %s", exclude)
|
|
| 714 | 714 |
continue |
| 715 | 715 |
} |
| 716 |
- Debugf("Skipping excluded path: %s", relFilePath)
|
|
| 716 |
+ log.Debugf("Skipping excluded path: %s", relFilePath)
|
|
| 717 | 717 |
return true, nil |
| 718 | 718 |
} |
| 719 | 719 |
} |