* Events subsystem merged from `server/events.go` and
`utils/jsonmessagepublisher.go` and moved to `events/events.go`
* The only public interface for this subsystem is engine jobs
* There are two new engine jobs - `log_event` and `subscribers_count`
* There is an auxiliary function `container.LogEvent` for logging events for
containers
Docker-DCO-1.1-Signed-off-by: Alexandr Morozov <lk4d4math@gmail.com> (github: LK4D4)
[solomon@docker.com: resolve merge conflicts]
Signed-off-by: Solomon Hykes <solomon@docker.com>
| ... | ... |
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
"github.com/docker/docker/daemon/networkdriver/bridge" |
| 9 | 9 |
"github.com/docker/docker/dockerversion" |
| 10 | 10 |
"github.com/docker/docker/engine" |
| 11 |
+ "github.com/docker/docker/events" |
|
| 11 | 12 |
"github.com/docker/docker/pkg/parsers/kernel" |
| 12 | 13 |
"github.com/docker/docker/registry" |
| 13 | 14 |
"github.com/docker/docker/server" |
| ... | ... |
@@ -20,6 +21,9 @@ func Register(eng *engine.Engine) error {
|
| 20 | 20 |
if err := remote(eng); err != nil {
|
| 21 | 21 |
return err |
| 22 | 22 |
} |
| 23 |
+ if err := events.New().Install(eng); err != nil {
|
|
| 24 |
+ return err |
|
| 25 |
+ } |
|
| 23 | 26 |
if err := eng.Register("version", dockerVersion); err != nil {
|
| 24 | 27 |
return err |
| 25 | 28 |
} |
| ... | ... |
@@ -168,6 +168,13 @@ func (container *Container) WriteHostConfig() error {
|
| 168 | 168 |
return ioutil.WriteFile(pth, data, 0666) |
| 169 | 169 |
} |
| 170 | 170 |
|
| 171 |
+func (container *Container) LogEvent(action string) {
|
|
| 172 |
+ d := container.daemon |
|
| 173 |
+ if err := d.eng.Job("log_event", action, container.ID, d.Repositories().ImageName(container.Image)).Run(); err != nil {
|
|
| 174 |
+ utils.Errorf("Error running container: %s", err)
|
|
| 175 |
+ } |
|
| 176 |
+} |
|
| 177 |
+ |
|
| 171 | 178 |
func (container *Container) getResourcePath(path string) (string, error) {
|
| 172 | 179 |
cleanPath := filepath.Join("/", path)
|
| 173 | 180 |
return symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs) |
| ... | ... |
@@ -508,7 +515,7 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
|
| 508 | 508 |
container.stdin, container.stdinPipe = io.Pipe() |
| 509 | 509 |
} |
| 510 | 510 |
if container.daemon != nil && container.daemon.srv != nil {
|
| 511 |
- container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
|
|
| 511 |
+ container.LogEvent("die")
|
|
| 512 | 512 |
} |
| 513 | 513 |
if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
|
| 514 | 514 |
// FIXME: here is race condition between two RUN instructions in Dockerfile |
| ... | ... |
@@ -40,7 +40,7 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
|
| 40 | 40 |
if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
|
| 41 | 41 |
job.Errorf("IPv4 forwarding is disabled.\n")
|
| 42 | 42 |
} |
| 43 |
- job.Eng.Job("log", "create", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
|
|
| 43 |
+ container.LogEvent("create")
|
|
| 44 | 44 |
// FIXME: this is necessary because daemon.Create might return a nil container |
| 45 | 45 |
// with a non-nil error. This should not happen! Once it's fixed we |
| 46 | 46 |
// can remove this workaround. |
| ... | ... |
@@ -70,7 +70,7 @@ func (daemon *Daemon) ContainerDestroy(job *engine.Job) engine.Status {
|
| 70 | 70 |
if err := daemon.Destroy(container); err != nil {
|
| 71 | 71 |
return job.Errorf("Cannot destroy container %s: %s", name, err)
|
| 72 | 72 |
} |
| 73 |
- job.Eng.Job("log", "destroy", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
|
|
| 73 |
+ container.LogEvent("destroy")
|
|
| 74 | 74 |
|
| 75 | 75 |
if removeVolume {
|
| 76 | 76 |
var ( |
| ... | ... |
@@ -23,7 +23,7 @@ func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
|
| 23 | 23 |
return job.Errorf("%s: %s", name, err)
|
| 24 | 24 |
} |
| 25 | 25 |
// FIXME: factor job-specific LogEvent to engine.Job.Run() |
| 26 |
- job.Eng.Job("log", "export", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
|
|
| 26 |
+ container.LogEvent("export")
|
|
| 27 | 27 |
return engine.StatusOK |
| 28 | 28 |
} |
| 29 | 29 |
return job.Errorf("No such container: %s", name)
|
| ... | ... |
@@ -93,7 +93,7 @@ func (daemon *Daemon) DeleteImage(eng *engine.Engine, name string, imgs *engine. |
| 93 | 93 |
out := &engine.Env{}
|
| 94 | 94 |
out.Set("Untagged", repoName+":"+tag)
|
| 95 | 95 |
imgs.Add(out) |
| 96 |
- eng.Job("log", "untag", img.ID, "").Run()
|
|
| 96 |
+ eng.Job("log_event", "untag", img.ID, "").Run()
|
|
| 97 | 97 |
} |
| 98 | 98 |
} |
| 99 | 99 |
tags = daemon.Repositories().ByID()[img.ID] |
| ... | ... |
@@ -111,7 +111,7 @@ func (daemon *Daemon) DeleteImage(eng *engine.Engine, name string, imgs *engine. |
| 111 | 111 |
out := &engine.Env{}
|
| 112 | 112 |
out.Set("Deleted", img.ID)
|
| 113 | 113 |
imgs.Add(out) |
| 114 |
- eng.Job("log", "delete", img.ID, "").Run()
|
|
| 114 |
+ eng.Job("log_event", "delete", img.ID, "").Run()
|
|
| 115 | 115 |
if img.Parent != "" && !noprune {
|
| 116 | 116 |
err := daemon.DeleteImage(eng, img.Parent, imgs, false, force, noprune) |
| 117 | 117 |
if first {
|
| ... | ... |
@@ -44,7 +44,7 @@ func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status {
|
| 44 | 44 |
if err := container.Kill(); err != nil {
|
| 45 | 45 |
return job.Errorf("Cannot kill container %s: %s", name, err)
|
| 46 | 46 |
} |
| 47 |
- job.Eng.Job("log", "kill", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
|
|
| 47 |
+ container.LogEvent("kill")
|
|
| 48 | 48 |
} else {
|
| 49 | 49 |
// Otherwise, just send the requested signal |
| 50 | 50 |
if err := container.KillSig(int(sig)); err != nil {
|
| ... | ... |
@@ -16,7 +16,7 @@ func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status {
|
| 16 | 16 |
if err := container.Pause(); err != nil {
|
| 17 | 17 |
return job.Errorf("Cannot pause container %s: %s", name, err)
|
| 18 | 18 |
} |
| 19 |
- job.Eng.Job("log", "pause", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
|
|
| 19 |
+ container.LogEvent("pause")
|
|
| 20 | 20 |
return engine.StatusOK |
| 21 | 21 |
} |
| 22 | 22 |
|
| ... | ... |
@@ -32,6 +32,6 @@ func (daemon *Daemon) ContainerUnpause(job *engine.Job) engine.Status {
|
| 32 | 32 |
if err := container.Unpause(); err != nil {
|
| 33 | 33 |
return job.Errorf("Cannot unpause container %s: %s", name, err)
|
| 34 | 34 |
} |
| 35 |
- job.Eng.Job("log", "unpause", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
|
|
| 35 |
+ container.LogEvent("unpause")
|
|
| 36 | 36 |
return engine.StatusOK |
| 37 | 37 |
} |
| ... | ... |
@@ -19,7 +19,7 @@ func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
|
| 19 | 19 |
if err := container.Restart(int(t)); err != nil {
|
| 20 | 20 |
return job.Errorf("Cannot restart container %s: %s\n", name, err)
|
| 21 | 21 |
} |
| 22 |
- job.Eng.Job("log", "restart", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
|
|
| 22 |
+ container.LogEvent("restart")
|
|
| 23 | 23 |
} else {
|
| 24 | 24 |
return job.Errorf("No such container: %s\n", name)
|
| 25 | 25 |
} |
| ... | ... |
@@ -36,8 +36,7 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
|
| 36 | 36 |
if err := container.Start(); err != nil {
|
| 37 | 37 |
return job.Errorf("Cannot start container %s: %s", name, err)
|
| 38 | 38 |
} |
| 39 |
- job.Eng.Job("log", "start", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
|
|
| 40 |
- |
|
| 39 |
+ container.LogEvent("start")
|
|
| 41 | 40 |
return engine.StatusOK |
| 42 | 41 |
} |
| 43 | 42 |
|
| ... | ... |
@@ -22,7 +22,7 @@ func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
|
| 22 | 22 |
if err := container.Stop(int(t)); err != nil {
|
| 23 | 23 |
return job.Errorf("Cannot stop container %s: %s\n", name, err)
|
| 24 | 24 |
} |
| 25 |
- job.Eng.Job("log", "stop", container.ID, daemon.Repositories().ImageName(container.Image)).Run()
|
|
| 25 |
+ container.LogEvent("stop")
|
|
| 26 | 26 |
} else {
|
| 27 | 27 |
return job.Errorf("No such container: %s\n", name)
|
| 28 | 28 |
} |
| 29 | 29 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,176 @@ |
| 0 |
+package events |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "encoding/json" |
|
| 4 |
+ "sync" |
|
| 5 |
+ "time" |
|
| 6 |
+ |
|
| 7 |
+ "github.com/docker/docker/engine" |
|
| 8 |
+ "github.com/docker/docker/utils" |
|
| 9 |
+) |
|
| 10 |
+ |
|
| 11 |
+const eventsLimit = 64 |
|
| 12 |
+ |
|
| 13 |
+type listener chan<- *utils.JSONMessage |
|
| 14 |
+ |
|
| 15 |
+type Events struct {
|
|
| 16 |
+ mu sync.RWMutex |
|
| 17 |
+ events []*utils.JSONMessage |
|
| 18 |
+ subscribers []listener |
|
| 19 |
+} |
|
| 20 |
+ |
|
| 21 |
+func New() *Events {
|
|
| 22 |
+ return &Events{
|
|
| 23 |
+ events: make([]*utils.JSONMessage, 0, eventsLimit), |
|
| 24 |
+ } |
|
| 25 |
+} |
|
| 26 |
+ |
|
| 27 |
+// Install installs events public api in docker engine |
|
| 28 |
+func (e *Events) Install(eng *engine.Engine) error {
|
|
| 29 |
+ // Here you should describe public interface |
|
| 30 |
+ jobs := map[string]engine.Handler{
|
|
| 31 |
+ "events": e.Get, |
|
| 32 |
+ "log_event": e.Log, |
|
| 33 |
+ "subscribers_count": e.SubscribersCount, |
|
| 34 |
+ } |
|
| 35 |
+ for name, job := range jobs {
|
|
| 36 |
+ if err := eng.Register(name, job); err != nil {
|
|
| 37 |
+ return err |
|
| 38 |
+ } |
|
| 39 |
+ } |
|
| 40 |
+ return nil |
|
| 41 |
+} |
|
| 42 |
+ |
|
| 43 |
+func (e *Events) Get(job *engine.Job) engine.Status {
|
|
| 44 |
+ var ( |
|
| 45 |
+ since = job.GetenvInt64("since")
|
|
| 46 |
+ until = job.GetenvInt64("until")
|
|
| 47 |
+ timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now())) |
|
| 48 |
+ ) |
|
| 49 |
+ |
|
| 50 |
+ // If no until, disable timeout |
|
| 51 |
+ if until == 0 {
|
|
| 52 |
+ timeout.Stop() |
|
| 53 |
+ } |
|
| 54 |
+ |
|
| 55 |
+ listener := make(chan *utils.JSONMessage) |
|
| 56 |
+ e.subscribe(listener) |
|
| 57 |
+ defer e.unsubscribe(listener) |
|
| 58 |
+ |
|
| 59 |
+ job.Stdout.Write(nil) |
|
| 60 |
+ |
|
| 61 |
+ // Resend every event in the [since, until] time interval. |
|
| 62 |
+ if since != 0 {
|
|
| 63 |
+ if err := e.writeCurrent(job, since, until); err != nil {
|
|
| 64 |
+ return job.Error(err) |
|
| 65 |
+ } |
|
| 66 |
+ } |
|
| 67 |
+ |
|
| 68 |
+ for {
|
|
| 69 |
+ select {
|
|
| 70 |
+ case event, ok := <-listener: |
|
| 71 |
+ if !ok {
|
|
| 72 |
+ return engine.StatusOK |
|
| 73 |
+ } |
|
| 74 |
+ if err := writeEvent(job, event); err != nil {
|
|
| 75 |
+ return job.Error(err) |
|
| 76 |
+ } |
|
| 77 |
+ case <-timeout.C: |
|
| 78 |
+ return engine.StatusOK |
|
| 79 |
+ } |
|
| 80 |
+ } |
|
| 81 |
+} |
|
| 82 |
+ |
|
| 83 |
+func (e *Events) Log(job *engine.Job) engine.Status {
|
|
| 84 |
+ if len(job.Args) != 3 {
|
|
| 85 |
+ return job.Errorf("usage: %s ACTION ID FROM", job.Name)
|
|
| 86 |
+ } |
|
| 87 |
+ // not waiting for receivers |
|
| 88 |
+ go e.log(job.Args[0], job.Args[1], job.Args[2]) |
|
| 89 |
+ return engine.StatusOK |
|
| 90 |
+} |
|
| 91 |
+ |
|
| 92 |
+func (e *Events) SubscribersCount(job *engine.Job) engine.Status {
|
|
| 93 |
+ ret := &engine.Env{}
|
|
| 94 |
+ ret.SetInt("count", e.subscribersCount())
|
|
| 95 |
+ ret.WriteTo(job.Stdout) |
|
| 96 |
+ return engine.StatusOK |
|
| 97 |
+} |
|
| 98 |
+ |
|
| 99 |
+func writeEvent(job *engine.Job, event *utils.JSONMessage) error {
|
|
| 100 |
+ // When sending an event JSON serialization errors are ignored, but all |
|
| 101 |
+ // other errors lead to the eviction of the listener. |
|
| 102 |
+ if b, err := json.Marshal(event); err == nil {
|
|
| 103 |
+ if _, err = job.Stdout.Write(b); err != nil {
|
|
| 104 |
+ return err |
|
| 105 |
+ } |
|
| 106 |
+ } |
|
| 107 |
+ return nil |
|
| 108 |
+} |
|
| 109 |
+ |
|
| 110 |
+func (e *Events) writeCurrent(job *engine.Job, since, until int64) error {
|
|
| 111 |
+ e.mu.RLock() |
|
| 112 |
+ for _, event := range e.events {
|
|
| 113 |
+ if event.Time >= since && (event.Time <= until || until == 0) {
|
|
| 114 |
+ if err := writeEvent(job, event); err != nil {
|
|
| 115 |
+ e.mu.RUnlock() |
|
| 116 |
+ return err |
|
| 117 |
+ } |
|
| 118 |
+ } |
|
| 119 |
+ } |
|
| 120 |
+ e.mu.RUnlock() |
|
| 121 |
+ return nil |
|
| 122 |
+} |
|
| 123 |
+ |
|
| 124 |
+func (e *Events) subscribersCount() int {
|
|
| 125 |
+ e.mu.RLock() |
|
| 126 |
+ c := len(e.subscribers) |
|
| 127 |
+ e.mu.RUnlock() |
|
| 128 |
+ return c |
|
| 129 |
+} |
|
| 130 |
+ |
|
| 131 |
+func (e *Events) log(action, id, from string) {
|
|
| 132 |
+ e.mu.Lock() |
|
| 133 |
+ now := time.Now().UTC().Unix() |
|
| 134 |
+ jm := &utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
|
|
| 135 |
+ if len(e.events) == cap(e.events) {
|
|
| 136 |
+ // discard oldest event |
|
| 137 |
+ copy(e.events, e.events[1:]) |
|
| 138 |
+ e.events[len(e.events)-1] = jm |
|
| 139 |
+ } else {
|
|
| 140 |
+ e.events = append(e.events, jm) |
|
| 141 |
+ } |
|
| 142 |
+ for _, s := range e.subscribers {
|
|
| 143 |
+ // We give each subscriber a 100ms time window to receive the event, |
|
| 144 |
+ // after which we move to the next. |
|
| 145 |
+ select {
|
|
| 146 |
+ case s <- jm: |
|
| 147 |
+ case <-time.After(100 * time.Millisecond): |
|
| 148 |
+ } |
|
| 149 |
+ } |
|
| 150 |
+ e.mu.Unlock() |
|
| 151 |
+} |
|
| 152 |
+ |
|
| 153 |
+func (e *Events) subscribe(l listener) {
|
|
| 154 |
+ e.mu.Lock() |
|
| 155 |
+ e.subscribers = append(e.subscribers, l) |
|
| 156 |
+ e.mu.Unlock() |
|
| 157 |
+} |
|
| 158 |
+ |
|
| 159 |
+// unsubscribe closes and removes the specified listener from the list of |
|
| 160 |
+// previously registed ones. |
|
| 161 |
+// It returns a boolean value indicating if the listener was successfully |
|
| 162 |
+// found, closed and unregistered. |
|
| 163 |
+func (e *Events) unsubscribe(l listener) bool {
|
|
| 164 |
+ e.mu.Lock() |
|
| 165 |
+ for i, subscriber := range e.subscribers {
|
|
| 166 |
+ if subscriber == l {
|
|
| 167 |
+ close(l) |
|
| 168 |
+ e.subscribers = append(e.subscribers[:i], e.subscribers[i+1:]...) |
|
| 169 |
+ e.mu.Unlock() |
|
| 170 |
+ return true |
|
| 171 |
+ } |
|
| 172 |
+ } |
|
| 173 |
+ e.mu.Unlock() |
|
| 174 |
+ return false |
|
| 175 |
+} |
| 0 | 176 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,154 @@ |
| 0 |
+package events |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "encoding/json" |
|
| 5 |
+ "fmt" |
|
| 6 |
+ "io" |
|
| 7 |
+ "testing" |
|
| 8 |
+ "time" |
|
| 9 |
+ |
|
| 10 |
+ "github.com/docker/docker/engine" |
|
| 11 |
+ "github.com/docker/docker/utils" |
|
| 12 |
+) |
|
| 13 |
+ |
|
| 14 |
+func TestEventsPublish(t *testing.T) {
|
|
| 15 |
+ e := New() |
|
| 16 |
+ l1 := make(chan *utils.JSONMessage) |
|
| 17 |
+ l2 := make(chan *utils.JSONMessage) |
|
| 18 |
+ e.subscribe(l1) |
|
| 19 |
+ e.subscribe(l2) |
|
| 20 |
+ count := e.subscribersCount() |
|
| 21 |
+ if count != 2 {
|
|
| 22 |
+ t.Fatalf("Must be 2 subscribers, got %d", count)
|
|
| 23 |
+ } |
|
| 24 |
+ go e.log("test", "cont", "image")
|
|
| 25 |
+ select {
|
|
| 26 |
+ case msg := <-l1: |
|
| 27 |
+ if len(e.events) != 1 {
|
|
| 28 |
+ t.Fatalf("Must be only one event, got %d", len(e.events))
|
|
| 29 |
+ } |
|
| 30 |
+ if msg.Status != "test" {
|
|
| 31 |
+ t.Fatalf("Status should be test, got %s", msg.Status)
|
|
| 32 |
+ } |
|
| 33 |
+ if msg.ID != "cont" {
|
|
| 34 |
+ t.Fatalf("ID should be cont, got %s", msg.ID)
|
|
| 35 |
+ } |
|
| 36 |
+ if msg.From != "image" {
|
|
| 37 |
+ t.Fatalf("From should be image, got %s", msg.From)
|
|
| 38 |
+ } |
|
| 39 |
+ case <-time.After(1 * time.Second): |
|
| 40 |
+ t.Fatal("Timeout waiting for broadcasted message")
|
|
| 41 |
+ } |
|
| 42 |
+ select {
|
|
| 43 |
+ case msg := <-l2: |
|
| 44 |
+ if len(e.events) != 1 {
|
|
| 45 |
+ t.Fatalf("Must be only one event, got %d", len(e.events))
|
|
| 46 |
+ } |
|
| 47 |
+ if msg.Status != "test" {
|
|
| 48 |
+ t.Fatalf("Status should be test, got %s", msg.Status)
|
|
| 49 |
+ } |
|
| 50 |
+ if msg.ID != "cont" {
|
|
| 51 |
+ t.Fatalf("ID should be cont, got %s", msg.ID)
|
|
| 52 |
+ } |
|
| 53 |
+ if msg.From != "image" {
|
|
| 54 |
+ t.Fatalf("From should be image, got %s", msg.From)
|
|
| 55 |
+ } |
|
| 56 |
+ case <-time.After(1 * time.Second): |
|
| 57 |
+ t.Fatal("Timeout waiting for broadcasted message")
|
|
| 58 |
+ } |
|
| 59 |
+} |
|
| 60 |
+ |
|
| 61 |
+func TestEventsPublishTimeout(t *testing.T) {
|
|
| 62 |
+ e := New() |
|
| 63 |
+ l := make(chan *utils.JSONMessage) |
|
| 64 |
+ e.subscribe(l) |
|
| 65 |
+ |
|
| 66 |
+ c := make(chan struct{})
|
|
| 67 |
+ go func() {
|
|
| 68 |
+ e.log("test", "cont", "image")
|
|
| 69 |
+ close(c) |
|
| 70 |
+ }() |
|
| 71 |
+ |
|
| 72 |
+ select {
|
|
| 73 |
+ case <-c: |
|
| 74 |
+ case <-time.After(time.Second): |
|
| 75 |
+ t.Fatal("Timeout publishing message")
|
|
| 76 |
+ } |
|
| 77 |
+} |
|
| 78 |
+ |
|
| 79 |
+func TestLogEvents(t *testing.T) {
|
|
| 80 |
+ e := New() |
|
| 81 |
+ eng := engine.New() |
|
| 82 |
+ if err := e.Install(eng); err != nil {
|
|
| 83 |
+ t.Fatal(err) |
|
| 84 |
+ } |
|
| 85 |
+ |
|
| 86 |
+ for i := 0; i < eventsLimit+16; i++ {
|
|
| 87 |
+ action := fmt.Sprintf("action_%d", i)
|
|
| 88 |
+ id := fmt.Sprintf("cont_%d", i)
|
|
| 89 |
+ from := fmt.Sprintf("image_%d", i)
|
|
| 90 |
+ job := eng.Job("log_event", action, id, from)
|
|
| 91 |
+ if err := job.Run(); err != nil {
|
|
| 92 |
+ t.Fatal(err) |
|
| 93 |
+ } |
|
| 94 |
+ } |
|
| 95 |
+ time.Sleep(50 * time.Millisecond) |
|
| 96 |
+ if len(e.events) != eventsLimit {
|
|
| 97 |
+ t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events))
|
|
| 98 |
+ } |
|
| 99 |
+ |
|
| 100 |
+ job := eng.Job("events")
|
|
| 101 |
+ job.SetenvInt64("since", 1)
|
|
| 102 |
+ job.SetenvInt64("until", time.Now().Unix())
|
|
| 103 |
+ buf := bytes.NewBuffer(nil) |
|
| 104 |
+ job.Stdout.Add(buf) |
|
| 105 |
+ if err := job.Run(); err != nil {
|
|
| 106 |
+ t.Fatal(err) |
|
| 107 |
+ } |
|
| 108 |
+ buf = bytes.NewBuffer(buf.Bytes()) |
|
| 109 |
+ dec := json.NewDecoder(buf) |
|
| 110 |
+ var msgs []utils.JSONMessage |
|
| 111 |
+ for {
|
|
| 112 |
+ var jm utils.JSONMessage |
|
| 113 |
+ if err := dec.Decode(&jm); err != nil {
|
|
| 114 |
+ if err == io.EOF {
|
|
| 115 |
+ break |
|
| 116 |
+ } |
|
| 117 |
+ t.Fatal(err) |
|
| 118 |
+ } |
|
| 119 |
+ msgs = append(msgs, jm) |
|
| 120 |
+ } |
|
| 121 |
+ if len(msgs) != eventsLimit {
|
|
| 122 |
+ t.Fatalf("Must be %d events, got %d", eventsLimit, len(msgs))
|
|
| 123 |
+ } |
|
| 124 |
+ first := msgs[0] |
|
| 125 |
+ if first.Status != "action_16" {
|
|
| 126 |
+ t.Fatalf("First action is %s, must be action_15", first.Status)
|
|
| 127 |
+ } |
|
| 128 |
+ last := msgs[len(msgs)-1] |
|
| 129 |
+ if last.Status != "action_79" {
|
|
| 130 |
+ t.Fatalf("First action is %s, must be action_79", first.Status)
|
|
| 131 |
+ } |
|
| 132 |
+} |
|
| 133 |
+ |
|
| 134 |
+func TestEventsCountJob(t *testing.T) {
|
|
| 135 |
+ e := New() |
|
| 136 |
+ eng := engine.New() |
|
| 137 |
+ if err := e.Install(eng); err != nil {
|
|
| 138 |
+ t.Fatal(err) |
|
| 139 |
+ } |
|
| 140 |
+ l1 := make(chan *utils.JSONMessage) |
|
| 141 |
+ l2 := make(chan *utils.JSONMessage) |
|
| 142 |
+ e.subscribe(l1) |
|
| 143 |
+ e.subscribe(l2) |
|
| 144 |
+ job := eng.Job("subscribers_count")
|
|
| 145 |
+ env, _ := job.Stdout.AddEnv() |
|
| 146 |
+ if err := job.Run(); err != nil {
|
|
| 147 |
+ t.Fatal(err) |
|
| 148 |
+ } |
|
| 149 |
+ count := env.GetInt("count")
|
|
| 150 |
+ if count != 2 {
|
|
| 151 |
+ t.Fatalf("There must be 2 subscribers, got %d", count)
|
|
| 152 |
+ } |
|
| 153 |
+} |
| 0 | 154 |
deleted file mode 100644 |
| ... | ... |
@@ -1,108 +0,0 @@ |
| 1 |
-// DEPRECATION NOTICE. PLEASE DO NOT ADD ANYTHING TO THIS FILE. |
|
| 2 |
-// |
|
| 3 |
-// For additional commments see server/server.go |
|
| 4 |
-// |
|
| 5 |
-package server |
|
| 6 |
- |
|
| 7 |
-import ( |
|
| 8 |
- "encoding/json" |
|
| 9 |
- "time" |
|
| 10 |
- |
|
| 11 |
- "github.com/docker/docker/engine" |
|
| 12 |
- "github.com/docker/docker/utils" |
|
| 13 |
-) |
|
| 14 |
- |
|
| 15 |
-func (srv *Server) Events(job *engine.Job) engine.Status {
|
|
| 16 |
- if len(job.Args) != 0 {
|
|
| 17 |
- return job.Errorf("Usage: %s", job.Name)
|
|
| 18 |
- } |
|
| 19 |
- |
|
| 20 |
- var ( |
|
| 21 |
- since = job.GetenvInt64("since")
|
|
| 22 |
- until = job.GetenvInt64("until")
|
|
| 23 |
- timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now())) |
|
| 24 |
- ) |
|
| 25 |
- |
|
| 26 |
- // If no until, disable timeout |
|
| 27 |
- if until == 0 {
|
|
| 28 |
- timeout.Stop() |
|
| 29 |
- } |
|
| 30 |
- |
|
| 31 |
- listener := make(chan utils.JSONMessage) |
|
| 32 |
- srv.eventPublisher.Subscribe(listener) |
|
| 33 |
- defer srv.eventPublisher.Unsubscribe(listener) |
|
| 34 |
- |
|
| 35 |
- // When sending an event JSON serialization errors are ignored, but all |
|
| 36 |
- // other errors lead to the eviction of the listener. |
|
| 37 |
- sendEvent := func(event *utils.JSONMessage) error {
|
|
| 38 |
- if b, err := json.Marshal(event); err == nil {
|
|
| 39 |
- if _, err = job.Stdout.Write(b); err != nil {
|
|
| 40 |
- return err |
|
| 41 |
- } |
|
| 42 |
- } |
|
| 43 |
- return nil |
|
| 44 |
- } |
|
| 45 |
- |
|
| 46 |
- job.Stdout.Write(nil) |
|
| 47 |
- |
|
| 48 |
- // Resend every event in the [since, until] time interval. |
|
| 49 |
- if since != 0 {
|
|
| 50 |
- for _, event := range srv.GetEvents() {
|
|
| 51 |
- if event.Time >= since && (event.Time <= until || until == 0) {
|
|
| 52 |
- if err := sendEvent(&event); err != nil {
|
|
| 53 |
- return job.Error(err) |
|
| 54 |
- } |
|
| 55 |
- } |
|
| 56 |
- } |
|
| 57 |
- } |
|
| 58 |
- |
|
| 59 |
- for {
|
|
| 60 |
- select {
|
|
| 61 |
- case event, ok := <-listener: |
|
| 62 |
- if !ok {
|
|
| 63 |
- return engine.StatusOK |
|
| 64 |
- } |
|
| 65 |
- if err := sendEvent(&event); err != nil {
|
|
| 66 |
- return job.Error(err) |
|
| 67 |
- } |
|
| 68 |
- case <-timeout.C: |
|
| 69 |
- return engine.StatusOK |
|
| 70 |
- } |
|
| 71 |
- } |
|
| 72 |
-} |
|
| 73 |
- |
|
| 74 |
-// FIXME: this is a shim to allow breaking up other parts of Server without |
|
| 75 |
-// dragging the sphagetti dependency along. |
|
| 76 |
-func (srv *Server) Log(job *engine.Job) engine.Status {
|
|
| 77 |
- if len(job.Args) != 3 {
|
|
| 78 |
- return job.Errorf("usage: %s ACTION ID FROM", job.Name)
|
|
| 79 |
- } |
|
| 80 |
- srv.LogEvent(job.Args[0], job.Args[1], job.Args[2]) |
|
| 81 |
- return engine.StatusOK |
|
| 82 |
-} |
|
| 83 |
- |
|
| 84 |
-func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
|
|
| 85 |
- now := time.Now().UTC().Unix() |
|
| 86 |
- jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
|
|
| 87 |
- srv.AddEvent(jm) |
|
| 88 |
- srv.eventPublisher.Publish(jm) |
|
| 89 |
- return &jm |
|
| 90 |
-} |
|
| 91 |
- |
|
| 92 |
-func (srv *Server) AddEvent(jm utils.JSONMessage) {
|
|
| 93 |
- srv.Lock() |
|
| 94 |
- if len(srv.events) == cap(srv.events) {
|
|
| 95 |
- // discard oldest event |
|
| 96 |
- copy(srv.events, srv.events[1:]) |
|
| 97 |
- srv.events[len(srv.events)-1] = jm |
|
| 98 |
- } else {
|
|
| 99 |
- srv.events = append(srv.events, jm) |
|
| 100 |
- } |
|
| 101 |
- srv.Unlock() |
|
| 102 |
-} |
|
| 103 |
- |
|
| 104 |
-func (srv *Server) GetEvents() []utils.JSONMessage {
|
|
| 105 |
- srv.RLock() |
|
| 106 |
- defer srv.RUnlock() |
|
| 107 |
- return srv.events |
|
| 108 |
-} |
| ... | ... |
@@ -86,12 +86,10 @@ func InitServer(job *engine.Job) engine.Status {
|
| 86 | 86 |
job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
|
| 87 | 87 |
|
| 88 | 88 |
for name, handler := range map[string]engine.Handler{
|
| 89 |
- "info": srv.DockerInfo, |
|
| 90 |
- "log": srv.Log, |
|
| 91 |
- "build": srv.Build, |
|
| 92 |
- "pull": srv.ImagePull, |
|
| 93 |
- "events": srv.Events, |
|
| 94 |
- "push": srv.ImagePush, |
|
| 89 |
+ "info": srv.DockerInfo, |
|
| 90 |
+ "build": srv.Build, |
|
| 91 |
+ "pull": srv.ImagePull, |
|
| 92 |
+ "push": srv.ImagePush, |
|
| 95 | 93 |
} {
|
| 96 | 94 |
if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
|
| 97 | 95 |
return job.Error(err) |
| ... | ... |
@@ -117,12 +115,10 @@ func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) |
| 117 | 117 |
return nil, err |
| 118 | 118 |
} |
| 119 | 119 |
srv := &Server{
|
| 120 |
- Eng: eng, |
|
| 121 |
- daemon: daemon, |
|
| 122 |
- pullingPool: make(map[string]chan struct{}),
|
|
| 123 |
- pushingPool: make(map[string]chan struct{}),
|
|
| 124 |
- events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events |
|
| 125 |
- eventPublisher: utils.NewJSONMessagePublisher(), |
|
| 120 |
+ Eng: eng, |
|
| 121 |
+ daemon: daemon, |
|
| 122 |
+ pullingPool: make(map[string]chan struct{}),
|
|
| 123 |
+ pushingPool: make(map[string]chan struct{}),
|
|
| 126 | 124 |
} |
| 127 | 125 |
daemon.SetServer(srv) |
| 128 | 126 |
return srv, nil |
| ... | ... |
@@ -67,6 +67,11 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
|
| 67 | 67 |
initPath = srv.daemon.SystemInitPath() |
| 68 | 68 |
} |
| 69 | 69 |
|
| 70 |
+ cjob := job.Eng.Job("subscribers_count")
|
|
| 71 |
+ env, _ := cjob.Stdout.AddEnv() |
|
| 72 |
+ if err := cjob.Run(); err != nil {
|
|
| 73 |
+ return job.Error(err) |
|
| 74 |
+ } |
|
| 70 | 75 |
v := &engine.Env{}
|
| 71 | 76 |
v.SetInt("Containers", len(srv.daemon.List()))
|
| 72 | 77 |
v.SetInt("Images", imgcount)
|
| ... | ... |
@@ -79,7 +84,7 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
|
| 79 | 79 |
v.SetInt("NFd", utils.GetTotalUsedFds())
|
| 80 | 80 |
v.SetInt("NGoroutines", runtime.NumGoroutine())
|
| 81 | 81 |
v.Set("ExecutionDriver", srv.daemon.ExecutionDriver().Name())
|
| 82 |
- v.SetInt("NEventsListener", srv.eventPublisher.SubscribersCount())
|
|
| 82 |
+ v.SetInt("NEventsListener", env.GetInt("count"))
|
|
| 83 | 83 |
v.Set("KernelVersion", kernelVersion)
|
| 84 | 84 |
v.Set("OperatingSystem", operatingSystem)
|
| 85 | 85 |
v.Set("IndexServerAddress", registry.IndexServerAddress())
|
| ... | ... |
@@ -128,12 +133,10 @@ func (srv *Server) Close() error {
|
| 128 | 128 |
|
| 129 | 129 |
type Server struct {
|
| 130 | 130 |
sync.RWMutex |
| 131 |
- daemon *daemon.Daemon |
|
| 132 |
- pullingPool map[string]chan struct{}
|
|
| 133 |
- pushingPool map[string]chan struct{}
|
|
| 134 |
- events []utils.JSONMessage |
|
| 135 |
- eventPublisher *utils.JSONMessagePublisher |
|
| 136 |
- Eng *engine.Engine |
|
| 137 |
- running bool |
|
| 138 |
- tasks sync.WaitGroup |
|
| 131 |
+ daemon *daemon.Daemon |
|
| 132 |
+ pullingPool map[string]chan struct{}
|
|
| 133 |
+ pushingPool map[string]chan struct{}
|
|
| 134 |
+ Eng *engine.Engine |
|
| 135 |
+ running bool |
|
| 136 |
+ tasks sync.WaitGroup |
|
| 139 | 137 |
} |
| ... | ... |
@@ -1,11 +1,6 @@ |
| 1 | 1 |
package server |
| 2 | 2 |
|
| 3 |
-import ( |
|
| 4 |
- "testing" |
|
| 5 |
- "time" |
|
| 6 |
- |
|
| 7 |
- "github.com/docker/docker/utils" |
|
| 8 |
-) |
|
| 3 |
+import "testing" |
|
| 9 | 4 |
|
| 10 | 5 |
func TestPools(t *testing.T) {
|
| 11 | 6 |
srv := &Server{
|
| ... | ... |
@@ -44,55 +39,3 @@ func TestPools(t *testing.T) {
|
| 44 | 44 |
t.Fatalf("Expected `Unknown pool type`")
|
| 45 | 45 |
} |
| 46 | 46 |
} |
| 47 |
- |
|
| 48 |
-func TestLogEvent(t *testing.T) {
|
|
| 49 |
- srv := &Server{
|
|
| 50 |
- events: make([]utils.JSONMessage, 0, 64), |
|
| 51 |
- eventPublisher: utils.NewJSONMessagePublisher(), |
|
| 52 |
- } |
|
| 53 |
- |
|
| 54 |
- srv.LogEvent("fakeaction", "fakeid", "fakeimage")
|
|
| 55 |
- |
|
| 56 |
- listener := make(chan utils.JSONMessage) |
|
| 57 |
- srv.eventPublisher.Subscribe(listener) |
|
| 58 |
- |
|
| 59 |
- srv.LogEvent("fakeaction2", "fakeid", "fakeimage")
|
|
| 60 |
- |
|
| 61 |
- numEvents := len(srv.GetEvents()) |
|
| 62 |
- if numEvents != 2 {
|
|
| 63 |
- t.Fatalf("Expected 2 events, found %d", numEvents)
|
|
| 64 |
- } |
|
| 65 |
- go func() {
|
|
| 66 |
- time.Sleep(200 * time.Millisecond) |
|
| 67 |
- srv.LogEvent("fakeaction3", "fakeid", "fakeimage")
|
|
| 68 |
- time.Sleep(200 * time.Millisecond) |
|
| 69 |
- srv.LogEvent("fakeaction4", "fakeid", "fakeimage")
|
|
| 70 |
- }() |
|
| 71 |
- |
|
| 72 |
- setTimeout(t, "Listening for events timed out", 2*time.Second, func() {
|
|
| 73 |
- for i := 2; i < 4; i++ {
|
|
| 74 |
- event := <-listener |
|
| 75 |
- if event != srv.GetEvents()[i] {
|
|
| 76 |
- t.Fatalf("Event received it different than expected")
|
|
| 77 |
- } |
|
| 78 |
- } |
|
| 79 |
- }) |
|
| 80 |
-} |
|
| 81 |
- |
|
| 82 |
-// FIXME: this is duplicated from integration/commands_test.go |
|
| 83 |
-func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
|
|
| 84 |
- c := make(chan bool) |
|
| 85 |
- |
|
| 86 |
- // Make sure we are not too long |
|
| 87 |
- go func() {
|
|
| 88 |
- time.Sleep(d) |
|
| 89 |
- c <- true |
|
| 90 |
- }() |
|
| 91 |
- go func() {
|
|
| 92 |
- f() |
|
| 93 |
- c <- false |
|
| 94 |
- }() |
|
| 95 |
- if <-c && msg != "" {
|
|
| 96 |
- t.Fatal(msg) |
|
| 97 |
- } |
|
| 98 |
-} |
| 99 | 47 |
deleted file mode 100644 |
| ... | ... |
@@ -1,61 +0,0 @@ |
| 1 |
-package utils |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "sync" |
|
| 5 |
- "time" |
|
| 6 |
-) |
|
| 7 |
- |
|
| 8 |
-func NewJSONMessagePublisher() *JSONMessagePublisher {
|
|
| 9 |
- return &JSONMessagePublisher{}
|
|
| 10 |
-} |
|
| 11 |
- |
|
| 12 |
-type JSONMessageListener chan<- JSONMessage |
|
| 13 |
- |
|
| 14 |
-type JSONMessagePublisher struct {
|
|
| 15 |
- m sync.RWMutex |
|
| 16 |
- subscribers []JSONMessageListener |
|
| 17 |
-} |
|
| 18 |
- |
|
| 19 |
-func (p *JSONMessagePublisher) Subscribe(l JSONMessageListener) {
|
|
| 20 |
- p.m.Lock() |
|
| 21 |
- p.subscribers = append(p.subscribers, l) |
|
| 22 |
- p.m.Unlock() |
|
| 23 |
-} |
|
| 24 |
- |
|
| 25 |
-func (p *JSONMessagePublisher) SubscribersCount() int {
|
|
| 26 |
- p.m.RLock() |
|
| 27 |
- count := len(p.subscribers) |
|
| 28 |
- p.m.RUnlock() |
|
| 29 |
- return count |
|
| 30 |
-} |
|
| 31 |
- |
|
| 32 |
-// Unsubscribe closes and removes the specified listener from the list of |
|
| 33 |
-// previously registed ones. |
|
| 34 |
-// It returns a boolean value indicating if the listener was successfully |
|
| 35 |
-// found, closed and unregistered. |
|
| 36 |
-func (p *JSONMessagePublisher) Unsubscribe(l JSONMessageListener) bool {
|
|
| 37 |
- p.m.Lock() |
|
| 38 |
- defer p.m.Unlock() |
|
| 39 |
- |
|
| 40 |
- for i, subscriber := range p.subscribers {
|
|
| 41 |
- if subscriber == l {
|
|
| 42 |
- close(l) |
|
| 43 |
- p.subscribers = append(p.subscribers[:i], p.subscribers[i+1:]...) |
|
| 44 |
- return true |
|
| 45 |
- } |
|
| 46 |
- } |
|
| 47 |
- return false |
|
| 48 |
-} |
|
| 49 |
- |
|
| 50 |
-func (p *JSONMessagePublisher) Publish(m JSONMessage) {
|
|
| 51 |
- p.m.RLock() |
|
| 52 |
- for _, subscriber := range p.subscribers {
|
|
| 53 |
- // We give each subscriber a 100ms time window to receive the event, |
|
| 54 |
- // after which we move to the next. |
|
| 55 |
- select {
|
|
| 56 |
- case subscriber <- m: |
|
| 57 |
- case <-time.After(100 * time.Millisecond): |
|
| 58 |
- } |
|
| 59 |
- } |
|
| 60 |
- p.m.RUnlock() |
|
| 61 |
-} |
| 62 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,73 +0,0 @@ |
| 1 |
-package utils |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "testing" |
|
| 5 |
- "time" |
|
| 6 |
-) |
|
| 7 |
- |
|
| 8 |
-func assertSubscribersCount(t *testing.T, q *JSONMessagePublisher, expected int) {
|
|
| 9 |
- if q.SubscribersCount() != expected {
|
|
| 10 |
- t.Fatalf("Expected %d registered subscribers, got %d", expected, q.SubscribersCount())
|
|
| 11 |
- } |
|
| 12 |
-} |
|
| 13 |
- |
|
| 14 |
-func TestJSONMessagePublisherSubscription(t *testing.T) {
|
|
| 15 |
- q := NewJSONMessagePublisher() |
|
| 16 |
- l1 := make(chan JSONMessage) |
|
| 17 |
- l2 := make(chan JSONMessage) |
|
| 18 |
- |
|
| 19 |
- assertSubscribersCount(t, q, 0) |
|
| 20 |
- q.Subscribe(l1) |
|
| 21 |
- assertSubscribersCount(t, q, 1) |
|
| 22 |
- q.Subscribe(l2) |
|
| 23 |
- assertSubscribersCount(t, q, 2) |
|
| 24 |
- |
|
| 25 |
- q.Unsubscribe(l1) |
|
| 26 |
- q.Unsubscribe(l2) |
|
| 27 |
- assertSubscribersCount(t, q, 0) |
|
| 28 |
-} |
|
| 29 |
- |
|
| 30 |
-func TestJSONMessagePublisherPublish(t *testing.T) {
|
|
| 31 |
- q := NewJSONMessagePublisher() |
|
| 32 |
- l1 := make(chan JSONMessage) |
|
| 33 |
- l2 := make(chan JSONMessage) |
|
| 34 |
- |
|
| 35 |
- go func() {
|
|
| 36 |
- for {
|
|
| 37 |
- select {
|
|
| 38 |
- case <-l1: |
|
| 39 |
- close(l1) |
|
| 40 |
- l1 = nil |
|
| 41 |
- case <-l2: |
|
| 42 |
- close(l2) |
|
| 43 |
- l2 = nil |
|
| 44 |
- case <-time.After(1 * time.Second): |
|
| 45 |
- q.Unsubscribe(l1) |
|
| 46 |
- q.Unsubscribe(l2) |
|
| 47 |
- t.Fatal("Timeout waiting for broadcasted message")
|
|
| 48 |
- } |
|
| 49 |
- } |
|
| 50 |
- }() |
|
| 51 |
- |
|
| 52 |
- q.Subscribe(l1) |
|
| 53 |
- q.Subscribe(l2) |
|
| 54 |
- q.Publish(JSONMessage{})
|
|
| 55 |
-} |
|
| 56 |
- |
|
| 57 |
-func TestJSONMessagePublishTimeout(t *testing.T) {
|
|
| 58 |
- q := NewJSONMessagePublisher() |
|
| 59 |
- l := make(chan JSONMessage) |
|
| 60 |
- q.Subscribe(l) |
|
| 61 |
- |
|
| 62 |
- c := make(chan struct{})
|
|
| 63 |
- go func() {
|
|
| 64 |
- q.Publish(JSONMessage{})
|
|
| 65 |
- close(c) |
|
| 66 |
- }() |
|
| 67 |
- |
|
| 68 |
- select {
|
|
| 69 |
- case <-c: |
|
| 70 |
- case <-time.After(time.Second): |
|
| 71 |
- t.Fatal("Timeout publishing message")
|
|
| 72 |
- } |
|
| 73 |
-} |