Make a minor change to prepare for generic controller references (since
PetSets and RCs could conflict over pods).
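
For context before the diff: with set-based selectors, nothing stops an RC and a PetSet from selecting the same pods, so the pod-to-owner edge can no longer be RC-specific. A minimal, self-contained sketch of that overlap (the toy `matches` helper stands in for `labels.SelectorFromSet(...).Matches` used below):

```go
package main

import "fmt"

// matches mirrors set-based selector semantics: every key/value
// in the selector must appear in the pod's labels.
func matches(selector, podLabels map[string]string) bool {
	for k, v := range selector {
		if podLabels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	pod := map[string]string{"app": "mysql"}
	rcSelector := map[string]string{"app": "mysql"}
	petSetSelector := map[string]string{"app": "mysql"} // same labels, different controller

	// Both controllers select the pod, so the graph needs one generic
	// "ManagedByController" edge kind instead of the old RC-only "ManagedByRC".
	fmt.Println(matches(rcSelector, pod), matches(petSetSelector, pod)) // true true
}
```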
@@ -80,5 +80,5 @@ type SortedDeploymentConfigPipeline []DeploymentConfigPipeline
 func (m SortedDeploymentConfigPipeline) Len() int      { return len(m) }
 func (m SortedDeploymentConfigPipeline) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
 func (m SortedDeploymentConfigPipeline) Less(i, j int) bool {
-	return CompareObjectMeta(&m[i].Deployment.ObjectMeta, &m[j].Deployment.ObjectMeta)
+	return CompareObjectMeta(&m[i].Deployment.DeploymentConfig.ObjectMeta, &m[j].Deployment.DeploymentConfig.ObjectMeta)
 }
new file mode 100644
@@ -0,0 +1,51 @@
+package graphview
+
+import (
+	osgraph "github.com/openshift/origin/pkg/api/graph"
+	kubeedges "github.com/openshift/origin/pkg/api/kubegraph"
+	kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes"
+)
+
+type PetSet struct {
+	PetSet *kubegraph.PetSetNode
+
+	OwnedPods   []*kubegraph.PodNode
+	CreatedPods []*kubegraph.PodNode
+
+	// TODO: handle conflicting once controller refs are present, not worth it yet
+}
+
+// AllPetSets returns all the PetSets that aren't in the excludes set and the set of covered NodeIDs
+func AllPetSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]PetSet, IntSet) {
+	covered := IntSet{}
+	views := []PetSet{}
+
+	for _, uncastNode := range g.NodesByKind(kubegraph.PetSetNodeKind) {
+		if excludeNodeIDs.Has(uncastNode.ID()) {
+			continue
+		}
+
+		view, covers := NewPetSet(g, uncastNode.(*kubegraph.PetSetNode))
+		covered.Insert(covers.List()...)
+		views = append(views, view)
+	}
+
+	return views, covered
+}
+
+// NewPetSet returns the PetSet and a set of all the NodeIDs covered by the PetSet
+func NewPetSet(g osgraph.Graph, node *kubegraph.PetSetNode) (PetSet, IntSet) {
+	covered := IntSet{}
+	covered.Insert(node.ID())
+
+	view := PetSet{}
+	view.PetSet = node
+
+	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.ManagedByControllerEdgeKind) {
+		podNode := uncastPodNode.(*kubegraph.PodNode)
+		covered.Insert(podNode.ID())
+		view.OwnedPods = append(view.OwnedPods, podNode)
+	}
+
+	return view, covered
+}
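
A hedged usage sketch of the new view, following the pattern the surrounding tests use for RCs (`g` is an already-populated graph; the `fmt` import is assumed):

```go
// Gather PetSet views, skipping nodes another view has already claimed.
coveredNodes := IntSet{}
petSets, covered := AllPetSets(g, coveredNodes)
coveredNodes.Insert(covered.List()...)

for _, ps := range petSets {
	// ps.PetSet is the graph node; its PetSet field is the underlying kapps.PetSet.
	fmt.Printf("petset %s owns %d pods\n", ps.PetSet.PetSet.Name, len(ps.OwnedPods))
}
```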
@@ -60,13 +60,13 @@ func NewReplicationController(g osgraph.Graph, rcNode *kubegraph.ReplicationCont
 	rcView.RC = rcNode
 	rcView.ConflictingRCIDToPods = map[int][]*kubegraph.PodNode{}
 
-	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByRCEdgeKind) {
+	for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByControllerEdgeKind) {
 		podNode := uncastPodNode.(*kubegraph.PodNode)
 		covered.Insert(podNode.ID())
 		rcView.OwnedPods = append(rcView.OwnedPods, podNode)
 
 		// check to see if this pod is managed by more than one RC
-		uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByRCEdgeKind)
+		uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByControllerEdgeKind)
 		if len(uncastOwningRCs) > 1 {
 			for _, uncastOwningRC := range uncastOwningRCs {
 				if uncastOwningRC.ID() == rcNode.ID() {
@@ -21,10 +21,13 @@ type ServiceGroup struct {
 
 	DeploymentConfigPipelines []DeploymentConfigPipeline
 	ReplicationControllers    []ReplicationController
+	PetSets                   []PetSet
 
-	FulfillingDCs  []*deploygraph.DeploymentConfigNode
-	FulfillingRCs  []*kubegraph.ReplicationControllerNode
-	FulfillingPods []*kubegraph.PodNode
+	// TODO: this has to stop
+	FulfillingPetSets []*kubegraph.PetSetNode
+	FulfillingDCs     []*deploygraph.DeploymentConfigNode
+	FulfillingRCs     []*kubegraph.ReplicationControllerNode
+	FulfillingPods    []*kubegraph.PodNode
 
 	ExposingRoutes []*routegraph.RouteNode
 }
@@ -66,6 +69,8 @@ func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (Servi
 			service.FulfillingRCs = append(service.FulfillingRCs, castContainer)
 		case *kubegraph.PodNode:
 			service.FulfillingPods = append(service.FulfillingPods, castContainer)
+		case *kubegraph.PetSetNode:
+			service.FulfillingPetSets = append(service.FulfillingPetSets, castContainer)
 		default:
 			utilruntime.HandleError(fmt.Errorf("unrecognized container: %v", castContainer))
 		}
@@ -97,6 +102,13 @@ func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (Servi
 		service.ReplicationControllers = append(service.ReplicationControllers, rcView)
 	}
 
+	for _, fulfillingPetSet := range service.FulfillingPetSets {
+		view, covers := NewPetSet(g, fulfillingPetSet)
+
+		covered.Insert(covers.List()...)
+		service.PetSets = append(service.PetSets, view)
+	}
+
 	for _, fulfillingPod := range service.FulfillingPods {
 		_, podCovers := NewPod(g, fulfillingPod)
 		covered.Insert(podCovers.List()...)
@@ -78,7 +78,7 @@ func TestBareRCGroup(t *testing.T) {
 
 	kubeedges.AddAllExposedPodTemplateSpecEdges(g)
 	kubeedges.AddAllExposedPodEdges(g)
-	kubeedges.AddAllManagedByRCPodEdges(g)
+	kubeedges.AddAllManagedByControllerPodEdges(g)
 
 	coveredNodes := IntSet{}
 
@@ -399,7 +399,7 @@ func TestGraph(t *testing.T) {
 	}
 
 	for _, bareDCPipeline := range bareDCPipelines {
-		t.Logf("from %s", bareDCPipeline.Deployment.Name)
+		t.Logf("from %s", bareDCPipeline.Deployment.DeploymentConfig.Name)
 		for _, path := range bareDCPipeline.Images {
 			t.Logf(" %v", path)
 		}
@@ -413,7 +413,7 @@ func TestGraph(t *testing.T) {
 		indent := " "
 
 		for _, deployment := range serviceGroup.DeploymentConfigPipelines {
-			t.Logf("%sdeployment %s", indent, deployment.Deployment.Name)
+			t.Logf("%sdeployment %s", indent, deployment.Deployment.DeploymentConfig.Name)
 			for _, image := range deployment.Images {
 				t.Logf("%s image %s", indent, image.Image.ImageSpec())
 				if image.Build != nil {
@@ -21,11 +21,11 @@ func FindDuelingReplicationControllers(g osgraph.Graph, f osgraph.Namer) []osgra
 	for _, uncastRCNode := range g.NodesByKind(kubegraph.ReplicationControllerNodeKind) {
 		rcNode := uncastRCNode.(*kubegraph.ReplicationControllerNode)
 
-		for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByRCEdgeKind) {
+		for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByControllerEdgeKind) {
 			podNode := uncastPodNode.(*kubegraph.PodNode)
 
 			// check to see if this pod is managed by more than one RC
-			uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByRCEdgeKind)
+			uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByControllerEdgeKind)
 			if len(uncastOwningRCs) > 1 {
 				involvedRCNames := []string{}
 				relatedNodes := []graph.Node{uncastPodNode}
@@ -14,7 +14,7 @@ func TestDuelingRC(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	kubeedges.AddAllManagedByRCPodEdges(g)
+	kubeedges.AddAllManagedByControllerPodEdges(g)
 
 	markers := FindDuelingReplicationControllers(g, osgraph.DefaultNamer)
 	if e, a := 2, len(markers); e != a {
@@ -8,6 +8,8 @@ import (
 
 	kapi "k8s.io/kubernetes/pkg/api"
 	_ "k8s.io/kubernetes/pkg/api/install"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	kapps "k8s.io/kubernetes/pkg/apis/apps"
 	"k8s.io/kubernetes/pkg/apis/autoscaling"
 	"k8s.io/kubernetes/pkg/runtime"
 
@@ -38,6 +40,14 @@ func TestNamespaceEdgeMatching(t *testing.T) {
 	rc.Spec.Selector = map[string]string{"a": "1"}
 	kubegraph.EnsureReplicationControllerNode(g, rc)
 
+	p := &kapps.PetSet{}
+	p.Namespace = namespace
+	p.Name = "the-petset"
+	p.Spec.Selector = &unversioned.LabelSelector{
+		MatchLabels: map[string]string{"a": "1"},
+	}
+	kubegraph.EnsurePetSetNode(g, p)
+
 	svc := &kapi.Service{}
 	svc.Namespace = namespace
 	svc.Name = "the-svc"
@@ -49,7 +59,7 @@ func TestNamespaceEdgeMatching(t *testing.T) {
 	fn("other", g)
 	AddAllExposedPodEdges(g)
 	AddAllExposedPodTemplateSpecEdges(g)
-	AddAllManagedByRCPodEdges(g)
+	AddAllManagedByControllerPodEdges(g)
 
 	for _, edge := range g.Edges() {
 		nsTo, err := namespaceFor(edge.To())
@@ -79,6 +89,10 @@ func namespaceFor(node graph.Node) (string, error) {
 		return node.(*kubegraph.PodSpecNode).Namespace, nil
 	case *kapi.ReplicationControllerSpec:
 		return node.(*kubegraph.ReplicationControllerSpecNode).Namespace, nil
+	case *kapps.PetSetSpec:
+		return node.(*kubegraph.PetSetSpecNode).Namespace, nil
+	case *kapi.PodTemplateSpec:
+		return node.(*kubegraph.PodTemplateSpecNode).Namespace, nil
 	default:
 		return "", fmt.Errorf("unknown object: %#v", obj)
 	}
@@ -20,8 +20,8 @@ import (
 const (
 	// ExposedThroughServiceEdgeKind goes from a PodTemplateSpec or a Pod to Service. The head should make the service's selector.
 	ExposedThroughServiceEdgeKind = "ExposedThroughService"
-	// ManagedByRCEdgeKind goes from Pod to ReplicationController when the Pod satisfies the ReplicationController's label selector
-	ManagedByRCEdgeKind = "ManagedByRC"
+	// ManagedByControllerEdgeKind goes from Pod to controller when the Pod satisfies a controller's label selector
+	ManagedByControllerEdgeKind = "ManagedByController"
 	// MountedSecretEdgeKind goes from PodSpec to Secret indicating that is or will be a request to mount a volume with the Secret.
 	MountedSecretEdgeKind = "MountedSecret"
 	// MountableSecretEdgeKind goes from ServiceAccount to Secret indicating that the SA allows the Secret to be mounted
@@ -91,31 +91,36 @@ func AddAllExposedPodEdges(g osgraph.MutableUniqueGraph) {
 	}
 }
 
-// AddManagedByRCPodEdges ensures that a directed edge exists between an RC and all the pods
+// AddManagedByControllerPodEdges ensures that a directed edge exists between a controller and all the pods
 // in the graph that match the label selector
-func AddManagedByRCPodEdges(g osgraph.MutableUniqueGraph, rcNode *kubegraph.ReplicationControllerNode) {
-	if rcNode.Spec.Selector == nil {
+func AddManagedByControllerPodEdges(g osgraph.MutableUniqueGraph, to graph.Node, namespace string, selector map[string]string) {
+	if selector == nil {
 		return
 	}
-	query := labels.SelectorFromSet(rcNode.Spec.Selector)
+	query := labels.SelectorFromSet(selector)
 	for _, n := range g.(graph.Graph).Nodes() {
 		switch target := n.(type) {
 		case *kubegraph.PodNode:
-			if target.Namespace != rcNode.Namespace {
+			if target.Namespace != namespace {
 				continue
 			}
 			if query.Matches(labels.Set(target.Labels)) {
-				g.AddEdge(target, rcNode, ManagedByRCEdgeKind)
+				g.AddEdge(target, to, ManagedByControllerEdgeKind)
 			}
 		}
 	}
 }
 
-// AddAllManagedByRCPodEdges calls AddManagedByRCPodEdges for every ServiceNode in the graph
-func AddAllManagedByRCPodEdges(g osgraph.MutableUniqueGraph) {
+// AddAllManagedByControllerPodEdges calls AddManagedByControllerPodEdges for every node in the graph
+// TODO: should do this through an interface (selects pods)
+func AddAllManagedByControllerPodEdges(g osgraph.MutableUniqueGraph) {
 	for _, node := range g.(graph.Graph).Nodes() {
-		if rcNode, ok := node.(*kubegraph.ReplicationControllerNode); ok {
-			AddManagedByRCPodEdges(g, rcNode)
+		switch cast := node.(type) {
+		case *kubegraph.ReplicationControllerNode:
+			AddManagedByControllerPodEdges(g, cast, cast.ReplicationController.Namespace, cast.ReplicationController.Spec.Selector)
+		case *kubegraph.PetSetNode:
+			// TODO: refactor to handle expanded selectors (along with ReplicaSets and Deployments)
+			AddManagedByControllerPodEdges(g, cast, cast.PetSet.Namespace, cast.PetSet.Spec.Selector.MatchLabels)
 		}
 	}
 }
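
The type switch is now the single extension point; a hypothetical ReplicaSet case (not part of this change — `ReplicaSetNode` does not exist in this tree) would follow the same shape, which is what the TODO about a "selects pods" interface is pointing at:

```go
// Hypothetical follow-up only; illustrates why an interface would beat
// growing this switch one controller type at a time.
case *kubegraph.ReplicaSetNode: // assumed node type, not in this diff
	AddManagedByControllerPodEdges(g, cast, cast.ReplicaSet.Namespace, cast.ReplicaSet.Spec.Selector.MatchLabels)
```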
@@ -4,6 +4,7 @@ import (
 	"github.com/gonum/graph"
 
 	kapi "k8s.io/kubernetes/pkg/api"
+	kapps "k8s.io/kubernetes/pkg/apis/apps"
 	"k8s.io/kubernetes/pkg/apis/autoscaling"
 
 	osgraph "github.com/openshift/origin/pkg/api/graph"
@@ -154,3 +155,33 @@ func EnsureHorizontalPodAutoscalerNode(g osgraph.MutableUniqueGraph, hpa *autosc
 		},
 	).(*HorizontalPodAutoscalerNode)
 }
+
+func EnsurePetSetNode(g osgraph.MutableUniqueGraph, petset *kapps.PetSet) *PetSetNode {
+	nodeName := PetSetNodeName(petset)
+	node := osgraph.EnsureUnique(g,
+		nodeName,
+		func(node osgraph.Node) graph.Node {
+			return &PetSetNode{node, petset}
+		},
+	).(*PetSetNode)
+
+	specNode := EnsurePetSetSpecNode(g, &petset.Spec, petset.Namespace, nodeName)
+	g.AddEdge(node, specNode, osgraph.ContainsEdgeKind)
+
+	return node
+}
+
+func EnsurePetSetSpecNode(g osgraph.MutableUniqueGraph, spec *kapps.PetSetSpec, namespace string, ownerName osgraph.UniqueName) *PetSetSpecNode {
+	specName := PetSetSpecNodeName(spec, ownerName)
+	specNode := osgraph.EnsureUnique(g,
+		specName,
+		func(node osgraph.Node) graph.Node {
+			return &PetSetSpecNode{node, spec, namespace, ownerName}
+		},
+	).(*PetSetSpecNode)
+
+	ptSpecNode := EnsurePodTemplateSpecNode(g, &spec.Template, namespace, specName)
+	g.AddEdge(specNode, ptSpecNode, osgraph.ContainsEdgeKind)
+
+	return specNode
+}
@@ -5,6 +5,7 @@ import (
 	"reflect"
 
 	kapi "k8s.io/kubernetes/pkg/api"
+	kapps "k8s.io/kubernetes/pkg/apis/apps"
 	"k8s.io/kubernetes/pkg/apis/autoscaling"
 
 	osgraph "github.com/openshift/origin/pkg/api/graph"
@@ -20,6 +21,8 @@ var (
 	ServiceAccountNodeKind          = reflect.TypeOf(kapi.ServiceAccount{}).Name()
 	SecretNodeKind                  = reflect.TypeOf(kapi.Secret{}).Name()
 	HorizontalPodAutoscalerNodeKind = reflect.TypeOf(autoscaling.HorizontalPodAutoscaler{}).Name()
+	PetSetNodeKind                  = reflect.TypeOf(kapps.PetSet{}).Name()
+	PetSetSpecNodeKind              = reflect.TypeOf(kapps.PetSetSpec{}).Name()
 )
 
 func ServiceNodeName(o *kapi.Service) osgraph.UniqueName {
@@ -108,7 +111,7 @@ func ReplicationControllerNodeName(o *kapi.ReplicationController) osgraph.Unique
 
 type ReplicationControllerNode struct {
 	osgraph.Node
-	*kapi.ReplicationController
+	ReplicationController *kapi.ReplicationController
 
 	IsFound bool
 }
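
De-embedding this field is what drives the mechanical `t.Name` → `t.ReplicationController.Name` churn elsewhere in the diff; a two-line illustration of the promotion being dropped:

```go
// Before: *kapi.ReplicationController was embedded, so its fields were
// promoted onto the node.
name := rcNode.Name // implicitly rcNode.ReplicationController.Name

// After: access is explicit, which stays unambiguous as more controller
// types (PetSets here, ReplicaSets/Deployments later) join the graph.
name = rcNode.ReplicationController.Name
```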
@@ -139,8 +142,8 @@ func ReplicationControllerSpecNodeName(o *kapi.ReplicationControllerSpec, ownerN
 
 type ReplicationControllerSpecNode struct {
 	osgraph.Node
-	*kapi.ReplicationControllerSpec
-	Namespace string
+	ReplicationControllerSpec *kapi.ReplicationControllerSpec
+	Namespace                 string
 
 	OwnerName osgraph.UniqueName
 }
@@ -267,3 +270,56 @@ func (*HorizontalPodAutoscalerNode) Kind() string {
 func (n HorizontalPodAutoscalerNode) UniqueName() osgraph.UniqueName {
 	return HorizontalPodAutoscalerNodeName(n.HorizontalPodAutoscaler)
 }
+
+func PetSetNodeName(o *kapps.PetSet) osgraph.UniqueName {
+	return osgraph.GetUniqueRuntimeObjectNodeName(PetSetNodeKind, o)
+}
+
+type PetSetNode struct {
+	osgraph.Node
+	PetSet *kapps.PetSet
+}
+
+func (n PetSetNode) Object() interface{} {
+	return n.PetSet
+}
+
+func (n PetSetNode) String() string {
+	return string(n.UniqueName())
+}
+
+func (n PetSetNode) UniqueName() osgraph.UniqueName {
+	return PetSetNodeName(n.PetSet)
+}
+
+func (*PetSetNode) Kind() string {
+	return PetSetNodeKind
+}
+
+func PetSetSpecNodeName(o *kapps.PetSetSpec, ownerName osgraph.UniqueName) osgraph.UniqueName {
+	return osgraph.UniqueName(fmt.Sprintf("%s|%v", PetSetSpecNodeKind, ownerName))
+}
+
+type PetSetSpecNode struct {
+	osgraph.Node
+	PetSetSpec *kapps.PetSetSpec
+	Namespace  string
+
+	OwnerName osgraph.UniqueName
+}
+
+func (n PetSetSpecNode) Object() interface{} {
+	return n.PetSetSpec
+}
+
+func (n PetSetSpecNode) String() string {
+	return string(n.UniqueName())
+}
+
+func (n PetSetSpecNode) UniqueName() osgraph.UniqueName {
+	return PetSetSpecNodeName(n.PetSetSpec, n.OwnerName)
+}
+
+func (*PetSetSpecNode) Kind() string {
+	return PetSetSpecNodeKind
+}
@@ -11,6 +11,7 @@ import (
 	kapi "k8s.io/kubernetes/pkg/api"
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	kapps "k8s.io/kubernetes/pkg/apis/apps"
 	"k8s.io/kubernetes/pkg/apis/autoscaling"
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
 	utilerrors "k8s.io/kubernetes/pkg/util/errors"
@@ -69,6 +70,7 @@ func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, set
 		&secretLoader{namespace: namespace, lister: d.K},
 		&rcLoader{namespace: namespace, lister: d.K},
 		&podLoader{namespace: namespace, lister: d.K},
+		&petsetLoader{namespace: namespace, lister: d.K.Apps()},
 		&horizontalPodAutoscalerLoader{namespace: namespace, lister: d.K.Autoscaling()},
 		// TODO check swagger for feature enablement and selectively add bcLoader and buildLoader
 		// then remove errors.TolerateNotFoundError method.
@@ -108,7 +110,7 @@ func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, set
 
 	kubeedges.AddAllExposedPodTemplateSpecEdges(g)
 	kubeedges.AddAllExposedPodEdges(g)
-	kubeedges.AddAllManagedByRCPodEdges(g)
+	kubeedges.AddAllManagedByControllerPodEdges(g)
 	kubeedges.AddAllRequestedServiceAccountEdges(g)
 	kubeedges.AddAllMountableSecretEdges(g)
 	kubeedges.AddAllMountedSecretEdges(g)
@@ -188,6 +190,10 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error
 			printLines(out, indent, 1, describeDeploymentInServiceGroup(local, dcPipeline, graphview.MaxRecentContainerRestartsForRC(g, dcPipeline.ActiveDeployment))...)
 		}
 
+		for _, node := range service.FulfillingPetSets {
+			printLines(out, indent, 1, describePetSetInServiceGroup(local, node)...)
+		}
+
 	rcNode:
 		for _, rcNode := range service.FulfillingRCs {
 			for _, coveredDC := range service.FulfillingDCs {
@@ -199,14 +205,20 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error
 		}
 
 	pod:
-		for _, podNode := range service.FulfillingPods {
+		for _, node := range service.FulfillingPods {
 			// skip pods that have been displayed in a roll-up of RCs and DCs (by implicit usage of RCs)
 			for _, coveredRC := range service.FulfillingRCs {
-				if g.Edge(podNode, coveredRC) != nil {
+				if g.Edge(node, coveredRC) != nil {
+					continue pod
+				}
+			}
+			// TODO: collapse into FulfillingControllers
+			for _, covered := range service.FulfillingPetSets {
+				if g.Edge(node, covered) != nil {
 					continue pod
 				}
 			}
-			printLines(out, indent, 1, describePodInServiceGroup(local, podNode)...)
+			printLines(out, indent, 1, describePodInServiceGroup(local, node)...)
 		}
 	}
 
@@ -423,9 +435,11 @@ func (f namespacedFormatter) ResourceName(obj interface{}) string {
 	case *kubegraph.ServiceAccountNode:
 		return namespaceNameWithType("sa", t.Name, t.Namespace, f.currentNamespace, f.hideNamespace)
 	case *kubegraph.ReplicationControllerNode:
-		return namespaceNameWithType("rc", t.Name, t.Namespace, f.currentNamespace, f.hideNamespace)
+		return namespaceNameWithType("rc", t.ReplicationController.Name, t.ReplicationController.Namespace, f.currentNamespace, f.hideNamespace)
 	case *kubegraph.HorizontalPodAutoscalerNode:
 		return namespaceNameWithType("hpa", t.HorizontalPodAutoscaler.Name, t.HorizontalPodAutoscaler.Namespace, f.currentNamespace, f.hideNamespace)
+	case *kubegraph.PetSetNode:
+		return namespaceNameWithType("petset", t.PetSet.Name, t.PetSet.Namespace, f.currentNamespace, f.hideNamespace)
 
 	case *imagegraph.ImageStreamNode:
 		return namespaceNameWithType("is", t.ImageStream.Name, t.ImageStream.Namespace, f.currentNamespace, f.hideNamespace)
@@ -467,15 +481,15 @@ func describeAllProjectsOnServer(f formatter, server string) string {
 }
 
 func describeDeploymentInServiceGroup(f formatter, deploy graphview.DeploymentConfigPipeline, restartCount int32) []string {
-	local := namespacedFormatter{currentNamespace: deploy.Deployment.Namespace}
+	local := namespacedFormatter{currentNamespace: deploy.Deployment.DeploymentConfig.Namespace}
 
 	includeLastPass := deploy.ActiveDeployment == nil
 	if len(deploy.Images) == 1 {
 		format := "%s deploys %s %s"
-		if deploy.Deployment.Spec.Test {
+		if deploy.Deployment.DeploymentConfig.Spec.Test {
 			format = "%s test deploys %s %s"
 		}
-		lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeImageInPipeline(local, deploy.Images[0], deploy.Deployment.Namespace), describeDeploymentConfigTrigger(deploy.Deployment.DeploymentConfig))}
+		lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeImageInPipeline(local, deploy.Images[0], deploy.Deployment.DeploymentConfig.Namespace), describeDeploymentConfigTrigger(deploy.Deployment.DeploymentConfig))}
 		if len(lines[0]) > 120 && strings.Contains(lines[0], " <- ") {
 			segments := strings.SplitN(lines[0], " <- ", 2)
 			lines[0] = segments[0] + " <-"
@@ -487,18 +501,27 @@ func describeDeploymentInServiceGroup(f formatter, deploy graphview.DeploymentCo
 	}
 
 	format := "%s deploys %s"
-	if deploy.Deployment.Spec.Test {
+	if deploy.Deployment.DeploymentConfig.Spec.Test {
 		format = "%s test deploys %s"
 	}
 	lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeDeploymentConfigTrigger(deploy.Deployment.DeploymentConfig))}
 	for _, image := range deploy.Images {
-		lines = append(lines, describeImageInPipeline(local, image, deploy.Deployment.Namespace))
+		lines = append(lines, describeImageInPipeline(local, image, deploy.Deployment.DeploymentConfig.Namespace))
 		lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(image.Build, image.LastSuccessfulBuild, image.LastUnsuccessfulBuild, image.ActiveBuilds, image.DestinationResolved, includeLastPass)...)...)
 		lines = append(lines, describeDeployments(local, deploy.Deployment, deploy.ActiveDeployment, deploy.InactiveDeployments, restartCount, maxDisplayDeployments)...)
 	}
 	return lines
 }
 
+func describePetSetInServiceGroup(f formatter, node *kubegraph.PetSetNode) []string {
+	images := []string{}
+	for _, container := range node.PetSet.Spec.Template.Spec.Containers {
+		images = append(images, container.Image)
+	}
+
+	return []string{fmt.Sprintf("%s manages %s, %s", f.ResourceName(node), strings.Join(images, ", "), describePetSetStatus(node.PetSet))}
+}
+
 func describeRCInServiceGroup(f formatter, rcNode *kubegraph.ReplicationControllerNode) []string {
 	if rcNode.ReplicationController.Spec.Template == nil {
 		return []string{}
@@ -916,7 +939,7 @@ func describeDeployments(f formatter, dcNode *deploygraph.DeploymentConfigNode,
 
 	switch {
 	case count == -1:
-		if deployutil.DeploymentStatusFor(deployment) == deployapi.DeploymentStatusComplete {
+		if deployutil.DeploymentStatusFor(deployment.ReplicationController) == deployapi.DeploymentStatusComplete {
 			return out
 		}
 	default:
@@ -944,48 +967,51 @@ func describeDeploymentStatus(deploy *kapi.ReplicationController, first, test bo
 			reason = fmt.Sprintf(": %s", reason)
 		}
 		// TODO: encode fail time in the rc
-		return fmt.Sprintf("deployment #%d failed %s ago%s%s", version, timeAt, reason, describePodSummaryInline(deploy, false, restartCount))
+		return fmt.Sprintf("deployment #%d failed %s ago%s%s", version, timeAt, reason, describePodSummaryInline(deploy.Status.Replicas, deploy.Spec.Replicas, false, restartCount))
 	case deployapi.DeploymentStatusComplete:
 		// TODO: pod status output
 		if test {
 			return fmt.Sprintf("test deployment #%d deployed %s ago", version, timeAt)
 		}
-		return fmt.Sprintf("deployment #%d deployed %s ago%s", version, timeAt, describePodSummaryInline(deploy, first, restartCount))
+		return fmt.Sprintf("deployment #%d deployed %s ago%s", version, timeAt, describePodSummaryInline(deploy.Status.Replicas, deploy.Spec.Replicas, first, restartCount))
 	case deployapi.DeploymentStatusRunning:
 		format := "deployment #%d running%s for %s%s"
 		if test {
 			format = "test deployment #%d running%s for %s%s"
 		}
-		return fmt.Sprintf(format, version, maybeCancelling, timeAt, describePodSummaryInline(deploy, false, restartCount))
+		return fmt.Sprintf(format, version, maybeCancelling, timeAt, describePodSummaryInline(deploy.Status.Replicas, deploy.Spec.Replicas, false, restartCount))
 	default:
-		return fmt.Sprintf("deployment #%d %s%s %s ago%s", version, strings.ToLower(string(status)), maybeCancelling, timeAt, describePodSummaryInline(deploy, false, restartCount))
+		return fmt.Sprintf("deployment #%d %s%s %s ago%s", version, strings.ToLower(string(status)), maybeCancelling, timeAt, describePodSummaryInline(deploy.Status.Replicas, deploy.Spec.Replicas, false, restartCount))
 	}
 }
 
+func describePetSetStatus(p *kapps.PetSet) string {
+	timeAt := strings.ToLower(formatRelativeTime(p.CreationTimestamp.Time))
+	return fmt.Sprintf("created %s ago%s", timeAt, describePodSummaryInline(int32(p.Status.Replicas), int32(p.Spec.Replicas), false, 0))
+}
+
 func describeRCStatus(rc *kapi.ReplicationController) string {
 	timeAt := strings.ToLower(formatRelativeTime(rc.CreationTimestamp.Time))
-	return fmt.Sprintf("rc/%s created %s ago%s", rc.Name, timeAt, describePodSummaryInline(rc, false, 0))
+	return fmt.Sprintf("rc/%s created %s ago%s", rc.Name, timeAt, describePodSummaryInline(rc.Status.Replicas, rc.Spec.Replicas, false, 0))
 }
 
-func describePodSummaryInline(rc *kapi.ReplicationController, includeEmpty bool, restartCount int32) string {
-	s := describePodSummary(rc, includeEmpty, restartCount)
+func describePodSummaryInline(actual, requested int32, includeEmpty bool, restartCount int32) string {
+	s := describePodSummary(actual, requested, includeEmpty, restartCount)
 	if len(s) == 0 {
 		return s
 	}
 	change := ""
-	desired := rc.Spec.Replicas
 	switch {
-	case desired < rc.Status.Replicas:
-		change = fmt.Sprintf(" reducing to %d", desired)
-	case desired > rc.Status.Replicas:
-		change = fmt.Sprintf(" growing to %d", desired)
+	case requested < actual:
+		change = fmt.Sprintf(" reducing to %d", requested)
+	case requested > actual:
+		change = fmt.Sprintf(" growing to %d", requested)
 	}
 	return fmt.Sprintf(" - %s%s", s, change)
 }
 
-func describePodSummary(rc *kapi.ReplicationController, includeEmpty bool, restartCount int32) string {
-	actual, requested := rc.Status.Replicas, rc.Spec.Replicas
-	restartWarn := ""
+func describePodSummary(actual, requested int32, includeEmpty bool, restartCount int32) string {
+	var restartWarn string
 	if restartCount > 0 {
 		restartWarn = fmt.Sprintf(" (warning: %d restarts)", restartCount)
 	}
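
Passing bare counts instead of an RC is what lets PetSets (and anything else with spec/status replica counts) reuse the summary. A quick worked example (values illustrative; the pod-summary body is elided):

```go
// status reports 5 pods, spec asks for 3 → scaling down
describePodSummaryInline(5, 3, false, 0) // " - ... reducing to 3"

// status reports 1 pod, spec asks for 3 → scaling up
describePodSummaryInline(1, 3, false, 0) // " - ... growing to 3"
```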
@@ -1192,6 +1218,30 @@ func (l *podLoader) AddToGraph(g osgraph.Graph) error {
 	return nil
 }
 
+type petsetLoader struct {
+	namespace string
+	lister    kclient.PetSetNamespacer
+	items     []kapps.PetSet
+}
+
+func (l *petsetLoader) Load() error {
+	list, err := l.lister.PetSets(l.namespace).List(kapi.ListOptions{})
+	if err != nil {
+		return err
+	}
+
+	l.items = list.Items
+	return nil
+}
+
+func (l *petsetLoader) AddToGraph(g osgraph.Graph) error {
+	for i := range l.items {
+		kubegraph.EnsurePetSetNode(g, &l.items[i])
+	}
+
+	return nil
+}
+
 type horizontalPodAutoscalerLoader struct {
 	namespace string
 	lister    kclient.HorizontalPodAutoscalersNamespacer
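
The petsetLoader mirrors podLoader and rcLoader above it. One detail worth calling out: AddToGraph indexes into `l.items` rather than ranging by value, because in Go of this era the range variable is a single reused variable, so taking its address would alias every node to the last PetSet:

```go
// Buggy alternative, for contrast — do not do this:
for _, item := range l.items {
	kubegraph.EnsurePetSetNode(g, &item) // every node would point at the same `item`
}
```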
@@ -270,6 +270,22 @@ func TestProjectStatus(t *testing.T) {
 			},
 			Time: mustParseTime("2015-04-07T04:12:25Z"),
 		},
+		"with pet sets": {
+			Path: "../../../../test/testdata/app-scenarios/petset.yaml",
+			Extra: []runtime.Object{
+				&projectapi.Project{
+					ObjectMeta: kapi.ObjectMeta{Name: "example", Namespace: ""},
+				},
+			},
+			ErrFn: func(err error) bool { return err == nil },
+			Contains: []string{
+				"In project example on server https://example.com:8443\n",
+				"svc/galera[default] (headless):3306",
+				"petset/mysql manages erkules/galera:basic, created less than a second ago - 3 pods",
+				"* pod/mysql-1[default] has restarted 7 times",
+			},
+			Time: mustParseTime("2015-04-07T04:12:25Z"),
+		},
 		"restarting pod": {
 			Path: "../../../api/graph/test/restarting-pod.yaml",
 			Extra: []runtime.Object{
@@ -103,7 +103,7 @@ func FindDeploymentConfigReadinessWarnings(g osgraph.Graph, f osgraph.Namer, set
 Node:
 	for _, uncastDcNode := range g.NodesByKind(deploygraph.DeploymentConfigNodeKind) {
 		dcNode := uncastDcNode.(*deploygraph.DeploymentConfigNode)
-		if t := dcNode.Spec.Template; t != nil && len(t.Spec.Containers) > 0 {
+		if t := dcNode.DeploymentConfig.Spec.Template; t != nil && len(t.Spec.Containers) > 0 {
 			for _, container := range t.Spec.Containers {
 				if container.ReadinessProbe != nil {
 					continue Node
@@ -64,7 +64,7 @@ func AddAllTriggerEdges(g osgraph.MutableUniqueGraph) {
 func AddDeploymentEdges(g osgraph.MutableUniqueGraph, node *deploygraph.DeploymentConfigNode) *deploygraph.DeploymentConfigNode {
 	for _, n := range g.(graph.Graph).Nodes() {
 		if rcNode, ok := n.(*kubegraph.ReplicationControllerNode); ok {
-			if rcNode.Namespace != node.Namespace {
+			if rcNode.ReplicationController.Namespace != node.DeploymentConfig.Namespace {
 				continue
 			}
 			if BelongsToDeploymentConfig(node.DeploymentConfig, rcNode.ReplicationController) {
@@ -26,7 +26,7 @@ func RelevantDeployments(g osgraph.Graph, dcNode *deploygraph.DeploymentConfigNo
 
 	sort.Sort(RecentDeploymentReferences(allDeployments))
 
-	if dcNode.DeploymentConfig.Status.LatestVersion == deployutil.DeploymentVersionFor(allDeployments[0]) {
+	if dcNode.DeploymentConfig.Status.LatestVersion == deployutil.DeploymentVersionFor(allDeployments[0].ReplicationController) {
 		return allDeployments[0], allDeployments[1:]
 	}
 
new file mode 100644
@@ -0,0 +1,496 @@
+apiVersion: v1
+items:
+- apiVersion: apps/v1alpha1
+  kind: PetSet
+  metadata:
+    creationTimestamp: 2016-07-21T15:53:09Z
+    generation: 3
+    labels:
+      app: mysql
+    name: mysql
+    namespace: default
+    resourceVersion: "6790"
+    selfLink: /apis/apps/v1alpha1/namespaces/default/petsets/mysql
+    uid: 3900c985-4f5b-11e6-b8a1-080027242396
+  spec:
+    replicas: 3
+    selector:
+      matchLabels:
+        app: mysql
+    serviceName: galera
+    template:
+      metadata:
+        annotations:
+          pod.alpha.kubernetes.io/init-containers: '[{"name":"install","image":"gcr.io/google_containers/galera-install:0.1","args":["--work-dir=/work-dir"],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"Always"},{"name":"bootstrap","image":"debian:jessie","command":["/work-dir/peer-finder"],"args":["-on-start=\"/work-dir/on-start.sh\"","-service=galera"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}}],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]'
+          pod.alpha.kubernetes.io/initialized: "true"
+        creationTimestamp: null
+        labels:
+          app: mysql
+      spec:
+        containers:
+        - args:
+          - --defaults-file=/etc/mysql/my-galera.cnf
+          - --user=root
+          image: erkules/galera:basic
+          imagePullPolicy: IfNotPresent
+          name: mysql
+          ports:
+          - containerPort: 3306
+            name: mysql
+            protocol: TCP
+          - containerPort: 4444
+            name: sst
+            protocol: TCP
+          - containerPort: 4567
+            name: replication
+            protocol: TCP
+          - containerPort: 4568
+            name: ist
+            protocol: TCP
+          readinessProbe:
+            exec:
+              command:
+              - sh
+              - -c
+              - mysql -u root -e 'show databases;'
+            failureThreshold: 3
+            initialDelaySeconds: 15
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 5
+          resources: {}
+          terminationMessagePath: /dev/termination-log
+          volumeMounts:
+          - mountPath: /var/lib/
+            name: datadir
+          - mountPath: /etc/mysql
+            name: config
+        dnsPolicy: ClusterFirst
+        restartPolicy: Always
+        securityContext: {}
+        terminationGracePeriodSeconds: 30
+        volumes:
+        - emptyDir: {}
+          name: config
+        - emptyDir: {}
+          name: workdir
+    volumeClaimTemplates:
+    - metadata:
+        annotations:
+          volume.alpha.kubernetes.io/storage-class: anything
+        creationTimestamp: null
+        name: datadir
+      spec:
+        accessModes:
+        - ReadWriteOnce
+        resources:
+          requests:
+            storage: 10Gi
+      status:
+        phase: Pending
+  status:
+    replicas: 3
+- apiVersion: v1
+  kind: Service
+  metadata:
+    annotations:
+      service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+    creationTimestamp: 2016-07-21T15:53:09Z
+    labels:
+      app: mysql
+    name: galera
+    namespace: default
+    resourceVersion: "343"
+    selfLink: /api/v1/namespaces/default/services/galera
+    uid: 38fb3915-4f5b-11e6-b8a1-080027242396
+  spec:
+    clusterIP: None
+    portalIP: None
+    ports:
+    - name: mysql
+      port: 3306
+      protocol: TCP
+      targetPort: 3306
+    selector:
+      app: mysql
+    sessionAffinity: None
+    type: ClusterIP
+  status:
+    loadBalancer: {}
+- apiVersion: v1
+  kind: Pod
+  metadata:
+    annotations:
+      kubernetes.io/created-by: |
+        {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"PetSet","namespace":"default","name":"mysql","uid":"3900c985-4f5b-11e6-b8a1-080027242396","apiVersion":"apps","resourceVersion":"6784"}}
+      openshift.io/scc: anyuid
+      pod.alpha.kubernetes.io/init-container-statuses: '[{"name":"install","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T02:41:12Z","finishedAt":"2016-07-27T02:41:12Z","containerID":"docker://5c727d8732899605fcfe3eecbeeb02576f18f5b989496073340427a8d2134622"}},"lastState":{},"ready":true,"restartCount":0,"image":"gcr.io/google_containers/galera-install:0.1","imageID":"docker://sha256:56ef857005d0ce479f2db0e4ee0ece05e0766ebfa7e79e27e1513915262a18ec","containerID":"docker://5c727d8732899605fcfe3eecbeeb02576f18f5b989496073340427a8d2134622"},{"name":"bootstrap","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T02:41:14Z","finishedAt":"2016-07-27T02:41:15Z","containerID":"docker://ab4ca0b3b6ec4860cd55c615534e1e2b11f4c3a33746783aab145919feb2446e"}},"lastState":{},"ready":true,"restartCount":0,"image":"debian:jessie","imageID":"docker://sha256:1b088884749bd93867ddb48ff404d4bbff09a17af8d95bc863efa5d133f87b78","containerID":"docker://ab4ca0b3b6ec4860cd55c615534e1e2b11f4c3a33746783aab145919feb2446e"}]'
+      pod.alpha.kubernetes.io/init-containers: '[{"name":"install","image":"gcr.io/google_containers/galera-install:0.1","args":["--work-dir=/work-dir"],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"Always"},{"name":"bootstrap","image":"debian:jessie","command":["/work-dir/peer-finder"],"args":["-on-start=\"/work-dir/on-start.sh\"","-service=galera"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}}],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]'
+      pod.alpha.kubernetes.io/initialized: "true"
+      pod.beta.kubernetes.io/hostname: mysql-0
+      pod.beta.kubernetes.io/subdomain: galera
+    creationTimestamp: 2016-07-27T02:41:09Z
+    generateName: mysql-
+    labels:
+      app: mysql
+    name: mysql-0
+    namespace: default
+    resourceVersion: "7191"
+    selfLink: /api/v1/namespaces/default/pods/mysql-0
+    uid: 92e49e79-53a3-11e6-b45a-080027242396
+  spec:
+    containers:
+    - args:
+      - --defaults-file=/etc/mysql/my-galera.cnf
+      - --user=root
+      image: erkules/galera:basic
+      imagePullPolicy: IfNotPresent
+      name: mysql
+      ports:
+      - containerPort: 3306
+        name: mysql
+        protocol: TCP
+      - containerPort: 4444
+        name: sst
+        protocol: TCP
+      - containerPort: 4567
+        name: replication
+        protocol: TCP
+      - containerPort: 4568
+        name: ist
+        protocol: TCP
+      readinessProbe:
+        exec:
+          command:
+          - sh
+          - -c
+          - mysql -u root -e 'show databases;'
+        failureThreshold: 3
+        initialDelaySeconds: 15
+        periodSeconds: 10
+        successThreshold: 1
+        timeoutSeconds: 5
+      resources: {}
+      securityContext:
+        capabilities:
+          drop:
+          - MKNOD
+          - SYS_CHROOT
+        privileged: false
+        seLinuxOptions:
+          level: s0:c5,c0
+      terminationMessagePath: /dev/termination-log
+      volumeMounts:
+      - mountPath: /var/lib/
+        name: datadir
+      - mountPath: /etc/mysql
+        name: config
+      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
+        name: default-token-au2xq
+        readOnly: true
+    dnsPolicy: ClusterFirst
+    host: localhost.localdomain
+    imagePullSecrets:
+    - name: default-dockercfg-pzhsj
+    nodeName: localhost.localdomain
+    restartPolicy: Always
+    securityContext:
+      seLinuxOptions:
+        level: s0:c5,c0
+    serviceAccount: default
+    serviceAccountName: default
+    terminationGracePeriodSeconds: 30
+    volumes:
+    - name: datadir
+      persistentVolumeClaim:
+        claimName: datadir-mysql-0
+    - emptyDir: {}
+      name: config
+    - emptyDir: {}
+      name: workdir
+    - name: default-token-au2xq
+      secret:
+        secretName: default-token-au2xq
+  status:
+    conditions:
+    - lastProbeTime: null
+      lastTransitionTime: 2016-07-27T02:41:15Z
+      status: "True"
+      type: Initialized
+    - lastProbeTime: null
+      lastTransitionTime: 2016-07-27T03:00:47Z
+      status: "True"
+      type: Ready
+    - lastProbeTime: null
+      lastTransitionTime: 2016-07-27T02:41:09Z
+      status: "True"
+      type: PodScheduled
+    containerStatuses:
+    - containerID: docker://f2406b0f697c525df44b64aec6b1f6024ab88d9df80256426247dc6e9a92cb30
+      image: erkules/galera:basic
+      imageID: docker://sha256:b4780e247a38c12612f539ce1ac8e0988e1781d56fddf719c80fb8d4d7b8bbde
+      lastState: {}
+      name: mysql
+      ready: true
+      restartCount: 0
+      state:
+        running:
+          startedAt: 2016-07-27T02:41:16Z
+    hostIP: 10.0.2.15
+    phase: Running
+    podIP: 172.17.0.2
+    startTime: 2016-07-27T02:41:09Z
+- apiVersion: v1
+  kind: Pod
+  metadata:
+    annotations:
+      kubernetes.io/created-by: |
+        {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"PetSet","namespace":"default","name":"mysql","uid":"3900c985-4f5b-11e6-b8a1-080027242396","apiVersion":"apps","resourceVersion":"6790"}}
+      openshift.io/scc: anyuid
+      pod.alpha.kubernetes.io/init-container-statuses: '[{"name":"install","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T02:41:42Z","finishedAt":"2016-07-27T02:41:42Z","containerID":"docker://2538c65f65557955c02745ef4021181cf322c8dc0db62144dd1e1f8ea9f7fa54"}},"lastState":{},"ready":true,"restartCount":0,"image":"gcr.io/google_containers/galera-install:0.1","imageID":"docker://sha256:56ef857005d0ce479f2db0e4ee0ece05e0766ebfa7e79e27e1513915262a18ec","containerID":"docker://2538c65f65557955c02745ef4021181cf322c8dc0db62144dd1e1f8ea9f7fa54"},{"name":"bootstrap","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T02:41:44Z","finishedAt":"2016-07-27T02:41:45Z","containerID":"docker://4df7188d37033c182e675d45179941766bd1e6a013469038f43fa3fecc2cc06d"}},"lastState":{},"ready":true,"restartCount":0,"image":"debian:jessie","imageID":"docker://sha256:1b088884749bd93867ddb48ff404d4bbff09a17af8d95bc863efa5d133f87b78","containerID":"docker://4df7188d37033c182e675d45179941766bd1e6a013469038f43fa3fecc2cc06d"}]'
+      pod.alpha.kubernetes.io/init-containers: '[{"name":"install","image":"gcr.io/google_containers/galera-install:0.1","args":["--work-dir=/work-dir"],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"Always"},{"name":"bootstrap","image":"debian:jessie","command":["/work-dir/peer-finder"],"args":["-on-start=\"/work-dir/on-start.sh\"","-service=galera"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}}],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]'
+      pod.alpha.kubernetes.io/initialized: "true"
+      pod.beta.kubernetes.io/hostname: mysql-1
+      pod.beta.kubernetes.io/subdomain: galera
+    creationTimestamp: 2016-07-27T02:41:39Z
+    generateName: mysql-
+    labels:
+      app: mysql
+    name: mysql-1
+    namespace: default
+    resourceVersion: "7195"
+    selfLink: /api/v1/namespaces/default/pods/mysql-1
+    uid: a4da4725-53a3-11e6-b45a-080027242396
+  spec:
+    containers:
+    - args:
+      - --defaults-file=/etc/mysql/my-galera.cnf
+      - --user=root
+      image: erkules/galera:basic
+      imagePullPolicy: IfNotPresent
+      name: mysql
+      ports:
+      - containerPort: 3306
+        name: mysql
+        protocol: TCP
+      - containerPort: 4444
+        name: sst
+        protocol: TCP
+      - containerPort: 4567
+        name: replication
+        protocol: TCP
+      - containerPort: 4568
+        name: ist
+        protocol: TCP
+      readinessProbe:
+        exec:
+          command:
+          - sh
+          - -c
+          - mysql -u root -e 'show databases;'
+        failureThreshold: 3
+        initialDelaySeconds: 15
+        periodSeconds: 10
+        successThreshold: 1
+        timeoutSeconds: 5
+      resources: {}
+      securityContext:
+        capabilities:
+          drop:
+          - MKNOD
+          - SYS_CHROOT
+        privileged: false
+        seLinuxOptions:
+          level: s0:c5,c0
+      terminationMessagePath: /dev/termination-log
+      volumeMounts:
+      - mountPath: /var/lib/
+        name: datadir
+      - mountPath: /etc/mysql
+        name: config
+      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
+        name: default-token-au2xq
+        readOnly: true
+    dnsPolicy: ClusterFirst
+    host: localhost.localdomain
+    imagePullSecrets:
+    - name: default-dockercfg-pzhsj
+    nodeName: localhost.localdomain
+    restartPolicy: Always
+    securityContext:
+      seLinuxOptions:
+        level: s0:c5,c0
+    serviceAccount: default
+    serviceAccountName: default
+    terminationGracePeriodSeconds: 30
+    volumes:
+    - name: datadir
+      persistentVolumeClaim:
+        claimName: datadir-mysql-1
+    - emptyDir: {}
+      name: config
+    - emptyDir: {}
+      name: workdir
+    - name: default-token-au2xq
+      secret:
+        secretName: default-token-au2xq
+  status:
+    conditions:
+    - lastProbeTime: null
+      lastTransitionTime: 2016-07-27T02:41:46Z
+      status: "True"
+      type: Initialized
+    - lastProbeTime: null
+      lastTransitionTime: 2016-07-27T03:00:58Z
+      status: "True"
+      type: Ready
+    - lastProbeTime: null
+      lastTransitionTime: 2016-07-27T02:41:39Z
+      status: "True"
+      type: PodScheduled
+    containerStatuses:
+    - containerID: docker://be1d5be42ab23d1db23f4552141e9068e2385ba19c3e84596e047eb6d2762d1c
+      image: erkules/galera:basic
+      imageID: docker://sha256:b4780e247a38c12612f539ce1ac8e0988e1781d56fddf719c80fb8d4d7b8bbde
+      lastState:
+        terminated:
+          containerID: docker://9a662fa5b74a962fa362c6a5d632fe3642b12fefde36c8158ab1a50d8fa4e33e
+          exitCode: 1
+          finishedAt: 2016-07-27T02:51:40Z
+          reason: Error
+          startedAt: 2016-07-27T02:51:05Z
+      name: mysql
+      ready: true
+      restartCount: 7
+      state:
+        running:
+          startedAt: 2016-07-27T03:00:39Z
+    hostIP: 10.0.2.15
+    phase: Running
+    podIP: 172.17.0.3
+    startTime: 2016-07-27T02:41:39Z
+- apiVersion: v1
+  kind: Pod
+  metadata:
+    annotations:
+      kubernetes.io/created-by: |
+        {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"PetSet","namespace":"default","name":"mysql","uid":"3900c985-4f5b-11e6-b8a1-080027242396","apiVersion":"apps","resourceVersion":"6790"}}
+      openshift.io/scc: anyuid
+      pod.alpha.kubernetes.io/init-container-statuses: '[{"name":"install","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T03:01:01Z","finishedAt":"2016-07-27T03:01:01Z","containerID":"docker://af008b4ce59d36695fbabf40ae2f7431b51441eb2e9c6962378937c06ac69a35"}},"lastState":{},"ready":true,"restartCount":0,"image":"gcr.io/google_containers/galera-install:0.1","imageID":"docker://sha256:56ef857005d0ce479f2db0e4ee0ece05e0766ebfa7e79e27e1513915262a18ec","containerID":"docker://af008b4ce59d36695fbabf40ae2f7431b51441eb2e9c6962378937c06ac69a35"},{"name":"bootstrap","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T03:01:02Z","finishedAt":"2016-07-27T03:01:03Z","containerID":"docker://ee97005854130335b54a65429865956260b7729e51e6363ab05e63d5c7c9ee48"}},"lastState":{},"ready":true,"restartCount":0,"image":"debian:jessie","imageID":"docker://sha256:1b088884749bd93867ddb48ff404d4bbff09a17af8d95bc863efa5d133f87b78","containerID":"docker://ee97005854130335b54a65429865956260b7729e51e6363ab05e63d5c7c9ee48"}]'
+      pod.alpha.kubernetes.io/init-containers: '[{"name":"install","image":"gcr.io/google_containers/galera-install:0.1","args":["--work-dir=/work-dir"],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"Always"},{"name":"bootstrap","image":"debian:jessie","command":["/work-dir/peer-finder"],"args":["-on-start=\"/work-dir/on-start.sh\"","-service=galera"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}}],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]'
+      pod.alpha.kubernetes.io/initialized: "true"
+      pod.beta.kubernetes.io/hostname: mysql-2
+      pod.beta.kubernetes.io/subdomain: galera
+    creationTimestamp: 2016-07-27T03:00:58Z
+    generateName: mysql-
+    labels:
+      app: mysql
+    name: mysql-2
+    namespace: default
+    resourceVersion: "7226"
+    selfLink: /api/v1/namespaces/default/pods/mysql-2
+    uid: 57e618f1-53a6-11e6-b215-080027242396
+  spec:
+    containers:
+    - args:
+      - --defaults-file=/etc/mysql/my-galera.cnf
+      - --user=root
+      image: erkules/galera:basic
+      imagePullPolicy: IfNotPresent
+      name: mysql
+      ports:
+      - containerPort: 3306
+        name: mysql
+        protocol: TCP
+      - containerPort: 4444
+        name: sst
+        protocol: TCP
+      - containerPort: 4567
+        name: replication
+        protocol: TCP
+      - containerPort: 4568
+        name: ist
+        protocol: TCP
+      readinessProbe:
+        exec:
+          command:
+          - sh
+          - -c
+          - mysql -u root -e 'show databases;'
+        failureThreshold: 3
+        initialDelaySeconds: 15
+        periodSeconds: 10
+        successThreshold: 1
+        timeoutSeconds: 5
+      resources: {}
+      securityContext:
+        capabilities:
+          drop:
+          - MKNOD
+          - SYS_CHROOT
+        privileged: false
+        seLinuxOptions:
+          level: s0:c5,c0
+      terminationMessagePath: /dev/termination-log
+      volumeMounts:
+      - mountPath: /var/lib/
+        name: datadir
+      - mountPath: /etc/mysql
+        name: config
+      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
+        name: default-token-au2xq
+        readOnly: true
+    dnsPolicy: ClusterFirst
+    host: localhost.localdomain
+    imagePullSecrets:
+    - name: default-dockercfg-pzhsj
+    nodeName: localhost.localdomain
+    restartPolicy: Always
+    securityContext:
+      seLinuxOptions:
+        level: s0:c5,c0
+    serviceAccount: default
+    serviceAccountName: default
+    terminationGracePeriodSeconds: 30
+    volumes:
+    - name: datadir
+      persistentVolumeClaim:
+        claimName: datadir-mysql-2
+    - emptyDir: {}
+      name: config
+    - emptyDir: {}
+      name: workdir
+    - name: default-token-au2xq
+      secret:
+        secretName: default-token-au2xq
+  status:
+    conditions:
+    - lastProbeTime: null
+      lastTransitionTime: 2016-07-27T03:01:03Z
+      status: "True"
+      type: Initialized
+    - lastProbeTime: null
+      lastTransitionTime: 2016-07-27T03:01:28Z
+      status: "True"
+      type: Ready
+    - lastProbeTime: null
+      lastTransitionTime: 2016-07-27T03:00:58Z
+      status: "True"
+      type: PodScheduled
+    containerStatuses:
+    - containerID: docker://82b774855cdb5d12d98e7bc34f4f9d4e88e757e9cc2da1593e2e2f66e3241e5f
+      image: erkules/galera:basic
+      imageID: docker://sha256:b4780e247a38c12612f539ce1ac8e0988e1781d56fddf719c80fb8d4d7b8bbde
+      lastState: {}
+      name: mysql
+      ready: true
+      restartCount: 0
+      state:
+        running:
+          startedAt: 2016-07-27T03:01:04Z
+    hostIP: 10.0.2.15
+    phase: Running
+    podIP: 172.17.0.4
+    startTime: 2016-07-27T03:00:58Z
+kind: List
+metadata: {}
|