move images extended tests to new tag and directory

Ben Parees authored on 2016/10/11 23:15:39
Showing 29 changed files
@@ -10,6 +10,7 @@ import (
 	_ "github.com/openshift/origin/test/extended/deployments"
 	_ "github.com/openshift/origin/test/extended/dns"
 	_ "github.com/openshift/origin/test/extended/idling"
+	_ "github.com/openshift/origin/test/extended/image_ecosystem"
 	_ "github.com/openshift/origin/test/extended/imageapis"
 	_ "github.com/openshift/origin/test/extended/images"
 	_ "github.com/openshift/origin/test/extended/jenkins"
new file mode 100644
@@ -0,0 +1,45 @@
+package image_ecosystem
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"k8s.io/kubernetes/pkg/labels"
+
+	exutil "github.com/openshift/origin/test/extended/util"
+)
+
+// RunInPodContainer will run the provided command in the specified pod's first container.
+func RunInPodContainer(oc *exutil.CLI, selector labels.Selector, cmd []string) error {
+	pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), selector, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
+	if err != nil {
+		return err
+	}
+	if len(pods) != 1 {
+		return fmt.Errorf("got %d pods for selector %v, expected 1", len(pods), selector)
+	}
+
+	pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
+	if err != nil {
+		return err
+	}
+	args := []string{pod.Name, "-c", pod.Spec.Containers[0].Name, "--"}
+	args = append(args, cmd...)
+	return oc.Run("exec").Args(args...).Execute()
+}
+
+// CheckPageContains makes an HTTP request to an example application and checks
+// that the response contains the given string.
+func CheckPageContains(oc *exutil.CLI, endpoint, path, contents string) (bool, error) {
+	address, err := exutil.GetEndpointAddress(oc, endpoint)
+	if err != nil {
+		return false, err
+	}
+
+	response, err := exutil.FetchURL(fmt.Sprintf("http://%s/%s", address, path), 3*time.Minute)
+	if err != nil {
+		return false, err
+	}
+	return strings.Contains(response, contents), nil
+}
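
For orientation, the hot-deploy tests later in this commit combine these two helpers in exactly this way: mutate a source file inside the running pod, then assert that the served page changed. A condensed, hypothetical sketch (the deployment label, file path, and marker string below are placeholders):

package image_ecosystem

import (
	exutil "github.com/openshift/origin/test/extended/util"
)

// exampleHotDeployEdit edits a source file in the running pod's container and
// then reports whether the application serves the updated content.
func exampleHotDeployEdit(oc *exutil.CLI) (bool, error) {
	dcLabel := exutil.ParseLabelsOrDie("deployment=dancer-mysql-example-1")

	// Rewrite one line of the application source inside the pod.
	modify := []string{"sed", "-ie", `s/old/new/`, "lib/default.pm"}
	if err := RunInPodContainer(oc, dcLabel, modify); err != nil {
		return false, err
	}

	// Fetch the root page via the service endpoint and look for the marker.
	return CheckPageContains(oc, "dancer-mysql-example", "", "expected marker")
}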
new file mode 100644
@@ -0,0 +1,41 @@
+package image_ecosystem
+
+import (
+	"fmt"
+
+	g "github.com/onsi/ginkgo"
+	o "github.com/onsi/gomega"
+
+	exutil "github.com/openshift/origin/test/extended/util"
+)
+
+var _ = g.Describe("[image_ecosystem][mariadb][Slow] openshift mariadb image", func() {
+	defer g.GinkgoRecover()
+	var (
+		templatePath = exutil.FixturePath("..", "..", "examples", "db-templates", "mariadb-ephemeral-template.json")
+		oc           = exutil.NewCLI("mariadb-create", exutil.KubeConfigPath())
+	)
+	g.Describe("Creating from a template", func() {
+		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
+			oc.SetOutputDir(exutil.TestContext.OutputDir)
+
+			g.By(fmt.Sprintf("calling oc process -f %q", templatePath))
+			configFile, err := oc.Run("process").Args("-f", templatePath).OutputToFile("config.json")
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By(fmt.Sprintf("calling oc create -f %q", configFile))
+			err = oc.Run("create").Args("-f", configFile).Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			// oc.KubeFramework().WaitForAnEndpoint currently waits forever; for now, preface it with our WaitForADeploymentToComplete,
+			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment.
+			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mariadb", oc)
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("expecting the mariadb service to get endpoints")
+			err = oc.KubeFramework().WaitForAnEndpoint("mariadb")
+			o.Expect(err).NotTo(o.HaveOccurred())
+		})
+	})
+
+})
new file mode 100644
@@ -0,0 +1,62 @@
+package image_ecosystem
+
+import (
+	"fmt"
+
+	g "github.com/onsi/ginkgo"
+	o "github.com/onsi/gomega"
+
+	"time"
+
+	exutil "github.com/openshift/origin/test/extended/util"
+	"github.com/openshift/origin/test/extended/util/db"
+)
+
+var _ = g.Describe("[image_ecosystem][mongodb] openshift mongodb image", func() {
+	defer g.GinkgoRecover()
+
+	templatePath := exutil.FixturePath("..", "..", "examples", "db-templates", "mongodb-ephemeral-template.json")
+	oc := exutil.NewCLI("mongodb-create", exutil.KubeConfigPath()).Verbose()
+
+	g.Describe("creating from a template", func() {
+		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
+
+			exutil.CheckOpenShiftNamespaceImageStreams(oc)
+			g.By("creating a new app")
+			o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())
+
+			g.By("waiting for the deployment to complete")
+			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mongodb", oc)
+			o.Expect(err).ShouldNot(o.HaveOccurred())
+
+			g.By("expecting the mongodb pod to be running")
+			podNames, err := exutil.WaitForPods(
+				oc.KubeREST().Pods(oc.Namespace()),
+				exutil.ParseLabelsOrDie("name=mongodb"),
+				exutil.CheckPodIsRunningFn,
+				1,
+				1*time.Minute,
+			)
+			o.Expect(err).ShouldNot(o.HaveOccurred())
+			o.Expect(podNames).Should(o.HaveLen(1))
+
+			g.By("expecting the mongodb service to answer pings")
+			mongo := db.NewMongoDB(podNames[0])
+			ok, err := mongo.IsReady(oc)
+			o.Expect(err).ShouldNot(o.HaveOccurred())
+			o.Expect(ok).Should(o.BeTrue())
+
+			g.By("expecting that we can insert a new record")
+			result, err := mongo.Query(oc, `db.foo.save({ "status": "passed" })`)
+			o.Expect(err).ShouldNot(o.HaveOccurred())
+			o.Expect(result).Should(o.ContainSubstring(`WriteResult({ "nInserted" : 1 })`))
+
+			g.By("expecting that we can read a record")
+			findCmd := "printjson(db.foo.find({}, {_id: 0}).toArray())" // exclude the _id field from the output because it changes every time
+			result, err = mongo.Query(oc, findCmd)
+			o.Expect(err).ShouldNot(o.HaveOccurred())
+			o.Expect(result).Should(o.ContainSubstring(`{ "status" : "passed" }`))
+		})
+	})
+
+})
new file mode 100644
@@ -0,0 +1,118 @@
+package image_ecosystem
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+
+	g "github.com/onsi/ginkgo"
+	o "github.com/onsi/gomega"
+
+	exutil "github.com/openshift/origin/test/extended/util"
+	"github.com/openshift/origin/test/extended/util/db"
+)
+
+var _ = g.Describe("[image_ecosystem][mongodb] openshift mongodb replication", func() {
+	defer g.GinkgoRecover()
+
+	const (
+		templatePath         = "https://raw.githubusercontent.com/sclorg/mongodb-container/master/2.4/examples/replica/mongodb-clustered.json"
+		deploymentConfigName = "mongodb"
+		expectedValue        = `{ "status" : "passed" }`
+		insertCmd            = "db.bar.save(" + expectedValue + ")"
+	)
+
+	const (
+		expectedReplicasAfterDeployment = 3
+		expectedReplicasAfterScalingUp  = expectedReplicasAfterDeployment + 2
+	)
+
+	oc := exutil.NewCLI("mongodb-replica", exutil.KubeConfigPath()).Verbose()
+
+	g.Describe("creating from a template", func() {
+		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
+
+			exutil.CheckOpenShiftNamespaceImageStreams(oc)
+			g.By("creating a new app")
+			o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())
+
+			g.By("waiting for the deployment to complete")
+			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), deploymentConfigName, oc)
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			podNames := waitForNumberOfPodsWithLabel(oc, expectedReplicasAfterDeployment, "mongodb-replica")
+			mongo := db.NewMongoDB(podNames[0])
+
+			g.By(fmt.Sprintf("expecting that the replica set has %d members", expectedReplicasAfterDeployment))
+			assertMembersInReplica(oc, mongo, expectedReplicasAfterDeployment)
+
+			g.By("expecting that we can insert a new record on the primary node")
+			replicaSet := mongo.(exutil.ReplicaSet)
+			_, err = replicaSet.QueryPrimary(oc, insertCmd)
+			o.Expect(err).ShouldNot(o.HaveOccurred())
+
+			g.By("expecting that we can read a record from all members")
+			for _, podName := range podNames {
+				tryToReadFromPod(oc, podName, expectedValue)
+			}
+
+			g.By(fmt.Sprintf("scaling deployment config %s to %d replicas", deploymentConfigName, expectedReplicasAfterScalingUp))
+
+			err = oc.Run("scale").Args("dc", deploymentConfigName, "--replicas="+fmt.Sprint(expectedReplicasAfterScalingUp), "--timeout=30s").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			podNames = waitForNumberOfPodsWithLabel(oc, expectedReplicasAfterScalingUp, "mongodb-replica")
+			mongo = db.NewMongoDB(podNames[0])
+
+			g.By("expecting that scaling the replica set up adds more members")
+			assertMembersInReplica(oc, mongo, expectedReplicasAfterScalingUp)
+		})
+	})
+
+})
+
+func tryToReadFromPod(oc *exutil.CLI, podName, expectedValue string) {
+	// exclude the _id field from the output because it changes every time
+	findCmd := "rs.slaveOk(); printjson(db.bar.find({}, {_id: 0}).toArray())"
+
+	fmt.Fprintf(g.GinkgoWriter, "DEBUG: reading record from pod %v\n", podName)
+
+	mongoPod := db.NewMongoDB(podName)
+	result, err := mongoPod.Query(oc, findCmd)
+	o.Expect(err).ShouldNot(o.HaveOccurred())
+	o.Expect(result).Should(o.ContainSubstring(expectedValue))
+}
+
+func waitForNumberOfPodsWithLabel(oc *exutil.CLI, number int, label string) []string {
+	g.By(fmt.Sprintf("expecting that there are %d running pods with label name=%s", number, label))
+
+	podNames, err := exutil.WaitForPods(
+		oc.KubeREST().Pods(oc.Namespace()),
+		exutil.ParseLabelsOrDie("name="+label),
+		exutil.CheckPodIsRunningFn,
+		number,
+		1*time.Minute,
+	)
+	o.Expect(err).ShouldNot(o.HaveOccurred())
+	o.Expect(podNames).Should(o.HaveLen(number))
+
+	return podNames
+}
+
+func assertMembersInReplica(oc *exutil.CLI, db exutil.Database, expectedReplicas int) {
+	isMasterCmd := "printjson(db.isMaster())"
+	getReplicaHostsCmd := "print(db.isMaster().hosts.length)"
+
+	// the pod is running, but we must wait until it is really ready (has become a member of the replica set)
+	err := exutil.WaitForQueryOutputSatisfies(oc, db, 1*time.Minute, false, isMasterCmd, func(commandOutput string) bool {
+		return commandOutput != ""
+	})
+	o.Expect(err).ShouldNot(o.HaveOccurred())
+
+	isMasterOutput, _ := db.Query(oc, isMasterCmd)
+	fmt.Fprintf(g.GinkgoWriter, "DEBUG: Output of the db.isMaster() command: %v\n", isMasterOutput)
+
+	members, err := db.Query(oc, getReplicaHostsCmd)
+	o.Expect(err).ShouldNot(o.HaveOccurred())
+	o.Expect(members).Should(o.Equal(strconv.Itoa(expectedReplicas)))
+}
new file mode 100644
@@ -0,0 +1,41 @@
+package image_ecosystem
+
+import (
+	"fmt"
+
+	g "github.com/onsi/ginkgo"
+	o "github.com/onsi/gomega"
+
+	exutil "github.com/openshift/origin/test/extended/util"
+)
+
+var _ = g.Describe("[image_ecosystem][mysql][Slow] openshift mysql image", func() {
+	defer g.GinkgoRecover()
+	var (
+		templatePath = exutil.FixturePath("..", "..", "examples", "db-templates", "mysql-ephemeral-template.json")
+		oc           = exutil.NewCLI("mysql-create", exutil.KubeConfigPath())
+	)
+	g.Describe("Creating from a template", func() {
+		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
+			oc.SetOutputDir(exutil.TestContext.OutputDir)
+
+			g.By(fmt.Sprintf("calling oc process -f %q", templatePath))
+			configFile, err := oc.Run("process").Args("-f", templatePath).OutputToFile("config.json")
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By(fmt.Sprintf("calling oc create -f %q", configFile))
+			err = oc.Run("create").Args("-f", configFile).Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			// oc.KubeFramework().WaitForAnEndpoint currently waits forever; for now, preface it with our WaitForADeploymentToComplete,
+			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment.
+			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mysql", oc)
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("expecting the mysql service to get endpoints")
+			err = oc.KubeFramework().WaitForAnEndpoint("mysql")
+			o.Expect(err).NotTo(o.HaveOccurred())
+		})
+	})
+
+})
new file mode 100644
@@ -0,0 +1,198 @@
+package image_ecosystem
+
+import (
+	"fmt"
+	"time"
+
+	g "github.com/onsi/ginkgo"
+	o "github.com/onsi/gomega"
+
+	templateapi "github.com/openshift/origin/pkg/template/api"
+	exutil "github.com/openshift/origin/test/extended/util"
+	"github.com/openshift/origin/test/extended/util/db"
+	testutil "github.com/openshift/origin/test/util"
+
+	kapi "k8s.io/kubernetes/pkg/api"
+	kclient "k8s.io/kubernetes/pkg/client/unversioned"
+)
+
+type testCase struct {
+	Version         string
+	TemplatePath    string
+	SkipReplication bool
+}
+
+var (
+	testCases = []testCase{
+		{
+			"5.5",
+			"https://raw.githubusercontent.com/sclorg/mysql-container/master/5.5/examples/replica/mysql_replica.json",
+			// NOTE: Set to true in case of flakes.
+			false,
+		},
+		{
+			"5.6",
+			"https://raw.githubusercontent.com/sclorg/mysql-container/master/5.6/examples/replica/mysql_replica.json",
+			false,
+		},
+	}
+	helperTemplate = exutil.FixturePath("..", "..", "examples", "db-templates", "mysql-ephemeral-template.json")
+	helperName     = "mysql-helper"
+)
+
+// CreateMySQLReplicationHelpers creates a set of MySQL helpers for the master,
+// the slaves, and an extra helper that is used for the remote login test.
+func CreateMySQLReplicationHelpers(c kclient.PodInterface, masterDeployment, slaveDeployment, helperDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
+	podNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", masterDeployment)), exutil.CheckPodIsRunningFn, 1, 1*time.Minute)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	masterPod := podNames[0]
+
+	slavePods, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", slaveDeployment)), exutil.CheckPodIsRunningFn, slaveCount, 2*time.Minute)
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	// Create the MySQL helper for the master
+	master := db.NewMysql(masterPod, "")
+
+	// Create MySQL helpers for the slaves
+	slaves := make([]exutil.Database, len(slavePods))
+	for i := range slavePods {
+		slave := db.NewMysql(slavePods[i], masterPod)
+		slaves[i] = slave
+	}
+
+	helperNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", helperDeployment)), exutil.CheckPodIsRunningFn, 1, 1*time.Minute)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	helper := db.NewMysql(helperNames[0], masterPod)
+
+	return master, slaves, helper
+}
+
+func cleanup(oc *exutil.CLI) {
+	exutil.DumpImageStreams(oc)
+	oc.AsAdmin().Run("delete").Args("all", "--all", "-n", oc.Namespace()).Execute()
+	exutil.DumpImageStreams(oc)
+	oc.AsAdmin().Run("delete").Args("pvc", "--all", "-n", oc.Namespace()).Execute()
+	exutil.CleanupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace())
+}
+
+func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
+	return func() {
+		oc.SetOutputDir(exutil.TestContext.OutputDir)
+		defer cleanup(oc)
+
+		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "1Gi", 2)
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		err = testutil.WaitForPolicyUpdate(oc.REST(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		exutil.CheckOpenShiftNamespaceImageStreams(oc)
+		err = oc.Run("new-app").Args("-f", tc.TemplatePath).Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		// oc.KubeFramework().WaitForAnEndpoint currently waits forever; for now, preface it with our WaitForADeploymentToComplete,
+		// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment.
+		g.By("waiting for the deployment to complete")
+		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), helperName, oc)
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		g.By("waiting for an endpoint")
+		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		tableCounter := 0
+		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
+			tableCounter++
+			table := fmt.Sprintf("table_%0.2d", tableCounter)
+
+			g.By("creating replication helpers")
+			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
+			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
+			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())
+
+			// Test if we can query as root
+			g.By("waiting for the mysql-master endpoint")
+			oc.KubeFramework().WaitForAnEndpoint("mysql-master")
+			err := helper.TestRemoteLogin(oc, "mysql-master")
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			// Create a new table with a random name
+			g.By("creating a new table")
+			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			// Write new data to the table through the master
+			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			// Make sure the data is present on the master
+			err = exutil.WaitForQueryOutputContains(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			// Make sure the data was replicated to all slaves
+			for _, slave := range slaves {
+				err = exutil.WaitForQueryOutputContains(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
+				o.Expect(err).NotTo(o.HaveOccurred())
+			}
+
+			return master, slaves, helper
+		}
+
+		g.By("after initial deployment")
+		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)
+
+		if tc.SkipReplication {
+			return
+		}
+
+		g.By("after master is restarted by changing the Deployment Config")
+		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)
+
+		g.By("after master is restarted by deleting the pod")
+		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)
+
+		g.By("after slave is restarted by deleting the pod")
+		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)
+
+		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(len(pods.Items)).To(o.Equal(1))
+
+		// NOTE: Commented out; the current template does not support multiple replicas.
+		/*
+			g.By("after slave is scaled to 0 and then back to 4 replicas")
+			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
+			o.Expect(err).NotTo(o.HaveOccurred())
+			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+			assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
+		*/
+	}
+}
+
+var _ = g.Describe("[image_ecosystem][mysql][Slow] openshift mysql replication", func() {
+	defer g.GinkgoRecover()
+
+	ocs := make([]*exutil.CLI, len(testCases))
+	for i, tc := range testCases {
+		ocs[i] = exutil.NewCLI(fmt.Sprintf("mysql-replication-%d", i), exutil.KubeConfigPath())
+		g.It(fmt.Sprintf("MySQL replication template for %s: %s", tc.Version, tc.TemplatePath), replicationTestFactory(ocs[i], tc))
+	}
+})
new file mode 100644
@@ -0,0 +1,184 @@
+package image_ecosystem
+
+import (
+	"fmt"
+	"time"
+
+	g "github.com/onsi/ginkgo"
+	o "github.com/onsi/gomega"
+
+	templateapi "github.com/openshift/origin/pkg/template/api"
+	exutil "github.com/openshift/origin/test/extended/util"
+	"github.com/openshift/origin/test/extended/util/db"
+	testutil "github.com/openshift/origin/test/util"
+
+	kapi "k8s.io/kubernetes/pkg/api"
+	kclient "k8s.io/kubernetes/pkg/client/unversioned"
+)
+
+var (
+	postgreSQLReplicationTemplate = "https://raw.githubusercontent.com/sclorg/postgresql-container/master/examples/replica/postgresql_replica.json"
+	postgreSQLEphemeralTemplate   = exutil.FixturePath("..", "..", "examples", "db-templates", "postgresql-ephemeral-template.json")
+	postgreSQLHelperName          = "postgresql-helper"
+	postgreSQLImages              = []string{
+		"postgresql:9.2",
+		"postgresql:9.4",
+		"postgresql:9.5",
+	}
+)
+
+var _ = g.Describe("[LocalNode][image_ecosystem][postgresql][Slow] openshift postgresql replication", func() {
+	defer g.GinkgoRecover()
+
+	for i, image := range postgreSQLImages {
+		oc := exutil.NewCLI(fmt.Sprintf("postgresql-replication-%d", i), exutil.KubeConfigPath())
+		testFn := PostgreSQLReplicationTestFactory(oc, image)
+		g.It(fmt.Sprintf("postgresql replication works for %s", image), testFn)
+	}
+})
+
+// CreatePostgreSQLReplicationHelpers creates a set of PostgreSQL helpers for the master,
+// the slaves, and an extra helper that is used for the remote login test.
+func CreatePostgreSQLReplicationHelpers(c kclient.PodInterface, masterDeployment, slaveDeployment, helperDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
+	podNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", masterDeployment)), exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	masterPod := podNames[0]
+
+	slavePods, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", slaveDeployment)), exutil.CheckPodIsRunningFn, slaveCount, 3*time.Minute)
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	// Create the PostgreSQL helper for the master
+	master := db.NewPostgreSQL(masterPod, "")
+
+	// Create PostgreSQL helpers for the slaves
+	slaves := make([]exutil.Database, len(slavePods))
+	for i := range slavePods {
+		slave := db.NewPostgreSQL(slavePods[i], masterPod)
+		slaves[i] = slave
+	}
+
+	helperNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", helperDeployment)), exutil.CheckPodIsRunningFn, 1, 1*time.Minute)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	helper := db.NewPostgreSQL(helperNames[0], masterPod)
+
+	return master, slaves, helper
+}
+
+func PostgreSQLReplicationTestFactory(oc *exutil.CLI, image string) func() {
+	return func() {
+		oc.SetOutputDir(exutil.TestContext.OutputDir)
+		defer cleanup(oc)
+
+		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "512Mi", 1)
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		err = testutil.WaitForPolicyUpdate(oc.REST(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		exutil.CheckOpenShiftNamespaceImageStreams(oc)
+		err = oc.Run("new-app").Args("-f", postgreSQLReplicationTemplate, "-p", fmt.Sprintf("IMAGESTREAMTAG=%s", image)).Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		err = oc.Run("new-app").Args("-f", postgreSQLEphemeralTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", postgreSQLHelperName)).Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		// oc.KubeFramework().WaitForAnEndpoint currently waits forever; for now, preface it with our WaitForADeploymentToComplete,
+		// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment.
+		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), postgreSQLHelperName, oc)
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		err = oc.KubeFramework().WaitForAnEndpoint(postgreSQLHelperName)
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		tableCounter := 0
+		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
+			check := func(err error) {
+				if err != nil {
+					exutil.DumpDeploymentLogs("postgresql-master", oc)
+					exutil.DumpDeploymentLogs("postgresql-slave", oc)
+				}
+				o.Expect(err).NotTo(o.HaveOccurred())
+			}
+
+			tableCounter++
+			table := fmt.Sprintf("table_%0.2d", tableCounter)
+
+			master, slaves, helper := CreatePostgreSQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", postgreSQLHelperName), slaveCount)
+			err := exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})
+			if err != nil {
+				exutil.DumpDeploymentLogs("postgresql-master", oc)
+				exutil.DumpDeploymentLogs("postgresql-helper", oc)
+			}
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			err = exutil.WaitUntilAllHelpersAreUp(oc, slaves)
+			check(err)
+
+			// Test if we can query as admin
+			oc.KubeFramework().WaitForAnEndpoint("postgresql-master")
+			err = helper.TestRemoteLogin(oc, "postgresql-master")
+			check(err)
+
+			// Create a new table with a random name
+			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
+			check(err)
+
+			// Write new data to the table through the master
+			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
+			check(err)
+
+			// Make sure the data is present on the master
+			err = exutil.WaitForQueryOutputContains(oc, master, 10*time.Second, false,
+				fmt.Sprintf("SELECT * FROM %s;", table),
+				"col1 | val1\ncol2 | val2")
+			check(err)
+
+			// Make sure the data was replicated to all slaves
+			for _, slave := range slaves {
+				err = exutil.WaitForQueryOutputContains(oc, slave, 90*time.Second, false,
+					fmt.Sprintf("SELECT * FROM %s;", table),
+					"col1 | val1\ncol2 | val2")
+				check(err)
+			}
+
+			return master, slaves, helper
+		}
+
+		g.By("after initial deployment")
+		master, _, _ := assertReplicationIsWorking("postgresql-master-1", "postgresql-slave-1", 1)
+
+		g.By("after master is restarted by changing the Deployment Config")
+		err = oc.Run("env").Args("dc", "postgresql-master", "POSTGRESQL_ADMIN_PASSWORD=newpass").Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		master, _, _ = assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 1)
+
+		g.By("after master is restarted by deleting the pod")
+		err = oc.Run("delete").Args("pod", "-l", "deployment=postgresql-master-2").Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		_, slaves, _ := assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 1)
+
+		g.By("after slave is restarted by deleting the pod")
+		err = oc.Run("delete").Args("pod", "-l", "deployment=postgresql-slave-1").Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 1)
+
+		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=postgresql-slave-1")})
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(len(pods.Items)).To(o.Equal(1))
+
+		g.By("after slave is scaled to 0 and then back to 4 replicas")
+		err = oc.Run("scale").Args("dc", "postgresql-slave", "--replicas=0").Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		err = oc.Run("scale").Args("dc", "postgresql-slave", "--replicas=4").Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 4)
+	}
+}
new file mode 100644
@@ -0,0 +1,131 @@
+package image_ecosystem
+
+import "fmt"
+
+type ImageBaseType string
+
+const (
+	RHELBased   ImageBaseType = "rhel7"
+	CentosBased ImageBaseType = "centos7"
+	AllImages   ImageBaseType = "all"
+)
+
+type tc struct {
+	// The image version string (e.g., '27' or '34')
+	Version string
+	// The base OS ('rhel7' or 'centos7')
+	BaseOS ImageBaseType
+	// Command to execute
+	Cmd string
+	// Expected output from the command
+	Expected string
+
+	// Repository is either openshift/ or rhscl/
+	// The default is 'openshift'
+	Repository string
+
+	// Internal: we resolve this in JustBeforeEach
+	DockerImageReference string
+}
+
+// Internal OpenShift registry to fetch the RHEL7 images from
+const InternalRegistryAddr = "ci.dev.openshift.redhat.com:5000"
+
+// This is a complete list of supported S2I images
+var s2iImages = map[string][]tc{
+	"ruby": {
+		{
+			Version:  "20",
+			Cmd:      "ruby --version",
+			Expected: "ruby 2.0.0",
+		},
+		{
+			Version:  "22",
+			Cmd:      "ruby --version",
+			Expected: "ruby 2.2.2",
+		},
+	},
+	"python": {
+		{
+			Version:  "27",
+			Cmd:      "python --version",
+			Expected: "Python 2.7.8",
+		},
+		{
+			Version:  "33",
+			Cmd:      "python --version",
+			Expected: "Python 3.3.2",
+		},
+	},
+	"nodejs": {
+		{
+			Version:  "010",
+			Cmd:      "node --version",
+			Expected: "v0.10",
+		},
+	},
+	"perl": {
+		{
+			Version:  "516",
+			Cmd:      "perl --version",
+			Expected: "v5.16.3",
+		},
+		{
+			Version:  "520",
+			Cmd:      "perl --version",
+			Expected: "v5.20.1",
+		},
+	},
+	"php": {
+		{
+			Version:  "55",
+			Cmd:      "php --version",
+			Expected: "5.5",
+		},
+		{
+			Version:  "56",
+			Cmd:      "php --version",
+			Expected: "5.6",
+		},
+	},
+}
+
+func GetTestCaseForImages(base ImageBaseType) map[string][]tc {
+	if base == AllImages {
+		result := GetTestCaseForImages(RHELBased)
+		for n, t := range GetTestCaseForImages(CentosBased) {
+			result[n] = append(result[n], t...)
+		}
+		return result
+	}
+	result := make(map[string][]tc)
+	for name, variants := range s2iImages {
+		switch base {
+		case RHELBased:
+			for i := range variants {
+				variants[i].BaseOS = RHELBased
+				resolveDockerImageReference(name, &variants[i])
+				result[name] = append(result[name], variants[i])
+			}
+		case CentosBased:
+			for i := range variants {
+				variants[i].BaseOS = CentosBased
+				resolveDockerImageReference(name, &variants[i])
+				result[name] = append(result[name], variants[i])
+
+			}
+		}
+	}
+	return result
+}
+
+// resolveDockerImageReference resolves the pull spec for a single image test case
+func resolveDockerImageReference(name string, t *tc) {
+	if len(t.Repository) == 0 {
+		t.Repository = "openshift"
+	}
+	t.DockerImageReference = fmt.Sprintf("%s/%s-%s-%s", t.Repository, name, t.Version, t.BaseOS)
+	if t.BaseOS == RHELBased {
+		t.DockerImageReference = fmt.Sprintf("%s/%s", InternalRegistryAddr, t.DockerImageReference)
+	}
+}
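
For concreteness, here is what resolveDockerImageReference yields for two of the cases above (an illustrative sketch; both results follow directly from the function body):

// CentOS-based images resolve to the public openshift/ repository:
ruby := tc{Version: "22", BaseOS: CentosBased}
resolveDockerImageReference("ruby", &ruby)
// ruby.DockerImageReference == "openshift/ruby-22-centos7"

// RHEL-based images are additionally prefixed with the internal registry:
perl := tc{Version: "516", BaseOS: RHELBased}
resolveDockerImageReference("perl", &perl)
// perl.DockerImageReference == "ci.dev.openshift.redhat.com:5000/openshift/perl-516-rhel7"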
new file mode 100644
@@ -0,0 +1,87 @@
+package image_ecosystem
+
+import (
+	"fmt"
+	"time"
+
+	g "github.com/onsi/ginkgo"
+	o "github.com/onsi/gomega"
+
+	kapi "k8s.io/kubernetes/pkg/api"
+
+	exutil "github.com/openshift/origin/test/extended/util"
+)
+
+var _ = g.Describe("[image_ecosystem][perl][Slow] hot deploy for openshift perl image", func() {
+	defer g.GinkgoRecover()
+	var (
+		dancerTemplate = "https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql.json"
+		oc             = exutil.NewCLI("s2i-perl", exutil.KubeConfigPath())
+		modifyCommand  = []string{"sed", "-ie", `s/data => \$data\[0\]/data => "1337"/`, "lib/default.pm"}
+		pageCountFn    = func(count int) string { return fmt.Sprintf(`<span class="code" id="count-value">%d</span>`, count) }
+		dcName         = "dancer-mysql-example-1"
+		dcLabel        = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName))
+	)
+
+	g.Describe("Dancer example", func() {
+		g.It("should work with hot deploy", func() {
+			oc.SetOutputDir(exutil.TestContext.OutputDir)
+
+			exutil.CheckOpenShiftNamespaceImageStreams(oc)
+			g.By(fmt.Sprintf("calling oc new-app -f %q", dancerTemplate))
+			err := oc.Run("new-app").Args("-f", dancerTemplate).Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("waiting for build to finish")
+			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "dancer-mysql-example-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
+			if err != nil {
+				exutil.DumpBuildLogs("dancer-mysql-example", oc)
+			}
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			// oc.KubeFramework().WaitForAnEndpoint currently waits forever; for now, preface it with our WaitForADeploymentToComplete,
+			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment.
+			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "dancer-mysql-example", oc)
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("waiting for endpoint")
+			err = oc.KubeFramework().WaitForAnEndpoint("dancer-mysql-example")
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			assertPageCountIs := func(i int) {
+				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
+				o.Expect(err).NotTo(o.HaveOccurred())
+
+				result, err := CheckPageContains(oc, "dancer-mysql-example", "", pageCountFn(i))
+				o.Expect(err).NotTo(o.HaveOccurred())
+				o.Expect(result).To(o.BeTrue())
+			}
+
+			g.By("checking page count")
+			assertPageCountIs(1)
+			assertPageCountIs(2)
+
+			g.By("modifying the source code with hot deploy disabled")
+			RunInPodContainer(oc, dcLabel, modifyCommand)
+			assertPageCountIs(3)
+
+			pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
+			o.Expect(err).NotTo(o.HaveOccurred())
+			o.Expect(len(pods.Items)).To(o.Equal(1))
+
+			g.By("turning on hot-deploy")
+			err = oc.Run("env").Args("rc", dcName, "PERL_APACHE2_RELOAD=true").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
+			o.Expect(err).NotTo(o.HaveOccurred())
+			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("modifying the source code with hot deploy enabled")
+			RunInPodContainer(oc, dcLabel, modifyCommand)
+			assertPageCountIs(1337)
+		})
+	})
+})
new file mode 100644
@@ -0,0 +1,69 @@
+package image_ecosystem
+
+import (
+	"fmt"
+	"time"
+
+	g "github.com/onsi/ginkgo"
+	o "github.com/onsi/gomega"
+
+	exutil "github.com/openshift/origin/test/extended/util"
+)
+
+var _ = g.Describe("[image_ecosystem][php][Slow] hot deploy for openshift php image", func() {
+	defer g.GinkgoRecover()
+	var (
+		cakephpTemplate = "https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp-mysql.json"
+		oc              = exutil.NewCLI("s2i-php", exutil.KubeConfigPath())
+		hotDeployParam  = "OPCACHE_REVALIDATE_FREQ=0"
+		modifyCommand   = []string{"sed", "-ie", `s/\$result\['c'\]/1337/`, "app/View/Layouts/default.ctp"}
+		pageCountFn     = func(count int) string { return fmt.Sprintf(`<span class="code" id="count-value">%d</span>`, count) }
+		dcName          = "cakephp-mysql-example-1"
+		dcLabel         = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName))
+	)
+	g.Describe("CakePHP example", func() {
+		g.It("should work with hot deploy", func() {
+			oc.SetOutputDir(exutil.TestContext.OutputDir)
+
+			exutil.CheckOpenShiftNamespaceImageStreams(oc)
+			g.By(fmt.Sprintf("calling oc new-app -f %q -p %q", cakephpTemplate, hotDeployParam))
+			err := oc.Run("new-app").Args("-f", cakephpTemplate, "-p", hotDeployParam).Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("waiting for build to finish")
+			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
+			if err != nil {
+				exutil.DumpBuildLogs("cakephp-mysql-example", oc)
+			}
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			// oc.KubeFramework().WaitForAnEndpoint currently waits forever; for now, preface it with our WaitForADeploymentToComplete,
+			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment.
+			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "cakephp-mysql-example", oc)
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("waiting for endpoint")
+			err = oc.KubeFramework().WaitForAnEndpoint("cakephp-mysql-example")
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			assertPageCountIs := func(i int) {
+				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
+				o.Expect(err).NotTo(o.HaveOccurred())
+
+				result, err := CheckPageContains(oc, "cakephp-mysql-example", "", pageCountFn(i))
+				o.Expect(err).NotTo(o.HaveOccurred())
+				o.Expect(result).To(o.BeTrue())
+			}
+
+			g.By("checking page count")
+
+			assertPageCountIs(1)
+			assertPageCountIs(2)
+
+			g.By("modifying the source code with hot deploy enabled")
+			RunInPodContainer(oc, dcLabel, modifyCommand)
+			g.By("checking page count after modifying the source code")
+			assertPageCountIs(1337)
+		})
+	})
+})
new file mode 100644
@@ -0,0 +1,88 @@
+package image_ecosystem
+
+import (
+	"fmt"
+	"time"
+
+	g "github.com/onsi/ginkgo"
+	o "github.com/onsi/gomega"
+
+	kapi "k8s.io/kubernetes/pkg/api"
+
+	exutil "github.com/openshift/origin/test/extended/util"
+)
+
+var _ = g.Describe("[image_ecosystem][python][Slow] hot deploy for openshift python image", func() {
+	defer g.GinkgoRecover()
+
+	var (
+		oc               = exutil.NewCLI("s2i-python", exutil.KubeConfigPath())
+		djangoRepository = "https://github.com/openshift/django-ex.git"
+		modifyCommand    = []string{"sed", "-ie", `s/'count': PageView.objects.count()/'count': 1337/`, "welcome/views.py"}
+		pageCountFn      = func(count int) string { return fmt.Sprintf("Page views: %d", count) }
+		dcName           = "django-ex-1"
+		dcLabel          = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName))
+	)
+	g.Describe("Django example", func() {
+		g.It("should work with hot deploy", func() {
+			oc.SetOutputDir(exutil.TestContext.OutputDir)
+
+			err := exutil.WaitForOpenShiftNamespaceImageStreams(oc)
+			o.Expect(err).NotTo(o.HaveOccurred())
+			g.By(fmt.Sprintf("calling oc new-app %s", djangoRepository))
+			err = oc.Run("new-app").Args(djangoRepository, "--strategy=source").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("waiting for build to finish")
+			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "django-ex-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
+			if err != nil {
+				exutil.DumpBuildLogs("django-ex", oc)
+			}
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			// oc.KubeFramework().WaitForAnEndpoint currently waits forever; for now, preface it with our WaitForADeploymentToComplete,
+			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment.
+			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "django-ex", oc)
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("waiting for endpoint")
+			err = oc.KubeFramework().WaitForAnEndpoint("django-ex")
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			assertPageCountIs := func(i int) {
+				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
+				o.Expect(err).NotTo(o.HaveOccurred())
+
+				result, err := CheckPageContains(oc, "django-ex", "", pageCountFn(i))
+				o.Expect(err).NotTo(o.HaveOccurred())
+				o.Expect(result).To(o.BeTrue())
+			}
+
+			g.By("checking page count")
+			assertPageCountIs(1)
+			assertPageCountIs(2)
+
+			g.By("modifying the source code with hot deploy disabled")
+			RunInPodContainer(oc, dcLabel, modifyCommand)
+			assertPageCountIs(3)
+
+			pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
+			o.Expect(err).NotTo(o.HaveOccurred())
+			o.Expect(len(pods.Items)).To(o.Equal(1))
+
+			g.By("turning on hot-deploy")
+			err = oc.Run("env").Args("rc", dcName, "APP_CONFIG=conf/reload.py").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
+			o.Expect(err).NotTo(o.HaveOccurred())
+			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("modifying the source code with hot deploy enabled")
+			RunInPodContainer(oc, dcLabel, modifyCommand)
+			assertPageCountIs(1337)
+		})
+	})
+})
new file mode 100644
@@ -0,0 +1,87 @@
+package image_ecosystem
+
+import (
+	"fmt"
+	"time"
+
+	g "github.com/onsi/ginkgo"
+	o "github.com/onsi/gomega"
+
+	kapi "k8s.io/kubernetes/pkg/api"
+
+	exutil "github.com/openshift/origin/test/extended/util"
+)
+
+var _ = g.Describe("[image_ecosystem][ruby][Slow] hot deploy for openshift ruby image", func() {
+	defer g.GinkgoRecover()
+	var (
+		railsTemplate = "https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json"
+		oc            = exutil.NewCLI("s2i-ruby", exutil.KubeConfigPath())
+		modifyCommand = []string{"sed", "-ie", `s%render :file => 'public/index.html'%%`, "app/controllers/welcome_controller.rb"}
+		removeCommand = []string{"rm", "-f", "public/index.html"}
+		dcName        = "rails-postgresql-example-1"
+		dcLabel       = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName))
+	)
+	g.Describe("Rails example", func() {
+		g.It("should work with hot deploy", func() {
+			oc.SetOutputDir(exutil.TestContext.OutputDir)
+
+			exutil.CheckOpenShiftNamespaceImageStreams(oc)
+			g.By(fmt.Sprintf("calling oc new-app -f %q", railsTemplate))
+			err := oc.Run("new-app").Args("-f", railsTemplate).Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("waiting for build to finish")
+			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
+			if err != nil {
+				exutil.DumpBuildLogs("rails-postgresql-example", oc)
+			}
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			// oc.KubeFramework().WaitForAnEndpoint currently waits forever; for now, preface it with our WaitForADeploymentToComplete,
+			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment.
+			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "rails-postgresql-example", oc)
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("waiting for endpoint")
+			err = oc.KubeFramework().WaitForAnEndpoint("rails-postgresql-example")
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			assertPageContent := func(content string) {
+				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
+				o.Expect(err).NotTo(o.HaveOccurred())
+
+				result, err := CheckPageContains(oc, "rails-postgresql-example", "", content)
+				o.Expect(err).NotTo(o.HaveOccurred())
+				o.Expect(result).To(o.BeTrue())
+			}
+
+			g.By("testing application content")
+			assertPageContent("Welcome to your Rails application on OpenShift")
+			g.By("modifying the source code with hot deploy disabled")
+			RunInPodContainer(oc, dcLabel, modifyCommand)
+			RunInPodContainer(oc, dcLabel, removeCommand)
+			g.By("testing application content after source modification")
+			assertPageContent("Welcome to your Rails application on OpenShift")
+
+			pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
+			o.Expect(err).NotTo(o.HaveOccurred())
+			o.Expect(len(pods.Items)).To(o.Equal(1))
+
+			g.By("turning on hot-deploy")
+			err = oc.Run("env").Args("rc", dcName, "RAILS_ENV=development").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
+			o.Expect(err).NotTo(o.HaveOccurred())
+			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
+			o.Expect(err).NotTo(o.HaveOccurred())
+
+			g.By("modifying the source code with hot deploy enabled")
+			RunInPodContainer(oc, dcLabel, modifyCommand)
+			RunInPodContainer(oc, dcLabel, removeCommand)
+			assertPageContent("Hello, Rails!")
+		})
+	})
+})
new file mode 100644
... ...
@@ -0,0 +1,225 @@
0
+package image_ecosystem
1
+
2
+import (
3
+	"fmt"
4
+	"time"
5
+
6
+	g "github.com/onsi/ginkgo"
7
+	o "github.com/onsi/gomega"
8
+
9
+	exutil "github.com/openshift/origin/test/extended/util"
10
+)
11
+
12
+type SampleRepoConfig struct {
13
+	repoName               string
14
+	templateURL            string
15
+	buildConfigName        string
16
+	serviceName            string
17
+	deploymentConfigName   string
18
+	expectedString         string
19
+	appPath                string
20
+	dbDeploymentConfigName string
21
+	dbServiceName          string
22
+}
23
+
24
+// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template
25
+// from a url, kick off the buildconfig defined in that template, wait for the build/deploy,
26
+// and then confirm the application is serving an expected string value.
27
+func NewSampleRepoTest(c SampleRepoConfig) func() {
28
+	return func() {
29
+		defer g.GinkgoRecover()
30
+		var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath())
31
+
32
+		g.JustBeforeEach(func() {
33
+			g.By("Waiting for builder service account")
34
+			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
35
+			o.Expect(err).NotTo(o.HaveOccurred())
36
+		})
37
+
38
+		g.Describe("Building "+c.repoName+" app from new-app", func() {
39
+			g.It(fmt.Sprintf("should build a "+c.repoName+" image and run it in a pod"), func() {
40
+				oc.SetOutputDir(exutil.TestContext.OutputDir)
41
+
42
+				err := exutil.WaitForOpenShiftNamespaceImageStreams(oc)
43
+				o.Expect(err).NotTo(o.HaveOccurred())
44
+				g.By(fmt.Sprintf("calling oc new-app with the " + c.repoName + " example template"))
45
+				err = oc.Run("new-app").Args("-f", c.templateURL).Execute()
46
+				o.Expect(err).NotTo(o.HaveOccurred())
47
+
48
+				// all the templates automatically start a build.
49
+				buildName := c.buildConfigName + "-1"
50
+
51
+				g.By("expecting the build is in the Complete phase")
52
+				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
53
+				if err != nil {
54
+					exutil.DumpBuildLogs(c.buildConfigName, oc)
55
+				}
56
+				o.Expect(err).NotTo(o.HaveOccurred())
57
+
58
+				g.By("expecting the app deployment to be complete")
59
+				err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
60
+				o.Expect(err).NotTo(o.HaveOccurred())
61
+
62
+				if len(c.dbDeploymentConfigName) > 0 {
63
+					g.By("expecting the db deployment to be complete")
64
+					err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
65
+					o.Expect(err).NotTo(o.HaveOccurred())
66
+
67
+					g.By("expecting the db service is available")
68
+					serviceIP, err := oc.Run("get").Args("service", c.dbServiceName).Template("{{ .spec.clusterIP }}").Output()
69
+					o.Expect(err).NotTo(o.HaveOccurred())
70
+					o.Expect(serviceIP).ShouldNot(o.Equal(""))
71
+
72
+					g.By("expecting a db endpoint is available")
73
+					err = oc.KubeFramework().WaitForAnEndpoint(c.dbServiceName)
74
+					o.Expect(err).NotTo(o.HaveOccurred())
75
+				}
76
+
77
+				g.By("expecting the app service is available")
78
+				serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output()
79
+				o.Expect(err).NotTo(o.HaveOccurred())
80
+				o.Expect(serviceIP).ShouldNot(o.Equal(""))
81
+
82
+				g.By("expecting an app endpoint is available")
83
+				err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName)
84
+				o.Expect(err).NotTo(o.HaveOccurred())
85
+
86
+				g.By("verifying string from app request")
87
+				response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, time.Duration(30*time.Second))
88
+				o.Expect(err).NotTo(o.HaveOccurred())
89
+				o.Expect(response).Should(o.ContainSubstring(c.expectedString))
90
+			})
91
+		})
92
+	}
93
+}
94
+
95
+var _ = g.Describe("[image_ecosystem][Slow] openshift sample application repositories", func() {
96
+
97
+	g.Describe("[image_ecosystem][ruby] test ruby images with rails-ex db repo", NewSampleRepoTest(
98
+		SampleRepoConfig{
99
+			"rails-postgresql",
100
+			"https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json",
101
+			"rails-postgresql-example",
102
+			"rails-postgresql-example",
103
+			"rails-postgresql-example",
104
+			"Listing articles",
105
+			"/articles",
106
+			"postgresql",
107
+			"postgresql",
108
+		},
109
+	))
110
+
111
+	g.Describe("[image_ecosystem][python] test python images with django-ex db repo", NewSampleRepoTest(
112
+		SampleRepoConfig{
113
+			"django-psql",
114
+			"https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql.json",
115
+			"django-psql-example",
116
+			"django-psql-example",
117
+			"django-psql-example",
118
+			"Page views: 1",
119
+			"",
120
+			"postgresql",
121
+			"postgresql",
122
+		},
123
+	))
124
+
125
+	g.Describe("[image_ecosystem][nodejs] test nodejs images with nodejs-ex db repo", NewSampleRepoTest(
126
+		SampleRepoConfig{
127
+			"nodejs-mongodb",
128
+			"https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json",
129
+			"nodejs-mongodb-example",
130
+			"nodejs-mongodb-example",
131
+			"nodejs-mongodb-example",
132
+			"<span class=\"code\" id=\"count-value\">1</span>",
133
+			"",
134
+			"mongodb",
135
+			"mongodb",
136
+		},
137
+	))
138
+
139
+	var _ = g.Describe("[image_ecosystem][php] test php images with cakephp-ex db repo", NewSampleRepoTest(
140
+		SampleRepoConfig{
141
+			"cakephp-mysql",
142
+			"https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp-mysql.json",
143
+			"cakephp-mysql-example",
144
+			"cakephp-mysql-example",
145
+			"cakephp-mysql-example",
146
+			"<span class=\"code\" id=\"count-value\">1</span>",
147
+			"",
148
+			"mysql",
149
+			"mysql",
150
+		},
151
+	))
152
+
153
+	var _ = g.Describe("[image_ecosystem][perl] test perl images with dancer-ex db repo", NewSampleRepoTest(
154
+		SampleRepoConfig{
155
+			"dancer-mysql",
156
+			"https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql.json",
157
+			"dancer-mysql-example",
158
+			"dancer-mysql-example",
159
+			"dancer-mysql-example",
160
+			"<span class=\"code\" id=\"count-value\">1</span>",
161
+			"",
162
+			"database",
163
+			"database",
164
+		},
165
+	))
166
+
167
+	// test the no-db templates too
168
+	g.Describe("[image_ecosystem][python] test python images with django-ex repo", NewSampleRepoTest(
169
+		SampleRepoConfig{
170
+			"django",
171
+			"https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django.json",
172
+			"django-example",
173
+			"django-example",
174
+			"django-example",
175
+			"Welcome",
176
+			"",
177
+			"",
178
+			"",
179
+		},
180
+	))
181
+
182
+	g.Describe("[image_ecosystem][nodejs] images with nodejs-ex repo", NewSampleRepoTest(
183
+		SampleRepoConfig{
184
+			"nodejs",
185
+			"https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs.json",
186
+			"nodejs-example",
187
+			"nodejs-example",
188
+			"nodejs-example",
189
+			"Welcome",
190
+			"",
191
+			"",
192
+			"",
193
+		},
194
+	))
195
+
196
+	var _ = g.Describe("[image_ecosystem][php] test php images with cakephp-ex repo", NewSampleRepoTest(
197
+		SampleRepoConfig{
198
+			"cakephp",
199
+			"https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp.json",
200
+			"cakephp-example",
201
+			"cakephp-example",
202
+			"cakephp-example",
203
+			"Welcome",
204
+			"",
205
+			"",
206
+			"",
207
+		},
208
+	))
209
+
210
+	var _ = g.Describe("[image_ecosystem][perl] test perl images with dancer-ex repo", NewSampleRepoTest(
211
+		SampleRepoConfig{
212
+			"dancer",
213
+			"https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer.json",
214
+			"dancer-example",
215
+			"dancer-example",
216
+			"dancer-example",
217
+			"Welcome",
218
+			"",
219
+			"",
220
+			"",
221
+		},
222
+	))
223
+
224
+})
0 225
new file mode 100644
... ...
@@ -0,0 +1,76 @@
0
+package image_ecosystem
1
+
2
+import (
3
+	"fmt"
4
+
5
+	g "github.com/onsi/ginkgo"
6
+	o "github.com/onsi/gomega"
7
+
8
+	exutil "github.com/openshift/origin/test/extended/util"
9
+	kapi "k8s.io/kubernetes/pkg/api"
10
+)
11
+
12
+func getPodNameForTest(image string, t tc) string {
13
+	return fmt.Sprintf("%s-%s-%s", image, t.Version, t.BaseOS)
14
+}
15
+
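For example, getPodNameForTest("ruby", tc{Version: "22", BaseOS: CentosBased}) yields "ruby-22-centos7", so each (image, version, OS) combination below gets a distinct pod name.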
16
+var _ = g.Describe("[image_ecosystem][Slow] openshift images should be SCL enabled", func() {
17
+	defer g.GinkgoRecover()
18
+	var oc = exutil.NewCLI("s2i-usage", exutil.KubeConfigPath())
19
+
20
+	g.JustBeforeEach(func() {
21
+		g.By("waiting for builder service account")
22
+		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
23
+		o.Expect(err).NotTo(o.HaveOccurred())
24
+	})
25
+
26
+	for image, tcs := range GetTestCaseForImages(AllImages) {
27
+		for _, t := range tcs {
28
+			g.Describe("returning s2i usage when running the image", func() {
29
+				g.It(fmt.Sprintf("%q should print the usage", t.DockerImageReference), func() {
30
+					g.By(fmt.Sprintf("creating a sample pod for %q", t.DockerImageReference))
31
+					pod := exutil.GetPodForContainer(kapi.Container{
32
+						Name:  "test",
33
+						Image: t.DockerImageReference,
34
+					})
35
+					oc.KubeFramework().TestContainerOutput(getPodNameForTest(image, t), pod, 0, []string{"Sample invocation"})
36
+				})
37
+			})
38
+
39
+			g.Describe("using the SCL in s2i images", func() {
40
+				g.It(fmt.Sprintf("%q should be SCL enabled", t.DockerImageReference), func() {
41
+					g.By(fmt.Sprintf("creating a sample pod for %q with /bin/bash -c command", t.DockerImageReference))
42
+					pod := exutil.GetPodForContainer(kapi.Container{
43
+						Image:   t.DockerImageReference,
44
+						Name:    "test",
45
+						Command: []string{"/bin/bash", "-c", t.Cmd},
46
+					})
47
+
48
+					oc.KubeFramework().TestContainerOutput(getPodNameForTest(image, t), pod, 0, []string{t.Expected})
49
+
50
+					g.By(fmt.Sprintf("creating a sample pod for %q", t.DockerImageReference))
51
+					pod = exutil.GetPodForContainer(kapi.Container{
52
+						Image:   t.DockerImageReference,
53
+						Name:    "test",
54
+						Command: []string{"/usr/bin/sleep", "infinity"},
55
+					})
56
+					_, err := oc.KubeREST().Pods(oc.Namespace()).Create(pod)
57
+					o.Expect(err).NotTo(o.HaveOccurred())
58
+
59
+					err = oc.KubeFramework().WaitForPodRunning(pod.Name)
60
+					o.Expect(err).NotTo(o.HaveOccurred())
61
+
62
+					g.By("calling the binary using 'oc exec /bin/bash -c'")
63
+					out, err := oc.Run("exec").Args("-p", pod.Name, "--", "/bin/bash", "-c", t.Cmd).Output()
64
+					o.Expect(err).NotTo(o.HaveOccurred())
65
+					o.Expect(out).Should(o.ContainSubstring(t.Expected))
66
+
67
+					g.By("calling the binary using 'oc exec /bin/sh -ic'")
68
+					out, err = oc.Run("exec").Args("-p", pod.Name, "--", "/bin/sh", "-ic", t.Cmd).Output()
69
+					o.Expect(err).NotTo(o.HaveOccurred())
70
+					o.Expect(out).Should(o.ContainSubstring(t.Expected))
71
+				})
72
+			})
73
+		}
74
+	}
75
+})
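A note on why the exec step runs the command twice: "/bin/bash -c" executes it in a non-interactive shell, while "/bin/sh -ic" forces an interactive shell that reads its startup files. Roughly speaking, an SCL-enabled image has to put the collection's binaries on PATH in both cases, so asserting the same expected output through both invocations is what makes this a meaningful "SCL enabled" check.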
... ...
@@ -11,12 +11,9 @@ import (
11 11
 	"regexp"
12 12
 	"strconv"
13 13
 	"strings"
14
-	"time"
15 14
 
16 15
 	dockerclient "github.com/fsouza/go-dockerclient"
17 16
 
18
-	"k8s.io/kubernetes/pkg/labels"
19
-
20 17
 	"github.com/openshift/origin/pkg/client"
21 18
 	"github.com/openshift/origin/pkg/image/api"
22 19
 	exutil "github.com/openshift/origin/test/extended/util"
... ...
@@ -57,40 +54,6 @@ func GetImageLabels(c client.ImageStreamImageInterface, imageRepoName, imageRef
57 57
 	return image.Image.DockerImageMetadata.Config.Labels, nil
58 58
 }
59 59
 
60
-// RunInPodContainer will run provided command in the specified pod container.
61
-func RunInPodContainer(oc *exutil.CLI, selector labels.Selector, cmd []string) error {
62
-	pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), selector, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
63
-	if err != nil {
64
-		return err
65
-	}
66
-	if len(pods) != 1 {
67
-		return fmt.Errorf("Got %d pods for selector %v, expected 1", len(pods), selector)
68
-	}
69
-
70
-	pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
71
-	if err != nil {
72
-		return err
73
-	}
74
-	args := []string{pod.Name, "-c", pod.Spec.Containers[0].Name, "--"}
75
-	args = append(args, cmd...)
76
-	return oc.Run("exec").Args(args...).Execute()
77
-}
78
-
79
-// CheckPageContains makes a http request for an example application and checks
80
-// that the result contains given string
81
-func CheckPageContains(oc *exutil.CLI, endpoint, path, contents string) (bool, error) {
82
-	address, err := exutil.GetEndpointAddress(oc, endpoint)
83
-	if err != nil {
84
-		return false, err
85
-	}
86
-
87
-	response, err := exutil.FetchURL(fmt.Sprintf("http://%s/%s", address, path), 3*time.Minute)
88
-	if err != nil {
89
-		return false, err
90
-	}
91
-	return strings.Contains(response, contents), nil
92
-}
93
-
94 60
 // BuildAndPushImageOfSizeWithBuilder tries to build an image of wanted size and number of layers. Built image
95 61
 // is stored as an image stream tag <name>:<tag>. If shouldSucceed is false, a build is expected to fail with
96 62
 // a denied error. Note the size is only approximate. Resulting image size will be different depending on used
97 63
deleted file mode 100644
... ...
@@ -1,41 +0,0 @@
1
-package images
2
-
3
-import (
4
-	"fmt"
5
-
6
-	g "github.com/onsi/ginkgo"
7
-	o "github.com/onsi/gomega"
8
-
9
-	exutil "github.com/openshift/origin/test/extended/util"
10
-)
11
-
12
-var _ = g.Describe("[images][mariadb][Slow] openshift mariadb image", func() {
13
-	defer g.GinkgoRecover()
14
-	var (
15
-		templatePath = exutil.FixturePath("..", "..", "examples", "db-templates", "mariadb-ephemeral-template.json")
16
-		oc           = exutil.NewCLI("mariadb-create", exutil.KubeConfigPath())
17
-	)
18
-	g.Describe("Creating from a template", func() {
19
-		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
20
-			oc.SetOutputDir(exutil.TestContext.OutputDir)
21
-
22
-			g.By(fmt.Sprintf("calling oc process -f %q", templatePath))
23
-			configFile, err := oc.Run("process").Args("-f", templatePath).OutputToFile("config.json")
24
-			o.Expect(err).NotTo(o.HaveOccurred())
25
-
26
-			g.By(fmt.Sprintf("calling oc create -f %q", configFile))
27
-			err = oc.Run("create").Args("-f", configFile).Execute()
28
-			o.Expect(err).NotTo(o.HaveOccurred())
29
-
30
-			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
31
-			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
32
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mariadb", oc)
33
-			o.Expect(err).NotTo(o.HaveOccurred())
34
-
35
-			g.By("expecting the mariadb service get endpoints")
36
-			err = oc.KubeFramework().WaitForAnEndpoint("mariadb")
37
-			o.Expect(err).NotTo(o.HaveOccurred())
38
-		})
39
-	})
40
-
41
-})
42 1
deleted file mode 100644
... ...
@@ -1,62 +0,0 @@
1
-package images
2
-
3
-import (
4
-	"fmt"
5
-
6
-	g "github.com/onsi/ginkgo"
7
-	o "github.com/onsi/gomega"
8
-
9
-	"time"
10
-
11
-	exutil "github.com/openshift/origin/test/extended/util"
12
-	"github.com/openshift/origin/test/extended/util/db"
13
-)
14
-
15
-var _ = g.Describe("[images][mongodb] openshift mongodb image", func() {
16
-	defer g.GinkgoRecover()
17
-
18
-	templatePath := exutil.FixturePath("..", "..", "examples", "db-templates", "mongodb-ephemeral-template.json")
19
-	oc := exutil.NewCLI("mongodb-create", exutil.KubeConfigPath()).Verbose()
20
-
21
-	g.Describe("creating from a template", func() {
22
-		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
23
-
24
-			exutil.CheckOpenShiftNamespaceImageStreams(oc)
25
-			g.By("creating a new app")
26
-			o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())
27
-
28
-			g.By("waiting for the deployment to complete")
29
-			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mongodb", oc)
30
-			o.Expect(err).ShouldNot(o.HaveOccurred())
31
-
32
-			g.By("expecting the mongodb pod is running")
33
-			podNames, err := exutil.WaitForPods(
34
-				oc.KubeREST().Pods(oc.Namespace()),
35
-				exutil.ParseLabelsOrDie("name=mongodb"),
36
-				exutil.CheckPodIsRunningFn,
37
-				1,
38
-				1*time.Minute,
39
-			)
40
-			o.Expect(err).ShouldNot(o.HaveOccurred())
41
-			o.Expect(podNames).Should(o.HaveLen(1))
42
-
43
-			g.By("expecting the mongodb service is answering for ping")
44
-			mongo := db.NewMongoDB(podNames[0])
45
-			ok, err := mongo.IsReady(oc)
46
-			o.Expect(err).ShouldNot(o.HaveOccurred())
47
-			o.Expect(ok).Should(o.BeTrue())
48
-
49
-			g.By("expecting that we can insert a new record")
50
-			result, err := mongo.Query(oc, `db.foo.save({ "status": "passed" })`)
51
-			o.Expect(err).ShouldNot(o.HaveOccurred())
52
-			o.Expect(result).Should(o.ContainSubstring(`WriteResult({ "nInserted" : 1 })`))
53
-
54
-			g.By("expecting that we can read a record")
55
-			findCmd := "printjson(db.foo.find({}, {_id: 0}).toArray())" // don't include _id field to output because it changes every time
56
-			result, err = mongo.Query(oc, findCmd)
57
-			o.Expect(err).ShouldNot(o.HaveOccurred())
58
-			o.Expect(result).Should(o.ContainSubstring(`{ "status" : "passed" }`))
59
-		})
60
-	})
61
-
62
-})
63 1
deleted file mode 100644
... ...
@@ -1,118 +0,0 @@
1
-package images
2
-
3
-import (
4
-	"fmt"
5
-	"strconv"
6
-	"time"
7
-
8
-	g "github.com/onsi/ginkgo"
9
-	o "github.com/onsi/gomega"
10
-
11
-	exutil "github.com/openshift/origin/test/extended/util"
12
-	"github.com/openshift/origin/test/extended/util/db"
13
-)
14
-
15
-var _ = g.Describe("[images][mongodb] openshift mongodb replication", func() {
16
-	defer g.GinkgoRecover()
17
-
18
-	const (
19
-		templatePath         = "https://raw.githubusercontent.com/sclorg/mongodb-container/master/2.4/examples/replica/mongodb-clustered.json"
20
-		deploymentConfigName = "mongodb"
21
-		expectedValue        = `{ "status" : "passed" }`
22
-		insertCmd            = "db.bar.save(" + expectedValue + ")"
23
-	)
24
-
25
-	const (
26
-		expectedReplicasAfterDeployment = 3
27
-		expectedReplicasAfterScalingUp  = expectedReplicasAfterDeployment + 2
28
-	)
29
-
30
-	oc := exutil.NewCLI("mongodb-replica", exutil.KubeConfigPath()).Verbose()
31
-
32
-	g.Describe("creating from a template", func() {
33
-		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
34
-
35
-			exutil.CheckOpenShiftNamespaceImageStreams(oc)
36
-			g.By("creating a new app")
37
-			o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())
38
-
39
-			g.By("waiting for the deployment to complete")
40
-			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), deploymentConfigName, oc)
41
-			o.Expect(err).NotTo(o.HaveOccurred())
42
-
43
-			podNames := waitForNumberOfPodsWithLabel(oc, expectedReplicasAfterDeployment, "mongodb-replica")
44
-			mongo := db.NewMongoDB(podNames[0])
45
-
46
-			g.By(fmt.Sprintf("expecting that replica set have %d members", expectedReplicasAfterDeployment))
47
-			assertMembersInReplica(oc, mongo, expectedReplicasAfterDeployment)
48
-
49
-			g.By("expecting that we can insert a new record on primary node")
50
-			replicaSet := mongo.(exutil.ReplicaSet)
51
-			_, err = replicaSet.QueryPrimary(oc, insertCmd)
52
-			o.Expect(err).ShouldNot(o.HaveOccurred())
53
-
54
-			g.By("expecting that we can read a record from all members")
55
-			for _, podName := range podNames {
56
-				tryToReadFromPod(oc, podName, expectedValue)
57
-			}
58
-
59
-			g.By(fmt.Sprintf("scaling deployment config %s to %d replicas", deploymentConfigName, expectedReplicasAfterScalingUp))
60
-
61
-			err = oc.Run("scale").Args("dc", deploymentConfigName, "--replicas="+fmt.Sprint(expectedReplicasAfterScalingUp), "--timeout=30s").Execute()
62
-			o.Expect(err).NotTo(o.HaveOccurred())
63
-
64
-			podNames = waitForNumberOfPodsWithLabel(oc, expectedReplicasAfterScalingUp, "mongodb-replica")
65
-			mongo = db.NewMongoDB(podNames[0])
66
-
67
-			g.By("expecting that scaling replica set up should have more members")
68
-			assertMembersInReplica(oc, mongo, expectedReplicasAfterScalingUp)
69
-		})
70
-	})
71
-
72
-})
73
-
74
-func tryToReadFromPod(oc *exutil.CLI, podName, expectedValue string) {
75
-	// don't include _id field to output because it changes every time
76
-	findCmd := "rs.slaveOk(); printjson(db.bar.find({}, {_id: 0}).toArray())"
77
-
78
-	fmt.Fprintf(g.GinkgoWriter, "DEBUG: reading record from pod %v\n", podName)
79
-
80
-	mongoPod := db.NewMongoDB(podName)
81
-	result, err := mongoPod.Query(oc, findCmd)
82
-	o.Expect(err).ShouldNot(o.HaveOccurred())
83
-	o.Expect(result).Should(o.ContainSubstring(expectedValue))
84
-}
85
-
86
-func waitForNumberOfPodsWithLabel(oc *exutil.CLI, number int, label string) []string {
87
-	g.By(fmt.Sprintf("expecting that there are %d running pods with label name=%s", number, label))
88
-
89
-	podNames, err := exutil.WaitForPods(
90
-		oc.KubeREST().Pods(oc.Namespace()),
91
-		exutil.ParseLabelsOrDie("name="+label),
92
-		exutil.CheckPodIsRunningFn,
93
-		number,
94
-		1*time.Minute,
95
-	)
96
-	o.Expect(err).ShouldNot(o.HaveOccurred())
97
-	o.Expect(podNames).Should(o.HaveLen(number))
98
-
99
-	return podNames
100
-}
101
-
102
-func assertMembersInReplica(oc *exutil.CLI, db exutil.Database, expectedReplicas int) {
103
-	isMasterCmd := "printjson(db.isMaster())"
104
-	getReplicaHostsCmd := "print(db.isMaster().hosts.length)"
105
-
106
-	// pod is running but we need to wait when it will be really ready (became member of the replica)
107
-	err := exutil.WaitForQueryOutputSatisfies(oc, db, 1*time.Minute, false, isMasterCmd, func(commandOutput string) bool {
108
-		return commandOutput != ""
109
-	})
110
-	o.Expect(err).ShouldNot(o.HaveOccurred())
111
-
112
-	isMasterOutput, _ := db.Query(oc, isMasterCmd)
113
-	fmt.Fprintf(g.GinkgoWriter, "DEBUG: Output of the db.isMaster() command: %v\n", isMasterOutput)
114
-
115
-	members, err := db.Query(oc, getReplicaHostsCmd)
116
-	o.Expect(err).ShouldNot(o.HaveOccurred())
117
-	o.Expect(members).Should(o.Equal(strconv.Itoa(expectedReplicas)))
118
-}
119 1
deleted file mode 100644
... ...
@@ -1,41 +0,0 @@
1
-package images
2
-
3
-import (
4
-	"fmt"
5
-
6
-	g "github.com/onsi/ginkgo"
7
-	o "github.com/onsi/gomega"
8
-
9
-	exutil "github.com/openshift/origin/test/extended/util"
10
-)
11
-
12
-var _ = g.Describe("[images][mysql][Slow] openshift mysql image", func() {
13
-	defer g.GinkgoRecover()
14
-	var (
15
-		templatePath = exutil.FixturePath("..", "..", "examples", "db-templates", "mysql-ephemeral-template.json")
16
-		oc           = exutil.NewCLI("mysql-create", exutil.KubeConfigPath())
17
-	)
18
-	g.Describe("Creating from a template", func() {
19
-		g.It(fmt.Sprintf("should process and create the %q template", templatePath), func() {
20
-			oc.SetOutputDir(exutil.TestContext.OutputDir)
21
-
22
-			g.By(fmt.Sprintf("calling oc process -f %q", templatePath))
23
-			configFile, err := oc.Run("process").Args("-f", templatePath).OutputToFile("config.json")
24
-			o.Expect(err).NotTo(o.HaveOccurred())
25
-
26
-			g.By(fmt.Sprintf("calling oc create -f %q", configFile))
27
-			err = oc.Run("create").Args("-f", configFile).Execute()
28
-			o.Expect(err).NotTo(o.HaveOccurred())
29
-
30
-			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
31
-			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
32
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mysql", oc)
33
-			o.Expect(err).NotTo(o.HaveOccurred())
34
-
35
-			g.By("expecting the mysql service get endpoints")
36
-			err = oc.KubeFramework().WaitForAnEndpoint("mysql")
37
-			o.Expect(err).NotTo(o.HaveOccurred())
38
-		})
39
-	})
40
-
41
-})
42 1
deleted file mode 100644
... ...
@@ -1,197 +0,0 @@
1
-package images
2
-
3
-import (
4
-	"fmt"
5
-	"time"
6
-
7
-	g "github.com/onsi/ginkgo"
8
-	o "github.com/onsi/gomega"
9
-
10
-	templateapi "github.com/openshift/origin/pkg/template/api"
11
-	exutil "github.com/openshift/origin/test/extended/util"
12
-	"github.com/openshift/origin/test/extended/util/db"
13
-	testutil "github.com/openshift/origin/test/util"
14
-
15
-	kapi "k8s.io/kubernetes/pkg/api"
16
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
17
-)
18
-
19
-type testCase struct {
20
-	Version         string
21
-	TemplatePath    string
22
-	SkipReplication bool
23
-}
24
-
25
-var (
26
-	testCases = []testCase{
27
-		{
28
-			"5.5",
29
-			"https://raw.githubusercontent.com/sclorg/mysql-container/master/5.5/examples/replica/mysql_replica.json",
30
-			// NOTE: Set to true in case of flakes.
31
-			false,
32
-		},
33
-		{
34
-			"5.6",
35
-			"https://raw.githubusercontent.com/sclorg/mysql-container/master/5.6/examples/replica/mysql_replica.json",
36
-			false,
37
-		},
38
-	}
39
-	helperTemplate = exutil.FixturePath("..", "..", "examples", "db-templates", "mysql-ephemeral-template.json")
40
-	helperName     = "mysql-helper"
41
-)
42
-
43
-// CreateMySQLReplicationHelpers creates a set of MySQL helpers for master,
44
-// slave and an extra helper that is used for remote login test.
45
-func CreateMySQLReplicationHelpers(c kclient.PodInterface, masterDeployment, slaveDeployment, helperDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
46
-	podNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", masterDeployment)), exutil.CheckPodIsRunningFn, 1, 1*time.Minute)
47
-	o.Expect(err).NotTo(o.HaveOccurred())
48
-	masterPod := podNames[0]
49
-
50
-	slavePods, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", slaveDeployment)), exutil.CheckPodIsRunningFn, slaveCount, 2*time.Minute)
51
-	o.Expect(err).NotTo(o.HaveOccurred())
52
-
53
-	// Create MySQL helper for master
54
-	master := db.NewMysql(masterPod, "")
55
-
56
-	// Create MySQL helpers for slaves
57
-	slaves := make([]exutil.Database, len(slavePods))
58
-	for i := range slavePods {
59
-		slave := db.NewMysql(slavePods[i], masterPod)
60
-		slaves[i] = slave
61
-	}
62
-
63
-	helperNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", helperDeployment)), exutil.CheckPodIsRunningFn, 1, 1*time.Minute)
64
-	o.Expect(err).NotTo(o.HaveOccurred())
65
-	helper := db.NewMysql(helperNames[0], masterPod)
66
-
67
-	return master, slaves, helper
68
-}
69
-
70
-func cleanup(oc *exutil.CLI) {
71
-	exutil.DumpImageStreams(oc)
72
-	oc.AsAdmin().Run("delete").Args("all", "--all", "-n", oc.Namespace()).Execute()
73
-	exutil.DumpImageStreams(oc)
74
-	oc.AsAdmin().Run("delete").Args("pvc", "--all", "-n", oc.Namespace()).Execute()
75
-	exutil.CleanupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace())
76
-}
77
-
78
-func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
79
-	return func() {
80
-		oc.SetOutputDir(exutil.TestContext.OutputDir)
81
-		defer cleanup(oc)
82
-
83
-		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "1Gi", 2)
84
-		o.Expect(err).NotTo(o.HaveOccurred())
85
-
86
-		err = testutil.WaitForPolicyUpdate(oc.REST(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
87
-		o.Expect(err).NotTo(o.HaveOccurred())
88
-
89
-		exutil.CheckOpenShiftNamespaceImageStreams(oc)
90
-		err = oc.Run("new-app").Args("-f", tc.TemplatePath).Execute()
91
-		o.Expect(err).NotTo(o.HaveOccurred())
92
-
93
-		err = oc.Run("new-app").Args("-f", helperTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", helperName)).Execute()
94
-		o.Expect(err).NotTo(o.HaveOccurred())
95
-
96
-		// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
97
-		// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
98
-		g.By("waiting for the deployment to complete")
99
-		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), helperName, oc)
100
-		o.Expect(err).NotTo(o.HaveOccurred())
101
-
102
-		g.By("waiting for an endpoint")
103
-		err = oc.KubeFramework().WaitForAnEndpoint(helperName)
104
-		o.Expect(err).NotTo(o.HaveOccurred())
105
-
106
-		tableCounter := 0
107
-		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
108
-			tableCounter++
109
-			table := fmt.Sprintf("table_%0.2d", tableCounter)
110
-
111
-			g.By("creating replication helpers")
112
-			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
113
-			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
114
-			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())
115
-
116
-			// Test if we can query as root
117
-			g.By("wait for mysql-master endpoint")
118
-			oc.KubeFramework().WaitForAnEndpoint("mysql-master")
119
-			err := helper.TestRemoteLogin(oc, "mysql-master")
120
-			o.Expect(err).NotTo(o.HaveOccurred())
121
-
122
-			// Create a new table with random name
123
-			g.By("create new table")
124
-			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
125
-			o.Expect(err).NotTo(o.HaveOccurred())
126
-
127
-			// Write new data to the table through master
128
-			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
129
-			o.Expect(err).NotTo(o.HaveOccurred())
130
-
131
-			// Make sure data is present on master
132
-			err = exutil.WaitForQueryOutputContains(oc, master, 10*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
133
-			o.Expect(err).NotTo(o.HaveOccurred())
134
-
135
-			// Make sure data was replicated to all slaves
136
-			for _, slave := range slaves {
137
-				err = exutil.WaitForQueryOutputContains(oc, slave, 90*time.Second, false, fmt.Sprintf("SELECT * FROM %s\\G;", table), "col1: val1\ncol2: val2")
138
-				o.Expect(err).NotTo(o.HaveOccurred())
139
-			}
140
-
141
-			return master, slaves, helper
142
-		}
143
-
144
-		g.By("after initial deployment")
145
-		master, _, _ := assertReplicationIsWorking("mysql-master-1", "mysql-slave-1", 1)
146
-
147
-		if tc.SkipReplication {
148
-			return
149
-		}
150
-
151
-		g.By("after master is restarted by changing the Deployment Config")
152
-		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
153
-		o.Expect(err).NotTo(o.HaveOccurred())
154
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
-		o.Expect(err).NotTo(o.HaveOccurred())
155
-		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)
156
-
157
-		g.By("after master is restarted by deleting the pod")
158
-		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
159
-		o.Expect(err).NotTo(o.HaveOccurred())
160
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
161
-		o.Expect(err).NotTo(o.HaveOccurred())
162
-		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)
163
-
164
-		g.By("after slave is restarted by deleting the pod")
165
-		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
166
-		o.Expect(err).NotTo(o.HaveOccurred())
167
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
168
-		o.Expect(err).NotTo(o.HaveOccurred())
169
-		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)
170
-
171
-		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
172
-		o.Expect(err).NotTo(o.HaveOccurred())
173
-		o.Expect(len(pods.Items)).To(o.Equal(1))
174
-
175
-		// NOTE: Commented out, current template does not support multiple replicas.
176
-		/*
177
-			g.By("after slave is scaled to 0 and then back to 4 replicas")
178
-			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
179
-			o.Expect(err).NotTo(o.HaveOccurred())
180
-			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
181
-			o.Expect(err).NotTo(o.HaveOccurred())
182
-			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
183
-			o.Expect(err).NotTo(o.HaveOccurred())
184
-			assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 4)
185
-		*/
186
-	}
187
-}
188
-
189
-var _ = g.Describe("[images][mysql][Slow] openshift mysql replication", func() {
190
-	defer g.GinkgoRecover()
191
-
192
-	ocs := make([]*exutil.CLI, len(testCases))
193
-	for i, tc := range testCases {
194
-		ocs[i] = exutil.NewCLI(fmt.Sprintf("mysql-replication-%d", i), exutil.KubeConfigPath())
195
-		g.It(fmt.Sprintf("MySQL replication template for %s: %s", tc.Version, tc.TemplatePath), replicationTestFactory(ocs[i], tc))
196
-	}
197
-})
198 1
deleted file mode 100644
... ...
@@ -1,183 +0,0 @@
1
-package images
2
-
3
-import (
4
-	"fmt"
5
-	"time"
6
-
7
-	g "github.com/onsi/ginkgo"
8
-	o "github.com/onsi/gomega"
9
-
10
-	templateapi "github.com/openshift/origin/pkg/template/api"
11
-	exutil "github.com/openshift/origin/test/extended/util"
12
-	"github.com/openshift/origin/test/extended/util/db"
13
-	testutil "github.com/openshift/origin/test/util"
14
-
15
-	kapi "k8s.io/kubernetes/pkg/api"
16
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
17
-)
18
-
19
-var (
20
-	postgreSQLReplicationTemplate = "https://raw.githubusercontent.com/sclorg/postgresql-container/master/examples/replica/postgresql_replica.json"
21
-	postgreSQLEphemeralTemplate   = exutil.FixturePath("..", "..", "examples", "db-templates", "postgresql-ephemeral-template.json")
22
-	postgreSQLHelperName          = "postgresql-helper"
23
-	postgreSQLImages              = []string{
24
-		"postgresql:9.2",
25
-		"postgresql:9.4",
26
-		"postgresql:9.5",
27
-	}
28
-)
29
-
30
-var _ = g.Describe("[LocalNode][images][postgresql][Slow] openshift postgresql replication", func() {
31
-	defer g.GinkgoRecover()
32
-
33
-	for i, image := range postgreSQLImages {
34
-		oc := exutil.NewCLI(fmt.Sprintf("postgresql-replication-%d", i), exutil.KubeConfigPath())
35
-		testFn := PostgreSQLReplicationTestFactory(oc, image)
36
-		g.It(fmt.Sprintf("postgresql replication works for %s", image), testFn)
37
-	}
38
-})
39
-
40
-// CreatePostgreSQLReplicationHelpers creates a set of PostgreSQL helpers for master,
41
-// slave and an extra helper that is used for the remote login test.
42
-func CreatePostgreSQLReplicationHelpers(c kclient.PodInterface, masterDeployment, slaveDeployment, helperDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
43
-	podNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", masterDeployment)), exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
44
-	o.Expect(err).NotTo(o.HaveOccurred())
45
-	masterPod := podNames[0]
46
-
47
-	slavePods, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", slaveDeployment)), exutil.CheckPodIsRunningFn, slaveCount, 3*time.Minute)
48
-	o.Expect(err).NotTo(o.HaveOccurred())
49
-
50
-	// Create PostgreSQL helper for master
51
-	master := db.NewPostgreSQL(masterPod, "")
52
-
53
-	// Create PostgreSQL helpers for slaves
54
-	slaves := make([]exutil.Database, len(slavePods))
55
-	for i := range slavePods {
56
-		slave := db.NewPostgreSQL(slavePods[i], masterPod)
57
-		slaves[i] = slave
58
-	}
59
-
60
-	helperNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", helperDeployment)), exutil.CheckPodIsRunningFn, 1, 1*time.Minute)
61
-	o.Expect(err).NotTo(o.HaveOccurred())
62
-	helper := db.NewPostgreSQL(helperNames[0], masterPod)
63
-
64
-	return master, slaves, helper
65
-}
66
-
67
-func PostgreSQLReplicationTestFactory(oc *exutil.CLI, image string) func() {
68
-	return func() {
69
-		oc.SetOutputDir(exutil.TestContext.OutputDir)
70
-		defer cleanup(oc)
71
-
72
-		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "512Mi", 1)
73
-		o.Expect(err).NotTo(o.HaveOccurred())
74
-
75
-		err = testutil.WaitForPolicyUpdate(oc.REST(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
76
-		o.Expect(err).NotTo(o.HaveOccurred())
77
-
78
-		exutil.CheckOpenShiftNamespaceImageStreams(oc)
79
-		err = oc.Run("new-app").Args("-f", postgreSQLReplicationTemplate, "-p", fmt.Sprintf("IMAGESTREAMTAG=%s", image)).Execute()
80
-		o.Expect(err).NotTo(o.HaveOccurred())
81
-
82
-		err = oc.Run("new-app").Args("-f", postgreSQLEphemeralTemplate, "-p", fmt.Sprintf("DATABASE_SERVICE_NAME=%s", postgreSQLHelperName)).Execute()
83
-		o.Expect(err).NotTo(o.HaveOccurred())
84
-
85
-		// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
86
-		// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
87
-		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), postgreSQLHelperName, oc)
88
-		o.Expect(err).NotTo(o.HaveOccurred())
89
-
90
-		err = oc.KubeFramework().WaitForAnEndpoint(postgreSQLHelperName)
91
-		o.Expect(err).NotTo(o.HaveOccurred())
92
-
93
-		tableCounter := 0
94
-		assertReplicationIsWorking := func(masterDeployment, slaveDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
95
-			check := func(err error) {
96
-				if err != nil {
97
-					exutil.DumpDeploymentLogs("postgresql-master", oc)
98
-					exutil.DumpDeploymentLogs("postgresql-slave", oc)
99
-				}
100
-				o.Expect(err).NotTo(o.HaveOccurred())
101
-			}
102
-
103
-			tableCounter++
104
-			table := fmt.Sprintf("table_%0.2d", tableCounter)
105
-
106
-			master, slaves, helper := CreatePostgreSQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", postgreSQLHelperName), slaveCount)
107
-			err := exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})
108
-			if err != nil {
109
-				exutil.DumpDeploymentLogs("postgresql-master", oc)
110
-				exutil.DumpDeploymentLogs("postgresql-helper", oc)
111
-			}
112
-			o.Expect(err).NotTo(o.HaveOccurred())
113
-
114
-			err = exutil.WaitUntilAllHelpersAreUp(oc, slaves)
115
-			check(err)
116
-
117
-			// Test if we can query as admin
118
-			oc.KubeFramework().WaitForAnEndpoint("postgresql-master")
119
-			err = helper.TestRemoteLogin(oc, "postgresql-master")
120
-			check(err)
121
-
122
-			// Create a new table with random name
123
-			_, err = master.Query(oc, fmt.Sprintf("CREATE TABLE %s (col1 VARCHAR(20), col2 VARCHAR(20));", table))
124
-			check(err)
125
-
126
-			// Write new data to the table through master
127
-			_, err = master.Query(oc, fmt.Sprintf("INSERT INTO %s (col1, col2) VALUES ('val1', 'val2');", table))
128
-			check(err)
129
-
130
-			// Make sure data is present on master
131
-			err = exutil.WaitForQueryOutputContains(oc, master, 10*time.Second, false,
132
-				fmt.Sprintf("SELECT * FROM %s;", table),
133
-				"col1 | val1\ncol2 | val2")
134
-			check(err)
135
-
136
-			// Make sure data was replicated to all slaves
137
-			for _, slave := range slaves {
138
-				err = exutil.WaitForQueryOutputContains(oc, slave, 90*time.Second, false,
139
-					fmt.Sprintf("SELECT * FROM %s;", table),
140
-					"col1 | val1\ncol2 | val2")
141
-				check(err)
142
-			}
143
-
144
-			return master, slaves, helper
145
-		}
146
-
147
-		g.By("after initial deployment")
148
-		master, _, _ := assertReplicationIsWorking("postgresql-master-1", "postgresql-slave-1", 1)
149
-
150
-		g.By("after master is restarted by changing the Deployment Config")
151
-		err = oc.Run("env").Args("dc", "postgresql-master", "POSTGRESQL_ADMIN_PASSWORD=newpass").Execute()
152
-		o.Expect(err).NotTo(o.HaveOccurred())
153
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
-		o.Expect(err).NotTo(o.HaveOccurred())
154
-		master, _, _ = assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 1)
155
-
156
-		g.By("after master is restarted by deleting the pod")
157
-		err = oc.Run("delete").Args("pod", "-l", "deployment=postgresql-master-2").Execute()
158
-		o.Expect(err).NotTo(o.HaveOccurred())
159
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
160
-		o.Expect(err).NotTo(o.HaveOccurred())
161
-		_, slaves, _ := assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 1)
162
-
163
-		g.By("after slave is restarted by deleting the pod")
164
-		err = oc.Run("delete").Args("pod", "-l", "deployment=postgresql-slave-1").Execute()
165
-		o.Expect(err).NotTo(o.HaveOccurred())
166
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
167
-		o.Expect(err).NotTo(o.HaveOccurred())
168
-		assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 1)
169
-
170
-		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=postgresql-slave-1")})
171
-		o.Expect(err).NotTo(o.HaveOccurred())
172
-		o.Expect(len(pods.Items)).To(o.Equal(1))
173
-
174
-		g.By("after slave is scaled to 0 and then back to 4 replicas")
175
-		err = oc.Run("scale").Args("dc", "postgresql-slave", "--replicas=0").Execute()
176
-		o.Expect(err).NotTo(o.HaveOccurred())
177
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
178
-		o.Expect(err).NotTo(o.HaveOccurred())
179
-		err = oc.Run("scale").Args("dc", "postgresql-slave", "--replicas=4").Execute()
180
-		o.Expect(err).NotTo(o.HaveOccurred())
181
-		assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 4)
182
-	}
183
-}
184 1
deleted file mode 100644
... ...
@@ -1,131 +0,0 @@
1
-package images
2
-
3
-import "fmt"
4
-
5
-type ImageBaseType string
6
-
7
-const (
8
-	RHELBased   ImageBaseType = "rhel7"
9
-	CentosBased ImageBaseType = "centos7"
10
-	AllImages   ImageBaseType = "all"
11
-)
12
-
13
-type tc struct {
14
-	// The image version string (eg. '27' or '34')
15
-	Version string
16
-	// The base OS ('rhel7' or 'centos7')
17
-	BaseOS ImageBaseType
18
-	// Command to execute
19
-	Cmd string
20
-	// Expected output from the command
21
-	Expected string
22
-
23
-	// Repository is either openshift/ or rhscl/
24
-	// The default is 'openshift'
25
-	Repository string
26
-
27
-	// Internal: We resolve this in JustBeforeEach
28
-	DockerImageReference string
29
-}
30
-
31
-// Internal OpenShift registry to fetch the RHEL7 images from
32
-const InternalRegistryAddr = "ci.dev.openshift.redhat.com:5000"
33
-
34
-// This is a complete list of supported S2I images
35
-var s2iImages = map[string][]tc{
36
-	"ruby": {
37
-		{
38
-			Version:  "20",
39
-			Cmd:      "ruby --version",
40
-			Expected: "ruby 2.0.0",
41
-		},
42
-		{
43
-			Version:  "22",
44
-			Cmd:      "ruby --version",
45
-			Expected: "ruby 2.2.2",
46
-		},
47
-	},
48
-	"python": {
49
-		{
50
-			Version:  "27",
51
-			Cmd:      "python --version",
52
-			Expected: "Python 2.7.8",
53
-		},
54
-		{
55
-			Version:  "33",
56
-			Cmd:      "python --version",
57
-			Expected: "Python 3.3.2",
58
-		},
59
-	},
60
-	"nodejs": {
61
-		{
62
-			Version:  "010",
63
-			Cmd:      "node --version",
64
-			Expected: "v0.10",
65
-		},
66
-	},
67
-	"perl": {
68
-		{
69
-			Version:  "516",
70
-			Cmd:      "perl --version",
71
-			Expected: "v5.16.3",
72
-		},
73
-		{
74
-			Version:  "520",
75
-			Cmd:      "perl --version",
76
-			Expected: "v5.20.1",
77
-		},
78
-	},
79
-	"php": {
80
-		{
81
-			Version:  "55",
82
-			Cmd:      "php --version",
83
-			Expected: "5.5",
84
-		},
85
-		{
86
-			Version:  "56",
87
-			Cmd:      "php --version",
88
-			Expected: "5.6",
89
-		},
90
-	},
91
-}
92
-
93
-func GetTestCaseForImages(base ImageBaseType) map[string][]tc {
94
-	if base == AllImages {
95
-		result := GetTestCaseForImages(RHELBased)
96
-		for n, t := range GetTestCaseForImages(CentosBased) {
97
-			result[n] = append(result[n], t...)
98
-		}
99
-		return result
100
-	}
101
-	result := make(map[string][]tc)
102
-	for name, variants := range s2iImages {
103
-		switch base {
104
-		case RHELBased:
105
-			for i := range variants {
106
-				variants[i].BaseOS = RHELBased
107
-				resolveDockerImageReference(name, &variants[i])
108
-				result[name] = append(result[name], variants[i])
109
-			}
110
-		case CentosBased:
111
-			for i := range variants {
112
-				variants[i].BaseOS = CentosBased
113
-				resolveDockerImageReference(name, &variants[i])
114
-				result[name] = append(result[name], variants[i])
115
-
116
-			}
117
-		}
118
-	}
119
-	return result
120
-}
121
-
122
-// resolveDockerImageReference resolves the pull spec for a single image test case
123
-func resolveDockerImageReference(name string, t *tc) {
124
-	if len(t.Repository) == 0 {
125
-		t.Repository = "openshift"
126
-	}
127
-	t.DockerImageReference = fmt.Sprintf("%s/%s-%s-%s", t.Repository, name, t.Version, t.BaseOS)
128
-	if t.BaseOS == RHELBased {
129
-		t.DockerImageReference = fmt.Sprintf("%s/%s", InternalRegistryAddr, t.DockerImageReference)
130
-	}
131
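Worked through the format strings above: a centos7 ruby test case with version "22" resolves to "openshift/ruby-22-centos7", while its rhel7 counterpart additionally gains the internal registry prefix, giving "ci.dev.openshift.redhat.com:5000/openshift/ruby-22-rhel7".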
-}
132 1
deleted file mode 100644
... ...
@@ -1,87 +0,0 @@
1
-package images
2
-
3
-import (
4
-	"fmt"
5
-	"time"
6
-
7
-	g "github.com/onsi/ginkgo"
8
-	o "github.com/onsi/gomega"
9
-
10
-	kapi "k8s.io/kubernetes/pkg/api"
11
-
12
-	exutil "github.com/openshift/origin/test/extended/util"
13
-)
14
-
15
-var _ = g.Describe("[images][perl][Slow] hot deploy for openshift perl image", func() {
16
-	defer g.GinkgoRecover()
17
-	var (
18
-		dancerTemplate = "https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql.json"
19
-		oc             = exutil.NewCLI("s2i-perl", exutil.KubeConfigPath())
20
-		modifyCommand  = []string{"sed", "-ie", `s/data => \$data\[0\]/data => "1337"/`, "lib/default.pm"}
21
-		pageCountFn    = func(count int) string { return fmt.Sprintf(`<span class="code" id="count-value">%d</span>`, count) }
22
-		dcName         = "dancer-mysql-example-1"
23
-		dcLabel        = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName))
24
-	)
25
-
26
-	g.Describe("Dancer example", func() {
27
-		g.It(fmt.Sprintf("should work with hot deploy"), func() {
28
-			oc.SetOutputDir(exutil.TestContext.OutputDir)
29
-
30
-			exutil.CheckOpenShiftNamespaceImageStreams(oc)
31
-			g.By(fmt.Sprintf("calling oc new-app -f %q", dancerTemplate))
32
-			err := oc.Run("new-app").Args("-f", dancerTemplate).Execute()
33
-			o.Expect(err).NotTo(o.HaveOccurred())
34
-
35
-			g.By("waiting for build to finish")
36
-			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "dancer-mysql-example-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
37
-			if err != nil {
38
-				exutil.DumpBuildLogs("dancer-mysql-example", oc)
39
-			}
40
-			o.Expect(err).NotTo(o.HaveOccurred())
41
-
42
-			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
43
-			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
44
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "dancer-mysql-example", oc)
45
-			o.Expect(err).NotTo(o.HaveOccurred())
46
-
47
-			g.By("waiting for endpoint")
48
-			err = oc.KubeFramework().WaitForAnEndpoint("dancer-mysql-example")
49
-			o.Expect(err).NotTo(o.HaveOccurred())
50
-
51
-			assertPageCountIs := func(i int) {
52
-				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
53
-				o.Expect(err).NotTo(o.HaveOccurred())
54
-
55
-				result, err := CheckPageContains(oc, "dancer-mysql-example", "", pageCountFn(i))
56
-				o.Expect(err).NotTo(o.HaveOccurred())
57
-				o.Expect(result).To(o.BeTrue())
58
-			}
59
-
60
-			g.By("checking page count")
61
-			assertPageCountIs(1)
62
-			assertPageCountIs(2)
63
-
64
-			g.By("modifying the source code with disabled hot deploy")
65
-			RunInPodContainer(oc, dcLabel, modifyCommand)
66
-			assertPageCountIs(3)
67
-
68
-			pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
69
-			o.Expect(err).NotTo(o.HaveOccurred())
70
-			o.Expect(len(pods.Items)).To(o.Equal(1))
71
-
72
-			g.By("turning on hot-deploy")
73
-			err = oc.Run("env").Args("rc", dcName, "PERL_APACHE2_RELOAD=true").Execute()
74
-			o.Expect(err).NotTo(o.HaveOccurred())
75
-			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
76
-			o.Expect(err).NotTo(o.HaveOccurred())
77
-			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
78
-			o.Expect(err).NotTo(o.HaveOccurred())
79
-			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
80
-			o.Expect(err).NotTo(o.HaveOccurred())
81
-
82
-			g.By("modifying the source code with enabled hot deploy")
83
-			RunInPodContainer(oc, dcLabel, modifyCommand)
84
-			assertPageCountIs(1337)
85
-		})
86
-	})
87
-})
88 1
deleted file mode 100644
... ...
@@ -1,69 +0,0 @@
1
-package images
2
-
3
-import (
4
-	"fmt"
5
-	"time"
6
-
7
-	g "github.com/onsi/ginkgo"
8
-	o "github.com/onsi/gomega"
9
-
10
-	exutil "github.com/openshift/origin/test/extended/util"
11
-)
12
-
13
-var _ = g.Describe("[images][php][Slow] hot deploy for openshift php image", func() {
14
-	defer g.GinkgoRecover()
15
-	var (
16
-		cakephpTemplate = "https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp-mysql.json"
17
-		oc              = exutil.NewCLI("s2i-php", exutil.KubeConfigPath())
18
-		hotDeployParam  = "OPCACHE_REVALIDATE_FREQ=0"
19
-		modifyCommand   = []string{"sed", "-ie", `s/\$result\['c'\]/1337/`, "app/View/Layouts/default.ctp"}
20
-		pageCountFn     = func(count int) string { return fmt.Sprintf(`<span class="code" id="count-value">%d</span>`, count) }
21
-		dcName          = "cakephp-mysql-example-1"
22
-		dcLabel         = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName))
23
-	)
24
-	g.Describe("CakePHP example", func() {
25
-		g.It(fmt.Sprintf("should work with hot deploy"), func() {
26
-			oc.SetOutputDir(exutil.TestContext.OutputDir)
27
-
28
-			exutil.CheckOpenShiftNamespaceImageStreams(oc)
29
-			g.By(fmt.Sprintf("calling oc new-app -f %q -p %q", cakephpTemplate, hotDeployParam))
30
-			err := oc.Run("new-app").Args("-f", cakephpTemplate, "-p", hotDeployParam).Execute()
31
-			o.Expect(err).NotTo(o.HaveOccurred())
32
-
33
-			g.By("waiting for build to finish")
34
-			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
35
-			if err != nil {
36
-				exutil.DumpBuildLogs("cakephp-mysql-example", oc)
37
-			}
38
-			o.Expect(err).NotTo(o.HaveOccurred())
39
-
40
-			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
41
-			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
42
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "cakephp-mysql-example", oc)
43
-			o.Expect(err).NotTo(o.HaveOccurred())
44
-
45
-			g.By("waiting for endpoint")
46
-			err = oc.KubeFramework().WaitForAnEndpoint("cakephp-mysql-example")
47
-			o.Expect(err).NotTo(o.HaveOccurred())
48
-
49
-			assertPageCountIs := func(i int) {
50
-				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
51
-				o.Expect(err).NotTo(o.HaveOccurred())
52
-
53
-				result, err := CheckPageContains(oc, "cakephp-mysql-example", "", pageCountFn(i))
54
-				o.Expect(err).NotTo(o.HaveOccurred())
55
-				o.Expect(result).To(o.BeTrue())
56
-			}
57
-
58
-			g.By("checking page count")
59
-
60
-			assertPageCountIs(1)
61
-			assertPageCountIs(2)
62
-
63
-			g.By("modifying the source code with disabled hot deploy")
64
-			RunInPodContainer(oc, dcLabel, modifyCommand)
65
-			g.By("checking page count after modifying the source code")
66
-			assertPageCountIs(1337)
67
-		})
68
-	})
69
-})
70 1
deleted file mode 100644
... ...
@@ -1,88 +0,0 @@
1
-package images
2
-
3
-import (
4
-	"fmt"
5
-	"time"
6
-
7
-	g "github.com/onsi/ginkgo"
8
-	o "github.com/onsi/gomega"
9
-
10
-	kapi "k8s.io/kubernetes/pkg/api"
11
-
12
-	exutil "github.com/openshift/origin/test/extended/util"
13
-)
14
-
15
-var _ = g.Describe("[images][python][Slow] hot deploy for openshift python image", func() {
16
-	defer g.GinkgoRecover()
17
-
18
-	var (
19
-		oc               = exutil.NewCLI("s2i-python", exutil.KubeConfigPath())
20
-		djangoRepository = "https://github.com/openshift/django-ex.git"
21
-		modifyCommand    = []string{"sed", "-ie", `s/'count': PageView.objects.count()/'count': 1337/`, "welcome/views.py"}
22
-		pageCountFn      = func(count int) string { return fmt.Sprintf("Page views: %d", count) }
23
-		dcName           = "django-ex-1"
24
-		dcLabel          = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName))
25
-	)
26
-	g.Describe("Django example", func() {
27
-		g.It(fmt.Sprintf("should work with hot deploy"), func() {
28
-			oc.SetOutputDir(exutil.TestContext.OutputDir)
29
-
30
-			err := exutil.WaitForOpenShiftNamespaceImageStreams(oc)
31
-			o.Expect(err).NotTo(o.HaveOccurred())
32
-			g.By(fmt.Sprintf("calling oc new-app %s", djangoRepository))
33
-			err = oc.Run("new-app").Args(djangoRepository, "--strategy=source").Execute()
34
-			o.Expect(err).NotTo(o.HaveOccurred())
35
-
36
-			g.By("waiting for build to finish")
37
-			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "django-ex-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
38
-			if err != nil {
39
-				exutil.DumpBuildLogs("django-ex", oc)
40
-			}
41
-			o.Expect(err).NotTo(o.HaveOccurred())
42
-
43
-			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
44
-			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
45
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "django-ex", oc)
46
-			o.Expect(err).NotTo(o.HaveOccurred())
47
-
48
-			g.By("waiting for endpoint")
49
-			err = oc.KubeFramework().WaitForAnEndpoint("django-ex")
50
-			o.Expect(err).NotTo(o.HaveOccurred())
51
-
52
-			assertPageCountIs := func(i int) {
53
-				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
54
-				o.Expect(err).NotTo(o.HaveOccurred())
55
-
56
-				result, err := CheckPageContains(oc, "django-ex", "", pageCountFn(i))
57
-				o.Expect(err).NotTo(o.HaveOccurred())
58
-				o.Expect(result).To(o.BeTrue())
59
-			}
60
-
61
-			g.By("checking page count")
62
-			assertPageCountIs(1)
63
-			assertPageCountIs(2)
64
-
65
-			g.By("modifying the source code with disabled hot deploy")
66
-			RunInPodContainer(oc, dcLabel, modifyCommand)
67
-			assertPageCountIs(3)
68
-
69
-			pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
70
-			o.Expect(err).NotTo(o.HaveOccurred())
71
-			o.Expect(len(pods.Items)).To(o.Equal(1))
72
-
73
-			g.By("turning on hot-deploy")
74
-			err = oc.Run("env").Args("rc", dcName, "APP_CONFIG=conf/reload.py").Execute()
75
-			o.Expect(err).NotTo(o.HaveOccurred())
76
-			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
77
-			o.Expect(err).NotTo(o.HaveOccurred())
78
-			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
79
-			o.Expect(err).NotTo(o.HaveOccurred())
80
-			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
81
-			o.Expect(err).NotTo(o.HaveOccurred())
82
-
83
-			g.By("modifying the source code with enabled hot deploy")
84
-			RunInPodContainer(oc, dcLabel, modifyCommand)
85
-			assertPageCountIs(1337)
86
-		})
87
-	})
88
-})
89 1
deleted file mode 100644
... ...
@@ -1,87 +0,0 @@
1
-package images
2
-
3
-import (
4
-	"fmt"
5
-	"time"
6
-
7
-	g "github.com/onsi/ginkgo"
8
-	o "github.com/onsi/gomega"
9
-
10
-	kapi "k8s.io/kubernetes/pkg/api"
11
-
12
-	exutil "github.com/openshift/origin/test/extended/util"
13
-)
14
-
15
-var _ = g.Describe("[images][ruby][Slow] hot deploy for openshift ruby image", func() {
16
-	defer g.GinkgoRecover()
17
-	var (
18
-		railsTemplate = "https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json"
19
-		oc            = exutil.NewCLI("s2i-ruby", exutil.KubeConfigPath())
20
-		modifyCommand = []string{"sed", "-ie", `s%render :file => 'public/index.html'%%`, "app/controllers/welcome_controller.rb"}
21
-		removeCommand = []string{"rm", "-f", "public/index.html"}
22
-		dcName        = "rails-postgresql-example-1"
23
-		dcLabel       = exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", dcName))
24
-	)
25
-	g.Describe("Rails example", func() {
26
-		g.It(fmt.Sprintf("should work with hot deploy"), func() {
27
-			oc.SetOutputDir(exutil.TestContext.OutputDir)
28
-
29
-			exutil.CheckOpenShiftNamespaceImageStreams(oc)
30
-			g.By(fmt.Sprintf("calling oc new-app -f %q", railsTemplate))
31
-			err := oc.Run("new-app").Args("-f", railsTemplate).Execute()
32
-			o.Expect(err).NotTo(o.HaveOccurred())
33
-
34
-			g.By("waiting for build to finish")
35
-			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
36
-			if err != nil {
37
-				exutil.DumpBuildLogs("rails-postgresql-example", oc)
38
-			}
39
-			o.Expect(err).NotTo(o.HaveOccurred())
40
-
41
-			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
42
-			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
43
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "rails-postgresql-example", oc)
44
-			o.Expect(err).NotTo(o.HaveOccurred())
45
-
46
-			g.By("waiting for endpoint")
47
-			err = oc.KubeFramework().WaitForAnEndpoint("rails-postgresql-example")
48
-			o.Expect(err).NotTo(o.HaveOccurred())
49
-
50
-			assertPageContent := func(content string) {
51
-				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
52
-				o.Expect(err).NotTo(o.HaveOccurred())
53
-
54
-				result, err := CheckPageContains(oc, "rails-postgresql-example", "", content)
55
-				o.Expect(err).NotTo(o.HaveOccurred())
56
-				o.Expect(result).To(o.BeTrue())
57
-			}
58
-
59
-			g.By("testing application content")
60
-			assertPageContent("Welcome to your Rails application on OpenShift")
61
-			g.By("modifying the source code with disabled hot deploy")
62
-			RunInPodContainer(oc, dcLabel, modifyCommand)
63
-			RunInPodContainer(oc, dcLabel, removeCommand)
64
-			g.By("testing application content source modification")
65
-			assertPageContent("Welcome to your Rails application on OpenShift")
66
-
67
-			pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
68
-			o.Expect(err).NotTo(o.HaveOccurred())
69
-			o.Expect(len(pods.Items)).To(o.Equal(1))
70
-
71
-			g.By("turning on hot-deploy")
72
-			err = oc.Run("env").Args("rc", dcName, "RAILS_ENV=development").Execute()
73
-			o.Expect(err).NotTo(o.HaveOccurred())
74
-			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
75
-			o.Expect(err).NotTo(o.HaveOccurred())
76
-			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
77
-			o.Expect(err).NotTo(o.HaveOccurred())
78
-			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
79
-			o.Expect(err).NotTo(o.HaveOccurred())
80
-
81
-			g.By("modifying the source code with enabled hot deploy")
82
-			RunInPodContainer(oc, dcLabel, modifyCommand)
83
-			RunInPodContainer(oc, dcLabel, removeCommand)
84
-			assertPageContent("Hello, Rails!")
85
-		})
86
-	})
87
-})
88 1
deleted file mode 100644
... ...
@@ -1,225 +0,0 @@
1
-package images
2
-
3
-import (
4
-	"fmt"
5
-	"time"
6
-
7
-	g "github.com/onsi/ginkgo"
8
-	o "github.com/onsi/gomega"
9
-
10
-	exutil "github.com/openshift/origin/test/extended/util"
11
-)
12
-
13
-type SampleRepoConfig struct {
14
-	repoName               string // short name; also used to derive the test namespace
15
-	templateURL            string // template instantiated via oc new-app -f
16
-	buildConfigName        string // build config whose first build must complete
17
-	serviceName            string // service fronting the application
18
-	deploymentConfigName   string // application deployment config expected to roll out
19
-	expectedString         string // substring the HTTP response must contain
20
-	appPath                string // request path appended to the service address
21
-	dbDeploymentConfigName string // optional database deployment config ("" skips db checks)
22
-	dbServiceName          string // optional database service ("" skips db checks)
23
-}
24
-
25
-// NewSampleRepoTest creates a function for a new ginkgo test case that will instantiate a template
26
-// from a url, kick off the buildconfig defined in that template, wait for the build/deploy,
27
-// and then confirm the application is serving an expected string value.
28
-func NewSampleRepoTest(c SampleRepoConfig) func() {
29
-	return func() {
30
-		defer g.GinkgoRecover()
31
-		var oc = exutil.NewCLI(c.repoName+"-repo-test", exutil.KubeConfigPath())
32
-
33
-		g.JustBeforeEach(func() {
34
-			g.By("Waiting for builder service account")
35
-			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
36
-			o.Expect(err).NotTo(o.HaveOccurred())
37
-		})
38
-
39
-		g.Describe("Building "+c.repoName+" app from new-app", func() {
40
-			g.It(fmt.Sprintf("should build a "+c.repoName+" image and run it in a pod"), func() {
41
-				oc.SetOutputDir(exutil.TestContext.OutputDir)
42
-
43
-				err := exutil.WaitForOpenShiftNamespaceImageStreams(oc)
44
-				o.Expect(err).NotTo(o.HaveOccurred())
45
-				g.By(fmt.Sprintf("calling oc new-app with the " + c.repoName + " example template"))
46
-				err = oc.Run("new-app").Args("-f", c.templateURL).Execute()
47
-				o.Expect(err).NotTo(o.HaveOccurred())
48
-
49
-				// all the templates automatically start a build.
50
-				buildName := c.buildConfigName + "-1"
51
-
52
-				g.By("expecting the build is in the Complete phase")
53
-				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
54
-				if err != nil {
55
-					exutil.DumpBuildLogs(c.buildConfigName, oc)
56
-				}
57
-				o.Expect(err).NotTo(o.HaveOccurred())
58
-
59
-				g.By("expecting the app deployment to be complete")
60
-				err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
61
-				o.Expect(err).NotTo(o.HaveOccurred())
62
-
63
-				if len(c.dbDeploymentConfigName) > 0 {
64
-					g.By("expecting the db deployment to be complete")
65
-					err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
66
-					o.Expect(err).NotTo(o.HaveOccurred())
67
-
68
-					g.By("expecting the db service is available")
69
-					serviceIP, err := oc.Run("get").Args("service", c.dbServiceName).Template("{{ .spec.clusterIP }}").Output()
70
-					o.Expect(err).NotTo(o.HaveOccurred())
71
-					o.Expect(serviceIP).ShouldNot(o.Equal(""))
72
-
73
-					g.By("expecting a db endpoint is available")
74
-					err = oc.KubeFramework().WaitForAnEndpoint(c.dbServiceName)
75
-					o.Expect(err).NotTo(o.HaveOccurred())
76
-				}
77
-
78
-				g.By("expecting the app service is available")
79
-				serviceIP, err := oc.Run("get").Args("service", c.serviceName).Template("{{ .spec.clusterIP }}").Output()
80
-				o.Expect(err).NotTo(o.HaveOccurred())
81
-				o.Expect(serviceIP).ShouldNot(o.Equal(""))
82
-
83
-				g.By("expecting an app endpoint is available")
84
-				err = oc.KubeFramework().WaitForAnEndpoint(c.serviceName)
85
-				o.Expect(err).NotTo(o.HaveOccurred())
86
-
87
-				g.By("verifying string from app request")
88
-				response, err := exutil.FetchURL("http://"+serviceIP+":8080"+c.appPath, 30*time.Second)
89
-				o.Expect(err).NotTo(o.HaveOccurred())
90
-				o.Expect(response).Should(o.ContainSubstring(c.expectedString))
91
-			})
92
-		})
93
-	}
94
-}
95
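The positional SampleRepoConfig literals below are easy to misread because several fields repeat the same value. A sketch of the first entry with field names spelled out (same fields as the struct declared above; behavior is unchanged):

g.Describe("[images][ruby] test ruby images with rails-ex db repo", NewSampleRepoTest(
	SampleRepoConfig{
		repoName:               "rails-postgresql",
		templateURL:            "https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json",
		buildConfigName:        "rails-postgresql-example",
		serviceName:            "rails-postgresql-example",
		deploymentConfigName:   "rails-postgresql-example",
		expectedString:         "Listing articles",
		appPath:                "/articles",
		dbDeploymentConfigName: "postgresql",
		dbServiceName:          "postgresql",
	},
))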
-
96
-var _ = g.Describe("[images][Slow] openshift sample application repositories", func() {
97
-
98
-	g.Describe("[images][ruby] test ruby images with rails-ex db repo", NewSampleRepoTest(
99
-		SampleRepoConfig{
100
-			"rails-postgresql",
101
-			"https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json",
102
-			"rails-postgresql-example",
103
-			"rails-postgresql-example",
104
-			"rails-postgresql-example",
105
-			"Listing articles",
106
-			"/articles",
107
-			"postgresql",
108
-			"postgresql",
109
-		},
110
-	))
111
-
112
-	g.Describe("[images][python] test python images with django-ex db repo", NewSampleRepoTest(
113
-		SampleRepoConfig{
114
-			"django-psql",
115
-			"https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql.json",
116
-			"django-psql-example",
117
-			"django-psql-example",
118
-			"django-psql-example",
119
-			"Page views: 1",
120
-			"",
121
-			"postgresql",
122
-			"postgresql",
123
-		},
124
-	))
125
-
126
-	g.Describe("[images][nodejs] test nodejs images with nodejs-ex db repo", NewSampleRepoTest(
127
-		SampleRepoConfig{
128
-			"nodejs-mongodb",
129
-			"https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json",
130
-			"nodejs-mongodb-example",
131
-			"nodejs-mongodb-example",
132
-			"nodejs-mongodb-example",
133
-			"<span class=\"code\" id=\"count-value\">1</span>",
134
-			"",
135
-			"mongodb",
136
-			"mongodb",
137
-		},
138
-	))
139
-
140
-	var _ = g.Describe("[images][php] test php images with cakephp-ex db repo", NewSampleRepoTest(
141
-		SampleRepoConfig{
142
-			"cakephp-mysql",
143
-			"https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp-mysql.json",
144
-			"cakephp-mysql-example",
145
-			"cakephp-mysql-example",
146
-			"cakephp-mysql-example",
147
-			"<span class=\"code\" id=\"count-value\">1</span>",
148
-			"",
149
-			"mysql",
150
-			"mysql",
151
-		},
152
-	))
153
-
154
-	var _ = g.Describe("[images][perl] test perl images with dancer-ex db repo", NewSampleRepoTest(
155
-		SampleRepoConfig{
156
-			"dancer-mysql",
157
-			"https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql.json",
158
-			"dancer-mysql-example",
159
-			"dancer-mysql-example",
160
-			"dancer-mysql-example",
161
-			"<span class=\"code\" id=\"count-value\">1</span>",
162
-			"",
163
-			"database",
164
-			"database",
165
-		},
166
-	))
167
-
168
-	// test the no-db templates too
169
-	g.Describe("[images][python] test python images with django-ex repo", NewSampleRepoTest(
170
-		SampleRepoConfig{
171
-			"django",
172
-			"https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django.json",
173
-			"django-example",
174
-			"django-example",
175
-			"django-example",
176
-			"Welcome",
177
-			"",
178
-			"",
179
-			"",
180
-		},
181
-	))
182
-
183
-	g.Describe("[images][nodejs] images with nodejs-ex repo", NewSampleRepoTest(
184
-		SampleRepoConfig{
185
-			"nodejs",
186
-			"https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs.json",
187
-			"nodejs-example",
188
-			"nodejs-example",
189
-			"nodejs-example",
190
-			"Welcome",
191
-			"",
192
-			"",
193
-			"",
194
-		},
195
-	))
196
-
197
-	var _ = g.Describe("[images][php] test php images with cakephp-ex repo", NewSampleRepoTest(
198
-		SampleRepoConfig{
199
-			"cakephp",
200
-			"https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp.json",
201
-			"cakephp-example",
202
-			"cakephp-example",
203
-			"cakephp-example",
204
-			"Welcome",
205
-			"",
206
-			"",
207
-			"",
208
-		},
209
-	))
210
-
211
-	var _ = g.Describe("[images][perl] test perl images with dancer-ex repo", NewSampleRepoTest(
212
-		SampleRepoConfig{
213
-			"dancer",
214
-			"https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer.json",
215
-			"dancer-example",
216
-			"dancer-example",
217
-			"dancer-example",
218
-			"Welcome",
219
-			"",
220
-			"",
221
-			"",
222
-		},
223
-	))
224
-
225
-})
226 1
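The eight Describe blocks above differ only in their configuration values. A table-driven sketch that collects the configs in a slice and loops over them; this is an alternative shape for the same tests, not how the original file was written:

// sampleRepos holds one SampleRepoConfig per sample application; only the
// first entry is shown here, the remaining seven are elided.
var sampleRepos = []SampleRepoConfig{
	{
		repoName:             "django",
		templateURL:          "https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django.json",
		buildConfigName:      "django-example",
		serviceName:          "django-example",
		deploymentConfigName: "django-example",
		expectedString:       "Welcome",
	},
	// ...
}

var _ = g.Describe("[images][Slow] openshift sample application repositories", func() {
	for _, c := range sampleRepos {
		// c is passed by value, so each iteration binds its own config.
		g.Describe("test "+c.repoName+" sample repo", NewSampleRepoTest(c))
	}
})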
deleted file mode 100644
... ...
@@ -1,76 +0,0 @@
1
-package images
2
-
3
-import (
4
-	"fmt"
5
-
6
-	g "github.com/onsi/ginkgo"
7
-	o "github.com/onsi/gomega"
8
-
9
-	exutil "github.com/openshift/origin/test/extended/util"
10
-	kapi "k8s.io/kubernetes/pkg/api"
11
-)
12
-
13
-func getPodNameForTest(image string, t tc) string {
14
-	return fmt.Sprintf("%s-%s-%s", image, t.Version, t.BaseOS)
15
-}
16
-
17
-var _ = g.Describe("[images][Slow] openshift images should be SCL enabled", func() {
18
-	defer g.GinkgoRecover()
19
-	var oc = exutil.NewCLI("s2i-usage", exutil.KubeConfigPath())
20
-
21
-	g.JustBeforeEach(func() {
22
-		g.By("waiting for builder service account")
23
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
24
-		o.Expect(err).NotTo(o.HaveOccurred())
25
-	})
26
-
27
-	for image, tcs := range GetTestCaseForImages(AllImages) {
28
-		for _, t := range tcs {
29
-			g.Describe("returning s2i usage when running the image", func() {
30
-				g.It(fmt.Sprintf("%q should print the usage", t.DockerImageReference), func() {
31
-					g.By(fmt.Sprintf("creating a sample pod for %q", t.DockerImageReference))
32
-					pod := exutil.GetPodForContainer(kapi.Container{
33
-						Name:  "test",
34
-						Image: t.DockerImageReference,
35
-					})
36
-					oc.KubeFramework().TestContainerOutput(getPodNameForTest(image, t), pod, 0, []string{"Sample invocation"})
37
-				})
38
-			})
39
-
40
-			g.Describe("using the SCL in s2i images", func() {
41
-				g.It(fmt.Sprintf("%q should be SCL enabled", t.DockerImageReference), func() {
42
-					g.By(fmt.Sprintf("creating a sample pod for %q with /bin/bash -c command", t.DockerImageReference))
43
-					pod := exutil.GetPodForContainer(kapi.Container{
44
-						Image:   t.DockerImageReference,
45
-						Name:    "test",
46
-						Command: []string{"/bin/bash", "-c", t.Cmd},
47
-					})
48
-
49
-					oc.KubeFramework().TestContainerOutput(getPodNameForTest(image, t), pod, 0, []string{t.Expected})
50
-
51
-					g.By(fmt.Sprintf("creating a sample pod for %q", t.DockerImageReference))
52
-					pod = exutil.GetPodForContainer(kapi.Container{
53
-						Image:   t.DockerImageReference,
54
-						Name:    "test",
55
-						Command: []string{"/usr/bin/sleep", "infinity"},
56
-					})
57
-					_, err := oc.KubeREST().Pods(oc.Namespace()).Create(pod)
58
-					o.Expect(err).NotTo(o.HaveOccurred())
59
-
60
-					err = oc.KubeFramework().WaitForPodRunning(pod.Name)
61
-					o.Expect(err).NotTo(o.HaveOccurred())
62
-
63
-					g.By("calling the binary using 'oc exec /bin/bash -c'")
64
-					out, err := oc.Run("exec").Args("-p", pod.Name, "--", "/bin/bash", "-c", t.Cmd).Output()
65
-					o.Expect(err).NotTo(o.HaveOccurred())
66
-					o.Expect(out).Should(o.ContainSubstring(t.Expected))
67
-
68
-					g.By("calling the binary using 'oc exec /bin/sh -ic'")
69
-					out, err = oc.Run("exec").Args("-p", pod.Name, "--", "/bin/sh", "-ic", t.Cmd).Output()
70
-					o.Expect(err).NotTo(o.HaveOccurred())
71
-					o.Expect(out).Should(o.ContainSubstring(t.Expected))
72
-				})
73
-			})
74
-		}
75
-	}
76
-})
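GetTestCaseForImages, AllImages, and the tc type are defined elsewhere in this package, so only their usage is visible above. From the fields referenced (t.Version, t.BaseOS, t.DockerImageReference, t.Cmd, t.Expected), the shape of tc is roughly the following sketch; the field set is inferred from usage, not copied from the original declaration:

// tc describes one image test case, reconstructed from how the fields
// are used above; the real declaration may carry additional fields.
type tc struct {
	Version              string // image version tag, used in the pod name
	BaseOS               string // base OS variant, used in the pod name
	DockerImageReference string // pullable image reference under test
	Cmd                  string // command executed via /bin/bash -c
	Expected             string // substring expected in the command output
}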