
Switch to internalclientset - boring changes

Maciej Szulik authored on 2016/11/20 05:36:13
Showing 280 changed files
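The bulk of the change below is mechanical: call sites move from the unversioned Kubernetes client to the generated internal clientset, so core resources are reached through Core() (kubeClient.Services(ns) becomes kubeClient.Core().Services(ns)), and test fakes move from testclient.NewSimpleFake to fake.NewSimpleClientset(...).Core(). A minimal sketch of the before/after shape; registryExists is a hypothetical helper used only for illustration, while the import paths are the ones used in the commit:

// Before: the unversioned client exposes core resources directly.
//
//	import kclient "k8s.io/kubernetes/pkg/client/unversioned"
//
//	func registryExists(c kclient.Interface, ns string) bool {
//		_, err := c.Services(ns).Get("docker-registry")
//		return err == nil
//	}

// After: the internal clientset groups the same calls behind Core().
import kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

func registryExists(c kclientset.Interface, ns string) bool {
	_, err := c.Core().Services(ns).Get("docker-registry")
	return err == nil
}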
... ...
@@ -8,9 +8,9 @@ items:
     labels:
       app: mysql
     name: mysql
-    namespace: default
+    namespace: example
     resourceVersion: "6790"
-    selfLink: /apis/apps/v1alpha1/namespaces/default/petsets/mysql
+    selfLink: /apis/apps/v1alpha1/namespaces/example/petsets/mysql
     uid: 3900c985-4f5b-11e6-b8a1-080027242396
   spec:
     replicas: 3
... ...
@@ -99,9 +99,9 @@ items:
     labels:
       app: mysql
     name: galera
-    namespace: default
+    namespace: example
     resourceVersion: "343"
-    selfLink: /api/v1/namespaces/default/services/galera
+    selfLink: /api/v1/namespaces/example/services/galera
     uid: 38fb3915-4f5b-11e6-b8a1-080027242396
   spec:
     clusterIP: None
... ...
@@ -121,7 +121,7 @@ items:
   metadata:
     annotations:
       kubernetes.io/created-by: |
-        {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"PetSet","namespace":"default","name":"mysql","uid":"3900c985-4f5b-11e6-b8a1-080027242396","apiVersion":"apps","resourceVersion":"6784"}}
+        {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"PetSet","namespace":"example","name":"mysql","uid":"3900c985-4f5b-11e6-b8a1-080027242396","apiVersion":"apps","resourceVersion":"6784"}}
       openshift.io/scc: anyuid
       pod.alpha.kubernetes.io/init-container-statuses: '[{"name":"install","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T02:41:12Z","finishedAt":"2016-07-27T02:41:12Z","containerID":"docker://5c727d8732899605fcfe3eecbeeb02576f18f5b989496073340427a8d2134622"}},"lastState":{},"ready":true,"restartCount":0,"image":"gcr.io/google_containers/galera-install:0.1","imageID":"docker://sha256:56ef857005d0ce479f2db0e4ee0ece05e0766ebfa7e79e27e1513915262a18ec","containerID":"docker://5c727d8732899605fcfe3eecbeeb02576f18f5b989496073340427a8d2134622"},{"name":"bootstrap","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T02:41:14Z","finishedAt":"2016-07-27T02:41:15Z","containerID":"docker://ab4ca0b3b6ec4860cd55c615534e1e2b11f4c3a33746783aab145919feb2446e"}},"lastState":{},"ready":true,"restartCount":0,"image":"debian:jessie","imageID":"docker://sha256:1b088884749bd93867ddb48ff404d4bbff09a17af8d95bc863efa5d133f87b78","containerID":"docker://ab4ca0b3b6ec4860cd55c615534e1e2b11f4c3a33746783aab145919feb2446e"}]'
       pod.alpha.kubernetes.io/init-containers: '[{"name":"install","image":"gcr.io/google_containers/galera-install:0.1","args":["--work-dir=/work-dir"],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"Always"},{"name":"bootstrap","image":"debian:jessie","command":["/work-dir/peer-finder"],"args":["-on-start=\"/work-dir/on-start.sh\"","-service=galera"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}}],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]'
... ...
@@ -133,9 +133,9 @@ items:
     labels:
       app: mysql
     name: mysql-0
-    namespace: default
+    namespace: example
     resourceVersion: "7191"
-    selfLink: /api/v1/namespaces/default/pods/mysql-0
+    selfLink: /api/v1/namespaces/example/pods/mysql-0
     uid: 92e49e79-53a3-11e6-b45a-080027242396
   spec:
     containers:
... ...
@@ -244,7 +244,7 @@ items:
   metadata:
     annotations:
       kubernetes.io/created-by: |
-        {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"PetSet","namespace":"default","name":"mysql","uid":"3900c985-4f5b-11e6-b8a1-080027242396","apiVersion":"apps","resourceVersion":"6790"}}
+        {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"PetSet","namespace":"example","name":"mysql","uid":"3900c985-4f5b-11e6-b8a1-080027242396","apiVersion":"apps","resourceVersion":"6790"}}
       openshift.io/scc: anyuid
       pod.alpha.kubernetes.io/init-container-statuses: '[{"name":"install","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T02:41:42Z","finishedAt":"2016-07-27T02:41:42Z","containerID":"docker://2538c65f65557955c02745ef4021181cf322c8dc0db62144dd1e1f8ea9f7fa54"}},"lastState":{},"ready":true,"restartCount":0,"image":"gcr.io/google_containers/galera-install:0.1","imageID":"docker://sha256:56ef857005d0ce479f2db0e4ee0ece05e0766ebfa7e79e27e1513915262a18ec","containerID":"docker://2538c65f65557955c02745ef4021181cf322c8dc0db62144dd1e1f8ea9f7fa54"},{"name":"bootstrap","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T02:41:44Z","finishedAt":"2016-07-27T02:41:45Z","containerID":"docker://4df7188d37033c182e675d45179941766bd1e6a013469038f43fa3fecc2cc06d"}},"lastState":{},"ready":true,"restartCount":0,"image":"debian:jessie","imageID":"docker://sha256:1b088884749bd93867ddb48ff404d4bbff09a17af8d95bc863efa5d133f87b78","containerID":"docker://4df7188d37033c182e675d45179941766bd1e6a013469038f43fa3fecc2cc06d"}]'
       pod.alpha.kubernetes.io/init-containers: '[{"name":"install","image":"gcr.io/google_containers/galera-install:0.1","args":["--work-dir=/work-dir"],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"Always"},{"name":"bootstrap","image":"debian:jessie","command":["/work-dir/peer-finder"],"args":["-on-start=\"/work-dir/on-start.sh\"","-service=galera"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}}],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]'
... ...
@@ -256,9 +256,9 @@ items:
     labels:
       app: mysql
     name: mysql-1
-    namespace: default
+    namespace: example
     resourceVersion: "7195"
-    selfLink: /api/v1/namespaces/default/pods/mysql-1
+    selfLink: /api/v1/namespaces/example/pods/mysql-1
     uid: a4da4725-53a3-11e6-b45a-080027242396
   spec:
     containers:
... ...
@@ -373,7 +373,7 @@ items:
   metadata:
     annotations:
       kubernetes.io/created-by: |
-        {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"PetSet","namespace":"default","name":"mysql","uid":"3900c985-4f5b-11e6-b8a1-080027242396","apiVersion":"apps","resourceVersion":"6790"}}
+        {"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"PetSet","namespace":"example","name":"mysql","uid":"3900c985-4f5b-11e6-b8a1-080027242396","apiVersion":"apps","resourceVersion":"6790"}}
       openshift.io/scc: anyuid
       pod.alpha.kubernetes.io/init-container-statuses: '[{"name":"install","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T03:01:01Z","finishedAt":"2016-07-27T03:01:01Z","containerID":"docker://af008b4ce59d36695fbabf40ae2f7431b51441eb2e9c6962378937c06ac69a35"}},"lastState":{},"ready":true,"restartCount":0,"image":"gcr.io/google_containers/galera-install:0.1","imageID":"docker://sha256:56ef857005d0ce479f2db0e4ee0ece05e0766ebfa7e79e27e1513915262a18ec","containerID":"docker://af008b4ce59d36695fbabf40ae2f7431b51441eb2e9c6962378937c06ac69a35"},{"name":"bootstrap","state":{"terminated":{"exitCode":0,"reason":"Completed","startedAt":"2016-07-27T03:01:02Z","finishedAt":"2016-07-27T03:01:03Z","containerID":"docker://ee97005854130335b54a65429865956260b7729e51e6363ab05e63d5c7c9ee48"}},"lastState":{},"ready":true,"restartCount":0,"image":"debian:jessie","imageID":"docker://sha256:1b088884749bd93867ddb48ff404d4bbff09a17af8d95bc863efa5d133f87b78","containerID":"docker://ee97005854130335b54a65429865956260b7729e51e6363ab05e63d5c7c9ee48"}]'
       pod.alpha.kubernetes.io/init-containers: '[{"name":"install","image":"gcr.io/google_containers/galera-install:0.1","args":["--work-dir=/work-dir"],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"Always"},{"name":"bootstrap","image":"debian:jessie","command":["/work-dir/peer-finder"],"args":["-on-start=\"/work-dir/on-start.sh\"","-service=galera"],"env":[{"name":"POD_NAMESPACE","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}}],"resources":{},"volumeMounts":[{"name":"workdir","mountPath":"/work-dir"},{"name":"config","mountPath":"/etc/mysql"},{"name":"default-token-au2xq","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]'
... ...
@@ -385,9 +385,9 @@ items:
     labels:
       app: mysql
     name: mysql-2
-    namespace: default
+    namespace: example
     resourceVersion: "7226"
-    selfLink: /api/v1/namespaces/default/pods/mysql-2
+    selfLink: /api/v1/namespaces/example/pods/mysql-2
     uid: 57e618f1-53a6-11e6-b215-080027242396
   spec:
     containers:
... ...
@@ -11,7 +11,7 @@ import (
 	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
 	kapi "k8s.io/kubernetes/pkg/api"
 	apierrors "k8s.io/kubernetes/pkg/api/errors"
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/serviceaccount"

 	"github.com/openshift/origin/pkg/bootstrap/docker/errors"
... ...
@@ -31,8 +31,8 @@ const (
 )

 // InstallRegistry checks whether a registry is installed and installs one if not already installed
-func (h *Helper) InstallRegistry(kubeClient kclient.Interface, f *clientcmd.Factory, configDir, images string, out, errout io.Writer) error {
-	_, err := kubeClient.Services(DefaultNamespace).Get(SvcDockerRegistry)
+func (h *Helper) InstallRegistry(kubeClient kclientset.Interface, f *clientcmd.Factory, configDir, images string, out, errout io.Writer) error {
+	_, err := kubeClient.Core().Services(DefaultNamespace).Get(SvcDockerRegistry)
 	if err == nil {
 		// If there's no error, the registry already exists
 		return nil
... ...
@@ -69,8 +69,8 @@ func (h *Helper) InstallRegistry(kubeClient kclient.Interface, f *clientcmd.Fact
 }

 // InstallRouter installs a default router on the OpenShift server
-func (h *Helper) InstallRouter(kubeClient kclient.Interface, f *clientcmd.Factory, configDir, images, hostIP string, portForwarding bool, out, errout io.Writer) error {
-	_, err := kubeClient.Services(DefaultNamespace).Get(SvcRouter)
+func (h *Helper) InstallRouter(kubeClient kclientset.Interface, f *clientcmd.Factory, configDir, images, hostIP string, portForwarding bool, out, errout io.Writer) error {
+	_, err := kubeClient.Core().Services(DefaultNamespace).Get(SvcRouter)
 	if err == nil {
 		// Router service already exists, nothing to do
 		return nil
... ...
@@ -84,18 +84,18 @@ func (h *Helper) InstallRouter(kubeClient kclient.Interface, f *clientcmd.Factor
 	// Create service account for router
 	routerSA := &kapi.ServiceAccount{}
 	routerSA.Name = "router"
-	_, err = kubeClient.ServiceAccounts("default").Create(routerSA)
+	_, err = kubeClient.Core().ServiceAccounts("default").Create(routerSA)
 	if err != nil {
 		return errors.NewError("cannot create router service account").WithCause(err).WithDetails(h.OriginLog())
 	}

 	// Add router SA to privileged SCC
-	privilegedSCC, err := kubeClient.SecurityContextConstraints().Get("privileged")
+	privilegedSCC, err := kubeClient.Core().SecurityContextConstraints().Get("privileged")
 	if err != nil {
 		return errors.NewError("cannot retrieve privileged SCC").WithCause(err).WithDetails(h.OriginLog())
 	}
 	privilegedSCC.Users = append(privilegedSCC.Users, serviceaccount.MakeUsername("default", "router"))
-	_, err = kubeClient.SecurityContextConstraints().Update(privilegedSCC)
+	_, err = kubeClient.Core().SecurityContextConstraints().Update(privilegedSCC)
 	if err != nil {
 		return errors.NewError("cannot update privileged SCC").WithCause(err).WithDetails(h.OriginLog())
 	}
... ...
@@ -186,10 +186,10 @@ func AddRoleToServiceAccount(osClient client.Interface, role, sa, namespace stri
 	return addRole.AddRole()
 }

-func AddSCCToServiceAccount(kubeClient kclient.Interface, scc, sa, namespace string) error {
+func AddSCCToServiceAccount(kubeClient kclientset.Interface, scc, sa, namespace string) error {
 	modifySCC := policy.SCCModificationOptions{
 		SCCName:      scc,
-		SCCInterface: kubeClient,
+		SCCInterface: kubeClient.Core(),
 		Subjects: []kapi.ObjectReference{
 			{
 				Namespace: namespace,
... ...
@@ -51,12 +51,12 @@ func instantiateTemplate(client client.Interface, mapper configcmd.Mapper, templ

 // InstallLogging checks whether logging is installed and installs it if not already installed
 func (h *Helper) InstallLogging(f *clientcmd.Factory, publicHostname, loggerHost, imagePrefix, imageVersion string) error {
-	osClient, kubeClient, err := f.Clients()
+	osClient, _, kubeClient, err := f.Clients()
 	if err != nil {
 		return errors.NewError("cannot obtain API clients").WithCause(err).WithDetails(h.OriginLog())
 	}

-	_, err = kubeClient.Namespaces().Get(loggingNamespace)
+	_, err = kubeClient.Core().Namespaces().Get(loggingNamespace)
 	if err == nil {
 		// If there's no error, the logging namespace already exists and we won't initialize it
 		return nil
... ...
@@ -91,7 +91,7 @@ func (h *Helper) InstallLogging(f *clientcmd.Factory, publicHostname, loggerHost
 	}

 	// Label all nodes with default fluentd label
-	nodeList, err := kubeClient.Nodes().List(kapi.ListOptions{})
+	nodeList, err := kubeClient.Core().Nodes().List(kapi.ListOptions{})
 	if err != nil {
 		return errors.NewError("cannot retrieve nodes").WithCause(err).WithDetails(h.OriginLog())
 	}
... ...
@@ -99,7 +99,7 @@ func (h *Helper) InstallLogging(f *clientcmd.Factory, publicHostname, loggerHost
 	// Iterate through all nodes (there should only be one)
 	for _, node := range nodeList.Items {
 		node.Labels["logging-infra-fluentd"] = "true"
-		if _, err = kubeClient.Nodes().Update(&node); err != nil {
+		if _, err = kubeClient.Core().Nodes().Update(&node); err != nil {
 			return errors.NewError("cannot update labels on node %s", node.Name).WithCause(err)
 		}
 	}
... ...
@@ -113,7 +113,7 @@ func (h *Helper) InstallLogging(f *clientcmd.Factory, publicHostname, loggerHost
 		"es-cluster-size":   "1",
 		"es-instance-ram":   "1024M",
 	}
-	kubeClient.ConfigMaps(loggingNamespace).Create(loggingConfig)
+	kubeClient.Core().ConfigMaps(loggingNamespace).Create(loggingConfig)

 	// Instantiate logging deployer
 	deployerParams := map[string]string{
... ...
@@ -20,7 +20,7 @@ const (

 // InstallMetrics checks whether metrics is installed and installs it if not already installed
 func (h *Helper) InstallMetrics(f *clientcmd.Factory, hostName, imagePrefix, imageVersion string) error {
-	osClient, kubeClient, err := f.Clients()
+	osClient, kubeClient, _, err := f.Clients()
 	if err != nil {
 		return errors.NewError("cannot obtain API clients").WithCause(err).WithDetails(h.OriginLog())
 	}
... ...
@@ -14,7 +14,7 @@ import (

 // CreateProject creates a project
 func CreateProject(f *clientcmd.Factory, name, display, desc, basecmd string, out io.Writer) error {
-	client, _, err := f.Clients()
+	client, _, _, err := f.Clients()
 	if err != nil {
 		return nil
 	}
... ...
@@ -98,7 +98,7 @@ func (c *ClientStatusConfig) Status(f *clientcmd.Factory, out io.Writer) error {
 }

 func isHealthy(f *clientcmd.Factory) (bool, error) {
-	osClient, _, err := f.Clients()
+	osClient, _, _, err := f.Clients()
 	if err != nil {
 		return false, err
 	}
... ...
@@ -15,7 +15,7 @@ import (
 	"github.com/spf13/cobra"

 	kerrors "k8s.io/kubernetes/pkg/api/errors"
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"

... ...
@@ -795,12 +795,16 @@ func (c *ClientStartConfig) Factory() (*clientcmd.Factory, error) {
 }

 // Clients returns clients for OpenShift and Kube
-func (c *ClientStartConfig) Clients() (*client.Client, *kclient.Client, error) {
+func (c *ClientStartConfig) Clients() (*client.Client, *kclientset.Clientset, error) {
 	f, err := c.Factory()
 	if err != nil {
 		return nil, nil, err
 	}
-	return f.Clients()
+	oc, _, kcset, err := f.Clients()
+	if err != nil {
+		return nil, nil, err
+	}
+	return oc, kcset, nil
 }

 // OpenShiftHelper returns a helper object to work with OpenShift on the server
... ...
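A note on the recurring f.Clients() edits in the rest of this commit: judging from the call sites (osClient, _, kubeClient, err := f.Clients() where kubeClient is used through Core(), and _, kc, _, err := f.Clients() where kc still goes through adapter.FromUnversionedClient), the factory now appears to return four values: Origin client, unversioned Kube client, internal clientset, and error. The signature itself is not shown in this diff, so the sketch below is an inference; most callers simply add a blank identifier for whichever Kube client they do not need, as in the wrapper above:

// Assumed shape, inferred from call sites in this commit (not shown in the diff):
//	func (f *Factory) Clients() (*client.Client, *kclient.Client, *kclientset.Clientset, error)
// A typical caller after the change keeps the clientset and discards the unversioned client:
oc, _, kcset, err := f.Clients()
if err != nil {
	return nil, nil, err
}
return oc, kcset, nil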
@@ -943,7 +947,7 @@ func (c *ClientStartConfig) ShouldInitializeData() bool {
 			return true
 		}

-		if _, err = kclient.Services(openshift.DefaultNamespace).Get(openshift.SvcDockerRegistry); err != nil {
+		if _, err = kclient.Core().Services(openshift.DefaultNamespace).Get(openshift.SvcDockerRegistry); err != nil {
 			return true
 		}

... ...
@@ -8,7 +8,7 @@ import (
 	"github.com/golang/glog"
 	kapi "k8s.io/kubernetes/pkg/api"
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
-	"k8s.io/kubernetes/pkg/client/unversioned"
+	kclient "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/kubectl"
 	ktypes "k8s.io/kubernetes/pkg/types"
 	kutilerrors "k8s.io/kubernetes/pkg/util/errors"
... ...
@@ -84,7 +84,7 @@ func (reaper *BuildConfigReaper) Stop(namespace, name string, timeout time.Durat
 	if len(bcBuilds) > 0 {

 		// Add paused annotation to the build config pending the deletion
-		err = unversioned.RetryOnConflict(unversioned.DefaultRetry, func() error {
+		err = kclient.RetryOnConflict(kclient.DefaultRetry, func() error {

 			bc, err := reaper.oc.BuildConfigs(namespace).Get(name)
 			if err != nil {
... ...
@@ -9,8 +9,8 @@ import (
 	errors "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/client/cache"
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
 	"k8s.io/kubernetes/pkg/client/record"
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"

 	builddefaults "github.com/openshift/origin/pkg/build/admission/defaults"
 	buildoverrides "github.com/openshift/origin/pkg/build/admission/overrides"
... ...
@@ -269,7 +269,7 @@ func (bc *BuildController) resolveOutputDockerImageReference(build *buildapi.Bui
 type BuildPodController struct {
 	BuildStore   cache.Store
 	BuildUpdater buildclient.BuildUpdater
-	SecretClient kclient.SecretsNamespacer
+	SecretClient kcoreclient.SecretsGetter
 	PodManager   podManager
 }

... ...
@@ -8,7 +8,7 @@ import (

 	kapi "k8s.io/kubernetes/pkg/api"
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"

 	buildapi "github.com/openshift/origin/pkg/build/api"
 	buildtest "github.com/openshift/origin/pkg/build/controller/test"
... ...
@@ -305,7 +305,8 @@ func mockBuildConfig(baseImage, triggerImage, repoName, repoTag string) *buildap
 	dockerfile := "FROM foo"
 	return &buildapi.BuildConfig{
 		ObjectMeta: kapi.ObjectMeta{
-			Name: "testBuildCfg",
+			Name:      "testBuildCfg",
+			Namespace: kapi.NamespaceDefault,
 		},
 		Spec: buildapi.BuildConfigSpec{
 			CommonSpec: buildapi.CommonSpec{
... ...
@@ -346,7 +347,8 @@ func mockImageStream(repoName, dockerImageRepo string, tags map[string]string) *

 	return &imageapi.ImageStream{
 		ObjectMeta: kapi.ObjectMeta{
-			Name: repoName,
+			Name:      repoName,
+			Namespace: kapi.NamespaceDefault,
 		},
 		Status: imageapi.ImageStreamStatus{
 			DockerImageRepository: dockerImageRepo,
... ...
@@ -379,14 +381,14 @@ func (i *buildConfigInstantiator) Instantiate(namespace string, request *buildap

 func mockBuildConfigInstantiator(buildcfg *buildapi.BuildConfig, imageStream *imageapi.ImageStream, image *imageapi.Image) *buildConfigInstantiator {
 	builderAccount := kapi.ServiceAccount{
-		ObjectMeta: kapi.ObjectMeta{Name: bootstrappolicy.BuilderServiceAccountName},
+		ObjectMeta: kapi.ObjectMeta{Name: bootstrappolicy.BuilderServiceAccountName, Namespace: kapi.NamespaceDefault},
 		Secrets:    []kapi.ObjectReference{},
 	}
 	instantiator := &buildConfigInstantiator{}
 	instantiator.buildConfigUpdater = &mockBuildConfigUpdater{}
 	generator := buildgenerator.BuildGenerator{
-		Secrets:         testclient.NewSimpleFake(),
-		ServiceAccounts: testclient.NewSimpleFake(&builderAccount),
+		Secrets:         fake.NewSimpleClientset().Core(),
+		ServiceAccounts: fake.NewSimpleClientset(&builderAccount).Core(),
 		Client: buildgenerator.Client{
 			GetBuildConfigFunc: func(ctx kapi.Context, name string) (*buildapi.BuildConfig, error) {
 				return buildcfg, nil
... ...
@@ -12,7 +12,7 @@ import (
 	kapi "k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/unversioned"
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
 	"k8s.io/kubernetes/pkg/credentialprovider"
 	kvalidation "k8s.io/kubernetes/pkg/util/validation"

... ...
@@ -47,8 +47,8 @@ func IsFatal(err error) bool {
 type BuildGenerator struct {
 	Client                    GeneratorClient
 	DefaultServiceAccountName string
-	ServiceAccounts           kclient.ServiceAccountsNamespacer
-	Secrets                   kclient.SecretsNamespacer
+	ServiceAccounts           kcoreclient.ServiceAccountsGetter
+	Secrets                   kcoreclient.SecretsGetter
 }

 // GeneratorClient is the API client used by the generator
... ...
@@ -10,8 +10,7 @@ import (
 	kapi "k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/resource"
-
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 	"k8s.io/kubernetes/pkg/runtime"

 	buildapi "github.com/openshift/origin/pkg/build/api"
... ...
@@ -239,7 +238,7 @@ func TestInstantiateGenerateBuildError(t *testing.T) {
 		fakeSecrets = append(fakeSecrets, s)
 	}
 	generator := BuildGenerator{
-		Secrets:         testclient.NewSimpleFake(fakeSecrets...),
+		Secrets:         fake.NewSimpleClientset(fakeSecrets...).Core(),
 		ServiceAccounts: mocks.MockBuilderServiceAccount(mocks.MockBuilderSecrets()),
 		Client: Client{
 			GetBuildConfigFunc: func(ctx kapi.Context, name string) (*buildapi.BuildConfig, error) {
... ...
@@ -339,6 +338,7 @@ func TestInstantiateWithImageTrigger(t *testing.T) {
 	source := mocks.MockSource()
 	for _, tc := range tests {
 		bc := &buildapi.BuildConfig{
+			ObjectMeta: kapi.ObjectMeta{Namespace: kapi.NamespaceDefault},
 			Spec: buildapi.BuildConfigSpec{
 				CommonSpec: buildapi.CommonSpec{
 					Strategy: buildapi.BuildStrategy{
... ...
@@ -741,7 +741,7 @@ func TestGenerateBuildFromConfig(t *testing.T) {
 	bc := &buildapi.BuildConfig{
 		ObjectMeta: kapi.ObjectMeta{
 			Name:      "test-build-config",
-			Namespace: "test-namespace",
+			Namespace: kapi.NamespaceDefault,
 			Labels:    map[string]string{"testlabel": "testvalue"},
 		},
 		Spec: buildapi.BuildConfigSpec{
... ...
@@ -829,7 +829,8 @@ func TestGenerateBuildWithImageTagForSourceStrategyImageRepository(t *testing.T)
 	output := mocks.MockOutput()
 	bc := &buildapi.BuildConfig{
 		ObjectMeta: kapi.ObjectMeta{
-			Name: "test-build-config",
+			Name:      "test-build-config",
+			Namespace: kapi.NamespaceDefault,
 		},
 		Spec: buildapi.BuildConfigSpec{
 			CommonSpec: buildapi.CommonSpec{
... ...
@@ -849,7 +850,7 @@ func TestGenerateBuildWithImageTagForSourceStrategyImageRepository(t *testing.T)
 		fakeSecrets = append(fakeSecrets, s)
 	}
 	generator := BuildGenerator{
-		Secrets:         testclient.NewSimpleFake(fakeSecrets...),
+		Secrets:         fake.NewSimpleClientset(fakeSecrets...).Core(),
 		ServiceAccounts: mocks.MockBuilderServiceAccount(mocks.MockBuilderSecrets()),
 		Client: Client{
 			GetImageStreamFunc: func(ctx kapi.Context, name string) (*imageapi.ImageStream, error) {
... ...
@@ -907,7 +908,8 @@ func TestGenerateBuildWithImageTagForDockerStrategyImageRepository(t *testing.T)
 	output := mocks.MockOutput()
 	bc := &buildapi.BuildConfig{
 		ObjectMeta: kapi.ObjectMeta{
-			Name: "test-build-config",
+			Name:      "test-build-config",
+			Namespace: kapi.NamespaceDefault,
 		},
 		Spec: buildapi.BuildConfigSpec{
 			CommonSpec: buildapi.CommonSpec{
... ...
@@ -927,7 +929,7 @@ func TestGenerateBuildWithImageTagForDockerStrategyImageRepository(t *testing.T)
 		fakeSecrets = append(fakeSecrets, s)
 	}
 	generator := BuildGenerator{
-		Secrets:         testclient.NewSimpleFake(fakeSecrets...),
+		Secrets:         fake.NewSimpleClientset(fakeSecrets...).Core(),
 		ServiceAccounts: mocks.MockBuilderServiceAccount(mocks.MockBuilderSecrets()),
 		Client: Client{
 			GetImageStreamFunc: func(ctx kapi.Context, name string) (*imageapi.ImageStream, error) {
... ...
@@ -984,7 +986,8 @@ func TestGenerateBuildWithImageTagForCustomStrategyImageRepository(t *testing.T)
 	output := mocks.MockOutput()
 	bc := &buildapi.BuildConfig{
 		ObjectMeta: kapi.ObjectMeta{
-			Name: "test-build-config",
+			Name:      "test-build-config",
+			Namespace: kapi.NamespaceDefault,
 		},
 		Spec: buildapi.BuildConfigSpec{
 			CommonSpec: buildapi.CommonSpec{
... ...
@@ -1004,7 +1007,7 @@ func TestGenerateBuildWithImageTagForCustomStrategyImageRepository(t *testing.T)
 		fakeSecrets = append(fakeSecrets, s)
 	}
 	generator := BuildGenerator{
-		Secrets:         testclient.NewSimpleFake(fakeSecrets...),
+		Secrets:         fake.NewSimpleClientset(fakeSecrets...).Core(),
 		ServiceAccounts: mocks.MockBuilderServiceAccount(mocks.MockBuilderSecrets()),
 		Client: Client{
 			GetImageStreamFunc: func(ctx kapi.Context, name string) (*imageapi.ImageStream, error) {
... ...
@@ -1562,7 +1565,7 @@ func mockBuildGenerator() *BuildGenerator {
 	}
 	var b *buildapi.Build
 	return &BuildGenerator{
-		Secrets:         testclient.NewSimpleFake(fakeSecrets...),
+		Secrets:         fake.NewSimpleClientset(fakeSecrets...).Core(),
 		ServiceAccounts: mocks.MockBuilderServiceAccount(mocks.MockBuilderSecrets()),
 		Client: Client{
 			GetBuildConfigFunc: func(ctx kapi.Context, name string) (*buildapi.BuildConfig, error) {
... ...
@@ -3,13 +3,13 @@ package test
 import (
 	"fmt"

-	"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
 	kapi "k8s.io/kubernetes/pkg/api"
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
 	"k8s.io/kubernetes/pkg/runtime"

 	buildapi "github.com/openshift/origin/pkg/build/api"
+	"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
 	imageapi "github.com/openshift/origin/pkg/image/api"
 )

... ...
@@ -32,41 +32,48 @@ var (
 	}
 )

-func MockBuilderSecrets() (secrets []*kapi.Secret) {
-	i := 1
+func MockBuilderSecrets() []*kapi.Secret {
+	var secrets []*kapi.Secret
 	for name, conf := range SampleDockerConfigs {
 		secrets = append(secrets, &kapi.Secret{
 			ObjectMeta: kapi.ObjectMeta{
-				Name: name,
+				Name:      name,
+				Namespace: kapi.NamespaceDefault,
 			},
 			Type: kapi.SecretTypeDockercfg,
 			Data: map[string][]byte{".dockercfg": conf},
 		})
-		i++
 	}
 	return secrets
 }

-func MockBuilderServiceAccount(secrets []*kapi.Secret) kclient.ServiceAccountsNamespacer {
+func MockBuilderServiceAccount(secrets []*kapi.Secret) kcoreclient.ServiceAccountsGetter {
 	var (
 		secretRefs  []kapi.ObjectReference
 		fakeObjects []runtime.Object
 	)
 	for _, secret := range secrets {
-		secretRefs = append(secretRefs, kapi.ObjectReference{Name: secret.Name, Kind: "Secret"})
+		secretRefs = append(secretRefs, kapi.ObjectReference{
+			Name: secret.Name,
+			Kind: "Secret",
+		})
 		fakeObjects = append(fakeObjects, secret)
 	}
 	fakeObjects = append(fakeObjects, &kapi.ServiceAccount{
-		ObjectMeta: kapi.ObjectMeta{Name: bootstrappolicy.BuilderServiceAccountName},
-		Secrets:    secretRefs,
+		ObjectMeta: kapi.ObjectMeta{
+			Name:      bootstrappolicy.BuilderServiceAccountName,
+			Namespace: kapi.NamespaceDefault,
+		},
+		Secrets: secretRefs,
 	})
-	return testclient.NewSimpleFake(fakeObjects...)
+	return fake.NewSimpleClientset(fakeObjects...).Core()
 }

 func MockBuildConfig(source buildapi.BuildSource, strategy buildapi.BuildStrategy, output buildapi.BuildOutput) *buildapi.BuildConfig {
 	return &buildapi.BuildConfig{
 		ObjectMeta: kapi.ObjectMeta{
-			Name: "test-build-config",
+			Name:      "test-build-config",
+			Namespace: kapi.NamespaceDefault,
 			Labels: map[string]string{
 				"testbclabel": "testbcvalue",
 			},
... ...
@@ -12,7 +12,7 @@ import (
 	kapi "k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/rest"
-	"k8s.io/kubernetes/pkg/client/unversioned"
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
 	"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
 	kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
 	kubeletremotecommand "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
... ...
@@ -61,7 +61,7 @@ func (s *InstantiateREST) Create(ctx kapi.Context, obj runtime.Object) (runtime.
 	return s.generator.Instantiate(ctx, request)
 }

-func NewBinaryStorage(generator *generator.BuildGenerator, watcher rest.Watcher, podClient unversioned.PodsNamespacer, info kubeletclient.ConnectionInfoGetter) *BinaryInstantiateREST {
+func NewBinaryStorage(generator *generator.BuildGenerator, watcher rest.Watcher, podClient kcoreclient.PodsGetter, info kubeletclient.ConnectionInfoGetter) *BinaryInstantiateREST {
 	return &BinaryInstantiateREST{
 		Generator:      generator,
 		Watcher:        watcher,
... ...
@@ -230,7 +230,7 @@ func (h *binaryInstantiateHandler) handle(r io.Reader) (runtime.Object, error) {
 }

 type podGetter struct {
-	podsNamespacer unversioned.PodsNamespacer
+	podsNamespacer kcoreclient.PodsGetter
 }

 func (g *podGetter) Get(ctx kapi.Context, name string) (runtime.Object, error) {
... ...
@@ -4,7 +4,7 @@ import (
 	"testing"

 	kapi "k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 	"k8s.io/kubernetes/pkg/runtime"

 	buildapi "github.com/openshift/origin/pkg/build/api"
... ...
@@ -22,7 +22,7 @@ func TestCreateInstantiate(t *testing.T) {
 		fakeSecrets = append(fakeSecrets, s)
 	}
 	rest := InstantiateREST{&generator.BuildGenerator{
-		Secrets:         testclient.NewSimpleFake(fakeSecrets...),
+		Secrets:         fake.NewSimpleClientset(fakeSecrets...).Core(),
 		ServiceAccounts: mocks.MockBuilderServiceAccount(mocks.MockBuilderSecrets()),
 		Client: generator.Client{
 			GetBuildConfigFunc: func(ctx kapi.Context, name string) (*buildapi.BuildConfig, error) {
... ...
@@ -9,7 +9,7 @@ import (
 	kapi "k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/rest"
-	"k8s.io/kubernetes/pkg/client/unversioned"
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
 	kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
 	genericrest "k8s.io/kubernetes/pkg/registry/generic/rest"
 	"k8s.io/kubernetes/pkg/registry/pod"
... ...
@@ -31,7 +31,7 @@ type REST struct {
 }

 type podGetter struct {
-	podsNamespacer unversioned.PodsNamespacer
+	kcoreclient.PodsGetter
 }

 func (g *podGetter) Get(ctx kapi.Context, name string) (runtime.Object, error) {
... ...
@@ -39,7 +39,7 @@ func (g *podGetter) Get(ctx kapi.Context, name string) (runtime.Object, error) {
 	if !ok {
 		return nil, errors.NewBadRequest("namespace parameter required.")
 	}
-	return g.podsNamespacer.Pods(ns).Get(name)
+	return g.Pods(ns).Get(name)
 }

 const defaultTimeout time.Duration = 10 * time.Second
... ...
@@ -47,7 +47,7 @@ const defaultTimeout time.Duration = 10 * time.Second
 // NewREST creates a new REST for BuildLog
 // Takes build registry and pod client to get necessary attributes to assemble
 // URL to which the request shall be redirected in order to get build logs.
-func NewREST(getter rest.Getter, watcher rest.Watcher, pn unversioned.PodsNamespacer, connectionInfo kubeletclient.ConnectionInfoGetter) *REST {
+func NewREST(getter rest.Getter, watcher rest.Watcher, pn kcoreclient.PodsGetter, connectionInfo kubeletclient.ConnectionInfoGetter) *REST {
 	return &REST{
 		Getter:         getter,
 		Watcher:        watcher,
... ...
@@ -5,30 +5,29 @@ import (

 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/apis/extensions"
-	unversioned_extensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
+	kextensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"

 	"github.com/openshift/origin/pkg/api/latest"
 )

 type delegatingScaleInterface struct {
 	dcs    DeploymentConfigInterface
-	scales kclient.ScaleInterface
+	scales kextensionsclient.ScaleInterface
 }

 type delegatingScaleNamespacer struct {
 	dcNS    DeploymentConfigsNamespacer
-	scaleNS kclient.ScaleNamespacer
+	scaleNS kextensionsclient.ScalesGetter
 }

-func (c *delegatingScaleNamespacer) Scales(namespace string) unversioned_extensions.ScaleInterface {
+func (c *delegatingScaleNamespacer) Scales(namespace string) kextensionsclient.ScaleInterface {
 	return &delegatingScaleInterface{
 		dcs:    c.dcNS.DeploymentConfigs(namespace),
 		scales: c.scaleNS.Scales(namespace),
 	}
 }

-func NewDelegatingScaleNamespacer(dcNamespacer DeploymentConfigsNamespacer, sNamespacer kclient.ScaleNamespacer) unversioned_extensions.ScalesGetter {
+func NewDelegatingScaleNamespacer(dcNamespacer DeploymentConfigsNamespacer, sNamespacer kextensionsclient.ScalesGetter) kextensionsclient.ScalesGetter {
 	return &delegatingScaleNamespacer{
 		dcNS:    dcNamespacer,
 		scaleNS: sNamespacer,
... ...
@@ -22,7 +22,7 @@ var (
 func (o DiagnosticsOptions) buildClientDiagnostics(rawConfig *clientcmdapi.Config) ([]types.Diagnostic, bool, error) {
 	available := availableClientDiagnostics

-	osClient, kubeClient, clientErr := o.Factory.Clients()
+	osClient, _, kubeClient, clientErr := o.Factory.Clients()
 	if clientErr != nil {
 		o.Logger.Notice("CED0001", "Could not configure a client, so client diagnostics are limited to testing configuration and connection")
 		available = sets.NewString(clientdiags.ConfigContextsName)
... ...
@@ -5,7 +5,7 @@ import (
 	"regexp"
 	"strings"

-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	clientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
 	"k8s.io/kubernetes/pkg/util/sets"
... ...
@@ -44,7 +44,7 @@ func (o DiagnosticsOptions) buildClusterDiagnostics(rawConfig *clientcmdapi.Conf

 	var (
 		clusterClient  *client.Client
-		kclusterClient *kclient.Client
+		kclusterClient *kclientset.Clientset
 	)

 	clusterClient, kclusterClient, found, serverUrl, err := o.findClusterClients(rawConfig)
... ...
@@ -84,7 +84,7 @@ func (o DiagnosticsOptions) buildClusterDiagnostics(rawConfig *clientcmdapi.Conf
 }

 // attempts to find which context in the config might be a cluster-admin for the server in the current context.
-func (o DiagnosticsOptions) findClusterClients(rawConfig *clientcmdapi.Config) (*client.Client, *kclient.Client, bool, string, error) {
+func (o DiagnosticsOptions) findClusterClients(rawConfig *clientcmdapi.Config) (*client.Client, *kclientset.Clientset, bool, string, error) {
 	if o.ClientClusterContext != "" { // user has specified cluster context to use
 		if context, exists := rawConfig.Contexts[o.ClientClusterContext]; exists {
 			configErr := fmt.Errorf("Specified '%s' as cluster-admin context, but it was not found in your client configuration.", o.ClientClusterContext)
... ...
@@ -120,13 +120,13 @@ func (o DiagnosticsOptions) findClusterClients(rawConfig *clientcmdapi.Config) (
 }

 // makes the client from the specified context and determines whether it is a cluster-admin.
-func (o DiagnosticsOptions) makeClusterClients(rawConfig *clientcmdapi.Config, contextName string, context *clientcmdapi.Context) (*client.Client, *kclient.Client, bool, string, error) {
+func (o DiagnosticsOptions) makeClusterClients(rawConfig *clientcmdapi.Config, contextName string, context *clientcmdapi.Context) (*client.Client, *kclientset.Clientset, bool, string, error) {
 	overrides := &clientcmd.ConfigOverrides{Context: *context}
 	clientConfig := clientcmd.NewDefaultClientConfig(*rawConfig, overrides)
 	serverUrl := rawConfig.Clusters[context.Cluster].Server
 	factory := osclientcmd.NewFactory(clientConfig)
 	o.Logger.Debug("CED1005", fmt.Sprintf("Checking if context is cluster-admin: '%s'", contextName))
-	if osClient, kubeClient, err := factory.Clients(); err != nil {
+	if osClient, _, kubeClient, err := factory.Clients(); err != nil {
 		o.Logger.Debug("CED1006", fmt.Sprintf("Error creating client for context '%s':\n%v", contextName, err))
 		return nil, nil, false, "", nil
 	} else {
... ...
@@ -134,7 +134,7 @@ func (o NetworkPodDiagnosticsOptions) buildNetworkPodDiagnostics() ([]types.Diag
 	clientFlags := flag.NewFlagSet("client", flag.ContinueOnError) // hide the extensive set of client flags
 	factory := osclientcmd.New(clientFlags)                        // that would otherwise be added to this command

-	osClient, kubeClient, clientErr := factory.Clients()
+	osClient, _, kubeClient, clientErr := factory.Clients()
 	if clientErr != nil {
 		return diagnostics, false, []error{clientErr}
 	}
... ...
@@ -95,7 +95,7 @@ func (o *GroupModificationOptions) Complete(f *clientcmd.Factory, args []string)
 	o.Group = args[0]
 	o.Users = append(o.Users, args[1:]...)

-	osClient, _, err := f.Clients()
+	osClient, _, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -79,7 +79,7 @@ func (o *NewGroupOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, arg
 		o.Users = append(o.Users, args[1:]...)
 	}

-	osClient, _, err := f.Clients()
+	osClient, _, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -147,7 +147,7 @@ func (o *PruneOptions) Complete(whitelistFile, blacklistFile, configFile string,
 		return err
 	}

-	osClient, _, err := f.Clients()
+	osClient, _, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -220,7 +220,7 @@ func (o *SyncOptions) Complete(typeArg, whitelistFile, blacklistFile, configFile
 		return err
 	}

-	osClient, _, err := f.Clients()
+	osClient, _, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -129,7 +129,7 @@ func (o *MigrateImageReferenceOptions) Complete(f *clientcmd.Factory, c *cobra.C
 		return err
 	}

-	osclient, _, err := f.Clients()
+	osclient, _, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -131,7 +131,7 @@ func (o *ResourceOptions) Complete(f *clientcmd.Factory, c *cobra.Command) error
 		}
 	}

-	oclient, _, err := f.Clients()
+	oclient, _, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -13,7 +13,7 @@ import (
 	kapi "k8s.io/kubernetes/pkg/api"
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/meta"
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/kubectl/resource"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -30,7 +30,7 @@ import (
 type ProjectOptions struct {
 	DefaultNamespace string
 	Oclient          *osclient.Client
-	Kclient          *kclient.Client
+	Kclient          *kclientset.Clientset
 	Out              io.Writer

 	Mapper            meta.RESTMapper
... ...
@@ -49,7 +49,7 @@ func (p *ProjectOptions) Complete(f *clientcmd.Factory, c *cobra.Command, args [
 	if err != nil {
 		return err
 	}
-	oc, kc, err := f.Clients()
+	oc, _, kc, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -50,7 +50,7 @@ func (n *NodeOptions) Complete(f *clientcmd.Factory, c *cobra.Command, args []st
 	if err != nil {
 		return err
 	}
-	_, kc, err := f.Clients()
+	_, kc, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -117,7 +117,7 @@ func (o *canIOptions) Complete(f *clientcmd.Factory, args []string) error {
 	}

 	var err error
-	oclient, _, err := f.Clients()
+	oclient, _, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -251,7 +251,7 @@ func (o *RoleModificationOptions) CompleteUserWithSA(f *clientcmd.Factory, args
 		return errors.New("you must specify at least one user or service account")
 	}

-	osClient, _, err := f.Clients()
+	osClient, _, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -277,7 +277,7 @@ func (o *RoleModificationOptions) Complete(f *clientcmd.Factory, args []string,
 	o.RoleName = args[0]
 	*target = append(*target, args[1:]...)

-	osClient, _, err := f.Clients()
+	osClient, _, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -8,7 +8,8 @@ import (
 	"github.com/spf13/cobra"

 	kapi "k8s.io/kubernetes/pkg/api"
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
+	adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"

 	authorizationapi "github.com/openshift/origin/pkg/authorization/api"
... ...
@@ -35,7 +36,7 @@ var (

 type SCCModificationOptions struct {
 	SCCName      string
-	SCCInterface kclient.SecurityContextConstraintsInterface
+	SCCInterface kcoreclient.SecurityContextConstraintsGetter

 	DefaultSubjectNamespace string
 	Subjects                []kapi.ObjectReference
... ...
@@ -144,11 +145,11 @@ func (o *SCCModificationOptions) CompleteUsers(f *clientcmd.Factory, args []stri
 		return errors.New("you must specify at least one user or service account")
 	}

-	var err error
-	_, o.SCCInterface, err = f.Clients()
+	_, kc, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
+	o.SCCInterface = adapter.FromUnversionedClient(kc).Core()

 	o.DefaultSubjectNamespace, _, err = f.DefaultNamespace()
 	if err != nil {
... ...
@@ -170,11 +171,11 @@ func (o *SCCModificationOptions) CompleteGroups(f *clientcmd.Factory, args []str
 	o.SCCName = args[0]
 	o.Subjects = authorizationapi.BuildSubjects([]string{}, args[1:], uservalidation.ValidateUserName, uservalidation.ValidateGroupName)

-	var err error
-	_, o.SCCInterface, err = f.Clients()
+	_, kc, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
+	o.SCCInterface = adapter.FromUnversionedClient(kc).Core()

 	o.DefaultSubjectNamespace, _, err = f.DefaultNamespace()
 	if err != nil {
... ...
@@ -5,7 +5,8 @@ import (
 	"testing"

 	kapi "k8s.io/kubernetes/pkg/api"
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
+	"k8s.io/kubernetes/pkg/client/testing/core"
 	"k8s.io/kubernetes/pkg/runtime"

 	authorizationapi "github.com/openshift/origin/pkg/authorization/api"
... ...
@@ -116,19 +117,19 @@ func TestModifySCC(t *testing.T) {
 	}

 	for tcName, tc := range tests {
-		fakeClient := ktestclient.NewSimpleFake()
-		fakeClient.PrependReactor("get", "securitycontextconstraints", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
+		fakeClient := fake.NewSimpleClientset()
+		fakeClient.PrependReactor("get", "securitycontextconstraints", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 			return true, tc.startingSCC, nil
 		})
 		var actualSCC *kapi.SecurityContextConstraints
-		fakeClient.PrependReactor("update", "securitycontextconstraints", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
-			actualSCC = action.(ktestclient.UpdateAction).GetObject().(*kapi.SecurityContextConstraints)
+		fakeClient.PrependReactor("update", "securitycontextconstraints", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+			actualSCC = action.(core.UpdateAction).GetObject().(*kapi.SecurityContextConstraints)
 			return true, actualSCC, nil
 		})

 		o := &SCCModificationOptions{
 			SCCName:                 "foo",
-			SCCInterface:            fakeClient,
+			SCCInterface:            fakeClient.Core(),
 			DefaultSubjectNamespace: "",
 			Subjects:                tc.subjects,
 		}
... ...
@@ -115,7 +115,7 @@ func NewCmdReconcileClusterRoleBindings(name, fullName string, f *clientcmd.Fact
 }

 func (o *ReconcileClusterRoleBindingsOptions) Complete(cmd *cobra.Command, f *clientcmd.Factory, args []string, excludeUsers, excludeGroups []string) error {
-	oclient, _, err := f.Clients()
+	oclient, _, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -110,7 +110,7 @@ func NewCmdReconcileClusterRoles(name, fullName string, f *clientcmd.Factory, ou
 }

 func (o *ReconcileClusterRolesOptions) Complete(cmd *cobra.Command, f *clientcmd.Factory, args []string) error {
-	oclient, _, err := f.Clients()
+	oclient, _, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -10,7 +10,7 @@ import (

 	kapi "k8s.io/kubernetes/pkg/api"
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
 	sccutil "k8s.io/kubernetes/pkg/securitycontextconstraints/util"
 	"k8s.io/kubernetes/pkg/util/sets"
... ...
@@ -38,8 +38,8 @@ type ReconcileSCCOptions struct {
 	Out    io.Writer
 	Output string

-	SCCClient kclient.SecurityContextConstraintInterface
-	NSClient  kclient.NamespaceInterface
+	SCCClient kcoreclient.SecurityContextConstraintsInterface
+	NSClient  kcoreclient.NamespaceInterface
 }

 var (
... ...
@@ -112,12 +112,12 @@ func (o *ReconcileSCCOptions) Complete(cmd *cobra.Command, f *clientcmd.Factory,
 		return kcmdutil.UsageError(cmd, "no arguments are allowed")
 	}

-	_, kClient, err := f.Clients()
+	_, _, kClient, err := f.Clients()
 	if err != nil {
 		return err
 	}
-	o.SCCClient = kClient.SecurityContextConstraints()
-	o.NSClient = kClient.Namespaces()
+	o.SCCClient = kClient.Core().SecurityContextConstraints()
+	o.NSClient = kClient.Core().Namespaces()
 	o.Output = kcmdutil.GetFlagString(cmd, "output")

 	return nil
... ...
@@ -84,7 +84,7 @@ func (o *RemoveFromProjectOptions) Complete(f *clientcmd.Factory, args []string,
 	*target = append(*target, args...)

 	var err error
-	if o.Client, _, err = f.Clients(); err != nil {
+	if o.Client, _, _, err = f.Clients(); err != nil {
 		return err
 	}
 	if o.BindingNamespace, _, err = f.DefaultNamespace(); err != nil {
... ...
@@ -44,7 +44,7 @@ func NewCmdWhoCan(name, fullName string, f *clientcmd.Factory, out io.Writer) *c
 			}

 			var err error
-			options.client, _, err = f.Clients()
+			options.client, _, _, err = f.Clients()
 			kcmdutil.CheckErr(err)

 			options.bindingNamespace, _, err = f.DefaultNamespace()
... ...
@@ -55,7 +55,7 @@ func NewCmdNewProject(name, fullName string, f *clientcmd.Factory, out io.Writer
 			}

 			var err error
-			if options.Client, _, err = f.Clients(); err != nil {
+			if options.Client, _, _, err = f.Clients(); err != nil {
 				kcmdutil.CheckErr(err)
 			}

... ...
@@ -97,7 +97,7 @@ func (o *PruneBuildsOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command,
 	}
 	o.Out = out

-	osClient, _, err := f.Clients()
+	osClient, _, _, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -10,7 +10,7 @@ import (
 	"github.com/spf13/cobra"

 	kapi "k8s.io/kubernetes/pkg/api"
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"

 	"github.com/openshift/origin/pkg/client"
... ...
@@ -47,7 +47,7 @@ type PruneDeploymentsOptions struct {
 	Namespace       string

 	OSClient client.Interface
-	KClient  kclient.Interface
+	KClient  kclientset.Interface
 	Out      io.Writer
 }

... ...
@@ -99,7 +99,7 @@ func (o *PruneDeploymentsOptions) Complete(f *clientcmd.Factory, cmd *cobra.Comm
 	}
 	o.Out = out

-	osClient, kClient, err := f.Clients()
+	osClient, _, kClient, err := f.Clients()
 	if err != nil {
 		return err
 	}
... ...
@@ -134,7 +134,7 @@ func (o PruneDeploymentsOptions) Run() error {
 		deploymentConfigs = append(deploymentConfigs, &deploymentConfigList.Items[i])
 	}

-	deploymentList, err := o.KClient.ReplicationControllers(o.Namespace).List(kapi.ListOptions{})
+	deploymentList, err := o.KClient.Core().ReplicationControllers(o.Namespace).List(kapi.ListOptions{})
 	if err != nil {
 		return err
 	}
... ...
@@ -159,7 +159,7 @@ func (o PruneDeploymentsOptions) Run() error {
 	deploymentDeleter := &describingDeploymentDeleter{w: w}

 	if o.Confirm {
-		deploymentDeleter.delegate = prune.NewDeploymentDeleter(o.KClient, o.KClient)
+		deploymentDeleter.delegate = prune.NewDeploymentDeleter(o.KClient.Core(), o.KClient.Core())
 	} else {
 		fmt.Fprintln(os.Stderr, "Dry run enabled - no modifications will be made. Add --confirm to remove deployments")
 	}
... ...
@@ -4,13 +4,13 @@ import (
 	"io/ioutil"
 	"testing"

-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"

 	"github.com/openshift/origin/pkg/client/testclient"
 )

 func TestDeploymentPruneNamespaced(t *testing.T) {
-	kFake := ktestclient.NewSimpleFake()
+	kFake := fake.NewSimpleClientset()
 	osFake := testclient.NewSimpleFake()
 	opts := &PruneDeploymentsOptions{
 		Namespace: "foo",
... ...
@@ -15,8 +15,8 @@ import (

 	"github.com/spf13/cobra"
 	kapi "k8s.io/kubernetes/pkg/api"
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/client/restclient"
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
 	knet "k8s.io/kubernetes/pkg/util/net"

... ...
@@ -75,7 +75,7 @@ type PruneImagesOptions struct {
 	Namespace           string

 	OSClient       client.Interface
-	KClient        kclient.Interface
+	KClient        kclientset.Interface
 	RegistryClient *http.Client
 	Out            io.Writer
 }
... ...
@@ -180,12 +180,12 @@ func (o PruneImagesOptions) Run() error {
 		return err
 	}

-	allPods, err := o.KClient.Pods(o.Namespace).List(kapi.ListOptions{})
+	allPods, err := o.KClient.Core().Pods(o.Namespace).List(kapi.ListOptions{})
 	if err != nil {
 		return err
 	}

-	allRCs, err := o.KClient.ReplicationControllers(o.Namespace).List(kapi.ListOptions{})
+	allRCs, err := o.KClient.Core().ReplicationControllers(o.Namespace).List(kapi.ListOptions{})
 	if err != nil {
 		return err
 	}
... ...
@@ -209,7 +209,7 @@ func (o PruneImagesOptions) Run() error {
 		return err
 	}

-	limitRangesList, err := o.KClient.LimitRanges(o.Namespace).List(kapi.ListOptions{})
+	limitRangesList, err := o.KClient.Core().LimitRanges(o.Namespace).List(kapi.ListOptions{})
 	if err != nil {
 		return err
 	}
... ...
@@ -424,7 +424,7 @@ func (p *describingManifestDeleter) DeleteManifest(registryClient *http.Client,
424 424
 }
425 425
 
426 426
 // getClients returns a Kube client, OpenShift client, and registry client.
427
-func getClients(f *clientcmd.Factory, caBundle string) (*client.Client, *kclient.Client, *http.Client, error) {
427
+func getClients(f *clientcmd.Factory, caBundle string) (*client.Client, *kclientset.Clientset, *http.Client, error) {
428 428
 	clientConfig, err := f.OpenShiftClientConfig.ClientConfig()
429 429
 	if err != nil {
430 430
 		return nil, nil, nil, err
... ...
@@ -433,13 +433,13 @@ func getClients(f *clientcmd.Factory, caBundle string) (*client.Client, *kclient
433 433
 	var (
434 434
 		token          string
435 435
 		osClient       *client.Client
436
-		kClient        *kclient.Client
436
+		kClient        *kclientset.Clientset
437 437
 		registryClient *http.Client
438 438
 	)
439 439
 
440 440
 	switch {
441 441
 	case len(clientConfig.BearerToken) > 0:
442
-		osClient, kClient, err = f.Clients()
442
+		osClient, _, kClient, err = f.Clients()
443 443
 		if err != nil {
444 444
 			return nil, nil, nil, err
445 445
 		}
... ...
@@ -4,13 +4,13 @@ import (
4 4
 	"io/ioutil"
5 5
 	"testing"
6 6
 
7
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
7
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
8 8
 
9 9
 	"github.com/openshift/origin/pkg/client/testclient"
10 10
 )
11 11
 
12 12
 func TestImagePruneNamespaced(t *testing.T) {
13
-	kFake := ktestclient.NewSimpleFake()
13
+	kFake := fake.NewSimpleClientset()
14 14
 	osFake := testclient.NewSimpleFake()
15 15
 	opts := &PruneImagesOptions{
16 16
 		Namespace: "foo",
... ...
@@ -16,8 +16,8 @@ import (
16 16
 	"k8s.io/kubernetes/pkg/api/errors"
17 17
 	"k8s.io/kubernetes/pkg/api/resource"
18 18
 	"k8s.io/kubernetes/pkg/apis/extensions"
19
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
19 20
 	"k8s.io/kubernetes/pkg/client/restclient"
20
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
21 21
 	kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
22 22
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
23 23
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -88,7 +88,7 @@ type RegistryOptions struct {
88 88
 	nodeSelector  map[string]string
89 89
 	ports         []kapi.ContainerPort
90 90
 	namespace     string
91
-	serviceClient kclient.ServicesNamespacer
91
+	serviceClient kcoreclient.ServicesGetter
92 92
 	image         string
93 93
 }
94 94
 
... ...
@@ -242,10 +242,11 @@ func (opts *RegistryOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command,
242 242
 		return fmt.Errorf("error getting namespace: %v", nsErr)
243 243
 	}
244 244
 
245
-	var kClientErr error
246
-	if _, opts.serviceClient, kClientErr = f.Clients(); kClientErr != nil {
245
+	_, _, kClient, kClientErr := f.Clients()
246
+	if kClientErr != nil {
247 247
 		return fmt.Errorf("error getting client: %v", kClientErr)
248 248
 	}
249
+	opts.serviceClient = kClient.Core()
249 250
 
250 251
 	opts.Config.Action.Bulk.Mapper = clientcmd.ResourceMapper(f)
251 252
 	opts.Config.Action.Out, opts.Config.Action.ErrOut = out, errout
... ...
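A related pattern: option structs narrow their client fields from the unversioned namespacer interfaces (kclient.ServicesNamespacer above, others later in this diff) to the generated typed getters (kcoreclient.ServicesGetter), and the clientset's Core() accessor is assigned directly because the typed core client implements those getters. A minimal sketch with hypothetical names:

// Sketch only, not part of the patch; the struct and function names are made up.
package example

import (
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
)

type options struct {
	serviceClient kcoreclient.ServicesGetter
}

func wire(o *options, kc kclientset.Interface) {
	// Core() returns the typed core client, which satisfies ServicesGetter
	// and the other per-resource getters used elsewhere in this diff.
	o.serviceClient = kc.Core()
}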
@@ -550,7 +550,7 @@ func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out, errout io.Write
550 550
 	if err != nil {
551 551
 		return fmt.Errorf("error getting client: %v", err)
552 552
 	}
553
-	_, kClient, err := f.Clients()
553
+	_, kClient, _, err := f.Clients()
554 554
 	if err != nil {
555 555
 		return fmt.Errorf("error getting client: %v", err)
556 556
 	}
... ...
@@ -8,7 +8,7 @@ import (
8 8
 	"github.com/spf13/cobra"
9 9
 
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
11
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
12 12
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
13 13
 	"k8s.io/kubernetes/pkg/util/sets"
14 14
 
... ...
@@ -68,13 +68,13 @@ type TopImagesOptions struct {
68 68
 	// helpers
69 69
 	out      io.Writer
70 70
 	osClient client.Interface
71
-	kClient  kclient.Interface
71
+	kClient  kclientset.Interface
72 72
 }
73 73
 
74 74
 // Complete turns a partially defined TopImagesOptions into a solvent structure
75 75
 // which can be validated and used for showing limits usage.
76 76
 func (o *TopImagesOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, args []string, out io.Writer) error {
77
-	osClient, kClient, err := f.Clients()
77
+	osClient, _, kClient, err := f.Clients()
78 78
 	if err != nil {
79 79
 		return err
80 80
 	}
... ...
@@ -96,7 +96,7 @@ func (o *TopImagesOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, ar
96 96
 	}
97 97
 	o.Streams = allStreams
98 98
 
99
-	allPods, err := kClient.Pods(namespace).List(kapi.ListOptions{})
99
+	allPods, err := kClient.Core().Pods(namespace).List(kapi.ListOptions{})
100 100
 	if err != nil {
101 101
 		return err
102 102
 	}
... ...
@@ -65,7 +65,7 @@ type TopImageStreamsOptions struct {
65 65
 // Complete turns a partially defined TopImageStreamsOptions into a solvent structure
66 66
 // which can be validated and used for showing limits usage.
67 67
 func (o *TopImageStreamsOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, args []string, out io.Writer) error {
68
-	osClient, _, err := f.Clients()
68
+	osClient, _, _, err := f.Clients()
69 69
 	if err != nil {
70 70
 		return err
71 71
 	}
... ...
@@ -82,7 +82,7 @@ func RunBuildLogs(fullName string, f *clientcmd.Factory, out io.Writer, cmd *cob
82 82
 		return err
83 83
 	}
84 84
 
85
-	c, _, err := f.Clients()
85
+	c, _, _, err := f.Clients()
86 86
 	if err != nil {
87 87
 		return err
88 88
 	}
... ...
@@ -123,7 +123,7 @@ func (o *CancelBuildOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command,
123 123
 		}
124 124
 	}
125 125
 
126
-	client, _, err := f.Clients()
126
+	client, _, _, err := f.Clients()
127 127
 	if err != nil {
128 128
 		return err
129 129
 	}
... ...
@@ -118,7 +118,7 @@ func (o *CreateClusterQuotaOptions) Complete(cmd *cobra.Command, f *clientcmd.Fa
118 118
 		o.ClusterQuota.Spec.Quota.Hard[kapi.ResourceName(tokens[0])] = quantity
119 119
 	}
120 120
 
121
-	o.Client, _, err = f.Clients()
121
+	o.Client, _, _, err = f.Clients()
122 122
 	if err != nil {
123 123
 		return err
124 124
 	}
... ...
@@ -105,7 +105,7 @@ func (o *CreateDeploymentConfigOptions) Complete(cmd *cobra.Command, f *clientcm
105 105
 		return err
106 106
 	}
107 107
 
108
-	o.Client, _, err = f.Clients()
108
+	o.Client, _, _, err = f.Clients()
109 109
 	if err != nil {
110 110
 		return err
111 111
 	}
... ...
@@ -87,7 +87,7 @@ func (o *CreateIdentityOptions) Complete(cmd *cobra.Command, f *clientcmd.Factor
87 87
 
88 88
 	o.DryRun = cmdutil.GetFlagBool(cmd, "dry-run")
89 89
 
90
-	client, _, err := f.Clients()
90
+	client, _, _, err := f.Clients()
91 91
 	if err != nil {
92 92
 		return err
93 93
 	}
... ...
@@ -88,7 +88,7 @@ func (o *CreateImageStreamOptions) Complete(cmd *cobra.Command, f *clientcmd.Fac
88 88
 		return err
89 89
 	}
90 90
 
91
-	o.Client, _, err = f.Clients()
91
+	o.Client, _, _, err = f.Clients()
92 92
 	if err != nil {
93 93
 		return err
94 94
 	}
... ...
@@ -71,7 +71,7 @@ func (o *CreatePolicyBindingOptions) Complete(cmd *cobra.Command, f *clientcmd.F
71 71
 	}
72 72
 	o.BindingNamespace = namespace
73 73
 
74
-	client, _, err := f.Clients()
74
+	client, _, _, err := f.Clients()
75 75
 	if err != nil {
76 76
 		return err
77 77
 	}
... ...
@@ -91,7 +91,7 @@ func NewCmdCreateEdgeRoute(fullName string, f *clientcmd.Factory, out io.Writer)
91 91
 
92 92
 // CreateEdgeRoute implements the behavior to run the create edge route command.
93 93
 func CreateEdgeRoute(f *clientcmd.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
94
-	oc, kc, err := f.Clients()
94
+	oc, kc, _, err := f.Clients()
95 95
 	if err != nil {
96 96
 		return err
97 97
 	}
... ...
@@ -206,7 +206,7 @@ func NewCmdCreatePassthroughRoute(fullName string, f *clientcmd.Factory, out io.
206 206
 
207 207
 // CreatePassthroughRoute implements the behavior to run the create passthrough route command.
208 208
 func CreatePassthroughRoute(f *clientcmd.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
209
-	oc, kc, err := f.Clients()
209
+	oc, kc, _, err := f.Clients()
210 210
 	if err != nil {
211 211
 		return err
212 212
 	}
... ...
@@ -311,7 +311,7 @@ func NewCmdCreateReencryptRoute(fullName string, f *clientcmd.Factory, out io.Wr
311 311
 
312 312
 // CreateReencryptRoute implements the behavior to run the create reencrypt route command.
313 313
 func CreateReencryptRoute(f *clientcmd.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
314
-	oc, kc, err := f.Clients()
314
+	oc, kc, _, err := f.Clients()
315 315
 	if err != nil {
316 316
 		return err
317 317
 	}
... ...
@@ -81,7 +81,7 @@ func (o *CreateUserOptions) Complete(cmd *cobra.Command, f *clientcmd.Factory, a
81 81
 
82 82
 	o.DryRun = cmdutil.GetFlagBool(cmd, "dry-run")
83 83
 
84
-	client, _, err := f.Clients()
84
+	client, _, _, err := f.Clients()
85 85
 	if err != nil {
86 86
 		return err
87 87
 	}
... ...
@@ -80,7 +80,7 @@ func (o *CreateUserIdentityMappingOptions) Complete(cmd *cobra.Command, f *clien
80 80
 
81 81
 	o.DryRun = cmdutil.GetFlagBool(cmd, "dry-run")
82 82
 
83
-	client, _, err := f.Clients()
83
+	client, _, _, err := f.Clients()
84 84
 	if err != nil {
85 85
 		return err
86 86
 	}
... ...
@@ -274,7 +274,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, f *clientcmd.Factory, args [
274 274
 	}
275 275
 	o.Attach.Config = config
276 276
 
277
-	_, kc, err := f.Clients()
277
+	_, kc, _, err := f.Clients()
278 278
 	if err != nil {
279 279
 		return err
280 280
 	}
... ...
@@ -136,7 +136,7 @@ func (o *DeployOptions) Complete(f *clientcmd.Factory, args []string, out io.Wri
136 136
 	}
137 137
 	var err error
138 138
 
139
-	o.osClient, o.kubeClient, err = f.Clients()
139
+	o.osClient, o.kubeClient, _, err = f.Clients()
140 140
 	if err != nil {
141 141
 		return err
142 142
 	}
... ...
@@ -76,7 +76,7 @@ func validate(cmd *cobra.Command, f *clientcmd.Factory, args []string) error {
76 76
 		return err
77 77
 	}
78 78
 
79
-	_, kc, err := f.Clients()
79
+	_, kc, _, err := f.Clients()
80 80
 	if err != nil {
81 81
 		return err
82 82
 	}
... ...
@@ -14,7 +14,6 @@ import (
14 14
 	"k8s.io/kubernetes/pkg/api/meta"
15 15
 	"k8s.io/kubernetes/pkg/api/unversioned"
16 16
 	"k8s.io/kubernetes/pkg/apis/extensions"
17
-	clientset "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
18 17
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
19 18
 	"k8s.io/kubernetes/pkg/kubectl/resource"
20 19
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -526,16 +525,15 @@ func (o *IdleOptions) RunIdle(f *clientcmd.Factory) error {
526 526
 		fmt.Fprintf(o.errOut, "warning: continuing on for valid scalable resources, but an error occured while finding scalable resources to idle: %v", err)
527 527
 	}
528 528
 
529
-	oclient, kclient, err := f.Clients()
529
+	oclient, _, kclient, err := f.Clients()
530 530
 	if err != nil {
531 531
 		return err
532 532
 	}
533 533
 
534
-	delegScaleGetter := osclient.NewDelegatingScaleNamespacer(oclient, kclient)
534
+	delegScaleGetter := osclient.NewDelegatingScaleNamespacer(oclient, kclient.Extensions())
535 535
 	dcGetter := deployclient.New(oclient.RESTClient)
536
-	rcGetter := clientset.FromUnversionedClient(kclient)
537 536
 
538
-	scaleAnnotater := utilunidling.NewScaleAnnotater(delegScaleGetter, dcGetter, rcGetter, func(currentReplicas int32, annotations map[string]string) {
537
+	scaleAnnotater := utilunidling.NewScaleAnnotater(delegScaleGetter, dcGetter, kclient.Core(), func(currentReplicas int32, annotations map[string]string) {
539 538
 		annotations[unidlingapi.IdledAtAnnotation] = nowTime.UTC().Format(time.RFC3339)
540 539
 		annotations[unidlingapi.PreviousScaleAnnotation] = fmt.Sprintf("%v", currentReplicas)
541 540
 	})
... ...
@@ -626,7 +624,7 @@ func (o *IdleOptions) RunIdle(f *clientcmd.Factory) error {
626 626
 		idled := ""
627 627
 		if !o.dryRun {
628 628
 			info.scale.Spec.Replicas = 0
629
-			scaleUpdater := utilunidling.NewScaleUpdater(f.JSONEncoder(), info.namespace, dcGetter, rcGetter)
629
+			scaleUpdater := utilunidling.NewScaleUpdater(f.JSONEncoder(), info.namespace, dcGetter, kclient.Core())
630 630
 			if err := scaleAnnotater.UpdateObjectScale(scaleUpdater, info.namespace, scaleRef, info.obj, info.scale); err != nil {
631 631
 				fmt.Fprintf(o.errOut, "error: unable to scale %s %s/%s to 0, but still listed as target for unidling: %v\n", scaleRef.Kind, info.namespace, scaleRef.Name, err)
632 632
 				hadError = true
... ...
@@ -137,7 +137,7 @@ func (o *AppJSONOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, args
137 137
 	}
138 138
 	o.Namespace = ns
139 139
 
140
-	o.Client, _, err = f.Clients()
140
+	o.Client, _, _, err = f.Clients()
141 141
 	return err
142 142
 }
143 143
 
... ...
@@ -129,7 +129,7 @@ func (o *DockerComposeOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command
129 129
 	}
130 130
 	o.Namespace = ns
131 131
 
132
-	o.Client, _, err = f.Clients()
132
+	o.Client, _, _, err = f.Clients()
133 133
 	return err
134 134
 }
135 135
 
... ...
@@ -98,7 +98,7 @@ func (o *ImportImageOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command,
98 98
 	}
99 99
 	o.Namespace = namespace
100 100
 
101
-	osClient, _, err := f.Clients()
101
+	osClient, _, _, err := f.Clients()
102 102
 	if err != nil {
103 103
 		return err
104 104
 	}
... ...
@@ -17,8 +17,8 @@ import (
17 17
 
18 18
 	kapi "k8s.io/kubernetes/pkg/api"
19 19
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
20
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
20 21
 	"k8s.io/kubernetes/pkg/client/restclient"
21
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
22 22
 	ctl "k8s.io/kubernetes/pkg/kubectl"
23 23
 	kcmd "k8s.io/kubernetes/pkg/kubectl/cmd"
24 24
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
... ...
@@ -348,8 +348,8 @@ func followInstallation(config *newcmd.AppConfig, input string, pod *kapi.Pod, l
348 348
 
349 349
 	// we cannot retrieve logs until the pod is out of pending
350 350
 	// TODO: move this to the server side
351
-	podClient := config.KubeClient.Pods(pod.Namespace)
352
-	if err := wait.PollImmediate(500*time.Millisecond, 60*time.Second, installationStarted(podClient, pod.Name, config.KubeClient.Secrets(pod.Namespace))); err != nil {
351
+	podClient := config.KubeClient.Core().Pods(pod.Namespace)
352
+	if err := wait.PollImmediate(500*time.Millisecond, 60*time.Second, installationStarted(podClient, pod.Name, config.KubeClient.Core().Secrets(pod.Namespace))); err != nil {
353 353
 		return err
354 354
 	}
355 355
 
... ...
@@ -384,7 +384,7 @@ func followInstallation(config *newcmd.AppConfig, input string, pod *kapi.Pod, l
384 384
 	return nil
385 385
 }
386 386
 
387
-func installationStarted(c kclient.PodInterface, name string, s kclient.SecretsInterface) wait.ConditionFunc {
387
+func installationStarted(c kcoreclient.PodInterface, name string, s kcoreclient.SecretInterface) wait.ConditionFunc {
388 388
 	return func() (bool, error) {
389 389
 		pod, err := c.Get(name)
390 390
 		if err != nil {
... ...
@@ -397,7 +397,7 @@ func installationStarted(c kclient.PodInterface, name string, s kclient.SecretsI
397 397
 		if secret, err := s.Get(name); err == nil {
398 398
 			if secret.Annotations[newcmd.GeneratedForJob] == "true" &&
399 399
 				secret.Annotations[newcmd.GeneratedForJobFor] == pod.Annotations[newcmd.GeneratedForJobFor] {
400
-				if err := s.Delete(name); err != nil {
400
+				if err := s.Delete(name, nil); err != nil {
401 401
 					glog.V(4).Infof("Failed to delete install secret %s: %v", name, err)
402 402
 				}
403 403
 			}
... ...
@@ -406,7 +406,7 @@ func installationStarted(c kclient.PodInterface, name string, s kclient.SecretsI
406 406
 	}
407 407
 }
408 408
 
409
-func installationComplete(c kclient.PodInterface, name string, out io.Writer) wait.ConditionFunc {
409
+func installationComplete(c kcoreclient.PodInterface, name string, out io.Writer) wait.ConditionFunc {
410 410
 	return func() (bool, error) {
411 411
 		pod, err := c.Get(name)
412 412
 		if err != nil {
... ...
@@ -475,7 +475,7 @@ func CompleteAppConfig(config *newcmd.AppConfig, f *clientcmd.Factory, c *cobra.
475 475
 		return err
476 476
 	}
477 477
 
478
-	osclient, kclient, err := f.Clients()
478
+	osclient, _, kclient, err := f.Clients()
479 479
 	if err != nil {
480 480
 		return err
481 481
 	}
... ...
@@ -150,7 +150,7 @@ func RunProcess(f *clientcmd.Factory, out, errout io.Writer, cmd *cobra.Command,
150 150
 
151 151
 	mapper, typer := f.Object(false)
152 152
 
153
-	client, _, err := f.Clients()
153
+	client, _, _, err := f.Clients()
154 154
 	if err != nil {
155 155
 		return err
156 156
 	}
... ...
@@ -8,8 +8,8 @@ import (
8 8
 
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10 10
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
11
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11 12
 	"k8s.io/kubernetes/pkg/client/restclient"
12
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
13 13
 	kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
14 14
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
15 15
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
... ...
@@ -27,7 +27,7 @@ import (
27 27
 type ProjectOptions struct {
28 28
 	Config       clientcmdapi.Config
29 29
 	ClientConfig *restclient.Config
30
-	ClientFn     func() (*client.Client, kclient.Interface, error)
30
+	ClientFn     func() (*client.Client, kclientset.Interface, error)
31 31
 	Out          io.Writer
32 32
 	PathOptions  *kclientcmd.PathOptions
33 33
 
... ...
@@ -107,8 +107,9 @@ func (o *ProjectOptions) Complete(f *clientcmd.Factory, args []string, out io.Wr
107 107
 		return err
108 108
 	}
109 109
 
110
-	o.ClientFn = func() (*client.Client, kclient.Interface, error) {
111
-		return f.Clients()
110
+	o.ClientFn = func() (*client.Client, kclientset.Interface, error) {
111
+		oc, _, kc, err := f.Clients()
112
+		return oc, kc, err
112 113
 	}
113 114
 
114 115
 	o.Out = out
... ...
@@ -279,14 +280,14 @@ func (o ProjectOptions) RunProject() error {
279 279
 	return nil
280 280
 }
281 281
 
282
-func confirmProjectAccess(currentProject string, oClient *client.Client, kClient kclient.Interface) error {
282
+func confirmProjectAccess(currentProject string, oClient *client.Client, kClient kclientset.Interface) error {
283 283
 	_, projectErr := oClient.Projects().Get(currentProject)
284 284
 	if !kapierrors.IsNotFound(projectErr) && !kapierrors.IsForbidden(projectErr) {
285 285
 		return projectErr
286 286
 	}
287 287
 
288 288
 	// at this point we know the error is a not found or forbidden, but we'll test namespaces just in case we're running on kube
289
-	if _, err := kClient.Namespaces().Get(currentProject); err == nil {
289
+	if _, err := kClient.Core().Namespaces().Get(currentProject); err == nil {
290 290
 		return nil
291 291
 	}
292 292
 
... ...
@@ -294,7 +295,7 @@ func confirmProjectAccess(currentProject string, oClient *client.Client, kClient
294 294
 	return projectErr
295 295
 }
296 296
 
297
-func getProjects(oClient *client.Client, kClient kclient.Interface) ([]api.Project, error) {
297
+func getProjects(oClient *client.Client, kClient kclientset.Interface) ([]api.Project, error) {
298 298
 	projects, err := oClient.Projects().List(kapi.ListOptions{})
299 299
 	if err == nil {
300 300
 		return projects.Items, nil
... ...
@@ -304,7 +305,7 @@ func getProjects(oClient *client.Client, kClient kclient.Interface) ([]api.Proje
304 304
 		return nil, err
305 305
 	}
306 306
 
307
-	namespaces, err := kClient.Namespaces().List(kapi.ListOptions{})
307
+	namespaces, err := kClient.Core().Namespaces().List(kapi.ListOptions{})
308 308
 	if err != nil {
309 309
 		return nil, err
310 310
 	}
... ...
@@ -5,8 +5,8 @@ import (
5 5
 	"io"
6 6
 	"sort"
7 7
 
8
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
8 9
 	"k8s.io/kubernetes/pkg/client/restclient"
9
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10 10
 	kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
11 11
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
12 12
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
... ...
@@ -24,7 +24,7 @@ type ProjectsOptions struct {
24 24
 	Config       clientcmdapi.Config
25 25
 	ClientConfig *restclient.Config
26 26
 	Client       *client.Client
27
-	KubeClient   kclient.Interface
27
+	KubeClient   kclientset.Interface
28 28
 	Out          io.Writer
29 29
 	PathOptions  *kclientcmd.PathOptions
30 30
 
... ...
@@ -98,7 +98,7 @@ func (o *ProjectsOptions) Complete(f *clientcmd.Factory, args []string, commandN
98 98
 		return err
99 99
 	}
100 100
 
101
-	o.Client, o.KubeClient, err = f.Clients()
101
+	o.Client, _, o.KubeClient, err = f.Clients()
102 102
 	if err != nil {
103 103
 		return err
104 104
 	}
... ...
@@ -87,7 +87,7 @@ func NewCmdRequestProject(name, baseName string, f *clientcmd.Factory, out, erro
87 87
 			kcmdutil.CheckErr(o.Complete(f, cmd, args))
88 88
 
89 89
 			var err error
90
-			o.Client, _, err = f.Clients()
90
+			o.Client, _, _, err = f.Clients()
91 91
 			kcmdutil.CheckErr(err)
92 92
 
93 93
 			kcmdutil.CheckErr(o.Run())
... ...
@@ -137,7 +137,7 @@ func (o *RollbackOptions) Complete(f *clientcmd.Factory, args []string, out io.W
137 137
 		return resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), kapi.Codecs.UniversalDecoder())
138 138
 	}
139 139
 
140
-	oClient, kClient, err := f.Clients()
140
+	oClient, kClient, _, err := f.Clients()
141 141
 	if err != nil {
142 142
 		return err
143 143
 	}
... ...
@@ -9,7 +9,7 @@ import (
9 9
 	"github.com/spf13/cobra"
10 10
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
11 11
 	"k8s.io/kubernetes/pkg/api/meta"
12
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
12
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
13 13
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
14 14
 	"k8s.io/kubernetes/pkg/kubectl/resource"
15 15
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -47,7 +47,7 @@ type RolloutLatestOptions struct {
47 47
 	again  bool
48 48
 
49 49
 	oc              client.Interface
50
-	kc              kclient.Interface
50
+	kc              kclientset.Interface
51 51
 	baseCommandName string
52 52
 }
53 53
 
... ...
@@ -95,7 +95,7 @@ func (o *RolloutLatestOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command
95 95
 
96 96
 	o.DryRun = kcmdutil.GetFlagBool(cmd, "dry-run")
97 97
 
98
-	o.oc, o.kc, err = f.Clients()
98
+	o.oc, _, o.kc, err = f.Clients()
99 99
 	if err != nil {
100 100
 		return err
101 101
 	}
... ...
@@ -139,7 +139,7 @@ func (o RolloutLatestOptions) RunRolloutLatest() error {
139 139
 	}
140 140
 
141 141
 	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
142
-	deployment, err := o.kc.ReplicationControllers(config.Namespace).Get(deploymentName)
142
+	deployment, err := o.kc.Core().ReplicationControllers(config.Namespace).Get(deploymentName)
143 143
 	switch {
144 144
 	case err == nil:
145 145
 		// Reject attempts to start a concurrent deployment.
... ...
@@ -17,7 +17,7 @@ import (
17 17
 	"k8s.io/kubernetes/pkg/api/meta"
18 18
 	kresource "k8s.io/kubernetes/pkg/api/resource"
19 19
 	"k8s.io/kubernetes/pkg/api/unversioned"
20
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
20
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
21 21
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
22 22
 	"k8s.io/kubernetes/pkg/kubectl/resource"
23 23
 	"k8s.io/kubernetes/pkg/labels"
... ...
@@ -101,7 +101,7 @@ type VolumeOptions struct {
101 101
 	Typer                  runtime.ObjectTyper
102 102
 	RESTClientFactory      func(mapping *meta.RESTMapping) (resource.RESTClient, error)
103 103
 	UpdatePodSpecForObject func(obj runtime.Object, fn func(*kapi.PodSpec) error) (bool, error)
104
-	Client                 kclient.PersistentVolumeClaimsNamespacer
104
+	Client                 kcoreclient.PersistentVolumeClaimsGetter
105 105
 	Encoder                runtime.Encoder
106 106
 
107 107
 	// Resource selection
... ...
@@ -343,11 +343,11 @@ func (v *VolumeOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, out,
343 343
 	if err != nil {
344 344
 		return err
345 345
 	}
346
-	_, kc, err := f.Clients()
346
+	_, _, kc, err := f.Clients()
347 347
 	if err != nil {
348 348
 		return err
349 349
 	}
350
-	v.Client = kc
350
+	v.Client = kc.Core()
351 351
 
352 352
 	cmdNamespace, explicit, err := f.DefaultNamespace()
353 353
 	if err != nil {
... ...
@@ -215,7 +215,7 @@ func (o *StartBuildOptions) Complete(f *clientcmd.Factory, in io.Reader, out, er
215 215
 		return err
216 216
 	}
217 217
 
218
-	client, _, err := f.Clients()
218
+	client, _, _, err := f.Clients()
219 219
 	if err != nil {
220 220
 		return err
221 221
 	}
... ...
@@ -98,7 +98,7 @@ func (o *StatusOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, baseC
98 98
 	o.securityPolicyCommandFormat = "oadm policy add-scc-to-user anyuid -n %s -z %s"
99 99
 	o.setProbeCommandName = fmt.Sprintf("%s set probe", cmd.Parent().CommandPath())
100 100
 
101
-	client, kclient, err := f.Clients()
101
+	client, kclient, kclientset, err := f.Clients()
102 102
 	if err != nil {
103 103
 		return err
104 104
 	}
... ...
@@ -123,7 +123,8 @@ func (o *StatusOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, baseC
123 123
 	}
124 124
 
125 125
 	o.describer = &describe.ProjectStatusDescriber{
126
-		K:       kclient,
126
+		OldK:    kclient,
127
+		K:       kclientset,
127 128
 		C:       client,
128 129
 		Server:  config.Host,
129 130
 		Suggest: o.verbose,
... ...
@@ -145,7 +145,7 @@ func (o *TagOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, args []s
145 145
 
146 146
 	// Setup client.
147 147
 	var err error
148
-	o.osClient, _, err = f.Clients()
148
+	o.osClient, _, _, err = f.Clients()
149 149
 	if err != nil {
150 150
 		return err
151 151
 	}
... ...
@@ -10,6 +10,7 @@ import (
10 10
 	etcdversion "github.com/coreos/etcd/version"
11 11
 
12 12
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
13
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
13 14
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
14 15
 	kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
15 16
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
... ...
@@ -33,7 +34,7 @@ type VersionOptions struct {
33 33
 	Out      io.Writer
34 34
 
35 35
 	ClientConfig kclientcmd.ClientConfig
36
-	Clients      func() (*client.Client, *kclient.Client, error)
36
+	Clients      func() (*client.Client, *kclient.Client, *kclientset.Clientset, error)
37 37
 
38 38
 	Timeout time.Duration
39 39
 
... ...
@@ -136,7 +137,7 @@ func (o VersionOptions) RunVersion() error {
136 136
 		}
137 137
 		versionHost = clientConfig.Host
138 138
 
139
-		oClient, kClient, err := o.Clients()
139
+		oClient, kClient, _, err := o.Clients()
140 140
 		if err != nil {
141 141
 			done <- err
142 142
 			return
... ...
@@ -99,7 +99,7 @@ func RunWhoAmI(f *clientcmd.Factory, out io.Writer, cmd *cobra.Command, args []s
99 99
 		return nil
100 100
 	}
101 101
 
102
-	client, _, err := f.Clients()
102
+	client, _, _, err := f.Clients()
103 103
 	if err != nil {
104 104
 		return err
105 105
 	}
... ...
@@ -7,7 +7,6 @@ import (
7 7
 	"github.com/gonum/graph"
8 8
 	"github.com/gonum/graph/concrete"
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
11 10
 	"k8s.io/kubernetes/pkg/util/sets"
12 11
 
13 12
 	"github.com/openshift/origin/pkg/client/testclient"
... ...
@@ -196,14 +195,10 @@ func TestChainDescriber(t *testing.T) {
196 196
 	}
197 197
 
198 198
 	for _, test := range tests {
199
-		o := ktestclient.NewObjects(kapi.Scheme, kapi.Codecs.UniversalDecoder())
200
-		if len(test.path) > 0 {
201
-			if err := ktestclient.AddObjectsFromPath(test.path, o, kapi.Codecs.UniversalDecoder()); err != nil {
202
-				t.Fatal(err)
203
-			}
199
+		oc, _, _, err := testclient.NewFixtureClients(kapi.Codecs.UniversalDecoder(), test.defaultNamespace, test.path)
200
+		if err != nil {
201
+			t.Fatal(err)
204 202
 		}
205
-
206
-		oc, _ := testclient.NewFixtureClients(o)
207 203
 		ist := imagegraph.MakeImageStreamTagObjectMeta(test.defaultNamespace, test.name, test.tag)
208 204
 
209 205
 		desc, err := NewChainDescriber(oc, test.namespaces, test.output).Describe(ist, test.includeInputImg, test.reverse)
... ...
@@ -10,7 +10,7 @@ import (
10 10
 	"strings"
11 11
 
12 12
 	kapi "k8s.io/kubernetes/pkg/api"
13
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
13
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
14 14
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
15 15
 	kvalidation "k8s.io/kubernetes/pkg/util/validation"
16 16
 
... ...
@@ -57,7 +57,7 @@ type CreateSecretOptions struct {
57 57
 	// Directory sources are listed and any direct file children included (but subfolders are not traversed)
58 58
 	Sources []string
59 59
 
60
-	SecretsInterface kclient.SecretsInterface
60
+	SecretsInterface kcoreclient.SecretInterface
61 61
 
62 62
 	// Writer to write warnings to
63 63
 	Stderr io.Writer
... ...
@@ -129,7 +129,7 @@ func (o *CreateSecretOptions) Complete(args []string, f *clientcmd.Factory) erro
129 129
 	}
130 130
 
131 131
 	if f != nil {
132
-		_, kubeClient, err := f.Clients()
132
+		_, _, kubeClient, err := f.Clients()
133 133
 		if err != nil {
134 134
 			return err
135 135
 		}
... ...
@@ -137,7 +137,7 @@ func (o *CreateSecretOptions) Complete(args []string, f *clientcmd.Factory) erro
137 137
 		if err != nil {
138 138
 			return err
139 139
 		}
140
-		o.SecretsInterface = kubeClient.Secrets(namespace)
140
+		o.SecretsInterface = kubeClient.Core().Secrets(namespace)
141 141
 	}
142 142
 
143 143
 	return nil
... ...
@@ -92,7 +92,7 @@ func (o *BuildChainOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, a
92 92
 	}
93 93
 
94 94
 	// Setup client
95
-	oc, _, err := f.Clients()
95
+	oc, _, _, err := f.Clients()
96 96
 	if err != nil {
97 97
 		return err
98 98
 	}
... ...
@@ -177,7 +177,7 @@ func Run(f *clientcmd.Factory, options *ipfailover.IPFailoverConfigCmdOptions, c
177 177
 	if err != nil {
178 178
 		return err
179 179
 	}
180
-	_, kClient, err := f.Clients()
180
+	_, kClient, _, err := f.Clients()
181 181
 	if err != nil {
182 182
 		return fmt.Errorf("error getting client: %v", err)
183 183
 	}
... ...
@@ -214,7 +214,7 @@ func (o *F5RouterOptions) Run() error {
214 214
 		return err
215 215
 	}
216 216
 
217
-	oc, kc, err := o.Config.Clients()
217
+	oc, _, kc, err := o.Config.Clients()
218 218
 	if err != nil {
219 219
 		return err
220 220
 	}
... ...
@@ -9,7 +9,8 @@ import (
9 9
 	"github.com/spf13/pflag"
10 10
 
11 11
 	kapi "k8s.io/kubernetes/pkg/api"
12
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
12
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
13
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
13 14
 	"k8s.io/kubernetes/pkg/fields"
14 15
 	"k8s.io/kubernetes/pkg/labels"
15 16
 	"k8s.io/kubernetes/pkg/util/sets"
... ...
@@ -210,7 +211,7 @@ func (o *RouterSelection) Complete() error {
210 210
 }
211 211
 
212 212
 // NewFactory initializes a factory that will watch the requested routes
213
-func (o *RouterSelection) NewFactory(oc oclient.Interface, kc kclient.Interface) *controllerfactory.RouterControllerFactory {
213
+func (o *RouterSelection) NewFactory(oc oclient.Interface, kc kclientset.Interface) *controllerfactory.RouterControllerFactory {
214 214
 	factory := controllerfactory.NewDefaultRouterControllerFactory(oc, kc)
215 215
 	factory.Labels = o.Labels
216 216
 	factory.Fields = o.Fields
... ...
@@ -219,7 +220,7 @@ func (o *RouterSelection) NewFactory(oc oclient.Interface, kc kclient.Interface)
219 219
 	switch {
220 220
 	case o.NamespaceLabels != nil:
221 221
 		glog.Infof("Router is only using routes in namespaces matching %s", o.NamespaceLabels)
222
-		factory.Namespaces = namespaceNames{kc.Namespaces(), o.NamespaceLabels}
222
+		factory.Namespaces = namespaceNames{kc.Core().Namespaces(), o.NamespaceLabels}
223 223
 	case o.ProjectLabels != nil:
224 224
 		glog.Infof("Router is only using routes in projects matching %s", o.ProjectLabels)
225 225
 		factory.Namespaces = projectNames{oc.Projects(), o.ProjectLabels}
... ...
@@ -251,7 +252,7 @@ func (n projectNames) NamespaceNames() (sets.String, error) {
251 251
 
252 252
 // namespaceNames returns the names of namespaces matching the label selector
253 253
 type namespaceNames struct {
254
-	client   kclient.NamespaceInterface
254
+	client   kcoreclient.NamespaceInterface
255 255
 	selector labels.Selector
256 256
 }
257 257
 
... ...
@@ -192,12 +192,12 @@ func (o *TemplateRouterOptions) Run() error {
192 192
 		AllowWildcardRoutes:    o.RouterSelection.AllowWildcardRoutes,
193 193
 	}
194 194
 
195
-	oc, kc, err := o.Config.Clients()
195
+	oc, _, kc, err := o.Config.Clients()
196 196
 	if err != nil {
197 197
 		return err
198 198
 	}
199 199
 
200
-	svcFetcher := templateplugin.NewListWatchServiceLookup(kc, 10*time.Minute)
200
+	svcFetcher := templateplugin.NewListWatchServiceLookup(kc.Core(), 10*time.Minute)
201 201
 	templatePlugin, err := templateplugin.NewTemplatePlugin(pluginCfg, svcFetcher)
202 202
 	if err != nil {
203 203
 		return err
... ...
@@ -24,27 +24,18 @@ import (
24 24
 	"k8s.io/kubernetes/pkg/apis/componentconfig"
25 25
 	extv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
26 26
 	policyv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
27
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
27
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
28 28
 	"k8s.io/kubernetes/pkg/client/record"
29 29
 	"k8s.io/kubernetes/pkg/client/restclient"
30 30
 	"k8s.io/kubernetes/pkg/client/typed/dynamic"
31 31
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
32
-	clientadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
33
-	"k8s.io/kubernetes/pkg/controller/deployment"
34
-	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
35
-	"k8s.io/kubernetes/pkg/master"
36
-	"k8s.io/kubernetes/pkg/registry/generic"
37
-	"k8s.io/kubernetes/pkg/runtime"
38
-	"k8s.io/kubernetes/pkg/runtime/serializer"
39
-	"k8s.io/kubernetes/pkg/storage"
40
-	storagefactory "k8s.io/kubernetes/pkg/storage/storagebackend/factory"
41
-	utilwait "k8s.io/kubernetes/pkg/util/wait"
42
-
43
-	client "k8s.io/kubernetes/pkg/client/unversioned"
32
+	adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
44 33
 	"k8s.io/kubernetes/pkg/controller/daemon"
34
+	"k8s.io/kubernetes/pkg/controller/deployment"
45 35
 	"k8s.io/kubernetes/pkg/controller/disruption"
46 36
 	endpointcontroller "k8s.io/kubernetes/pkg/controller/endpoint"
47 37
 	"k8s.io/kubernetes/pkg/controller/garbagecollector"
38
+	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
48 39
 	jobcontroller "k8s.io/kubernetes/pkg/controller/job"
49 40
 	namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
50 41
 	nodecontroller "k8s.io/kubernetes/pkg/controller/node"
... ...
@@ -58,6 +49,13 @@ import (
58 58
 	servicecontroller "k8s.io/kubernetes/pkg/controller/service"
59 59
 	attachdetachcontroller "k8s.io/kubernetes/pkg/controller/volume/attachdetach"
60 60
 	persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
61
+	"k8s.io/kubernetes/pkg/master"
62
+	"k8s.io/kubernetes/pkg/registry/generic"
63
+	"k8s.io/kubernetes/pkg/runtime"
64
+	"k8s.io/kubernetes/pkg/runtime/serializer"
65
+	"k8s.io/kubernetes/pkg/storage"
66
+	storagefactory "k8s.io/kubernetes/pkg/storage/storagebackend/factory"
67
+	utilwait "k8s.io/kubernetes/pkg/util/wait"
61 68
 
62 69
 	"k8s.io/kubernetes/pkg/registry/endpoint"
63 70
 	endpointsetcd "k8s.io/kubernetes/pkg/registry/endpoint/etcd"
... ...
@@ -164,7 +162,7 @@ func newMasterLeases(storage storage.Interface) election.Leases {
164 164
 }
165 165
 
166 166
 // RunNamespaceController starts the Kubernetes Namespace Manager
167
-func (c *MasterConfig) RunNamespaceController(kubeClient internalclientset.Interface, clientPool dynamic.ClientPool) {
167
+func (c *MasterConfig) RunNamespaceController(kubeClient kclientset.Interface, clientPool dynamic.ClientPool) {
168 168
 	// Find the list of namespaced resources via discovery that the namespace controller must manage
169 169
 	groupVersionResources, err := kubeClient.Discovery().ServerPreferredNamespacedResources()
170 170
 	if err != nil {
... ...
@@ -174,7 +172,7 @@ func (c *MasterConfig) RunNamespaceController(kubeClient internalclientset.Inter
174 174
 	go namespaceController.Run(int(c.ControllerManager.ConcurrentNamespaceSyncs), utilwait.NeverStop)
175 175
 }
176 176
 
177
-func (c *MasterConfig) RunPersistentVolumeController(client *client.Client, namespace, recyclerImageName, recyclerServiceAccountName string) {
177
+func (c *MasterConfig) RunPersistentVolumeController(client *kclientset.Clientset, namespace, recyclerImageName, recyclerServiceAccountName string) {
178 178
 	s := c.ControllerManager
179 179
 
180 180
 	alphaProvisioner, err := kctrlmgr.NewAlphaVolumeProvisioner(c.CloudProvider, s.VolumeConfiguration)
... ...
@@ -183,7 +181,7 @@ func (c *MasterConfig) RunPersistentVolumeController(client *client.Client, name
183 183
 	}
184 184
 
185 185
 	volumeController := persistentvolumecontroller.NewPersistentVolumeController(
186
-		clientadapter.FromUnversionedClient(client),
186
+		client,
187 187
 		s.PVClaimBinderSyncPeriod.Duration,
188 188
 		alphaProvisioner,
189 189
 		probeRecyclableVolumePlugins(s.VolumeConfiguration, namespace, recyclerImageName, recyclerServiceAccountName),
... ...
@@ -196,11 +194,11 @@ func (c *MasterConfig) RunPersistentVolumeController(client *client.Client, name
196 196
 	volumeController.Run(utilwait.NeverStop)
197 197
 }
198 198
 
199
-func (c *MasterConfig) RunPersistentVolumeAttachDetachController(client *client.Client) {
199
+func (c *MasterConfig) RunPersistentVolumeAttachDetachController(client *kclientset.Clientset) {
200 200
 	s := c.ControllerManager
201 201
 	attachDetachController, err :=
202 202
 		attachdetachcontroller.NewAttachDetachController(
203
-			clientadapter.FromUnversionedClient(client),
203
+			client,
204 204
 			c.Informers.Pods().Informer(),
205 205
 			c.Informers.Nodes().Informer(),
206 206
 			c.Informers.PersistentVolumeClaims().Informer(),
... ...
@@ -270,10 +268,10 @@ func probeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration, na
270 270
 	return allPlugins
271 271
 }
272 272
 
273
-func (c *MasterConfig) RunReplicaSetController(client *client.Client) {
273
+func (c *MasterConfig) RunReplicaSetController(client *kclientset.Clientset) {
274 274
 	controller := replicasetcontroller.NewReplicaSetController(
275 275
 		c.Informers.Pods().Informer(),
276
-		clientadapter.FromUnversionedClient(client),
276
+		client,
277 277
 		kctrlmgr.ResyncPeriod(c.ControllerManager),
278 278
 		replicasetcontroller.BurstReplicas,
279 279
 		int(c.ControllerManager.LookupCacheSizeForRC),
... ...
@@ -283,10 +281,10 @@ func (c *MasterConfig) RunReplicaSetController(client *client.Client) {
283 283
 }
284 284
 
285 285
 // RunReplicationController starts the Kubernetes replication controller sync loop
286
-func (c *MasterConfig) RunReplicationController(client *client.Client) {
286
+func (c *MasterConfig) RunReplicationController(client *kclientset.Clientset) {
287 287
 	controllerManager := replicationcontroller.NewReplicationManager(
288 288
 		c.Informers.Pods().Informer(),
289
-		clientadapter.FromUnversionedClient(client),
289
+		client,
290 290
 		kctrlmgr.ResyncPeriod(c.ControllerManager),
291 291
 		replicationcontroller.BurstReplicas,
292 292
 		int(c.ControllerManager.LookupCacheSizeForRC),
... ...
@@ -295,17 +293,17 @@ func (c *MasterConfig) RunReplicationController(client *client.Client) {
295 295
 	go controllerManager.Run(int(c.ControllerManager.ConcurrentRCSyncs), utilwait.NeverStop)
296 296
 }
297 297
 
298
-func (c *MasterConfig) RunDeploymentController(client *client.Client) {
298
+func (c *MasterConfig) RunDeploymentController(client *kclientset.Clientset) {
299 299
 	controller := deployment.NewDeploymentController(
300
-		clientadapter.FromUnversionedClient(client),
300
+		client,
301 301
 		kctrlmgr.ResyncPeriod(c.ControllerManager),
302 302
 	)
303 303
 	go controller.Run(int(c.ControllerManager.ConcurrentDeploymentSyncs), utilwait.NeverStop)
304 304
 }
305 305
 
306 306
 // RunJobController starts the Kubernetes job controller sync loop
307
-func (c *MasterConfig) RunJobController(client *client.Client) {
308
-	controller := jobcontroller.NewJobController(c.Informers.Pods().Informer(), clientadapter.FromUnversionedClient(client))
307
+func (c *MasterConfig) RunJobController(client *kclientset.Clientset) {
308
+	controller := jobcontroller.NewJobController(c.Informers.Pods().Informer(), client)
309 309
 	go controller.Run(int(c.ControllerManager.ConcurrentJobSyncs), utilwait.NeverStop)
310 310
 }
311 311
 
... ...
@@ -321,28 +319,27 @@ func (c *MasterConfig) RunScheduledJobController(config *restclient.Config) {
321 321
 }
322 322
 
323 323
 // RunDisruptionBudgetController starts the Kubernetes disruption budget controller
324
-func (c *MasterConfig) RunDisruptionBudgetController(client *client.Client) {
324
+func (c *MasterConfig) RunDisruptionBudgetController(client *kclient.Client) {
325 325
 	go disruption.NewDisruptionController(c.Informers.Pods().Informer(), client).Run(utilwait.NeverStop)
326 326
 }
327 327
 
328 328
 // RunHPAController starts the Kubernetes hpa controller sync loop
329
-func (c *MasterConfig) RunHPAController(oc *osclient.Client, kc *client.Client, heapsterNamespace string) {
330
-	clientsetClient := clientadapter.FromUnversionedClient(kc)
329
+func (c *MasterConfig) RunHPAController(oc *osclient.Client, kc *kclientset.Clientset, heapsterNamespace string) {
331 330
 	delegatingScaleNamespacer := osclient.NewDelegatingScaleNamespacer(oc, kc)
332 331
 	podautoscaler := podautoscalercontroller.NewHorizontalController(
333
-		clientsetClient,
332
+		kc,
334 333
 		delegatingScaleNamespacer,
335
-		clientsetClient,
336
-		metrics.NewHeapsterMetricsClient(clientsetClient, heapsterNamespace, "https", "heapster", ""),
334
+		kc,
335
+		metrics.NewHeapsterMetricsClient(kc, heapsterNamespace, "https", "heapster", ""),
337 336
 		c.ControllerManager.HorizontalPodAutoscalerSyncPeriod.Duration,
338 337
 	)
339 338
 	go podautoscaler.Run(utilwait.NeverStop)
340 339
 }
341 340
 
342
-func (c *MasterConfig) RunDaemonSetsController(client *client.Client) {
341
+func (c *MasterConfig) RunDaemonSetsController(client *kclientset.Clientset) {
343 342
 	controller := daemon.NewDaemonSetsController(
344 343
 		c.Informers.Pods().Informer(),
345
-		clientadapter.FromUnversionedClient(client),
344
+		client,
346 345
 		kctrlmgr.ResyncPeriod(c.ControllerManager),
347 346
 		int(c.ControllerManager.LookupCacheSizeForDaemonSet),
348 347
 	)
... ...
@@ -350,8 +347,8 @@ func (c *MasterConfig) RunDaemonSetsController(client *client.Client) {
350 350
 }
351 351
 
352 352
 // RunEndpointController starts the Kubernetes replication controller sync loop
353
-func (c *MasterConfig) RunEndpointController(client *client.Client) {
354
-	endpoints := endpointcontroller.NewEndpointController(c.Informers.Pods().Informer(), clientadapter.FromUnversionedClient(client))
353
+func (c *MasterConfig) RunEndpointController(client *kclientset.Clientset) {
354
+	endpoints := endpointcontroller.NewEndpointController(c.Informers.Pods().Informer(), client)
355 355
 	go endpoints.Run(int(c.ControllerManager.ConcurrentEndpointSyncs), utilwait.NeverStop)
356 356
 
357 357
 }
... ...
@@ -371,9 +368,9 @@ func (c *MasterConfig) RunScheduler() {
371 371
 }
372 372
 
373 373
 // RunGCController handles deletion of terminated pods.
374
-func (c *MasterConfig) RunGCController(client *client.Client) {
374
+func (c *MasterConfig) RunGCController(client *kclientset.Clientset) {
375 375
 	if c.ControllerManager.TerminatedPodGCThreshold > 0 {
376
-		gcController := gccontroller.New(clientadapter.FromUnversionedClient(client), kctrlmgr.ResyncPeriod(c.ControllerManager), int(c.ControllerManager.TerminatedPodGCThreshold))
376
+		gcController := gccontroller.New(client, kctrlmgr.ResyncPeriod(c.ControllerManager), int(c.ControllerManager.TerminatedPodGCThreshold))
377 377
 		go gcController.Run(utilwait.NeverStop)
378 378
 	}
379 379
 }
... ...
@@ -417,7 +414,7 @@ func (c *MasterConfig) RunNodeController() {
417 417
 	controller, err := nodecontroller.NewNodeController(
418 418
 		c.Informers.Pods().Informer(),
419 419
 		c.CloudProvider,
420
-		clientadapter.FromUnversionedClient(c.KubeClient),
420
+		adapter.FromUnversionedClient(c.KubeClient),
421 421
 		s.PodEvictionTimeout.Duration,
422 422
 
423 423
 		s.NodeEvictionRate,
... ...
@@ -443,12 +440,12 @@ func (c *MasterConfig) RunNodeController() {
443 443
 }
444 444
 
445 445
 // RunServiceLoadBalancerController starts the service loadbalancer controller if the cloud provider is configured.
446
-func (c *MasterConfig) RunServiceLoadBalancerController(client *client.Client) {
446
+func (c *MasterConfig) RunServiceLoadBalancerController(client *kclientset.Clientset) {
447 447
 	if c.CloudProvider == nil {
448 448
 		glog.V(2).Infof("Service controller will not start - no cloud provider configured")
449 449
 		return
450 450
 	}
451
-	serviceController, err := servicecontroller.New(c.CloudProvider, clientadapter.FromUnversionedClient(client), c.ControllerManager.ClusterName)
451
+	serviceController, err := servicecontroller.New(c.CloudProvider, client, c.ControllerManager.ClusterName)
452 452
 	if err != nil {
453 453
 		glog.Errorf("Unable to start service controller: %v", err)
454 454
 	} else {
... ...
@@ -457,7 +454,7 @@ func (c *MasterConfig) RunServiceLoadBalancerController(client *client.Client) {
457 457
 }
458 458
 
459 459
 // RunPetSetController starts the PetSet controller
460
-func (c *MasterConfig) RunPetSetController(client *client.Client) {
460
+func (c *MasterConfig) RunPetSetController(client *kclient.Client) {
461 461
 	ps := petsetcontroller.NewPetSetController(c.Informers.Pods().Informer(), client, kctrlmgr.ResyncPeriod(c.ControllerManager)())
462 462
 	go ps.Run(1, utilwait.NeverStop)
463 463
 }
... ...
@@ -19,8 +19,7 @@ import (
19 19
 	"k8s.io/kubernetes/pkg/apis/componentconfig"
20 20
 	"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
21 21
 	"k8s.io/kubernetes/pkg/client/cache"
22
-	client "k8s.io/kubernetes/pkg/client/unversioned"
23
-	clientadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
22
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
24 23
 	"k8s.io/kubernetes/pkg/cloudprovider"
25 24
 	"k8s.io/kubernetes/pkg/kubelet"
26 25
 	"k8s.io/kubernetes/pkg/kubelet/dockertools"
... ...
@@ -55,7 +54,7 @@ type NodeConfig struct {
55 55
 	Containerized bool
56 56
 
57 57
 	// Client to connect to the master.
58
-	Client *client.Client
58
+	Client *kclientset.Clientset
59 59
 	// DockerClient is a client to connect to Docker
60 60
 	DockerClient dockertools.DockerInterface
61 61
 	// KubeletServer contains the KubeletServer configuration
... ...
@@ -90,12 +89,12 @@ func BuildKubernetesNodeConfig(options configapi.NodeConfig, enableProxy, enable
90 90
 	if err != nil {
91 91
 		return nil, err
92 92
 	}
93
-	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
93
+	_, kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
94 94
 	if err != nil {
95 95
 		return nil, err
96 96
 	}
97 97
 	// Make a separate client for event reporting, to avoid event QPS blocking node calls
98
-	eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
98
+	_, eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
99 99
 	if err != nil {
100 100
 		return nil, err
101 101
 	}
... ...
@@ -232,15 +231,15 @@ func BuildKubernetesNodeConfig(options configapi.NodeConfig, enableProxy, enable
232 232
 
233 233
 	// provide any config overrides
234 234
 	//deps.NodeName = options.NodeName
235
-	deps.KubeClient = clientadapter.FromUnversionedClient(kubeClient)
236
-	deps.EventClient = clientadapter.FromUnversionedClient(eventClient)
235
+	deps.KubeClient = kubeClient
236
+	deps.EventClient = eventClient
237 237
 
238 238
 	// Setup auth
239 239
 	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
240 240
 	if err != nil {
241 241
 		return nil, err
242 242
 	}
243
-	authn, err := newAuthenticator(deps.KubeClient.Authentication(), clientCAs, authnTTL, options.AuthConfig.AuthenticationCacheSize)
243
+	authn, err := newAuthenticator(kubeClient.Authentication(), clientCAs, authnTTL, options.AuthConfig.AuthenticationCacheSize)
244 244
 	if err != nil {
245 245
 		return nil, err
246 246
 	}
... ...
@@ -326,7 +325,7 @@ func BuildKubernetesNodeConfig(options configapi.NodeConfig, enableProxy, enable
326 326
 		services, serviceStore := dns.NewCachedServiceAccessorAndStore()
327 327
 		endpoints, endpointsStore := dns.NewCachedEndpointsAccessorAndStore()
328 328
 		if !enableProxy {
329
-			endpoints = kubeClient
329
+			endpoints = deps.KubeClient
330 330
 			endpointsStore = nil
331 331
 		}
332 332
 
... ...
@@ -89,7 +89,7 @@ func (c *AuthConfig) InstallAPI(container *restful.Container) ([]string, error)
89 89
 		return nil, err
90 90
 	}
91 91
 	clientRegistry := clientregistry.NewRegistry(clientStorage)
92
-	combinedOAuthClientGetter := saoauth.NewServiceAccountOAuthClientGetter(c.KubeClient, c.KubeClient, c.OpenShiftClient, clientRegistry, oauthapi.GrantHandlerType(c.Options.GrantConfig.ServiceAccountMethod))
92
+	combinedOAuthClientGetter := saoauth.NewServiceAccountOAuthClientGetter(c.KubeClient.Core(), c.KubeClient.Core(), c.OpenShiftClient, clientRegistry, oauthapi.GrantHandlerType(c.Options.GrantConfig.ServiceAccountMethod))
93 93
 
94 94
 	accessTokenStorage, err := accesstokenetcd.NewREST(c.RESTOptionsGetter, combinedOAuthClientGetter, c.EtcdBackends...)
95 95
 	if err != nil {
... ...
@@ -7,7 +7,7 @@ import (
7 7
 
8 8
 	"github.com/pborman/uuid"
9 9
 
10
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11 11
 	"k8s.io/kubernetes/pkg/storage"
12 12
 
13 13
 	"github.com/openshift/origin/pkg/auth/server/session"
... ...
@@ -28,7 +28,7 @@ type AuthConfig struct {
28 28
 	AssetPublicAddresses []string
29 29
 
30 30
 	// KubeClient is kubeclient with enough permission for the auth API
31
-	KubeClient kclient.Interface
31
+	KubeClient kclientset.Interface
32 32
 
33 33
 	// OpenShiftClient is osclient with enough permission for the auth API
34 34
 	OpenShiftClient osclient.Interface
... ...
@@ -7,13 +7,14 @@ import (
7 7
 
8 8
 	"k8s.io/kubernetes/pkg/api/rest"
9 9
 	extapi "k8s.io/kubernetes/pkg/apis/extensions"
10
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
10
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
11 12
 	kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
12 13
 	"k8s.io/kubernetes/pkg/storage/storagebackend"
13 14
 
14 15
 	_ "github.com/openshift/origin/pkg/api/install"
15 16
 	"github.com/openshift/origin/pkg/api/validation"
16
-	otestclient "github.com/openshift/origin/pkg/client/testclient"
17
+	"github.com/openshift/origin/pkg/client/testclient"
17 18
 	"github.com/openshift/origin/pkg/controller/shared"
18 19
 	deployapi "github.com/openshift/origin/pkg/deploy/api"
19 20
 	quotaapi "github.com/openshift/origin/pkg/quota/api"
... ...
@@ -72,11 +73,12 @@ func TestValidationRegistration(t *testing.T) {
72 72
 
73 73
 // fakeMasterConfig creates a new fake master config with an empty kubelet config and dummy storage.
74 74
 func fakeMasterConfig() *MasterConfig {
75
-	informerFactory := shared.NewInformerFactory(testclient.NewSimpleFake(), otestclient.NewSimpleFake(), shared.DefaultListerWatcherOverrides{}, 1*time.Second)
75
+	informerFactory := shared.NewInformerFactory(fake.NewSimpleClientset(), testclient.NewSimpleFake(), shared.DefaultListerWatcherOverrides{}, 1*time.Second)
76 76
 	return &MasterConfig{
77
-		KubeletClientConfig:           &kubeletclient.KubeletClientConfig{},
78
-		RESTOptionsGetter:             restoptions.NewSimpleGetter(&storagebackend.Config{ServerList: []string{"localhost"}}),
79
-		Informers:                     informerFactory,
80
-		ClusterQuotaMappingController: clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas()),
77
+		KubeletClientConfig:                   &kubeletclient.KubeletClientConfig{},
78
+		RESTOptionsGetter:                     restoptions.NewSimpleGetter(&storagebackend.Config{ServerList: []string{"localhost"}}),
79
+		Informers:                             informerFactory,
80
+		ClusterQuotaMappingController:         clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas()),
81
+		PrivilegedLoopbackKubernetesClientset: &kclientset.Clientset{},
81 82
 	}
82 83
 }
... ...
@@ -305,7 +305,7 @@ func StartNode(nodeConfig configapi.NodeConfig, components *utilflags.ComponentF
305 305
 		glog.Infof("Starting node networking %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String())
306 306
 	}
307 307
 
308
-	_, kubeClientConfig, err := configapi.GetKubeClient(nodeConfig.MasterKubeConfig, nodeConfig.MasterClientConnectionOverrides)
308
+	_, _, kubeClientConfig, err := configapi.GetKubeClient(nodeConfig.MasterKubeConfig, nodeConfig.MasterClientConnectionOverrides)
309 309
 	if err != nil {
310 310
 		return err
311 311
 	}
... ...
@@ -37,10 +37,10 @@ func (f *podInformer) Informer() framework.SharedIndexInformer {
37 37
 	if lw == nil {
38 38
 		lw = &cache.ListWatch{
39 39
 			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
40
-				return f.kubeClient.Pods(kapi.NamespaceAll).List(options)
40
+				return f.kubeClient.Core().Pods(kapi.NamespaceAll).List(options)
41 41
 			},
42 42
 			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
43
-				return f.kubeClient.Pods(kapi.NamespaceAll).Watch(options)
43
+				return f.kubeClient.Core().Pods(kapi.NamespaceAll).Watch(options)
44 44
 			},
45 45
 		}
46 46
 
... ...
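The shared-informer hunks in this file all apply the same substitution: the ListWatch closures call through Core() on the internal clientset instead of the flat unversioned client. One instance, extracted as a sketch:

// Sketch only, not part of the patch.
package example

import (
	kapi "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

func podListWatch(kubeClient kclientset.Interface) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return kubeClient.Core().Pods(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return kubeClient.Core().Pods(kapi.NamespaceAll).Watch(options)
		},
	}
}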
@@ -92,10 +92,10 @@ func (f *nodeInformer) Informer() framework.SharedIndexInformer {
92 92
 	if lw == nil {
93 93
 		lw = &cache.ListWatch{
94 94
 			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
95
-				return f.kubeClient.Nodes().List(options)
95
+				return f.kubeClient.Core().Nodes().List(options)
96 96
 			},
97 97
 			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
98
-				return f.kubeClient.Nodes().Watch(options)
98
+				return f.kubeClient.Core().Nodes().Watch(options)
99 99
 			},
100 100
 		}
101 101
 
... ...
@@ -147,10 +147,10 @@ func (f *persistentVolumeInformer) Informer() framework.SharedIndexInformer {
147 147
 	if lw == nil {
148 148
 		lw = &cache.ListWatch{
149 149
 			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
150
-				return f.kubeClient.PersistentVolumes().List(options)
150
+				return f.kubeClient.Core().PersistentVolumes().List(options)
151 151
 			},
152 152
 			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
153
-				return f.kubeClient.PersistentVolumes().Watch(options)
153
+				return f.kubeClient.Core().PersistentVolumes().Watch(options)
154 154
 			},
155 155
 		}
156 156
 
... ...
@@ -202,10 +202,10 @@ func (f *persistentVolumeClaimInformer) Informer() framework.SharedIndexInformer
202 202
 	if lw == nil {
203 203
 		lw = &cache.ListWatch{
204 204
 			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
205
-				return f.kubeClient.PersistentVolumeClaims(kapi.NamespaceAll).List(options)
205
+				return f.kubeClient.Core().PersistentVolumeClaims(kapi.NamespaceAll).List(options)
206 206
 			},
207 207
 			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
208
-				return f.kubeClient.PersistentVolumeClaims(kapi.NamespaceAll).Watch(options)
208
+				return f.kubeClient.Core().PersistentVolumeClaims(kapi.NamespaceAll).Watch(options)
209 209
 			},
210 210
 		}
211 211
 
... ...
@@ -257,10 +257,10 @@ func (f *replicationControllerInformer) Informer() framework.SharedIndexInformer
257 257
 	if lw == nil {
258 258
 		lw = &cache.ListWatch{
259 259
 			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
260
-				return f.kubeClient.ReplicationControllers(kapi.NamespaceAll).List(options)
260
+				return f.kubeClient.Core().ReplicationControllers(kapi.NamespaceAll).List(options)
261 261
 			},
262 262
 			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
263
-				return f.kubeClient.ReplicationControllers(kapi.NamespaceAll).Watch(options)
263
+				return f.kubeClient.Core().ReplicationControllers(kapi.NamespaceAll).Watch(options)
264 264
 			},
265 265
 		}
266 266
 	}
... ...
@@ -311,10 +311,10 @@ func (f *namespaceInformer) Informer() framework.SharedIndexInformer {
311 311
 	if lw == nil {
312 312
 		lw = &cache.ListWatch{
313 313
 			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
314
-				return f.kubeClient.Namespaces().List(options)
314
+				return f.kubeClient.Core().Namespaces().List(options)
315 315
 			},
316 316
 			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
317
-				return f.kubeClient.Namespaces().Watch(options)
317
+				return f.kubeClient.Core().Namespaces().Watch(options)
318 318
 			},
319 319
 		}
320 320
 	}
... ...
@@ -365,10 +365,10 @@ func (f *limitRangeInformer) Informer() framework.SharedIndexInformer {
365 365
 	if lw == nil {
366 366
 		lw = &cache.ListWatch{
367 367
 			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
368
-				return f.kubeClient.LimitRanges(kapi.NamespaceAll).List(options)
368
+				return f.kubeClient.Core().LimitRanges(kapi.NamespaceAll).List(options)
369 369
 			},
370 370
 			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
371
-				return f.kubeClient.LimitRanges(kapi.NamespaceAll).Watch(options)
371
+				return f.kubeClient.Core().LimitRanges(kapi.NamespaceAll).Watch(options)
372 372
 			},
373 373
 		}
374 374
 	}
... ...
@@ -36,10 +36,10 @@ func (s *securityContextConstraintsInformer) Informer() framework.SharedIndexInf
36 36
 	informer = framework.NewSharedIndexInformer(
37 37
 		&cache.ListWatch{
38 38
 			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
39
-				return s.kubeClient.SecurityContextConstraints().List(options)
39
+				return s.kubeClient.Core().SecurityContextConstraints().List(options)
40 40
 			},
41 41
 			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
42
-				return s.kubeClient.SecurityContextConstraints().Watch(options)
42
+				return s.kubeClient.Core().SecurityContextConstraints().Watch(options)
43 43
 			},
44 44
 		},
45 45
 		informerObj,
... ...
@@ -36,10 +36,10 @@ func (s *serviceAccountInformer) Informer() framework.SharedIndexInformer {
36 36
 	informer = framework.NewSharedIndexInformer(
37 37
 		&cache.ListWatch{
38 38
 			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
39
-				return s.kubeClient.ServiceAccounts(kapi.NamespaceAll).List(options)
39
+				return s.kubeClient.Core().ServiceAccounts(kapi.NamespaceAll).List(options)
40 40
 			},
41 41
 			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
42
-				return s.kubeClient.ServiceAccounts(kapi.NamespaceAll).Watch(options)
42
+				return s.kubeClient.Core().ServiceAccounts(kapi.NamespaceAll).Watch(options)
43 43
 			},
44 44
 		},
45 45
 		informerObj,
... ...
@@ -7,7 +7,7 @@ import (
7 7
 
8 8
 	"k8s.io/kubernetes/pkg/api/unversioned"
9 9
 	"k8s.io/kubernetes/pkg/client/cache"
10
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11 11
 	"k8s.io/kubernetes/pkg/controller/framework"
12 12
 	"k8s.io/kubernetes/pkg/controller/framework/informers"
13 13
 
... ...
@@ -57,7 +57,7 @@ func (o DefaultListerWatcherOverrides) GetListerWatcher(resource unversioned.Gro
57 57
 	return o[resource]
58 58
 }
59 59
 
60
-func NewInformerFactory(kubeClient kclient.Interface, originClient oclient.Interface, customListerWatchers ListerWatcherOverrides, defaultResync time.Duration) InformerFactory {
60
+func NewInformerFactory(kubeClient kclientset.Interface, originClient oclient.Interface, customListerWatchers ListerWatcherOverrides, defaultResync time.Duration) InformerFactory {
61 61
 	return &sharedInformerFactory{
62 62
 		kubeClient:           kubeClient,
63 63
 		originClient:         originClient,
... ...
@@ -72,7 +72,7 @@ func NewInformerFactory(kubeClient kclient.Interface, originClient oclient.Inter
72 72
 }
73 73
 
74 74
 type sharedInformerFactory struct {
75
-	kubeClient           kclient.Interface
75
+	kubeClient           kclientset.Interface
76 76
 	originClient         oclient.Interface
77 77
 	customListerWatchers ListerWatcherOverrides
78 78
 	defaultResync        time.Duration
... ...
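Outside the tests, the factory's Kubernetes side is now built from the generated clientset instead of the unversioned client. A hedged sketch of that wiring, assuming the generated NewForConfig constructor (client-gen emits one for every clientset) and a rest config already in hand; the resync interval is illustrative:

package example

import (
	"time"

	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"

	oclient "github.com/openshift/origin/pkg/client"
	"github.com/openshift/origin/pkg/controller/shared"
)

func newInformers(config *restclient.Config, originClient oclient.Interface) (shared.InformerFactory, error) {
	// NewForConfig is the generated constructor for the internal clientset.
	kubeClient, err := kclientset.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return shared.NewInformerFactory(kubeClient, originClient, shared.DefaultListerWatcherOverrides{}, 10*time.Minute), nil
}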
@@ -22,7 +22,9 @@ const (
22 22
 func OkDeploymentConfig(version int64) *deployapi.DeploymentConfig {
23 23
 	return &deployapi.DeploymentConfig{
24 24
 		ObjectMeta: kapi.ObjectMeta{
25
-			Name: "config",
25
+			Name:      "config",
26
+			Namespace: kapi.NamespaceDefault,
27
+			SelfLink:  "/oapi/v1/namespaces/default/deploymentconfig/config",
26 28
 		},
27 29
 		Spec:   OkDeploymentConfigSpec(),
28 30
 		Status: OkDeploymentConfigStatus(version),
... ...
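Giving the OkDeploymentConfig fixture an explicit Namespace (and SelfLink) is what drives the TestStop expectation changes below: the reaper derives the namespace for its follow-up replication-controller calls from the fixture, so the expected actions move from the empty namespace to "default". A small sketch of how the generated fake records namespaced actions, with illustrative object names:

package example

import (
	kapi "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
)

func example() {
	rc := &kapi.ReplicationController{
		ObjectMeta: kapi.ObjectMeta{Name: "config-1", Namespace: kapi.NamespaceDefault},
	}
	kc := fake.NewSimpleClientset(rc)

	// The fake serves the object and records a namespaced "get" action,
	// i.e. the equivalent of NewGetAction("replicationcontrollers", "default", "config-1").
	_, _ = kc.Core().ReplicationControllers(kapi.NamespaceDefault).Get("config-1")
	_ = kc.Actions()
}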
@@ -75,12 +75,12 @@ func TestStop(t *testing.T) {
75 75
 			},
76 76
 			kexpected: []ktestclient.Action{
77 77
 				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{"openshift.io/deployment-config.name": "config"})}),
78
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
79
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
80
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
81
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
82
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
83
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-1"),
78
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
79
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
80
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
81
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
82
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
83
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-1"),
84 84
 			},
85 85
 			err: false,
86 86
 		},
... ...
@@ -98,12 +98,12 @@ func TestStop(t *testing.T) {
98 98
 			},
99 99
 			kexpected: []ktestclient.Action{
100 100
 				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{"openshift.io/deployment-config.name": "config"})}),
101
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
102
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
103
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
104
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
105
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
106
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-1"),
101
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
102
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
103
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
104
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
105
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
106
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-1"),
107 107
 			},
108 108
 			err: false,
109 109
 		},
... ...
@@ -121,36 +121,36 @@ func TestStop(t *testing.T) {
121 121
 			},
122 122
 			kexpected: []ktestclient.Action{
123 123
 				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{"openshift.io/deployment-config.name": "config"})}),
124
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
125
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
126
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
127
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
128
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
129
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-1"),
130
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-2"),
131
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
132
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-2"),
133
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
134
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-2"),
135
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-2"),
136
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-3"),
137
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
138
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-3"),
139
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
140
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-3"),
141
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-3"),
142
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-4"),
143
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
144
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-4"),
145
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
146
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-4"),
147
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-4"),
148
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-5"),
149
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
150
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-5"),
151
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
152
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-5"),
153
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-5"),
124
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
125
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
126
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
127
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
128
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
129
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-1"),
130
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-2"),
131
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
132
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-2"),
133
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
134
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-2"),
135
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-2"),
136
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-3"),
137
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
138
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-3"),
139
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
140
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-3"),
141
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-3"),
142
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-4"),
143
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
144
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-4"),
145
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
146
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-4"),
147
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-4"),
148
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-5"),
149
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
150
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-5"),
151
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
152
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-5"),
153
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-5"),
154 154
 			},
155 155
 			err: false,
156 156
 		},
... ...
@@ -168,36 +168,36 @@ func TestStop(t *testing.T) {
168 168
 			},
169 169
 			kexpected: []ktestclient.Action{
170 170
 				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{"openshift.io/deployment-config.name": "config"})}),
171
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
172
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
173
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
174
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
175
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
176
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-1"),
177
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-2"),
178
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
179
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-2"),
180
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
181
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-2"),
182
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-2"),
183
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-3"),
184
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
185
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-3"),
186
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
187
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-3"),
188
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-3"),
189
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-4"),
190
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
191
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-4"),
192
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
193
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-4"),
194
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-4"),
195
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-5"),
196
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
197
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-5"),
198
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
199
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-5"),
200
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-5"),
171
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
172
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
173
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
174
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
175
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
176
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-1"),
177
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-2"),
178
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
179
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-2"),
180
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
181
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-2"),
182
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-2"),
183
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-3"),
184
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
185
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-3"),
186
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
187
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-3"),
188
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-3"),
189
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-4"),
190
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
191
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-4"),
192
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
193
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-4"),
194
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-4"),
195
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-5"),
196
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
197
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-5"),
198
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
199
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-5"),
200
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-5"),
201 201
 			},
202 202
 			err: false,
203 203
 		},
... ...
@@ -212,12 +212,12 @@ func TestStop(t *testing.T) {
212 212
 			},
213 213
 			kexpected: []ktestclient.Action{
214 214
 				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{"openshift.io/deployment-config.name": "config"})}),
215
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
216
-				ktestclient.NewListAction("replicationcontrollers", "", kapi.ListOptions{}),
217
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
218
-				ktestclient.NewUpdateAction("replicationcontrollers", "", nil),
219
-				ktestclient.NewGetAction("replicationcontrollers", "", "config-1"),
220
-				ktestclient.NewDeleteAction("replicationcontrollers", "", "config-1"),
215
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
216
+				ktestclient.NewListAction("replicationcontrollers", "default", kapi.ListOptions{}),
217
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
218
+				ktestclient.NewUpdateAction("replicationcontrollers", "default", nil),
219
+				ktestclient.NewGetAction("replicationcontrollers", "default", "config-1"),
220
+				ktestclient.NewDeleteAction("replicationcontrollers", "default", "config-1"),
221 221
 			},
222 222
 			err: false,
223 223
 		},
... ...
@@ -7,7 +7,8 @@ import (
7 7
 	"text/tabwriter"
8 8
 
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
11 12
 	"k8s.io/kubernetes/pkg/kubectl"
12 13
 
13 14
 	"github.com/openshift/origin/pkg/client"
... ...
@@ -15,14 +16,14 @@ import (
15 15
 	deployutil "github.com/openshift/origin/pkg/deploy/util"
16 16
 )
17 17
 
18
-func NewDeploymentConfigHistoryViewer(oc client.Interface, kc kclient.Interface) kubectl.HistoryViewer {
19
-	return &DeploymentConfigHistoryViewer{dn: oc, rn: kc}
18
+func NewDeploymentConfigHistoryViewer(oc client.Interface, kc kclientset.Interface) kubectl.HistoryViewer {
19
+	return &DeploymentConfigHistoryViewer{dn: oc, rn: kc.Core()}
20 20
 }
21 21
 
22 22
 // DeploymentConfigHistoryViewer is an implementation of the kubectl HistoryViewer interface
23 23
 // for deployment configs.
24 24
 type DeploymentConfigHistoryViewer struct {
25
-	rn kclient.ReplicationControllersNamespacer
25
+	rn kcoreclient.ReplicationControllersGetter
26 26
 	dn client.DeploymentConfigsNamespacer
27 27
 }
28 28
 
... ...
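The rn dependency narrows from the old ReplicationControllersNamespacer to the generated ReplicationControllersGetter, and the constructor satisfies it by storing kc.Core(). A minimal sketch of a consumer that depends only on the getter interface (the helper is illustrative):

package example

import (
	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
)

// latestRCName depends only on the narrow getter interface; any internal
// Clientset's Core() satisfies ReplicationControllersGetter.
func latestRCName(rn kcoreclient.ReplicationControllersGetter, namespace, name string) (string, error) {
	rc, err := rn.ReplicationControllers(namespace).Get(name)
	if err != nil {
		return "", err
	}
	return rc.Name, nil
}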
@@ -4,7 +4,8 @@ import (
4 4
 	"time"
5 5
 
6 6
 	kapi "k8s.io/kubernetes/pkg/api"
7
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
7
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
8
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
8 9
 	"k8s.io/kubernetes/pkg/kubectl"
9 10
 	"k8s.io/kubernetes/pkg/util/wait"
10 11
 
... ...
@@ -13,16 +14,16 @@ import (
13 13
 )
14 14
 
15 15
 // NewDeploymentConfigScaler returns a new scaler for deploymentConfigs
16
-func NewDeploymentConfigScaler(oc client.Interface, kc kclient.Interface) kubectl.Scaler {
17
-	return &DeploymentConfigScaler{rcClient: kc, dcClient: oc, clientInterface: kc}
16
+func NewDeploymentConfigScaler(oc client.Interface, kc kclientset.Interface) kubectl.Scaler {
17
+	return &DeploymentConfigScaler{rcClient: kc.Core(), dcClient: oc, clientInterface: kc}
18 18
 }
19 19
 
20 20
 // DeploymentConfigScaler is a wrapper for the kubectl Scaler client
21 21
 type DeploymentConfigScaler struct {
22
-	rcClient kclient.ReplicationControllersNamespacer
22
+	rcClient kcoreclient.ReplicationControllersGetter
23 23
 	dcClient client.DeploymentConfigsNamespacer
24 24
 
25
-	clientInterface kclient.Interface
25
+	clientInterface kclientset.Interface
26 26
 }
27 27
 
28 28
 // Scale updates the DeploymentConfig with the provided namespace/name, to a
... ...
@@ -79,13 +80,13 @@ func (scaler *DeploymentConfigScaler) ScaleSimple(namespace, name string, precon
79 79
 // unversioned.ControllerHasDesiredReplicas. This  is necessary because when
80 80
 // scaling an RC via a DC, the RC spec replica count is not immediately
81 81
 // updated to match the owning DC.
82
-func controllerHasSpecifiedReplicas(c kclient.Interface, controller *kapi.ReplicationController, specifiedReplicas int32) wait.ConditionFunc {
82
+func controllerHasSpecifiedReplicas(c kclientset.Interface, controller *kapi.ReplicationController, specifiedReplicas int32) wait.ConditionFunc {
83 83
 	// If we're given a controller where the status lags the spec, it either means that the controller is stale,
84 84
 	// or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case.
85 85
 	desiredGeneration := controller.Generation
86 86
 
87 87
 	return func() (bool, error) {
88
-		ctrl, err := c.ReplicationControllers(controller.Namespace).Get(controller.Name)
88
+		ctrl, err := c.Core().ReplicationControllers(controller.Namespace).Get(controller.Name)
89 89
 		if err != nil {
90 90
 			return false, err
91 91
 		}
... ...
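controllerHasSpecifiedReplicas returns a wait.ConditionFunc, so the scaler can poll the live replication controller through the clientset until its status catches up with the spec it just wrote. A usage sketch as it would look inside the same package, assuming the standard wait.Poll helper; interval and timeout are illustrative:

package example

import (
	"time"

	kapi "k8s.io/kubernetes/pkg/api"
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForReplicas blocks until the controller reports the requested replica
// count, re-reading it via the internal clientset on every poll.
func waitForReplicas(c kclientset.Interface, rc *kapi.ReplicationController, replicas int32) error {
	return wait.Poll(100*time.Millisecond, 30*time.Second,
		controllerHasSpecifiedReplicas(c, rc, replicas))
}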
@@ -6,6 +6,8 @@ import (
6 6
 
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8 8
 	"k8s.io/kubernetes/pkg/apis/extensions"
9
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
10
+	"k8s.io/kubernetes/pkg/client/testing/core"
9 11
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
10 12
 	"k8s.io/kubernetes/pkg/kubectl"
11 13
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -41,7 +43,7 @@ func TestScale(t *testing.T) {
41 41
 	for _, test := range tests {
42 42
 		t.Logf("evaluating test %q", test.name)
43 43
 		oc := &testclient.Fake{}
44
-		kc := &ktestclient.Fake{}
44
+		kc := &fake.Clientset{}
45 45
 		scaler := NewDeploymentConfigScaler(oc, kc)
46 46
 
47 47
 		config := deploytest.OkDeploymentConfig(1)
... ...
@@ -66,7 +68,7 @@ func TestScale(t *testing.T) {
66 66
 			deployment.Status.Replicas = deployment.Spec.Replicas
67 67
 			return true, scale, nil
68 68
 		})
69
-		kc.AddReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
69
+		kc.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
70 70
 			return true, deployment, nil
71 71
 		})
72 72
 
... ...
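From this point on, the test fakes switch from &ktestclient.Fake{} to &fake.Clientset{}, and every reactor callback takes the core testing actions instead of ktestclient ones. A minimal sketch of the new reactor shape (the reacted-to resource and returned object are illustrative):

package example

import (
	kapi "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	"k8s.io/kubernetes/pkg/client/testing/core"
	"k8s.io/kubernetes/pkg/runtime"
)

func newFakeWithReactors() *fake.Clientset {
	kc := &fake.Clientset{}
	// Reactors receive core.Action; concrete data is reached by asserting to
	// the narrower interfaces (GetAction, CreateAction, UpdateAction, ...).
	kc.AddReactor("get", "replicationcontrollers", func(action core.Action) (bool, runtime.Object, error) {
		name := action.(core.GetAction).GetName()
		rc := &kapi.ReplicationController{ObjectMeta: kapi.ObjectMeta{Name: name, Namespace: action.GetNamespace()}}
		return true, rc, nil
	})
	return kc
}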
@@ -8,8 +8,8 @@ import (
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9 9
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
10 10
 	"k8s.io/kubernetes/pkg/client/cache"
11
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
11 12
 	"k8s.io/kubernetes/pkg/client/record"
12
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
13 13
 	"k8s.io/kubernetes/pkg/runtime"
14 14
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
15 15
 	"k8s.io/kubernetes/pkg/util/sets"
... ...
@@ -41,9 +41,9 @@ func (e actionableError) Error() string { return string(e) }
41 41
 //   2. If the deployment failed, the deployer pod is not deleted.
42 42
 type DeploymentController struct {
43 43
 	// rn is used for updating replication controllers.
44
-	rn kclient.ReplicationControllersNamespacer
44
+	rn kcoreclient.ReplicationControllersGetter
45 45
 	// pn is used for creating, updating, and deleting deployer pods.
46
-	pn kclient.PodsNamespacer
46
+	pn kcoreclient.PodsGetter
47 47
 
48 48
 	// queue contains replication controllers that need to be synced.
49 49
 	queue workqueue.RateLimitingInterface
... ...
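The controller keeps two narrow views of the same client: a getter for replication controllers and a getter for deployer pods. A hedged sketch showing that a single typed core client satisfies both interfaces, which is presumably how the constructor wires these fields:

package example

import (
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
)

// splitClients returns one Core() client as both of the narrow interfaces
// the controller depends on, keeping each dependency mockable per resource.
func splitClients(kc kclientset.Interface) (kcoreclient.ReplicationControllersGetter, kcoreclient.PodsGetter) {
	core := kc.Core()
	return core, core
}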
@@ -11,8 +11,9 @@ import (
11 11
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
12 12
 	"k8s.io/kubernetes/pkg/api/resource"
13 13
 	"k8s.io/kubernetes/pkg/client/cache"
14
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
15
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
14
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
15
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
16
+	"k8s.io/kubernetes/pkg/client/testing/core"
16 17
 	"k8s.io/kubernetes/pkg/controller/framework"
17 18
 	"k8s.io/kubernetes/pkg/runtime"
18 19
 
... ...
@@ -28,11 +29,11 @@ var (
28 28
 	codec = kapi.Codecs.LegacyCodec(deployapiv1.SchemeGroupVersion)
29 29
 )
30 30
 
31
-func okDeploymentController(fake kclient.Interface, deployment *kapi.ReplicationController, hookPodNames []string, related bool, deployerStatus kapi.PodPhase) *DeploymentController {
31
+func okDeploymentController(client kclientset.Interface, deployment *kapi.ReplicationController, hookPodNames []string, related bool, deployerStatus kapi.PodPhase) *DeploymentController {
32 32
 	rcInformer := framework.NewSharedIndexInformer(&cache.ListWatch{}, &kapi.ReplicationController{}, 2*time.Minute, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
33 33
 	podInformer := framework.NewSharedIndexInformer(&cache.ListWatch{}, &kapi.Pod{}, 2*time.Minute, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
34 34
 
35
-	c := NewDeploymentController(rcInformer, podInformer, fake, "sa:test", "openshift/origin-deployer", env, codec)
35
+	c := NewDeploymentController(rcInformer, podInformer, client, "sa:test", "openshift/origin-deployer", env, codec)
36 36
 
37 37
 	// deployer pod
38 38
 	if deployment != nil {
... ...
@@ -101,14 +102,14 @@ func TestHandle_createPodOk(t *testing.T) {
101 101
 		expectedContainer = okContainer()
102 102
 	)
103 103
 
104
-	fake := &ktestclient.Fake{}
105
-	fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
106
-		pod := action.(ktestclient.CreateAction).GetObject().(*kapi.Pod)
104
+	client := &fake.Clientset{}
105
+	client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
106
+		pod := action.(core.CreateAction).GetObject().(*kapi.Pod)
107 107
 		createdPod = pod
108 108
 		return true, pod, nil
109 109
 	})
110
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
111
-		rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
110
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
111
+		rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
112 112
 		updatedDeployment = rc
113 113
 		return true, rc, nil
114 114
 	})
... ...
@@ -120,7 +121,7 @@ func TestHandle_createPodOk(t *testing.T) {
120 120
 	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
121 121
 	deployment.Spec.Template.Spec.NodeSelector = map[string]string{"labelKey1": "labelValue1", "labelKey2": "labelValue2"}
122 122
 
123
-	controller := okDeploymentController(fake, nil, nil, true, kapi.PodUnknown)
123
+	controller := okDeploymentController(client, nil, nil, true, kapi.PodUnknown)
124 124
 
125 125
 	if err := controller.Handle(deployment); err != nil {
126 126
 		t.Fatalf("unexpected error: %v", err)
... ...
@@ -194,13 +195,13 @@ func TestHandle_createPodOk(t *testing.T) {
194 194
 func TestHandle_createPodFail(t *testing.T) {
195 195
 	var updatedDeployment *kapi.ReplicationController
196 196
 
197
-	fake := &ktestclient.Fake{}
198
-	fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
199
-		name := action.(ktestclient.CreateAction).GetObject().(*kapi.Pod).Name
197
+	client := &fake.Clientset{}
198
+	client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
199
+		name := action.(core.CreateAction).GetObject().(*kapi.Pod).Name
200 200
 		return true, nil, fmt.Errorf("failed to create pod %q", name)
201 201
 	})
202
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
203
-		rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
202
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
203
+		rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
204 204
 		updatedDeployment = rc
205 205
 		return true, rc, nil
206 206
 	})
... ...
@@ -209,7 +210,7 @@ func TestHandle_createPodFail(t *testing.T) {
209 209
 	deployment, _ := deployutil.MakeDeployment(config, codec)
210 210
 	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
211 211
 
212
-	controller := okDeploymentController(fake, nil, nil, true, kapi.PodUnknown)
212
+	controller := okDeploymentController(client, nil, nil, true, kapi.PodUnknown)
213 213
 
214 214
 	err := controller.Handle(deployment)
215 215
 	if err == nil {
... ...
@@ -265,18 +266,18 @@ func TestHandle_deployerPodAlreadyExists(t *testing.T) {
265 265
 		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
266 266
 		deployerPodName := deployutil.DeployerPodNameForDeployment(deployment.Name)
267 267
 
268
-		fake := &ktestclient.Fake{}
269
-		fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
270
-			name := action.(ktestclient.CreateAction).GetObject().(*kapi.Pod).Name
268
+		client := &fake.Clientset{}
269
+		client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
270
+			name := action.(core.CreateAction).GetObject().(*kapi.Pod).Name
271 271
 			return true, nil, kerrors.NewAlreadyExists(kapi.Resource("Pod"), name)
272 272
 		})
273
-		fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
274
-			rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
273
+		client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
274
+			rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
275 275
 			updatedDeployment = rc
276 276
 			return true, rc, nil
277 277
 		})
278 278
 
279
-		controller := okDeploymentController(fake, deployment, nil, true, test.podPhase)
279
+		controller := okDeploymentController(client, deployment, nil, true, test.podPhase)
280 280
 
281 281
 		if err := controller.Handle(deployment); err != nil {
282 282
 			t.Errorf("%s: unexpected error: %v", test.name, err)
... ...
@@ -304,18 +305,18 @@ func TestHandle_unrelatedPodAlreadyExists(t *testing.T) {
304 304
 	deployment, _ := deployutil.MakeDeployment(config, codec)
305 305
 	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
306 306
 
307
-	fake := &ktestclient.Fake{}
308
-	fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
309
-		name := action.(ktestclient.CreateAction).GetObject().(*kapi.Pod).Name
307
+	client := &fake.Clientset{}
308
+	client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
309
+		name := action.(core.CreateAction).GetObject().(*kapi.Pod).Name
310 310
 		return true, nil, kerrors.NewAlreadyExists(kapi.Resource("Pod"), name)
311 311
 	})
312
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
313
-		rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
312
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
313
+		rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
314 314
 		updatedDeployment = rc
315 315
 		return true, rc, nil
316 316
 	})
317 317
 
318
-	controller := okDeploymentController(fake, deployment, nil, false, kapi.PodRunning)
318
+	controller := okDeploymentController(client, deployment, nil, false, kapi.PodRunning)
319 319
 
320 320
 	if err := controller.Handle(deployment); err != nil {
321 321
 		t.Fatalf("unexpected error: %v", err)
... ...
@@ -345,18 +346,18 @@ func TestHandle_unrelatedPodAlreadyExistsTestScaled(t *testing.T) {
345 345
 	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
346 346
 	deployment.Spec.Replicas = 1
347 347
 
348
-	fake := &ktestclient.Fake{}
349
-	fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
350
-		name := action.(ktestclient.CreateAction).GetObject().(*kapi.Pod).Name
348
+	client := &fake.Clientset{}
349
+	client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
350
+		name := action.(core.CreateAction).GetObject().(*kapi.Pod).Name
351 351
 		return true, nil, kerrors.NewAlreadyExists(kapi.Resource("Pod"), name)
352 352
 	})
353
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
354
-		rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
353
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
354
+		rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
355 355
 		updatedDeployment = rc
356 356
 		return true, rc, nil
357 357
 	})
358 358
 
359
-	controller := okDeploymentController(fake, deployment, nil, false, kapi.PodRunning)
359
+	controller := okDeploymentController(client, deployment, nil, false, kapi.PodRunning)
360 360
 
361 361
 	if err := controller.Handle(deployment); err != nil {
362 362
 		t.Fatalf("unexpected error: %v", err)
... ...
@@ -409,20 +410,20 @@ func TestHandle_noop(t *testing.T) {
409 409
 	}
410 410
 
411 411
 	for _, test := range tests {
412
-		fake := &ktestclient.Fake{}
412
+		client := fake.NewSimpleClientset()
413 413
 
414 414
 		deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), codec)
415 415
 		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(test.deploymentPhase)
416 416
 
417
-		controller := okDeploymentController(fake, deployment, nil, true, test.podPhase)
417
+		controller := okDeploymentController(client, deployment, nil, true, test.podPhase)
418 418
 
419 419
 		if err := controller.Handle(deployment); err != nil {
420 420
 			t.Errorf("%s: unexpected error: %v", test.name, err)
421 421
 			continue
422 422
 		}
423 423
 
424
-		if len(fake.Actions()) > 0 {
425
-			t.Errorf("%s: unexpected actions: %v", test.name, fake.Actions())
424
+		if len(client.Actions()) > 0 {
425
+			t.Errorf("%s: unexpected actions: %v", test.name, client.Actions())
426 426
 		}
427 427
 	}
428 428
 }
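TestHandle_noop is the one test that uses fake.NewSimpleClientset() instead of a bare &fake.Clientset{}: the constructor installs an object tracker and default reactors, whereas the bare struct handles nothing until reactors are added, which suits tests that only intercept specific verbs. Both variants record every call, which is what the client.Actions() assertion checks. A small sketch of the difference, assuming no extra reactors are registered:

package example

import (
	kapi "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
)

// compareFakes issues one call against each flavour of fake; both record the
// action, but only the seeded fake has default reactors behind it.
func compareFakes() (int, int) {
	seeded := fake.NewSimpleClientset() // object tracker + default reactors
	bare := &fake.Clientset{}           // no reactors; calls fall through to empty defaults

	seeded.Core().Pods(kapi.NamespaceDefault).List(kapi.ListOptions{})
	bare.Core().Pods(kapi.NamespaceDefault).List(kapi.ListOptions{})

	return len(seeded.Actions()), len(bare.Actions())
}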
... ...
@@ -432,13 +433,13 @@ func TestHandle_noop(t *testing.T) {
432 432
 func TestHandle_failedTest(t *testing.T) {
433 433
 	var updatedDeployment *kapi.ReplicationController
434 434
 
435
-	fake := &ktestclient.Fake{}
436
-	fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
435
+	client := &fake.Clientset{}
436
+	client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
437 437
 		t.Fatalf("unexpected call to create pod")
438 438
 		return true, nil, nil
439 439
 	})
440
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
441
-		rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
440
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
441
+		rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
442 442
 		updatedDeployment = rc
443 443
 		return true, rc, nil
444 444
 	})
... ...
@@ -449,7 +450,7 @@ func TestHandle_failedTest(t *testing.T) {
449 449
 	deployment.Spec.Replicas = 1
450 450
 	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusRunning)
451 451
 
452
-	controller := okDeploymentController(fake, deployment, nil, true, kapi.PodFailed)
452
+	controller := okDeploymentController(client, deployment, nil, true, kapi.PodFailed)
453 453
 
454 454
 	if err := controller.Handle(deployment); err != nil {
455 455
 		t.Fatalf("unexpected error: %v", err)
... ...
@@ -469,17 +470,17 @@ func TestHandle_cleanupPodOk(t *testing.T) {
469 469
 	hookPods := []string{"pre", "mid", "post"}
470 470
 	deletedPodNames := []string{}
471 471
 
472
-	fake := &ktestclient.Fake{}
473
-	fake.AddReactor("delete", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
474
-		name := action.(ktestclient.DeleteAction).GetName()
472
+	client := &fake.Clientset{}
473
+	client.AddReactor("delete", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
474
+		name := action.(core.DeleteAction).GetName()
475 475
 		deletedPodNames = append(deletedPodNames, name)
476 476
 		return true, nil, nil
477 477
 	})
478
-	fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
478
+	client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
479 479
 		t.Fatalf("unexpected call to create pod")
480 480
 		return true, nil, nil
481 481
 	})
482
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
482
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
483 483
 		t.Fatalf("unexpected deployment update")
484 484
 		return true, nil, nil
485 485
 	})
... ...
@@ -489,7 +490,7 @@ func TestHandle_cleanupPodOk(t *testing.T) {
489 489
 	deployment, _ := deployutil.MakeDeployment(config, codec)
490 490
 	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusComplete)
491 491
 
492
-	controller := okDeploymentController(fake, deployment, hookPods, true, kapi.PodSucceeded)
492
+	controller := okDeploymentController(client, deployment, hookPods, true, kapi.PodSucceeded)
493 493
 	hookPods = append(hookPods, deployment.Name)
494 494
 
495 495
 	if err := controller.Handle(deployment); err != nil {
... ...
@@ -512,18 +513,18 @@ func TestHandle_cleanupPodOkTest(t *testing.T) {
512 512
 	deletedPodNames := []string{}
513 513
 	var updatedDeployment *kapi.ReplicationController
514 514
 
515
-	fake := &ktestclient.Fake{}
516
-	fake.AddReactor("delete", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
517
-		name := action.(ktestclient.DeleteAction).GetName()
515
+	client := &fake.Clientset{}
516
+	client.AddReactor("delete", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
517
+		name := action.(core.DeleteAction).GetName()
518 518
 		deletedPodNames = append(deletedPodNames, name)
519 519
 		return true, nil, nil
520 520
 	})
521
-	fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
521
+	client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
522 522
 		t.Fatalf("unexpected call to create pod")
523 523
 		return true, nil, nil
524 524
 	})
525
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
526
-		rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
525
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
526
+		rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
527 527
 		updatedDeployment = rc
528 528
 		return true, rc, nil
529 529
 	})
... ...
@@ -534,7 +535,7 @@ func TestHandle_cleanupPodOkTest(t *testing.T) {
534 534
 	deployment.Spec.Replicas = 1
535 535
 	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusRunning)
536 536
 
537
-	controller := okDeploymentController(fake, deployment, hookPods, true, kapi.PodSucceeded)
537
+	controller := okDeploymentController(client, deployment, hookPods, true, kapi.PodSucceeded)
538 538
 	hookPods = append(hookPods, deployment.Name)
539 539
 
540 540
 	if err := controller.Handle(deployment); err != nil {
... ...
@@ -557,16 +558,16 @@ func TestHandle_cleanupPodOkTest(t *testing.T) {
557 557
 // TestHandle_cleanupPodNoop ensures that an attempt to delete pods is not made
558 558
 // if the deployer pods are not listed based on a label query
559 559
 func TestHandle_cleanupPodNoop(t *testing.T) {
560
-	fake := &ktestclient.Fake{}
561
-	fake.AddReactor("delete", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
560
+	client := &fake.Clientset{}
561
+	client.AddReactor("delete", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
562 562
 		t.Fatalf("unexpected call to delete pod")
563 563
 		return true, nil, nil
564 564
 	})
565
-	fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
565
+	client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
566 566
 		t.Fatalf("unexpected call to create pod")
567 567
 		return true, nil, nil
568 568
 	})
569
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
569
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
570 570
 		t.Fatalf("unexpected deployment update")
571 571
 		return true, nil, nil
572 572
 	})
... ...
@@ -576,7 +577,7 @@ func TestHandle_cleanupPodNoop(t *testing.T) {
576 576
 	deployment, _ := deployutil.MakeDeployment(config, codec)
577 577
 	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusComplete)
578 578
 
579
-	controller := okDeploymentController(fake, deployment, nil, true, kapi.PodSucceeded)
579
+	controller := okDeploymentController(client, deployment, nil, true, kapi.PodSucceeded)
580 580
 	pod := deployerPod(deployment, "", true)
581 581
 	pod.Labels[deployapi.DeployerPodForDeploymentLabel] = "unrelated"
582 582
 	controller.podStore.Update(pod)
... ...
@@ -589,15 +590,15 @@ func TestHandle_cleanupPodNoop(t *testing.T) {
589 589
 // TestHandle_cleanupPodFail ensures that a failed attempt to clean up the
590 590
 // deployer pod for a completed deployment results in an actionable error.
591 591
 func TestHandle_cleanupPodFail(t *testing.T) {
592
-	fake := &ktestclient.Fake{}
593
-	fake.AddReactor("delete", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
592
+	client := &fake.Clientset{}
593
+	client.AddReactor("delete", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
594 594
 		return true, nil, kerrors.NewInternalError(fmt.Errorf("deployer pod internal error"))
595 595
 	})
596
-	fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
596
+	client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
597 597
 		t.Fatalf("unexpected call to create pod")
598 598
 		return true, nil, nil
599 599
 	})
600
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
600
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
601 601
 		t.Fatalf("unexpected deployment update")
602 602
 		return true, nil, nil
603 603
 	})
... ...
@@ -607,7 +608,7 @@ func TestHandle_cleanupPodFail(t *testing.T) {
607 607
 	deployment, _ := deployutil.MakeDeployment(config, codec)
608 608
 	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusComplete)
609 609
 
610
-	controller := okDeploymentController(fake, deployment, nil, true, kapi.PodSucceeded)
610
+	controller := okDeploymentController(client, deployment, nil, true, kapi.PodSucceeded)
611 611
 
612 612
 	err := controller.Handle(deployment)
613 613
 	if err == nil {
... ...
@@ -623,13 +624,13 @@ func TestHandle_cleanupPodFail(t *testing.T) {
623 623
 func TestHandle_cancelNew(t *testing.T) {
624 624
 	var updatedDeployment *kapi.ReplicationController
625 625
 
626
-	fake := &ktestclient.Fake{}
627
-	fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
626
+	client := &fake.Clientset{}
627
+	client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
628 628
 		t.Fatalf("unexpected call to create pod")
629 629
 		return true, nil, nil
630 630
 	})
631
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
632
-		rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
631
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
632
+		rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
633 633
 		updatedDeployment = rc
634 634
 		return true, rc, nil
635 635
 	})
... ...
@@ -638,7 +639,7 @@ func TestHandle_cancelNew(t *testing.T) {
638 638
 	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
639 639
 	deployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
640 640
 
641
-	controller := okDeploymentController(fake, deployment, nil, true, kapi.PodRunning)
641
+	controller := okDeploymentController(client, deployment, nil, true, kapi.PodRunning)
642 642
 
643 643
 	if err := controller.Handle(deployment); err != nil {
644 644
 		t.Fatalf("unexpected error: %v", err)
... ...
@@ -659,22 +660,22 @@ func TestHandle_cleanupNewWithDeployers(t *testing.T) {
659 659
 	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
660 660
 	deployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
661 661
 
662
-	fake := &ktestclient.Fake{}
663
-	fake.AddReactor("delete", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
662
+	client := &fake.Clientset{}
663
+	client.AddReactor("delete", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
664 664
 		deletedDeployer = true
665 665
 		return true, nil, nil
666 666
 	})
667
-	fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
667
+	client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
668 668
 		t.Fatalf("unexpected call to create pod")
669 669
 		return true, nil, nil
670 670
 	})
671
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
672
-		rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
671
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
672
+		rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
673 673
 		updatedDeployment = rc
674 674
 		return true, nil, nil
675 675
 	})
676 676
 
677
-	controller := okDeploymentController(fake, deployment, nil, true, kapi.PodRunning)
677
+	controller := okDeploymentController(client, deployment, nil, true, kapi.PodRunning)
678 678
 
679 679
 	if err := controller.Handle(deployment); err != nil {
680 680
 		t.Fatalf("unexpected error: %v", err)
... ...
@@ -738,12 +739,12 @@ func TestHandle_cleanupPostNew(t *testing.T) {
738 738
 	for _, test := range tests {
739 739
 		deletedPods := 0
740 740
 
741
-		fake := &ktestclient.Fake{}
742
-		fake.AddReactor("delete", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
741
+		client := &fake.Clientset{}
742
+		client.AddReactor("delete", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
743 743
 			deletedPods++
744 744
 			return true, nil, nil
745 745
 		})
746
-		fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
746
+		client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
747 747
 			// None of these tests should transition the phase.
748 748
 			t.Errorf("%s: unexpected call to update a deployment", test.name)
749 749
 			return true, nil, nil
... ...
@@ -753,7 +754,7 @@ func TestHandle_cleanupPostNew(t *testing.T) {
753 753
 		deployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
754 754
 		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(test.deploymentPhase)
755 755
 
756
-		controller := okDeploymentController(fake, deployment, hookPods, true, test.podPhase)
756
+		controller := okDeploymentController(client, deployment, hookPods, true, test.podPhase)
757 757
 
758 758
 		if err := controller.Handle(deployment); err != nil {
759 759
 			t.Errorf("%s: unexpected error: %v", test.name, err)
... ...
@@ -787,18 +788,22 @@ func TestHandle_deployerPodDisappeared(t *testing.T) {
787 787
 		var updatedDeployment *kapi.ReplicationController
788 788
 		updateCalled := false
789 789
 
790
-		fake := &ktestclient.Fake{}
791
-		fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
792
-			rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
790
+		client := &fake.Clientset{}
791
+		client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
792
+			rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
793 793
 			updatedDeployment = rc
794 794
 			updateCalled = true
795 795
 			return true, nil, nil
796 796
 		})
797 797
 
798
-		deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), codec)
798
+		deployment, err := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), codec)
799
+		if err != nil {
800
+			t.Errorf("%s: unexpected error: %v", test.name, err)
801
+			continue
802
+		}
799 803
 		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(test.phase)
800 804
 
801
-		controller := okDeploymentController(fake, nil, nil, true, kapi.PodUnknown)
805
+		controller := okDeploymentController(client, nil, nil, true, kapi.PodUnknown)
802 806
 
803 807
 		if err := controller.Handle(deployment); err != nil {
804 808
 			t.Errorf("%s: unexpected error: %v", test.name, err)
... ...
@@ -904,9 +909,9 @@ func TestHandle_transitionFromDeployer(t *testing.T) {
904 904
 		var updatedDeployment *kapi.ReplicationController
905 905
 		updateCalled := false
906 906
 
907
-		fake := &ktestclient.Fake{}
908
-		fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
909
-			rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
907
+		client := &fake.Clientset{}
908
+		client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
909
+			rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
910 910
 			updatedDeployment = rc
911 911
 			updateCalled = true
912 912
 			return true, nil, nil
... ...
@@ -915,7 +920,7 @@ func TestHandle_transitionFromDeployer(t *testing.T) {
915 915
 		deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), codec)
916 916
 		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(test.deploymentPhase)
917 917
 
918
-		controller := okDeploymentController(fake, deployment, nil, true, test.podPhase)
918
+		controller := okDeploymentController(client, deployment, nil, true, test.podPhase)
919 919
 
920 920
 		if err := controller.Handle(deployment); err != nil {
921 921
 			t.Errorf("%s: unexpected error: %v", test.name, err)
... ...
@@ -969,12 +974,12 @@ func TestDeployerCustomLabelsAndAnnotations(t *testing.T) {
969 969
 		config.Spec.Strategy.Annotations = test.annotations
970 970
 		deployment, _ := deployutil.MakeDeployment(config, codec)
971 971
 
972
-		fake := &ktestclient.Fake{}
973
-		fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
972
+		client := &fake.Clientset{}
973
+		client.AddReactor("create", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
974 974
 			return true, deployerPod(deployment, "", true), nil
975 975
 		})
976 976
 
977
-		controller := okDeploymentController(fake, nil, nil, true, kapi.PodUnknown)
977
+		controller := okDeploymentController(client, nil, nil, true, kapi.PodUnknown)
978 978
 
979 979
 		podTemplate, err := controller.makeDeployerPod(deployment)
980 980
 		if err != nil {
... ...
@@ -9,6 +9,7 @@ import (
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10 10
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
11 11
 	"k8s.io/kubernetes/pkg/client/cache"
12
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
12 13
 	"k8s.io/kubernetes/pkg/client/record"
13 14
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
14 15
 	"k8s.io/kubernetes/pkg/labels"
... ...
@@ -48,7 +49,7 @@ type DeploymentConfigController struct {
48 48
 	// dn provides access to deploymentconfigs.
49 49
 	dn osclient.DeploymentConfigsNamespacer
50 50
 	// rn provides access to replication controllers.
51
-	rn kclient.ReplicationControllersNamespacer
51
+	rn kcoreclient.ReplicationControllersGetter
52 52
 
53 53
 	// queue contains deployment configs that need to be synced.
54 54
 	queue workqueue.RateLimitingInterface
... ...
@@ -8,6 +8,8 @@ import (
8 8
 
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10 10
 	"k8s.io/kubernetes/pkg/client/cache"
11
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
12
+	"k8s.io/kubernetes/pkg/client/testing/core"
11 13
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
12 14
 	"k8s.io/kubernetes/pkg/controller/framework"
13 15
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -341,18 +343,18 @@ func TestHandleScenarios(t *testing.T) {
341 341
 
342 342
 		oc := &testclient.Fake{}
343 343
 		oc.AddReactor("update", "deploymentconfigs", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
344
-			dc := action.(ktestclient.UpdateAction).GetObject().(*deployapi.DeploymentConfig)
344
+			dc := action.(core.UpdateAction).GetObject().(*deployapi.DeploymentConfig)
345 345
 			updatedConfig = dc
346 346
 			return true, dc, nil
347 347
 		})
348
-		kc := &ktestclient.Fake{}
349
-		kc.AddReactor("create", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
350
-			rc := action.(ktestclient.CreateAction).GetObject().(*kapi.ReplicationController)
348
+		kc := &fake.Clientset{}
349
+		kc.AddReactor("create", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
350
+			rc := action.(core.CreateAction).GetObject().(*kapi.ReplicationController)
351 351
 			deployments[rc.Name] = *rc
352 352
 			return true, rc, nil
353 353
 		})
354
-		kc.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
355
-			rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
354
+		kc.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
355
+			rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
356 356
 			deployments[rc.Name] = *rc
357 357
 			return true, rc, nil
358 358
 		})
... ...
@@ -374,10 +376,10 @@ func TestHandleScenarios(t *testing.T) {
374 374
 		rcInformer := framework.NewSharedIndexInformer(
375 375
 			&cache.ListWatch{
376 376
 				ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
377
-					return kc.ReplicationControllers(kapi.NamespaceAll).List(options)
377
+					return kc.Core().ReplicationControllers(kapi.NamespaceAll).List(options)
378 378
 				},
379 379
 				WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
380
-					return kc.ReplicationControllers(kapi.NamespaceAll).Watch(options)
380
+					return kc.Core().ReplicationControllers(kapi.NamespaceAll).Watch(options)
381 381
 				},
382 382
 			},
383 383
 			&kapi.ReplicationController{},
... ...
@@ -387,10 +389,10 @@ func TestHandleScenarios(t *testing.T) {
387 387
 		podInformer := framework.NewSharedIndexInformer(
388 388
 			&cache.ListWatch{
389 389
 				ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
390
-					return kc.Pods(kapi.NamespaceAll).List(options)
390
+					return kc.Core().Pods(kapi.NamespaceAll).List(options)
391 391
 				},
392 392
 				WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
393
-					return kc.Pods(kapi.NamespaceAll).Watch(options)
393
+					return kc.Core().Pods(kapi.NamespaceAll).Watch(options)
394 394
 				},
395 395
 			},
396 396
 			&kapi.Pod{},
... ...
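Both informer hunks change in the same way: List and Watch move from the flat unversioned client onto the typed Core() group, so one helper can serve a real clientset or a fake.Clientset alike. A sketch under that assumption (rcListWatch is an illustrative name):

package example

import (
	kapi "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// rcListWatch returns a ListWatch over all replication controllers backed by
// the clientset's Core() group; kc may be a real or a fake clientset.
func rcListWatch(kc kclientset.Interface) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
			return kc.Core().ReplicationControllers(kapi.NamespaceAll).List(options)
		},
		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
			return kc.Core().ReplicationControllers(kapi.NamespaceAll).Watch(options)
		},
	}
}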
@@ -6,6 +6,7 @@ import (
6 6
 
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8 8
 	"k8s.io/kubernetes/pkg/client/cache"
9
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
9 10
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
10 11
 	"k8s.io/kubernetes/pkg/controller/framework"
11 12
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -37,10 +38,10 @@ var (
37 37
 	rcInformer = framework.NewSharedIndexInformer(
38 38
 		&cache.ListWatch{
39 39
 			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
40
-				return (&ktestclient.Fake{}).ReplicationControllers(kapi.NamespaceAll).List(options)
40
+				return (fake.NewSimpleClientset()).Core().ReplicationControllers(kapi.NamespaceAll).List(options)
41 41
 			},
42 42
 			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
43
-				return (&ktestclient.Fake{}).ReplicationControllers(kapi.NamespaceAll).Watch(options)
43
+				return (fake.NewSimpleClientset()).Core().ReplicationControllers(kapi.NamespaceAll).Watch(options)
44 44
 			},
45 45
 		},
46 46
 		&kapi.ReplicationController{},
... ...
@@ -6,7 +6,7 @@ import (
6 6
 	"github.com/golang/glog"
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
9
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
10 10
 
11 11
 	deployapi "github.com/openshift/origin/pkg/deploy/api"
12 12
 	deployutil "github.com/openshift/origin/pkg/deploy/util"
... ...
@@ -94,14 +94,14 @@ func (p *pruner) Prune(deleter DeploymentDeleter) error {
94 94
 
95 95
 // deploymentDeleter removes a deployment from OpenShift.
96 96
 type deploymentDeleter struct {
97
-	deployments kclient.ReplicationControllersNamespacer
98
-	pods        kclient.PodsNamespacer
97
+	deployments kcoreclient.ReplicationControllersGetter
98
+	pods        kcoreclient.PodsGetter
99 99
 }
100 100
 
101 101
 var _ DeploymentDeleter = &deploymentDeleter{}
102 102
 
103 103
 // NewDeploymentDeleter creates a new deploymentDeleter.
104
-func NewDeploymentDeleter(deployments kclient.ReplicationControllersNamespacer, pods kclient.PodsNamespacer) DeploymentDeleter {
104
+func NewDeploymentDeleter(deployments kcoreclient.ReplicationControllersGetter, pods kcoreclient.PodsGetter) DeploymentDeleter {
105 105
 	return &deploymentDeleter{
106 106
 		deployments: deployments,
107 107
 		pods:        pods,
... ...
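The deleter now asks for the narrow Getter interfaces rather than the whole unversioned client. The Core() group of any internal clientset satisfies both, so test and production wiring collapse to one call; a small sketch, with names that are illustrative rather than from the commit:

package example

import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
)

// newDeleterDeps shows that a single clientset's Core() group can stand in for
// both dependencies NewDeploymentDeleter expects.
func newDeleterDeps() (kcoreclient.ReplicationControllersGetter, kcoreclient.PodsGetter) {
	kc := fake.NewSimpleClientset()
	return kc.Core(), kc.Core()
}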
@@ -3,6 +3,7 @@ package etcd
3 3
 import (
4 4
 	"testing"
5 5
 
6
+	kapi "k8s.io/kubernetes/pkg/api"
6 7
 	"k8s.io/kubernetes/pkg/fields"
7 8
 	"k8s.io/kubernetes/pkg/labels"
8 9
 	"k8s.io/kubernetes/pkg/registry/registrytest"
... ...
@@ -38,8 +39,7 @@ func TestCreate(t *testing.T) {
38 38
 	defer server.Terminate(t)
39 39
 	test := registrytest.New(t, storage.Store)
40 40
 	valid := validDeploymentConfig()
41
-	valid.Name = ""
42
-	valid.GenerateName = "test-"
41
+	valid.ObjectMeta = kapi.ObjectMeta{}
43 42
 	test.TestCreate(
44 43
 		valid,
45 44
 		// invalid
... ...
@@ -9,7 +9,8 @@ import (
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10 10
 	"k8s.io/kubernetes/pkg/api/errors"
11 11
 	"k8s.io/kubernetes/pkg/api/rest"
12
-	"k8s.io/kubernetes/pkg/client/unversioned"
12
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
13
+	kclient "k8s.io/kubernetes/pkg/client/unversioned"
13 14
 	"k8s.io/kubernetes/pkg/controller"
14 15
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
15 16
 	kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
... ...
@@ -36,7 +37,7 @@ const (
36 36
 // podGetter implements the ResourceGetter interface. Used by LogLocation to
37 37
 // retrieve the deployer pod
38 38
 type podGetter struct {
39
-	pn unversioned.PodsNamespacer
39
+	pn kcoreclient.PodsGetter
40 40
 }
41 41
 
42 42
 // Get is responsible for retrieving the deployer pod
... ...
@@ -50,9 +51,11 @@ func (g *podGetter) Get(ctx kapi.Context, name string) (runtime.Object, error) {
50 50
 
51 51
 // REST is an implementation of RESTStorage for the api server.
52 52
 type REST struct {
53
-	dn       client.DeploymentConfigsNamespacer
54
-	rn       unversioned.ReplicationControllersNamespacer
55
-	pn       unversioned.PodsNamespacer
53
+	dn client.DeploymentConfigsNamespacer
54
+	rn kcoreclient.ReplicationControllersGetter
55
+	pn kcoreclient.PodsGetter
56
+	// TODO internalclientset: get rid of oldClient after next rebase
57
+	oldPn    kclient.PodsNamespacer
56 58
 	connInfo kubeletclient.ConnectionInfoGetter
57 59
 	timeout  time.Duration
58 60
 	interval time.Duration
... ...
@@ -65,11 +68,12 @@ var _ = rest.GetterWithOptions(&REST{})
65 65
 // one for deployments (replication controllers) and one for pods to get the necessary
66 66
 // attributes to assemble the URL to which the request shall be redirected in order to
67 67
 // get the deployment logs.
68
-func NewREST(dn client.DeploymentConfigsNamespacer, rn unversioned.ReplicationControllersNamespacer, pn unversioned.PodsNamespacer, connectionInfo kubeletclient.ConnectionInfoGetter) *REST {
68
+func NewREST(dn client.DeploymentConfigsNamespacer, rn kcoreclient.ReplicationControllersGetter, pn kcoreclient.PodsGetter, oldPn kclient.PodsNamespacer, connectionInfo kubeletclient.ConnectionInfoGetter) *REST {
69 69
 	return &REST{
70 70
 		dn:       dn,
71 71
 		rn:       rn,
72 72
 		pn:       pn,
73
+		oldPn:    oldPn,
73 74
 		connInfo: connectionInfo,
74 75
 		timeout:  defaultTimeout,
75 76
 		interval: defaultInterval,
... ...
@@ -221,7 +225,7 @@ func (r *REST) returnApplicationPodName(target *kapi.ReplicationController) (str
221 221
 	selector := labels.Set(target.Spec.Selector).AsSelector()
222 222
 	sortBy := func(pods []*kapi.Pod) sort.Interface { return controller.ByLogging(pods) }
223 223
 
224
-	pod, _, err := kcmdutil.GetFirstPod(r.pn, target.Namespace, selector, r.timeout, sortBy)
224
+	pod, _, err := kcmdutil.GetFirstPod(r.oldPn, target.Namespace, selector, r.timeout, sortBy)
225 225
 	if err != nil {
226 226
 		return "", errors.NewInternalError(err)
227 227
 	}
... ...
@@ -10,6 +10,8 @@ import (
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11 11
 	"k8s.io/kubernetes/pkg/api/errors"
12 12
 	"k8s.io/kubernetes/pkg/api/unversioned"
13
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
14
+	"k8s.io/kubernetes/pkg/client/testing/core"
13 15
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
14 16
 	kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
15 17
 	genericrest "k8s.io/kubernetes/pkg/registry/generic/rest"
... ...
@@ -103,26 +105,26 @@ func mockREST(version, desired int64, status api.DeploymentStatus) *REST {
103 103
 
104 104
 	// Fake deployments
105 105
 	fakeDeployments := makeDeploymentList(version)
106
-	fakeRn := ktestclient.NewSimpleFake(fakeDeployments)
107
-	fakeRn.PrependReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
106
+	fakeRn := fake.NewSimpleClientset(fakeDeployments)
107
+	fakeRn.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
108 108
 		return true, &fakeDeployments.Items[desired-1], nil
109 109
 	})
110 110
 
111 111
 	// Fake watcher for deployments
112 112
 	fakeWatch := watch.NewFake()
113
-	fakeRn.PrependWatchReactor("replicationcontrollers", ktestclient.DefaultWatchReactor(fakeWatch, nil))
113
+	fakeRn.PrependWatchReactor("replicationcontrollers", core.DefaultWatchReactor(fakeWatch, nil))
114 114
 	obj := &fakeDeployments.Items[desired-1]
115 115
 	obj.Annotations[api.DeploymentStatusAnnotation] = string(status)
116 116
 	go fakeWatch.Add(obj)
117 117
 
118
-	fakePn := ktestclient.NewSimpleFake()
118
+	oldPn := ktestclient.NewSimpleFake()
119 119
 	if status == api.DeploymentStatusComplete {
120 120
 		// If the deployment is complete, we will try to get the logs from the oldest
121 121
 		// application pod...
122
-		fakePn.PrependReactor("list", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
122
+		oldPn.PrependReactor("list", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
123 123
 			return true, fakePodList, nil
124 124
 		})
125
-		fakePn.PrependReactor("get", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
125
+		oldPn.PrependReactor("get", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
126 126
 			return true, &fakePodList.Items[0], nil
127 127
 		})
128 128
 	} else {
... ...
@@ -144,15 +146,49 @@ func mockREST(version, desired int64, status api.DeploymentStatus) *REST {
144 144
 				Phase: kapi.PodRunning,
145 145
 			},
146 146
 		}
147
-		fakePn.PrependReactor("get", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
147
+		oldPn.PrependReactor("get", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
148
+			return true, fakeDeployer, nil
149
+		})
150
+	}
151
+	fakePn := fake.NewSimpleClientset()
152
+	if status == api.DeploymentStatusComplete {
153
+		// If the deployment is complete, we will try to get the logs from the oldest
154
+		// application pod...
155
+		fakePn.PrependReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
156
+			return true, fakePodList, nil
157
+		})
158
+		fakePn.PrependReactor("get", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
159
+			return true, &fakePodList.Items[0], nil
160
+		})
161
+	} else {
162
+		// ...otherwise try to get the logs from the deployer pod.
163
+		fakeDeployer := &kapi.Pod{
164
+			ObjectMeta: kapi.ObjectMeta{
165
+				Name:      deployutil.DeployerPodNameForDeployment(obj.Name),
166
+				Namespace: kapi.NamespaceDefault,
167
+			},
168
+			Spec: kapi.PodSpec{
169
+				Containers: []kapi.Container{
170
+					{
171
+						Name: deployutil.DeployerPodNameForDeployment(obj.Name) + "-container",
172
+					},
173
+				},
174
+				NodeName: "some-host",
175
+			},
176
+			Status: kapi.PodStatus{
177
+				Phase: kapi.PodRunning,
178
+			},
179
+		}
180
+		fakePn.PrependReactor("get", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
148 181
 			return true, fakeDeployer, nil
149 182
 		})
150 183
 	}
151 184
 
152 185
 	return &REST{
153 186
 		dn:       fakeDn,
154
-		rn:       fakeRn,
155
-		pn:       fakePn,
187
+		rn:       fakeRn.Core(),
188
+		pn:       fakePn.Core(),
189
+		oldPn:    oldPn,
156 190
 		connInfo: connectionInfo,
157 191
 		timeout:  defaultTimeout,
158 192
 	}
... ...
@@ -10,7 +10,8 @@ import (
10 10
 	"k8s.io/kubernetes/pkg/api/errors"
11 11
 	"k8s.io/kubernetes/pkg/api/rest"
12 12
 	"k8s.io/kubernetes/pkg/api/unversioned"
13
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
13
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
14
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
14 15
 	"k8s.io/kubernetes/pkg/registry/generic/registry"
15 16
 	"k8s.io/kubernetes/pkg/runtime"
16 17
 	utilerrors "k8s.io/kubernetes/pkg/util/errors"
... ...
@@ -23,9 +24,9 @@ import (
23 23
 	imageapi "github.com/openshift/origin/pkg/image/api"
24 24
 )
25 25
 
26
-func NewREST(store registry.Store, oc client.Interface, kc kclient.Interface, decoder runtime.Decoder, admission admission.Interface) *REST {
26
+func NewREST(store registry.Store, oc client.Interface, kc kclientset.Interface, decoder runtime.Decoder, admission admission.Interface) *REST {
27 27
 	store.UpdateStrategy = Strategy
28
-	return &REST{store: &store, isn: oc, rn: kc, decoder: decoder, admit: admission}
28
+	return &REST{store: &store, isn: oc, rn: kc.Core(), decoder: decoder, admit: admission}
29 29
 }
30 30
 
31 31
 // REST implements the Creater interface.
... ...
@@ -34,7 +35,7 @@ var _ = rest.Creater(&REST{})
34 34
 type REST struct {
35 35
 	store   *registry.Store
36 36
 	isn     client.ImageStreamsNamespacer
37
-	rn      kclient.ReplicationControllersNamespacer
37
+	rn      kcoreclient.ReplicationControllersGetter
38 38
 	decoder runtime.Decoder
39 39
 	admit   admission.Interface
40 40
 }
... ...
@@ -173,7 +174,7 @@ func processTriggers(config *deployapi.DeploymentConfig, isn client.ImageStreams
173 173
 // canTrigger determines if we can trigger a new deployment for config based on the various deployment triggers.
174 174
 func canTrigger(
175 175
 	config *deployapi.DeploymentConfig,
176
-	rn kclient.ReplicationControllersNamespacer,
176
+	rn kcoreclient.ReplicationControllersGetter,
177 177
 	decoder runtime.Decoder,
178 178
 	force bool,
179 179
 ) (bool, []deployapi.DeploymentCause, error) {
... ...
@@ -251,7 +252,7 @@ func canTrigger(
251 251
 // decodeFromLatestDeployment will try to return the decoded version of the current deploymentconfig
252 252
 // found in the annotations of its latest deployment. If there is no previous deploymentconfig (i.e.
253 253
 // latestVersion == 0), the returned deploymentconfig will be the same.
254
-func decodeFromLatestDeployment(config *deployapi.DeploymentConfig, rn kclient.ReplicationControllersNamespacer, decoder runtime.Decoder) (*deployapi.DeploymentConfig, error) {
254
+func decodeFromLatestDeployment(config *deployapi.DeploymentConfig, rn kcoreclient.ReplicationControllersGetter, decoder runtime.Decoder) (*deployapi.DeploymentConfig, error) {
255 255
 	if config.Status.LatestVersion == 0 {
256 256
 		return config, nil
257 257
 	}
... ...
@@ -5,6 +5,8 @@ import (
5 5
 
6 6
 	kapi "k8s.io/kubernetes/pkg/api"
7 7
 	"k8s.io/kubernetes/pkg/api/errors"
8
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
9
+	"k8s.io/kubernetes/pkg/client/testing/core"
8 10
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
9 11
 	"k8s.io/kubernetes/pkg/runtime"
10 12
 
... ...
@@ -660,8 +662,8 @@ func TestCanTrigger(t *testing.T) {
660 660
 	for _, test := range tests {
661 661
 		t.Logf("running scenario %q", test.name)
662 662
 
663
-		fake := &ktestclient.Fake{}
664
-		fake.AddReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
663
+		client := &fake.Clientset{}
664
+		client.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
665 665
 			config := test.decoded
666 666
 			if config == nil {
667 667
 				config = test.config
... ...
@@ -673,7 +675,7 @@ func TestCanTrigger(t *testing.T) {
673 673
 
674 674
 		test.config = deploytest.RoundTripConfig(t, test.config)
675 675
 
676
-		got, gotCauses, err := canTrigger(test.config, fake, codec, test.force)
676
+		got, gotCauses, err := canTrigger(test.config, client.Core(), codec, test.force)
677 677
 		if err != nil && !test.expectedErr {
678 678
 			t.Errorf("unexpected error: %v", err)
679 679
 			continue
... ...
@@ -6,7 +6,7 @@ import (
6 6
 	"time"
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
9
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
10 10
 	"k8s.io/kubernetes/pkg/fields"
11 11
 	"k8s.io/kubernetes/pkg/watch"
12 12
 
... ...
@@ -22,7 +22,7 @@ var (
22 22
 // WaitForRunningDeployment waits until the specified deployment is no longer New or Pending. Returns true if
23 23
 // the deployment became running, complete, or failed within timeout, false if it did not, and an error if any
24 24
 // other error state occurred. The last observed deployment state is returned.
25
-func WaitForRunningDeployment(rn kclient.ReplicationControllersNamespacer, observed *kapi.ReplicationController, timeout time.Duration) (*kapi.ReplicationController, bool, error) {
25
+func WaitForRunningDeployment(rn kcoreclient.ReplicationControllersGetter, observed *kapi.ReplicationController, timeout time.Duration) (*kapi.ReplicationController, bool, error) {
26 26
 	fieldSelector := fields.Set{"metadata.name": observed.Name}.AsSelector()
27 27
 	options := kapi.ListOptions{FieldSelector: fieldSelector, ResourceVersion: observed.ResourceVersion}
28 28
 	w, err := rn.ReplicationControllers(observed.Namespace).Watch(options)
... ...
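Only the parameter type of WaitForRunningDeployment changes; the Getter still yields the familiar namespaced interface, so the Watch call in the body compiles unchanged. A caller-side sketch that mirrors the lines above (watchDeployment is an illustrative name):

package example

import (
	kapi "k8s.io/kubernetes/pkg/api"
	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/watch"
)

// watchDeployment starts a watch on a single replication controller through the
// new ReplicationControllersGetter, exactly as the helper above does internally.
func watchDeployment(rn kcoreclient.ReplicationControllersGetter, observed *kapi.ReplicationController) (watch.Interface, error) {
	fieldSelector := fields.Set{"metadata.name": observed.Name}.AsSelector()
	options := kapi.ListOptions{FieldSelector: fieldSelector, ResourceVersion: observed.ResourceVersion}
	return rn.ReplicationControllers(observed.Namespace).Watch(options)
}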
@@ -5,7 +5,8 @@ import (
5 5
 
6 6
 	kapi "k8s.io/kubernetes/pkg/api"
7 7
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
8
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
8
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
9
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
9 10
 	"k8s.io/kubernetes/pkg/runtime"
10 11
 	"k8s.io/kubernetes/pkg/util/validation/field"
11 12
 
... ...
@@ -19,16 +20,16 @@ import (
19 19
 type REST struct {
20 20
 	generator RollbackGenerator
21 21
 	dn        client.DeploymentConfigsNamespacer
22
-	rn        kclient.ReplicationControllersNamespacer
22
+	rn        kcoreclient.ReplicationControllersGetter
23 23
 	codec     runtime.Codec
24 24
 }
25 25
 
26 26
 // NewREST safely creates a new REST.
27
-func NewREST(oc client.Interface, kc kclient.Interface, codec runtime.Codec) *REST {
27
+func NewREST(oc client.Interface, kc kclientset.Interface, codec runtime.Codec) *REST {
28 28
 	return &REST{
29 29
 		generator: NewRollbackGenerator(),
30 30
 		dn:        oc,
31
-		rn:        kc,
31
+		rn:        kc.Core(),
32 32
 		codec:     codec,
33 33
 	}
34 34
 }
... ...
@@ -7,6 +7,8 @@ import (
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9 9
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
10
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
11
+	"k8s.io/kubernetes/pkg/client/testing/core"
10 12
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
11 13
 	"k8s.io/kubernetes/pkg/runtime"
12 14
 
... ...
@@ -59,8 +61,8 @@ func TestCreateOk(t *testing.T) {
59 59
 	oc.AddReactor("get", "deploymentconfigs", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
60 60
 		return true, deploytest.OkDeploymentConfig(2), nil
61 61
 	})
62
-	kc := &ktestclient.Fake{}
63
-	kc.AddReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
62
+	kc := &fake.Clientset{}
63
+	kc.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
64 64
 		deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), codec)
65 65
 		return true, deployment, nil
66 66
 	})
... ...
@@ -90,8 +92,8 @@ func TestCreateGeneratorError(t *testing.T) {
90 90
 	oc.AddReactor("get", "deploymentconfigs", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
91 91
 		return true, deploytest.OkDeploymentConfig(2), nil
92 92
 	})
93
-	kc := &ktestclient.Fake{}
94
-	kc.AddReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
93
+	kc := &fake.Clientset{}
94
+	kc.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
95 95
 		deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), codec)
96 96
 		return true, deployment, nil
97 97
 	})
... ...
@@ -99,7 +101,7 @@ func TestCreateGeneratorError(t *testing.T) {
99 99
 	rest := REST{
100 100
 		generator: &terribleGenerator{},
101 101
 		dn:        oc,
102
-		rn:        kc,
102
+		rn:        kc.Core(),
103 103
 		codec:     kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion),
104 104
 	}
105 105
 
... ...
@@ -120,8 +122,8 @@ func TestCreateMissingDeployment(t *testing.T) {
120 120
 	oc.AddReactor("get", "deploymentconfigs", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
121 121
 		return true, deploytest.OkDeploymentConfig(2), nil
122 122
 	})
123
-	kc := &ktestclient.Fake{}
124
-	kc.AddReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
123
+	kc := &fake.Clientset{}
124
+	kc.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
125 125
 		deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), codec)
126 126
 		return true, nil, kerrors.NewNotFound(kapi.Resource("replicationController"), deployment.Name)
127 127
 	})
... ...
@@ -147,8 +149,8 @@ func TestCreateInvalidDeployment(t *testing.T) {
147 147
 	oc.AddReactor("get", "deploymentconfigs", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
148 148
 		return true, deploytest.OkDeploymentConfig(2), nil
149 149
 	})
150
-	kc := &ktestclient.Fake{}
151
-	kc.AddReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
150
+	kc := &fake.Clientset{}
151
+	kc.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
152 152
 		// invalidate the encoded config
153 153
 		deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), codec)
154 154
 		deployment.Annotations[deployapi.DeploymentEncodedConfigAnnotation] = ""
... ...
@@ -177,8 +179,8 @@ func TestCreateMissingDeploymentConfig(t *testing.T) {
177 177
 		dc := deploytest.OkDeploymentConfig(2)
178 178
 		return true, nil, kerrors.NewNotFound(deployapi.Resource("deploymentConfig"), dc.Name)
179 179
 	})
180
-	kc := &ktestclient.Fake{}
181
-	kc.AddReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
180
+	kc := &fake.Clientset{}
181
+	kc.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
182 182
 		deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), codec)
183 183
 		return true, deployment, nil
184 184
 	})
... ...
@@ -8,8 +8,10 @@ import (
8 8
 	"time"
9 9
 
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
11 12
 	"k8s.io/kubernetes/pkg/client/record"
12 13
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
14
+	adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
13 15
 	"k8s.io/kubernetes/pkg/kubectl"
14 16
 	"k8s.io/kubernetes/pkg/runtime"
15 17
 
... ...
@@ -33,9 +35,9 @@ type RecreateDeploymentStrategy struct {
33 33
 	// until is a condition that, if reached, will cause the strategy to exit early
34 34
 	until string
35 35
 	// rcClient is a client to access replication controllers
36
-	rcClient kclient.ReplicationControllersNamespacer
36
+	rcClient kcoreclient.ReplicationControllersGetter
37 37
 	// eventClient is a client to access events
38
-	eventClient kclient.EventNamespacer
38
+	eventClient kcoreclient.EventsGetter
39 39
 	// getUpdateAcceptor returns an UpdateAcceptor to verify the first replica
40 40
 	// of the deployment.
41 41
 	getUpdateAcceptor func(time.Duration, int32) strat.UpdateAcceptor
... ...
@@ -62,27 +64,29 @@ const AcceptorInterval = 1 * time.Second
62 62
 
63 63
 // NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
64 64
 // a real HookExecutor and client.
65
-func NewRecreateDeploymentStrategy(client kclient.Interface, tagClient client.ImageStreamTagsNamespacer, events record.EventSink, decoder runtime.Decoder, out, errOut io.Writer, until string) *RecreateDeploymentStrategy {
65
+func NewRecreateDeploymentStrategy(oldClient kclient.Interface, tagClient client.ImageStreamTagsNamespacer, events record.EventSink, decoder runtime.Decoder, out, errOut io.Writer, until string) *RecreateDeploymentStrategy {
66 66
 	if out == nil {
67 67
 		out = ioutil.Discard
68 68
 	}
69 69
 	if errOut == nil {
70 70
 		errOut = ioutil.Discard
71 71
 	}
72
-	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
72
+	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), oldClient)
73
+	// TODO internalclientset: get rid of oldClient after next rebase
74
+	client := adapter.FromUnversionedClient(oldClient.(*kclient.Client))
73 75
 	return &RecreateDeploymentStrategy{
74 76
 		out:         out,
75 77
 		errOut:      errOut,
76 78
 		events:      events,
77 79
 		until:       until,
78
-		rcClient:    client,
79
-		eventClient: client,
80
+		rcClient:    client.Core(),
81
+		eventClient: client.Core(),
80 82
 		getUpdateAcceptor: func(timeout time.Duration, minReadySeconds int32) strat.UpdateAcceptor {
81
-			return stratsupport.NewAcceptNewlyObservedReadyPods(out, client, timeout, AcceptorInterval, minReadySeconds)
83
+			return stratsupport.NewAcceptNewlyObservedReadyPods(out, client.Core(), timeout, AcceptorInterval, minReadySeconds)
82 84
 		},
83 85
 		scaler:       scaler,
84 86
 		decoder:      decoder,
85
-		hookExecutor: stratsupport.NewHookExecutor(client, tagClient, client, os.Stdout, decoder),
87
+		hookExecutor: stratsupport.NewHookExecutor(client.Core(), tagClient, client.Core(), os.Stdout, decoder),
86 88
 		retryTimeout: 120 * time.Second,
87 89
 		retryPeriod:  1 * time.Second,
88 90
 	}
... ...
@@ -8,8 +8,8 @@ import (
8 8
 
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10 10
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
11
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
12
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
11
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
12
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
13 13
 
14 14
 	deployapi "github.com/openshift/origin/pkg/deploy/api"
15 15
 	deploytest "github.com/openshift/origin/pkg/deploy/api/test"
... ...
@@ -25,8 +25,8 @@ type fakeControllerClient struct {
25 25
 	deployment *kapi.ReplicationController
26 26
 }
27 27
 
28
-func (c *fakeControllerClient) ReplicationControllers(ns string) kclient.ReplicationControllerInterface {
29
-	return ktestclient.NewSimpleFake(c.deployment).ReplicationControllers(ns)
28
+func (c *fakeControllerClient) ReplicationControllers(ns string) kcoreclient.ReplicationControllerInterface {
29
+	return fake.NewSimpleClientset(c.deployment).Core().ReplicationControllers(ns)
30 30
 }
31 31
 
32 32
 func TestRecreate_initialDeployment(t *testing.T) {
... ...
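fake.NewSimpleClientset seeds the fake with objects up front, which is how fakeControllerClient above can hand back a pre-populated namespaced client. A compact sketch of the same pattern:

package example

import (
	kapi "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
)

// seededRCClient returns a namespaced client that already knows about the given
// replication controller, so a later Get for it succeeds without reactors.
func seededRCClient(deployment *kapi.ReplicationController) kcoreclient.ReplicationControllerInterface {
	return fake.NewSimpleClientset(deployment).Core().ReplicationControllers(deployment.Namespace)
}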
@@ -40,7 +40,7 @@ func TestRecreate_initialDeployment(t *testing.T) {
40 40
 		retryPeriod:       1 * time.Millisecond,
41 41
 		getUpdateAcceptor: getUpdateAcceptor,
42 42
 		scaler:            scaler,
43
-		eventClient:       ktestclient.NewSimpleFake(),
43
+		eventClient:       fake.NewSimpleClientset().Core(),
44 44
 	}
45 45
 
46 46
 	config := deploytest.OkDeploymentConfig(1)
... ...
@@ -75,7 +75,7 @@ func TestRecreate_deploymentPreHookSuccess(t *testing.T) {
75 75
 		retryTimeout:      1 * time.Second,
76 76
 		retryPeriod:       1 * time.Millisecond,
77 77
 		getUpdateAcceptor: getUpdateAcceptor,
78
-		eventClient:       ktestclient.NewSimpleFake(),
78
+		eventClient:       fake.NewSimpleClientset().Core(),
79 79
 		rcClient:          &fakeControllerClient{deployment: deployment},
80 80
 		hookExecutor: &hookExecutorImpl{
81 81
 			executeFunc: func(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, suffix, label string) error {
... ...
@@ -108,7 +108,7 @@ func TestRecreate_deploymentPreHookFail(t *testing.T) {
108 108
 		retryTimeout:      1 * time.Second,
109 109
 		retryPeriod:       1 * time.Millisecond,
110 110
 		getUpdateAcceptor: getUpdateAcceptor,
111
-		eventClient:       ktestclient.NewSimpleFake(),
111
+		eventClient:       fake.NewSimpleClientset().Core(),
112 112
 		rcClient:          &fakeControllerClient{deployment: deployment},
113 113
 		hookExecutor: &hookExecutorImpl{
114 114
 			executeFunc: func(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, suffix, label string) error {
... ...
@@ -142,7 +142,7 @@ func TestRecreate_deploymentMidHookSuccess(t *testing.T) {
142 142
 		retryPeriod:       1 * time.Millisecond,
143 143
 		rcClient:          &fakeControllerClient{deployment: deployment},
144 144
 		getUpdateAcceptor: getUpdateAcceptor,
145
-		eventClient:       ktestclient.NewSimpleFake(),
145
+		eventClient:       fake.NewSimpleClientset().Core(),
146 146
 		hookExecutor: &hookExecutorImpl{
147 147
 			executeFunc: func(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, suffix, label string) error {
148 148
 				hookExecuted = true
... ...
@@ -174,7 +174,7 @@ func TestRecreate_deploymentMidHookFail(t *testing.T) {
174 174
 		retryTimeout:      1 * time.Second,
175 175
 		retryPeriod:       1 * time.Millisecond,
176 176
 		rcClient:          &fakeControllerClient{deployment: deployment},
177
-		eventClient:       ktestclient.NewSimpleFake(),
177
+		eventClient:       fake.NewSimpleClientset().Core(),
178 178
 		getUpdateAcceptor: getUpdateAcceptor,
179 179
 		hookExecutor: &hookExecutorImpl{
180 180
 			executeFunc: func(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, suffix, label string) error {
... ...
@@ -206,7 +206,7 @@ func TestRecreate_deploymentPostHookSuccess(t *testing.T) {
206 206
 		retryTimeout:      1 * time.Second,
207 207
 		retryPeriod:       1 * time.Millisecond,
208 208
 		rcClient:          &fakeControllerClient{deployment: deployment},
209
-		eventClient:       ktestclient.NewSimpleFake(),
209
+		eventClient:       fake.NewSimpleClientset().Core(),
210 210
 		getUpdateAcceptor: getUpdateAcceptor,
211 211
 		hookExecutor: &hookExecutorImpl{
212 212
 			executeFunc: func(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, suffix, label string) error {
... ...
@@ -240,7 +240,7 @@ func TestRecreate_deploymentPostHookFail(t *testing.T) {
240 240
 		retryTimeout:      1 * time.Second,
241 241
 		retryPeriod:       1 * time.Millisecond,
242 242
 		rcClient:          &fakeControllerClient{deployment: deployment},
243
-		eventClient:       ktestclient.NewSimpleFake(),
243
+		eventClient:       fake.NewSimpleClientset().Core(),
244 244
 		getUpdateAcceptor: getUpdateAcceptor,
245 245
 		hookExecutor: &hookExecutorImpl{
246 246
 			executeFunc: func(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, suffix, label string) error {
... ...
@@ -267,7 +267,7 @@ func TestRecreate_acceptorSuccess(t *testing.T) {
267 267
 	strategy := &RecreateDeploymentStrategy{
268 268
 		out:          &bytes.Buffer{},
269 269
 		errOut:       &bytes.Buffer{},
270
-		eventClient:  ktestclient.NewSimpleFake(),
270
+		eventClient:  fake.NewSimpleClientset().Core(),
271 271
 		decoder:      kapi.Codecs.UniversalDecoder(),
272 272
 		retryTimeout: 1 * time.Second,
273 273
 		retryPeriod:  1 * time.Millisecond,
... ...
@@ -317,7 +317,7 @@ func TestRecreate_acceptorFail(t *testing.T) {
317 317
 		retryTimeout: 1 * time.Second,
318 318
 		retryPeriod:  1 * time.Millisecond,
319 319
 		scaler:       scaler,
320
-		eventClient:  ktestclient.NewSimpleFake(),
320
+		eventClient:  fake.NewSimpleClientset().Core(),
321 321
 	}
322 322
 
323 323
 	acceptor := &testAcceptor{
... ...
@@ -10,8 +10,10 @@ import (
10 10
 
11 11
 	kapi "k8s.io/kubernetes/pkg/api"
12 12
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
13
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
13 14
 	"k8s.io/kubernetes/pkg/client/record"
14 15
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
16
+	adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
15 17
 	"k8s.io/kubernetes/pkg/kubectl"
16 18
 	"k8s.io/kubernetes/pkg/runtime"
17 19
 	"k8s.io/kubernetes/pkg/util/wait"
... ...
@@ -53,9 +55,9 @@ type RollingDeploymentStrategy struct {
53 53
 	// initialStrategy is used when there are no prior deployments.
54 54
 	initialStrategy acceptingDeploymentStrategy
55 55
 	// rcClient is used to deal with ReplicationControllers.
56
-	rcClient kclient.ReplicationControllersNamespacer
56
+	rcClient kcoreclient.ReplicationControllersGetter
57 57
 	// eventClient is a client to access events
58
-	eventClient kclient.EventNamespacer
58
+	eventClient kcoreclient.EventsGetter
59 59
 	// tags is a client used to perform tag actions
60 60
 	tags client.ImageStreamTagsNamespacer
61 61
 	// rollingUpdate knows how to perform a rolling update.
... ...
@@ -87,31 +89,33 @@ type acceptingDeploymentStrategy interface {
87 87
 const AcceptorInterval = 1 * time.Second
88 88
 
89 89
 // NewRollingDeploymentStrategy makes a new RollingDeploymentStrategy.
90
-func NewRollingDeploymentStrategy(namespace string, client kclient.Interface, tags client.ImageStreamTagsNamespacer, events record.EventSink, decoder runtime.Decoder, initialStrategy acceptingDeploymentStrategy, out, errOut io.Writer, until string) *RollingDeploymentStrategy {
90
+func NewRollingDeploymentStrategy(namespace string, oldClient kclient.Interface, tags client.ImageStreamTagsNamespacer, events record.EventSink, decoder runtime.Decoder, initialStrategy acceptingDeploymentStrategy, out, errOut io.Writer, until string) *RollingDeploymentStrategy {
91 91
 	if out == nil {
92 92
 		out = ioutil.Discard
93 93
 	}
94 94
 	if errOut == nil {
95 95
 		errOut = ioutil.Discard
96 96
 	}
97
+	// TODO internalclientset: get rid of oldClient after next rebase
98
+	client := adapter.FromUnversionedClient(oldClient.(*kclient.Client))
97 99
 	return &RollingDeploymentStrategy{
98 100
 		out:             out,
99 101
 		errOut:          errOut,
100 102
 		until:           until,
101 103
 		decoder:         decoder,
102 104
 		initialStrategy: initialStrategy,
103
-		rcClient:        client,
104
-		eventClient:     client,
105
+		rcClient:        client.Core(),
106
+		eventClient:     client.Core(),
105 107
 		tags:            tags,
106 108
 		apiRetryPeriod:  DefaultApiRetryPeriod,
107 109
 		apiRetryTimeout: DefaultApiRetryTimeout,
108 110
 		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
109
-			updater := kubectl.NewRollingUpdater(namespace, client)
111
+			updater := kubectl.NewRollingUpdater(namespace, oldClient)
110 112
 			return updater.Update(config)
111 113
 		},
112
-		hookExecutor: stratsupport.NewHookExecutor(client, tags, client, os.Stdout, decoder),
114
+		hookExecutor: stratsupport.NewHookExecutor(client.Core(), tags, client.Core(), os.Stdout, decoder),
113 115
 		getUpdateAcceptor: func(timeout time.Duration, minReadySeconds int32) strat.UpdateAcceptor {
114
-			return stratsupport.NewAcceptNewlyObservedReadyPods(out, client, timeout, AcceptorInterval, minReadySeconds)
116
+			return stratsupport.NewAcceptNewlyObservedReadyPods(out, client.Core(), timeout, AcceptorInterval, minReadySeconds)
115 117
 		},
116 118
 	}
117 119
 }
... ...
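Both strategies keep the legacy client only where kubectl still requires it (ScalerFor, NewRollingUpdater) and bridge everything else through the adapter flagged by the TODO. A sketch of that bridge, using only the calls shown above; the function name is illustrative:

package example

import (
	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
	kclient "k8s.io/kubernetes/pkg/client/unversioned"
	adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
)

// coreClientsFromOldClient converts the legacy unversioned client into the
// generated clientset and returns the two typed views the strategies need.
// The type assertion panics for other kclient.Interface implementations, which
// matches what the constructors above accept until the next rebase.
func coreClientsFromOldClient(oldClient kclient.Interface) (kcoreclient.ReplicationControllersGetter, kcoreclient.EventsGetter) {
	client := adapter.FromUnversionedClient(oldClient.(*kclient.Client))
	return client.Core(), client.Core()
}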
@@ -8,7 +8,8 @@ import (
8 8
 
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10 10
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
11
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
11
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
12
+	"k8s.io/kubernetes/pkg/client/testing/core"
12 13
 	"k8s.io/kubernetes/pkg/kubectl"
13 14
 	"k8s.io/kubernetes/pkg/runtime"
14 15
 
... ...
@@ -25,8 +26,8 @@ func TestRolling_deployInitial(t *testing.T) {
25 25
 
26 26
 	strategy := &RollingDeploymentStrategy{
27 27
 		decoder:     kapi.Codecs.UniversalDecoder(),
28
-		rcClient:    ktestclient.NewSimpleFake(),
29
-		eventClient: ktestclient.NewSimpleFake(),
28
+		rcClient:    fake.NewSimpleClientset().Core(),
29
+		eventClient: fake.NewSimpleClientset().Core(),
30 30
 		initialStrategy: &testStrategy{
31 31
 			deployFn: func(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
32 32
 				initialStrategyInvoked = true
... ...
@@ -69,13 +70,13 @@ func TestRolling_deployRolling(t *testing.T) {
69 69
 	}
70 70
 	deploymentUpdated := false
71 71
 
72
-	fake := &ktestclient.Fake{}
73
-	fake.AddReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
74
-		name := action.(ktestclient.GetAction).GetName()
72
+	client := &fake.Clientset{}
73
+	client.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
74
+		name := action.(core.GetAction).GetName()
75 75
 		return true, deployments[name], nil
76 76
 	})
77
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
78
-		updated := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
77
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
78
+		updated := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
79 79
 		deploymentUpdated = true
80 80
 		return true, updated, nil
81 81
 	})
... ...
@@ -83,8 +84,8 @@ func TestRolling_deployRolling(t *testing.T) {
83 83
 	var rollingConfig *kubectl.RollingUpdaterConfig
84 84
 	strategy := &RollingDeploymentStrategy{
85 85
 		decoder:     kapi.Codecs.UniversalDecoder(),
86
-		rcClient:    fake,
87
-		eventClient: ktestclient.NewSimpleFake(),
86
+		rcClient:    client.Core(),
87
+		eventClient: fake.NewSimpleClientset().Core(),
88 88
 		initialStrategy: &testStrategy{
89 89
 			deployFn: func(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
90 90
 				t.Fatalf("unexpected call to initial strategy")
... ...
@@ -153,20 +154,20 @@ func TestRolling_deployRollingHooks(t *testing.T) {
153 153
 
154 154
 	deployments := map[string]*kapi.ReplicationController{latest.Name: latest}
155 155
 
156
-	fake := &ktestclient.Fake{}
157
-	fake.AddReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
158
-		name := action.(ktestclient.GetAction).GetName()
156
+	client := &fake.Clientset{}
157
+	client.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
158
+		name := action.(core.GetAction).GetName()
159 159
 		return true, deployments[name], nil
160 160
 	})
161
-	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
162
-		updated := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
161
+	client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
162
+		updated := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
163 163
 		return true, updated, nil
164 164
 	})
165 165
 
166 166
 	strategy := &RollingDeploymentStrategy{
167 167
 		decoder:     kapi.Codecs.UniversalDecoder(),
168
-		rcClient:    fake,
169
-		eventClient: ktestclient.NewSimpleFake(),
168
+		rcClient:    client.Core(),
169
+		eventClient: fake.NewSimpleClientset().Core(),
170 170
 		initialStrategy: &testStrategy{
171 171
 			deployFn: func(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
172 172
 				t.Fatalf("unexpected call to initial strategy")
... ...
@@ -227,8 +228,8 @@ func TestRolling_deployInitialHooks(t *testing.T) {
227 227
 
228 228
 	strategy := &RollingDeploymentStrategy{
229 229
 		decoder:     kapi.Codecs.UniversalDecoder(),
230
-		rcClient:    ktestclient.NewSimpleFake(),
231
-		eventClient: ktestclient.NewSimpleFake(),
230
+		rcClient:    fake.NewSimpleClientset().Core(),
231
+		eventClient: fake.NewSimpleClientset().Core(),
232 232
 		initialStrategy: &testStrategy{
233 233
 			deployFn: func(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
234 234
 				return nil
... ...
@@ -12,7 +12,7 @@ import (
12 12
 	kapi "k8s.io/kubernetes/pkg/api"
13 13
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
14 14
 	"k8s.io/kubernetes/pkg/client/cache"
15
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
15
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
16 16
 	kdeployutil "k8s.io/kubernetes/pkg/controller/deployment/util"
17 17
 	"k8s.io/kubernetes/pkg/fields"
18 18
 	"k8s.io/kubernetes/pkg/labels"
... ...
@@ -36,7 +36,7 @@ const HookContainerName = "lifecycle"
36 36
 // HookExecutor executes a deployment lifecycle hook.
37 37
 type HookExecutor struct {
38 38
 	// pods provides a client for accessing pods
39
-	pods kclient.PodsNamespacer
39
+	pods kcoreclient.PodsGetter
40 40
 	// tags allows setting image stream tags
41 41
 	tags client.ImageStreamTagsNamespacer
42 42
 	// out is where hook pod logs should be written to.
... ...
@@ -44,13 +44,13 @@ type HookExecutor struct {
44 44
 	// decoder is used for encoding/decoding.
45 45
 	decoder runtime.Decoder
46 46
 	// recorder is used to emit events from hooks
47
-	events kclient.EventNamespacer
47
+	events kcoreclient.EventsGetter
48 48
 	// getPodLogs knows how to get logs from a pod and is used for testing
49 49
 	getPodLogs func(*kapi.Pod) (io.ReadCloser, error)
50 50
 }
51 51
 
52 52
 // NewHookExecutor makes a HookExecutor from a client.
53
-func NewHookExecutor(pods kclient.PodsNamespacer, tags client.ImageStreamTagsNamespacer, events kclient.EventNamespacer, out io.Writer, decoder runtime.Decoder) *HookExecutor {
53
+func NewHookExecutor(pods kcoreclient.PodsGetter, tags client.ImageStreamTagsNamespacer, events kcoreclient.EventsGetter, out io.Writer, decoder runtime.Decoder) *HookExecutor {
54 54
 	executor := &HookExecutor{
55 55
 		tags:    tags,
56 56
 		pods:    pods,
... ...
@@ -412,7 +412,7 @@ func canRetryReading(pod *kapi.Pod, restarts int32) (bool, int32) {
412 412
 // FIFO/reflector pair. This avoids managing watches directly.
413 413
 // A stop channel to close the watch's reflector is also returned.
414 414
 // It is the caller's responsibility to defer closing the stop channel to prevent leaking resources.
415
-func NewPodWatch(client kclient.PodInterface, namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
415
+func NewPodWatch(client kcoreclient.PodInterface, namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
416 416
 	fieldSelector := fields.OneTermEqualSelector("metadata.name", name)
417 417
 	podLW := &cache.ListWatch{
418 418
 		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
... ...
@@ -438,7 +438,7 @@ func NewPodWatch(client kclient.PodInterface, namespace, name, resourceVersion s
438 438
 // from a real client.
439 439
 func NewAcceptNewlyObservedReadyPods(
440 440
 	out io.Writer,
441
-	kclient kclient.PodsNamespacer,
441
+	kclient kcoreclient.PodsGetter,
442 442
 	timeout time.Duration,
443 443
 	interval time.Duration,
444 444
 	minReadySeconds int32,
... ...
@@ -15,6 +15,8 @@ import (
15 15
 	"k8s.io/kubernetes/pkg/api/resource"
16 16
 	"k8s.io/kubernetes/pkg/api/unversioned"
17 17
 	"k8s.io/kubernetes/pkg/client/cache"
18
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
19
+	"k8s.io/kubernetes/pkg/client/testing/core"
18 20
 	"k8s.io/kubernetes/pkg/runtime"
19 21
 	"k8s.io/kubernetes/pkg/util/diff"
20 22
 	"k8s.io/kubernetes/pkg/util/sets"
... ...
@@ -25,7 +27,6 @@ import (
25 25
 	deployv1 "github.com/openshift/origin/pkg/deploy/api/v1"
26 26
 	deployutil "github.com/openshift/origin/pkg/deploy/util"
27 27
 	"github.com/openshift/origin/pkg/util/namer"
28
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
29 28
 
30 29
 	_ "github.com/openshift/origin/pkg/api/install"
31 30
 )
... ...
@@ -34,12 +35,12 @@ func nowFunc() *unversioned.Time {
34 34
 	return &unversioned.Time{Time: time.Now().Add(-5 * time.Second)}
35 35
 }
36 36
 
37
-func newTestClient(config *deployapi.DeploymentConfig) *testclient.Fake {
38
-	client := &testclient.Fake{}
37
+func newTestClient(config *deployapi.DeploymentConfig) *fake.Clientset {
38
+	client := &fake.Clientset{}
39 39
 	// when creating a lifecycle pod, we query the deployer pod for the start time to
40 40
 	// calculate the active deadline seconds for the lifecycle pod.
41
-	client.AddReactor("get", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
42
-		action := a.(testclient.GetAction)
41
+	client.AddReactor("get", "pods", func(a core.Action) (handled bool, ret runtime.Object, err error) {
42
+		action := a.(core.GetAction)
43 43
 		if strings.HasPrefix(action.GetName(), config.Name) && strings.HasSuffix(action.GetName(), "-deploy") {
44 44
 			return true, &kapi.Pod{
45 45
 				ObjectMeta: kapi.ObjectMeta{
... ...
@@ -65,11 +66,11 @@ func TestHookExecutor_executeExecNewCreatePodFailure(t *testing.T) {
65 65
 	dc := deploytest.OkDeploymentConfig(1)
66 66
 	deployment, _ := deployutil.MakeDeployment(dc, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
67 67
 	client := newTestClient(dc)
68
-	client.AddReactor("create", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
68
+	client.AddReactor("create", "pods", func(a core.Action) (handled bool, ret runtime.Object, err error) {
69 69
 		return true, nil, errors.New("could not create the pod")
70 70
 	})
71 71
 	executor := &HookExecutor{
72
-		pods:    client,
72
+		pods:    client.Core(),
73 73
 		decoder: kapi.Codecs.UniversalDecoder(),
74 74
 	}
75 75
 
... ...
@@ -94,15 +95,15 @@ func TestHookExecutor_executeExecNewPodSucceeded(t *testing.T) {
94 94
 	podCreated := make(chan struct{})
95 95
 
96 96
 	var createdPod *kapi.Pod
97
-	client.AddReactor("create", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
97
+	client.AddReactor("create", "pods", func(a core.Action) (handled bool, ret runtime.Object, err error) {
98 98
 		defer close(podCreated)
99
-		action := a.(testclient.CreateAction)
99
+		action := a.(core.CreateAction)
100 100
 		object := action.GetObject()
101 101
 		createdPod = object.(*kapi.Pod)
102 102
 		return true, createdPod, nil
103 103
 	})
104 104
 	podsWatch := watch.NewFake()
105
-	client.AddWatchReactor("pods", testclient.DefaultWatchReactor(podsWatch, nil))
105
+	client.AddWatchReactor("pods", core.DefaultWatchReactor(podsWatch, nil))
106 106
 
107 107
 	podLogs := &bytes.Buffer{}
108 108
 	// Simulate creation of the lifecycle pod
... ...
@@ -116,7 +117,7 @@ func TestHookExecutor_executeExecNewPodSucceeded(t *testing.T) {
116 116
 	}()
117 117
 
118 118
 	executor := &HookExecutor{
119
-		pods:    client,
119
+		pods:    client.Core(),
120 120
 		out:     podLogs,
121 121
 		decoder: kapi.Codecs.UniversalDecoder(),
122 122
 		getPodLogs: func(*kapi.Pod) (io.ReadCloser, error) {
... ...
@@ -162,15 +163,15 @@ func TestHookExecutor_executeExecNewPodFailed(t *testing.T) {
162 162
 	podCreated := make(chan struct{})
163 163
 
164 164
 	var createdPod *kapi.Pod
165
-	client.AddReactor("create", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
165
+	client.AddReactor("create", "pods", func(a core.Action) (handled bool, ret runtime.Object, err error) {
166 166
 		defer close(podCreated)
167
-		action := a.(testclient.CreateAction)
167
+		action := a.(core.CreateAction)
168 168
 		object := action.GetObject()
169 169
 		createdPod = object.(*kapi.Pod)
170 170
 		return true, createdPod, nil
171 171
 	})
172 172
 	podsWatch := watch.NewFake()
173
-	client.AddWatchReactor("pods", testclient.DefaultWatchReactor(podsWatch, nil))
173
+	client.AddWatchReactor("pods", core.DefaultWatchReactor(podsWatch, nil))
174 174
 
175 175
 	go func() {
176 176
 		<-podCreated
... ...
@@ -182,7 +183,7 @@ func TestHookExecutor_executeExecNewPodFailed(t *testing.T) {
182 182
 	}()
183 183
 
184 184
 	executor := &HookExecutor{
185
-		pods:    client,
185
+		pods:    client.Core(),
186 186
 		out:     ioutil.Discard,
187 187
 		decoder: kapi.Codecs.UniversalDecoder(),
188 188
 		getPodLogs: func(*kapi.Pod) (io.ReadCloser, error) {
... ...
@@ -9,7 +9,7 @@ import (
9 9
 
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11 11
 	"k8s.io/kubernetes/pkg/api/unversioned"
12
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
12
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
13 13
 	"k8s.io/kubernetes/pkg/runtime"
14 14
 
15 15
 	deployutil "github.com/openshift/origin/pkg/deploy/util"
... ...
@@ -17,7 +17,7 @@ import (
17 17
 
18 18
 // RecordConfigEvent records an event for the deployment config referenced by the
19 19
 // deployment.
20
-func RecordConfigEvent(client kclient.EventNamespacer, deployment *kapi.ReplicationController, decoder runtime.Decoder, eventType, reason, msg string) {
20
+func RecordConfigEvent(client kcoreclient.EventsGetter, deployment *kapi.ReplicationController, decoder runtime.Decoder, eventType, reason, msg string) {
21 21
 	t := unversioned.Time{Time: time.Now()}
22 22
 	var obj runtime.Object = deployment
23 23
 	if config, err := deployutil.DecodeDeploymentConfig(deployment, decoder); err == nil {
... ...
@@ -53,7 +53,7 @@ func RecordConfigEvent(client kclient.EventNamespacer, deployment *kapi.Replicat
53 53
 
54 54
 // RecordConfigWarnings records all warning events from the replication controller to the
55 55
 // associated deployment config.
56
-func RecordConfigWarnings(client kclient.EventNamespacer, rc *kapi.ReplicationController, decoder runtime.Decoder, out io.Writer) {
56
+func RecordConfigWarnings(client kcoreclient.EventsGetter, rc *kapi.ReplicationController, decoder runtime.Decoder, out io.Writer) {
57 57
 	if rc == nil {
58 58
 		return
59 59
 	}
... ...
@@ -10,6 +10,7 @@ import (
10 10
 
11 11
 	"k8s.io/kubernetes/pkg/api"
12 12
 	"k8s.io/kubernetes/pkg/api/unversioned"
13
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
13 14
 	kdeplutil "k8s.io/kubernetes/pkg/controller/deployment/util"
14 15
 	"k8s.io/kubernetes/pkg/fields"
15 16
 	"k8s.io/kubernetes/pkg/labels"
... ...
@@ -18,7 +19,6 @@ import (
18 18
 
19 19
 	deployapi "github.com/openshift/origin/pkg/deploy/api"
20 20
 	"github.com/openshift/origin/pkg/util/namer"
21
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
22 21
 )
23 22
 
24 23
 const (
... ...
@@ -568,7 +568,7 @@ func DeploymentsForCleanup(configuration *deployapi.DeploymentConfig, deployment
568 568
 
569 569
 // WaitForRunningDeployerPod waits a given period of time until the deployer pod
570 570
 // for the given replication controller is running or has already finished.
571
-func WaitForRunningDeployerPod(podClient kclient.PodsNamespacer, rc *api.ReplicationController, timeout time.Duration) error {
571
+func WaitForRunningDeployerPod(podClient kcoreclient.PodsGetter, rc *api.ReplicationController, timeout time.Duration) error {
572 572
 	podName := DeployerPodNameForDeployment(rc.Name)
573 573
 	canGetLogs := func(p *api.Pod) bool {
574 574
 		return api.PodSucceeded == p.Status.Phase || api.PodFailed == p.Status.Phase || api.PodRunning == p.Status.Phase
... ...
@@ -228,7 +228,7 @@ func (d ConfigContext) Check() types.DiagnosticResult {
228 228
 
229 229
 	// Actually send a request to see if context has connectivity.
230 230
 	// Note: we cannot reuse factories as they cache the clients, so build new factory for each context.
231
-	osClient, _, err := osclientcmd.NewFactory(kclientcmd.NewDefaultClientConfig(*d.RawConfig, &kclientcmd.ConfigOverrides{Context: *context})).Clients()
231
+	osClient, _, _, err := osclientcmd.NewFactory(kclientcmd.NewDefaultClientConfig(*d.RawConfig, &kclientcmd.ConfigOverrides{Context: *context})).Clients()
232 232
 	// client create now *fails* if cannot connect to server; so, address connectivity errors below
233 233
 	if err == nil {
234 234
 		if projects, projerr := osClient.Projects().List(kapi.ListOptions{}); projerr != nil {
... ...
@@ -8,7 +8,7 @@ import (
8 8
 	"time"
9 9
 
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
11
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
12 12
 
13 13
 	osclientcmd "github.com/openshift/origin/pkg/cmd/util/clientcmd"
14 14
 	"github.com/openshift/origin/pkg/cmd/util/variable"
... ...
@@ -21,7 +21,7 @@ const (
21 21
 
22 22
 // DiagnosticPod is a diagnostic that runs a diagnostic pod and relays the results.
23 23
 type DiagnosticPod struct {
24
-	KubeClient          kclient.Client
24
+	KubeClient          kclientset.Clientset
25 25
 	Namespace           string
26 26
 	Level               int
27 27
 	Factory             *osclientcmd.Factory
... ...
@@ -60,7 +60,7 @@ func (d *DiagnosticPod) runDiagnosticPod(service *kapi.Service, r types.Diagnost
60 60
 		loglevel = 2 // need to show summary at least
61 61
 	}
62 62
 	imageName := d.ImageTemplate.ExpandOrDie("deployer")
63
-	pod, err := d.KubeClient.Pods(d.Namespace).Create(&kapi.Pod{
63
+	pod, err := d.KubeClient.Core().Pods(d.Namespace).Create(&kapi.Pod{
64 64
 		ObjectMeta: kapi.ObjectMeta{GenerateName: "pod-diagnostic-test-"},
65 65
 		Spec: kapi.PodSpec{
66 66
 			RestartPolicy: kapi.RestartPolicyNever,
... ...
@@ -80,11 +80,11 @@ func (d *DiagnosticPod) runDiagnosticPod(service *kapi.Service, r types.Diagnost
80 80
 	defer func() { // delete what we created, or notify that we couldn't
81 81
 		zero := int64(0)
82 82
 		delOpts := kapi.DeleteOptions{TypeMeta: pod.TypeMeta, GracePeriodSeconds: &zero}
83
-		if err := d.KubeClient.Pods(d.Namespace).Delete(pod.ObjectMeta.Name, &delOpts); err != nil {
83
+		if err := d.KubeClient.Core().Pods(d.Namespace).Delete(pod.ObjectMeta.Name, &delOpts); err != nil {
84 84
 			r.Error("DCl2002", err, fmt.Sprintf("Deleting diagnostic pod '%s' failed. Error: %s", pod.ObjectMeta.Name, fmt.Sprintf("(%T) %[1]s", err)))
85 85
 		}
86 86
 	}()
87
-	pod, err = d.KubeClient.Pods(d.Namespace).Get(pod.ObjectMeta.Name) // status is filled in post-create
87
+	pod, err = d.KubeClient.Core().Pods(d.Namespace).Get(pod.ObjectMeta.Name) // status is filled in post-create
88 88
 	if err != nil {
89 89
 		r.Error("DCli2003", err, fmt.Sprintf("Retrieving the diagnostic pod definition failed. Error: (%T) %[1]v", err))
90 90
 		return
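
The diagnostic pod lifecycle above illustrates the recurring call-site change in this commit: every pod operation now goes through KubeClient.Core(). A condensed sketch, not from the commit, with the client, namespace and pod spec passed in as assumptions:

func runDiagnosticPodSketch(kubeClient kclientset.Interface, namespace string, podSpec *kapi.Pod) error {
	created, err := kubeClient.Core().Pods(namespace).Create(podSpec)
	if err != nil {
		return err
	}
	defer func() {
		zero := int64(0)
		opts := kapi.DeleteOptions{GracePeriodSeconds: &zero}
		// Delete takes *DeleteOptions in the generated client; the error is reported in the real diagnostic
		kubeClient.Core().Pods(namespace).Delete(created.Name, &opts)
	}()
	// status is only filled in after creation, so re-read the pod; Get still takes just the name here
	_, err = kubeClient.Core().Pods(namespace).Get(created.Name)
	return err
}
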
... ...
@@ -7,7 +7,7 @@ import (
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9 9
 	kapisext "k8s.io/kubernetes/pkg/apis/extensions"
10
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11 11
 	"k8s.io/kubernetes/pkg/labels"
12 12
 
13 13
 	authapi "github.com/openshift/origin/pkg/authorization/api"
... ...
@@ -27,7 +27,7 @@ type AggregatedLogging struct {
27 27
 	masterConfig     *configapi.MasterConfig
28 28
 	MasterConfigFile string
29 29
 	OsClient         *client.Client
30
-	KubeClient       *kclient.Client
30
+	KubeClient       *kclientset.Clientset
31 31
 	result           types.DiagnosticResult
32 32
 }
33 33
 
... ...
@@ -45,7 +45,7 @@ const (
45 45
 var loggingSelector = labels.Set{loggingInfraKey: "support"}
46 46
 
47 47
 //NewAggregatedLogging returns the AggregatedLogging Diagnostic
48
-func NewAggregatedLogging(masterConfigFile string, kclient *kclient.Client, osclient *client.Client) *AggregatedLogging {
48
+func NewAggregatedLogging(masterConfigFile string, kclient *kclientset.Clientset, osclient *client.Client) *AggregatedLogging {
49 49
 	return &AggregatedLogging{nil, masterConfigFile, osclient, kclient, types.NewDiagnosticResult(AggregatedLoggingName)}
50 50
 }
51 51
 
... ...
@@ -149,8 +149,8 @@ func (d *AggregatedLogging) Check() types.DiagnosticResult {
149 149
 }
150 150
 
151 151
 const projectNodeSelectorWarning = `
152
-The project '%[1]s' was found with either a missing or non-empty node selector annotation.  
153
-This could keep Fluentd from running on certain nodes and collecting logs from the entire cluster.  
152
+The project '%[1]s' was found with either a missing or non-empty node selector annotation.
153
+This could keep Fluentd from running on certain nodes and collecting logs from the entire cluster.
154 154
 You can correct it by editing the project:
155 155
 
156 156
   $ oc edit namespace %[1]s
... ...
@@ -7,7 +7,7 @@ import (
7 7
 	"strings"
8 8
 
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11 11
 	"k8s.io/kubernetes/pkg/util/sets"
12 12
 
13 13
 	"github.com/openshift/origin/pkg/client"
... ...
@@ -22,7 +22,7 @@ const (
22 22
 )
23 23
 
24 24
 //checkKibana verifies the various integration points between Kibana and logging
25
-func checkKibana(r types.DiagnosticResult, osClient *client.Client, kClient *kclient.Client, project string) {
25
+func checkKibana(r types.DiagnosticResult, osClient *client.Client, kClient *kclientset.Clientset, project string) {
26 26
 	oauthclient, err := osClient.OAuthClients().Get(kibanaProxyOauthClientName)
27 27
 	if err != nil {
28 28
 		r.Error("AGL0115", err, fmt.Sprintf("Error retrieving the OauthClient '%s': %s. Unable to check Kibana", kibanaProxyOauthClientName, err))
... ...
@@ -33,9 +33,9 @@ func checkKibana(r types.DiagnosticResult, osClient *client.Client, kClient *kcl
33 33
 }
34 34
 
35 35
 //checkKibanaSecret confirms the secret used by kibana matches that configured in the oauth client
36
-func checkKibanaSecret(r types.DiagnosticResult, osClient *client.Client, kClient *kclient.Client, project string, oauthclient *oauthapi.OAuthClient) {
36
+func checkKibanaSecret(r types.DiagnosticResult, osClient *client.Client, kClient *kclientset.Clientset, project string, oauthclient *oauthapi.OAuthClient) {
37 37
 	r.Debug("AGL0100", "Checking oauthclient secrets...")
38
-	secret, err := kClient.Secrets(project).Get(kibanaProxySecretName)
38
+	secret, err := kClient.Core().Secrets(project).Get(kibanaProxySecretName)
39 39
 	if err != nil {
40 40
 		r.Error("AGL0105", err, fmt.Sprintf("Error retrieving the secret '%s': %s", kibanaProxySecretName, err))
41 41
 		return
... ...
@@ -8,7 +8,7 @@ import (
8 8
 	"strings"
9 9
 
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
11
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
12 12
 
13 13
 	authorizationapi "github.com/openshift/origin/pkg/authorization/api"
14 14
 	osclient "github.com/openshift/origin/pkg/client"
... ...
@@ -27,7 +27,7 @@ to proxy to pods over the Open vSwitch SDN.
27 27
 // This is currently required to have the master on the Open vSwitch SDN and able to communicate
28 28
 // with other nodes.
29 29
 type MasterNode struct {
30
-	KubeClient       *kclient.Client
30
+	KubeClient       *kclientset.Clientset
31 31
 	OsClient         *osclient.Client
32 32
 	ServerUrl        string
33 33
 	MasterConfigFile string // may often be empty if not being run on the host
... ...
@@ -82,7 +82,7 @@ func (d *MasterNode) CanRun() (bool, error) {
82 82
 func (d *MasterNode) Check() types.DiagnosticResult {
83 83
 	r := types.NewDiagnosticResult(MasterNodeName)
84 84
 
85
-	nodes, err := d.KubeClient.Nodes().List(kapi.ListOptions{})
85
+	nodes, err := d.KubeClient.Core().Nodes().List(kapi.ListOptions{})
86 86
 	if err != nil {
87 87
 		r.Error("DClu3002", err, fmt.Sprintf(clientErrorGettingNodes, err))
88 88
 		return r
... ...
@@ -8,7 +8,7 @@ import (
8 8
 	"fmt"
9 9
 
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
11
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
12 12
 
13 13
 	authorizationapi "github.com/openshift/origin/pkg/authorization/api"
14 14
 	osclient "github.com/openshift/origin/pkg/client"
... ...
@@ -47,7 +47,7 @@ other options for 'oadm manage-node').
47 47
 
48 48
 // NodeDefinitions is a Diagnostic for analyzing the nodes in a cluster.
49 49
 type NodeDefinitions struct {
50
-	KubeClient *kclient.Client
50
+	KubeClient *kclientset.Clientset
51 51
 	OsClient   *osclient.Client
52 52
 }
53 53
 
... ...
@@ -81,7 +81,7 @@ func (d *NodeDefinitions) CanRun() (bool, error) {
81 81
 func (d *NodeDefinitions) Check() types.DiagnosticResult {
82 82
 	r := types.NewDiagnosticResult("NodeDefinition")
83 83
 
84
-	nodes, err := d.KubeClient.Nodes().List(kapi.ListOptions{})
84
+	nodes, err := d.KubeClient.Core().Nodes().List(kapi.ListOptions{})
85 85
 	if err != nil {
86 86
 		r.Error("DClu0001", err, fmt.Sprintf(clientErrorGettingNodes, err))
87 87
 		return r
... ...
@@ -9,7 +9,7 @@ import (
9 9
 
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11 11
 	kerrs "k8s.io/kubernetes/pkg/api/errors"
12
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
12
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
13 13
 	"k8s.io/kubernetes/pkg/labels"
14 14
 
15 15
 	authorizationapi "github.com/openshift/origin/pkg/authorization/api"
... ...
@@ -20,7 +20,7 @@ import (
20 20
 
21 21
 // ClusterRegistry is a Diagnostic to check that there is a working Docker registry.
22 22
 type ClusterRegistry struct {
23
-	KubeClient          *kclient.Client
23
+	KubeClient          *kclientset.Clientset
24 24
 	OsClient            *osclient.Client
25 25
 	PreventModification bool
26 26
 }
... ...
@@ -186,7 +186,7 @@ func (d *ClusterRegistry) Check() types.DiagnosticResult {
186 186
 }
187 187
 
188 188
 func (d *ClusterRegistry) getRegistryService(r types.DiagnosticResult) *kapi.Service {
189
-	service, err := d.KubeClient.Services(kapi.NamespaceDefault).Get(registryName)
189
+	service, err := d.KubeClient.Core().Services(kapi.NamespaceDefault).Get(registryName)
190 190
 	if err != nil && reflect.TypeOf(err) == reflect.TypeOf(&kerrs.StatusError{}) {
191 191
 		r.Warn("DClu1002", err, fmt.Sprintf(clGetRegNone, registryName, kapi.NamespaceDefault))
192 192
 		return nil
... ...
@@ -200,7 +200,7 @@ func (d *ClusterRegistry) getRegistryService(r types.DiagnosticResult) *kapi.Ser
200 200
 
201 201
 func (d *ClusterRegistry) getRegistryPods(service *kapi.Service, r types.DiagnosticResult) []*kapi.Pod {
202 202
 	runningPods := []*kapi.Pod{}
203
-	pods, err := d.KubeClient.Pods(kapi.NamespaceDefault).List(kapi.ListOptions{LabelSelector: labels.SelectorFromSet(service.Spec.Selector)})
203
+	pods, err := d.KubeClient.Core().Pods(kapi.NamespaceDefault).List(kapi.ListOptions{LabelSelector: labels.SelectorFromSet(service.Spec.Selector)})
204 204
 	if err != nil {
205 205
 		r.Error("DClu1005", err, fmt.Sprintf("Finding pods for '%s' service failed. This should never happen. Error: (%T) %[2]v", registryName, err))
206 206
 		return runningPods
... ...
@@ -252,7 +252,7 @@ func (d *ClusterRegistry) getRegistryPods(service *kapi.Service, r types.Diagnos
252 252
 
253 253
 func (d *ClusterRegistry) checkRegistryLogs(pod *kapi.Pod, r types.DiagnosticResult) {
254 254
 	// pull out logs from the pod
255
-	readCloser, err := d.KubeClient.RESTClient.Get().
255
+	readCloser, err := d.KubeClient.CoreClient.RESTClient.Get().
256 256
 		Namespace("default").Name(pod.ObjectMeta.Name).
257 257
 		Resource("pods").SubResource("log").
258 258
 		Param("follow", "false").
... ...
@@ -302,7 +302,7 @@ func (d *ClusterRegistry) checkRegistryLogs(pod *kapi.Pod, r types.DiagnosticRes
302 302
 }
303 303
 
304 304
 func (d *ClusterRegistry) checkRegistryEndpoints(pods []*kapi.Pod, r types.DiagnosticResult) bool {
305
-	endPoint, err := d.KubeClient.Endpoints(kapi.NamespaceDefault).Get(registryName)
305
+	endPoint, err := d.KubeClient.Core().Endpoints(kapi.NamespaceDefault).Get(registryName)
306 306
 	if err != nil {
307 307
 		r.Error("DClu1013", err, fmt.Sprintf(`Finding endpoints for "%s" service failed. This should never happen. Error: (%[2]T) %[2]v`, registryName, err))
308 308
 		return false
... ...
@@ -11,7 +11,7 @@ import (
11 11
 
12 12
 	kapi "k8s.io/kubernetes/pkg/api"
13 13
 	kerrs "k8s.io/kubernetes/pkg/api/errors"
14
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
14
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
15 15
 	"k8s.io/kubernetes/pkg/labels"
16 16
 
17 17
 	authorizationapi "github.com/openshift/origin/pkg/authorization/api"
... ...
@@ -22,7 +22,7 @@ import (
22 22
 
23 23
 // ClusterRouter is a Diagnostic to check that there is a working router.
24 24
 type ClusterRouter struct {
25
-	KubeClient *kclient.Client
25
+	KubeClient *kclientset.Clientset
26 26
 	OsClient   *osclient.Client
27 27
 }
28 28
 
... ...
@@ -137,7 +137,7 @@ func (d *ClusterRouter) getRouterDC(r types.DiagnosticResult) *deployapi.Deploym
137 137
 }
138 138
 
139 139
 func (d *ClusterRouter) getRouterPods(dc *deployapi.DeploymentConfig, r types.DiagnosticResult) *kapi.PodList {
140
-	pods, err := d.KubeClient.Pods(kapi.NamespaceDefault).List(kapi.ListOptions{LabelSelector: labels.SelectorFromSet(dc.Spec.Selector)})
140
+	pods, err := d.KubeClient.Core().Pods(kapi.NamespaceDefault).List(kapi.ListOptions{LabelSelector: labels.SelectorFromSet(dc.Spec.Selector)})
141 141
 	if err != nil {
142 142
 		r.Error("DClu2004", err, fmt.Sprintf("Finding pods for '%s' DeploymentConfig failed. This should never happen. Error: (%[2]T) %[2]v", routerName, err))
143 143
 		return nil
... ...
@@ -170,7 +170,7 @@ func (s *lineScanner) Text() string { return s.Scanner.Text() }
170 170
 func (s *lineScanner) Close() error { return s.ReadCloser.Close() }
171 171
 
172 172
 func (d *ClusterRouter) getPodLogScanner(pod *kapi.Pod) (*lineScanner, error) {
173
-	readCloser, err := d.KubeClient.RESTClient.Get().
173
+	readCloser, err := d.KubeClient.CoreClient.RESTClient.Get().
174 174
 		Namespace(pod.ObjectMeta.Namespace).
175 175
 		Name(pod.ObjectMeta.Name).
176 176
 		Resource("pods").SubResource("log").
... ...
@@ -7,7 +7,7 @@ import (
7 7
 	"strings"
8 8
 
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11 11
 
12 12
 	hostdiag "github.com/openshift/origin/pkg/diagnostics/host"
13 13
 	"github.com/openshift/origin/pkg/diagnostics/types"
... ...
@@ -19,7 +19,7 @@ import (
19 19
 // Background: https://github.com/openshift/origin/issues/7808
20 20
 type ServiceExternalIPs struct {
21 21
 	MasterConfigFile string
22
-	KclusterClient   *kclient.Client
22
+	KclusterClient   *kclientset.Clientset
23 23
 }
24 24
 
25 25
 const ServiceExternalIPsName = "ServiceExternalIPs"
... ...
@@ -59,7 +59,7 @@ func (d *ServiceExternalIPs) Check() types.DiagnosticResult {
59 59
 			return r
60 60
 		}
61 61
 	}
62
-	services, err := d.KclusterClient.Services("").List(kapi.ListOptions{})
62
+	services, err := d.KclusterClient.Core().Services("").List(kapi.ListOptions{})
63 63
 	if err != nil {
64 64
 		r.Error("DH2005", err, fmt.Sprintf("Error while listing cluster services: (%[1]T) %[1]v", err))
65 65
 		return r
... ...
@@ -12,7 +12,7 @@ import (
12 12
 	flag "github.com/spf13/pflag"
13 13
 
14 14
 	kapi "k8s.io/kubernetes/pkg/api"
15
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
15
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
16 16
 
17 17
 	osclient "github.com/openshift/origin/pkg/client"
18 18
 	osclientcmd "github.com/openshift/origin/pkg/cmd/util/clientcmd"
... ...
@@ -26,7 +26,7 @@ const (
26 26
 
27 27
 // NetworkDiagnostic is a diagnostic that runs a network diagnostic pod and relays the results.
28 28
 type NetworkDiagnostic struct {
29
-	KubeClient          *kclient.Client
29
+	KubeClient          *kclientset.Clientset
30 30
 	OSClient            *osclient.Client
31 31
 	ClientFlags         *flag.FlagSet
32 32
 	Level               int
... ...
@@ -170,7 +170,7 @@ func (d *NetworkDiagnostic) runNetworkPod(command []string) error {
170 170
 		podName := kapi.SimpleNameGenerator.GenerateName(fmt.Sprintf("%s-", util.NetworkDiagPodNamePrefix))
171 171
 
172 172
 		pod := GetNetworkDiagnosticsPod(command, podName, node.Name)
173
-		_, err := d.KubeClient.Pods(d.nsName1).Create(pod)
173
+		_, err := d.KubeClient.Core().Pods(d.nsName1).Create(pod)
174 174
 		if err != nil {
175 175
 			return fmt.Errorf("Creating network diagnostic pod %q on node %q with command %q failed: %v", podName, node.Name, strings.Join(command, " "), err)
176 176
 		}
... ...
@@ -70,10 +70,10 @@ func (d *NetworkDiagnostic) TestSetup() error {
70 70
 
71 71
 func (d *NetworkDiagnostic) Cleanup() {
72 72
 	// Deleting namespaces will delete corresponding service accounts/pods in the namespace automatically.
73
-	d.KubeClient.Namespaces().Delete(d.nsName1)
74
-	d.KubeClient.Namespaces().Delete(d.nsName2)
75
-	d.KubeClient.Namespaces().Delete(d.globalnsName1)
76
-	d.KubeClient.Namespaces().Delete(d.globalnsName2)
73
+	d.KubeClient.Core().Namespaces().Delete(d.nsName1, nil)
74
+	d.KubeClient.Core().Namespaces().Delete(d.nsName2, nil)
75
+	d.KubeClient.Core().Namespaces().Delete(d.globalnsName1, nil)
76
+	d.KubeClient.Core().Namespaces().Delete(d.globalnsName2, nil)
77 77
 }
78 78
 
79 79
 func (d *NetworkDiagnostic) getPodList(nsName, prefix string) (*kapi.PodList, error) {
... ...
@@ -5,7 +5,7 @@ import (
5 5
 	"fmt"
6 6
 	"path/filepath"
7 7
 
8
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
8
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
9 9
 
10 10
 	"github.com/openshift/origin/pkg/diagnostics/networkpod/util"
11 11
 	"github.com/openshift/origin/pkg/diagnostics/types"
... ...
@@ -17,7 +17,7 @@ const (
17 17
 
18 18
 // CollectNetworkInfo is a Diagnostic to collect network information in the cluster.
19 19
 type CollectNetworkInfo struct {
20
-	KubeClient *kclient.Client
20
+	KubeClient *kclientset.Clientset
21 21
 }
22 22
 
23 23
 // Name is part of the Diagnostic interface and just returns name.
... ...
@@ -6,7 +6,7 @@ import (
6 6
 	"strings"
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
9
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
10 10
 	kcontainer "k8s.io/kubernetes/pkg/kubelet/container"
11 11
 	kexec "k8s.io/kubernetes/pkg/util/exec"
12 12
 
... ...
@@ -20,7 +20,7 @@ const (
20 20
 
21 21
 // CheckNodeNetwork is a Diagnostic to check that pods in the cluster can access its own node
22 22
 type CheckNodeNetwork struct {
23
-	KubeClient *kclient.Client
23
+	KubeClient *kclientset.Clientset
24 24
 }
25 25
 
26 26
 // Name is part of the Diagnostic interface and just returns name.
... ...
@@ -6,7 +6,7 @@ import (
6 6
 	"strings"
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
9
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
10 10
 	kcontainer "k8s.io/kubernetes/pkg/kubelet/container"
11 11
 	kexec "k8s.io/kubernetes/pkg/util/exec"
12 12
 
... ...
@@ -22,7 +22,7 @@ const (
22 22
 
23 23
 // CheckPodNetwork is a Diagnostic to check communication between pods in the cluster.
24 24
 type CheckPodNetwork struct {
25
-	KubeClient *kclient.Client
25
+	KubeClient *kclientset.Clientset
26 26
 	OSClient   *osclient.Client
27 27
 
28 28
 	vnidMap map[string]uint32
... ...
@@ -6,7 +6,7 @@ import (
6 6
 	"strings"
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
9
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
10 10
 	kcontainer "k8s.io/kubernetes/pkg/kubelet/container"
11 11
 	kexec "k8s.io/kubernetes/pkg/util/exec"
12 12
 
... ...
@@ -22,7 +22,7 @@ const (
22 22
 
23 23
 // CheckServiceNetwork is a Diagnostic to check communication between services in the cluster.
24 24
 type CheckServiceNetwork struct {
25
-	KubeClient *kclient.Client
25
+	KubeClient *kclientset.Clientset
26 26
 	OSClient   *osclient.Client
27 27
 
28 28
 	vnidMap map[string]uint32
... ...
@@ -141,9 +141,9 @@ func (d CheckServiceNetwork) checkConnection(pods []kapi.Pod, services []kapi.Se
141 141
 	}
142 142
 }
143 143
 
144
-func getAllServices(kubeClient *kclient.Client) ([]kapi.Service, error) {
144
+func getAllServices(kubeClient *kclientset.Clientset) ([]kapi.Service, error) {
145 145
 	filtered_srvs := []kapi.Service{}
146
-	serviceList, err := kubeClient.Services(kapi.NamespaceAll).List(kapi.ListOptions{})
146
+	serviceList, err := kubeClient.Core().Services(kapi.NamespaceAll).List(kapi.ListOptions{})
147 147
 	if err != nil {
148 148
 		return filtered_srvs, err
149 149
 	}
... ...
@@ -8,7 +8,7 @@ import (
8 8
 	"regexp"
9 9
 	"strings"
10 10
 
11
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
11
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
12 12
 	kcontainer "k8s.io/kubernetes/pkg/kubelet/container"
13 13
 
14 14
 	"github.com/openshift/origin/pkg/diagnostics/types"
... ...
@@ -20,7 +20,7 @@ type LogInterface struct {
20 20
 	Logdir string
21 21
 }
22 22
 
23
-func (l *LogInterface) LogNode(kubeClient *kclient.Client) {
23
+func (l *LogInterface) LogNode(kubeClient *kclientset.Clientset) {
24 24
 	l.LogSystem()
25 25
 	l.LogServices()
26 26
 
... ...
@@ -163,7 +163,7 @@ func (l *LogInterface) logNetworkInterfaces() {
163 163
 	})
164 164
 }
165 165
 
166
-func (l *LogInterface) logPodInfo(kubeClient *kclient.Client) {
166
+func (l *LogInterface) logPodInfo(kubeClient *kclientset.Clientset) {
167 167
 	pods, _, err := GetLocalAndNonLocalDiagnosticPods(kubeClient)
168 168
 	if err != nil {
169 169
 		l.Result.Error("DLogNet1003", err, err.Error())
... ...
@@ -7,7 +7,7 @@ import (
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9 9
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
10
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11 11
 	kubecmd "k8s.io/kubernetes/pkg/kubectl/cmd"
12 12
 
13 13
 	osclient "github.com/openshift/origin/pkg/client"
... ...
@@ -44,15 +44,15 @@ func GetOpenShiftNetworkPlugin(osClient *osclient.Client) (string, bool, error)
44 44
 	return cn.PluginName, sdnapi.IsOpenShiftNetworkPlugin(cn.PluginName), nil
45 45
 }
46 46
 
47
-func GetNodes(kubeClient *kclient.Client) ([]kapi.Node, error) {
48
-	nodeList, err := kubeClient.Nodes().List(kapi.ListOptions{})
47
+func GetNodes(kubeClient *kclientset.Clientset) ([]kapi.Node, error) {
48
+	nodeList, err := kubeClient.Core().Nodes().List(kapi.ListOptions{})
49 49
 	if err != nil {
50 50
 		return nil, fmt.Errorf("Listing nodes in the cluster failed. Error: %s", err)
51 51
 	}
52 52
 	return nodeList.Items, nil
53 53
 }
54 54
 
55
-func GetSchedulableNodes(kubeClient *kclient.Client) ([]kapi.Node, error) {
55
+func GetSchedulableNodes(kubeClient *kclientset.Clientset) ([]kapi.Node, error) {
56 56
 	filteredNodes := []kapi.Node{}
57 57
 	nodes, err := GetNodes(kubeClient)
58 58
 	if err != nil {
... ...
@@ -83,8 +83,8 @@ func GetSchedulableNodes(kubeClient *kclient.Client) ([]kapi.Node, error) {
83 83
 	return filteredNodes, nil
84 84
 }
85 85
 
86
-func GetLocalNode(kubeClient *kclient.Client) (string, string, error) {
87
-	nodeList, err := kubeClient.Nodes().List(kapi.ListOptions{})
86
+func GetLocalNode(kubeClient *kclientset.Clientset) (string, string, error) {
87
+	nodeList, err := kubeClient.Core().Nodes().List(kapi.ListOptions{})
88 88
 	if err != nil {
89 89
 		return "", "", err
90 90
 	}
... ...
@@ -109,7 +109,7 @@ func GetLocalNode(kubeClient *kclient.Client) (string, string, error) {
109 109
 }
110 110
 
111 111
 // Get local/non-local pods in network diagnostic namespaces
112
-func GetLocalAndNonLocalDiagnosticPods(kubeClient *kclient.Client) ([]kapi.Pod, []kapi.Pod, error) {
112
+func GetLocalAndNonLocalDiagnosticPods(kubeClient *kclientset.Clientset) ([]kapi.Pod, []kapi.Pod, error) {
113 113
 	pods, err := getSDNRunningPods(kubeClient)
114 114
 	if err != nil {
115 115
 		return nil, nil, err
... ...
@@ -210,8 +210,8 @@ func Execute(factory *osclientcmd.Factory, command []string, pod *kapi.Pod, in i
210 210
 	return execOptions.Run()
211 211
 }
212 212
 
213
-func getSDNRunningPods(kubeClient *kclient.Client) ([]kapi.Pod, error) {
214
-	podList, err := kubeClient.Pods(kapi.NamespaceAll).List(kapi.ListOptions{})
213
+func getSDNRunningPods(kubeClient *kclientset.Clientset) ([]kapi.Pod, error) {
214
+	podList, err := kubeClient.Core().Pods(kapi.NamespaceAll).List(kapi.ListOptions{})
215 215
 	if err != nil {
216 216
 		return nil, err
217 217
 	}
... ...
@@ -69,7 +69,7 @@ func (d PodCheckAuth) authenticateToMaster(token string, r types.DiagnosticResul
69 69
 			BearerToken:     token,
70 70
 		},
71 71
 	}
72
-	oclient, _, err := clientConfig.Clients()
72
+	oclient, _, _, err := clientConfig.Clients()
73 73
 	if err != nil {
74 74
 		r.Error("DP1002", err, fmt.Sprintf("could not create API clients from the service account client config: %v", err))
75 75
 		return
... ...
@@ -7,8 +7,8 @@ import (
7 7
 	"k8s.io/kubernetes/pkg/api"
8 8
 	"k8s.io/kubernetes/pkg/api/errors"
9 9
 	"k8s.io/kubernetes/pkg/client/cache"
10
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
10 11
 	"k8s.io/kubernetes/pkg/client/restclient"
11
-	client "k8s.io/kubernetes/pkg/client/unversioned"
12 12
 	"k8s.io/kubernetes/pkg/fields"
13 13
 	"k8s.io/kubernetes/pkg/watch"
14 14
 )
... ...
@@ -16,7 +16,7 @@ import (
16 16
 // ServiceAccessor is the interface used by the ServiceResolver to access
17 17
 // services.
18 18
 type ServiceAccessor interface {
19
-	client.ServicesNamespacer
19
+	kcoreclient.ServicesGetter
20 20
 	ServiceByClusterIP(ip string) (*api.Service, error)
21 21
 }
22 22
 
... ...
@@ -70,7 +70,7 @@ func indexServiceByClusterIP(obj interface{}) ([]string, error) {
70 70
 	return []string{obj.(*api.Service).Spec.ClusterIP}, nil
71 71
 }
72 72
 
73
-func (a *cachedServiceAccessor) Services(namespace string) client.ServiceInterface {
73
+func (a *cachedServiceAccessor) Services(namespace string) kcoreclient.ServiceInterface {
74 74
 	return cachedServiceNamespacer{a, namespace}
75 75
 }
76 76
 
... ...
@@ -80,7 +80,7 @@ type cachedServiceNamespacer struct {
80 80
 	namespace string
81 81
 }
82 82
 
83
-var _ client.ServiceInterface = cachedServiceNamespacer{}
83
+var _ kcoreclient.ServiceInterface = cachedServiceNamespacer{}
84 84
 
85 85
 func (a cachedServiceNamespacer) Get(name string) (*api.Service, error) {
86 86
 	item, ok, err := a.accessor.store.Get(&api.Service{ObjectMeta: api.ObjectMeta{Namespace: a.namespace, Name: name}})
... ...
@@ -120,12 +120,18 @@ func (a cachedServiceNamespacer) Update(srv *api.Service) (*api.Service, error)
120 120
 func (a cachedServiceNamespacer) UpdateStatus(srv *api.Service) (*api.Service, error) {
121 121
 	return nil, fmt.Errorf("not implemented")
122 122
 }
123
-func (a cachedServiceNamespacer) Delete(name string) error {
123
+func (a cachedServiceNamespacer) Delete(name string, options *api.DeleteOptions) error {
124
+	return fmt.Errorf("not implemented")
125
+}
126
+func (a cachedServiceNamespacer) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
124 127
 	return fmt.Errorf("not implemented")
125 128
 }
126 129
 func (a cachedServiceNamespacer) Watch(options api.ListOptions) (watch.Interface, error) {
127 130
 	return nil, fmt.Errorf("not implemented")
128 131
 }
132
+func (a cachedServiceNamespacer) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*api.Service, error) {
133
+	return nil, fmt.Errorf("not implemented")
134
+}
129 135
 func (a cachedServiceNamespacer) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper {
130 136
 	return nil
131 137
 }
... ...
@@ -136,12 +142,12 @@ type cachedEndpointsAccessor struct {
136 136
 	store cache.Store
137 137
 }
138 138
 
139
-func NewCachedEndpointsAccessorAndStore() (client.EndpointsNamespacer, cache.Store) {
139
+func NewCachedEndpointsAccessorAndStore() (kcoreclient.EndpointsGetter, cache.Store) {
140 140
 	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
141 141
 	return &cachedEndpointsAccessor{store: store}, store
142 142
 }
143 143
 
144
-func (a *cachedEndpointsAccessor) Endpoints(namespace string) client.EndpointsInterface {
144
+func (a *cachedEndpointsAccessor) Endpoints(namespace string) kcoreclient.EndpointsInterface {
145 145
 	return cachedEndpointsNamespacer{accessor: a, namespace: namespace}
146 146
 }
147 147
 
... ...
@@ -151,7 +157,7 @@ type cachedEndpointsNamespacer struct {
151 151
 	namespace string
152 152
 }
153 153
 
154
-var _ client.EndpointsInterface = cachedEndpointsNamespacer{}
154
+var _ kcoreclient.EndpointsInterface = cachedEndpointsNamespacer{}
155 155
 
156 156
 func (a cachedEndpointsNamespacer) Get(name string) (*api.Endpoints, error) {
157 157
 	item, ok, err := a.accessor.store.Get(&api.Endpoints{ObjectMeta: api.ObjectMeta{Namespace: a.namespace, Name: name}})
... ...
@@ -173,9 +179,15 @@ func (a cachedEndpointsNamespacer) Create(srv *api.Endpoints) (*api.Endpoints, e
173 173
 func (a cachedEndpointsNamespacer) Update(srv *api.Endpoints) (*api.Endpoints, error) {
174 174
 	return nil, fmt.Errorf("not implemented")
175 175
 }
176
-func (a cachedEndpointsNamespacer) Delete(name string) error {
176
+func (a cachedEndpointsNamespacer) Delete(name string, options *api.DeleteOptions) error {
177
+	return fmt.Errorf("not implemented")
178
+}
179
+func (a cachedEndpointsNamespacer) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
177 180
 	return fmt.Errorf("not implemented")
178 181
 }
179 182
 func (a cachedEndpointsNamespacer) Watch(options api.ListOptions) (watch.Interface, error) {
180 183
 	return nil, fmt.Errorf("not implemented")
181 184
 }
185
+func (a cachedEndpointsNamespacer) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*api.Endpoints, error) {
186
+	return nil, fmt.Errorf("not implemented")
187
+}
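
The stubs above exist only because the generated ServiceInterface and EndpointsInterface grew Delete-with-options, DeleteCollection and Patch; reads are still answered from the cache store. A sketch, not from the commit, of consuming the cache-backed accessor through the same getter interface; the endpoints fixture is illustrative, and the store is normally populated elsewhere rather than by hand:

func cachedEndpointsLookupSketch() (*api.Endpoints, error) {
	accessor, store := NewCachedEndpointsAccessorAndStore()
	store.Add(&api.Endpoints{ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "galera"}})
	// Get is served from the store; the mutating methods are the "not implemented" stubs added above
	return accessor.Endpoints("default").Get("galera")
}
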
... ...
@@ -13,7 +13,7 @@ import (
13 13
 	kapi "k8s.io/kubernetes/pkg/api"
14 14
 	kendpoints "k8s.io/kubernetes/pkg/api/endpoints"
15 15
 	"k8s.io/kubernetes/pkg/api/errors"
16
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
16
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
17 17
 	"k8s.io/kubernetes/pkg/util/validation"
18 18
 
19 19
 	"github.com/skynetservices/skydns/msg"
... ...
@@ -27,7 +27,7 @@ import (
27 27
 type ServiceResolver struct {
28 28
 	config    *server.Config
29 29
 	accessor  ServiceAccessor
30
-	endpoints kclient.EndpointsNamespacer
30
+	endpoints kcoreclient.EndpointsGetter
31 31
 	base      string
32 32
 	fallback  FallbackFunc
33 33
 }
... ...
@@ -42,7 +42,7 @@ type FallbackFunc func(name string, exact bool) (string, bool)
42 42
 
43 43
 // NewServiceResolver creates an object that will return DNS record entries for
44 44
 // SkyDNS based on service names.
45
-func NewServiceResolver(config *server.Config, accessor ServiceAccessor, endpoints kclient.EndpointsNamespacer, fn FallbackFunc) *ServiceResolver {
45
+func NewServiceResolver(config *server.Config, accessor ServiceAccessor, endpoints kcoreclient.EndpointsGetter, fn FallbackFunc) *ServiceResolver {
46 46
 	domain := config.Domain
47 47
 	if !strings.HasSuffix(domain, ".") {
48 48
 		domain = domain + "."
... ...
@@ -12,8 +12,8 @@ import (
12 12
 	registryauth "github.com/docker/distribution/registry/auth"
13 13
 
14 14
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
15
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
15 16
 	"k8s.io/kubernetes/pkg/client/restclient"
16
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
17 17
 
18 18
 	authorizationapi "github.com/openshift/origin/pkg/authorization/api"
19 19
 	"github.com/openshift/origin/pkg/client"
... ...
@@ -47,7 +47,7 @@ const (
47 47
 // RegistryClient encapsulates getting access to the OpenShift API.
48 48
 type RegistryClient interface {
49 49
 	// Clients return the authenticated clients to use with the server.
50
-	Clients() (client.Interface, kclient.Interface, error)
50
+	Clients() (client.Interface, kclientset.Interface, error)
51 51
 	// SafeClientConfig returns a client config without authentication info.
52 52
 	SafeClientConfig() restclient.Config
53 53
 }
... ...
@@ -68,8 +68,9 @@ func NewRegistryClient(config *clientcmd.Config) RegistryClient {
68 68
 }
69 69
 
70 70
 // Client returns the authenticated client to use with the server.
71
-func (r *registryClient) Clients() (client.Interface, kclient.Interface, error) {
72
-	return r.config.Clients()
71
+func (r *registryClient) Clients() (client.Interface, kclientset.Interface, error) {
72
+	oc, _, kc, err := r.config.Clients()
73
+	return oc, kc, err
73 74
 }
74 75
 
75 76
 // SafeClientConfig returns a client config without authentication info.
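
The adapter above keeps RegistryClient at two return values while the underlying config now returns an extra client that the registry simply discards. A sketch, not from the commit, of a call site on the narrowed interface; the project name is illustrative and the usual kapi import is assumed:

func registryQuotaSketch() error {
	osClient, kClient, err := DefaultRegistryClient.Clients()
	if err != nil {
		return err
	}
	// typed getters such as ResourceQuotas come from the clientset's Core() group
	quotas, err := kClient.Core().ResourceQuotas("myproject").List(kapi.ListOptions{})
	if err != nil {
		return err
	}
	_, _ = osClient, quotas
	return nil
}
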
... ...
@@ -23,9 +23,9 @@ import (
23 23
 	"github.com/docker/distribution/registry/storage"
24 24
 
25 25
 	registrytest "github.com/openshift/origin/pkg/dockerregistry/testutil"
26
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
27
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
26 28
 	"k8s.io/kubernetes/pkg/client/restclient"
27
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
28
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
29 29
 
30 30
 	osclient "github.com/openshift/origin/pkg/client"
31 31
 	"github.com/openshift/origin/pkg/client/testclient"
... ...
@@ -55,7 +55,7 @@ func TestBlobDescriptorServiceIsApplied(t *testing.T) {
55 55
 
56 56
 	// TODO: get rid of those nasty global vars
57 57
 	backupRegistryClient := DefaultRegistryClient
58
-	DefaultRegistryClient = makeFakeRegistryClient(client, ktestclient.NewSimpleFake())
58
+	DefaultRegistryClient = makeFakeRegistryClient(client, fake.NewSimpleClientset())
59 59
 	defer func() {
60 60
 		// set it back once this test finishes to make other unit tests working
61 61
 		DefaultRegistryClient = backupRegistryClient
... ...
@@ -475,7 +475,7 @@ func (f *fakeAccessController) Authorized(ctx context.Context, access ...registr
475 475
 	return ctx, nil
476 476
 }
477 477
 
478
-func makeFakeRegistryClient(client osclient.Interface, kClient kclient.Interface) RegistryClient {
478
+func makeFakeRegistryClient(client osclient.Interface, kClient kclientset.Interface) RegistryClient {
479 479
 	return &fakeRegistryClient{
480 480
 		client:  client,
481 481
 		kClient: kClient,
... ...
@@ -484,10 +484,10 @@ func makeFakeRegistryClient(client osclient.Interface, kClient kclient.Interface
484 484
 
485 485
 type fakeRegistryClient struct {
486 486
 	client  osclient.Interface
487
-	kClient kclient.Interface
487
+	kClient kclientset.Interface
488 488
 }
489 489
 
490
-func (f *fakeRegistryClient) Clients() (osclient.Interface, kclient.Interface, error) {
490
+func (f *fakeRegistryClient) Clients() (osclient.Interface, kclientset.Interface, error) {
491 491
 	return f.client, f.kClient, nil
492 492
 }
493 493
 func (f *fakeRegistryClient) SafeClientConfig() restclient.Config {
... ...
@@ -21,7 +21,7 @@ import (
21 21
 	"github.com/docker/distribution/registry/handlers"
22 22
 	_ "github.com/docker/distribution/registry/storage/driver/inmemory"
23 23
 
24
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
24
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
25 25
 
26 26
 	"github.com/openshift/origin/pkg/client/testclient"
27 27
 	registrytest "github.com/openshift/origin/pkg/dockerregistry/testutil"
... ...
@@ -43,7 +43,7 @@ func TestPullthroughServeBlob(t *testing.T) {
43 43
 
44 44
 	// TODO: get rid of those nasty global vars
45 45
 	backupRegistryClient := DefaultRegistryClient
46
-	DefaultRegistryClient = makeFakeRegistryClient(client, ktestclient.NewSimpleFake())
46
+	DefaultRegistryClient = makeFakeRegistryClient(client, fake.NewSimpleClientset())
47 47
 	defer func() {
48 48
 		// set it back once this test finishes to make other unit tests working again
49 49
 		DefaultRegistryClient = backupRegistryClient
... ...
@@ -19,8 +19,8 @@ import (
19 19
 
20 20
 	kapi "k8s.io/kubernetes/pkg/api"
21 21
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
22
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
22 23
 	"k8s.io/kubernetes/pkg/client/restclient"
23
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
24 24
 	"k8s.io/kubernetes/pkg/util/sets"
25 25
 
26 26
 	"github.com/openshift/origin/pkg/client"
... ...
@@ -109,7 +109,7 @@ func init() {
109 109
 				quotaEnforcing = newQuotaEnforcingConfig(ctx, os.Getenv(EnforceQuotaEnvVar), os.Getenv(ProjectCacheTTLEnvVar), options)
110 110
 			}
111 111
 
112
-			return newRepositoryWithClient(registryOSClient, kClient, kClient, ctx, repo, options)
112
+			return newRepositoryWithClient(registryOSClient, kClient.Core(), kClient.Core(), ctx, repo, options)
113 113
 		},
114 114
 	)
115 115
 
... ...
@@ -126,8 +126,8 @@ type repository struct {
126 126
 	distribution.Repository
127 127
 
128 128
 	ctx              context.Context
129
-	quotaClient      kclient.ResourceQuotasNamespacer
130
-	limitClient      kclient.LimitRangesNamespacer
129
+	quotaClient      kcoreclient.ResourceQuotasGetter
130
+	limitClient      kcoreclient.LimitRangesGetter
131 131
 	registryOSClient client.Interface
132 132
 	registryAddr     string
133 133
 	namespace        string
... ...
@@ -151,8 +151,8 @@ var _ distribution.ManifestService = &repository{}
151 151
 // newRepositoryWithClient returns a new repository middleware.
152 152
 func newRepositoryWithClient(
153 153
 	registryOSClient client.Interface,
154
-	quotaClient kclient.ResourceQuotasNamespacer,
155
-	limitClient kclient.LimitRangesNamespacer,
154
+	quotaClient kcoreclient.ResourceQuotasGetter,
155
+	limitClient kcoreclient.LimitRangesGetter,
156 156
 	ctx context.Context,
157 157
 	repo distribution.Repository,
158 158
 	options map[string]interface{},
... ...
@@ -22,6 +22,7 @@ import (
22 22
 	"github.com/docker/libtrust"
23 23
 
24 24
 	kapi "k8s.io/kubernetes/pkg/api"
25
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
25 26
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
26 27
 	"k8s.io/kubernetes/pkg/util/diff"
27 28
 
... ...
@@ -692,7 +693,7 @@ func (r *testRegistry) Repository(ctx context.Context, ref reference.Named) (dis
692 692
 		return nil, err
693 693
 	}
694 694
 
695
-	kFakeClient := ktestclient.NewSimpleFake()
695
+	kFakeClient := fake.NewSimpleClientset()
696 696
 
697 697
 	parts := strings.SplitN(ref.Name(), "/", 3)
698 698
 	if len(parts) != 2 {
... ...
@@ -703,8 +704,8 @@ func (r *testRegistry) Repository(ctx context.Context, ref reference.Named) (dis
703 703
 		Repository: repo,
704 704
 
705 705
 		ctx:              ctx,
706
-		quotaClient:      kFakeClient,
707
-		limitClient:      kFakeClient,
706
+		quotaClient:      kFakeClient.Core(),
707
+		limitClient:      kFakeClient.Core(),
708 708
 		registryOSClient: r.osClient,
709 709
 		registryAddr:     "localhost:5000",
710 710
 		namespace:        parts[0],
... ...
@@ -16,7 +16,7 @@ import (
16 16
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
17 17
 	"k8s.io/kubernetes/pkg/api/meta"
18 18
 	"k8s.io/kubernetes/pkg/api/validation"
19
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
19
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
20 20
 	"k8s.io/kubernetes/pkg/kubectl/resource"
21 21
 	"k8s.io/kubernetes/pkg/runtime"
22 22
 	kutilerrors "k8s.io/kubernetes/pkg/util/errors"
... ...
@@ -102,7 +102,7 @@ type AppConfig struct {
102 102
 	Out    io.Writer
103 103
 	ErrOut io.Writer
104 104
 
105
-	KubeClient kclient.Interface
105
+	KubeClient kclientset.Interface
106 106
 
107 107
 	Resolvers
108 108
 
... ...
@@ -451,7 +451,7 @@ func (c *AppConfig) installComponents(components app.ComponentReferences, env ap
451 451
 
452 452
 	serviceAccountName := "installer"
453 453
 	if token != nil && token.ServiceAccount {
454
-		if _, err := c.KubeClient.ServiceAccounts(c.OriginNamespace).Get(serviceAccountName); err != nil {
454
+		if _, err := c.KubeClient.Core().ServiceAccounts(c.OriginNamespace).Get(serviceAccountName); err != nil {
455 455
 			if kerrors.IsNotFound(err) {
456 456
 				objects = append(objects,
457 457
 					// create a new service account
... ...
@@ -9,6 +9,7 @@ import (
9 9
 	"testing"
10 10
 
11 11
 	kapi "k8s.io/kubernetes/pkg/api"
12
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
12 13
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
13 14
 	"k8s.io/kubernetes/pkg/runtime"
14 15
 	"k8s.io/kubernetes/pkg/util/sets"
... ...
@@ -156,7 +157,7 @@ func TestBuildTemplates(t *testing.T) {
156 156
 		appCfg := AppConfig{}
157 157
 		appCfg.Out = &bytes.Buffer{}
158 158
 		appCfg.SetOpenShiftClient(&client.Fake{}, c.namespace, nil)
159
-		appCfg.KubeClient = ktestclient.NewSimpleFake()
159
+		appCfg.KubeClient = fake.NewSimpleClientset()
160 160
 		appCfg.TemplateSearcher = fakeTemplateSearcher()
161 161
 		appCfg.AddArguments([]string{c.templateName})
162 162
 		appCfg.TemplateParameters = []string{}
... ...
@@ -12,7 +12,7 @@ import (
12 12
 	apierrs "k8s.io/kubernetes/pkg/api/errors"
13 13
 	"k8s.io/kubernetes/pkg/api/unversioned"
14 14
 	kcache "k8s.io/kubernetes/pkg/client/cache"
15
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
15
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
16 16
 	"k8s.io/kubernetes/pkg/runtime"
17 17
 	"k8s.io/kubernetes/pkg/util/diff"
18 18
 
... ...
@@ -39,9 +39,9 @@ func (fn resolveFunc) ResolveObjectReference(ref *kapi.ObjectReference, defaultN
39 39
 }
40 40
 
41 41
 func setDefaultCache(p *imagePolicyPlugin) kcache.Indexer {
42
-	kclient := ktestclient.NewSimpleFake()
42
+	kclient := fake.NewSimpleClientset()
43 43
 	store := cache.NewCacheStore(kcache.MetaNamespaceKeyFunc)
44
-	p.SetProjectCache(cache.NewFake(kclient.Namespaces(), store, ""))
44
+	p.SetProjectCache(cache.NewFake(kclient.Core().Namespaces(), store, ""))
45 45
 	return store
46 46
 }
47 47
 
... ...
@@ -84,7 +84,7 @@ func (p *KeepalivedPlugin) GetNamespace() (string, error) {
84 84
 
85 85
 // GetDeploymentConfig gets the deployment config associated with this IP Failover configurator plugin.
86 86
 func (p *KeepalivedPlugin) GetDeploymentConfig() (*deployapi.DeploymentConfig, error) {
87
-	osClient, _, err := p.Factory.Clients()
87
+	osClient, _, _, err := p.Factory.Clients()
88 88
 	if err != nil {
89 89
 		return nil, fmt.Errorf("error getting client: %v", err)
90 90
 	}
... ...
@@ -8,8 +8,8 @@ import (
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9 9
 	"k8s.io/kubernetes/pkg/api/unversioned"
10 10
 	"k8s.io/kubernetes/pkg/client/cache"
11
-	clientsetfake "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
12
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
11
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
12
+	"k8s.io/kubernetes/pkg/client/testing/core"
13 13
 	"k8s.io/kubernetes/pkg/runtime"
14 14
 
15 15
 	buildapi "github.com/openshift/origin/pkg/build/api"
... ...
@@ -38,14 +38,14 @@ func TestIgnoreThatWhichCannotBeKnown(t *testing.T) {
38 38
 
39 39
 // TestAdmissionExists verifies you cannot create Origin content if namespace is not known
40 40
 func TestAdmissionExists(t *testing.T) {
41
-	mockClient := &testclient.Fake{}
42
-	mockClient.AddReactor("*", "*", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
41
+	mockClient := &fake.Clientset{}
42
+	mockClient.AddReactor("*", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
43 43
 		return true, &kapi.Namespace{}, fmt.Errorf("DOES NOT EXIST")
44 44
 	})
45 45
 
46
-	cache := projectcache.NewFake(mockClient.Namespaces(), projectcache.NewCacheStore(cache.MetaNamespaceKeyFunc), "")
46
+	cache := projectcache.NewFake(mockClient.Core().Namespaces(), projectcache.NewCacheStore(cache.MetaNamespaceKeyFunc), "")
47 47
 
48
-	mockClientset := clientsetfake.NewSimpleClientset()
48
+	mockClientset := fake.NewSimpleClientset()
49 49
 	handler := &lifecycle{client: mockClientset}
50 50
 	handler.SetProjectCache(cache)
51 51
 	build := &buildapi.Build{
... ...
@@ -81,13 +81,13 @@ func TestAdmissionExists(t *testing.T) {
81 81
 
82 82
 func TestSAR(t *testing.T) {
83 83
 	store := projectcache.NewCacheStore(cache.IndexFuncToKeyFuncAdapter(cache.MetaNamespaceIndexFunc))
84
-	mockClient := &testclient.Fake{}
85
-	mockClient.AddReactor("get", "namespaces", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
84
+	mockClient := &fake.Clientset{}
85
+	mockClient.AddReactor("get", "namespaces", func(action core.Action) (handled bool, ret runtime.Object, err error) {
86 86
 		return true, nil, fmt.Errorf("shouldn't get here")
87 87
 	})
88
-	cache := projectcache.NewFake(mockClient.Namespaces(), store, "")
88
+	cache := projectcache.NewFake(mockClient.Core().Namespaces(), store, "")
89 89
 
90
-	mockClientset := clientsetfake.NewSimpleClientset()
90
+	mockClientset := fake.NewSimpleClientset()
91 91
 	handler := &lifecycle{client: mockClientset, creatableResources: recommendedCreatableResources}
92 92
 	handler.SetProjectCache(cache)
93 93
 
... ...
@@ -6,8 +6,7 @@ import (
6 6
 	"k8s.io/kubernetes/pkg/admission"
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8 8
 	"k8s.io/kubernetes/pkg/client/cache"
9
-	clientsetfake "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
10
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
9
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
11 10
 
12 11
 	projectcache "github.com/openshift/origin/pkg/project/cache"
13 12
 	"github.com/openshift/origin/pkg/util/labelselector"
... ...
@@ -15,7 +14,6 @@ import (
15 15
 
16 16
 // TestPodAdmission verifies various scenarios involving pod/project/global node label selectors
17 17
 func TestPodAdmission(t *testing.T) {
18
-	mockClient := &testclient.Fake{}
19 18
 	project := &kapi.Namespace{
20 19
 		ObjectMeta: kapi.ObjectMeta{
21 20
 			Name:      "testProject",
... ...
@@ -25,7 +23,7 @@ func TestPodAdmission(t *testing.T) {
25 25
 	projectStore := projectcache.NewCacheStore(cache.IndexFuncToKeyFuncAdapter(cache.MetaNamespaceIndexFunc))
26 26
 	projectStore.Add(project)
27 27
 
28
-	mockClientset := clientsetfake.NewSimpleClientset()
28
+	mockClientset := fake.NewSimpleClientset()
29 29
 	handler := &podNodeEnvironment{client: mockClientset}
30 30
 	pod := &kapi.Pod{
31 31
 		ObjectMeta: kapi.ObjectMeta{Name: "testPod"},
... ...
@@ -106,7 +104,7 @@ func TestPodAdmission(t *testing.T) {
106 106
 		},
107 107
 	}
108 108
 	for _, test := range tests {
109
-		cache := projectcache.NewFake(mockClient.Namespaces(), projectStore, test.defaultNodeSelector)
109
+		cache := projectcache.NewFake(mockClientset.Core().Namespaces(), projectStore, test.defaultNodeSelector)
110 110
 		handler.SetProjectCache(cache)
111 111
 		if !test.ignoreProjectNodeSelector {
112 112
 			project.ObjectMeta.Annotations = map[string]string{"openshift.io/node-selector": test.projectNodeSelector}
... ...
@@ -8,6 +8,7 @@ import (
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9 9
 	"k8s.io/kubernetes/pkg/auth/user"
10 10
 	"k8s.io/kubernetes/pkg/client/cache"
11
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
11 12
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
12 13
 	"k8s.io/kubernetes/pkg/labels"
13 14
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -362,8 +363,8 @@ type projectCount struct {
362 362
 }
363 363
 
364 364
 func fakeProjectCache(requesters map[string]projectCount) *projectcache.ProjectCache {
365
-	kclient := &ktestclient.Fake{}
366
-	pCache := projectcache.NewFake(kclient.Namespaces(), projectcache.NewCacheStore(cache.MetaNamespaceKeyFunc), "")
365
+	kclientset := &fake.Clientset{}
366
+	pCache := projectcache.NewFake(kclientset.Core().Namespaces(), projectcache.NewCacheStore(cache.MetaNamespaceKeyFunc), "")
367 367
 	for requester, count := range requesters {
368 368
 		for i := 0; i < count.active; i++ {
369 369
 			pCache.Store.Add(fakeNs(requester, false))
... ...
@@ -10,7 +10,7 @@ import (
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11 11
 	"k8s.io/kubernetes/pkg/auth/user"
12 12
 	"k8s.io/kubernetes/pkg/client/cache"
13
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
13
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
14 14
 	"k8s.io/kubernetes/pkg/runtime"
15 15
 	"k8s.io/kubernetes/pkg/types"
16 16
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
... ...
@@ -122,7 +122,7 @@ type AuthorizationCache struct {
122 122
 	// TODO remove this in favor of a list/watch mechanism for projects
123 123
 	allKnownNamespaces        sets.String
124 124
 	namespaceStore            cache.Store
125
-	namespaceInterface        kclient.NamespaceInterface
125
+	namespaceInterface        kcoreclient.NamespaceInterface
126 126
 	lastSyncResourceVersioner LastSyncResourceVersioner
127 127
 
128 128
 	clusterPolicyLister             client.SyncedClusterPoliciesListerInterface
... ...
@@ -150,7 +150,7 @@ type AuthorizationCache struct {
150 150
 }
151 151
 
152 152
 // NewAuthorizationCache creates a new AuthorizationCache
153
-func NewAuthorizationCache(reviewer Reviewer, namespaceInterface kclient.NamespaceInterface,
153
+func NewAuthorizationCache(reviewer Reviewer, namespaceInterface kcoreclient.NamespaceInterface,
154 154
 	clusterPolicyLister client.SyncedClusterPoliciesListerInterface, clusterPolicyBindingLister client.SyncedClusterPolicyBindingsListerInterface,
155 155
 	policyNamespacer client.SyncedPoliciesListerNamespacer, policyBindingNamespacer client.SyncedPolicyBindingsListerNamespacer,
156 156
 ) *AuthorizationCache {
... ...
@@ -7,7 +7,7 @@ import (
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9 9
 	"k8s.io/kubernetes/pkg/auth/user"
10
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
10
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
11 11
 	"k8s.io/kubernetes/pkg/util/sets"
12 12
 
13 13
 	authorizationapi "github.com/openshift/origin/pkg/authorization/api"
... ...
@@ -171,7 +171,7 @@ func TestSyncNamespace(t *testing.T) {
171 171
 			},
172 172
 		},
173 173
 	}
174
-	mockKubeClient := testclient.NewSimpleFake(&namespaceList)
174
+	mockKubeClient := fake.NewSimpleClientset(&namespaceList)
175 175
 
176 176
 	reviewer := &mockReviewer{
177 177
 		expectedResults: map[string]*mockReview{
... ...
@@ -192,7 +192,7 @@ func TestSyncNamespace(t *testing.T) {
192 192
 
193 193
 	mockPolicyCache := &MockPolicyClient{}
194 194
 
195
-	authorizationCache := NewAuthorizationCache(reviewer, mockKubeClient.Namespaces(), mockPolicyCache, mockPolicyCache, mockPolicyCache, mockPolicyCache)
195
+	authorizationCache := NewAuthorizationCache(reviewer, mockKubeClient.Core().Namespaces(), mockPolicyCache, mockPolicyCache, mockPolicyCache, mockPolicyCache)
196 196
 	// we prime the data we need here since we are not running reflectors
197 197
 	for i := range namespaceList.Items {
198 198
 		authorizationCache.namespaceStore.Add(&namespaceList.Items[i])
... ...
@@ -6,7 +6,7 @@ import (
6 6
 
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8 8
 	"k8s.io/kubernetes/pkg/auth/user"
9
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
9
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
10 10
 	"k8s.io/kubernetes/pkg/runtime"
11 11
 	"k8s.io/kubernetes/pkg/util/sets"
12 12
 	"k8s.io/kubernetes/pkg/util/wait"
... ...
@@ -21,9 +21,9 @@ func newTestWatcher(username string, groups []string, namespaces ...*kapi.Namesp
21 21
 	for i := range namespaces {
22 22
 		objects = append(objects, namespaces[i])
23 23
 	}
24
-	mockClient := testclient.NewSimpleFake(objects...)
24
+	mockClient := fake.NewSimpleClientset(objects...)
25 25
 
26
-	projectCache := projectcache.NewProjectCache(mockClient.Namespaces(), "")
26
+	projectCache := projectcache.NewProjectCache(mockClient.Core().Namespaces(), "")
27 27
 	projectCache.Run()
28 28
 	fakeAuthCache := &fakeAuthCache{}
29 29
 
... ...
@@ -6,7 +6,7 @@ import (
6 6
 
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8 8
 	"k8s.io/kubernetes/pkg/client/cache"
9
-	client "k8s.io/kubernetes/pkg/client/unversioned"
9
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
10 10
 	"k8s.io/kubernetes/pkg/runtime"
11 11
 	"k8s.io/kubernetes/pkg/watch"
12 12
 
... ...
@@ -16,7 +16,7 @@ import (
16 16
 )
17 17
 
18 18
 // NewProjectCache returns a non-initialized ProjectCache. The cache needs to be run to begin functioning
19
-func NewProjectCache(client client.NamespaceInterface, defaultNodeSelector string) *ProjectCache {
19
+func NewProjectCache(client kcoreclient.NamespaceInterface, defaultNodeSelector string) *ProjectCache {
20 20
 	return &ProjectCache{
21 21
 		Client:              client,
22 22
 		DefaultNodeSelector: defaultNodeSelector,
... ...
@@ -24,7 +24,7 @@ func NewProjectCache(client client.NamespaceInterface, defaultNodeSelector strin
24 24
 }
25 25
 
26 26
 type ProjectCache struct {
27
-	Client              client.NamespaceInterface
27
+	Client              kcoreclient.NamespaceInterface
28 28
 	Store               cache.Indexer
29 29
 	DefaultNodeSelector string
30 30
 }
... ...
@@ -115,7 +115,7 @@ func (c *ProjectCache) Running() bool {
115 115
 }
116 116
 
117 117
 // NewFake is used for testing purpose only
118
-func NewFake(c client.NamespaceInterface, store cache.Indexer, defaultNodeSelector string) *ProjectCache {
118
+func NewFake(c kcoreclient.NamespaceInterface, store cache.Indexer, defaultNodeSelector string) *ProjectCache {
119 119
 	return &ProjectCache{
120 120
 		Client:              c,
121 121
 		Store:               store,
... ...
@@ -5,8 +5,7 @@ import (
5 5
 
6 6
 	kapi "k8s.io/kubernetes/pkg/api"
7 7
 	"k8s.io/kubernetes/pkg/client/cache"
8
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
9
-	clientadapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
8
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
10 9
 	"k8s.io/kubernetes/pkg/runtime"
11 10
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
12 11
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
... ...
@@ -20,17 +19,17 @@ type NamespaceControllerFactory struct {
20 20
 	// Client is an OpenShift client.
21 21
 	Client osclient.Interface
22 22
 	// KubeClient is a Kubernetes client.
23
-	KubeClient *kclient.Client
23
+	KubeClient *kclientset.Clientset
24 24
 }
25 25
 
26 26
 // Create creates a NamespaceController.
27 27
 func (factory *NamespaceControllerFactory) Create() controller.RunnableController {
28 28
 	namespaceLW := &cache.ListWatch{
29 29
 		ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
30
-			return factory.KubeClient.Namespaces().List(options)
30
+			return factory.KubeClient.Core().Namespaces().List(options)
31 31
 		},
32 32
 		WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
33
-			return factory.KubeClient.Namespaces().Watch(options)
33
+			return factory.KubeClient.Core().Namespaces().Watch(options)
34 34
 		},
35 35
 	}
36 36
 	queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc)
... ...
@@ -38,7 +37,7 @@ func (factory *NamespaceControllerFactory) Create() controller.RunnableControlle
38 38
 
39 39
 	namespaceController := &NamespaceController{
40 40
 		Client:     factory.Client,
41
-		KubeClient: clientadapter.FromUnversionedClient(factory.KubeClient),
41
+		KubeClient: factory.KubeClient,
42 42
 	}
43 43
 
44 44
 	return &controller.RetryController{
... ...
@@ -8,7 +8,7 @@ import (
8 8
 	"k8s.io/kubernetes/pkg/api/meta"
9 9
 	"k8s.io/kubernetes/pkg/api/rest"
10 10
 	"k8s.io/kubernetes/pkg/api/unversioned"
11
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
11
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
12 12
 	"k8s.io/kubernetes/pkg/registry/generic"
13 13
 	nsregistry "k8s.io/kubernetes/pkg/registry/namespace"
14 14
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -27,7 +27,7 @@ import (
27 27
 
28 28
 type REST struct {
29 29
 	// client can modify Kubernetes namespaces
30
-	client kclient.NamespaceInterface
30
+	client kcoreclient.NamespaceInterface
31 31
 	// lister can enumerate project lists that enforce policy
32 32
 	lister projectauth.Lister
33 33
 	// Allows extended behavior during creation, required
... ...
@@ -40,7 +40,7 @@ type REST struct {
40 40
 }
41 41
 
42 42
 // NewREST returns a RESTStorage object that will work against Project resources
43
-func NewREST(client kclient.NamespaceInterface, lister projectauth.Lister, authCache *projectauth.AuthorizationCache, projectCache *projectcache.ProjectCache) *REST {
43
+func NewREST(client kcoreclient.NamespaceInterface, lister projectauth.Lister, authCache *projectauth.AuthorizationCache, projectCache *projectcache.ProjectCache) *REST {
44 44
 	return &REST{
45 45
 		client:         client,
46 46
 		lister:         lister,
... ...
@@ -171,7 +171,7 @@ var _ = rest.Deleter(&REST{})
171 171
 
172 172
 // Delete deletes a Project specified by its name
173 173
 func (s *REST) Delete(ctx kapi.Context, name string) (runtime.Object, error) {
174
-	return &unversioned.Status{Status: unversioned.StatusSuccess}, s.client.Delete(name)
174
+	return &unversioned.Status{Status: unversioned.StatusSuccess}, s.client.Delete(name, nil)
175 175
 }
176 176
 
177 177
 // decoratorFunc can mutate the provided object prior to being returned.
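The Delete change above reflects the typed client's signature: the internal clientset's NamespaceInterface.Delete takes an explicit *api.DeleteOptions, and passing nil preserves the behaviour of the old single-argument call. A minimal sketch (the helper name is illustrative, not part of this patch):

    import (
        kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
    )

    // deleteNamespace shows the new two-argument Delete; nil options keep the
    // defaults that the old single-argument call implied.
    func deleteNamespace(c kcoreclient.NamespaceInterface, name string) error {
        return c.Delete(name, nil)
    }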
... ...
@@ -8,7 +8,7 @@ import (
8 8
 	"k8s.io/kubernetes/pkg/api/errors"
9 9
 	"k8s.io/kubernetes/pkg/api/unversioned"
10 10
 	"k8s.io/kubernetes/pkg/auth/user"
11
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
11
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
12 12
 
13 13
 	"github.com/openshift/origin/pkg/project/api"
14 14
 )
... ...
@@ -30,9 +30,9 @@ func TestListProjects(t *testing.T) {
30 30
 			},
31 31
 		},
32 32
 	}
33
-	mockClient := testclient.NewSimpleFake(&namespaceList)
33
+	mockClient := fake.NewSimpleClientset(&namespaceList)
34 34
 	storage := REST{
35
-		client: mockClient.Namespaces(),
35
+		client: mockClient.Core().Namespaces(),
36 36
 		lister: &mockLister{&namespaceList},
37 37
 	}
38 38
 	user := &user.DefaultInfo{
... ...
@@ -68,8 +68,8 @@ func TestCreateProjectBadObject(t *testing.T) {
68 68
 }
69 69
 
70 70
 func TestCreateInvalidProject(t *testing.T) {
71
-	mockClient := &testclient.Fake{}
72
-	storage := NewREST(mockClient.Namespaces(), &mockLister{}, nil, nil)
71
+	mockClient := &fake.Clientset{}
72
+	storage := NewREST(mockClient.Core().Namespaces(), &mockLister{}, nil, nil)
73 73
 	_, err := storage.Create(kapi.NewContext(), &api.Project{
74 74
 		ObjectMeta: kapi.ObjectMeta{
75 75
 			Annotations: map[string]string{"openshift.io/display-name": "h\t\ni"},
... ...
@@ -81,8 +81,8 @@ func TestCreateInvalidProject(t *testing.T) {
81 81
 }
82 82
 
83 83
 func TestCreateProjectOK(t *testing.T) {
84
-	mockClient := &testclient.Fake{}
85
-	storage := NewREST(mockClient.Namespaces(), &mockLister{}, nil, nil)
84
+	mockClient := &fake.Clientset{}
85
+	storage := NewREST(mockClient.Core().Namespaces(), &mockLister{}, nil, nil)
86 86
 	_, err := storage.Create(kapi.NewContext(), &api.Project{
87 87
 		ObjectMeta: kapi.ObjectMeta{Name: "foo"},
88 88
 	})
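The test changes above all follow one pattern: the generated fake clientset replaces the old testclient fake, objects are seeded with fake.NewSimpleClientset, and typed access goes through Core(). A short sketch of that pattern (the helper and namespace names are illustrative):

    import (
        kapi "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
    )

    // newSeededClient returns a fake clientset pre-loaded with one namespace;
    // tests then reach the typed namespace client via Core().
    func newSeededClient() *fake.Clientset {
        return fake.NewSimpleClientset(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: "foo"}})
    }

    // usage in a test body:
    //   ns, err := newSeededClient().Core().Namespaces().Get("foo")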
... ...
@@ -98,8 +98,8 @@ func TestCreateProjectOK(t *testing.T) {
98 98
 }
99 99
 
100 100
 func TestGetProjectOK(t *testing.T) {
101
-	mockClient := testclient.NewSimpleFake(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: "foo"}})
102
-	storage := NewREST(mockClient.Namespaces(), &mockLister{}, nil, nil)
101
+	mockClient := fake.NewSimpleClientset(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: "foo"}})
102
+	storage := NewREST(mockClient.Core().Namespaces(), &mockLister{}, nil, nil)
103 103
 	project, err := storage.Get(kapi.NewContext(), "foo")
104 104
 	if project == nil {
105 105
 		t.Error("Unexpected nil project")
... ...
@@ -113,9 +113,9 @@ func TestGetProjectOK(t *testing.T) {
113 113
 }
114 114
 
115 115
 func TestDeleteProject(t *testing.T) {
116
-	mockClient := &testclient.Fake{}
116
+	mockClient := &fake.Clientset{}
117 117
 	storage := REST{
118
-		client: mockClient.Namespaces(),
118
+		client: mockClient.Core().Namespaces(),
119 119
 	}
120 120
 	obj, err := storage.Delete(kapi.NewContext(), "foo")
121 121
 	if obj == nil {
... ...
@@ -13,7 +13,6 @@ import (
13 13
 	"k8s.io/kubernetes/pkg/auth/user"
14 14
 	"k8s.io/kubernetes/pkg/client/cache"
15 15
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
16
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
17 16
 
18 17
 	configapilatest "github.com/openshift/origin/pkg/cmd/server/api/latest"
19 18
 	projectcache "github.com/openshift/origin/pkg/project/cache"
... ...
@@ -326,7 +325,7 @@ func fakeNamespace(pluginEnabled bool) *kapi.Namespace {
326 326
 func fakeProjectCache(ns *kapi.Namespace) *projectcache.ProjectCache {
327 327
 	store := projectcache.NewCacheStore(cache.MetaNamespaceKeyFunc)
328 328
 	store.Add(ns)
329
-	return projectcache.NewFake((&ktestclient.Fake{}).Namespaces(), store, "")
329
+	return projectcache.NewFake((&fake.Clientset{}).Core().Namespaces(), store, "")
330 330
 }
331 331
 
332 332
 func testConfig(lc2mr int64, cr2lr int64, mr2lr int64) *api.ClusterResourceOverrideConfig {
... ...
@@ -7,7 +7,7 @@ import (
7 7
 	"k8s.io/kubernetes/pkg/admission"
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9 9
 	"k8s.io/kubernetes/pkg/client/cache"
10
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
10
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
11 11
 
12 12
 	oadmission "github.com/openshift/origin/pkg/cmd/server/admission"
13 13
 	projectcache "github.com/openshift/origin/pkg/project/cache"
... ...
@@ -17,8 +17,8 @@ import (
17 17
 )
18 18
 
19 19
 func testCache(projectAnnotations map[string]string) *projectcache.ProjectCache {
20
-	kclient := &ktestclient.Fake{}
21
-	pCache := projectcache.NewFake(kclient.Namespaces(), projectcache.NewCacheStore(cache.MetaNamespaceKeyFunc), "")
20
+	kclient := &fake.Clientset{}
21
+	pCache := projectcache.NewFake(kclient.Core().Namespaces(), projectcache.NewCacheStore(cache.MetaNamespaceKeyFunc), "")
22 22
 	ns := &kapi.Namespace{}
23 23
 	ns.Name = "default"
24 24
 	ns.Annotations = projectAnnotations
... ...
@@ -10,6 +10,8 @@ import (
10 10
 
11 11
 	kapi "k8s.io/kubernetes/pkg/api"
12 12
 	"k8s.io/kubernetes/pkg/api/unversioned"
13
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
14
+	"k8s.io/kubernetes/pkg/client/testing/core"
13 15
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
14 16
 	"k8s.io/kubernetes/pkg/runtime"
15 17
 	"k8s.io/kubernetes/pkg/util/sets"
... ...
@@ -50,9 +52,9 @@ func runFuzzer(t *testing.T) {
50 50
 	defer close(stopCh)
51 51
 
52 52
 	startingNamespaces := CreateStartingNamespaces()
53
-	kubeclient := ktestclient.NewSimpleFake(startingNamespaces...)
53
+	kubeclient := fake.NewSimpleClientset(startingNamespaces...)
54 54
 	nsWatch := watch.NewFake()
55
-	kubeclient.PrependWatchReactor("namespaces", ktestclient.DefaultWatchReactor(nsWatch, nil))
55
+	kubeclient.PrependWatchReactor("namespaces", core.DefaultWatchReactor(nsWatch, nil))
56 56
 
57 57
 	startingQuotas := CreateStartingQuotas()
58 58
 	originclient := testclient.NewSimpleFake(startingQuotas...)
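The fuzzer setup above also shows where fake reactors now live: watch reactors come from k8s.io/kubernetes/pkg/client/testing/core instead of the unversioned testclient package. A hedged sketch of the same wiring as a standalone helper (helper name is illustrative):

    import (
        "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
        "k8s.io/kubernetes/pkg/client/testing/core"
        "k8s.io/kubernetes/pkg/watch"
    )

    // newWatchedFake returns a fake clientset whose namespace watches are fed
    // by a controllable watch.FakeWatcher, mirroring the setup above.
    func newWatchedFake() (*fake.Clientset, *watch.FakeWatcher) {
        client := fake.NewSimpleClientset()
        w := watch.NewFake()
        client.PrependWatchReactor("namespaces", core.DefaultWatchReactor(w, nil))
        return client, w
    }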
... ...
@@ -1,7 +1,7 @@
1 1
 package allocation
2 2
 
3 3
 import (
4
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
4
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
5 5
 
6 6
 	osclient "github.com/openshift/origin/pkg/client"
7 7
 	"github.com/openshift/origin/pkg/route"
... ...
@@ -14,7 +14,7 @@ type RouteAllocationControllerFactory struct {
14 14
 	OSClient osclient.Interface
15 15
 
16 16
 	// KubeClient is a Kubernetes client.
17
-	KubeClient kclient.Interface
17
+	KubeClient kclientset.Interface
18 18
 }
19 19
 
20 20
 // Create a RouteAllocationController instance.
... ...
@@ -7,7 +7,8 @@ import (
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9 9
 	"k8s.io/kubernetes/pkg/client/cache"
10
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
11 12
 	"k8s.io/kubernetes/pkg/fields"
12 13
 	"k8s.io/kubernetes/pkg/labels"
13 14
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -25,9 +26,9 @@ import (
25 25
 // controller. It supports optional scoping on Namespace, Labels, and Fields of routes.
26 26
 // If Namespace is empty, it means "all namespaces".
27 27
 type RouterControllerFactory struct {
28
-	KClient        kclient.EndpointsNamespacer
28
+	KClient        kcoreclient.EndpointsGetter
29 29
 	OSClient       osclient.RoutesNamespacer
30
-	NodeClient     kclient.NodesInterface
30
+	NodeClient     kcoreclient.NodesGetter
31 31
 	Namespaces     controller.NamespaceLister
32 32
 	ResyncInterval time.Duration
33 33
 	Namespace      string
... ...
@@ -36,11 +37,11 @@ type RouterControllerFactory struct {
36 36
 }
37 37
 
38 38
 // NewDefaultRouterControllerFactory initializes a default router controller factory.
39
-func NewDefaultRouterControllerFactory(oc osclient.RoutesNamespacer, kc kclient.Interface) *RouterControllerFactory {
39
+func NewDefaultRouterControllerFactory(oc osclient.RoutesNamespacer, kc kclientset.Interface) *RouterControllerFactory {
40 40
 	return &RouterControllerFactory{
41
-		KClient:        kc,
41
+		KClient:        kc.Core(),
42 42
 		OSClient:       oc,
43
-		NodeClient:     kc,
43
+		NodeClient:     kc.Core(),
44 44
 		ResyncInterval: 10 * time.Minute,
45 45
 
46 46
 		Namespace: kapi.NamespaceAll,
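The factory above illustrates the interface narrowing: the per-resource *Namespacer/*Interface types give way to the generated *Getter interfaces, and a single kc.Core() value satisfies all of them (as does the fake clientset's Core() in tests). A minimal consumer sketch under those assumptions (function name is illustrative):

    import (
        kapi "k8s.io/kubernetes/pkg/api"
        kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
    )

    // listEndpoints depends only on the narrow EndpointsGetter; any clientset's
    // Core() group client can be passed in.
    func listEndpoints(c kcoreclient.EndpointsGetter, namespace string) (*kapi.EndpointsList, error) {
        return c.Endpoints(namespace).List(kapi.ListOptions{})
    }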
... ...
@@ -257,7 +258,7 @@ func (lw *routeLW) Watch(options kapi.ListOptions) (watch.Interface, error) {
257 257
 
258 258
 // endpointsLW is a list watcher for endpoints.
259 259
 type endpointsLW struct {
260
-	client    kclient.EndpointsNamespacer
260
+	client    kcoreclient.EndpointsGetter
261 261
 	label     labels.Selector
262 262
 	field     fields.Selector
263 263
 	namespace string
... ...
@@ -278,7 +279,7 @@ func (lw *endpointsLW) Watch(options kapi.ListOptions) (watch.Interface, error)
278 278
 
279 279
 // nodeLW is a list watcher for nodes.
280 280
 type nodeLW struct {
281
-	client kclient.NodesInterface
281
+	client kcoreclient.NodesGetter
282 282
 	label  labels.Selector
283 283
 	field  fields.Selector
284 284
 }
... ...
@@ -7,7 +7,7 @@ import (
7 7
 	"k8s.io/kubernetes/pkg/api/errors"
8 8
 	"k8s.io/kubernetes/pkg/api/unversioned"
9 9
 	"k8s.io/kubernetes/pkg/client/cache"
10
-	client "k8s.io/kubernetes/pkg/client/unversioned"
10
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
11 11
 	"k8s.io/kubernetes/pkg/runtime"
12 12
 	"k8s.io/kubernetes/pkg/watch"
13 13
 )
... ...
@@ -17,7 +17,7 @@ type ServiceLookup interface {
17 17
 	LookupService(*api.Endpoints) (*api.Service, error)
18 18
 }
19 19
 
20
-func NewListWatchServiceLookup(svcGetter client.ServicesNamespacer, resync time.Duration) ServiceLookup {
20
+func NewListWatchServiceLookup(svcGetter kcoreclient.ServicesGetter, resync time.Duration) ServiceLookup {
21 21
 	svcStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
22 22
 	lw := &cache.ListWatch{
23 23
 		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
... ...
@@ -13,19 +13,19 @@ import (
13 13
 
14 14
 	kapi "k8s.io/kubernetes/pkg/api"
15 15
 	kapiunversioned "k8s.io/kubernetes/pkg/api/unversioned"
16
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
16
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
17 17
 	kerrors "k8s.io/kubernetes/pkg/util/errors"
18 18
 )
19 19
 
20 20
 type OsdnMaster struct {
21
-	kClient         *kclient.Client
21
+	kClient         *kclientset.Clientset
22 22
 	osClient        *osclient.Client
23 23
 	networkInfo     *NetworkInfo
24 24
 	subnetAllocator *netutils.SubnetAllocator
25 25
 	vnids           *masterVNIDMap
26 26
 }
27 27
 
28
-func StartMaster(networkConfig osconfigapi.MasterNetworkConfig, osClient *osclient.Client, kClient *kclient.Client) error {
28
+func StartMaster(networkConfig osconfigapi.MasterNetworkConfig, osClient *osclient.Client, kClient *kclientset.Clientset) error {
29 29
 	if !osapi.IsOpenShiftNetworkPlugin(networkConfig.NetworkPluginName) {
30 30
 		return nil
31 31
 	}
... ...
@@ -145,7 +145,7 @@ func (master *OsdnMaster) validateNetworkConfig() error {
145 145
 	}
146 146
 
147 147
 	// Ensure each service is within the services network
148
-	services, err := master.kClient.Services(kapi.NamespaceAll).List(kapi.ListOptions{})
148
+	services, err := master.kClient.Core().Services(kapi.NamespaceAll).List(kapi.ListOptions{})
149 149
 	if err != nil {
150 150
 		return err
151 151
 	}
... ...
@@ -20,7 +20,7 @@ import (
20 20
 	docker "github.com/fsouza/go-dockerclient"
21 21
 
22 22
 	kapi "k8s.io/kubernetes/pkg/api"
23
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
23
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
24 24
 	"k8s.io/kubernetes/pkg/fields"
25 25
 	knetwork "k8s.io/kubernetes/pkg/kubelet/network"
26 26
 	"k8s.io/kubernetes/pkg/labels"
... ...
@@ -31,7 +31,7 @@ import (
31 31
 
32 32
 type OsdnNode struct {
33 33
 	multitenant        bool
34
-	kClient            *kclient.Client
34
+	kClient            *kclientset.Clientset
35 35
 	osClient           *osclient.Client
36 36
 	ovs                *ovs.Interface
37 37
 	networkInfo        *NetworkInfo
... ...
@@ -53,7 +53,7 @@ type OsdnNode struct {
53 53
 }
54 54
 
55 55
 // Called by higher layers to create the plugin SDN node instance
56
-func NewNodePlugin(pluginName string, osClient *osclient.Client, kClient *kclient.Client, hostname string, selfIP string, iptablesSyncPeriod time.Duration, mtu uint32) (*OsdnNode, error) {
56
+func NewNodePlugin(pluginName string, osClient *osclient.Client, kClient *kclientset.Clientset, hostname string, selfIP string, iptablesSyncPeriod time.Duration, mtu uint32) (*OsdnNode, error) {
57 57
 	if !osapi.IsOpenShiftNetworkPlugin(pluginName) {
58 58
 		return nil, nil
59 59
 	}
... ...
@@ -268,7 +268,7 @@ func (node *OsdnNode) GetLocalPods(namespace string) ([]kapi.Pod, error) {
268 268
 		LabelSelector: labels.Everything(),
269 269
 		FieldSelector: fieldSelector,
270 270
 	}
271
-	podList, err := node.kClient.Pods(namespace).List(opts)
271
+	podList, err := node.kClient.Core().Pods(namespace).List(opts)
272 272
 	if err != nil {
273 273
 		return nil, err
274 274
 	}
... ...
@@ -10,7 +10,7 @@ import (
10 10
 
11 11
 	"github.com/golang/glog"
12 12
 
13
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
13
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
14 14
 	knetwork "k8s.io/kubernetes/pkg/kubelet/network"
15 15
 	kubehostport "k8s.io/kubernetes/pkg/kubelet/network/hostport"
16 16
 
... ...
@@ -34,7 +34,7 @@ type podManager struct {
34 34
 
35 35
 	// Live pod setup/teardown stuff not used in testing code
36 36
 	multitenant     bool
37
-	kClient         *kclient.Client
37
+	kClient         *kclientset.Clientset
38 38
 	vnids           *nodeVNIDMap
39 39
 	ipamConfig      []byte
40 40
 	mtu             uint32
... ...
@@ -43,7 +43,7 @@ type podManager struct {
43 43
 }
44 44
 
45 45
 // Creates a new live podManager; used by node code
46
-func newPodManager(host knetwork.Host, multitenant bool, localSubnetCIDR string, netInfo *NetworkInfo, kClient *kclient.Client, vnids *nodeVNIDMap, mtu uint32) (*podManager, error) {
46
+func newPodManager(host knetwork.Host, multitenant bool, localSubnetCIDR string, netInfo *NetworkInfo, kClient *kclientset.Clientset, vnids *nodeVNIDMap, mtu uint32) (*podManager, error) {
47 47
 	pm := newDefaultPodManager(host)
48 48
 	pm.multitenant = multitenant
49 49
 	pm.kClient = kClient
... ...
@@ -12,7 +12,7 @@ import (
12 12
 
13 13
 	kapi "k8s.io/kubernetes/pkg/api"
14 14
 	"k8s.io/kubernetes/pkg/client/cache"
15
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
15
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
16 16
 	pconfig "k8s.io/kubernetes/pkg/proxy/config"
17 17
 	utilwait "k8s.io/kubernetes/pkg/util/wait"
18 18
 )
... ...
@@ -23,7 +23,7 @@ type proxyFirewallItem struct {
23 23
 }
24 24
 
25 25
 type OsdnProxy struct {
26
-	kClient              *kclient.Client
26
+	kClient              *kclientset.Clientset
27 27
 	osClient             *osclient.Client
28 28
 	networkInfo          *NetworkInfo
29 29
 	baseEndpointsHandler pconfig.EndpointsConfigHandler
... ...
@@ -34,7 +34,7 @@ type OsdnProxy struct {
34 34
 }
35 35
 
36 36
 // Called by higher layers to create the proxy plugin instance; only used by nodes
37
-func NewProxyPlugin(pluginName string, osClient *osclient.Client, kClient *kclient.Client) (*OsdnProxy, error) {
37
+func NewProxyPlugin(pluginName string, osClient *osclient.Client, kClient *kclientset.Clientset) (*OsdnProxy, error) {
38 38
 	if !osapi.IsOpenShiftMultitenantNetworkPlugin(pluginName) {
39 39
 		return nil, nil
40 40
 	}
... ...
@@ -5,7 +5,7 @@ import (
5 5
 
6 6
 	kapi "k8s.io/kubernetes/pkg/api"
7 7
 	"k8s.io/kubernetes/pkg/api/errors"
8
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
8
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
9 9
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
10 10
 
11 11
 	"github.com/openshift/origin/pkg/security"
... ...
@@ -37,7 +37,7 @@ func DefaultMCSAllocation(from *uid.Range, to *mcs.Range, blockSize int) MCSAllo
37 37
 type Allocation struct {
38 38
 	uid    uidallocator.Interface
39 39
 	mcs    MCSAllocationFunc
40
-	client kclient.NamespaceInterface
40
+	client kcoreclient.NamespaceInterface
41 41
 }
42 42
 
43 43
 // retryCount is the number of times to retry on a conflict when updating a namespace
... ...
@@ -7,7 +7,8 @@ import (
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9 9
 	"k8s.io/kubernetes/pkg/api/errors"
10
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
10
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
11
+	"k8s.io/kubernetes/pkg/client/testing/core"
11 12
 	"k8s.io/kubernetes/pkg/runtime"
12 13
 
13 14
 	"github.com/openshift/origin/pkg/security"
... ...
@@ -17,9 +18,9 @@ import (
17 17
 )
18 18
 
19 19
 func TestController(t *testing.T) {
20
-	var action testclient.Action
21
-	client := &testclient.Fake{}
22
-	client.AddReactor("*", "*", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
20
+	var action core.Action
21
+	client := &fake.Clientset{}
22
+	client.AddReactor("*", "*", func(a core.Action) (handled bool, ret runtime.Object, err error) {
23 23
 		action = a
24 24
 		return true, (*kapi.Namespace)(nil), nil
25 25
 	})
... ...
@@ -30,7 +31,7 @@ func TestController(t *testing.T) {
30 30
 	c := Allocation{
31 31
 		uid:    uida,
32 32
 		mcs:    DefaultMCSAllocation(uidr, mcsr, 5),
33
-		client: client.Namespaces(),
33
+		client: client.Core().Namespaces(),
34 34
 	}
35 35
 
36 36
 	err := c.Next(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: "test"}})
... ...
@@ -38,7 +39,7 @@ func TestController(t *testing.T) {
38 38
 		t.Fatal(err)
39 39
 	}
40 40
 
41
-	got := action.(testclient.CreateAction).GetObject().(*kapi.Namespace)
41
+	got := action.(core.CreateAction).GetObject().(*kapi.Namespace)
42 42
 	if got.Annotations[security.UIDRangeAnnotation] != "10/2" {
43 43
 		t.Errorf("unexpected uid annotation: %#v", got)
44 44
 	}
... ...
@@ -57,7 +58,7 @@ func TestControllerError(t *testing.T) {
57 57
 	testCases := map[string]struct {
58 58
 		err     func() error
59 59
 		errFn   func(err error) bool
60
-		reactFn testclient.ReactionFunc
60
+		reactFn core.ReactionFunc
61 61
 		actions int
62 62
 	}{
63 63
 		"not found": {
... ...
@@ -72,7 +73,7 @@ func TestControllerError(t *testing.T) {
72 72
 		},
73 73
 		"conflict": {
74 74
 			actions: 4,
75
-			reactFn: func(a testclient.Action) (bool, runtime.Object, error) {
75
+			reactFn: func(a core.Action) (bool, runtime.Object, error) {
76 76
 				if a.Matches("get", "namespaces") {
77 77
 					return true, &kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: "test"}}, nil
78 78
 				}
... ...
@@ -85,10 +86,10 @@ func TestControllerError(t *testing.T) {
85 85
 	}
86 86
 
87 87
 	for s, testCase := range testCases {
88
-		client := &testclient.Fake{}
88
+		client := &fake.Clientset{}
89 89
 
90 90
 		if testCase.reactFn == nil {
91
-			testCase.reactFn = func(a testclient.Action) (bool, runtime.Object, error) {
91
+			testCase.reactFn = func(a core.Action) (bool, runtime.Object, error) {
92 92
 				return true, (*kapi.Namespace)(nil), testCase.err()
93 93
 			}
94 94
 		}
... ...
@@ -101,7 +102,7 @@ func TestControllerError(t *testing.T) {
101 101
 		c := Allocation{
102 102
 			uid:    uida,
103 103
 			mcs:    DefaultMCSAllocation(uidr, mcsr, 5),
104
-			client: client.Namespaces(),
104
+			client: client.Core().Namespaces(),
105 105
 		}
106 106
 
107 107
 		err := c.Next(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: "test"}})
... ...
@@ -5,7 +5,7 @@ import (
5 5
 
6 6
 	kapi "k8s.io/kubernetes/pkg/api"
7 7
 	"k8s.io/kubernetes/pkg/client/cache"
8
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
8
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
9 9
 	"k8s.io/kubernetes/pkg/runtime"
10 10
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
11 11
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
... ...
@@ -19,7 +19,7 @@ import (
19 19
 type AllocationFactory struct {
20 20
 	UIDAllocator uidallocator.Interface
21 21
 	MCSAllocator MCSAllocationFunc
22
-	Client       kclient.NamespaceInterface
22
+	Client       kcoreclient.NamespaceInterface
23 23
 	// Queue may be a FIFO queue of namespaces. If nil, will be initialized using
24 24
 	// the client.
25 25
 	Queue controller.ReQueue
... ...
@@ -5,7 +5,7 @@ import (
5 5
 	"time"
6 6
 
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8
-	client "k8s.io/kubernetes/pkg/client/unversioned"
8
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
9 9
 	"k8s.io/kubernetes/pkg/registry/rangeallocation"
10 10
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
11 11
 	utilwait "k8s.io/kubernetes/pkg/util/wait"
... ...
@@ -24,14 +24,14 @@ import (
24 24
 //
25 25
 type Repair struct {
26 26
 	interval time.Duration
27
-	client   client.NamespaceInterface
27
+	client   kcoreclient.NamespaceInterface
28 28
 	alloc    rangeallocation.RangeRegistry
29 29
 	uidRange *uid.Range
30 30
 }
31 31
 
32 32
 // NewRepair creates a controller that periodically ensures that all UID labels that are allocated in the cluster
33 33
 // are claimed.
34
-func NewRepair(interval time.Duration, client client.NamespaceInterface, uidRange *uid.Range, alloc rangeallocation.RangeRegistry) *Repair {
34
+func NewRepair(interval time.Duration, client kcoreclient.NamespaceInterface, uidRange *uid.Range, alloc rangeallocation.RangeRegistry) *Repair {
35 35
 	return &Repair{
36 36
 		interval: interval,
37 37
 		client:   client,
... ...
@@ -5,7 +5,8 @@ import (
5 5
 	"time"
6 6
 
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
8
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
9
+	"k8s.io/kubernetes/pkg/client/testing/core"
9 10
 	"k8s.io/kubernetes/pkg/runtime"
10 11
 
11 12
 	"github.com/openshift/origin/pkg/security"
... ...
@@ -29,8 +30,8 @@ func (r *fakeRange) CreateOrUpdate(update *kapi.RangeAllocation) error {
29 29
 }
30 30
 
31 31
 func TestRepair(t *testing.T) {
32
-	client := &testclient.Fake{}
33
-	client.AddReactor("*", "*", func(a testclient.Action) (bool, runtime.Object, error) {
32
+	client := &fake.Clientset{}
33
+	client.AddReactor("*", "*", func(a core.Action) (bool, runtime.Object, error) {
34 34
 		list := &kapi.NamespaceList{
35 35
 			Items: []kapi.Namespace{
36 36
 				{ObjectMeta: kapi.ObjectMeta{Name: "default"}},
... ...
@@ -44,7 +45,7 @@ func TestRepair(t *testing.T) {
44 44
 	}
45 45
 
46 46
 	uidr, _ := uid.NewRange(10, 20, 2)
47
-	repair := NewRepair(0*time.Second, client.Namespaces(), uidr, alloc)
47
+	repair := NewRepair(0*time.Second, client.Core().Namespaces(), uidr, alloc)
48 48
 
49 49
 	err := repair.RunOnce()
50 50
 	if err != nil {
... ...
@@ -62,8 +63,8 @@ func TestRepair(t *testing.T) {
62 62
 }
63 63
 
64 64
 func TestRepairIgnoresMismatch(t *testing.T) {
65
-	client := &testclient.Fake{}
66
-	client.AddReactor("*", "*", func(a testclient.Action) (bool, runtime.Object, error) {
65
+	client := &fake.Clientset{}
66
+	client.AddReactor("*", "*", func(a core.Action) (bool, runtime.Object, error) {
67 67
 		list := &kapi.NamespaceList{
68 68
 			Items: []kapi.Namespace{
69 69
 				{
... ...
@@ -82,7 +83,7 @@ func TestRepairIgnoresMismatch(t *testing.T) {
82 82
 	}
83 83
 
84 84
 	uidr, _ := uid.NewRange(10, 20, 2)
85
-	repair := NewRepair(0*time.Second, client.Namespaces(), uidr, alloc)
85
+	repair := NewRepair(0*time.Second, client.Core().Namespaces(), uidr, alloc)
86 86
 
87 87
 	err := repair.RunOnce()
88 88
 	if err != nil {
... ...
@@ -9,8 +9,9 @@ import (
9 9
 	"time"
10 10
 
11 11
 	kapi "k8s.io/kubernetes/pkg/api"
12
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
13
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
12
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
13
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
14
+	"k8s.io/kubernetes/pkg/client/testing/core"
14 15
 	"k8s.io/kubernetes/pkg/registry/service/ipallocator"
15 16
 	"k8s.io/kubernetes/pkg/runtime"
16 17
 	"k8s.io/kubernetes/pkg/util/workqueue"
... ...
@@ -19,29 +20,32 @@ import (
19 19
 
20 20
 const namespace = "ns"
21 21
 
22
-func newController(t *testing.T, client *ktestclient.Fake) *IngressIPController {
22
+func newController(t *testing.T, client *fake.Clientset) *IngressIPController {
23 23
 	_, ipNet, err := net.ParseCIDR("172.16.0.12/28")
24 24
 	if err != nil {
25 25
 		t.Fatalf("unexpected error: %v", err)
26 26
 	}
27
+	if client == nil {
28
+		client = fake.NewSimpleClientset()
29
+	}
27 30
 	return NewIngressIPController(client, ipNet, 10*time.Minute)
28 31
 }
29 32
 
30
-func controllerSetup(t *testing.T, startingObjects []runtime.Object) (*ktestclient.Fake, *watch.FakeWatcher, *IngressIPController) {
31
-	client := ktestclient.NewSimpleFake(startingObjects...)
33
+func controllerSetup(t *testing.T, startingObjects []runtime.Object) (*fake.Clientset, *watch.FakeWatcher, *IngressIPController) {
34
+	client := fake.NewSimpleClientset(startingObjects...)
32 35
 
33 36
 	fakeWatch := watch.NewFake()
34
-	client.PrependWatchReactor("*", ktestclient.DefaultWatchReactor(fakeWatch, nil))
37
+	client.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
35 38
 
36
-	client.PrependReactor("create", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
37
-		obj := action.(ktestclient.CreateAction).GetObject()
39
+	client.PrependReactor("create", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
40
+		obj := action.(core.CreateAction).GetObject()
38 41
 		fakeWatch.Add(obj)
39 42
 		return true, obj, nil
40 43
 	})
41 44
 
42 45
 	// Ensure that updates the controller makes are passed through to the watcher.
43
-	client.PrependReactor("update", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
44
-		obj := action.(ktestclient.CreateAction).GetObject()
46
+	client.PrependReactor("update", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
47
+		obj := action.(core.CreateAction).GetObject()
45 48
 		fakeWatch.Modify(obj)
46 49
 		return true, obj, nil
47 50
 	})
... ...
@@ -206,7 +210,7 @@ func TestProcessChange(t *testing.T) {
206 206
 	}
207 207
 	for _, test := range tests {
208 208
 		c := newController(t, nil)
209
-		c.persistenceHandler = func(client kclient.ServicesNamespacer, service *kapi.Service, targetStatus bool) error {
209
+		c.persistenceHandler = func(client kcoreclient.ServicesGetter, service *kapi.Service, targetStatus bool) error {
210 210
 			return nil
211 211
 		}
212 212
 		s := newService("svc", test.ip, test.lb)
... ...
@@ -279,7 +283,7 @@ func TestRecordAllocationReallocates(t *testing.T) {
279 279
 	c := newController(t, nil)
280 280
 	var persisted *kapi.Service
281 281
 	// Keep track of the last-persisted service
282
-	c.persistenceHandler = func(client kclient.ServicesNamespacer, service *kapi.Service, targetStatus bool) error {
282
+	c.persistenceHandler = func(client kcoreclient.ServicesGetter, service *kapi.Service, targetStatus bool) error {
283 283
 		persisted = service
284 284
 		return nil
285 285
 	}
... ...
@@ -304,7 +308,7 @@ func TestAllocateReleasesOnPersistenceFailure(t *testing.T) {
304 304
 	c := newController(t, nil)
305 305
 	expectedFree := c.ipAllocator.Free()
306 306
 	expectedErr := errors.New("Persistence failure")
307
-	c.persistenceHandler = func(client kclient.ServicesNamespacer, service *kapi.Service, targetStatus bool) error {
307
+	c.persistenceHandler = func(client kcoreclient.ServicesGetter, service *kapi.Service, targetStatus bool) error {
308 308
 		return expectedErr
309 309
 	}
310 310
 	s := newService("svc", "", true)
... ...
@@ -369,7 +373,7 @@ func TestClearLocalAllocation(t *testing.T) {
369 369
 
370 370
 func TestEnsureExternalIPRespectsNonIngress(t *testing.T) {
371 371
 	c := newController(t, nil)
372
-	c.persistenceHandler = func(client kclient.ServicesNamespacer, service *kapi.Service, targetStatus bool) error {
372
+	c.persistenceHandler = func(client kcoreclient.ServicesGetter, service *kapi.Service, targetStatus bool) error {
373 373
 		return nil
374 374
 	}
375 375
 	ingressIP := "172.16.0.1"
... ...
@@ -529,7 +533,7 @@ func TestClearPersistedAllocation(t *testing.T) {
529 529
 	for _, test := range tests {
530 530
 		c := newController(t, nil)
531 531
 		var persistedService *kapi.Service
532
-		c.persistenceHandler = func(client kclient.ServicesNamespacer, service *kapi.Service, targetStatus bool) error {
532
+		c.persistenceHandler = func(client kcoreclient.ServicesGetter, service *kapi.Service, targetStatus bool) error {
533 533
 			// Save the last persisted service
534 534
 			persistedService = service
535 535
 			return test.persistenceError
... ...
@@ -11,7 +11,7 @@ import (
11 11
 	kapi "k8s.io/kubernetes/pkg/api"
12 12
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
13 13
 	"k8s.io/kubernetes/pkg/client/cache"
14
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
14
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
15 15
 	"k8s.io/kubernetes/pkg/controller"
16 16
 	"k8s.io/kubernetes/pkg/controller/framework"
17 17
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -46,8 +46,8 @@ const (
46 46
 // ServiceServingCertController is responsible for synchronizing Service objects stored
47 47
 // in the system with actual running replica sets and pods.
48 48
 type ServiceServingCertController struct {
49
-	serviceClient kclient.ServicesNamespacer
50
-	secretClient  kclient.SecretsNamespacer
49
+	serviceClient kcoreclient.ServicesGetter
50
+	secretClient  kcoreclient.SecretsGetter
51 51
 
52 52
 	// Services that need to be checked
53 53
 	queue      workqueue.RateLimitingInterface
... ...
@@ -66,7 +66,7 @@ type ServiceServingCertController struct {
66 66
 
67 67
 // NewServiceServingCertController creates a new ServiceServingCertController.
68 68
 // TODO this should accept a shared informer
69
-func NewServiceServingCertController(serviceClient kclient.ServicesNamespacer, secretClient kclient.SecretsNamespacer, ca *crypto.CA, dnsSuffix string, resyncInterval time.Duration) *ServiceServingCertController {
69
+func NewServiceServingCertController(serviceClient kcoreclient.ServicesGetter, secretClient kcoreclient.SecretsGetter, ca *crypto.CA, dnsSuffix string, resyncInterval time.Duration) *ServiceServingCertController {
70 70
 	sc := &ServiceServingCertController{
71 71
 		serviceClient: serviceClient,
72 72
 		secretClient:  secretClient,
... ...
@@ -9,7 +9,8 @@ import (
9 9
 
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11 11
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
12
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
12
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
13
+	"k8s.io/kubernetes/pkg/client/testing/core"
13 14
 	"k8s.io/kubernetes/pkg/runtime"
14 15
 	"k8s.io/kubernetes/pkg/types"
15 16
 	"k8s.io/kubernetes/pkg/watch"
... ...
@@ -17,7 +18,7 @@ import (
17 17
 	"github.com/openshift/origin/pkg/cmd/server/admin"
18 18
 )
19 19
 
20
-func controllerSetup(startingObjects []runtime.Object, stopChannel chan struct{}, t *testing.T) ( /*caName*/ string, *ktestclient.Fake, *watch.FakeWatcher, *ServiceServingCertController) {
20
+func controllerSetup(startingObjects []runtime.Object, stopChannel chan struct{}, t *testing.T) ( /*caName*/ string, *fake.Clientset, *watch.FakeWatcher, *ServiceServingCertController) {
21 21
 	certDir, err := ioutil.TempDir("", "serving-cert-unit-")
22 22
 	if err != nil {
23 23
 		t.Fatalf("unexpected error: %v", err)
... ...
@@ -35,17 +36,17 @@ func controllerSetup(startingObjects []runtime.Object, stopChannel chan struct{}
35 35
 		t.Fatalf("unexpected error: %v", err)
36 36
 	}
37 37
 
38
-	kubeclient := ktestclient.NewSimpleFake(startingObjects...)
38
+	kubeclient := fake.NewSimpleClientset(startingObjects...)
39 39
 	fakeWatch := watch.NewFake()
40
-	kubeclient.PrependReactor("create", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
41
-		return true, action.(ktestclient.CreateAction).GetObject(), nil
40
+	kubeclient.PrependReactor("create", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
41
+		return true, action.(core.CreateAction).GetObject(), nil
42 42
 	})
43
-	kubeclient.PrependReactor("update", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
44
-		return true, action.(ktestclient.UpdateAction).GetObject(), nil
43
+	kubeclient.PrependReactor("update", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
44
+		return true, action.(core.UpdateAction).GetObject(), nil
45 45
 	})
46
-	kubeclient.PrependWatchReactor("*", ktestclient.DefaultWatchReactor(fakeWatch, nil))
46
+	kubeclient.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
47 47
 
48
-	controller := NewServiceServingCertController(kubeclient, kubeclient, ca, "cluster.local", 10*time.Minute)
48
+	controller := NewServiceServingCertController(kubeclient.Core(), kubeclient.Core(), ca, "cluster.local", 10*time.Minute)
49 49
 
50 50
 	return caOptions.Name, kubeclient, fakeWatch, controller
51 51
 }
... ...
@@ -94,7 +95,7 @@ func TestBasicControllerFlow(t *testing.T) {
94 94
 	for _, action := range kubeclient.Actions() {
95 95
 		switch {
96 96
 		case action.Matches("create", "secrets"):
97
-			createSecret := action.(ktestclient.CreateAction)
97
+			createSecret := action.(core.CreateAction)
98 98
 			newSecret := createSecret.GetObject().(*kapi.Secret)
99 99
 			if newSecret.Name != expectedSecretName {
100 100
 				t.Errorf("expected %v, got %v", expectedSecretName, newSecret.Name)
... ...
@@ -111,7 +112,7 @@ func TestBasicControllerFlow(t *testing.T) {
111 111
 			foundSecret = true
112 112
 
113 113
 		case action.Matches("update", "services"):
114
-			updateService := action.(ktestclient.UpdateAction)
114
+			updateService := action.(core.UpdateAction)
115 115
 			service := updateService.GetObject().(*kapi.Service)
116 116
 			if !reflect.DeepEqual(service.Annotations, expectedServiceAnnotations) {
117 117
 				t.Errorf("expected %v, got %v", expectedServiceAnnotations, service.Annotations)
... ...
@@ -148,7 +149,7 @@ func TestAlreadyExistingSecretControllerFlow(t *testing.T) {
148 148
 	existingSecret.Annotations = expectedSecretAnnotations
149 149
 
150 150
 	caName, kubeclient, fakeWatch, controller := controllerSetup([]runtime.Object{existingSecret}, stopChannel, t)
151
-	kubeclient.PrependReactor("create", "secrets", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
151
+	kubeclient.PrependReactor("create", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
152 152
 		return true, &kapi.Secret{}, kapierrors.NewAlreadyExists(kapi.Resource("secrets"), "new-secret")
153 153
 	})
154 154
 	controller.syncHandler = func(serviceKey string) error {
... ...
@@ -187,7 +188,7 @@ func TestAlreadyExistingSecretControllerFlow(t *testing.T) {
187 187
 			foundSecret = true
188 188
 
189 189
 		case action.Matches("update", "services"):
190
-			updateService := action.(ktestclient.UpdateAction)
190
+			updateService := action.(core.UpdateAction)
191 191
 			service := updateService.GetObject().(*kapi.Service)
192 192
 			if !reflect.DeepEqual(service.Annotations, expectedServiceAnnotations) {
193 193
 				t.Errorf("expected %v, got %v", expectedServiceAnnotations, service.Annotations)
... ...
@@ -225,7 +226,7 @@ func TestAlreadyExistingSecretForDifferentUIDControllerFlow(t *testing.T) {
225 225
 	existingSecret.Annotations = map[string]string{ServiceUIDAnnotation: "wrong-uid", ServiceNameAnnotation: serviceName}
226 226
 
227 227
 	_, kubeclient, fakeWatch, controller := controllerSetup([]runtime.Object{existingSecret}, stopChannel, t)
228
-	kubeclient.PrependReactor("create", "secrets", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
228
+	kubeclient.PrependReactor("create", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
229 229
 		return true, &kapi.Secret{}, kapierrors.NewAlreadyExists(kapi.Resource("secrets"), "new-secret")
230 230
 	})
231 231
 	controller.syncHandler = func(serviceKey string) error {
... ...
@@ -264,7 +265,7 @@ func TestAlreadyExistingSecretForDifferentUIDControllerFlow(t *testing.T) {
264 264
 			foundSecret = true
265 265
 
266 266
 		case action.Matches("update", "services"):
267
-			updateService := action.(ktestclient.UpdateAction)
267
+			updateService := action.(core.UpdateAction)
268 268
 			service := updateService.GetObject().(*kapi.Service)
269 269
 			if !reflect.DeepEqual(service.Annotations, expectedServiceAnnotations) {
270 270
 				t.Errorf("expected %v, got %v", expectedServiceAnnotations, service.Annotations)
... ...
@@ -295,7 +296,7 @@ func TestSecretCreationErrorControllerFlow(t *testing.T) {
295 295
 	namespace := "ns"
296 296
 
297 297
 	_, kubeclient, fakeWatch, controller := controllerSetup([]runtime.Object{}, stopChannel, t)
298
-	kubeclient.PrependReactor("create", "secrets", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
298
+	kubeclient.PrependReactor("create", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
299 299
 		return true, &kapi.Secret{}, kapierrors.NewForbidden(kapi.Resource("secrets"), "new-secret", fmt.Errorf("any reason"))
300 300
 	})
301 301
 	controller.syncHandler = func(serviceKey string) error {
... ...
@@ -330,7 +331,7 @@ func TestSecretCreationErrorControllerFlow(t *testing.T) {
330 330
 	for _, action := range kubeclient.Actions() {
331 331
 		switch {
332 332
 		case action.Matches("update", "services"):
333
-			updateService := action.(ktestclient.UpdateAction)
333
+			updateService := action.(core.UpdateAction)
334 334
 			service := updateService.GetObject().(*kapi.Service)
335 335
 			if !reflect.DeepEqual(service.Annotations, expectedServiceAnnotations) {
336 336
 				t.Errorf("expected %v, got %v", expectedServiceAnnotations, service.Annotations)
... ...
@@ -357,13 +358,13 @@ func TestSkipGenerationControllerFlow(t *testing.T) {
357 357
 	namespace := "ns"
358 358
 
359 359
 	caName, kubeclient, fakeWatch, controller := controllerSetup([]runtime.Object{}, stopChannel, t)
360
-	kubeclient.PrependReactor("update", "service", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
360
+	kubeclient.PrependReactor("update", "service", func(action core.Action) (handled bool, ret runtime.Object, err error) {
361 361
 		return true, &kapi.Service{}, kapierrors.NewForbidden(kapi.Resource("fdsa"), "new-service", fmt.Errorf("any service reason"))
362 362
 	})
363
-	kubeclient.PrependReactor("create", "secret", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
363
+	kubeclient.PrependReactor("create", "secret", func(action core.Action) (handled bool, ret runtime.Object, err error) {
364 364
 		return true, &kapi.Secret{}, kapierrors.NewForbidden(kapi.Resource("asdf"), "new-secret", fmt.Errorf("any reason"))
365 365
 	})
366
-	kubeclient.PrependReactor("update", "secret", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
366
+	kubeclient.PrependReactor("update", "secret", func(action core.Action) (handled bool, ret runtime.Object, err error) {
367 367
 		return true, &kapi.Secret{}, kapierrors.NewForbidden(kapi.Resource("asdf"), "new-secret", fmt.Errorf("any reason"))
368 368
 	})
369 369
 	controller.syncHandler = func(serviceKey string) error {
... ...
@@ -3,21 +3,22 @@ package service
3 3
 import (
4 4
 	"testing"
5 5
 
6
-	"k8s.io/kubernetes/pkg/api"
6
+	kapi "k8s.io/kubernetes/pkg/api"
7 7
 	"k8s.io/kubernetes/pkg/api/errors"
8
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
8
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
9 9
 )
10 10
 
11 11
 func TestServiceResolverCacheEmpty(t *testing.T) {
12
-	fakeClient := testclient.NewSimpleFake(&api.Service{
13
-		ObjectMeta: api.ObjectMeta{
14
-			Name: "foo",
12
+	fakeClient := fake.NewSimpleClientset(&kapi.Service{
13
+		ObjectMeta: kapi.ObjectMeta{
14
+			Name:      "foo",
15
+			Namespace: kapi.NamespaceDefault,
15 16
 		},
16
-		Spec: api.ServiceSpec{
17
-			Ports: []api.ServicePort{{Port: 80}},
17
+		Spec: kapi.ServiceSpec{
18
+			Ports: []kapi.ServicePort{{Port: 80}},
18 19
 		},
19 20
 	})
20
-	cache := NewServiceResolverCache(fakeClient.Services("default").Get)
21
+	cache := NewServiceResolverCache(fakeClient.Core().Services("default").Get)
21 22
 	if v, ok := cache.resolve("FOO_SERVICE_HOST"); v != "" || !ok {
22 23
 		t.Errorf("unexpected cache item")
23 24
 	}
... ...
@@ -35,17 +36,17 @@ func TestServiceResolverCacheEmpty(t *testing.T) {
35 35
 }
36 36
 
37 37
 type fakeRetriever struct {
38
-	service *api.Service
38
+	service *kapi.Service
39 39
 	err     error
40 40
 }
41 41
 
42
-func (r fakeRetriever) Get(name string) (*api.Service, error) {
42
+func (r fakeRetriever) Get(name string) (*kapi.Service, error) {
43 43
 	return r.service, r.err
44 44
 }
45 45
 
46 46
 func TestServiceResolverCache(t *testing.T) {
47 47
 	c := fakeRetriever{
48
-		err: errors.NewNotFound(api.Resource("Service"), "bar"),
48
+		err: errors.NewNotFound(kapi.Resource("Service"), "bar"),
49 49
 	}
50 50
 	cache := NewServiceResolverCache(c.Get)
51 51
 	if v, ok := cache.resolve("FOO_SERVICE_HOST"); v != "" || ok {
... ...
@@ -53,10 +54,10 @@ func TestServiceResolverCache(t *testing.T) {
53 53
 	}
54 54
 
55 55
 	c = fakeRetriever{
56
-		service: &api.Service{
57
-			Spec: api.ServiceSpec{
56
+		service: &kapi.Service{
57
+			Spec: kapi.ServiceSpec{
58 58
 				ClusterIP: "127.0.0.1",
59
-				Ports:     []api.ServicePort{{Port: 80}},
59
+				Ports:     []kapi.ServicePort{{Port: 80}},
60 60
 			},
61 61
 		},
62 62
 	}
... ...
@@ -5,8 +5,10 @@ import (
5 5
 	"time"
6 6
 
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
8 9
 	"k8s.io/kubernetes/pkg/client/restclient"
9 10
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
11
+	adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
10 12
 
11 13
 	"github.com/openshift/origin/pkg/client"
12 14
 )
... ...
@@ -18,7 +20,7 @@ type TokenRetriever interface {
18 18
 
19 19
 // ClientLookupTokenRetriever uses its client to look up a service account token
20 20
 type ClientLookupTokenRetriever struct {
21
-	Client kclient.Interface
21
+	Client kclientset.Interface
22 22
 }
23 23
 
24 24
 // GetToken returns a token for the named service account or an error if none existed after a timeout
... ...
@@ -30,7 +32,7 @@ func (s *ClientLookupTokenRetriever) GetToken(namespace, name string) (string, e
30 30
 		}
31 31
 
32 32
 		// Get the service account
33
-		serviceAccount, err := s.Client.ServiceAccounts(namespace).Get(name)
33
+		serviceAccount, err := s.Client.Core().ServiceAccounts(namespace).Get(name)
34 34
 		if err != nil {
35 35
 			continue
36 36
 		}
... ...
@@ -38,7 +40,7 @@ func (s *ClientLookupTokenRetriever) GetToken(namespace, name string) (string, e
38 38
 		// Get the secrets
39 39
 		// TODO: JTL: create one directly once we have that ability
40 40
 		for _, secretRef := range serviceAccount.Secrets {
41
-			secret, err2 := s.Client.Secrets(namespace).Get(secretRef.Name)
41
+			secret, err2 := s.Client.Core().Secrets(namespace).Get(secretRef.Name)
42 42
 			if err2 != nil {
43 43
 				// Tolerate fetch errors on a particular secret
44 44
 				continue
... ...
@@ -54,8 +56,8 @@ func (s *ClientLookupTokenRetriever) GetToken(namespace, name string) (string, e
54 54
 }
55 55
 
56 56
 // Clients returns an OpenShift and Kubernetes client with the credentials of the named service account
57
-// TODO: change return types to client.Interface/kclient.Interface to allow auto-reloading credentials
58
-func Clients(config restclient.Config, tokenRetriever TokenRetriever, namespace, name string) (*restclient.Config, *client.Client, *kclient.Client, error) {
57
+// TODO: change return types to client.Interface/kclientset.Interface to allow auto-reloading credentials
58
+func Clients(config restclient.Config, tokenRetriever TokenRetriever, namespace, name string) (*restclient.Config, *client.Client, *kclient.Client, *kclientset.Clientset, error) {
59 59
 	// Clear existing auth info
60 60
 	config.Username = ""
61 61
 	config.Password = ""
... ...
@@ -82,23 +84,24 @@ func Clients(config restclient.Config, tokenRetriever TokenRetriever, namespace,
82 82
 	// TODO: refetch the token if the client encounters 401 errors
83 83
 	token, err := tokenRetriever.GetToken(namespace, name)
84 84
 	if err != nil {
85
-		return nil, nil, nil, err
85
+		return nil, nil, nil, nil, err
86 86
 	}
87 87
 	config.BearerToken = token
88 88
 
89 89
 	config.UserAgent = openshiftUserAgent
90 90
 	c, err := client.New(&config)
91 91
 	if err != nil {
92
-		return nil, nil, nil, err
92
+		return nil, nil, nil, nil, err
93 93
 	}
94 94
 
95 95
 	config.UserAgent = kubeUserAgent
96 96
 	kc, err := kclient.New(&config)
97 97
 	if err != nil {
98
-		return nil, nil, nil, err
98
+		return nil, nil, nil, nil, err
99 99
 	}
100
+	kcset := adapter.FromUnversionedClient(kc)
100 101
 
101
-	return &config, c, kc, nil
102
+	return &config, c, kc, kcset, nil
102 103
 }
103 104
 
104 105
 // IsValidServiceAccountToken returns true if the given secret contains a service account token valid for the given service account
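Because Clients now also hands back the adapted internal clientset, every caller gains a fifth return value and an extra nil in each error path. The sketch below shows a hypothetical call site updated for the new signature (the namespace and service-account names are illustrative):

    import (
        kapi "k8s.io/kubernetes/pkg/api"
        "k8s.io/kubernetes/pkg/client/restclient"
    )

    // getServiceAccount receives the added *kclientset.Clientset return value
    // and uses it for typed access instead of the unversioned client.
    func getServiceAccount(cfg restclient.Config, tr TokenRetriever) (*kapi.ServiceAccount, error) {
        _, _, _, kubeClientset, err := Clients(cfg, tr, "openshift-infra", "builder")
        if err != nil {
            return nil, err
        }
        return kubeClientset.Core().ServiceAccounts("openshift-infra").Get("builder")
    }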
... ...
@@ -8,10 +8,12 @@ import (
8 8
 	"time"
9 9
 
10 10
 	"github.com/golang/glog"
11
+
11 12
 	"k8s.io/kubernetes/pkg/api"
12 13
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
13 14
 	"k8s.io/kubernetes/pkg/client/cache"
14
-	client "k8s.io/kubernetes/pkg/client/unversioned"
15
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
16
+	kclient "k8s.io/kubernetes/pkg/client/unversioned"
15 17
 	"k8s.io/kubernetes/pkg/controller"
16 18
 	"k8s.io/kubernetes/pkg/controller/framework"
17 19
 	"k8s.io/kubernetes/pkg/credentialprovider"
... ...
@@ -58,7 +60,7 @@ type DockercfgControllerOptions struct {
58 58
 }
59 59
 
60 60
 // NewDockercfgController returns a new *DockercfgController.
61
-func NewDockercfgController(cl client.Interface, options DockercfgControllerOptions) *DockercfgController {
61
+func NewDockercfgController(cl kclientset.Interface, options DockercfgControllerOptions) *DockercfgController {
62 62
 	e := &DockercfgController{
63 63
 		client:               cl,
64 64
 		queue:                workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
... ...
@@ -69,10 +71,10 @@ func NewDockercfgController(cl client.Interface, options DockercfgControllerOpti
69 69
 	serviceAccountCache, e.serviceAccountController = framework.NewInformer(
70 70
 		&cache.ListWatch{
71 71
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
72
-				return e.client.ServiceAccounts(api.NamespaceAll).List(options)
72
+				return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
73 73
 			},
74 74
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
75
-				return e.client.ServiceAccounts(api.NamespaceAll).Watch(options)
75
+				return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
76 76
 			},
77 77
 		},
78 78
 		&api.ServiceAccount{},
... ...
@@ -98,11 +100,11 @@ func NewDockercfgController(cl client.Interface, options DockercfgControllerOpti
98 98
 		&cache.ListWatch{
99 99
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
100 100
 				options.FieldSelector = tokenSecretSelector
101
-				return e.client.Secrets(api.NamespaceAll).List(options)
101
+				return e.client.Core().Secrets(api.NamespaceAll).List(options)
102 102
 			},
103 103
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
104 104
 				options.FieldSelector = tokenSecretSelector
105
-				return e.client.Secrets(api.NamespaceAll).Watch(options)
105
+				return e.client.Core().Secrets(api.NamespaceAll).Watch(options)
106 106
 			},
107 107
 		},
108 108
 		&api.Secret{},
... ...
@@ -121,7 +123,7 @@ func NewDockercfgController(cl client.Interface, options DockercfgControllerOpti
121 121
 
122 122
 // DockercfgController manages dockercfg secrets for ServiceAccount objects
123 123
 type DockercfgController struct {
124
-	client client.Interface
124
+	client kclientset.Interface
125 125
 
126 126
 	dockerURLLock        sync.Mutex
127 127
 	dockerURLs           []string
... ...
@@ -349,7 +351,7 @@ func (e *DockercfgController) syncServiceAccount(key string) error {
349 349
 		// Clear the pending token annotation when updating
350 350
 		delete(serviceAccount.Annotations, PendingTokenAnnotation)
351 351
 
352
-		updatedSA, err := e.client.ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount)
352
+		updatedSA, err := e.client.Core().ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount)
353 353
 		if err == nil {
354 354
 			e.serviceAccountCache.Mutation(updatedSA)
355 355
 		}
... ...
@@ -366,7 +368,7 @@ func (e *DockercfgController) syncServiceAccount(key string) error {
366 366
 	}
367 367
 
368 368
 	first := true
369
-	err = client.RetryOnConflict(client.DefaultBackoff, func() error {
369
+	err = kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
370 370
 		if !first {
371 371
 			obj, exists, err := e.serviceAccountCache.GetByKey(key)
372 372
 			if err != nil {
... ...
@@ -375,7 +377,7 @@ func (e *DockercfgController) syncServiceAccount(key string) error {
375 375
 			if !exists || !needsDockercfgSecret(obj.(*api.ServiceAccount)) || serviceAccount.UID != obj.(*api.ServiceAccount).UID {
376 376
 				// somehow a dockercfg secret appeared or the SA disappeared.  cleanup the secret we made and return
377 377
 				glog.V(2).Infof("Deleting secret because the work is already done %s/%s", dockercfgSecret.Namespace, dockercfgSecret.Name)
378
-				e.client.Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name)
378
+				e.client.Core().Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name, nil)
379 379
 				return nil
380 380
 			}
381 381
 
... ...
@@ -392,7 +394,7 @@ func (e *DockercfgController) syncServiceAccount(key string) error {
392 392
 		// Clear the pending token annotation when updating
393 393
 		delete(serviceAccount.Annotations, PendingTokenAnnotation)
394 394
 
395
-		updatedSA, err := e.client.ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount)
395
+		updatedSA, err := e.client.Core().ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount)
396 396
 		if err == nil {
397 397
 			e.serviceAccountCache.Mutation(updatedSA)
398 398
 		}
... ...
@@ -403,7 +405,7 @@ func (e *DockercfgController) syncServiceAccount(key string) error {
403 403
 		// nothing to do.  Our choice was stale or we got a conflict.  Either way that means that the service account was updated.  We simply need to return because we'll get an update notification later
404 404
 		// we do need to clean up our dockercfgSecret.  token secrets are cleaned up by the controller handling service account dockercfg secret deletes
405 405
 		glog.V(2).Infof("Deleting secret %s/%s (err=%v)", dockercfgSecret.Namespace, dockercfgSecret.Name, err)
406
-		e.client.Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name)
406
+		e.client.Core().Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name, nil)
407 407
 	}
408 408
 	return err
409 409
 }
... ...
@@ -419,7 +421,7 @@ func (e *DockercfgController) createTokenSecret(serviceAccount *api.ServiceAccou
419 419
 			serviceAccount.Annotations = map[string]string{}
420 420
 		}
421 421
 		serviceAccount.Annotations[PendingTokenAnnotation] = pendingTokenName
422
-		updatedServiceAccount, err := e.client.ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount)
422
+		updatedServiceAccount, err := e.client.Core().ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount)
423 423
 		// Conflicts mean we'll get called to sync this service account again
424 424
 		if kapierrors.IsConflict(err) {
425 425
 			return nil, false, nil
... ...
@@ -456,7 +458,7 @@ func (e *DockercfgController) createTokenSecret(serviceAccount *api.ServiceAccou
456 456
 	}
457 457
 
458 458
 	glog.V(4).Infof("Creating token secret %q for service account %s/%s", tokenSecret.Name, serviceAccount.Namespace, serviceAccount.Name)
459
-	token, err := e.client.Secrets(tokenSecret.Namespace).Create(tokenSecret)
459
+	token, err := e.client.Core().Secrets(tokenSecret.Namespace).Create(tokenSecret)
460 460
 	// Already exists but not in cache means we'll get an add watch event and resync
461 461
 	if kapierrors.IsAlreadyExists(err) {
462 462
 		return nil, false, nil
... ...
@@ -513,7 +515,7 @@ func (e *DockercfgController) createDockerPullSecret(serviceAccount *api.Service
513 513
 	dockercfgSecret.Data[api.DockerConfigKey] = dockercfgContent
514 514
 
515 515
 	// Save the secret
516
-	createdSecret, err := e.client.Secrets(tokenSecret.Namespace).Create(dockercfgSecret)
516
+	createdSecret, err := e.client.Core().Secrets(tokenSecret.Namespace).Create(dockercfgSecret)
517 517
 	return createdSecret, err == nil, err
518 518
 }
519 519
 
... ...
@@ -5,10 +5,11 @@ import (
5 5
 	"time"
6 6
 
7 7
 	"github.com/golang/glog"
8
+
8 9
 	"k8s.io/kubernetes/pkg/api"
9 10
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
10 11
 	"k8s.io/kubernetes/pkg/client/cache"
11
-	client "k8s.io/kubernetes/pkg/client/unversioned"
12
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
12 13
 	"k8s.io/kubernetes/pkg/controller/framework"
13 14
 	"k8s.io/kubernetes/pkg/fields"
14 15
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -29,7 +30,7 @@ type DockercfgDeletedControllerOptions struct {
29 29
 }
30 30
 
31 31
 // NewDockercfgDeletedController returns a new *DockercfgDeletedController.
32
-func NewDockercfgDeletedController(cl client.Interface, options DockercfgDeletedControllerOptions) *DockercfgDeletedController {
32
+func NewDockercfgDeletedController(cl kclientset.Interface, options DockercfgDeletedControllerOptions) *DockercfgDeletedController {
33 33
 	e := &DockercfgDeletedController{
34 34
 		client: cl,
35 35
 	}
... ...
@@ -39,11 +40,11 @@ func NewDockercfgDeletedController(cl client.Interface, options DockercfgDeleted
39 39
 		&cache.ListWatch{
40 40
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
41 41
 				opts := api.ListOptions{FieldSelector: dockercfgSelector}
42
-				return e.client.Secrets(api.NamespaceAll).List(opts)
42
+				return e.client.Core().Secrets(api.NamespaceAll).List(opts)
43 43
 			},
44 44
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
45 45
 				opts := api.ListOptions{FieldSelector: dockercfgSelector, ResourceVersion: options.ResourceVersion}
46
-				return e.client.Secrets(api.NamespaceAll).Watch(opts)
46
+				return e.client.Core().Secrets(api.NamespaceAll).Watch(opts)
47 47
 			},
48 48
 		},
49 49
 		&api.Secret{},
... ...
@@ -61,7 +62,7 @@ func NewDockercfgDeletedController(cl client.Interface, options DockercfgDeleted
61 61
 type DockercfgDeletedController struct {
62 62
 	stopChan chan struct{}
63 63
 
64
-	client client.Interface
64
+	client kclientset.Interface
65 65
 
66 66
 	secretController *framework.Controller
67 67
 }
... ...
@@ -108,7 +109,7 @@ func (e *DockercfgDeletedController) secretDeleted(obj interface{}) {
108 108
 	}
109 109
 
110 110
 	// remove the reference token secret
111
-	if err := e.client.Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Annotations[ServiceAccountTokenSecretNameKey]); (err != nil) && !kapierrors.IsNotFound(err) {
111
+	if err := e.client.Core().Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Annotations[ServiceAccountTokenSecretNameKey], nil); (err != nil) && !kapierrors.IsNotFound(err) {
112 112
 		utilruntime.HandleError(err)
113 113
 	}
114 114
 }
... ...
@@ -149,7 +150,7 @@ func (e *DockercfgDeletedController) removeDockercfgSecretReference(dockercfgSec
149 149
 	serviceAccount.ImagePullSecrets = imagePullSecrets
150 150
 
151 151
 	if changed {
152
-		_, err = e.client.ServiceAccounts(dockercfgSecret.Namespace).Update(serviceAccount)
152
+		_, err = e.client.Core().ServiceAccounts(dockercfgSecret.Namespace).Update(serviceAccount)
153 153
 		if err != nil {
154 154
 			return err
155 155
 		}
... ...
@@ -165,7 +166,7 @@ func (e *DockercfgDeletedController) getServiceAccount(secret *api.Secret) (*api
165 165
 		return nil, nil
166 166
 	}
167 167
 
168
-	serviceAccount, err := e.client.ServiceAccounts(secret.Namespace).Get(saName)
168
+	serviceAccount, err := e.client.Core().ServiceAccounts(secret.Namespace).Get(saName)
169 169
 	if err != nil {
170 170
 		return nil, err
171 171
 	}
... ...
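The same switch repeats at the type level in the controller above: the constructor and struct field move from the old monolithic `client.Interface` to `kclientset.Interface`, and the informer's ListWatch funcs call through `Core()`. A minimal, self-contained sketch of that ListWatch wiring, assuming the same package versions as the hunks around it:

```go
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// secretListWatch builds the cache.ListWatch fed to the secret informer; both
// halves go through the clientset's Core() group, as in the hunks above.
func secretListWatch(cl kclientset.Interface) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return cl.Core().Secrets(api.NamespaceAll).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return cl.Core().Secrets(api.NamespaceAll).Watch(options)
		},
	}
}
```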
@@ -6,7 +6,9 @@ import (
6 6
 	"testing"
7 7
 
8 8
 	"k8s.io/kubernetes/pkg/api"
9
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
9
+	"k8s.io/kubernetes/pkg/api/unversioned"
10
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
11
+	"k8s.io/kubernetes/pkg/client/testing/core"
10 12
 	"k8s.io/kubernetes/pkg/runtime"
11 13
 )
12 14
 
... ...
@@ -16,34 +18,34 @@ func TestDockercfgDeletion(t *testing.T) {
16 16
 
17 17
 		DeletedSecret *api.Secret
18 18
 
19
-		ExpectedActions []testclient.Action
19
+		ExpectedActions []core.Action
20 20
 	}{
21 21
 		"deleted dockercfg secret without serviceaccount": {
22 22
 			DeletedSecret: createdDockercfgSecret(),
23 23
 
24
-			ExpectedActions: []testclient.Action{
25
-				testclient.NewGetAction("serviceaccounts", "default", "default"),
26
-				testclient.NewDeleteAction("secrets", "default", "token-secret-1"),
24
+			ExpectedActions: []core.Action{
25
+				core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "default", "default"),
26
+				core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, "default", "token-secret-1"),
27 27
 			},
28 28
 		},
29 29
 		"deleted dockercfg secret with serviceaccount with reference": {
30 30
 			ClientObjects: []runtime.Object{serviceAccount(addTokenSecretReference(tokenSecretReferences()), imagePullSecretReferences()), createdDockercfgSecret()},
31 31
 
32 32
 			DeletedSecret: createdDockercfgSecret(),
33
-			ExpectedActions: []testclient.Action{
34
-				testclient.NewGetAction("serviceaccounts", "default", "default"),
35
-				testclient.NewUpdateAction("serviceaccounts", "default", serviceAccount(tokenSecretReferences(), emptyImagePullSecretReferences())),
36
-				testclient.NewDeleteAction("secrets", "default", "token-secret-1"),
33
+			ExpectedActions: []core.Action{
34
+				core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "default", "default"),
35
+				core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "default", serviceAccount(tokenSecretReferences(), emptyImagePullSecretReferences())),
36
+				core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, "default", "token-secret-1"),
37 37
 			},
38 38
 		},
39 39
 		"deleted dockercfg secret with serviceaccount without reference": {
40 40
 			ClientObjects: []runtime.Object{serviceAccount(addTokenSecretReference(tokenSecretReferences()), imagePullSecretReferences()), createdDockercfgSecret()},
41 41
 
42 42
 			DeletedSecret: createdDockercfgSecret(),
43
-			ExpectedActions: []testclient.Action{
44
-				testclient.NewGetAction("serviceaccounts", "default", "default"),
45
-				testclient.NewUpdateAction("serviceaccounts", "default", serviceAccount(tokenSecretReferences(), emptyImagePullSecretReferences())),
46
-				testclient.NewDeleteAction("secrets", "default", "token-secret-1"),
43
+			ExpectedActions: []core.Action{
44
+				core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "default", "default"),
45
+				core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "default", serviceAccount(tokenSecretReferences(), emptyImagePullSecretReferences())),
46
+				core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, "default", "token-secret-1"),
47 47
 			},
48 48
 		},
49 49
 	}
... ...
@@ -52,7 +54,7 @@ func TestDockercfgDeletion(t *testing.T) {
52 52
 		// Re-seed to reset name generation
53 53
 		rand.Seed(1)
54 54
 
55
-		client := testclient.NewSimpleFake(tc.ClientObjects...)
55
+		client := fake.NewSimpleClientset(tc.ClientObjects...)
56 56
 
57 57
 		controller := NewDockercfgDeletedController(client, DockercfgDeletedControllerOptions{})
58 58
 
... ...
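In the tests, the unversioned `testclient` fake gives way to the generated fake clientset, and expected actions identify resources with an `unversioned.GroupVersionResource` instead of a bare string. A compressed sketch of the new shape (namespace and secret name are illustrative):

```go
package example

import (
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	"k8s.io/kubernetes/pkg/client/testing/core"
)

// expectedActions shows how the rewritten tests describe the calls they expect
// a controller to make; the fake clientset records each request as a core.Action.
func expectedActions() ([]core.Action, *fake.Clientset) {
	client := fake.NewSimpleClientset()
	return []core.Action{
		// For internal core-group objects only the Resource field is populated.
		core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "default", "default"),
		core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, "default", "token-secret-1"),
	}, client
}
```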
@@ -4,10 +4,11 @@ import (
4 4
 	"time"
5 5
 
6 6
 	"github.com/golang/glog"
7
+
7 8
 	"k8s.io/kubernetes/pkg/api"
8 9
 	apierrors "k8s.io/kubernetes/pkg/api/errors"
9 10
 	"k8s.io/kubernetes/pkg/client/cache"
10
-	client "k8s.io/kubernetes/pkg/client/unversioned"
11
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11 12
 	"k8s.io/kubernetes/pkg/controller/framework"
12 13
 	"k8s.io/kubernetes/pkg/fields"
13 14
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -23,7 +24,7 @@ type DockercfgTokenDeletedControllerOptions struct {
23 23
 }
24 24
 
25 25
 // NewDockercfgTokenDeletedController returns a new *DockercfgTokenDeletedController.
26
-func NewDockercfgTokenDeletedController(cl client.Interface, options DockercfgTokenDeletedControllerOptions) *DockercfgTokenDeletedController {
26
+func NewDockercfgTokenDeletedController(cl kclientset.Interface, options DockercfgTokenDeletedControllerOptions) *DockercfgTokenDeletedController {
27 27
 	e := &DockercfgTokenDeletedController{
28 28
 		client: cl,
29 29
 	}
... ...
@@ -33,11 +34,11 @@ func NewDockercfgTokenDeletedController(cl client.Interface, options DockercfgTo
33 33
 		&cache.ListWatch{
34 34
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
35 35
 				opts := api.ListOptions{FieldSelector: dockercfgSelector}
36
-				return e.client.Secrets(api.NamespaceAll).List(opts)
36
+				return e.client.Core().Secrets(api.NamespaceAll).List(opts)
37 37
 			},
38 38
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
39 39
 				opts := api.ListOptions{FieldSelector: dockercfgSelector, ResourceVersion: options.ResourceVersion}
40
-				return e.client.Secrets(api.NamespaceAll).Watch(opts)
40
+				return e.client.Core().Secrets(api.NamespaceAll).Watch(opts)
41 41
 			},
42 42
 		},
43 43
 		&api.Secret{},
... ...
@@ -55,7 +56,7 @@ func NewDockercfgTokenDeletedController(cl client.Interface, options DockercfgTo
55 55
 type DockercfgTokenDeletedController struct {
56 56
 	stopChan chan struct{}
57 57
 
58
-	client client.Interface
58
+	client kclientset.Interface
59 59
 
60 60
 	secretController *framework.Controller
61 61
 }
... ...
@@ -94,7 +95,7 @@ func (e *DockercfgTokenDeletedController) secretDeleted(obj interface{}) {
94 94
 
95 95
 	// remove the reference token secrets
96 96
 	for _, dockercfgSecret := range dockercfgSecrets {
97
-		if err := e.client.Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name); (err != nil) && !apierrors.IsNotFound(err) {
97
+		if err := e.client.Core().Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name, nil); (err != nil) && !apierrors.IsNotFound(err) {
98 98
 			utilruntime.HandleError(err)
99 99
 		}
100 100
 	}
... ...
@@ -105,7 +106,7 @@ func (e *DockercfgTokenDeletedController) findDockercfgSecrets(tokenSecret *api.
105 105
 	dockercfgSecrets := []*api.Secret{}
106 106
 
107 107
 	options := api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.SecretTypeField, string(api.SecretTypeDockercfg))}
108
-	potentialSecrets, err := e.client.Secrets(tokenSecret.Namespace).List(options)
108
+	potentialSecrets, err := e.client.Core().Secrets(tokenSecret.Namespace).List(options)
109 109
 	if err != nil {
110 110
 		return nil, err
111 111
 	}
... ...
@@ -6,7 +6,9 @@ import (
6 6
 	"testing"
7 7
 
8 8
 	"k8s.io/kubernetes/pkg/api"
9
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
9
+	"k8s.io/kubernetes/pkg/api/unversioned"
10
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
11
+	"k8s.io/kubernetes/pkg/client/testing/core"
10 12
 	"k8s.io/kubernetes/pkg/fields"
11 13
 	"k8s.io/kubernetes/pkg/runtime"
12 14
 )
... ...
@@ -129,33 +131,33 @@ func TestTokenDeletion(t *testing.T) {
129 129
 
130 130
 		DeletedSecret *api.Secret
131 131
 
132
-		ExpectedActions []testclient.Action
132
+		ExpectedActions []core.Action
133 133
 	}{
134 134
 		"deleted token secret without serviceaccount": {
135 135
 			ClientObjects: []runtime.Object{serviceAccount(addTokenSecretReference(tokenSecretReferences()), imagePullSecretReferences()), createdDockercfgSecret()},
136 136
 			DeletedSecret: serviceAccountTokenSecret(),
137 137
 
138
-			ExpectedActions: []testclient.Action{
139
-				testclient.NewListAction("secrets", "default", api.ListOptions{FieldSelector: dockercfgSecretFieldSelector}),
140
-				testclient.NewDeleteAction("secrets", "default", "default-dockercfg-fplln"),
138
+			ExpectedActions: []core.Action{
139
+				core.NewListAction(unversioned.GroupVersionResource{Resource: "secrets"}, "default", api.ListOptions{FieldSelector: dockercfgSecretFieldSelector}),
140
+				core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, "default", "default-dockercfg-fplln"),
141 141
 			},
142 142
 		},
143 143
 		"deleted token secret with serviceaccount with reference": {
144 144
 			ClientObjects: []runtime.Object{serviceAccount(addTokenSecretReference(tokenSecretReferences()), imagePullSecretReferences()), createdDockercfgSecret()},
145 145
 
146 146
 			DeletedSecret: serviceAccountTokenSecret(),
147
-			ExpectedActions: []testclient.Action{
148
-				testclient.NewListAction("secrets", "default", api.ListOptions{FieldSelector: dockercfgSecretFieldSelector}),
149
-				testclient.NewDeleteAction("secrets", "default", "default-dockercfg-fplln"),
147
+			ExpectedActions: []core.Action{
148
+				core.NewListAction(unversioned.GroupVersionResource{Resource: "secrets"}, "default", api.ListOptions{FieldSelector: dockercfgSecretFieldSelector}),
149
+				core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, "default", "default-dockercfg-fplln"),
150 150
 			},
151 151
 		},
152 152
 		"deleted token secret with serviceaccount without reference": {
153 153
 			ClientObjects: []runtime.Object{serviceAccount(addTokenSecretReference(tokenSecretReferences()), imagePullSecretReferences()), createdDockercfgSecret()},
154 154
 
155 155
 			DeletedSecret: serviceAccountTokenSecret(),
156
-			ExpectedActions: []testclient.Action{
157
-				testclient.NewListAction("secrets", "default", api.ListOptions{FieldSelector: dockercfgSecretFieldSelector}),
158
-				testclient.NewDeleteAction("secrets", "default", "default-dockercfg-fplln"),
156
+			ExpectedActions: []core.Action{
157
+				core.NewListAction(unversioned.GroupVersionResource{Resource: "secrets"}, "default", api.ListOptions{FieldSelector: dockercfgSecretFieldSelector}),
158
+				core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, "default", "default-dockercfg-fplln"),
159 159
 			},
160 160
 		},
161 161
 	}
... ...
@@ -164,7 +166,7 @@ func TestTokenDeletion(t *testing.T) {
164 164
 		// Re-seed to reset name generation
165 165
 		rand.Seed(1)
166 166
 
167
-		client := testclient.NewSimpleFake(tc.ClientObjects...)
167
+		client := fake.NewSimpleClientset(tc.ClientObjects...)
168 168
 
169 169
 		controller := NewDockercfgTokenDeletedController(client, DockercfgTokenDeletedControllerOptions{})
170 170
 
... ...
@@ -11,7 +11,7 @@ import (
11 11
 
12 12
 	kapi "k8s.io/kubernetes/pkg/api"
13 13
 	"k8s.io/kubernetes/pkg/client/cache"
14
-	client "k8s.io/kubernetes/pkg/client/unversioned"
14
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
15 15
 	"k8s.io/kubernetes/pkg/controller"
16 16
 	"k8s.io/kubernetes/pkg/controller/framework"
17 17
 	"k8s.io/kubernetes/pkg/credentialprovider"
... ...
@@ -40,7 +40,7 @@ type DockerRegistryServiceControllerOptions struct {
40 40
 }
41 41
 
42 42
 // NewDockerRegistryServiceController returns a new *DockerRegistryServiceController.
43
-func NewDockerRegistryServiceController(cl client.Interface, options DockerRegistryServiceControllerOptions) *DockerRegistryServiceController {
43
+func NewDockerRegistryServiceController(cl kclientset.Interface, options DockerRegistryServiceControllerOptions) *DockerRegistryServiceController {
44 44
 	e := &DockerRegistryServiceController{
45 45
 		client:                cl,
46 46
 		dockercfgController:   options.DockercfgController,
... ...
@@ -55,11 +55,11 @@ func NewDockerRegistryServiceController(cl client.Interface, options DockerRegis
55 55
 		&cache.ListWatch{
56 56
 			ListFunc: func(opts kapi.ListOptions) (runtime.Object, error) {
57 57
 				opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", options.RegistryServiceName)
58
-				return e.client.Services(options.RegistryNamespace).List(opts)
58
+				return e.client.Core().Services(options.RegistryNamespace).List(opts)
59 59
 			},
60 60
 			WatchFunc: func(opts kapi.ListOptions) (watch.Interface, error) {
61 61
 				opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", options.RegistryServiceName)
62
-				return e.client.Services(options.RegistryNamespace).Watch(opts)
62
+				return e.client.Core().Services(options.RegistryNamespace).Watch(opts)
63 63
 			},
64 64
 		},
65 65
 		&kapi.Service{},
... ...
@@ -83,10 +83,10 @@ func NewDockerRegistryServiceController(cl client.Interface, options DockerRegis
83 83
 	e.secretCache, e.secretController = framework.NewInformer(
84 84
 		&cache.ListWatch{
85 85
 			ListFunc: func(opts kapi.ListOptions) (runtime.Object, error) {
86
-				return e.client.Secrets(kapi.NamespaceAll).List(dockercfgOptions)
86
+				return e.client.Core().Secrets(kapi.NamespaceAll).List(dockercfgOptions)
87 87
 			},
88 88
 			WatchFunc: func(opts kapi.ListOptions) (watch.Interface, error) {
89
-				return e.client.Secrets(kapi.NamespaceAll).Watch(dockercfgOptions)
89
+				return e.client.Core().Secrets(kapi.NamespaceAll).Watch(dockercfgOptions)
90 90
 			},
91 91
 		},
92 92
 		&kapi.Secret{},
... ...
@@ -101,7 +101,7 @@ func NewDockerRegistryServiceController(cl client.Interface, options DockerRegis
101 101
 
102 102
 // DockerRegistryServiceController manages ServiceToken secrets for Service objects
103 103
 type DockerRegistryServiceController struct {
104
-	client client.Interface
104
+	client kclientset.Interface
105 105
 
106 106
 	serviceName      string
107 107
 	serviceNamespace string
... ...
@@ -376,7 +376,7 @@ func (e *DockerRegistryServiceController) syncSecretUpdate(key string) error {
376 376
 	}
377 377
 	dockercfgSecret.Data[kapi.DockerConfigKey] = dockercfgContent
378 378
 
379
-	if _, err := e.client.Secrets(dockercfgSecret.Namespace).Update(dockercfgSecret); err != nil {
379
+	if _, err := e.client.Core().Secrets(dockercfgSecret.Namespace).Update(dockercfgSecret); err != nil {
380 380
 		return err
381 381
 	}
382 382
 
... ...
@@ -8,7 +8,8 @@ import (
8 8
 	"time"
9 9
 
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11
-	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
11
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
12
+	"k8s.io/kubernetes/pkg/client/testing/core"
12 13
 	"k8s.io/kubernetes/pkg/credentialprovider"
13 14
 	"k8s.io/kubernetes/pkg/runtime"
14 15
 	"k8s.io/kubernetes/pkg/watch"
... ...
@@ -29,16 +30,16 @@ var (
29 29
 	}
30 30
 )
31 31
 
32
-func controllerSetup(startingObjects []runtime.Object, t *testing.T) (*ktestclient.Fake, *watch.FakeWatcher, *DockerRegistryServiceController) {
33
-	kubeclient := ktestclient.NewSimpleFake(startingObjects...)
32
+func controllerSetup(startingObjects []runtime.Object, t *testing.T) (*fake.Clientset, *watch.FakeWatcher, *DockerRegistryServiceController) {
33
+	kubeclient := fake.NewSimpleClientset(startingObjects...)
34 34
 	fakeWatch := watch.NewFake()
35
-	kubeclient.PrependReactor("create", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
36
-		return true, action.(ktestclient.CreateAction).GetObject(), nil
35
+	kubeclient.PrependReactor("create", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
36
+		return true, action.(core.CreateAction).GetObject(), nil
37 37
 	})
38
-	kubeclient.PrependReactor("update", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
39
-		return true, action.(ktestclient.UpdateAction).GetObject(), nil
38
+	kubeclient.PrependReactor("update", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
39
+		return true, action.(core.UpdateAction).GetObject(), nil
40 40
 	})
41
-	kubeclient.PrependWatchReactor("services", ktestclient.DefaultWatchReactor(fakeWatch, nil))
41
+	kubeclient.PrependWatchReactor("services", core.DefaultWatchReactor(fakeWatch, nil))
42 42
 
43 43
 	controller := NewDockerRegistryServiceController(kubeclient, DockerRegistryServiceControllerOptions{
44 44
 		Resync:               10 * time.Minute,
... ...
@@ -70,10 +71,10 @@ func TestNoChangeNoOp(t *testing.T) {
70 70
 	received := make(chan bool)
71 71
 
72 72
 	kubeclient, fakeWatch, controller := controllerSetup([]runtime.Object{registryService}, t)
73
-	kubeclient.PrependReactor("update", "secrets", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
73
+	kubeclient.PrependReactor("update", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
74 74
 		return true, &kapi.Secret{}, fmt.Errorf("%v unexpected", action)
75 75
 	})
76
-	kubeclient.PrependReactor("create", "secrets", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
76
+	kubeclient.PrependReactor("create", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
77 77
 		return true, &kapi.Secret{}, fmt.Errorf("%v unexpected", action)
78 78
 	})
79 79
 	controller.syncRegistryLocationHandler = wrapHandler(received, controller.syncRegistryLocationChange, t)
... ...
@@ -153,7 +154,7 @@ func TestUpdateNewStyleSecret(t *testing.T) {
153 153
 	for _, action := range kubeclient.Actions() {
154 154
 		switch {
155 155
 		case action.Matches("update", "secrets"):
156
-			updateService := action.(ktestclient.UpdateAction)
156
+			updateService := action.(core.UpdateAction)
157 157
 			secret := updateService.GetObject().(*kapi.Secret)
158 158
 			actualDockercfg := &credentialprovider.DockerConfig{}
159 159
 			if err := json.Unmarshal(secret.Data[kapi.DockerConfigKey], actualDockercfg); err != nil {
... ...
@@ -242,7 +243,7 @@ func TestUpdateOldStyleSecretWithKey(t *testing.T) {
242 242
 	for _, action := range kubeclient.Actions() {
243 243
 		switch {
244 244
 		case action.Matches("update", "secrets"):
245
-			updateService := action.(ktestclient.UpdateAction)
245
+			updateService := action.(core.UpdateAction)
246 246
 			secret := updateService.GetObject().(*kapi.Secret)
247 247
 			actualDockercfg := &credentialprovider.DockerConfig{}
248 248
 			if err := json.Unmarshal(secret.Data[kapi.DockerConfigKey], actualDockercfg); err != nil {
... ...
@@ -290,7 +291,7 @@ func TestUpdateOldStyleSecretWithoutKey(t *testing.T) {
290 290
 	}
291 291
 
292 292
 	kubeclient, fakeWatch, controller := controllerSetup([]runtime.Object{tokenSecret, oldStyleDockercfgSecret}, t)
293
-	kubeclient.PrependReactor("get", "secrets", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
293
+	kubeclient.PrependReactor("get", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
294 294
 		return true, tokenSecret, nil
295 295
 	})
296 296
 	controller.syncRegistryLocationHandler = wrapHandler(received, controller.syncRegistryLocationChange, t)
... ...
@@ -332,7 +333,7 @@ func TestUpdateOldStyleSecretWithoutKey(t *testing.T) {
332 332
 	for _, action := range kubeclient.Actions() {
333 333
 		switch {
334 334
 		case action.Matches("update", "secrets"):
335
-			updateService := action.(ktestclient.UpdateAction)
335
+			updateService := action.(core.UpdateAction)
336 336
 			secret := updateService.GetObject().(*kapi.Secret)
337 337
 			actualDockercfg := &credentialprovider.DockerConfig{}
338 338
 			if err := json.Unmarshal(secret.Data[kapi.DockerConfigKey], actualDockercfg); err != nil {
... ...
@@ -413,7 +414,7 @@ func TestClearSecretAndRecreate(t *testing.T) {
413 413
 	for _, action := range kubeclient.Actions() {
414 414
 		switch {
415 415
 		case action.Matches("update", "secrets"):
416
-			updateService := action.(ktestclient.UpdateAction)
416
+			updateService := action.(core.UpdateAction)
417 417
 			secret := updateService.GetObject().(*kapi.Secret)
418 418
 			actualDockercfg := &credentialprovider.DockerConfig{}
419 419
 			if err := json.Unmarshal(secret.Data[kapi.DockerConfigKey], actualDockercfg); err != nil {
... ...
@@ -460,7 +461,7 @@ func TestClearSecretAndRecreate(t *testing.T) {
460 460
 	for _, action := range kubeclient.Actions() {
461 461
 		switch {
462 462
 		case action.Matches("update", "secrets"):
463
-			updateService := action.(ktestclient.UpdateAction)
463
+			updateService := action.(core.UpdateAction)
464 464
 			secret := updateService.GetObject().(*kapi.Secret)
465 465
 			actualDockercfg := &credentialprovider.DockerConfig{}
466 466
 			if err := json.Unmarshal(secret.Data[kapi.DockerConfigKey], actualDockercfg); err != nil {
... ...
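The registry-service test above also moves its reactor plumbing onto the new packages: reaction funcs now take a `core.Action`, casts go through `core.CreateAction`/`core.UpdateAction`, and the service watch reactor comes from `core.DefaultWatchReactor`. A minimal sketch of that wiring in isolation:

```go
package example

import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	"k8s.io/kubernetes/pkg/client/testing/core"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// fakeWithReactors wires a fake clientset the way controllerSetup above does.
func fakeWithReactors() (*fake.Clientset, *watch.FakeWatcher) {
	client := fake.NewSimpleClientset()
	fakeWatch := watch.NewFake()

	// Echo created/updated objects back so the controller sees its writes succeed.
	client.PrependReactor("create", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, action.(core.CreateAction).GetObject(), nil
	})
	client.PrependReactor("update", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, action.(core.UpdateAction).GetObject(), nil
	})

	// Service watch events are injected by hand through the fake watcher.
	client.PrependWatchReactor("services", core.DefaultWatchReactor(fakeWatch, nil))
	return client, fakeWatch
}
```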
@@ -8,7 +8,7 @@ import (
8 8
 	"strings"
9 9
 
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
11
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
12 12
 	"k8s.io/kubernetes/pkg/runtime"
13 13
 	"k8s.io/kubernetes/pkg/serviceaccount"
14 14
 
... ...
@@ -54,8 +54,8 @@ var routeGroupKind = routeapi.SchemeGroupVersion.WithKind(routeKind).GroupKind()
54 54
 // var ingressGroupKind = routeapi.SchemeGroupVersion.WithKind(IngressKind).GroupKind()
55 55
 
56 56
 type saOAuthClientAdapter struct {
57
-	saClient     kclient.ServiceAccountsNamespacer
58
-	secretClient kclient.SecretsNamespacer
57
+	saClient     kcoreclient.ServiceAccountsGetter
58
+	secretClient kcoreclient.SecretsGetter
59 59
 	routeClient  osclient.RoutesNamespacer
60 60
 	// TODO add ingress support
61 61
 	//ingressClient ??
... ...
@@ -182,7 +182,7 @@ func (uri *redirectURI) merge(m *model) {
182 182
 
183 183
 var _ oauthclient.Getter = &saOAuthClientAdapter{}
184 184
 
185
-func NewServiceAccountOAuthClientGetter(saClient kclient.ServiceAccountsNamespacer, secretClient kclient.SecretsNamespacer, routeClient osclient.RoutesNamespacer, delegate oauthclient.Getter, grantMethod oauthapi.GrantHandlerType) oauthclient.Getter {
185
+func NewServiceAccountOAuthClientGetter(saClient kcoreclient.ServiceAccountsGetter, secretClient kcoreclient.SecretsGetter, routeClient osclient.RoutesNamespacer, delegate oauthclient.Getter, grantMethod oauthapi.GrantHandlerType) oauthclient.Getter {
186 186
 	return &saOAuthClientAdapter{saClient: saClient, secretClient: secretClient, routeClient: routeClient, delegate: delegate, grantMethod: grantMethod, decoder: kapi.Codecs.UniversalDecoder()}
187 187
 }
188 188
 
... ...
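Consumers that only need a slice of the API switch from the broad `kclient.*Namespacer` interfaces to the generated per-group getters; a full clientset satisfies them through `Core()`, which is exactly how the test below passes `tc.kubeClient.Core()` twice. A hedged sketch of that dependency shape (the consumer type is invented for illustration):

```go
package example

import (
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
)

// tokenReader depends only on the narrow getters it actually uses rather than
// on the whole client interface.
type tokenReader struct {
	saClient     kcoreclient.ServiceAccountsGetter
	secretClient kcoreclient.SecretsGetter
}

// newTokenReader shows how callers satisfy those getters: the interface
// returned by clientset.Core() implements both.
func newTokenReader(cs kclientset.Interface) *tokenReader {
	return &tokenReader{saClient: cs.Core(), secretClient: cs.Core()}
}
```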
@@ -6,6 +6,9 @@ import (
6 6
 	"testing"
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9
+	"k8s.io/kubernetes/pkg/api/unversioned"
10
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
11
+	"k8s.io/kubernetes/pkg/client/testing/core"
9 12
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
10 13
 	"k8s.io/kubernetes/pkg/runtime"
11 14
 	"k8s.io/kubernetes/pkg/types"
... ...
@@ -24,37 +27,37 @@ func TestGetClient(t *testing.T) {
24 24
 	testCases := []struct {
25 25
 		name       string
26 26
 		clientName string
27
-		kubeClient *ktestclient.Fake
27
+		kubeClient *fake.Clientset
28 28
 		osClient   *ostestclient.Fake
29 29
 
30 30
 		expectedDelegation  bool
31 31
 		expectedErr         string
32 32
 		expectedClient      *oauthapi.OAuthClient
33
-		expectedKubeActions []ktestclient.Action
33
+		expectedKubeActions []core.Action
34 34
 		expectedOSActions   []ktestclient.Action
35 35
 	}{
36 36
 		{
37 37
 			name:                "delegate",
38 38
 			clientName:          "not:serviceaccount",
39
-			kubeClient:          ktestclient.NewSimpleFake(),
39
+			kubeClient:          fake.NewSimpleClientset(),
40 40
 			osClient:            ostestclient.NewSimpleFake(),
41 41
 			expectedDelegation:  true,
42
-			expectedKubeActions: []ktestclient.Action{},
42
+			expectedKubeActions: []core.Action{},
43 43
 			expectedOSActions:   []ktestclient.Action{},
44 44
 		},
45 45
 		{
46 46
 			name:                "missing sa",
47 47
 			clientName:          "system:serviceaccount:ns-01:missing-sa",
48
-			kubeClient:          ktestclient.NewSimpleFake(),
48
+			kubeClient:          fake.NewSimpleClientset(),
49 49
 			osClient:            ostestclient.NewSimpleFake(),
50 50
 			expectedErr:         `ServiceAccount "missing-sa" not found`,
51
-			expectedKubeActions: []ktestclient.Action{ktestclient.NewGetAction("serviceaccounts", "ns-01", "missing-sa")},
51
+			expectedKubeActions: []core.Action{core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "ns-01", "missing-sa")},
52 52
 			expectedOSActions:   []ktestclient.Action{},
53 53
 		},
54 54
 		{
55 55
 			name:       "sa no redirects",
56 56
 			clientName: "system:serviceaccount:ns-01:default",
57
-			kubeClient: ktestclient.NewSimpleFake(
57
+			kubeClient: fake.NewSimpleClientset(
58 58
 				&kapi.ServiceAccount{
59 59
 					ObjectMeta: kapi.ObjectMeta{
60 60
 						Namespace:   "ns-01",
... ...
@@ -64,13 +67,13 @@ func TestGetClient(t *testing.T) {
64 64
 				}),
65 65
 			osClient:            ostestclient.NewSimpleFake(),
66 66
 			expectedErr:         `system:serviceaccount:ns-01:default has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.<some-value>`,
67
-			expectedKubeActions: []ktestclient.Action{ktestclient.NewGetAction("serviceaccounts", "ns-01", "default")},
67
+			expectedKubeActions: []core.Action{core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "ns-01", "default")},
68 68
 			expectedOSActions:   []ktestclient.Action{},
69 69
 		},
70 70
 		{
71 71
 			name:       "sa no tokens",
72 72
 			clientName: "system:serviceaccount:ns-01:default",
73
-			kubeClient: ktestclient.NewSimpleFake(
73
+			kubeClient: fake.NewSimpleClientset(
74 74
 				&kapi.ServiceAccount{
75 75
 					ObjectMeta: kapi.ObjectMeta{
76 76
 						Namespace:   "ns-01",
... ...
@@ -80,16 +83,16 @@ func TestGetClient(t *testing.T) {
80 80
 				}),
81 81
 			osClient:    ostestclient.NewSimpleFake(),
82 82
 			expectedErr: `system:serviceaccount:ns-01:default has no tokens`,
83
-			expectedKubeActions: []ktestclient.Action{
84
-				ktestclient.NewGetAction("serviceaccounts", "ns-01", "default"),
85
-				ktestclient.NewListAction("secrets", "ns-01", kapi.ListOptions{}),
83
+			expectedKubeActions: []core.Action{
84
+				core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "ns-01", "default"),
85
+				core.NewListAction(unversioned.GroupVersionResource{Resource: "secrets"}, "ns-01", kapi.ListOptions{}),
86 86
 			},
87 87
 			expectedOSActions: []ktestclient.Action{},
88 88
 		},
89 89
 		{
90 90
 			name:       "good SA",
91 91
 			clientName: "system:serviceaccount:ns-01:default",
92
-			kubeClient: ktestclient.NewSimpleFake(
92
+			kubeClient: fake.NewSimpleClientset(
93 93
 				&kapi.ServiceAccount{
94 94
 					ObjectMeta: kapi.ObjectMeta{
95 95
 						Namespace:   "ns-01",
... ...
@@ -118,16 +121,16 @@ func TestGetClient(t *testing.T) {
118 118
 				RedirectURIs:      []string{"http://anywhere"},
119 119
 				GrantMethod:       oauthapi.GrantHandlerPrompt,
120 120
 			},
121
-			expectedKubeActions: []ktestclient.Action{
122
-				ktestclient.NewGetAction("serviceaccounts", "ns-01", "default"),
123
-				ktestclient.NewListAction("secrets", "ns-01", kapi.ListOptions{}),
121
+			expectedKubeActions: []core.Action{
122
+				core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "ns-01", "default"),
123
+				core.NewListAction(unversioned.GroupVersionResource{Resource: "secrets"}, "ns-01", kapi.ListOptions{}),
124 124
 			},
125 125
 			expectedOSActions: []ktestclient.Action{},
126 126
 		},
127 127
 		{
128 128
 			name:       "good SA with valid, simple route redirects",
129 129
 			clientName: "system:serviceaccount:ns-01:default",
130
-			kubeClient: ktestclient.NewSimpleFake(
130
+			kubeClient: fake.NewSimpleClientset(
131 131
 				&kapi.ServiceAccount{
132 132
 					ObjectMeta: kapi.ObjectMeta{
133 133
 						Namespace: "ns-01",
... ...
@@ -176,9 +179,9 @@ func TestGetClient(t *testing.T) {
176 176
 				RedirectURIs:      []string{"http://anywhere", "https://example1.com/defaultpath"},
177 177
 				GrantMethod:       oauthapi.GrantHandlerPrompt,
178 178
 			},
179
-			expectedKubeActions: []ktestclient.Action{
180
-				ktestclient.NewGetAction("serviceaccounts", "ns-01", "default"),
181
-				ktestclient.NewListAction("secrets", "ns-01", kapi.ListOptions{}),
179
+			expectedKubeActions: []core.Action{
180
+				core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "ns-01", "default"),
181
+				core.NewListAction(unversioned.GroupVersionResource{Resource: "secrets"}, "ns-01", kapi.ListOptions{}),
182 182
 			},
183 183
 			expectedOSActions: []ktestclient.Action{
184 184
 				ktestclient.NewGetAction("routes", "ns-01", "route1"),
... ...
@@ -187,7 +190,7 @@ func TestGetClient(t *testing.T) {
187 187
 		{
188 188
 			name:       "good SA with invalid route redirects",
189 189
 			clientName: "system:serviceaccount:ns-01:default",
190
-			kubeClient: ktestclient.NewSimpleFake(
190
+			kubeClient: fake.NewSimpleClientset(
191 191
 				&kapi.ServiceAccount{
192 192
 					ObjectMeta: kapi.ObjectMeta{
193 193
 						Namespace: "ns-01",
... ...
@@ -239,16 +242,16 @@ func TestGetClient(t *testing.T) {
239 239
 				RedirectURIs:      []string{"http://anywhere"},
240 240
 				GrantMethod:       oauthapi.GrantHandlerPrompt,
241 241
 			},
242
-			expectedKubeActions: []ktestclient.Action{
243
-				ktestclient.NewGetAction("serviceaccounts", "ns-01", "default"),
244
-				ktestclient.NewListAction("secrets", "ns-01", kapi.ListOptions{}),
242
+			expectedKubeActions: []core.Action{
243
+				core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "ns-01", "default"),
244
+				core.NewListAction(unversioned.GroupVersionResource{Resource: "secrets"}, "ns-01", kapi.ListOptions{}),
245 245
 			},
246 246
 			expectedOSActions: []ktestclient.Action{},
247 247
 		},
248 248
 		{
249 249
 			name:       "good SA with a route that don't have a host",
250 250
 			clientName: "system:serviceaccount:ns-01:default",
251
-			kubeClient: ktestclient.NewSimpleFake(
251
+			kubeClient: fake.NewSimpleClientset(
252 252
 				&kapi.ServiceAccount{
253 253
 					ObjectMeta: kapi.ObjectMeta{
254 254
 						Namespace: "ns-01",
... ...
@@ -297,9 +300,9 @@ func TestGetClient(t *testing.T) {
297 297
 				RedirectURIs:      []string{"http://anywhere"},
298 298
 				GrantMethod:       oauthapi.GrantHandlerPrompt,
299 299
 			},
300
-			expectedKubeActions: []ktestclient.Action{
301
-				ktestclient.NewGetAction("serviceaccounts", "ns-01", "default"),
302
-				ktestclient.NewListAction("secrets", "ns-01", kapi.ListOptions{}),
300
+			expectedKubeActions: []core.Action{
301
+				core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "ns-01", "default"),
302
+				core.NewListAction(unversioned.GroupVersionResource{Resource: "secrets"}, "ns-01", kapi.ListOptions{}),
303 303
 			},
304 304
 			expectedOSActions: []ktestclient.Action{
305 305
 				ktestclient.NewGetAction("routes", "ns-01", "route1"),
... ...
@@ -308,7 +311,7 @@ func TestGetClient(t *testing.T) {
308 308
 		{
309 309
 			name:       "good SA with routes that don't have hosts, some of which are empty or duplicates",
310 310
 			clientName: "system:serviceaccount:ns-01:default",
311
-			kubeClient: ktestclient.NewSimpleFake(
311
+			kubeClient: fake.NewSimpleClientset(
312 312
 				&kapi.ServiceAccount{
313 313
 					ObjectMeta: kapi.ObjectMeta{
314 314
 						Namespace: "ns-01",
... ...
@@ -383,9 +386,9 @@ func TestGetClient(t *testing.T) {
383 383
 				RedirectURIs:      []string{"http://anywhere", "https://a.com/defaultpath", "https://a.com/path2", "https://b.com/defaultpath", "https://b.com/path2"},
384 384
 				GrantMethod:       oauthapi.GrantHandlerPrompt,
385 385
 			},
386
-			expectedKubeActions: []ktestclient.Action{
387
-				ktestclient.NewGetAction("serviceaccounts", "ns-01", "default"),
388
-				ktestclient.NewListAction("secrets", "ns-01", kapi.ListOptions{}),
386
+			expectedKubeActions: []core.Action{
387
+				core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "ns-01", "default"),
388
+				core.NewListAction(unversioned.GroupVersionResource{Resource: "secrets"}, "ns-01", kapi.ListOptions{}),
389 389
 			},
390 390
 			expectedOSActions: []ktestclient.Action{
391 391
 				ktestclient.NewListAction("routes", "ns-01", kapi.ListOptions{}),
... ...
@@ -394,7 +397,7 @@ func TestGetClient(t *testing.T) {
394 394
 		{
395 395
 			name:       "host overrides route data",
396 396
 			clientName: "system:serviceaccount:ns-01:default",
397
-			kubeClient: ktestclient.NewSimpleFake(
397
+			kubeClient: fake.NewSimpleClientset(
398 398
 				&kapi.ServiceAccount{
399 399
 					ObjectMeta: kapi.ObjectMeta{
400 400
 						Namespace: "ns-01",
... ...
@@ -462,9 +465,9 @@ func TestGetClient(t *testing.T) {
462 462
 				RedirectURIs:      []string{"https://google.com/otherpath", "https://redhat.com/defaultpath"},
463 463
 				GrantMethod:       oauthapi.GrantHandlerPrompt,
464 464
 			},
465
-			expectedKubeActions: []ktestclient.Action{
466
-				ktestclient.NewGetAction("serviceaccounts", "ns-01", "default"),
467
-				ktestclient.NewListAction("secrets", "ns-01", kapi.ListOptions{}),
465
+			expectedKubeActions: []core.Action{
466
+				core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "ns-01", "default"),
467
+				core.NewListAction(unversioned.GroupVersionResource{Resource: "secrets"}, "ns-01", kapi.ListOptions{}),
468 468
 			},
469 469
 			expectedOSActions: []ktestclient.Action{
470 470
 				ktestclient.NewListAction("routes", "ns-01", kapi.ListOptions{}),
... ...
@@ -473,7 +476,7 @@ func TestGetClient(t *testing.T) {
473 473
 		{
474 474
 			name:       "good SA with valid, route redirects using the same route twice",
475 475
 			clientName: "system:serviceaccount:ns-01:default",
476
-			kubeClient: ktestclient.NewSimpleFake(
476
+			kubeClient: fake.NewSimpleClientset(
477 477
 				&kapi.ServiceAccount{
478 478
 					ObjectMeta: kapi.ObjectMeta{
479 479
 						Namespace: "ns-01",
... ...
@@ -523,9 +526,9 @@ func TestGetClient(t *testing.T) {
523 523
 				RedirectURIs:      []string{"https://woot.com/awesomepath", "https://woot.com:8000"},
524 524
 				GrantMethod:       oauthapi.GrantHandlerPrompt,
525 525
 			},
526
-			expectedKubeActions: []ktestclient.Action{
527
-				ktestclient.NewGetAction("serviceaccounts", "ns-01", "default"),
528
-				ktestclient.NewListAction("secrets", "ns-01", kapi.ListOptions{}),
526
+			expectedKubeActions: []core.Action{
527
+				core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, "ns-01", "default"),
528
+				core.NewListAction(unversioned.GroupVersionResource{Resource: "secrets"}, "ns-01", kapi.ListOptions{}),
529 529
 			},
530 530
 			expectedOSActions: []ktestclient.Action{
531 531
 				ktestclient.NewGetAction("routes", "ns-01", "route1"),
... ...
@@ -535,7 +538,7 @@ func TestGetClient(t *testing.T) {
535 535
 
536 536
 	for _, tc := range testCases {
537 537
 		delegate := &fakeDelegate{}
538
-		getter := NewServiceAccountOAuthClientGetter(tc.kubeClient, tc.kubeClient, tc.osClient, delegate, oauthapi.GrantHandlerPrompt)
538
+		getter := NewServiceAccountOAuthClientGetter(tc.kubeClient.Core(), tc.kubeClient.Core(), tc.osClient, delegate, oauthapi.GrantHandlerPrompt)
539 539
 		client, err := getter.GetClient(kapi.NewContext(), tc.clientName)
540 540
 		switch {
541 541
 		case len(tc.expectedErr) == 0 && err == nil:
... ...
@@ -16,7 +16,7 @@ import (
16 16
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
17 17
 	kextapi "k8s.io/kubernetes/pkg/apis/extensions"
18 18
 	"k8s.io/kubernetes/pkg/client/cache"
19
-	kclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
19
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
20 20
 	kextclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
21 21
 	"k8s.io/kubernetes/pkg/controller/framework"
22 22
 	"k8s.io/kubernetes/pkg/fields"
... ...
@@ -67,16 +67,16 @@ func (c *lastFiredCache) AddIfNewer(info types.NamespacedName, newLastFired time
67 67
 type UnidlingController struct {
68 68
 	controller          *framework.Controller
69 69
 	scaleNamespacer     kextclient.ScalesGetter
70
-	endpointsNamespacer kclient.EndpointsGetter
70
+	endpointsNamespacer kcoreclient.EndpointsGetter
71 71
 	queue               workqueue.RateLimitingInterface
72 72
 	lastFiredCache      *lastFiredCache
73 73
 
74 74
 	// TODO: remove these once we get the scale-source functionality in the scale endpoints
75 75
 	dcNamespacer deployclient.DeploymentConfigsGetter
76
-	rcNamespacer kclient.ReplicationControllersGetter
76
+	rcNamespacer kcoreclient.ReplicationControllersGetter
77 77
 }
78 78
 
79
-func NewUnidlingController(scaleNS kextclient.ScalesGetter, endptsNS kclient.EndpointsGetter, evtNS kclient.EventsGetter, dcNamespacer deployclient.DeploymentConfigsGetter, rcNamespacer kclient.ReplicationControllersGetter, resyncPeriod time.Duration) *UnidlingController {
79
+func NewUnidlingController(scaleNS kextclient.ScalesGetter, endptsNS kcoreclient.EndpointsGetter, evtNS kcoreclient.EventsGetter, dcNamespacer deployclient.DeploymentConfigsGetter, rcNamespacer kcoreclient.ReplicationControllersGetter, resyncPeriod time.Duration) *UnidlingController {
80 80
 	fieldSet := fields.Set{}
81 81
 	fieldSet["reason"] = unidlingapi.NeedPodsReason
82 82
 	fieldSelector := fieldSet.AsSelector()
... ...
@@ -1,21 +1,20 @@
1 1
 package util
2 2
 
3 3
 import (
4
-	deployapi "github.com/openshift/origin/pkg/deploy/api"
5
-	deployapiv1 "github.com/openshift/origin/pkg/deploy/api/v1"
6
-	deployclient "github.com/openshift/origin/pkg/deploy/client/clientset_generated/internalclientset/typed/core/unversioned"
7
-	unidlingapi "github.com/openshift/origin/pkg/unidling/api"
4
+	"github.com/golang/glog"
8 5
 
9 6
 	kapi "k8s.io/kubernetes/pkg/api"
10 7
 	kapiv1 "k8s.io/kubernetes/pkg/api/v1"
11 8
 	kextapi "k8s.io/kubernetes/pkg/apis/extensions"
9
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
10
+	kextensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
12 11
 	"k8s.io/kubernetes/pkg/runtime"
13 12
 	"k8s.io/kubernetes/pkg/util/strategicpatch"
14 13
 
15
-	kclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
16
-	kextclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
17
-
18
-	"github.com/golang/glog"
14
+	deployapi "github.com/openshift/origin/pkg/deploy/api"
15
+	deployapiv1 "github.com/openshift/origin/pkg/deploy/api/v1"
16
+	deployclient "github.com/openshift/origin/pkg/deploy/client/clientset_generated/internalclientset/typed/core/unversioned"
17
+	unidlingapi "github.com/openshift/origin/pkg/unidling/api"
19 18
 )
20 19
 
21 20
 // TODO: remove the below functions once we get a way to mark/unmark an object as idled
... ...
@@ -23,7 +22,7 @@ import (
23 23
 
24 24
 type AnnotationFunc func(currentReplicas int32, annotations map[string]string)
25 25
 
26
-func NewScaleAnnotater(scales kextclient.ScalesGetter, dcs deployclient.DeploymentConfigsGetter, rcs kclient.ReplicationControllersGetter, changeAnnots AnnotationFunc) *ScaleAnnotater {
26
+func NewScaleAnnotater(scales kextensionsclient.ScalesGetter, dcs deployclient.DeploymentConfigsGetter, rcs kcoreclient.ReplicationControllersGetter, changeAnnots AnnotationFunc) *ScaleAnnotater {
27 27
 	return &ScaleAnnotater{
28 28
 		scales:            scales,
29 29
 		dcs:               dcs,
... ...
@@ -33,9 +32,9 @@ func NewScaleAnnotater(scales kextclient.ScalesGetter, dcs deployclient.Deployme
33 33
 }
34 34
 
35 35
 type ScaleAnnotater struct {
36
-	scales            kextclient.ScalesGetter
36
+	scales            kextensionsclient.ScalesGetter
37 37
 	dcs               deployclient.DeploymentConfigsGetter
38
-	rcs               kclient.ReplicationControllersGetter
38
+	rcs               kcoreclient.ReplicationControllersGetter
39 39
 	ChangeAnnotations AnnotationFunc
40 40
 }
41 41
 
... ...
@@ -49,10 +48,10 @@ type scaleUpdater struct {
49 49
 	encoder   runtime.Encoder
50 50
 	namespace string
51 51
 	dcGetter  deployclient.DeploymentConfigsGetter
52
-	rcGetter  kclient.ReplicationControllersGetter
52
+	rcGetter  kcoreclient.ReplicationControllersGetter
53 53
 }
54 54
 
55
-func NewScaleUpdater(encoder runtime.Encoder, namespace string, dcGetter deployclient.DeploymentConfigsGetter, rcGetter kclient.ReplicationControllersGetter) ScaleUpdater {
55
+func NewScaleUpdater(encoder runtime.Encoder, namespace string, dcGetter deployclient.DeploymentConfigsGetter, rcGetter kcoreclient.ReplicationControllersGetter) ScaleUpdater {
56 56
 	return scaleUpdater{
57 57
 		encoder:   encoder,
58 58
 		namespace: namespace,
... ...
@@ -6,7 +6,7 @@ import (
6 6
 	"github.com/golang/glog"
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8 8
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
9
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
9
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
10 10
 	"k8s.io/kubernetes/pkg/kubectl"
11 11
 
12 12
 	"github.com/openshift/origin/pkg/client"
... ...
@@ -16,7 +16,7 @@ func NewGroupReaper(
16 16
 	groupClient client.GroupsInterface,
17 17
 	clusterBindingClient client.ClusterRoleBindingsInterface,
18 18
 	bindingClient client.RoleBindingsNamespacer,
19
-	sccClient kclient.SecurityContextConstraintsInterface,
19
+	sccClient kcoreclient.SecurityContextConstraintsGetter,
20 20
 ) kubectl.Reaper {
21 21
 	return &GroupReaper{
22 22
 		groupClient:          groupClient,
... ...
@@ -30,7 +30,7 @@ type GroupReaper struct {
30 30
 	groupClient          client.GroupsInterface
31 31
 	clusterBindingClient client.ClusterRoleBindingsInterface
32 32
 	bindingClient        client.RoleBindingsNamespacer
33
-	sccClient            kclient.SecurityContextConstraintsInterface
33
+	sccClient            kcoreclient.SecurityContextConstraintsGetter
34 34
 }
35 35
 
36 36
 // Stop on a reaper is actually used for deletion.  In this case, we'll delete referencing identities, clusterBindings, and bindings,
... ...
@@ -5,6 +5,9 @@ import (
5 5
 	"testing"
6 6
 
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8
+	"k8s.io/kubernetes/pkg/api/unversioned"
9
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
10
+	"k8s.io/kubernetes/pkg/client/testing/core"
8 11
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
9 12
 	"k8s.io/kubernetes/pkg/runtime"
10 13
 
... ...
@@ -108,7 +111,7 @@ func TestGroupReaper(t *testing.T) {
108 108
 				},
109 109
 			},
110 110
 			expected: []interface{}{
111
-				ktestclient.UpdateActionImpl{ActionImpl: ktestclient.ActionImpl{Verb: "update", Resource: "securitycontextconstraints"}, Object: &kapi.SecurityContextConstraints{
111
+				core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "securitycontextconstraints"}}, Object: &kapi.SecurityContextConstraints{
112 112
 					ObjectMeta: kapi.ObjectMeta{Name: "scc-one-subject"},
113 113
 					Groups:     []string{},
114 114
 				}},
... ...
@@ -119,20 +122,24 @@ func TestGroupReaper(t *testing.T) {
119 119
 
120 120
 	for _, test := range tests {
121 121
 		tc := testclient.NewSimpleFake(test.objects...)
122
-		ktc := ktestclient.NewSimpleFake(test.objects...)
122
+		ktc := fake.NewSimpleClientset(test.objects...)
123 123
 
124 124
 		actual := []interface{}{}
125
-		reactor := func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
125
+		oreactor := func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
126
+			actual = append(actual, action)
127
+			return false, nil, nil
128
+		}
129
+		kreactor := func(action core.Action) (handled bool, ret runtime.Object, err error) {
126 130
 			actual = append(actual, action)
127 131
 			return false, nil, nil
128 132
 		}
129 133
 
130
-		tc.PrependReactor("update", "*", reactor)
131
-		tc.PrependReactor("delete", "*", reactor)
132
-		ktc.PrependReactor("update", "*", reactor)
133
-		ktc.PrependReactor("delete", "*", reactor)
134
+		tc.PrependReactor("update", "*", oreactor)
135
+		tc.PrependReactor("delete", "*", oreactor)
136
+		ktc.PrependReactor("update", "*", kreactor)
137
+		ktc.PrependReactor("delete", "*", kreactor)
134 138
 
135
-		reaper := NewGroupReaper(tc, tc, tc, ktc)
139
+		reaper := NewGroupReaper(tc, tc, tc, ktc.Core())
136 140
 		err := reaper.Stop("", test.group, 0, nil)
137 141
 		if err != nil {
138 142
 			t.Errorf("%s: unexpected error: %v", test.name, err)
... ...
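Because the Origin fake and the generated Kubernetes fake now take different action types, the reaper tests split their shared reactor into the `oreactor`/`kreactor` pair, both feeding the same slice. A sketch of just that plumbing; the Origin testclient import path is assumed from the wider test suite rather than shown in these hunks:

```go
package example

import (
	"github.com/openshift/origin/pkg/client/testclient"

	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	"k8s.io/kubernetes/pkg/client/testing/core"
	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
	"k8s.io/kubernetes/pkg/runtime"
)

// recordMutations registers one reactor per fake, mirroring the oreactor and
// kreactor pair above; every update/delete lands in the returned slice.
func recordMutations(tc *testclient.Fake, ktc *fake.Clientset) *[]interface{} {
	actual := []interface{}{}
	oreactor := func(action ktestclient.Action) (bool, runtime.Object, error) {
		actual = append(actual, action) // the Origin fake still speaks the old action type
		return false, nil, nil
	}
	kreactor := func(action core.Action) (bool, runtime.Object, error) {
		actual = append(actual, action) // the generated fake records core.Action values
		return false, nil, nil
	}
	tc.PrependReactor("update", "*", oreactor)
	tc.PrependReactor("delete", "*", oreactor)
	ktc.PrependReactor("update", "*", kreactor)
	ktc.PrependReactor("delete", "*", kreactor)
	return &actual
}
```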
@@ -6,7 +6,7 @@ import (
6 6
 	"github.com/golang/glog"
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8 8
 	kerrors "k8s.io/kubernetes/pkg/api/errors"
9
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
9
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
10 10
 	"k8s.io/kubernetes/pkg/kubectl"
11 11
 
12 12
 	"github.com/openshift/origin/pkg/client"
... ...
@@ -18,7 +18,7 @@ func NewUserReaper(
18 18
 	clusterBindingClient client.ClusterRoleBindingsInterface,
19 19
 	bindingClient client.RoleBindingsNamespacer,
20 20
 	authorizationsClient client.OAuthClientAuthorizationsInterface,
21
-	sccClient kclient.SecurityContextConstraintsInterface,
21
+	sccClient kcoreclient.SecurityContextConstraintsGetter,
22 22
 ) kubectl.Reaper {
23 23
 	return &UserReaper{
24 24
 		userClient:           userClient,
... ...
@@ -36,7 +36,7 @@ type UserReaper struct {
36 36
 	clusterBindingClient client.ClusterRoleBindingsInterface
37 37
 	bindingClient        client.RoleBindingsNamespacer
38 38
 	authorizationsClient client.OAuthClientAuthorizationsInterface
39
-	sccClient            kclient.SecurityContextConstraintsInterface
39
+	sccClient            kcoreclient.SecurityContextConstraintsGetter
40 40
 }
41 41
 
42 42
 // Stop on a reaper is actually used for deletion.  In this case, we'll delete referencing identities, clusterBindings, and bindings,
... ...
@@ -5,6 +5,9 @@ import (
5 5
 	"testing"
6 6
 
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8
+	"k8s.io/kubernetes/pkg/api/unversioned"
9
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
10
+	"k8s.io/kubernetes/pkg/client/testing/core"
8 11
 	ktestclient "k8s.io/kubernetes/pkg/client/unversioned/testclient"
9 12
 	"k8s.io/kubernetes/pkg/runtime"
10 13
 
... ...
@@ -108,7 +111,7 @@ func TestUserReaper(t *testing.T) {
108 108
 				},
109 109
 			},
110 110
 			expected: []interface{}{
111
-				ktestclient.UpdateActionImpl{ActionImpl: ktestclient.ActionImpl{Verb: "update", Resource: "securitycontextconstraints"}, Object: &kapi.SecurityContextConstraints{
111
+				core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "securitycontextconstraints"}}, Object: &kapi.SecurityContextConstraints{
112 112
 					ObjectMeta: kapi.ObjectMeta{Name: "scc-one-subject"},
113 113
 					Users:      []string{},
114 114
 				}},
... ...
@@ -204,20 +207,24 @@ func TestUserReaper(t *testing.T) {
204 204
 
205 205
 	for _, test := range tests {
206 206
 		tc := testclient.NewSimpleFake(test.objects...)
207
-		ktc := ktestclient.NewSimpleFake(test.objects...)
207
+		ktc := fake.NewSimpleClientset(test.objects...)
208 208
 
209 209
 		actual := []interface{}{}
210
-		reactor := func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
210
+		oreactor := func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
211
+			actual = append(actual, action)
212
+			return false, nil, nil
213
+		}
214
+		kreactor := func(action core.Action) (handled bool, ret runtime.Object, err error) {
211 215
 			actual = append(actual, action)
212 216
 			return false, nil, nil
213 217
 		}
214 218
 
215
-		tc.PrependReactor("update", "*", reactor)
216
-		tc.PrependReactor("delete", "*", reactor)
217
-		ktc.PrependReactor("update", "*", reactor)
218
-		ktc.PrependReactor("delete", "*", reactor)
219
+		tc.PrependReactor("update", "*", oreactor)
220
+		tc.PrependReactor("delete", "*", oreactor)
221
+		ktc.PrependReactor("update", "*", kreactor)
222
+		ktc.PrependReactor("delete", "*", kreactor)
219 223
 
220
-		reaper := NewUserReaper(tc, tc, tc, tc, tc, ktc)
224
+		reaper := NewUserReaper(tc, tc, tc, tc, tc, ktc.Core())
221 225
 		err := reaper.Stop("", test.user, 0, nil)
222 226
 		if err != nil {
223 227
 			t.Errorf("%s: unexpected error: %v", test.name, err)
... ...
@@ -6,6 +6,7 @@ import (
6 6
 	"time"
7 7
 
8 8
 	kapi "k8s.io/kubernetes/pkg/api"
9
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
9 10
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10 11
 	"k8s.io/kubernetes/pkg/fields"
11 12
 	watchapi "k8s.io/kubernetes/pkg/watch"
... ...
@@ -82,7 +83,7 @@ func mockBuild() *buildapi.Build {
82 82
 	}
83 83
 }
84 84
 
85
-func RunBuildControllerTest(t testingT, osClient *client.Client, kClient *kclient.Client) {
85
+func RunBuildControllerTest(t testingT, osClient *client.Client, kClientset *kclientset.Clientset) {
86 86
 	// Setup an error channel
87 87
 	errChan := make(chan error) // go routines will send a message on this channel if an error occurs. Once this happens the test is over
88 88
 
... ...
@@ -120,7 +121,7 @@ func RunBuildControllerTest(t testingT, osClient *client.Client, kClient *kclien
120 120
 	}()
121 121
 
122 122
 	// Watch build pods as they are created
123
-	podWatch, err := kClient.Pods(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", buildapi.GetBuildPodName(b))})
123
+	podWatch, err := kClientset.Core().Pods(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", buildapi.GetBuildPodName(b))})
124 124
 	if err != nil {
125 125
 		t.Fatal(err)
126 126
 	}
... ...
@@ -158,7 +159,7 @@ type buildControllerPodTest struct {
158 158
 	States []buildControllerPodState
159 159
 }
160 160
 
161
-func RunBuildPodControllerTest(t testingT, osClient *client.Client, kClient *kclient.Client) {
161
+func RunBuildPodControllerTest(t testingT, osClient *client.Client, kClient *kclientset.Clientset) {
162 162
 	ns := testutil.Namespace()
163 163
 	waitTime := BuildPodControllerTestWait
164 164
 
... ...
@@ -501,7 +502,7 @@ WaitLoop3:
501 501
 	}
502 502
 }
503 503
 
504
-func RunBuildDeleteTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
504
+func RunBuildDeleteTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClientset *kclientset.Clientset) {
505 505
 
506 506
 	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
507 507
 	if err != nil {
... ...
@@ -514,7 +515,7 @@ func RunBuildDeleteTest(t testingT, clusterAdminClient *client.Client, clusterAd
514 514
 		t.Fatalf("Couldn't create Build: %v", err)
515 515
 	}
516 516
 
517
-	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
517
+	podWatch, err := clusterAdminKubeClientset.Core().Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
518 518
 	if err != nil {
519 519
 		t.Fatalf("Couldn't subscribe to Pods %v", err)
520 520
 	}
... ...
@@ -566,7 +567,7 @@ func waitForWatchType(t testingT, name string, w watchapi.Interface, expect watc
566 566
 	return nil
567 567
 }
568 568
 
569
-func RunBuildRunningPodDeleteTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
569
+func RunBuildRunningPodDeleteTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClientset *kclientset.Clientset) {
570 570
 
571 571
 	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
572 572
 	if err != nil {
... ...
@@ -579,7 +580,7 @@ func RunBuildRunningPodDeleteTest(t testingT, clusterAdminClient *client.Client,
579 579
 		t.Fatalf("Couldn't create Build: %v", err)
580 580
 	}
581 581
 
582
-	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
582
+	podWatch, err := clusterAdminKubeClientset.Core().Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
583 583
 	if err != nil {
584 584
 		t.Fatalf("Couldn't subscribe to Pods %v", err)
585 585
 	}
... ...
@@ -622,7 +623,7 @@ func RunBuildRunningPodDeleteTest(t testingT, clusterAdminClient *client.Client,
622 622
 		t.Fatalf("expected build status to be marked pending, but was marked %s", newBuild.Status.Phase)
623 623
 	}
624 624
 
625
-	clusterAdminKubeClient.Pods(testutil.Namespace()).Delete(buildapi.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0))
625
+	clusterAdminKubeClientset.Core().Pods(testutil.Namespace()).Delete(buildapi.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0))
626 626
 	event = waitForWatch(t, "build updated to error", buildWatch)
627 627
 	if e, a := watchapi.Modified, event.Type; e != a {
628 628
 		t.Fatalf("expected watch event type %s, got %s", e, a)
... ...
@@ -633,7 +634,7 @@ func RunBuildRunningPodDeleteTest(t testingT, clusterAdminClient *client.Client,
633 633
 	}
634 634
 }
635 635
 
636
-func RunBuildCompletePodDeleteTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
636
+func RunBuildCompletePodDeleteTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClientset *kclientset.Clientset) {
637 637
 
638 638
 	buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
639 639
 	if err != nil {
... ...
@@ -646,7 +647,7 @@ func RunBuildCompletePodDeleteTest(t testingT, clusterAdminClient *client.Client
646 646
 		t.Fatalf("Couldn't create Build: %v", err)
647 647
 	}
648 648
 
649
-	podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
649
+	podWatch, err := clusterAdminKubeClientset.Core().Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion})
650 650
 	if err != nil {
651 651
 		t.Fatalf("Couldn't subscribe to Pods %v", err)
652 652
 	}
... ...
@@ -686,7 +687,7 @@ func RunBuildCompletePodDeleteTest(t testingT, clusterAdminClient *client.Client
686 686
 		t.Fatalf("expected build status to be marked complete, but was marked %s", newBuild.Status.Phase)
687 687
 	}
688 688
 
689
-	clusterAdminKubeClient.Pods(testutil.Namespace()).Delete(buildapi.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0))
689
+	clusterAdminKubeClientset.Core().Pods(testutil.Namespace()).Delete(buildapi.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0))
690 690
 	time.Sleep(10 * time.Second)
691 691
 	newBuild, err = clusterAdminClient.Builds(testutil.Namespace()).Get(newBuild.Name)
692 692
 	if err != nil {
... ...
@@ -697,7 +698,7 @@ func RunBuildCompletePodDeleteTest(t testingT, clusterAdminClient *client.Client
697 697
 	}
698 698
 }
699 699
 
700
-func RunBuildConfigChangeControllerTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) {
700
+func RunBuildConfigChangeControllerTest(t testingT, clusterAdminClient *client.Client, clusterAdminKubeClientset *kclientset.Clientset) {
701 701
 	config := configChangeBuildConfig()
702 702
 	created, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(config)
703 703
 	if err != nil {
... ...
@@ -105,7 +105,7 @@ Common functions for extended tests are located in `./hack/util.sh`. Environment
105 105
 CLI interface
106 106
 -------------
107 107
 
108
-In order to be able to call the OpenShift CLI and Kubernetes and OpenShift REST clients and simulate the OpenShift `oc` command in the test suite, first we need to create an instance of the CLI, in the top-level Ginkgo describe container.
108
+In order to call the OpenShift CLI and the Kubernetes and OpenShift clients, and to simulate the OpenShift `oc` command in the test suite, we first need to create an instance of the CLI in the top-level Ginkgo describe container.
109 109
 The top-level describe container should also specify the bucket to which the test belongs and a short test description. Other globally accessible variables (e.g. fixtures) can be declared as well.
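For orientation, the following is a minimal sketch of such a top-level describe container. The package name, bucket tag, test description, CLI project name, and fixture path are assumptions made purely for illustration, not fixtures from this repository; the repository's own example follows.

```go
package example // assumed package name, for illustration only

import (
	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("[builds][Slow] illustrative top-level describe container", func() {
	defer g.GinkgoRecover()

	var (
		// hypothetical fixture, declared here so every spec below can reach it
		exampleFixture = exutil.FixturePath("testdata", "example-build.json")
		// CLI instance used to run `oc` commands and to reach the Kubernetes and OpenShift clients
		oc = exutil.NewCLI("example-bucket", exutil.KubeConfigPath())
	)

	g.It("should create the fixture through the CLI instance", func() {
		err := oc.Run("create").Args("-f", exampleFixture).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
	})
})
```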
110 110
 
111 111
 ```go
... ...
@@ -20,7 +20,7 @@ var _ = g.Describe("[builds][Slow] builds should have deadlines", func() {
20 20
 
21 21
 	g.JustBeforeEach(func() {
22 22
 		g.By("waiting for builder service account")
23
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
23
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
24 24
 		o.Expect(err).NotTo(o.HaveOccurred())
25 25
 	})
26 26
 
... ...
@@ -40,7 +40,7 @@ var _ = g.Describe("[builds][Slow] builds should have deadlines", func() {
40 40
 			o.Expect(br.Build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseFailed)) // the build should have failed
41 41
 
42 42
 			g.By("verifying the build pod status")
43
-			pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(buildapi.GetBuildPodName(br.Build))
43
+			pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(buildapi.GetBuildPodName(br.Build))
44 44
 			o.Expect(err).NotTo(o.HaveOccurred())
45 45
 			o.Expect(pod.Status.Phase).Should(o.BeEquivalentTo(kapi.PodFailed))
46 46
 			o.Expect(pod.Status.Reason).Should(o.ContainSubstring("DeadlineExceeded"))
... ...
@@ -64,7 +64,7 @@ var _ = g.Describe("[builds][Slow] builds should have deadlines", func() {
64 64
 			o.Expect(br.Build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseFailed)) // the build should have failed
65 65
 
66 66
 			g.By("verifying the build pod status")
67
-			pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(buildapi.GetBuildPodName(br.Build))
67
+			pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(buildapi.GetBuildPodName(br.Build))
68 68
 			o.Expect(err).NotTo(o.HaveOccurred())
69 69
 			o.Expect(pod.Status.Phase).Should(o.BeEquivalentTo(kapi.PodFailed))
70 70
 			o.Expect(pod.Status.Reason).Should(o.ContainSubstring("DeadlineExceeded"))
... ...
@@ -21,32 +21,32 @@ var _ = g.Describe("[bldcompat][Slow][Compatibility] build controller", func() {
21 21
 
22 22
 	g.Describe("RunBuildControllerTest", func() {
23 23
 		g.It("should succeed", func() {
24
-			build.RunBuildControllerTest(g.GinkgoT(), oc.AdminREST(), oc.AdminKubeREST())
24
+			build.RunBuildControllerTest(g.GinkgoT(), oc.AdminClient(), oc.AdminKubeClient())
25 25
 		})
26 26
 	})
27 27
 	g.Describe("RunBuildPodControllerTest", func() {
28 28
 		g.It("should succeed", func() {
29
-			build.RunBuildPodControllerTest(g.GinkgoT(), oc.AdminREST(), oc.AdminKubeREST())
29
+			build.RunBuildPodControllerTest(g.GinkgoT(), oc.AdminClient(), oc.AdminKubeClient())
30 30
 		})
31 31
 	})
32 32
 	g.Describe("RunImageChangeTriggerTest [SkipPrevControllers]", func() {
33 33
 		g.It("should succeed", func() {
34
-			build.RunImageChangeTriggerTest(g.GinkgoT(), oc.AdminREST())
34
+			build.RunImageChangeTriggerTest(g.GinkgoT(), oc.AdminClient())
35 35
 		})
36 36
 	})
37 37
 	g.Describe("RunBuildDeleteTest", func() {
38 38
 		g.It("should succeed", func() {
39
-			build.RunBuildDeleteTest(g.GinkgoT(), oc.AdminREST(), oc.AdminKubeREST())
39
+			build.RunBuildDeleteTest(g.GinkgoT(), oc.AdminClient(), oc.AdminKubeClient())
40 40
 		})
41 41
 	})
42 42
 	g.Describe("RunBuildRunningPodDeleteTest", func() {
43 43
 		g.It("should succeed", func() {
44
-			build.RunBuildRunningPodDeleteTest(g.GinkgoT(), oc.AdminREST(), oc.AdminKubeREST())
44
+			build.RunBuildRunningPodDeleteTest(g.GinkgoT(), oc.AdminClient(), oc.AdminKubeClient())
45 45
 		})
46 46
 	})
47 47
 	g.Describe("RunBuildConfigChangeControllerTest", func() {
48 48
 		g.It("should succeed", func() {
49
-			build.RunBuildConfigChangeControllerTest(g.GinkgoT(), oc.AdminREST(), oc.AdminKubeREST())
49
+			build.RunBuildConfigChangeControllerTest(g.GinkgoT(), oc.AdminClient(), oc.AdminKubeClient())
50 50
 		})
51 51
 	})
52 52
 })
... ...
@@ -23,7 +23,7 @@ var _ = g.Describe("[builds][pullsecret][Conformance] docker build using a pull
23 23
 
24 24
 	g.JustBeforeEach(func() {
25 25
 		g.By("waiting for builder service account")
26
-		err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace()))
26
+		err := exutil.WaitForBuilderAccount(oc.AdminKubeClient().Core().ServiceAccounts(oc.Namespace()))
27 27
 		o.Expect(err).NotTo(o.HaveOccurred())
28 28
 	})
29 29
 
... ...
@@ -23,7 +23,7 @@ var _ = g.Describe("[builds][quota][Slow] docker build with a quota", func() {
23 23
 
24 24
 	g.JustBeforeEach(func() {
25 25
 		g.By("waiting for builder service account")
26
-		err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace()))
26
+		err := exutil.WaitForBuilderAccount(oc.AdminKubeClient().Core().ServiceAccounts(oc.Namespace()))
27 27
 		o.Expect(err).NotTo(o.HaveOccurred())
28 28
 	})
29 29
 
... ...
@@ -26,7 +26,7 @@ USER 1001
26 26
 
27 27
 	g.JustBeforeEach(func() {
28 28
 		g.By("waiting for builder service account")
29
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
29
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
30 30
 		o.Expect(err).NotTo(o.HaveOccurred())
31 31
 		oc.SetOutputDir(exutil.TestContext.OutputDir)
32 32
 	})
... ...
@@ -38,7 +38,7 @@ USER 1001
38 38
 			o.Expect(err).NotTo(o.HaveOccurred())
39 39
 
40 40
 			g.By("starting a test build")
41
-			bc, err := oc.REST().BuildConfigs(oc.Namespace()).Get("jenkins")
41
+			bc, err := oc.Client().BuildConfigs(oc.Namespace()).Get("jenkins")
42 42
 			o.Expect(err).NotTo(o.HaveOccurred())
43 43
 			o.Expect(bc.Spec.Source.Git).To(o.BeNil())
44 44
 			o.Expect(bc.Spec.Source.Dockerfile).NotTo(o.BeNil())
... ...
@@ -46,7 +46,7 @@ USER 1001
46 46
 
47 47
 			buildName := "jenkins-1"
48 48
 			g.By("expecting the Dockerfile build is in Complete phase")
49
-			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
49
+			err = exutil.WaitForABuild(oc.Client().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
50 50
 			//debug for failures on jenkins
51 51
 			if err != nil {
52 52
 				exutil.DumpBuildLogs("jenkins", oc)
... ...
@@ -54,7 +54,7 @@ USER 1001
54 54
 			o.Expect(err).NotTo(o.HaveOccurred())
55 55
 
56 56
 			g.By("getting the build Docker image reference from ImageStream")
57
-			image, err := oc.REST().ImageStreamTags(oc.Namespace()).Get("jenkins", "custom")
57
+			image, err := oc.Client().ImageStreamTags(oc.Namespace()).Get("jenkins", "custom")
58 58
 			o.Expect(err).NotTo(o.HaveOccurred())
59 59
 			o.Expect(image.Image.DockerImageMetadata.Config.User).To(o.Equal("1001"))
60 60
 		})
... ...
@@ -65,7 +65,7 @@ USER 1001
65 65
 			o.Expect(err).NotTo(o.HaveOccurred())
66 66
 
67 67
 			g.By("starting a test build")
68
-			bc, err := oc.REST().BuildConfigs(oc.Namespace()).Get("centos")
68
+			bc, err := oc.Client().BuildConfigs(oc.Namespace()).Get("centos")
69 69
 			o.Expect(err).NotTo(o.HaveOccurred())
70 70
 			o.Expect(bc.Spec.Source.Git).To(o.BeNil())
71 71
 			o.Expect(bc.Spec.Source.Dockerfile).NotTo(o.BeNil())
... ...
@@ -75,7 +75,7 @@ USER 1001
75 75
 
76 76
 			buildName := "centos-1"
77 77
 			g.By("expecting the Dockerfile build is in Complete phase")
78
-			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
78
+			err = exutil.WaitForABuild(oc.Client().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
79 79
 			//debug for failures on jenkins
80 80
 			if err != nil {
81 81
 				exutil.DumpBuildLogs("centos", oc)
... ...
@@ -83,12 +83,12 @@ USER 1001
83 83
 			o.Expect(err).NotTo(o.HaveOccurred())
84 84
 
85 85
 			g.By("getting the built Docker image reference from ImageStream")
86
-			image, err := oc.REST().ImageStreamTags(oc.Namespace()).Get("centos", "latest")
86
+			image, err := oc.Client().ImageStreamTags(oc.Namespace()).Get("centos", "latest")
87 87
 			o.Expect(err).NotTo(o.HaveOccurred())
88 88
 			o.Expect(image.Image.DockerImageMetadata.Config.User).To(o.Equal("1001"))
89 89
 
90 90
 			g.By("checking for the imported tag")
91
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("centos", "7")
91
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("centos", "7")
92 92
 			o.Expect(err).NotTo(o.HaveOccurred())
93 93
 		})
94 94
 	})
... ...
@@ -148,7 +148,7 @@ var _ = g.Describe("[LocalNode][builds] forcePull should affect pulling builder
148 148
 
149 149
 		g.JustBeforeEach(func() {
150 150
 			g.By("waiting for builder service account")
151
-			err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace()))
151
+			err := exutil.WaitForBuilderAccount(oc.AdminKubeClient().Core().ServiceAccounts(oc.Namespace()))
152 152
 			o.Expect(err).NotTo(o.HaveOccurred())
153 153
 		})
154 154
 
... ...
@@ -48,7 +48,7 @@ var _ = g.Describe("[builds][Slow] can use private repositories as build input",
48 48
 
49 49
 	g.JustBeforeEach(func() {
50 50
 		g.By("waiting for builder service account")
51
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
51
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
52 52
 		o.Expect(err).NotTo(o.HaveOccurred())
53 53
 	})
54 54
 
... ...
@@ -69,7 +69,7 @@ var _ = g.Describe("[builds][Slow] can use private repositories as build input",
69 69
 		o.Expect(err).NotTo(o.HaveOccurred())
70 70
 
71 71
 		g.By("expecting the deployment of the gitserver to be in the Complete phase")
72
-		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), gitServerDeploymentConfigName)
72
+		err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), gitServerDeploymentConfigName)
73 73
 		o.Expect(err).NotTo(o.HaveOccurred())
74 74
 
75 75
 		sourceSecretName := secretFunc()
... ...
@@ -86,7 +86,7 @@ var _ = g.Describe("[builds][Slow] can use private repositories as build input",
86 86
 		o.Expect(err).NotTo(o.HaveOccurred())
87 87
 
88 88
 		g.By(fmt.Sprintf("expecting build %s to complete successfully", buildName))
89
-		err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
89
+		err = exutil.WaitForABuild(oc.Client().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
90 90
 		if err != nil {
91 91
 			exutil.DumpBuildLogs(buildConfigName, oc)
92 92
 		}
... ...
@@ -118,7 +118,7 @@ var _ = g.Describe("[builds][Slow] can use private repositories as build input",
118 118
 				o.Expect(err).NotTo(o.HaveOccurred())
119 119
 
120 120
 				g.By("getting the token secret name for the builder service account")
121
-				sa, err := oc.KubeREST().ServiceAccounts(oc.Namespace()).Get("builder")
121
+				sa, err := oc.KubeClient().Core().ServiceAccounts(oc.Namespace()).Get("builder")
122 122
 				o.Expect(err).NotTo(o.HaveOccurred())
123 123
 				for _, s := range sa.Secrets {
124 124
 					if strings.Contains(s.Name, "token") {
... ...
@@ -20,11 +20,11 @@ var _ = g.Describe("[builds][Slow] build can have Docker image source", func() {
20 20
 
21 21
 	g.JustBeforeEach(func() {
22 22
 		g.By("waiting for builder service account")
23
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
23
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
24 24
 		o.Expect(err).NotTo(o.HaveOccurred())
25 25
 
26 26
 		g.By("waiting for imagestreams to be imported")
27
-		err = exutil.WaitForAnImageStream(oc.AdminREST().ImageStreams("openshift"), "jenkins", exutil.CheckImageStreamLatestTagPopulatedFn, exutil.CheckImageStreamTagNotFoundFn)
27
+		err = exutil.WaitForAnImageStream(oc.AdminClient().ImageStreams("openshift"), "jenkins", exutil.CheckImageStreamLatestTagPopulatedFn, exutil.CheckImageStreamTagNotFoundFn)
28 28
 		o.Expect(err).NotTo(o.HaveOccurred())
29 29
 	})
30 30
 
... ...
@@ -39,10 +39,10 @@ var _ = g.Describe("[builds][Slow] build can have Docker image source", func() {
39 39
 			br.AssertSuccess()
40 40
 
41 41
 			g.By("expecting the pod to deploy successfully")
42
-			pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), imageSourceLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
42
+			pods, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), imageSourceLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
43 43
 			o.Expect(err).NotTo(o.HaveOccurred())
44 44
 			o.Expect(len(pods)).To(o.Equal(1))
45
-			pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
45
+			pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(pods[0])
46 46
 			o.Expect(err).NotTo(o.HaveOccurred())
47 47
 
48 48
 			g.By("expecting the pod to contain the file from the input image")
... ...
@@ -62,10 +62,10 @@ var _ = g.Describe("[builds][Slow] build can have Docker image source", func() {
62 62
 			br.AssertSuccess()
63 63
 
64 64
 			g.By("expect the pod to deploy successfully")
65
-			pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), imageDockerLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
65
+			pods, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), imageDockerLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
66 66
 			o.Expect(err).NotTo(o.HaveOccurred())
67 67
 			o.Expect(len(pods)).To(o.Equal(1))
68
-			pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
68
+			pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(pods[0])
69 69
 			o.Expect(err).NotTo(o.HaveOccurred())
70 70
 
71 71
 			g.By("expecting the pod to contain the file from the input image")
... ...
@@ -21,7 +21,7 @@ var _ = g.Describe("[builds][Slow] result image should have proper labels set",
21 21
 
22 22
 	g.JustBeforeEach(func() {
23 23
 		g.By("waiting for builder service account")
24
-		err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace()))
24
+		err := exutil.WaitForBuilderAccount(oc.AdminKubeClient().Core().ServiceAccounts(oc.Namespace()))
25 25
 		o.Expect(err).NotTo(o.HaveOccurred())
26 26
 	})
27 27
 
... ...
@@ -42,10 +42,10 @@ var _ = g.Describe("[builds][Slow] result image should have proper labels set",
42 42
 			br.AssertSuccess()
43 43
 
44 44
 			g.By("getting the Docker image reference from ImageStream")
45
-			imageRef, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
45
+			imageRef, err := exutil.GetDockerImageReference(oc.Client().ImageStreams(oc.Namespace()), "test", "latest")
46 46
 			o.Expect(err).NotTo(o.HaveOccurred())
47 47
 
48
-			imageLabels, err := eximages.GetImageLabels(oc.REST().ImageStreamImages(oc.Namespace()), "test", imageRef)
48
+			imageLabels, err := eximages.GetImageLabels(oc.Client().ImageStreamImages(oc.Namespace()), "test", imageRef)
49 49
 			o.Expect(err).NotTo(o.HaveOccurred())
50 50
 
51 51
 			g.By("inspecting the new image for proper Docker labels")
... ...
@@ -71,10 +71,10 @@ var _ = g.Describe("[builds][Slow] result image should have proper labels set",
71 71
 			br.AssertSuccess()
72 72
 
73 73
 			g.By("getting the Docker image reference from ImageStream")
74
-			imageRef, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
74
+			imageRef, err := exutil.GetDockerImageReference(oc.Client().ImageStreams(oc.Namespace()), "test", "latest")
75 75
 			o.Expect(err).NotTo(o.HaveOccurred())
76 76
 
77
-			imageLabels, err := eximages.GetImageLabels(oc.REST().ImageStreamImages(oc.Namespace()), "test", imageRef)
77
+			imageLabels, err := eximages.GetImageLabels(oc.Client().ImageStreamImages(oc.Namespace()), "test", imageRef)
78 78
 			o.Expect(err).NotTo(o.HaveOccurred())
79 79
 
80 80
 			g.By("inspecting the new image for proper Docker labels")
... ...
@@ -19,7 +19,7 @@ var _ = g.Describe("[builds] build with empty source", func() {
19 19
 
20 20
 	g.JustBeforeEach(func() {
21 21
 		g.By("waiting for builder service account")
22
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
22
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
23 23
 		o.Expect(err).NotTo(o.HaveOccurred())
24 24
 		oc.Run("create").Args("-f", buildFixture).Execute()
25 25
 	})
... ...
@@ -31,7 +31,7 @@ var _ = g.Describe("[builds] build with empty source", func() {
31 31
 			br.AssertSuccess()
32 32
 
33 33
 			g.By(fmt.Sprintf("verifying the status of %q", br.BuildPath))
34
-			build, err := oc.REST().Builds(oc.Namespace()).Get(br.Build.Name)
34
+			build, err := oc.Client().Builds(oc.Namespace()).Get(br.Build.Name)
35 35
 			o.Expect(err).NotTo(o.HaveOccurred())
36 36
 			o.Expect(build.Spec.Source.Dockerfile).To(o.BeNil())
37 37
 			o.Expect(build.Spec.Source.Git).To(o.BeNil())
... ...
@@ -18,7 +18,7 @@ var _ = g.Describe("[builds][Slow] openshift pipeline build", func() {
18 18
 	)
19 19
 	g.JustBeforeEach(func() {
20 20
 		g.By("waiting for builder service account")
21
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
21
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
22 22
 		o.Expect(err).NotTo(o.HaveOccurred())
23 23
 	})
24 24
 	g.Context("Manual deploy the jenkins and trigger a jenkins pipeline build", func() {
... ...
@@ -31,7 +31,7 @@ var _ = g.Describe("[builds][Slow] openshift pipeline build", func() {
31 31
 
32 32
 			// wait for the jenkins deployment to complete
33 33
 			g.By("waiting the jenkins service deployed")
34
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins", oc)
34
+			err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "jenkins", oc)
35 35
 			if err != nil {
36 36
 				exutil.DumpDeploymentLogs("jenkins", oc)
37 37
 			}
... ...
@@ -20,7 +20,7 @@ var _ = g.Describe("[builds][Slow] the s2i build should support proxies", func()
20 20
 
21 21
 	g.JustBeforeEach(func() {
22 22
 		g.By("waiting for builder service account")
23
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
23
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
24 24
 		o.Expect(err).NotTo(o.HaveOccurred())
25 25
 		oc.Run("create").Args("-f", buildFixture).Execute()
26 26
 	})
... ...
@@ -20,7 +20,7 @@ var _ = g.Describe("[builds][Conformance] remove all builds when build configura
20 20
 
21 21
 	g.JustBeforeEach(func() {
22 22
 		g.By("waiting for builder service account")
23
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
23
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
24 24
 		o.Expect(err).NotTo(o.HaveOccurred())
25 25
 		oc.Run("create").Args("-f", buildFixture).Execute()
26 26
 	})
... ...
@@ -18,7 +18,7 @@ var _ = g.Describe("[builds] build have source revision metadata", func() {
18 18
 
19 19
 	g.JustBeforeEach(func() {
20 20
 		g.By("waiting for builder service account")
21
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
21
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
22 22
 		o.Expect(err).NotTo(o.HaveOccurred())
23 23
 		oc.Run("create").Args("-f", buildFixture).Execute()
24 24
 	})
... ...
@@ -30,7 +30,7 @@ var _ = g.Describe("[builds] build have source revision metadata", func() {
30 30
 			br.AssertSuccess()
31 31
 
32 32
 			g.By(fmt.Sprintf("verifying the status of %q", br.BuildPath))
33
-			build, err := oc.REST().Builds(oc.Namespace()).Get(br.Build.Name)
33
+			build, err := oc.Client().Builds(oc.Namespace()).Get(br.Build.Name)
34 34
 			o.Expect(err).NotTo(o.HaveOccurred())
35 35
 			o.Expect(build.Spec.Revision).NotTo(o.BeNil())
36 36
 			o.Expect(build.Spec.Revision.Git).NotTo(o.BeNil())
... ...
@@ -24,7 +24,7 @@ var _ = g.Describe("[builds][Slow] using build configuration runPolicy", func()
24 24
 
25 25
 	g.JustBeforeEach(func() {
26 26
 		g.By("waiting for builder service account")
27
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
27
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
28 28
 		o.Expect(err).NotTo(o.HaveOccurred())
29 29
 		// Create all fixtures
30 30
 		oc.Run("create").Args("-f", exutil.FixturePath("testdata", "run_policy")).Execute()
... ...
@@ -39,7 +39,7 @@ var _ = g.Describe("[builds][Slow] using build configuration runPolicy", func()
39 39
 			)
40 40
 			bcName := "sample-parallel-build"
41 41
 
42
-			buildWatch, err := oc.REST().Builds(oc.Namespace()).Watch(kapi.ListOptions{
42
+			buildWatch, err := oc.Client().Builds(oc.Namespace()).Watch(kapi.ListOptions{
43 43
 				LabelSelector: buildutil.BuildConfigSelector(bcName),
44 44
 			})
45 45
 			defer buildWatch.Stop()
... ...
@@ -86,7 +86,7 @@ var _ = g.Describe("[builds][Slow] using build configuration runPolicy", func()
86 86
 				// TODO: This might introduce flakes in case the first build complete
87 87
 				// sooner or fail.
88 88
 				if build.Status.Phase == buildapi.BuildPhasePending {
89
-					c := buildclient.NewOSClientBuildClient(oc.REST())
89
+					c := buildclient.NewOSClientBuildClient(oc.Client())
90 90
 					firstBuildRunning := false
91 91
 					_, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b buildapi.Build) bool {
92 92
 						if b.Name == startedBuilds[0] && b.Status.Phase == buildapi.BuildPhaseRunning {
... ...
@@ -125,7 +125,7 @@ var _ = g.Describe("[builds][Slow] using build configuration runPolicy", func()
125 125
 				startedBuilds = append(startedBuilds, strings.TrimSpace(strings.Split(stdout, "/")[1]))
126 126
 			}
127 127
 
128
-			buildWatch, err := oc.REST().Builds(oc.Namespace()).Watch(kapi.ListOptions{
128
+			buildWatch, err := oc.Client().Builds(oc.Namespace()).Watch(kapi.ListOptions{
129 129
 				LabelSelector: buildutil.BuildConfigSelector(bcName),
130 130
 			})
131 131
 			defer buildWatch.Stop()
... ...
@@ -142,7 +142,7 @@ var _ = g.Describe("[builds][Slow] using build configuration runPolicy", func()
142 142
 					}
143 143
 					// Verify there are no other running or pending builds than this
144 144
 					// build as serial build always runs alone.
145
-					c := buildclient.NewOSClientBuildClient(oc.REST())
145
+					c := buildclient.NewOSClientBuildClient(oc.Client())
146 146
 					builds, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b buildapi.Build) bool {
147 147
 						if b.Name == build.Name {
148 148
 							return false
... ...
@@ -179,7 +179,7 @@ var _ = g.Describe("[builds][Slow] using build configuration runPolicy", func()
179 179
 
180 180
 			bcName := "sample-serial-latest-only-build"
181 181
 			buildVerified := map[string]bool{}
182
-			buildWatch, err := oc.REST().Builds(oc.Namespace()).Watch(kapi.ListOptions{
182
+			buildWatch, err := oc.Client().Builds(oc.Namespace()).Watch(kapi.ListOptions{
183 183
 				LabelSelector: buildutil.BuildConfigSelector(bcName),
184 184
 			})
185 185
 			defer buildWatch.Stop()
... ...
@@ -233,7 +233,7 @@ var _ = g.Describe("[builds][Slow] using build configuration runPolicy", func()
233 233
 					}
234 234
 					// Verify there are no other running or pending builds than this
235 235
 					// build as serial build always runs alone.
236
-					c := buildclient.NewOSClientBuildClient(oc.REST())
236
+					c := buildclient.NewOSClientBuildClient(oc.Client())
237 237
 					builds, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b buildapi.Build) bool {
238 238
 						e2e.Logf("[%s] build %s is %s", build.Name, b.Name, b.Status.Phase)
239 239
 						if b.Name == build.Name {
... ...
@@ -19,7 +19,7 @@ var _ = g.Describe("[builds][Slow] Capabilities should be dropped for s2i builde
19 19
 
20 20
 	g.JustBeforeEach(func() {
21 21
 		g.By("waiting for builder service account")
22
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
22
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
23 23
 		o.Expect(err).NotTo(o.HaveOccurred())
24 24
 	})
25 25
 
... ...
@@ -28,7 +28,7 @@ var _ = g.Describe("[builds][Slow] s2i build with environment file in sources",
28 28
 
29 29
 	g.JustBeforeEach(func() {
30 30
 		g.By("waiting for builder service account")
31
-		err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace()))
31
+		err := exutil.WaitForBuilderAccount(oc.AdminKubeClient().Core().ServiceAccounts(oc.Namespace()))
32 32
 		o.Expect(err).NotTo(o.HaveOccurred())
33 33
 	})
34 34
 
... ...
@@ -49,7 +49,7 @@ var _ = g.Describe("[builds][Slow] s2i build with environment file in sources",
49 49
 			br.AssertSuccess()
50 50
 
51 51
 			g.By("getting the Docker image reference from ImageStream")
52
-			imageName, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
52
+			imageName, err := exutil.GetDockerImageReference(oc.Client().ImageStreams(oc.Namespace()), "test", "latest")
53 53
 			o.Expect(err).NotTo(o.HaveOccurred())
54 54
 
55 55
 			g.By("instantiating a pod and service with the new image")
... ...
@@ -24,7 +24,7 @@ var _ = g.Describe("[builds][Slow] s2i extended build", func() {
24 24
 
25 25
 	g.JustBeforeEach(func() {
26 26
 		g.By("waiting for builder service account")
27
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
27
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
28 28
 		o.Expect(err).NotTo(o.HaveOccurred())
29 29
 
30 30
 		// we have to wait until image stream tag will be available, otherwise
... ...
@@ -27,7 +27,7 @@ var _ = g.Describe("[builds][Slow] incremental s2i build", func() {
27 27
 
28 28
 	g.JustBeforeEach(func() {
29 29
 		g.By("waiting for builder service account")
30
-		err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace()))
30
+		err := exutil.WaitForBuilderAccount(oc.AdminKubeClient().Core().ServiceAccounts(oc.Namespace()))
31 31
 		o.Expect(err).NotTo(o.HaveOccurred())
32 32
 	})
33 33
 
... ...
@@ -48,7 +48,7 @@ var _ = g.Describe("[builds][Slow] incremental s2i build", func() {
48 48
 			br2.AssertSuccess()
49 49
 
50 50
 			g.By("getting the Docker image reference from ImageStream")
51
-			imageName, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "internal-image", "latest")
51
+			imageName, err := exutil.GetDockerImageReference(oc.Client().ImageStreams(oc.Namespace()), "internal-image", "latest")
52 52
 			o.Expect(err).NotTo(o.HaveOccurred())
53 53
 
54 54
 			g.By("instantiating a pod and service with the new image")
... ...
@@ -23,7 +23,7 @@ var _ = g.Describe("[builds][Conformance] s2i build with a quota", func() {
23 23
 
24 24
 	g.JustBeforeEach(func() {
25 25
 		g.By("waiting for builder service account")
26
-		err := exutil.WaitForBuilderAccount(oc.AdminKubeREST().ServiceAccounts(oc.Namespace()))
26
+		err := exutil.WaitForBuilderAccount(oc.AdminKubeClient().Core().ServiceAccounts(oc.Namespace()))
27 27
 		o.Expect(err).NotTo(o.HaveOccurred())
28 28
 	})
29 29
 
... ...
@@ -46,7 +46,7 @@ var _ = g.Describe("[builds][Slow] can use build secrets", func() {
46 46
 			br.AssertSuccess()
47 47
 
48 48
 			g.By("getting the image name")
49
-			image, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
49
+			image, err := exutil.GetDockerImageReference(oc.Client().ImageStreams(oc.Namespace()), "test", "latest")
50 50
 			o.Expect(err).NotTo(o.HaveOccurred())
51 51
 
52 52
 			g.By("verifying the build secrets were available during build and not present in the output image")
... ...
@@ -81,7 +81,7 @@ var _ = g.Describe("[builds][Slow] can use build secrets", func() {
81 81
 			br.AssertSuccess()
82 82
 
83 83
 			g.By("getting the image name")
84
-			image, err := exutil.GetDockerImageReference(oc.REST().ImageStreams(oc.Namespace()), "test", "latest")
84
+			image, err := exutil.GetDockerImageReference(oc.Client().ImageStreams(oc.Namespace()), "test", "latest")
85 85
 			o.Expect(err).NotTo(o.HaveOccurred())
86 86
 
87 87
 			g.By("verifying the secrets are present in container output")
... ...
@@ -28,7 +28,7 @@ var _ = g.Describe("[builds][Slow] starting a build using CLI", func() {
28 28
 
29 29
 	g.JustBeforeEach(func() {
30 30
 		g.By("waiting for builder service account")
31
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
31
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
32 32
 		o.Expect(err).NotTo(o.HaveOccurred())
33 33
 		oc.Run("create").Args("-f", buildFixture).Execute()
34 34
 	})
... ...
@@ -44,7 +44,7 @@ var _ = g.Describe("[cli][Slow] can use rsync to upload files to pods", func() {
44 44
 
45 45
 		g.By("Getting the jenkins pod name")
46 46
 		selector, _ := labels.Parse("name=jenkins")
47
-		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: selector})
47
+		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: selector})
48 48
 		o.Expect(err).NotTo(o.HaveOccurred())
49 49
 		o.Expect(len(pods.Items)).ToNot(o.BeZero())
50 50
 		podName = pods.Items[0].Name
... ...
@@ -108,7 +108,7 @@ var _ = g.Describe("deploymentconfigs", func() {
108 108
 							if rand.Float32() < 0.5 {
109 109
 								options = nil
110 110
 							}
111
-							if err := oc.KubeREST().Pods(oc.Namespace()).Delete(pod.Name, options); err != nil {
111
+							if err := oc.KubeClient().Core().Pods(oc.Namespace()).Delete(pod.Name, options); err != nil {
112 112
 								e2e.Logf("%02d: unable to delete deployer pod %q: %v", i, pod.Name, err)
113 113
 							}
114 114
 						}
... ...
@@ -222,7 +222,7 @@ var _ = g.Describe("deploymentconfigs", func() {
222 222
 
223 223
 			g.By("ensuring no scale up of the deployment happens")
224 224
 			wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
225
-				rc, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).Get("deployment-test-1")
225
+				rc, err := oc.KubeClient().Core().ReplicationControllers(oc.Namespace()).Get("deployment-test-1")
226 226
 				o.Expect(err).NotTo(o.HaveOccurred())
227 227
 				o.Expect(rc.Spec.Replicas).Should(o.BeEquivalentTo(0))
228 228
 				o.Expect(rc.Status.Replicas).Should(o.BeEquivalentTo(0))
... ...
@@ -230,7 +230,7 @@ var _ = g.Describe("deploymentconfigs", func() {
230 230
 			})
231 231
 
232 232
 			g.By("verifying the scale is updated on the deployment config")
233
-			config, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get("deployment-test")
233
+			config, err := oc.Client().DeploymentConfigs(oc.Namespace()).Get("deployment-test")
234 234
 			o.Expect(err).NotTo(o.HaveOccurred())
235 235
 			o.Expect(config.Spec.Replicas).Should(o.BeEquivalentTo(1))
236 236
 			o.Expect(config.Spec.Test).Should(o.BeTrue())
... ...
@@ -404,11 +404,11 @@ var _ = g.Describe("deploymentconfigs", func() {
404 404
 			g.By("waiting for the first rollout to complete")
405 405
 			o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred())
406 406
 
407
-			dc, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get(name)
407
+			dc, err := oc.Client().DeploymentConfigs(oc.Namespace()).Get(name)
408 408
 			o.Expect(err).NotTo(o.HaveOccurred())
409 409
 
410 410
 			g.By("updating the deployment config in order to trigger a new rollout")
411
-			_, err = client.UpdateConfigWithRetries(oc.REST(), oc.Namespace(), name, func(update *deployapi.DeploymentConfig) {
411
+			_, err = client.UpdateConfigWithRetries(oc.Client(), oc.Namespace(), name, func(update *deployapi.DeploymentConfig) {
412 412
 				one := int64(1)
413 413
 				update.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
414 414
 			})
... ...
@@ -416,7 +416,7 @@ var _ = g.Describe("deploymentconfigs", func() {
416 416
 			// Wait for latestVersion=2 to be surfaced in the API
417 417
 			latestVersion := dc.Status.LatestVersion
418 418
 			err = wait.PollImmediate(500*time.Millisecond, 10*time.Second, func() (bool, error) {
419
-				dc, err = oc.REST().DeploymentConfigs(oc.Namespace()).Get(name)
419
+				dc, err = oc.Client().DeploymentConfigs(oc.Namespace()).Get(name)
420 420
 				if err != nil {
421 421
 					return false, err
422 422
 				}
... ...
@@ -574,7 +574,7 @@ var _ = g.Describe("deploymentconfigs", func() {
574 574
 				o.Expect(fmt.Errorf("expected no deployment, found %#v", rcs[0])).NotTo(o.HaveOccurred())
575 575
 			}
576 576
 
577
-			_, err = client.UpdateConfigWithRetries(oc.REST(), oc.Namespace(), name, func(dc *deployapi.DeploymentConfig) {
577
+			_, err = client.UpdateConfigWithRetries(oc.Client(), oc.Namespace(), name, func(dc *deployapi.DeploymentConfig) {
578 578
 				// TODO: oc rollout pause should patch instead of making a full update
579 579
 				dc.Spec.Paused = false
580 580
 			})
... ...
@@ -761,14 +761,14 @@ var _ = g.Describe("deploymentconfigs", func() {
761 761
 			o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentRunning)).NotTo(o.HaveOccurred())
762 762
 
763 763
 			g.By("verifying that all pods are ready")
764
-			config, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get(name)
764
+			config, err := oc.Client().DeploymentConfigs(oc.Namespace()).Get(name)
765 765
 			o.Expect(err).NotTo(o.HaveOccurred())
766 766
 
767 767
 			selector := labels.Set(config.Spec.Selector).AsSelector()
768 768
 			opts := kapi.ListOptions{LabelSelector: selector}
769 769
 			ready := 0
770 770
 			if err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
771
-				pods, err := oc.KubeREST().Pods(oc.Namespace()).List(opts)
771
+				pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(opts)
772 772
 				if err != nil {
773 773
 					return false, nil
774 774
 				}
... ...
@@ -789,7 +789,7 @@ var _ = g.Describe("deploymentconfigs", func() {
789 789
 
790 790
 			g.By("verifying that the deployment is still running")
791 791
 			latestName := deployutil.DeploymentNameForConfigVersion(name, config.Status.LatestVersion)
792
-			latest, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).Get(latestName)
792
+			latest, err := oc.KubeClient().Core().ReplicationControllers(oc.Namespace()).Get(latestName)
793 793
 			o.Expect(err).NotTo(o.HaveOccurred())
794 794
 			if deployutil.IsTerminatedDeployment(latest) {
795 795
 				o.Expect(fmt.Errorf("expected deployment %q not to have terminated", latest.Name)).NotTo(o.HaveOccurred())
... ...
@@ -257,18 +257,18 @@ func deploymentPreHookRetried(dc *deployapi.DeploymentConfig, rcs []kapi.Replica
257 257
 }
258 258
 
259 259
 func deploymentInfo(oc *exutil.CLI, name string) (*deployapi.DeploymentConfig, []kapi.ReplicationController, []kapi.Pod, error) {
260
-	dc, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get(name)
260
+	dc, err := oc.Client().DeploymentConfigs(oc.Namespace()).Get(name)
261 261
 	if err != nil {
262 262
 		return nil, nil, nil, err
263 263
 	}
264 264
 
265 265
 	// get pods before RCs, so we see more RCs than pods.
266
-	pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{})
266
+	pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{})
267 267
 	if err != nil {
268 268
 		return nil, nil, nil, err
269 269
 	}
270 270
 
271
-	rcs, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).List(kapi.ListOptions{
271
+	rcs, err := oc.KubeClient().Core().ReplicationControllers(oc.Namespace()).List(kapi.ListOptions{
272 272
 		LabelSelector: deployutil.ConfigSelector(name),
273 273
 	})
274 274
 	if err != nil {
... ...
@@ -304,7 +304,7 @@ func waitForSyncedConfig(oc *exutil.CLI, name string, timeout time.Duration) err
304 304
 	}
305 305
 	generation := dc.Generation
306 306
 	return wait.PollImmediate(200*time.Millisecond, timeout, func() (bool, error) {
307
-		config, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get(name)
307
+		config, err := oc.Client().DeploymentConfigs(oc.Namespace()).Get(name)
308 308
 		if err != nil {
309 309
 			return false, err
310 310
 		}
... ...
@@ -171,7 +171,7 @@ func checkSingleIdle(oc *exutil.CLI, idlingFile string, resources map[string][]s
171 171
 
172 172
 	g.By("Fetching the service and checking the annotations are present")
173 173
 	serviceName := resources["service"][0]
174
-	endpoints, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
174
+	endpoints, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(serviceName)
175 175
 	o.Expect(err).NotTo(o.HaveOccurred())
176 176
 
177 177
 	o.Expect(endpoints.Annotations).To(o.HaveKey(unidlingapi.IdledAtAnnotation))
... ...
@@ -290,7 +290,7 @@ var _ = g.Describe("idling and unidling", func() {
290 290
 
291 291
 			g.By("Connecting to the service IP and checking the echo")
292 292
 			serviceName := resources["service"][0]
293
-			svc, err := oc.KubeREST().Services(oc.Namespace()).Get(serviceName)
293
+			svc, err := oc.KubeClient().Core().Services(oc.Namespace()).Get(serviceName)
294 294
 			o.Expect(err).ToNot(o.HaveOccurred())
295 295
 
296 296
 			err = tryEchoTCP(svc)
... ...
@@ -300,7 +300,7 @@ var _ = g.Describe("idling and unidling", func() {
300 300
 			err = waitForEndpointsAvailable(oc, serviceName)
301 301
 			o.Expect(err).ToNot(o.HaveOccurred())
302 302
 
303
-			endpoints, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
303
+			endpoints, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(serviceName)
304 304
 			o.Expect(err).ToNot(o.HaveOccurred())
305 305
 
306 306
 			g.By("Making sure the endpoints are no longer marked as idled")
... ...
@@ -315,7 +315,7 @@ var _ = g.Describe("idling and unidling", func() {
315 315
 
316 316
 			g.By("Connecting to the service IP and repeatedly connecting, making sure we seamlessly idle and come back up")
317 317
 			serviceName := resources["service"][0]
318
-			svc, err := oc.KubeREST().Services(oc.Namespace()).Get(serviceName)
318
+			svc, err := oc.KubeClient().Core().Services(oc.Namespace()).Get(serviceName)
319 319
 			o.Expect(err).ToNot(o.HaveOccurred())
320 320
 
321 321
 			o.Consistently(func() error { return tryEchoTCP(svc) }, 10*time.Second, 500*time.Millisecond).ShouldNot(o.HaveOccurred())
... ...
@@ -324,7 +324,7 @@ var _ = g.Describe("idling and unidling", func() {
324 324
 			err = waitForEndpointsAvailable(oc, serviceName)
325 325
 			o.Expect(err).ToNot(o.HaveOccurred())
326 326
 
327
-			endpoints, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
327
+			endpoints, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(serviceName)
328 328
 			o.Expect(err).ToNot(o.HaveOccurred())
329 329
 
330 330
 			g.By("Making sure the endpoints are no longer marked as idled")
... ...
@@ -343,7 +343,7 @@ var _ = g.Describe("idling and unidling", func() {
343 343
 			o.Expect(err).ToNot(o.HaveOccurred())
344 344
 
345 345
 			g.By("Connecting to the service IP many times and checking the echo")
346
-			svc, err := oc.KubeREST().Services(oc.Namespace()).Get(serviceName)
346
+			svc, err := oc.KubeClient().Core().Services(oc.Namespace()).Get(serviceName)
347 347
 			o.Expect(err).ToNot(o.HaveOccurred())
348 348
 
349 349
 			connectionsToStart := 100
... ...
@@ -374,7 +374,7 @@ var _ = g.Describe("idling and unidling", func() {
374 374
 			g.By("Waiting until we have endpoints")
375 375
 			err = waitForEndpointsAvailable(oc, serviceName)
376 376
 
377
-			endpoints, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
377
+			endpoints, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(serviceName)
378 378
 			o.Expect(err).ToNot(o.HaveOccurred())
379 379
 
380 380
 			g.By("Making sure the endpoints are no longer marked as idled")
... ...
@@ -393,7 +393,7 @@ var _ = g.Describe("idling and unidling", func() {
393 393
 
394 394
 			g.By("Connecting to the service IP and checking the echo")
395 395
 			serviceName := resources["service"][0]
396
-			svc, err := oc.KubeREST().Services(oc.Namespace()).Get(serviceName)
396
+			svc, err := oc.KubeClient().Core().Services(oc.Namespace()).Get(serviceName)
397 397
 			o.Expect(err).ToNot(o.HaveOccurred())
398 398
 
399 399
 			err = tryEchoUDP(svc)
... ...
@@ -403,7 +403,7 @@ var _ = g.Describe("idling and unidling", func() {
403 403
 			err = waitForEndpointsAvailable(oc, serviceName)
404 404
 			o.Expect(err).ToNot(o.HaveOccurred())
405 405
 
406
-			endpoints, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
406
+			endpoints, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(serviceName)
407 407
 			o.Expect(err).ToNot(o.HaveOccurred())
408 408
 
409 409
 			g.By("Making sure the endpoints are no longer marked as idled")
... ...
@@ -423,7 +423,7 @@ var _ = g.Describe("idling and unidling", func() {
423 423
 
424 424
 			g.By("Connecting to the service IP many times and checking the echo")
425 425
 			serviceName := resources["service"][0]
426
-			svc, err := oc.KubeREST().Services(oc.Namespace()).Get(serviceName)
426
+			svc, err := oc.KubeClient().Core().Services(oc.Namespace()).Get(serviceName)
427 427
 			o.Expect(err).ToNot(o.HaveOccurred())
428 428
 
429 429
 			connectionsToStart := 100
... ...
@@ -455,7 +455,7 @@ var _ = g.Describe("idling and unidling", func() {
455 455
 			err = waitForEndpointsAvailable(oc, serviceName)
456 456
 			o.Expect(err).ToNot(o.HaveOccurred())
457 457
 
458
-			endpoints, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
458
+			endpoints, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(serviceName)
459 459
 			o.Expect(err).ToNot(o.HaveOccurred())
460 460
 
461 461
 			g.By("Making sure the endpoints are no longer marked as idled")
... ...
@@ -11,7 +11,7 @@ import (
11 11
 
12 12
 func waitForEndpointsAvailable(oc *exutil.CLI, serviceName string) error {
13 13
 	return wait.Poll(200*time.Millisecond, 2*time.Minute, func() (bool, error) {
14
-		ep, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
14
+		ep, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(serviceName)
15 15
 		// Tolerate NotFound b/c it could take a moment for the endpoints to be created
16 16
 		if errors.TolerateNotFoundError(err) != nil {
17 17
 			return false, err
... ...
@@ -23,8 +23,8 @@ func waitForEndpointsAvailable(oc *exutil.CLI, serviceName string) error {
23 23
 
24 24
 func waitForNoPodsAvailable(oc *exutil.CLI) error {
25 25
 	return wait.Poll(200*time.Millisecond, 2*time.Minute, func() (bool, error) {
26
-		//ep, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(serviceName)
27
-		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{})
26
+		//ep, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(serviceName)
27
+		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{})
28 28
 		if err != nil {
29 29
 			return false, err
30 30
 		}
... ...
@@ -12,7 +12,7 @@ import (
12 12
 
13 13
 // RunInPodContainer will run provided command in the specified pod container.
14 14
 func RunInPodContainer(oc *exutil.CLI, selector labels.Selector, cmd []string) error {
15
-	pods, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), selector, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
15
+	pods, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), selector, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
16 16
 	if err != nil {
17 17
 		return err
18 18
 	}
... ...
@@ -20,7 +20,7 @@ func RunInPodContainer(oc *exutil.CLI, selector labels.Selector, cmd []string) e
20 20
 		return fmt.Errorf("Got %d pods for selector %v, expected 1", len(pods), selector)
21 21
 	}
22 22
 
23
-	pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(pods[0])
23
+	pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(pods[0])
24 24
 	if err != nil {
25 25
 		return err
26 26
 	}
... ...
@@ -354,7 +354,7 @@ func loadFixture(oc *exutil.CLI, filename string) {
354 354
 
355 355
 func assertEnvVars(oc *exutil.CLI, buildPrefix string, varsToFind map[string]string) {
356 356
 
357
-	buildList, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{})
357
+	buildList, err := oc.Client().Builds(oc.Namespace()).List(kapi.ListOptions{})
358 358
 	o.Expect(err).NotTo(o.HaveOccurred())
359 359
 
360 360
 	// Ensure that expected start-build environment variables were injected
... ...
@@ -419,7 +419,7 @@ func initExecPod(oc *exutil.CLI) *kapi.Pod {
419 419
 
420 420
 	var targetPod *kapi.Pod
421 421
 	err := wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) {
422
-		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{})
422
+		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{})
423 423
 		o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
424 424
 		for _, p := range pods.Items {
425 425
 			if strings.HasPrefix(p.Name, "centos") && !strings.Contains(p.Name, "deploy") && p.Status.Phase == "Running" {
... ...
@@ -545,7 +545,7 @@ var _ = g.Describe("[image_ecosystem][jenkins][Slow] openshift pipeline plugin",
545 545
 		o.Expect(err).NotTo(o.HaveOccurred())
546 546
 
547 547
 		g.By("waiting for jenkins deployment")
548
-		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins", oc)
548
+		err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "jenkins", oc)
549 549
 		o.Expect(err).NotTo(o.HaveOccurred())
550 550
 
551 551
 		g.By("get ip and port for jenkins service")
... ...
@@ -655,9 +655,9 @@ var _ = g.Describe("[image_ecosystem][jenkins][Slow] openshift pipeline plugin",
655 655
 			// we leverage some of the openshift utilities for waiting for the deployment before we poll
656 656
 			// jenkins for the successful job completion
657 657
 			g.By("waiting for frontend, frontend-prod deployments as signs that the build has finished")
658
-			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend", oc)
658
+			err := exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "frontend", oc)
659 659
 			o.Expect(err).NotTo(o.HaveOccurred())
660
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend-prod", oc)
660
+			err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "frontend-prod", oc)
661 661
 			o.Expect(err).NotTo(o.HaveOccurred())
662 662
 
663 663
 			g.By("get build console logs and see if succeeded")
... ...
@@ -679,9 +679,9 @@ var _ = g.Describe("[image_ecosystem][jenkins][Slow] openshift pipeline plugin",
679 679
 			// we leverage some of the openshift utilities for waiting for the deployment before we poll
680 680
 			// jenkins for the successful job completion
681 681
 			g.By("waiting for frontend, frontend-prod deployments as signs that the build has finished")
682
-			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend", oc)
682
+			err := exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "frontend", oc)
683 683
 			o.Expect(err).NotTo(o.HaveOccurred())
684
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend-prod", oc)
684
+			err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "frontend-prod", oc)
685 685
 			o.Expect(err).NotTo(o.HaveOccurred())
686 686
 
687 687
 			g.By("get build console logs and see if succeeded")
... ...
@@ -737,7 +737,7 @@ var _ = g.Describe("[image_ecosystem][jenkins][Slow] openshift pipeline plugin",
737 737
 			// we leverage some of the openshift utilities for waiting for the deployment before we poll
738 738
 			// jenkins for the successful job completion
739 739
 			g.By("waiting for frontend deployments as signs that the build has finished")
740
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend", oc)
740
+			err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "frontend", oc)
741 741
 			o.Expect(err).NotTo(o.HaveOccurred())
742 742
 
743 743
 			g.By("get build console logs and see if succeeded")
... ...
@@ -753,7 +753,7 @@ var _ = g.Describe("[image_ecosystem][jenkins][Slow] openshift pipeline plugin",
753 753
 
754 754
 		g.It("jenkins-plugin test trigger build DSL", func() {
755 755
 
756
-			buildsBefore, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{})
756
+			buildsBefore, err := oc.Client().Builds(oc.Namespace()).List(kapi.ListOptions{})
757 757
 			o.Expect(err).NotTo(o.HaveOccurred())
758 758
 
759 759
 			data, err := j.buildDSLJob(oc.Namespace(),
... ...
@@ -769,12 +769,12 @@ var _ = g.Describe("[image_ecosystem][jenkins][Slow] openshift pipeline plugin",
769 769
 			o.Expect(err).NotTo(o.HaveOccurred())
770 770
 
771 771
 			err = wait.Poll(10*time.Second, 10*time.Minute, func() (bool, error) {
772
-				buildsAfter, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{})
772
+				buildsAfter, err := oc.Client().Builds(oc.Namespace()).List(kapi.ListOptions{})
773 773
 				o.Expect(err).NotTo(o.HaveOccurred())
774 774
 				return (len(buildsAfter.Items) != len(buildsBefore.Items)), nil
775 775
 			})
776 776
 
777
-			buildsAfter, err := oc.REST().Builds(oc.Namespace()).List(kapi.ListOptions{})
777
+			buildsAfter, err := oc.Client().Builds(oc.Namespace()).List(kapi.ListOptions{})
778 778
 			o.Expect(err).NotTo(o.HaveOccurred())
779 779
 			o.Expect(len(buildsAfter.Items)).To(o.Equal(len(buildsBefore.Items) + 1))
780 780
 
... ...
@@ -866,7 +866,7 @@ var _ = g.Describe("[image_ecosystem][jenkins][Slow] openshift pipeline plugin",
866 866
 
867 867
 			loadFixture(oc, "multitag-template.json")
868 868
 			err := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
869
-				_, err := oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "orig")
869
+				_, err := oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag3", "orig")
870 870
 				if err != nil {
871 871
 					return false, nil
872 872
 				}
... ...
@@ -885,39 +885,39 @@ var _ = g.Describe("[image_ecosystem][jenkins][Slow] openshift pipeline plugin",
885 885
 			ginkgolog("Job logs>>\n%s\n\n", log)
886 886
 
887 887
 			// Assert stream tagging results
888
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod")
888
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag", "prod")
889 889
 			o.Expect(err).NotTo(o.HaveOccurred())
890 890
 
891 891
 			// 1 to N mapping
892
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod2")
892
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag", "prod2")
893 893
 			o.Expect(err).NotTo(o.HaveOccurred())
894
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod3")
894
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag", "prod3")
895 895
 			o.Expect(err).NotTo(o.HaveOccurred())
896
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod4")
896
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag", "prod4")
897 897
 			o.Expect(err).NotTo(o.HaveOccurred())
898 898
 
899 899
 			// N to 1 mapping
900
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod5")
900
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag", "prod5")
901 901
 			o.Expect(err).NotTo(o.HaveOccurred())
902
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag2", "prod5")
902
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag2", "prod5")
903 903
 			o.Expect(err).NotTo(o.HaveOccurred())
904
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "prod5")
904
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag3", "prod5")
905 905
 			o.Expect(err).NotTo(o.HaveOccurred())
906 906
 
907 907
 			// N to N mapping
908
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag", "prod6")
908
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag", "prod6")
909 909
 			o.Expect(err).NotTo(o.HaveOccurred())
910
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag2", "prod7")
910
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag2", "prod7")
911 911
 			o.Expect(err).NotTo(o.HaveOccurred())
912
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "prod8")
912
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag3", "prod8")
913 913
 			o.Expect(err).NotTo(o.HaveOccurred())
914 914
 
915 915
 			// N to N mapping with creation
916
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag4", "prod9")
916
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag4", "prod9")
917 917
 			o.Expect(err).NotTo(o.HaveOccurred())
918
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag5", "prod10")
918
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag5", "prod10")
919 919
 			o.Expect(err).NotTo(o.HaveOccurred())
920
-			_, err = oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag6", "prod11")
920
+			_, err = oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag6", "prod11")
921 921
 			o.Expect(err).NotTo(o.HaveOccurred())
922 922
 
923 923
 		})
... ...
@@ -928,7 +928,7 @@ var _ = g.Describe("[image_ecosystem][jenkins][Slow] openshift pipeline plugin",
928 928
 
929 929
 			loadFixture(oc, "multitag-template.json")
930 930
 			err := wait.Poll(10*time.Second, 1*time.Minute, func() (bool, error) {
931
-				_, err := oc.REST().ImageStreamTags(oc.Namespace()).Get("multitag3", "orig")
931
+				_, err := oc.Client().ImageStreamTags(oc.Namespace()).Get("multitag3", "orig")
932 932
 				if err != nil {
933 933
 					return false, nil
934 934
 				}
... ...
@@ -978,25 +978,25 @@ var _ = g.Describe("[image_ecosystem][jenkins][Slow] openshift pipeline plugin",
978 978
 			// Assert stream tagging results
979 979
 			for _, namespace := range []string{oc.Namespace(), anotherNamespace} {
980 980
 				g.By("Checking tags in namespace: " + namespace)
981
-				_, err = oc.REST().ImageStreamTags(namespace).Get("multitag", "prod")
981
+				_, err = oc.Client().ImageStreamTags(namespace).Get("multitag", "prod")
982 982
 				o.Expect(err).NotTo(o.HaveOccurred())
983 983
 
984
-				_, err = oc.REST().ImageStreamTags(namespace).Get("multitag2", "prod1")
984
+				_, err = oc.Client().ImageStreamTags(namespace).Get("multitag2", "prod1")
985 985
 				o.Expect(err).NotTo(o.HaveOccurred())
986
-				_, err = oc.REST().ImageStreamTags(namespace).Get("multitag2", "prod2")
986
+				_, err = oc.Client().ImageStreamTags(namespace).Get("multitag2", "prod2")
987 987
 				o.Expect(err).NotTo(o.HaveOccurred())
988
-				_, err = oc.REST().ImageStreamTags(namespace).Get("multitag2", "prod3")
988
+				_, err = oc.Client().ImageStreamTags(namespace).Get("multitag2", "prod3")
989 989
 				o.Expect(err).NotTo(o.HaveOccurred())
990
-				_, err = oc.REST().ImageStreamTags(namespace).Get("multitag2", "prod4")
990
+				_, err = oc.Client().ImageStreamTags(namespace).Get("multitag2", "prod4")
991 991
 				o.Expect(err).NotTo(o.HaveOccurred())
992 992
 
993
-				_, err = oc.REST().ImageStreamTags(namespace).Get("multitag5", "prod5")
993
+				_, err = oc.Client().ImageStreamTags(namespace).Get("multitag5", "prod5")
994 994
 				o.Expect(err).NotTo(o.HaveOccurred())
995 995
 
996
-				_, err = oc.REST().ImageStreamTags(namespace).Get("multitag6", "prod6")
996
+				_, err = oc.Client().ImageStreamTags(namespace).Get("multitag6", "prod6")
997 997
 				o.Expect(err).NotTo(o.HaveOccurred())
998 998
 
999
-				_, err = oc.REST().ImageStreamTags(namespace).Get("multitag7", "prod4")
999
+				_, err = oc.Client().ImageStreamTags(namespace).Get("multitag7", "prod4")
1000 1000
 				o.Expect(err).NotTo(o.HaveOccurred())
1001 1001
 			}
1002 1002
 
... ...
@@ -29,7 +29,7 @@ var _ = g.Describe("[image_ecosystem][mariadb][Slow] openshift mariadb image", f
29 29
 
30 30
 			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
31 31
 			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
32
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mariadb", oc)
32
+			err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "mariadb", oc)
33 33
 			o.Expect(err).NotTo(o.HaveOccurred())
34 34
 
35 35
 			g.By("expecting the mariadb service get endpoints")
... ...
@@ -26,12 +26,12 @@ var _ = g.Describe("[image_ecosystem][mongodb] openshift mongodb image", func()
26 26
 			o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())
27 27
 
28 28
 			g.By("waiting for the deployment to complete")
29
-			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mongodb", oc)
29
+			err := exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "mongodb", oc)
30 30
 			o.Expect(err).ShouldNot(o.HaveOccurred())
31 31
 
32 32
 			g.By("expecting the mongodb pod is running")
33 33
 			podNames, err := exutil.WaitForPods(
34
-				oc.KubeREST().Pods(oc.Namespace()),
34
+				oc.KubeClient().Core().Pods(oc.Namespace()),
35 35
 				exutil.ParseLabelsOrDie("name=mongodb"),
36 36
 				exutil.CheckPodIsRunningFn,
37 37
 				1,
... ...
@@ -37,7 +37,7 @@ var _ = g.Describe("[image_ecosystem][mongodb] openshift mongodb replication", f
37 37
 			o.Expect(oc.Run("new-app").Args("-f", templatePath).Execute()).Should(o.Succeed())
38 38
 
39 39
 			g.By("waiting for the deployment to complete")
40
-			err := exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), deploymentConfigName, oc)
40
+			err := exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), deploymentConfigName, oc)
41 41
 			o.Expect(err).NotTo(o.HaveOccurred())
42 42
 
43 43
 			podNames := waitForNumberOfPodsWithLabel(oc, expectedReplicasAfterDeployment, "mongodb-replica")
... ...
@@ -87,7 +87,7 @@ func waitForNumberOfPodsWithLabel(oc *exutil.CLI, number int, label string) []st
87 87
 	g.By(fmt.Sprintf("expecting that there are %d running pods with label name=%s", number, label))
88 88
 
89 89
 	podNames, err := exutil.WaitForPods(
90
-		oc.KubeREST().Pods(oc.Namespace()),
90
+		oc.KubeClient().Core().Pods(oc.Namespace()),
91 91
 		exutil.ParseLabelsOrDie("name="+label),
92 92
 		exutil.CheckPodIsRunningFn,
93 93
 		number,
... ...
@@ -24,7 +24,7 @@ var _ = g.Describe("[image_ecosystem][mongodb][Slow] openshift mongodb replicati
24 24
 
25 25
 			g.By("creating persistent volumes")
26 26
 			_, err := exutil.SetupHostPathVolumes(
27
-				oc.AdminKubeREST().PersistentVolumes(),
27
+				oc.AdminKubeClient().Core().PersistentVolumes(),
28 28
 				oc.Namespace(),
29 29
 				"256Mi",
30 30
 				3,
... ...
@@ -34,7 +34,7 @@ var _ = g.Describe("[image_ecosystem][mongodb][Slow] openshift mongodb replicati
34 34
 			defer func() {
35 35
 				// We're removing only PVs because all other things will be removed
36 36
 				// together with namespace.
37
-				err := exutil.CleanupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace())
37
+				err := exutil.CleanupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace())
38 38
 				if err != nil {
39 39
 					fmt.Fprintf(g.GinkgoWriter, "WARNING: couldn't cleanup persistent volumes: %v", err)
40 40
 				}
... ...
@@ -53,7 +53,7 @@ var _ = g.Describe("[image_ecosystem][mongodb][Slow] openshift mongodb replicati
53 53
 
54 54
 			g.By("waiting for pods to running")
55 55
 			podNames, err := exutil.WaitForPods(
56
-				oc.KubeREST().Pods(oc.Namespace()),
56
+				oc.KubeClient().Core().Pods(oc.Namespace()),
57 57
 				exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
58 58
 				exutil.CheckPodIsRunningFn,
59 59
 				3,
... ...
@@ -79,7 +79,7 @@ var _ = g.Describe("[image_ecosystem][mongodb][Slow] openshift mongodb replicati
79 79
 
80 80
 			g.By("waiting for restarting of the pods")
81 81
 			podNames, err = exutil.WaitForPods(
82
-				oc.KubeREST().Pods(oc.Namespace()),
82
+				oc.KubeClient().Core().Pods(oc.Namespace()),
83 83
 				exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
84 84
 				exutil.CheckPodIsRunningFn,
85 85
 				3,
... ...
@@ -29,7 +29,7 @@ var _ = g.Describe("[image_ecosystem][mysql][Slow] openshift mysql image", func(
29 29
 
30 30
 			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
31 31
 			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
32
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "mysql", oc)
32
+			err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "mysql", oc)
33 33
 			o.Expect(err).NotTo(o.HaveOccurred())
34 34
 
35 35
 			g.By("expecting the mysql service get endpoints")
... ...
@@ -13,7 +13,7 @@ import (
13 13
 	testutil "github.com/openshift/origin/test/util"
14 14
 
15 15
 	kapi "k8s.io/kubernetes/pkg/api"
16
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
16
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
17 17
 )
18 18
 
19 19
 type testCase struct {
... ...
@@ -42,7 +42,7 @@ var (
42 42
 
43 43
 // CreateMySQLReplicationHelpers creates a set of MySQL helpers for master,
44 44
 // slave and an extra helper that is used for remote login test.
45
-func CreateMySQLReplicationHelpers(c kclient.PodInterface, masterDeployment, slaveDeployment, helperDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
45
+func CreateMySQLReplicationHelpers(c kcoreclient.PodInterface, masterDeployment, slaveDeployment, helperDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
46 46
 	podNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", masterDeployment)), exutil.CheckPodIsRunningFn, 1, 1*time.Minute)
47 47
 	o.Expect(err).NotTo(o.HaveOccurred())
48 48
 	masterPod := podNames[0]
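
In this hunk the helper's parameter type moves from the old unversioned kclient.PodInterface to kcoreclient.PodInterface from the generated internal clientset, so callers now pass in the typed core Pods client. A sketch of the corresponding call site (it mirrors the replicationTestFactory hunk later in this file):

	// oc.KubeClient().Core().Pods(ns) satisfies the new kcoreclient.PodInterface.
	master, slaves, helper := CreateMySQLReplicationHelpers(
		oc.KubeClient().Core().Pods(oc.Namespace()),
		masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)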
... ...
@@ -72,7 +72,7 @@ func cleanup(oc *exutil.CLI) {
72 72
 	oc.AsAdmin().Run("delete").Args("all", "--all", "-n", oc.Namespace()).Execute()
73 73
 	exutil.DumpImageStreams(oc)
74 74
 	oc.AsAdmin().Run("delete").Args("pvc", "--all", "-n", oc.Namespace()).Execute()
75
-	exutil.CleanupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace())
75
+	exutil.CleanupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace())
76 76
 }
77 77
 
78 78
 func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
... ...
@@ -80,10 +80,10 @@ func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
80 80
 		oc.SetOutputDir(exutil.TestContext.OutputDir)
81 81
 		defer cleanup(oc)
82 82
 
83
-		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "1Gi", 2)
83
+		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace(), "1Gi", 2)
84 84
 		o.Expect(err).NotTo(o.HaveOccurred())
85 85
 
86
-		err = testutil.WaitForPolicyUpdate(oc.REST(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
86
+		err = testutil.WaitForPolicyUpdate(oc.Client(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
87 87
 		o.Expect(err).NotTo(o.HaveOccurred())
88 88
 
89 89
 		exutil.CheckOpenShiftNamespaceImageStreams(oc)
... ...
@@ -96,7 +96,7 @@ func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
96 96
 		// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
97 97
 		// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
98 98
 		g.By("waiting for the deployment to complete")
99
-		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), helperName, oc)
99
+		err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), helperName, oc)
100 100
 		o.Expect(err).NotTo(o.HaveOccurred())
101 101
 
102 102
 		g.By("waiting for an endpoint")
... ...
@@ -109,7 +109,7 @@ func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
109 109
 			table := fmt.Sprintf("table_%0.2d", tableCounter)
110 110
 
111 111
 			g.By("creating replication helpers")
112
-			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
112
+			master, slaves, helper := CreateMySQLReplicationHelpers(oc.KubeClient().Core().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", helperName), slaveCount)
113 113
 			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})).NotTo(o.HaveOccurred())
114 114
 			o.Expect(exutil.WaitUntilAllHelpersAreUp(oc, slaves)).NotTo(o.HaveOccurred())
115 115
 
... ...
@@ -151,24 +151,24 @@ func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
151 151
 		g.By("after master is restarted by changing the Deployment Config")
152 152
 		err = oc.Run("env").Args("dc", "mysql-master", "MYSQL_ROOT_PASSWORD=newpass").Execute()
153 153
 		o.Expect(err).NotTo(o.HaveOccurred())
154
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
154
+		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
155 155
 		master, _, _ = assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)
156 156
 
157 157
 		g.By("after master is restarted by deleting the pod")
158 158
 		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-master-2").Execute()
159 159
 		o.Expect(err).NotTo(o.HaveOccurred())
160
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
160
+		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
161 161
 		o.Expect(err).NotTo(o.HaveOccurred())
162 162
 		_, slaves, _ := assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)
163 163
 
164 164
 		g.By("after slave is restarted by deleting the pod")
165 165
 		err = oc.Run("delete").Args("pod", "-l", "deployment=mysql-slave-1").Execute()
166 166
 		o.Expect(err).NotTo(o.HaveOccurred())
167
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
167
+		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
168 168
 		o.Expect(err).NotTo(o.HaveOccurred())
169 169
 		assertReplicationIsWorking("mysql-master-2", "mysql-slave-1", 1)
170 170
 
171
-		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
171
+		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=mysql-slave-1")})
172 172
 		o.Expect(err).NotTo(o.HaveOccurred())
173 173
 		o.Expect(len(pods.Items)).To(o.Equal(1))
174 174
 
... ...
@@ -177,7 +177,7 @@ func replicationTestFactory(oc *exutil.CLI, tc testCase) func() {
177 177
 			g.By("after slave is scaled to 0 and then back to 4 replicas")
178 178
 			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=0").Execute()
179 179
 			o.Expect(err).NotTo(o.HaveOccurred())
180
-			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
180
+			err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
181 181
 			o.Expect(err).NotTo(o.HaveOccurred())
182 182
 			err = oc.Run("scale").Args("dc", "mysql-slave", "--replicas=4").Execute()
183 183
 			o.Expect(err).NotTo(o.HaveOccurred())
... ...
@@ -13,7 +13,7 @@ import (
13 13
 	testutil "github.com/openshift/origin/test/util"
14 14
 
15 15
 	kapi "k8s.io/kubernetes/pkg/api"
16
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
16
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
17 17
 )
18 18
 
19 19
 var (
... ...
@@ -39,7 +39,7 @@ var _ = g.Describe("[LocalNode][image_ecosystem][postgresql][Slow] openshift pos
39 39
 
40 40
 // CreatePostgreSQLReplicationHelpers creates a set of PostgreSQL helpers for master,
41 41
 // slave and an extra helper that is used for remote login test.
42
-func CreatePostgreSQLReplicationHelpers(c kclient.PodInterface, masterDeployment, slaveDeployment, helperDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
42
+func CreatePostgreSQLReplicationHelpers(c kcoreclient.PodInterface, masterDeployment, slaveDeployment, helperDeployment string, slaveCount int) (exutil.Database, []exutil.Database, exutil.Database) {
43 43
 	podNames, err := exutil.WaitForPods(c, exutil.ParseLabelsOrDie(fmt.Sprintf("deployment=%s", masterDeployment)), exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
44 44
 	o.Expect(err).NotTo(o.HaveOccurred())
45 45
 	masterPod := podNames[0]
... ...
@@ -69,10 +69,10 @@ func PostgreSQLReplicationTestFactory(oc *exutil.CLI, image string) func() {
69 69
 		oc.SetOutputDir(exutil.TestContext.OutputDir)
70 70
 		defer cleanup(oc)
71 71
 
72
-		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeREST().PersistentVolumes(), oc.Namespace(), "512Mi", 1)
72
+		_, err := exutil.SetupHostPathVolumes(oc.AdminKubeClient().Core().PersistentVolumes(), oc.Namespace(), "512Mi", 1)
73 73
 		o.Expect(err).NotTo(o.HaveOccurred())
74 74
 
75
-		err = testutil.WaitForPolicyUpdate(oc.REST(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
75
+		err = testutil.WaitForPolicyUpdate(oc.Client(), oc.Namespace(), "create", templateapi.Resource("templates"), true)
76 76
 		o.Expect(err).NotTo(o.HaveOccurred())
77 77
 
78 78
 		exutil.CheckOpenShiftNamespaceImageStreams(oc)
... ...
@@ -84,7 +84,7 @@ func PostgreSQLReplicationTestFactory(oc *exutil.CLI, image string) func() {
84 84
 
85 85
 		// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
86 86
 		// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
87
-		err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), postgreSQLHelperName, oc)
87
+		err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), postgreSQLHelperName, oc)
88 88
 		o.Expect(err).NotTo(o.HaveOccurred())
89 89
 
90 90
 		err = oc.KubeFramework().WaitForAnEndpoint(postgreSQLHelperName)
... ...
@@ -103,7 +103,7 @@ func PostgreSQLReplicationTestFactory(oc *exutil.CLI, image string) func() {
103 103
 			tableCounter++
104 104
 			table := fmt.Sprintf("table_%0.2d", tableCounter)
105 105
 
106
-			master, slaves, helper := CreatePostgreSQLReplicationHelpers(oc.KubeREST().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", postgreSQLHelperName), slaveCount)
106
+			master, slaves, helper := CreatePostgreSQLReplicationHelpers(oc.KubeClient().Core().Pods(oc.Namespace()), masterDeployment, slaveDeployment, fmt.Sprintf("%s-1", postgreSQLHelperName), slaveCount)
107 107
 			err := exutil.WaitUntilAllHelpersAreUp(oc, []exutil.Database{master, helper})
108 108
 			if err != nil {
109 109
 				exutil.DumpDeploymentLogs("postgresql-master", oc)
... ...
@@ -150,31 +150,31 @@ func PostgreSQLReplicationTestFactory(oc *exutil.CLI, image string) func() {
150 150
 		g.By("after master is restarted by changing the Deployment Config")
151 151
 		err = oc.Run("env").Args("dc", "postgresql-master", "POSTGRESQL_ADMIN_PASSWORD=newpass").Execute()
152 152
 		o.Expect(err).NotTo(o.HaveOccurred())
153
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
153
+		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
154 154
 		master, _, _ = assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 1)
155 155
 
156 156
 		g.By("after master is restarted by deleting the pod")
157 157
 		err = oc.Run("delete").Args("pod", "-l", "deployment=postgresql-master-2").Execute()
158 158
 		o.Expect(err).NotTo(o.HaveOccurred())
159
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
159
+		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), master.PodName(), 1*time.Minute)
160 160
 		o.Expect(err).NotTo(o.HaveOccurred())
161 161
 		_, slaves, _ := assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 1)
162 162
 
163 163
 		g.By("after slave is restarted by deleting the pod")
164 164
 		err = oc.Run("delete").Args("pod", "-l", "deployment=postgresql-slave-1").Execute()
165 165
 		o.Expect(err).NotTo(o.HaveOccurred())
166
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
166
+		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), slaves[0].PodName(), 1*time.Minute)
167 167
 		o.Expect(err).NotTo(o.HaveOccurred())
168 168
 		assertReplicationIsWorking("postgresql-master-2", "postgresql-slave-1", 1)
169 169
 
170
-		pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=postgresql-slave-1")})
170
+		pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exutil.ParseLabelsOrDie("deployment=postgresql-slave-1")})
171 171
 		o.Expect(err).NotTo(o.HaveOccurred())
172 172
 		o.Expect(len(pods.Items)).To(o.Equal(1))
173 173
 
174 174
 		g.By("after slave is scaled to 0 and then back to 4 replicas")
175 175
 		err = oc.Run("scale").Args("dc", "postgresql-slave", "--replicas=0").Execute()
176 176
 		o.Expect(err).NotTo(o.HaveOccurred())
177
-		err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
177
+		err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
178 178
 		o.Expect(err).NotTo(o.HaveOccurred())
179 179
 		err = oc.Run("scale").Args("dc", "postgresql-slave", "--replicas=4").Execute()
180 180
 		o.Expect(err).NotTo(o.HaveOccurred())
... ...
@@ -33,7 +33,7 @@ var _ = g.Describe("[image_ecosystem][perl][Slow] hot deploy for openshift perl
33 33
 			o.Expect(err).NotTo(o.HaveOccurred())
34 34
 
35 35
 			g.By("waiting for build to finish")
36
-			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "dancer-mysql-example-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
36
+			err = exutil.WaitForABuild(oc.Client().Builds(oc.Namespace()), "dancer-mysql-example-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
37 37
 			if err != nil {
38 38
 				exutil.DumpBuildLogs("dancer-mysql-example", oc)
39 39
 			}
... ...
@@ -41,7 +41,7 @@ var _ = g.Describe("[image_ecosystem][perl][Slow] hot deploy for openshift perl
41 41
 
42 42
 			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
43 43
 			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
44
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "dancer-mysql-example", oc)
44
+			err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "dancer-mysql-example", oc)
45 45
 			o.Expect(err).NotTo(o.HaveOccurred())
46 46
 
47 47
 			g.By("waiting for endpoint")
... ...
@@ -49,7 +49,7 @@ var _ = g.Describe("[image_ecosystem][perl][Slow] hot deploy for openshift perl
49 49
 			o.Expect(err).NotTo(o.HaveOccurred())
50 50
 
51 51
 			assertPageCountIs := func(i int) {
52
-				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
52
+				_, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
53 53
 				o.Expect(err).NotTo(o.HaveOccurred())
54 54
 
55 55
 				result, err := CheckPageContains(oc, "dancer-mysql-example", "", pageCountFn(i))
... ...
@@ -65,7 +65,7 @@ var _ = g.Describe("[image_ecosystem][perl][Slow] hot deploy for openshift perl
65 65
 			RunInPodContainer(oc, dcLabel, modifyCommand)
66 66
 			assertPageCountIs(3)
67 67
 
68
-			pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
68
+			pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
69 69
 			o.Expect(err).NotTo(o.HaveOccurred())
70 70
 			o.Expect(len(pods.Items)).To(o.Equal(1))
71 71
 
... ...
@@ -74,7 +74,7 @@ var _ = g.Describe("[image_ecosystem][perl][Slow] hot deploy for openshift perl
74 74
 			o.Expect(err).NotTo(o.HaveOccurred())
75 75
 			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
76 76
 			o.Expect(err).NotTo(o.HaveOccurred())
77
-			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
77
+			err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
78 78
 			o.Expect(err).NotTo(o.HaveOccurred())
79 79
 			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
80 80
 			o.Expect(err).NotTo(o.HaveOccurred())
... ...
@@ -31,7 +31,7 @@ var _ = g.Describe("[image_ecosystem][php][Slow] hot deploy for openshift php im
31 31
 			o.Expect(err).NotTo(o.HaveOccurred())
32 32
 
33 33
 			g.By("waiting for build to finish")
34
-			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
34
+			err = exutil.WaitForABuild(oc.Client().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
35 35
 			if err != nil {
36 36
 				exutil.DumpBuildLogs("cakephp-mysql-example", oc)
37 37
 			}
... ...
@@ -39,7 +39,7 @@ var _ = g.Describe("[image_ecosystem][php][Slow] hot deploy for openshift php im
39 39
 
40 40
 			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
41 41
 			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
42
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "cakephp-mysql-example", oc)
42
+			err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "cakephp-mysql-example", oc)
43 43
 			o.Expect(err).NotTo(o.HaveOccurred())
44 44
 
45 45
 			g.By("waiting for endpoint")
... ...
@@ -47,7 +47,7 @@ var _ = g.Describe("[image_ecosystem][php][Slow] hot deploy for openshift php im
47 47
 			o.Expect(err).NotTo(o.HaveOccurred())
48 48
 
49 49
 			assertPageCountIs := func(i int) {
50
-				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
50
+				_, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
51 51
 				o.Expect(err).NotTo(o.HaveOccurred())
52 52
 
53 53
 				result, err := CheckPageContains(oc, "cakephp-mysql-example", "", pageCountFn(i))
... ...
@@ -34,7 +34,7 @@ var _ = g.Describe("[image_ecosystem][python][Slow] hot deploy for openshift pyt
34 34
 			o.Expect(err).NotTo(o.HaveOccurred())
35 35
 
36 36
 			g.By("waiting for build to finish")
37
-			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), "django-ex-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
37
+			err = exutil.WaitForABuild(oc.Client().Builds(oc.Namespace()), "django-ex-1", exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
38 38
 			if err != nil {
39 39
 				exutil.DumpBuildLogs("django-ex", oc)
40 40
 			}
... ...
@@ -42,7 +42,7 @@ var _ = g.Describe("[image_ecosystem][python][Slow] hot deploy for openshift pyt
42 42
 
43 43
 			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
44 44
 			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
45
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "django-ex", oc)
45
+			err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "django-ex", oc)
46 46
 			o.Expect(err).NotTo(o.HaveOccurred())
47 47
 
48 48
 			g.By("waiting for endpoint")
... ...
@@ -50,7 +50,7 @@ var _ = g.Describe("[image_ecosystem][python][Slow] hot deploy for openshift pyt
50 50
 			o.Expect(err).NotTo(o.HaveOccurred())
51 51
 
52 52
 			assertPageCountIs := func(i int) {
53
-				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
53
+				_, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
54 54
 				o.Expect(err).NotTo(o.HaveOccurred())
55 55
 
56 56
 				result, err := CheckPageContains(oc, "django-ex", "", pageCountFn(i))
... ...
@@ -66,7 +66,7 @@ var _ = g.Describe("[image_ecosystem][python][Slow] hot deploy for openshift pyt
66 66
 			RunInPodContainer(oc, dcLabel, modifyCommand)
67 67
 			assertPageCountIs(3)
68 68
 
69
-			pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
69
+			pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
70 70
 			o.Expect(err).NotTo(o.HaveOccurred())
71 71
 			o.Expect(len(pods.Items)).To(o.Equal(1))
72 72
 
... ...
@@ -75,7 +75,7 @@ var _ = g.Describe("[image_ecosystem][python][Slow] hot deploy for openshift pyt
75 75
 			o.Expect(err).NotTo(o.HaveOccurred())
76 76
 			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
77 77
 			o.Expect(err).NotTo(o.HaveOccurred())
78
-			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
78
+			err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
79 79
 			o.Expect(err).NotTo(o.HaveOccurred())
80 80
 			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
81 81
 			o.Expect(err).NotTo(o.HaveOccurred())
... ...
@@ -32,7 +32,7 @@ var _ = g.Describe("[image_ecosystem][ruby][Slow] hot deploy for openshift ruby
32 32
 			o.Expect(err).NotTo(o.HaveOccurred())
33 33
 
34 34
 			g.By("waiting for build to finish")
35
-			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
35
+			err = exutil.WaitForABuild(oc.Client().Builds(oc.Namespace()), dcName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
36 36
 			if err != nil {
37 37
 				exutil.DumpBuildLogs("rails-postgresql-example", oc)
38 38
 			}
... ...
@@ -40,7 +40,7 @@ var _ = g.Describe("[image_ecosystem][ruby][Slow] hot deploy for openshift ruby
40 40
 
41 41
 			// oc.KubeFramework().WaitForAnEndpoint currently will wait forever;  for now, prefacing with our WaitForADeploymentToComplete,
42 42
 			// which does have a timeout, since in most cases a failure in the service coming up stems from a failed deployment
43
-			err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), "rails-postgresql-example", oc)
43
+			err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), "rails-postgresql-example", oc)
44 44
 			o.Expect(err).NotTo(o.HaveOccurred())
45 45
 
46 46
 			g.By("waiting for endpoint")
... ...
@@ -48,7 +48,7 @@ var _ = g.Describe("[image_ecosystem][ruby][Slow] hot deploy for openshift ruby
48 48
 			o.Expect(err).NotTo(o.HaveOccurred())
49 49
 
50 50
 			assertPageContent := func(content string) {
51
-				_, err := exutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
51
+				_, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunningFn, 1, 2*time.Minute)
52 52
 				o.Expect(err).NotTo(o.HaveOccurred())
53 53
 
54 54
 				result, err := CheckPageContains(oc, "rails-postgresql-example", "", content)
... ...
@@ -63,7 +63,7 @@ var _ = g.Describe("[image_ecosystem][ruby][Slow] hot deploy for openshift ruby
63 63
 			g.By("testing application content source modification")
64 64
 			assertPageContent("Welcome to your Rails application on OpenShift")
65 65
 
66
-			pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
66
+			pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: dcLabel})
67 67
 			o.Expect(err).NotTo(o.HaveOccurred())
68 68
 			o.Expect(len(pods.Items)).To(o.Equal(1))
69 69
 
... ...
@@ -72,7 +72,7 @@ var _ = g.Describe("[image_ecosystem][ruby][Slow] hot deploy for openshift ruby
72 72
 			o.Expect(err).NotTo(o.HaveOccurred())
73 73
 			err = oc.Run("scale").Args("rc", dcName, "--replicas=0").Execute()
74 74
 			o.Expect(err).NotTo(o.HaveOccurred())
75
-			err = exutil.WaitUntilPodIsGone(oc.KubeREST().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
75
+			err = exutil.WaitUntilPodIsGone(oc.KubeClient().Core().Pods(oc.Namespace()), pods.Items[0].Name, 1*time.Minute)
76 76
 			o.Expect(err).NotTo(o.HaveOccurred())
77 77
 			err = oc.Run("scale").Args("rc", dcName, "--replicas=1").Execute()
78 78
 			o.Expect(err).NotTo(o.HaveOccurred())
... ...
@@ -32,7 +32,7 @@ func NewSampleRepoTest(c SampleRepoConfig) func() {
32 32
 
33 33
 		g.JustBeforeEach(func() {
34 34
 			g.By("Waiting for builder service account")
35
-			err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
35
+			err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
36 36
 			o.Expect(err).NotTo(o.HaveOccurred())
37 37
 		})
38 38
 
... ...
@@ -50,19 +50,19 @@ func NewSampleRepoTest(c SampleRepoConfig) func() {
50 50
 				buildName := c.buildConfigName + "-1"
51 51
 
52 52
 				g.By("expecting the build is in the Complete phase")
53
-				err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
53
+				err = exutil.WaitForABuild(oc.Client().Builds(oc.Namespace()), buildName, exutil.CheckBuildSuccessFn, exutil.CheckBuildFailedFn)
54 54
 				if err != nil {
55 55
 					exutil.DumpBuildLogs(c.buildConfigName, oc)
56 56
 				}
57 57
 				o.Expect(err).NotTo(o.HaveOccurred())
58 58
 
59 59
 				g.By("expecting the app deployment to be complete")
60
-				err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
60
+				err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), c.deploymentConfigName, oc)
61 61
 				o.Expect(err).NotTo(o.HaveOccurred())
62 62
 
63 63
 				if len(c.dbDeploymentConfigName) > 0 {
64 64
 					g.By("expecting the db deployment to be complete")
65
-					err = exutil.WaitForADeploymentToComplete(oc.KubeREST().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
65
+					err = exutil.WaitForADeploymentToComplete(oc.KubeClient().Core().ReplicationControllers(oc.Namespace()), c.dbDeploymentConfigName, oc)
66 66
 					o.Expect(err).NotTo(o.HaveOccurred())
67 67
 
68 68
 					g.By("expecting the db service is available")
... ...
@@ -20,7 +20,7 @@ var _ = g.Describe("[image_ecosystem][Slow] openshift images should be SCL enabl
20 20
 
21 21
 	g.JustBeforeEach(func() {
22 22
 		g.By("waiting for builder service account")
23
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
23
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
24 24
 		o.Expect(err).NotTo(o.HaveOccurred())
25 25
 	})
26 26
 
... ...
@@ -54,7 +54,7 @@ var _ = g.Describe("[image_ecosystem][Slow] openshift images should be SCL enabl
54 54
 						Name:    "test",
55 55
 						Command: []string{"/usr/bin/sleep", "infinity"},
56 56
 					})
57
-					_, err := oc.KubeREST().Pods(oc.Namespace()).Create(pod)
57
+					_, err := oc.KubeClient().Core().Pods(oc.Namespace()).Create(pod)
58 58
 					o.Expect(err).NotTo(o.HaveOccurred())
59 59
 
60 60
 					err = oc.KubeFramework().WaitForPodRunning(pod.Name)
... ...
@@ -26,7 +26,7 @@ var _ = g.Describe("[imageapis] openshift limit range admission", func() {
26 26
 
27 27
 	g.JustBeforeEach(func() {
28 28
 		g.By("Waiting for builder service account")
29
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
29
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
30 30
 		o.Expect(err).NotTo(o.HaveOccurred())
31 31
 	})
32 32
 
... ...
@@ -34,7 +34,7 @@ var _ = g.Describe("[imageapis] openshift limit range admission", func() {
34 34
 	// is destroyed
35 35
 	tearDown := func(oc *exutil.CLI) {
36 36
 		g.By(fmt.Sprintf("Deleting limit range %s", limitRangeName))
37
-		oc.AdminKubeREST().LimitRanges(oc.Namespace()).Delete(limitRangeName)
37
+		oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Delete(limitRangeName, nil)
38 38
 
39 39
 		deleteTestImagesAndStreams(oc)
40 40
 	}
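
The added nil argument reflects that the generated internal clientset's delete methods take a delete-options parameter, whereas the old unversioned client took only the name. A short sketch of the shape of the call (the signatures are inferred from this hunk, not quoted from the client packages):

	// old unversioned client:       Delete(name string) error
	// generated internal clientset: Delete(name string, options *kapi.DeleteOptions) error
	err := oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Delete(limitRangeName, nil)
	o.Expect(err).NotTo(o.HaveOccurred())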
... ...
@@ -113,7 +113,7 @@ var _ = g.Describe("[imageapis] openshift limit range admission", func() {
113 113
 		o.Expect(err).NotTo(o.HaveOccurred())
114 114
 
115 115
 		g.By(`removing tag "second" from "another" image stream`)
116
-		err = oc.REST().ImageStreamTags(oc.Namespace()).Delete("another", "second")
116
+		err = oc.Client().ImageStreamTags(oc.Namespace()).Delete("another", "second")
117 117
 		o.Expect(err).NotTo(o.HaveOccurred())
118 118
 
119 119
 		g.By(fmt.Sprintf("trying to push image below limits %v", limits))
... ...
@@ -148,7 +148,7 @@ var _ = g.Describe("[imageapis] openshift limit range admission", func() {
148 148
 		o.Expect(err).NotTo(o.HaveOccurred())
149 149
 
150 150
 		g.By(fmt.Sprintf("trying to tag a docker image exceeding limit %v", limit))
151
-		is, err := oc.REST().ImageStreams(oc.Namespace()).Get("stream")
151
+		is, err := oc.Client().ImageStreams(oc.Namespace()).Get("stream")
152 152
 		o.Expect(err).NotTo(o.HaveOccurred())
153 153
 		is.Spec.Tags["foo"] = imageapi.TagReference{
154 154
 			Name: "foo",
... ...
@@ -160,12 +160,12 @@ var _ = g.Describe("[imageapis] openshift limit range admission", func() {
160 160
 				Insecure: true,
161 161
 			},
162 162
 		}
163
-		_, err = oc.REST().ImageStreams(oc.Namespace()).Update(is)
163
+		_, err = oc.Client().ImageStreams(oc.Namespace()).Update(is)
164 164
 		o.Expect(err).To(o.HaveOccurred())
165 165
 		o.Expect(quotautil.IsErrorQuotaExceeded(err)).Should(o.Equal(true))
166 166
 
167 167
 		g.By("re-tagging the image under different tag")
168
-		is, err = oc.REST().ImageStreams(oc.Namespace()).Get("stream")
168
+		is, err = oc.Client().ImageStreams(oc.Namespace()).Get("stream")
169 169
 		o.Expect(err).NotTo(o.HaveOccurred())
170 170
 		is.Spec.Tags["duplicate"] = imageapi.TagReference{
171 171
 			Name: "duplicate",
... ...
@@ -177,7 +177,7 @@ var _ = g.Describe("[imageapis] openshift limit range admission", func() {
177 177
 				Insecure: true,
178 178
 			},
179 179
 		}
180
-		_, err = oc.REST().ImageStreams(oc.Namespace()).Update(is)
180
+		_, err = oc.Client().ImageStreams(oc.Namespace()).Update(is)
181 181
 		o.Expect(err).NotTo(o.HaveOccurred())
182 182
 	})
183 183
 
... ...
@@ -238,7 +238,7 @@ func buildAndPushTestImagesTo(oc *exutil.CLI, isName string, tagPrefix string, n
238 238
 		if err != nil {
239 239
 			return nil, err
240 240
 		}
241
-		ist, err := oc.REST().ImageStreamTags(oc.Namespace()).Get(isName, tag)
241
+		ist, err := oc.Client().ImageStreamTags(oc.Namespace()).Get(isName, tag)
242 242
 		if err != nil {
243 243
 			return nil, err
244 244
 		}
... ...
@@ -268,14 +268,14 @@ func createLimitRangeOfType(oc *exutil.CLI, limitType kapi.LimitType, maxLimits
268 268
 	}
269 269
 
270 270
 	g.By(fmt.Sprintf("creating limit range object %q with %s limited to: %v", limitRangeName, limitType, maxLimits))
271
-	lr, err := oc.AdminKubeREST().LimitRanges(oc.Namespace()).Create(lr)
271
+	lr, err := oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Create(lr)
272 272
 	return lr, err
273 273
 }
274 274
 
275 275
 // bumpLimit changes the limit value for given resource for all the limit types of limit range object
276 276
 func bumpLimit(oc *exutil.CLI, resourceName kapi.ResourceName, limit string) (kapi.ResourceList, error) {
277 277
 	g.By(fmt.Sprintf("bump a limit on resource %q to %s", resourceName, limit))
278
-	lr, err := oc.AdminKubeREST().LimitRanges(oc.Namespace()).Get(limitRangeName)
278
+	lr, err := oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Get(limitRangeName)
279 279
 	if err != nil {
280 280
 		return nil, err
281 281
 	}
... ...
@@ -299,7 +299,7 @@ func bumpLimit(oc *exutil.CLI, resourceName kapi.ResourceName, limit string) (ka
299 299
 	if !change {
300 300
 		return res, nil
301 301
 	}
302
-	_, err = oc.AdminKubeREST().LimitRanges(oc.Namespace()).Update(lr)
302
+	_, err = oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Update(lr)
303 303
 	return res, err
304 304
 }
305 305
 
... ...
@@ -31,7 +31,7 @@ var _ = g.Describe("[imageapis] openshift resource quota admission", func() {
31 31
 
32 32
 	g.JustBeforeEach(func() {
33 33
 		g.By("Waiting for builder service account")
34
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
34
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
35 35
 		o.Expect(err).NotTo(o.HaveOccurred())
36 36
 	})
37 37
 
... ...
@@ -39,7 +39,7 @@ var _ = g.Describe("[imageapis] openshift resource quota admission", func() {
39 39
 	// is destroyed
40 40
 	tearDown := func(oc *exutil.CLI) {
41 41
 		g.By(fmt.Sprintf("Deleting quota %s", quotaName))
42
-		oc.AdminKubeREST().ResourceQuotas(oc.Namespace()).Delete(quotaName)
42
+		oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Delete(quotaName, nil)
43 43
 
44 44
 		deleteTestImagesAndStreams(oc)
45 45
 	}
... ...
@@ -96,10 +96,10 @@ var _ = g.Describe("[imageapis] openshift resource quota admission", func() {
96 96
 		o.Expect(err).NotTo(o.HaveOccurred())
97 97
 
98 98
 		g.By("deleting first image stream")
99
-		err = oc.REST().ImageStreams(oc.Namespace()).Delete("first")
99
+		err = oc.Client().ImageStreams(oc.Namespace()).Delete("first")
100 100
 		o.Expect(err).NotTo(o.HaveOccurred())
101 101
 		used, err = exutil.WaitForResourceQuotaSync(
102
-			oc.KubeREST().ResourceQuotas(oc.Namespace()),
102
+			oc.KubeClient().Core().ResourceQuotas(oc.Namespace()),
103 103
 			quotaName,
104 104
 			kapi.ResourceList{imageapi.ResourceImageStreams: resource.MustParse("1")},
105 105
 			true,
... ...
@@ -130,7 +130,7 @@ func createResourceQuota(oc *exutil.CLI, hard kapi.ResourceList) (*kapi.Resource
130 130
 	}
131 131
 
132 132
 	g.By(fmt.Sprintf("creating resource quota with a limit %v", hard))
133
-	rq, err := oc.AdminKubeREST().ResourceQuotas(oc.Namespace()).Create(rq)
133
+	rq, err := oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Create(rq)
134 134
 	if err != nil {
135 135
 		return nil, err
136 136
 	}
... ...
@@ -167,12 +167,12 @@ func assertQuotasEqual(a, b kapi.ResourceList) error {
167 167
 // bumpQuota modifies hard spec of quota object with the given value. It returns modified hard spec.
168 168
 func bumpQuota(oc *exutil.CLI, resourceName kapi.ResourceName, value int64) (kapi.ResourceList, error) {
169 169
 	g.By(fmt.Sprintf("bump the quota to %s=%d", resourceName, value))
170
-	rq, err := oc.AdminKubeREST().ResourceQuotas(oc.Namespace()).Get(quotaName)
170
+	rq, err := oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Get(quotaName)
171 171
 	if err != nil {
172 172
 		return nil, err
173 173
 	}
174 174
 	rq.Spec.Hard[resourceName] = *resource.NewQuantity(value, resource.DecimalSI)
175
-	_, err = oc.AdminKubeREST().ResourceQuotas(oc.Namespace()).Update(rq)
175
+	_, err = oc.AdminKubeClient().Core().ResourceQuotas(oc.Namespace()).Update(rq)
176 176
 	if err != nil {
177 177
 		return nil, err
178 178
 	}
... ...
@@ -187,7 +187,7 @@ func bumpQuota(oc *exutil.CLI, resourceName kapi.ResourceName, value int64) (kap
187 187
 func waitForResourceQuotaSync(oc *exutil.CLI, name string, expectedResources kapi.ResourceList) (kapi.ResourceList, error) {
188 188
 	g.By(fmt.Sprintf("waiting for resource quota %s to get updated", name))
189 189
 	used, err := exutil.WaitForResourceQuotaSync(
190
-		oc.KubeREST().ResourceQuotas(oc.Namespace()),
190
+		oc.KubeClient().Core().ResourceQuotas(oc.Namespace()),
191 191
 		quotaName,
192 192
 		expectedResources,
193 193
 		false,
... ...
@@ -203,7 +203,7 @@ func waitForResourceQuotaSync(oc *exutil.CLI, name string, expectedResources kap
203 203
 func waitForLimitSync(oc *exutil.CLI, hardLimit kapi.ResourceList) error {
204 204
 	g.By(fmt.Sprintf("waiting for resource quota %s to get updated", quotaName))
205 205
 	return testutil.WaitForResourceQuotaLimitSync(
206
-		oc.KubeREST().ResourceQuotas(oc.Namespace()),
206
+		oc.KubeClient().Core().ResourceQuotas(oc.Namespace()),
207 207
 		quotaName,
208 208
 		hardLimit,
209 209
 		waitTimeout)
... ...
@@ -219,14 +219,14 @@ func deleteTestImagesAndStreams(oc *exutil.CLI) {
219 219
 		oc.Namespace(),
220 220
 	} {
221 221
 		g.By(fmt.Sprintf("Deleting images and image streams in project %q", projectName))
222
-		iss, err := oc.AdminREST().ImageStreams(projectName).List(kapi.ListOptions{})
222
+		iss, err := oc.AdminClient().ImageStreams(projectName).List(kapi.ListOptions{})
223 223
 		if err != nil {
224 224
 			continue
225 225
 		}
226 226
 		for _, is := range iss.Items {
227 227
 			for _, history := range is.Status.Tags {
228 228
 				for i := range history.Items {
229
-					oc.AdminREST().Images().Delete(history.Items[i].Image)
229
+					oc.AdminClient().Images().Delete(history.Items[i].Image)
230 230
 				}
231 231
 			}
232 232
 			for _, tagRef := range is.Spec.Tags {
... ...
@@ -236,7 +236,7 @@ func deleteTestImagesAndStreams(oc *exutil.CLI) {
236 236
 					if err != nil {
237 237
 						continue
238 238
 					}
239
-					oc.AdminREST().Images().Delete(id)
239
+					oc.AdminClient().Images().Delete(id)
240 240
 				}
241 241
 			}
242 242
 		}
... ...
@@ -244,7 +244,7 @@ func deleteTestImagesAndStreams(oc *exutil.CLI) {
244 244
 		// let the extended framework take care of the current namespace
245 245
 		if projectName != oc.Namespace() {
246 246
 			g.By(fmt.Sprintf("Deleting project %q", projectName))
247
-			oc.AdminREST().Projects().Delete(projectName)
247
+			oc.AdminClient().Projects().Delete(projectName)
248 248
 		}
249 249
 	}
250 250
 }
... ...
@@ -71,13 +71,13 @@ func BuildAndPushImageOfSizeWithBuilder(
71 71
 		istName += ":" + tag
72 72
 	}
73 73
 
74
-	bc, err := oc.REST().BuildConfigs(namespace).Get(name)
74
+	bc, err := oc.Client().BuildConfigs(namespace).Get(name)
75 75
 	if err == nil {
76 76
 		if bc.Spec.CommonSpec.Output.To.Kind != "ImageStreamTag" {
77 77
 			return fmt.Errorf("Unexpected kind of buildspec's output (%s != %s)", bc.Spec.CommonSpec.Output.To.Kind, "ImageStreamTag")
78 78
 		}
79 79
 		bc.Spec.CommonSpec.Output.To.Name = istName
80
-		if _, err = oc.REST().BuildConfigs(namespace).Update(bc); err != nil {
80
+		if _, err = oc.Client().BuildConfigs(namespace).Update(bc); err != nil {
81 81
 			return err
82 82
 		}
83 83
 	} else {
... ...
@@ -240,7 +240,7 @@ func BuildAndPushImageOfSizeWithDocker(
240 240
 
241 241
 // GetDockerRegistryURL returns a cluster URL of internal docker registry if available.
242 242
 func GetDockerRegistryURL(oc *exutil.CLI) (string, error) {
243
-	svc, err := oc.AdminKubeREST().Services("default").Get("docker-registry")
243
+	svc, err := oc.AdminKubeClient().Core().Services("default").Get("docker-registry")
244 244
 	if err != nil {
245 245
 		return "", err
246 246
 	}
... ...
@@ -37,7 +37,7 @@ var _ = g.Describe("[images] prune images", func() {
37 37
 			originalAcceptSchema2 = &accepts
38 38
 		}
39 39
 
40
-		err := exutil.WaitForBuilderAccount(oc.KubeREST().ServiceAccounts(oc.Namespace()))
40
+		err := exutil.WaitForBuilderAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()))
41 41
 		o.Expect(err).NotTo(o.HaveOccurred())
42 42
 
43 43
 		g.By(fmt.Sprintf("give a user %s a right to prune images with %s role", oc.Username(), "system:image-pruner"))
... ...
@@ -118,10 +118,10 @@ func testPruneImages(oc *exutil.CLI, schemaVersion int) {
118 118
 	o.Expect(pruneSize < keepSize).To(o.BeTrue())
119 119
 
120 120
 	g.By(fmt.Sprintf("ensure uploaded image is of schema %d", schemaVersion))
121
-	imgPrune, err := oc.AsAdmin().REST().Images().Get(imgPruneName)
121
+	imgPrune, err := oc.AsAdmin().Client().Images().Get(imgPruneName)
122 122
 	o.Expect(err).NotTo(o.HaveOccurred())
123 123
 	o.Expect(imgPrune.DockerImageManifestMediaType).To(o.Equal(mediaType))
124
-	imgKeep, err := oc.AsAdmin().REST().Images().Get(imgKeepName)
124
+	imgKeep, err := oc.AsAdmin().Client().Images().Get(imgKeepName)
125 125
 	o.Expect(err).NotTo(o.HaveOccurred())
126 126
 	o.Expect(imgKeep.DockerImageManifestMediaType).To(o.Equal(mediaType))
127 127
 
... ...
@@ -188,7 +188,7 @@ func testPruneImages(oc *exutil.CLI, schemaVersion int) {
188 188
 
189 189
 func tearDownPruneImagesTest(oc *exutil.CLI, cleanUp *cleanUpContainer) {
190 190
 	for _, image := range cleanUp.imageNames {
191
-		err := oc.AsAdmin().REST().Images().Delete(image)
191
+		err := oc.AsAdmin().Client().Images().Delete(image)
192 192
 		if err != nil {
193 193
 			fmt.Fprintf(g.GinkgoWriter, "clean up of image %q failed: %v\n", image, err)
194 194
 		}
... ...
@@ -248,7 +248,7 @@ func ensureRegistryAcceptsSchema2(oc *exutil.CLI, accept bool) error {
248 248
 		return nil
249 249
 	}
250 250
 
251
-	dc, err := oc.REST().DeploymentConfigs(kapi.NamespaceDefault).Get("docker-registry")
251
+	dc, err := oc.Client().DeploymentConfigs(kapi.NamespaceDefault).Get("docker-registry")
252 252
 	if err != nil {
253 253
 		return err
254 254
 	}
... ...
@@ -259,5 +259,5 @@ func ensureRegistryAcceptsSchema2(oc *exutil.CLI, accept bool) error {
259 259
 	if err != nil {
260 260
 		return fmt.Errorf("failed to update registry's environment with %s: %v", &waitForVersion, err)
261 261
 	}
262
-	return exutil.WaitForRegistry(oc.AdminREST(), oc.AdminKubeREST(), &waitForVersion, oc)
262
+	return exutil.WaitForRegistry(oc.AdminClient(), oc.AdminKubeClient(), &waitForVersion, oc)
263 263
 }
... ...
@@ -29,16 +29,16 @@ var _ = g.Describe("[job][Conformance] openshift can execute jobs", func() {
29 29
 				o.Expect(err).NotTo(o.HaveOccurred())
30 30
 
31 31
 				g.By("waiting for a pod...")
32
-				podNames, err := exeutil.WaitForPods(oc.KubeREST().Pods(oc.Namespace()), exeutil.ParseLabelsOrDie(labels), exeutil.CheckPodIsSucceededFn, 1, 2*time.Minute)
32
+				podNames, err := exeutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), exeutil.ParseLabelsOrDie(labels), exeutil.CheckPodIsSucceededFn, 1, 2*time.Minute)
33 33
 				o.Expect(err).NotTo(o.HaveOccurred())
34 34
 				o.Expect(len(podNames)).Should(o.Equal(1))
35 35
 
36 36
 				g.By("waiting for a job...")
37
-				err = exeutil.WaitForAJob(oc.KubeREST().ExtensionsClient.Jobs(oc.Namespace()), name, 2*time.Minute)
37
+				err = exeutil.WaitForAJob(oc.KubeClient().Batch().Jobs(oc.Namespace()), name, 2*time.Minute)
38 38
 				o.Expect(err).NotTo(o.HaveOccurred())
39 39
 
40 40
 				g.By("checking job status...")
41
-				jobs, err := oc.KubeREST().ExtensionsClient.Jobs(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exeutil.ParseLabelsOrDie(labels)})
41
+				jobs, err := oc.KubeClient().Batch().Jobs(oc.Namespace()).List(kapi.ListOptions{LabelSelector: exeutil.ParseLabelsOrDie(labels)})
42 42
 				o.Expect(err).NotTo(o.HaveOccurred())
43 43
 
44 44
 				o.Expect(len(jobs.Items)).Should(o.Equal(1))
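
With the internal clientset, jobs are reached through the Batch() group client rather than the old client's ExtensionsClient field; the rest of the call is unchanged. A compact sketch using the same names as the test above:

	jobs, err := oc.KubeClient().Batch().Jobs(oc.Namespace()).List(
		kapi.ListOptions{LabelSelector: exeutil.ParseLabelsOrDie(labels)})
	o.Expect(err).NotTo(o.HaveOccurred())
	o.Expect(len(jobs.Items)).Should(o.Equal(1))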
... ...
@@ -16,6 +16,7 @@ import (
16 16
 
17 17
 	kapi "k8s.io/kubernetes/pkg/api"
18 18
 	apierrs "k8s.io/kubernetes/pkg/api/errors"
19
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
19 20
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
20 21
 	clientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
21 22
 	"k8s.io/kubernetes/pkg/util/wait"
... ...
@@ -30,7 +31,7 @@ import (
30 30
 )
31 31
 
32 32
 // CLI provides function to call the OpenShift CLI and Kubernetes and OpenShift
33
-// REST clients.
33
+// clients.
34 34
 type CLI struct {
35 35
 	execPath         string
36 36
 	verb             string
... ...
@@ -151,7 +152,7 @@ func (c *CLI) SetupProject(name string, kubeClient *kclient.Client, _ map[string
151 151
 	e2e.Logf("The user is now %q", c.Username())
152 152
 
153 153
 	e2e.Logf("Creating project %q", c.Namespace())
154
-	_, err := c.REST().ProjectRequests().Create(&projectapi.ProjectRequest{
154
+	_, err := c.Client().ProjectRequests().Create(&projectapi.ProjectRequest{
155 155
 		ObjectMeta: kapi.ObjectMeta{Name: c.Namespace()},
156 156
 	})
157 157
 	if err != nil {
... ...
@@ -159,7 +160,7 @@ func (c *CLI) SetupProject(name string, kubeClient *kclient.Client, _ map[string
159 159
 		return nil, err
160 160
 	}
161 161
 	if err := wait.ExponentialBackoff(kclient.DefaultBackoff, func() (bool, error) {
162
-		if _, err := c.KubeREST().Pods(c.Namespace()).List(kapi.ListOptions{}); err != nil {
162
+		if _, err := c.KubeClient().Core().Pods(c.Namespace()).List(kapi.ListOptions{}); err != nil {
163 163
 			if apierrs.IsForbidden(err) {
164 164
 				e2e.Logf("Waiting for user to have access to the namespace")
165 165
 				return false, nil
... ...
@@ -178,10 +179,10 @@ func (c *CLI) Verbose() *CLI {
178 178
 	return c
179 179
 }
180 180
 
181
-// REST provides an OpenShift REST client for the current user. If the user is not
182
-// set, then it provides REST client for the cluster admin user
183
-func (c *CLI) REST() *client.Client {
184
-	_, clientConfig, err := configapi.GetKubeClient(c.configPath, nil)
181
+// Client provides an OpenShift client for the current user. If the user is not
182
+// set, then it provides client for the cluster admin user
183
+func (c *CLI) Client() *client.Client {
184
+	_, _, clientConfig, err := configapi.GetKubeClient(c.configPath, nil)
185 185
 	osClient, err := client.New(clientConfig)
186 186
 	if err != nil {
187 187
 		FatalErr(err)
... ...
@@ -189,9 +190,9 @@ func (c *CLI) REST() *client.Client {
189 189
 	return osClient
190 190
 }
191 191
 
192
-// AdminREST provides an OpenShift REST client for the cluster admin user.
193
-func (c *CLI) AdminREST() *client.Client {
194
-	_, clientConfig, err := configapi.GetKubeClient(c.adminConfigPath, nil)
192
+// AdminClient provides an OpenShift client for the cluster admin user.
193
+func (c *CLI) AdminClient() *client.Client {
194
+	_, _, clientConfig, err := configapi.GetKubeClient(c.adminConfigPath, nil)
195 195
 	osClient, err := client.New(clientConfig)
196 196
 	if err != nil {
197 197
 		FatalErr(err)
... ...
@@ -199,18 +200,18 @@ func (c *CLI) AdminREST() *client.Client {
199 199
 	return osClient
200 200
 }
201 201
 
202
-// KubeREST provides a Kubernetes REST client for the current namespace
203
-func (c *CLI) KubeREST() *kclient.Client {
204
-	kubeClient, _, err := configapi.GetKubeClient(c.configPath, nil)
202
+// KubeClient provides a Kubernetes client for the current namespace
203
+func (c *CLI) KubeClient() *kclientset.Clientset {
204
+	_, kubeClient, _, err := configapi.GetKubeClient(c.configPath, nil)
205 205
 	if err != nil {
206 206
 		FatalErr(err)
207 207
 	}
208 208
 	return kubeClient
209 209
 }
210 210
 
211
-// AdminKubeREST provides a Kubernetes REST client for the cluster admin user.
212
-func (c *CLI) AdminKubeREST() *kclient.Client {
213
-	kubeClient, _, err := configapi.GetKubeClient(c.adminConfigPath, nil)
211
+// AdminKubeClient provides a Kubernetes client for the cluster admin user.
212
+func (c *CLI) AdminKubeClient() *kclientset.Clientset {
213
+	_, kubeClient, _, err := configapi.GetKubeClient(c.adminConfigPath, nil)
214 214
 	if err != nil {
215 215
 		FatalErr(err)
216 216
 	}
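
These four accessors are the core of the switch: configapi.GetKubeClient now appears to return an additional value (the generated *kclientset.Clientset, alongside the old unversioned client and the client config), and the test CLI exposes it via KubeClient()/AdminKubeClient(), while Client()/AdminClient() keep returning the OpenShift *client.Client. A minimal usage sketch under the new accessors (illustrative only; it mirrors calls made elsewhere in this diff):

	// Kubernetes resources go through typed group clients on the clientset...
	pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{})
	if err != nil {
		FatalErr(err)
	}
	e2e.Logf("found %d pods", len(pods.Items))

	// ...while OpenShift resources continue to use the origin client.
	if _, err := oc.Client().ImageStreams(oc.Namespace()).Get("stream"); err != nil {
		FatalErr(err)
	}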
... ...
@@ -32,7 +32,7 @@ func (m MySQL) PodName() string {
32 32
 
33 33
 // IsReady pings the MySQL server.
34 34
 func (m MySQL) IsReady(oc *util.CLI) (bool, error) {
35
-	conf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.podName)
35
+	conf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
36 36
 	if err != nil {
37 37
 		return false, err
38 38
 	}
... ...
@@ -51,11 +51,11 @@ func (m MySQL) IsReady(oc *util.CLI) (bool, error) {
51 51
 
52 52
 // Query executes an SQL query as an ordinary user and returns the result.
53 53
 func (m MySQL) Query(oc *util.CLI, query string) (string, error) {
54
-	container, err := firstContainerName(oc.KubeREST().Pods(oc.Namespace()), m.podName)
54
+	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
55 55
 	if err != nil {
56 56
 		return "", err
57 57
 	}
58
-	masterConf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.masterPodName)
58
+	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
59 59
 	if err != nil {
60 60
 		return "", err
61 61
 	}
... ...
@@ -67,11 +67,11 @@ func (m MySQL) Query(oc *util.CLI, query string) (string, error) {
67 67
 
68 68
 // QueryPrivileged executes an SQL query as a root user and returns the result.
69 69
 func (m MySQL) QueryPrivileged(oc *util.CLI, query string) (string, error) {
70
-	container, err := firstContainerName(oc.KubeREST().Pods(oc.Namespace()), m.podName)
70
+	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
71 71
 	if err != nil {
72 72
 		return "", err
73 73
 	}
74
-	masterConf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.masterPodName)
74
+	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
75 75
 	if err != nil {
76 76
 		return "", err
77 77
 	}
... ...
@@ -82,11 +82,11 @@ func (m MySQL) QueryPrivileged(oc *util.CLI, query string) (string, error) {
82 82
 
83 83
 // TestRemoteLogin will test whether we can login through to a remote database.
84 84
 func (m MySQL) TestRemoteLogin(oc *util.CLI, hostAddress string) error {
85
-	container, err := firstContainerName(oc.KubeREST().Pods(oc.Namespace()), m.podName)
85
+	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
86 86
 	if err != nil {
87 87
 		return err
88 88
 	}
89
-	masterConf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.masterPodName)
89
+	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
90 90
 	if err != nil {
91 91
 		return err
92 92
 	}
... ...
@@ -32,7 +32,7 @@ func (m PostgreSQL) PodName() string {
32 32
 
33 33
 // IsReady pings the PostgreSQL server.
34 34
 func (m PostgreSQL) IsReady(oc *util.CLI) (bool, error) {
35
-	conf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.podName)
35
+	conf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
36 36
 	if err != nil {
37 37
 		return false, err
38 38
 	}
... ...
@@ -51,11 +51,11 @@ func (m PostgreSQL) IsReady(oc *util.CLI) (bool, error) {
51 51
 
52 52
 // Query executes an SQL query as an ordinary user and returns the result.
53 53
 func (m PostgreSQL) Query(oc *util.CLI, query string) (string, error) {
54
-	container, err := firstContainerName(oc.KubeREST().Pods(oc.Namespace()), m.podName)
54
+	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
55 55
 	if err != nil {
56 56
 		return "", err
57 57
 	}
58
-	masterConf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.masterPodName)
58
+	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
59 59
 	if err != nil {
60 60
 		return "", err
61 61
 	}
... ...
@@ -67,11 +67,11 @@ func (m PostgreSQL) Query(oc *util.CLI, query string) (string, error) {
67 67
 
68 68
 // QueryPrivileged executes an SQL query as a root user and returns the result.
69 69
 func (m PostgreSQL) QueryPrivileged(oc *util.CLI, query string) (string, error) {
70
-	container, err := firstContainerName(oc.KubeREST().Pods(oc.Namespace()), m.podName)
70
+	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
71 71
 	if err != nil {
72 72
 		return "", err
73 73
 	}
74
-	masterConf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.masterPodName)
74
+	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
75 75
 	if err != nil {
76 76
 		return "", err
77 77
 	}
... ...
@@ -83,11 +83,11 @@ func (m PostgreSQL) QueryPrivileged(oc *util.CLI, query string) (string, error)
83 83
 
84 84
 // TestRemoteLogin will test whether we can login through to a remote database.
85 85
 func (m PostgreSQL) TestRemoteLogin(oc *util.CLI, hostAddress string) error {
86
-	container, err := firstContainerName(oc.KubeREST().Pods(oc.Namespace()), m.podName)
86
+	container, err := firstContainerName(oc.KubeClient().Core().Pods(oc.Namespace()), m.podName)
87 87
 	if err != nil {
88 88
 		return err
89 89
 	}
90
-	masterConf, err := getPodConfig(oc.KubeREST().Pods(oc.Namespace()), m.masterPodName)
90
+	masterConf, err := getPodConfig(oc.KubeClient().Core().Pods(oc.Namespace()), m.masterPodName)
91 91
 	if err != nil {
92 92
 		return err
93 93
 	}
... ...
@@ -85,7 +85,7 @@ func ListImages() ([]string, error) {
85 85
 //BuildAuthConfiguration constructs a non-standard dockerClient.AuthConfiguration that can be used to communicate with the openshift internal docker registry
86 86
 func BuildAuthConfiguration(credKey string, oc *CLI) (*dockerClient.AuthConfiguration, error) {
87 87
 	authCfg := &dockerClient.AuthConfiguration{}
88
-	secretList, err := oc.AdminKubeREST().Secrets(oc.Namespace()).List(kapi.ListOptions{})
88
+	secretList, err := oc.AdminKubeClient().Core().Secrets(oc.Namespace()).List(kapi.ListOptions{})
89 89
 
90 90
 	g.By(fmt.Sprintf("get secret list err %v ", err))
91 91
 	if err == nil {
... ...
@@ -22,7 +22,9 @@ import (
22 22
 	"k8s.io/kubernetes/pkg/api/unversioned"
23 23
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
24 24
 	"k8s.io/kubernetes/pkg/apis/batch"
25
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
25
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
26
+	kbatchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned"
27
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
26 28
 	"k8s.io/kubernetes/pkg/fields"
27 29
 	"k8s.io/kubernetes/pkg/labels"
28 30
 	"k8s.io/kubernetes/pkg/quota"
... ...
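Note: the single unversioned client import is replaced by the generated clientset plus per-group typed packages, and the helper signatures below narrow from the monolithic *kclient.Client to small typed interfaces such as kcoreclient.ServiceAccountInterface and kbatchclient.JobInterface. A sketch of a polling helper written against one of those typed interfaces, assuming the vendored 1.4-era packages; the package and function names are illustrative.

package example

import (
	"time"

	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForServiceAccount polls until the named service account exists,
// accepting only the narrow typed interface it actually needs.
func waitForServiceAccount(c kcoreclient.ServiceAccountInterface, name string, timeout time.Duration) error {
	return wait.Poll(1*time.Second, timeout, func() (bool, error) {
		if _, err := c.Get(name); err != nil {
			return false, nil // not there yet; keep polling
		}
		return true, nil
	})
}
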
@@ -47,7 +49,7 @@ func WaitForOpenShiftNamespaceImageStreams(oc *CLI) error {
47 47
 	langs := []string{"ruby", "nodejs", "perl", "php", "python", "wildfly", "mysql", "postgresql", "mongodb", "jenkins"}
48 48
 	scan := func() bool {
49 49
 		for _, lang := range langs {
50
-			is, err := oc.REST().ImageStreams("openshift").Get(lang)
50
+			is, err := oc.Client().ImageStreams("openshift").Get(lang)
51 51
 			if err != nil {
52 52
 				return false
53 53
 			}
... ...
@@ -82,7 +84,7 @@ func CheckOpenShiftNamespaceImageStreams(oc *CLI) {
82 82
 	missing := false
83 83
 	langs := []string{"ruby", "nodejs", "perl", "php", "python", "wildfly", "mysql", "postgresql", "mongodb", "jenkins"}
84 84
 	for _, lang := range langs {
85
-		_, err := oc.REST().ImageStreams("openshift").Get(lang)
85
+		_, err := oc.Client().ImageStreams("openshift").Get(lang)
86 86
 		if err != nil {
87 87
 			missing = true
88 88
 			break
... ...
@@ -151,7 +153,7 @@ func DumpBuildLogs(bc string, oc *CLI) {
151 151
 }
152 152
 
153 153
 func GetDeploymentConfigPods(oc *CLI, dcName string) (*kapi.PodList, error) {
154
-	return oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: ParseLabelsOrDie(fmt.Sprintf("deploymentconfig=%s", dcName))})
154
+	return oc.KubeClient().Core().Pods(oc.Namespace()).List(kapi.ListOptions{LabelSelector: ParseLabelsOrDie(fmt.Sprintf("deploymentconfig=%s", dcName))})
155 155
 }
156 156
 
157 157
 // DumpDeploymentLogs will dump the latest deployment logs for a DeploymentConfig for debug purposes
... ...
@@ -415,7 +417,7 @@ func StartBuildAndWait(oc *CLI, args ...string) (result *BuildResult, err error)
415 415
 	buildName := matches[1]
416 416
 
417 417
 	fmt.Fprintf(g.GinkgoWriter, "Waiting for %s to complete\n", buildPath)
418
-	err = WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName,
418
+	err = WaitForABuild(oc.Client().Builds(oc.Namespace()), buildName,
419 419
 		func(b *buildapi.Build) bool {
420 420
 			result.Build = b
421 421
 			result.BuildSuccess = CheckBuildSuccessFn(b)
... ...
@@ -489,7 +491,7 @@ var CheckBuildFailedFn = func(b *buildapi.Build) bool {
489 489
 
490 490
 // WaitForBuilderAccount waits until the builder service account gets fully
491 491
 // provisioned
492
-func WaitForBuilderAccount(c kclient.ServiceAccountsInterface) error {
492
+func WaitForBuilderAccount(c kcoreclient.ServiceAccountInterface) error {
493 493
 	waitFn := func() (bool, error) {
494 494
 		sc, err := c.Get("builder")
495 495
 		if err != nil {
... ...
@@ -569,7 +571,7 @@ func TimedWaitForAnImageStreamTag(oc *CLI, namespace, name, tag string, waitTime
569 569
 	c := make(chan error)
570 570
 	go func() {
571 571
 		err := WaitForAnImageStream(
572
-			oc.REST().ImageStreams(namespace),
572
+			oc.Client().ImageStreams(namespace),
573 573
 			name,
574 574
 			func(is *imageapi.ImageStream) bool {
575 575
 				if history, exists := is.Status.Tags[tag]; !exists || len(history.Items) == 0 {
... ...
@@ -643,7 +645,7 @@ func compareResourceControllerNames(a, b string) int {
643 643
 // When isOK returns true, WaitForADeployment returns nil, when isFailed returns
644 644
 // true, WaitForADeployment returns an error including the deployment status.
645 645
 // WaitForADeployment waits for at most a certain timeout (non-configurable).
646
-func WaitForADeployment(client kclient.ReplicationControllerInterface, name string, isOK, isFailed func(*kapi.ReplicationController) bool, oc *CLI) error {
646
+func WaitForADeployment(client kcoreclient.ReplicationControllerInterface, name string, isOK, isFailed func(*kapi.ReplicationController) bool, oc *CLI) error {
647 647
 	timeout := 15 * time.Minute
648 648
 
649 649
 	// closing done signals that any pending operation should be aborted.
... ...
@@ -755,7 +757,7 @@ func WaitForADeployment(client kclient.ReplicationControllerInterface, name stri
755 755
 }
756 756
 
757 757
 // WaitForADeploymentToComplete waits for a deployment to complete.
758
-func WaitForADeploymentToComplete(client kclient.ReplicationControllerInterface, name string, oc *CLI) error {
758
+func WaitForADeploymentToComplete(client kcoreclient.ReplicationControllerInterface, name string, oc *CLI) error {
759 759
 	return WaitForADeployment(client, name, CheckDeploymentCompletedFn, CheckDeploymentFailedFn, oc)
760 760
 }
761 761
 
... ...
@@ -764,7 +766,7 @@ func WaitForADeploymentToComplete(client kclient.ReplicationControllerInterface,
764 764
 // registry's deployment config will be fetched from etcd.
765 765
 func WaitForRegistry(
766 766
 	dcNamespacer client.DeploymentConfigsNamespacer,
767
-	kubeClient kclient.Interface,
767
+	kubeClient kclientset.Interface,
768 768
 	waitForDCVersion *int64,
769 769
 	oc *CLI,
770 770
 ) error {
... ...
@@ -782,7 +784,7 @@ func WaitForRegistry(
782 782
 	}
783 783
 	fmt.Fprintf(g.GinkgoWriter, "waiting for deployment of version %d to complete\n", latestVersion)
784 784
 
785
-	err := WaitForADeployment(kubeClient.ReplicationControllers(kapi.NamespaceDefault), "docker-registry",
785
+	err := WaitForADeployment(kubeClient.Core().ReplicationControllers(kapi.NamespaceDefault), "docker-registry",
786 786
 		func(rc *kapi.ReplicationController) bool {
787 787
 			if !CheckDeploymentCompletedFn(rc) {
788 788
 				return false
... ...
@@ -810,7 +812,7 @@ func WaitForRegistry(
810 810
 	}
811 811
 
812 812
 	requirement, err := labels.NewRequirement(deployapi.DeploymentLabel, selection.Equals, sets.NewString(fmt.Sprintf("docker-registry-%d", latestVersion)))
813
-	pods, err := WaitForPods(kubeClient.Pods(kapi.NamespaceDefault), labels.NewSelector().Add(*requirement), CheckPodIsReadyFn, 1, time.Minute)
813
+	pods, err := WaitForPods(kubeClient.Core().Pods(kapi.NamespaceDefault), labels.NewSelector().Add(*requirement), CheckPodIsReadyFn, 1, time.Minute)
814 814
 	now := time.Now()
815 815
 	fmt.Fprintf(g.GinkgoWriter, "deployed registry pod %s after %s\n", pods[0], now.Sub(start).String())
816 816
 	return err
... ...
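Note: functions that still need more than one group, like WaitForRegistry above, take the whole kclientset.Interface and drill into a group per call (Core().ReplicationControllers, Core().Pods). A minimal sketch of that access pattern; package and function names are illustrative.

package example

import (
	kapi "k8s.io/kubernetes/pkg/api"
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// controllerExists reaches a replication controller through the core group
// of the full clientset interface.
func controllerExists(c kclientset.Interface, name string) (bool, error) {
	if _, err := c.Core().ReplicationControllers(kapi.NamespaceDefault).Get(name); err != nil {
		return false, err
	}
	return true, nil
}
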
@@ -840,7 +842,7 @@ func isUsageSynced(received, expected kapi.ResourceList, expectedIsUpperLimit bo
840 840
 // or equal to quota's usage, which is useful for expected usage increment. Otherwise expected usage must
841 841
 // compare lower or equal to quota's usage, which is useful for expected usage decrement.
842 842
 func WaitForResourceQuotaSync(
843
-	client kclient.ResourceQuotaInterface,
843
+	client kcoreclient.ResourceQuotaInterface,
844 844
 	name string,
845 845
 	expectedUsage kapi.ResourceList,
846 846
 	expectedIsUpperLimit bool,
... ...
@@ -902,7 +904,7 @@ var CheckDeploymentFailedFn = func(d *kapi.ReplicationController) bool {
902 902
 }
903 903
 
904 904
 // GetPodNamesByFilter looks up pods that satisfy the predicate and returns their names.
905
-func GetPodNamesByFilter(c kclient.PodInterface, label labels.Selector, predicate func(kapi.Pod) bool) (podNames []string, err error) {
905
+func GetPodNamesByFilter(c kcoreclient.PodInterface, label labels.Selector, predicate func(kapi.Pod) bool) (podNames []string, err error) {
906 906
 	podList, err := c.List(kapi.ListOptions{LabelSelector: label})
907 907
 	if err != nil {
908 908
 		return nil, err
... ...
@@ -915,7 +917,7 @@ func GetPodNamesByFilter(c kclient.PodInterface, label labels.Selector, predicat
915 915
 	return podNames, nil
916 916
 }
917 917
 
918
-func WaitForAJob(c kclient.JobInterface, name string, timeout time.Duration) error {
918
+func WaitForAJob(c kbatchclient.JobInterface, name string, timeout time.Duration) error {
919 919
 	return wait.Poll(1*time.Second, timeout, func() (bool, error) {
920 920
 		j, e := c.Get(name)
921 921
 		if e != nil {
... ...
@@ -934,7 +936,7 @@ func WaitForAJob(c kclient.JobInterface, name string, timeout time.Duration) err
934 934
 
935 935
 // WaitForPods waits until given number of pods that match the label selector and
936 936
 // satisfy the predicate are found
937
-func WaitForPods(c kclient.PodInterface, label labels.Selector, predicate func(kapi.Pod) bool, count int, timeout time.Duration) ([]string, error) {
937
+func WaitForPods(c kcoreclient.PodInterface, label labels.Selector, predicate func(kapi.Pod) bool, count int, timeout time.Duration) ([]string, error) {
938 938
 	var podNames []string
939 939
 	err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
940 940
 		p, e := GetPodNamesByFilter(c, label, predicate)
... ...
@@ -975,7 +977,7 @@ var CheckPodIsReadyFn = func(pod kapi.Pod) bool {
975 975
 }
976 976
 
977 977
 // WaitUntilPodIsGone waits until the named Pod will disappear
978
-func WaitUntilPodIsGone(c kclient.PodInterface, podName string, timeout time.Duration) error {
978
+func WaitUntilPodIsGone(c kcoreclient.PodInterface, podName string, timeout time.Duration) error {
979 979
 	return wait.Poll(1*time.Second, timeout, func() (bool, error) {
980 980
 		_, err := c.Get(podName)
981 981
 		if err != nil {
... ...
@@ -1054,7 +1056,7 @@ func CreatePersistentVolume(name, capacity, hostPath string) *kapi.PersistentVol
1054 1054
 }
1055 1055
 
1056 1056
 // SetupHostPathVolumes will create multiple PersistentVolumes with given capacity
1057
-func SetupHostPathVolumes(c kclient.PersistentVolumeInterface, prefix, capacity string, count int) (volumes []*kapi.PersistentVolume, err error) {
1057
+func SetupHostPathVolumes(c kcoreclient.PersistentVolumeInterface, prefix, capacity string, count int) (volumes []*kapi.PersistentVolume, err error) {
1058 1058
 	rootDir, err := ioutil.TempDir(TestContext.OutputDir, "persistent-volumes")
1059 1059
 	if err != nil {
1060 1060
 		return volumes, err
... ...
@@ -1084,7 +1086,7 @@ func SetupHostPathVolumes(c kclient.PersistentVolumeInterface, prefix, capacity
1084 1084
 
1085 1085
 // CleanupHostPathVolumes removes all PersistentVolumes created by
1086 1086
 // SetupHostPathVolumes, with a given prefix
1087
-func CleanupHostPathVolumes(c kclient.PersistentVolumeInterface, prefix string) error {
1087
+func CleanupHostPathVolumes(c kcoreclient.PersistentVolumeInterface, prefix string) error {
1088 1088
 	pvs, err := c.List(kapi.ListOptions{})
1089 1089
 	if err != nil {
1090 1090
 		return err
... ...
@@ -1101,7 +1103,7 @@ func CleanupHostPathVolumes(c kclient.PersistentVolumeInterface, prefix string)
1101 1101
 			continue
1102 1102
 		}
1103 1103
 
1104
-		if err = c.Delete(pv.Name); err != nil {
1104
+		if err = c.Delete(pv.Name, nil); err != nil {
1105 1105
 			fmt.Fprintf(g.GinkgoWriter, "WARNING: couldn't remove PV %s: %v\n", pv.Name, err)
1106 1106
 			continue
1107 1107
 		}
... ...
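Note: the generated clients also change Delete, which now takes delete options in addition to the name; that is why the call above passes nil to keep the defaults. A sketch with illustrative names, using the same vendored typed package.

package example

import (
	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
)

// deletePV removes a persistent volume; the old unversioned client took only
// the name, the generated client adds an options argument (nil = defaults).
func deletePV(c kcoreclient.PersistentVolumeInterface, name string) error {
	return c.Delete(name, nil)
}
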
@@ -1199,7 +1201,7 @@ func GetEndpointAddress(oc *CLI, name string) (string, error) {
1199 1199
 	if err != nil {
1200 1200
 		return "", err
1201 1201
 	}
1202
-	endpoint, err := oc.KubeREST().Endpoints(oc.Namespace()).Get(name)
1202
+	endpoint, err := oc.KubeClient().Core().Endpoints(oc.Namespace()).Get(name)
1203 1203
 	if err != nil {
1204 1204
 		return "", err
1205 1205
 	}
... ...
@@ -11,8 +11,7 @@ import (
11 11
 	kapi "k8s.io/kubernetes/pkg/api"
12 12
 	"k8s.io/kubernetes/pkg/api/meta"
13 13
 	"k8s.io/kubernetes/pkg/api/unversioned"
14
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
15
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
14
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
16 15
 	"k8s.io/kubernetes/pkg/runtime"
17 16
 	kyaml "k8s.io/kubernetes/pkg/util/yaml"
18 17
 
... ...
@@ -32,7 +31,7 @@ type TestPluginConfig struct {
32 32
 
33 33
 func (obj *TestPluginConfig) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
34 34
 
35
-func setupAdmissionTest(t *testing.T, setupConfig func(*configapi.MasterConfig)) (*kclient.Client, *client.Client) {
35
+func setupAdmissionTest(t *testing.T, setupConfig func(*configapi.MasterConfig)) (*kclientset.Clientset, *client.Client) {
36 36
 	testutil.RequireEtcd(t)
37 37
 	masterConfig, err := testserver.DefaultMasterOptions()
38 38
 	if err != nil {
... ...
@@ -87,7 +86,7 @@ func (a *testAdmissionPlugin) Handles(operation admission.Operation) bool {
87 87
 func registerAdmissionPlugins(t *testing.T, names ...string) {
88 88
 	for _, name := range names {
89 89
 		pluginName := name
90
-		admission.RegisterPlugin(pluginName, func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
90
+		admission.RegisterPlugin(pluginName, func(client kclientset.Interface, config io.Reader) (admission.Interface, error) {
91 91
 			plugin := &testAdmissionPlugin{
92 92
 				name: pluginName,
93 93
 			}
... ...
@@ -193,7 +192,7 @@ func TestKubernetesAdmissionPluginOrderOverride(t *testing.T) {
193 193
 		config.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride = []string{"plugin1", "plugin2"}
194 194
 	})
195 195
 
196
-	createdPod, err := kubeClient.Pods(kapi.NamespaceDefault).Create(admissionTestPod())
196
+	createdPod, err := kubeClient.Core().Pods(kapi.NamespaceDefault).Create(admissionTestPod())
197 197
 	if err != nil {
198 198
 		t.Fatalf("Unexpected error creating pod: %v", err)
199 199
 	}
... ...
@@ -215,7 +214,7 @@ func TestKubernetesAdmissionPluginConfigFile(t *testing.T) {
215 215
 			},
216 216
 		}
217 217
 	})
218
-	createdPod, err := kubeClient.Pods(kapi.NamespaceDefault).Create(admissionTestPod())
218
+	createdPod, err := kubeClient.Core().Pods(kapi.NamespaceDefault).Create(admissionTestPod())
219 219
 	if err = checkAdmissionObjectLabelValues(createdPod.Labels, map[string]string{"plugin1": "plugin1configvalue", "plugin2": "default"}); err != nil {
220 220
 		t.Errorf("Error: %v", err)
221 221
 	}
... ...
@@ -235,7 +234,7 @@ func TestKubernetesAdmissionPluginEmbeddedConfig(t *testing.T) {
235 235
 			},
236 236
 		}
237 237
 	})
238
-	createdPod, err := kubeClient.Pods(kapi.NamespaceDefault).Create(admissionTestPod())
238
+	createdPod, err := kubeClient.Core().Pods(kapi.NamespaceDefault).Create(admissionTestPod())
239 239
 	if err = checkAdmissionObjectLabelValues(createdPod.Labels, map[string]string{"plugin1": "embeddedvalue1", "plugin2": "default"}); err != nil {
240 240
 		t.Errorf("Error: %v", err)
241 241
 	}
... ...
@@ -313,18 +312,18 @@ func TestAlwaysPullImagesOn(t *testing.T) {
313 313
 	if err != nil {
314 314
 		t.Fatalf("error starting server: %v", err)
315 315
 	}
316
-	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
316
+	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
317 317
 	if err != nil {
318 318
 		t.Fatalf("error getting client: %v", err)
319 319
 	}
320 320
 
321 321
 	ns := &kapi.Namespace{}
322 322
 	ns.Name = testutil.Namespace()
323
-	_, err = kubeClient.Namespaces().Create(ns)
323
+	_, err = kubeClientset.Core().Namespaces().Create(ns)
324 324
 	if err != nil {
325 325
 		t.Fatalf("error creating namespace: %v", err)
326 326
 	}
327
-	if err := testserver.WaitForPodCreationServiceAccounts(kubeClient, testutil.Namespace()); err != nil {
327
+	if err := testserver.WaitForPodCreationServiceAccounts(kubeClientset, testutil.Namespace()); err != nil {
328 328
 		t.Fatalf("error getting client config: %v", err)
329 329
 	}
330 330
 
... ...
@@ -338,7 +337,7 @@ func TestAlwaysPullImagesOn(t *testing.T) {
338 338
 		},
339 339
 	}
340 340
 
341
-	actualPod, err := kubeClient.Pods(testutil.Namespace()).Create(testPod)
341
+	actualPod, err := kubeClientset.Core().Pods(testutil.Namespace()).Create(testPod)
342 342
 	if err != nil {
343 343
 		t.Fatalf("unexpected error: %v", err)
344 344
 	}
... ...
@@ -355,18 +354,18 @@ func TestAlwaysPullImagesOff(t *testing.T) {
355 355
 	if err != nil {
356 356
 		t.Fatalf("error starting server: %v", err)
357 357
 	}
358
-	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
358
+	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
359 359
 	if err != nil {
360 360
 		t.Fatalf("error getting client: %v", err)
361 361
 	}
362 362
 
363 363
 	ns := &kapi.Namespace{}
364 364
 	ns.Name = testutil.Namespace()
365
-	_, err = kubeClient.Namespaces().Create(ns)
365
+	_, err = kubeClientset.Core().Namespaces().Create(ns)
366 366
 	if err != nil {
367 367
 		t.Fatalf("error creating namespace: %v", err)
368 368
 	}
369
-	if err := testserver.WaitForPodCreationServiceAccounts(kubeClient, testutil.Namespace()); err != nil {
369
+	if err := testserver.WaitForPodCreationServiceAccounts(kubeClientset, testutil.Namespace()); err != nil {
370 370
 		t.Fatalf("error getting client config: %v", err)
371 371
 	}
372 372
 
... ...
@@ -380,7 +379,7 @@ func TestAlwaysPullImagesOff(t *testing.T) {
380 380
 		},
381 381
 	}
382 382
 
383
-	actualPod, err := kubeClient.Pods(testutil.Namespace()).Create(testPod)
383
+	actualPod, err := kubeClientset.Core().Pods(testutil.Namespace()).Create(testPod)
384 384
 	if err != nil {
385 385
 		t.Fatalf("unexpected error: %v", err)
386 386
 	}
... ...
@@ -4,14 +4,14 @@ import (
4 4
 	"testing"
5 5
 
6 6
 	kapi "k8s.io/kubernetes/pkg/api"
7
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
7
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
8 8
 
9 9
 	"github.com/openshift/origin/pkg/client"
10 10
 	testutil "github.com/openshift/origin/test/util"
11 11
 	testserver "github.com/openshift/origin/test/util/server"
12 12
 )
13 13
 
14
-func setupAuditTest(t *testing.T) (*kclient.Client, *client.Client) {
14
+func setupAuditTest(t *testing.T) (*kclientset.Clientset, *client.Client) {
15 15
 	testutil.RequireEtcd(t)
16 16
 	masterConfig, err := testserver.DefaultMasterOptions()
17 17
 	if err != nil {
... ...
@@ -38,7 +38,7 @@ func TestBasicFunctionalityWithAudit(t *testing.T) {
38 38
 	kubeClient, _ := setupAuditTest(t)
39 39
 	defer testutil.DumpEtcdOnFailure(t)
40 40
 
41
-	if _, err := kubeClient.Pods(kapi.NamespaceDefault).Watch(kapi.ListOptions{}); err != nil {
41
+	if _, err := kubeClient.Core().Pods(kapi.NamespaceDefault).Watch(kapi.ListOptions{}); err != nil {
42 42
 		t.Errorf("Unexpected error watching pods: %v", err)
43 43
 	}
44 44
 
... ...
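Note: watches follow the same shape as lists and gets, with the group accessor first, as in the audit-test change above. A small sketch; package and function names are illustrative.

package example

import (
	kapi "k8s.io/kubernetes/pkg/api"
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/watch"
)

// watchDefaultPods opens a watch on pods in the default namespace through
// the core group of the clientset.
func watchDefaultPods(c kclientset.Interface) (watch.Interface, error) {
	return c.Core().Pods(kapi.NamespaceDefault).Watch(kapi.ListOptions{})
}
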
@@ -4,7 +4,7 @@ import (
4 4
 	"testing"
5 5
 
6 6
 	kapi "k8s.io/kubernetes/pkg/api"
7
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
7
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
8 8
 
9 9
 	"github.com/openshift/origin/pkg/client"
10 10
 	"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
... ...
@@ -69,7 +69,7 @@ func TestConcurrentBuildConfigControllers(t *testing.T) {
69 69
 	build.RunBuildConfigChangeControllerTest(t, osClient, kClient)
70 70
 }
71 71
 
72
-func setupBuildControllerTest(counts controllerCount, t *testing.T) (*client.Client, *kclient.Client) {
72
+func setupBuildControllerTest(counts controllerCount, t *testing.T) (*client.Client, *kclientset.Clientset) {
73 73
 	testutil.RequireEtcd(t)
74 74
 	master, clusterAdminKubeConfig, err := testserver.StartTestMaster()
75 75
 	if err != nil {
... ...
@@ -81,18 +81,18 @@ func setupBuildControllerTest(counts controllerCount, t *testing.T) (*client.Cli
81 81
 		t.Fatal(err)
82 82
 	}
83 83
 
84
-	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
84
+	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
85 85
 	if err != nil {
86 86
 		t.Fatal(err)
87 87
 	}
88
-	_, err = clusterAdminKubeClient.Namespaces().Create(&kapi.Namespace{
88
+	_, err = clusterAdminKubeClientset.Core().Namespaces().Create(&kapi.Namespace{
89 89
 		ObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},
90 90
 	})
91 91
 	if err != nil {
92 92
 		t.Fatal(err)
93 93
 	}
94 94
 
95
-	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.BuilderServiceAccountName, bootstrappolicy.DefaultServiceAccountName}); err != nil {
95
+	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClientset, testutil.Namespace(), []string{bootstrappolicy.BuilderServiceAccountName, bootstrappolicy.DefaultServiceAccountName}); err != nil {
96 96
 		t.Fatalf("unexpected error: %v", err)
97 97
 	}
98 98
 
... ...
@@ -117,5 +117,5 @@ func setupBuildControllerTest(counts controllerCount, t *testing.T) (*client.Cli
117 117
 	for i := 0; i < counts.ConfigChangeControllers; i++ {
118 118
 		openshiftConfig.RunBuildConfigChangeController()
119 119
 	}
120
-	return clusterAdminClient, clusterAdminKubeClient
120
+	return clusterAdminClient, clusterAdminKubeClientset
121 121
 }
... ...
@@ -7,7 +7,7 @@ import (
7 7
 	"time"
8 8
 
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11 11
 	"k8s.io/kubernetes/pkg/fields"
12 12
 	watchapi "k8s.io/kubernetes/pkg/watch"
13 13
 
... ...
@@ -27,10 +27,10 @@ var buildPodAdmissionTestTimeout time.Duration = 30 * time.Second
27 27
 func TestBuildDefaultGitHTTPProxy(t *testing.T) {
28 28
 	defer testutil.DumpEtcdOnFailure(t)
29 29
 	httpProxy := "http://my.test.proxy:12345"
30
-	oclient, kclient := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
30
+	oclient, kclientset := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
31 31
 		GitHTTPProxy: httpProxy,
32 32
 	})
33
-	build, _ := runBuildPodAdmissionTest(t, oclient, kclient, buildPodAdmissionTestDockerBuild())
33
+	build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
34 34
 	if actual := build.Spec.Source.Git.HTTPProxy; actual == nil || *actual != httpProxy {
35 35
 		t.Errorf("Resulting build did not get expected HTTP proxy: %v", actual)
36 36
 	}
... ...
@@ -39,10 +39,10 @@ func TestBuildDefaultGitHTTPProxy(t *testing.T) {
39 39
 func TestBuildDefaultGitHTTPSProxy(t *testing.T) {
40 40
 	defer testutil.DumpEtcdOnFailure(t)
41 41
 	httpsProxy := "https://my.test.proxy:12345"
42
-	oclient, kclient := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
42
+	oclient, kclientset := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
43 43
 		GitHTTPSProxy: httpsProxy,
44 44
 	})
45
-	build, _ := runBuildPodAdmissionTest(t, oclient, kclient, buildPodAdmissionTestDockerBuild())
45
+	build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
46 46
 	if actual := build.Spec.Source.Git.HTTPSProxy; actual == nil || *actual != httpsProxy {
47 47
 		t.Errorf("Resulting build did not get expected HTTPS proxy: %v", actual)
48 48
 	}
... ...
@@ -60,10 +60,10 @@ func TestBuildDefaultEnvironment(t *testing.T) {
60 60
 			Value: "VALUE2",
61 61
 		},
62 62
 	}
63
-	oclient, kclient := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
63
+	oclient, kclientset := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
64 64
 		Env: env,
65 65
 	})
66
-	build, _ := runBuildPodAdmissionTest(t, oclient, kclient, buildPodAdmissionTestDockerBuild())
66
+	build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
67 67
 	if actual := build.Spec.Strategy.DockerStrategy.Env; !reflect.DeepEqual(env, actual) {
68 68
 		t.Errorf("Resulting build did not get expected environment: %v", actual)
69 69
 	}
... ...
@@ -72,10 +72,10 @@ func TestBuildDefaultEnvironment(t *testing.T) {
72 72
 func TestBuildDefaultLabels(t *testing.T) {
73 73
 	defer testutil.DumpEtcdOnFailure(t)
74 74
 	labels := []buildapi.ImageLabel{{Name: "KEY", Value: "VALUE"}}
75
-	oclient, kclient := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
75
+	oclient, kclientset := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
76 76
 		ImageLabels: labels,
77 77
 	})
78
-	build, _ := runBuildPodAdmissionTest(t, oclient, kclient, buildPodAdmissionTestDockerBuild())
78
+	build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
79 79
 	if actual := build.Spec.Output.ImageLabels; !reflect.DeepEqual(labels, actual) {
80 80
 		t.Errorf("Resulting build did not get expected labels: %v", actual)
81 81
 	}
... ...
@@ -84,10 +84,10 @@ func TestBuildDefaultLabels(t *testing.T) {
84 84
 func TestBuildDefaultNodeSelectors(t *testing.T) {
85 85
 	defer testutil.DumpEtcdOnFailure(t)
86 86
 	selectors := map[string]string{"KEY": "VALUE"}
87
-	oclient, kclient := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
87
+	oclient, kclientset := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
88 88
 		NodeSelector: selectors,
89 89
 	})
90
-	_, pod := runBuildPodAdmissionTest(t, oclient, kclient, buildPodAdmissionTestDockerBuild())
90
+	_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
91 91
 	if actual := pod.Spec.NodeSelector; !reflect.DeepEqual(selectors, actual) {
92 92
 		t.Errorf("Resulting pod did not get expected nodeselectors: %v", actual)
93 93
 	}
... ...
@@ -96,10 +96,10 @@ func TestBuildDefaultNodeSelectors(t *testing.T) {
96 96
 func TestBuildDefaultAnnotations(t *testing.T) {
97 97
 	defer testutil.DumpEtcdOnFailure(t)
98 98
 	annotations := map[string]string{"KEY": "VALUE"}
99
-	oclient, kclient := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
99
+	oclient, kclientset := setupBuildDefaultsAdmissionTest(t, &defaultsapi.BuildDefaultsConfig{
100 100
 		Annotations: annotations,
101 101
 	})
102
-	_, pod := runBuildPodAdmissionTest(t, oclient, kclient, buildPodAdmissionTestDockerBuild())
102
+	_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
103 103
 	if actual := pod.Annotations; strings.Compare(actual["KEY"], annotations["KEY"]) != 0 {
104 104
 		t.Errorf("Resulting pod did not get expected annotations: actual: %v, expected: %v", actual["KEY"], annotations["KEY"])
105 105
 	}
... ...
@@ -107,10 +107,10 @@ func TestBuildDefaultAnnotations(t *testing.T) {
107 107
 
108 108
 func TestBuildOverrideForcePull(t *testing.T) {
109 109
 	defer testutil.DumpEtcdOnFailure(t)
110
-	oclient, kclient := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
110
+	oclient, kclientset := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
111 111
 		ForcePull: true,
112 112
 	})
113
-	build, _ := runBuildPodAdmissionTest(t, oclient, kclient, buildPodAdmissionTestDockerBuild())
113
+	build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
114 114
 	if !build.Spec.Strategy.DockerStrategy.ForcePull {
115 115
 		t.Errorf("ForcePull was not set on resulting build")
116 116
 	}
... ...
@@ -118,10 +118,10 @@ func TestBuildOverrideForcePull(t *testing.T) {
118 118
 
119 119
 func TestBuildOverrideForcePullCustomStrategy(t *testing.T) {
120 120
 	defer testutil.DumpEtcdOnFailure(t)
121
-	oclient, kclient := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
121
+	oclient, kclientset := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
122 122
 		ForcePull: true,
123 123
 	})
124
-	build, pod := runBuildPodAdmissionTest(t, oclient, kclient, buildPodAdmissionTestCustomBuild())
124
+	build, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestCustomBuild())
125 125
 	if pod.Spec.Containers[0].ImagePullPolicy != kapi.PullAlways {
126 126
 		t.Errorf("Pod ImagePullPolicy is not PullAlways")
127 127
 	}
... ...
@@ -133,10 +133,10 @@ func TestBuildOverrideForcePullCustomStrategy(t *testing.T) {
133 133
 func TestBuildOverrideLabels(t *testing.T) {
134 134
 	defer testutil.DumpEtcdOnFailure(t)
135 135
 	labels := []buildapi.ImageLabel{{Name: "KEY", Value: "VALUE"}}
136
-	oclient, kclient := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
136
+	oclient, kclientset := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
137 137
 		ImageLabels: labels,
138 138
 	})
139
-	build, _ := runBuildPodAdmissionTest(t, oclient, kclient, buildPodAdmissionTestDockerBuild())
139
+	build, _ := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
140 140
 	if actual := build.Spec.Output.ImageLabels; !reflect.DeepEqual(labels, actual) {
141 141
 		t.Errorf("Resulting build did not get expected labels: %v", actual)
142 142
 	}
... ...
@@ -145,10 +145,10 @@ func TestBuildOverrideLabels(t *testing.T) {
145 145
 func TestBuildOverrideNodeSelectors(t *testing.T) {
146 146
 	defer testutil.DumpEtcdOnFailure(t)
147 147
 	selectors := map[string]string{"KEY": "VALUE"}
148
-	oclient, kclient := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
148
+	oclient, kclientset := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
149 149
 		NodeSelector: selectors,
150 150
 	})
151
-	_, pod := runBuildPodAdmissionTest(t, oclient, kclient, buildPodAdmissionTestDockerBuild())
151
+	_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
152 152
 	if actual := pod.Spec.NodeSelector; !reflect.DeepEqual(selectors, actual) {
153 153
 		t.Errorf("Resulting build did not get expected nodeselectors: %v", actual)
154 154
 	}
... ...
@@ -157,10 +157,10 @@ func TestBuildOverrideNodeSelectors(t *testing.T) {
157 157
 func TestBuildOverrideAnnotations(t *testing.T) {
158 158
 	defer testutil.DumpEtcdOnFailure(t)
159 159
 	annotations := map[string]string{"KEY": "VALUE"}
160
-	oclient, kclient := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
160
+	oclient, kclientset := setupBuildOverridesAdmissionTest(t, &overridesapi.BuildOverridesConfig{
161 161
 		Annotations: annotations,
162 162
 	})
163
-	_, pod := runBuildPodAdmissionTest(t, oclient, kclient, buildPodAdmissionTestDockerBuild())
163
+	_, pod := runBuildPodAdmissionTest(t, oclient, kclientset, buildPodAdmissionTestDockerBuild())
164 164
 	if actual := pod.Annotations; strings.Compare(actual["KEY"], annotations["KEY"]) != 0 {
165 165
 		t.Errorf("Resulting build did not get expected annotations: %v", actual)
166 166
 	}
... ...
@@ -194,7 +194,7 @@ func buildPodAdmissionTestDockerBuild() *buildapi.Build {
194 194
 	return build
195 195
 }
196 196
 
197
-func runBuildPodAdmissionTest(t *testing.T, client *client.Client, kclient *kclient.Client, build *buildapi.Build) (*buildapi.Build, *kapi.Pod) {
197
+func runBuildPodAdmissionTest(t *testing.T, client *client.Client, kclientset *kclientset.Clientset, build *buildapi.Build) (*buildapi.Build, *kapi.Pod) {
198 198
 
199 199
 	ns := testutil.Namespace()
200 200
 	_, err := client.Builds(ns).Create(build)
... ...
@@ -208,7 +208,7 @@ func runBuildPodAdmissionTest(t *testing.T, client *client.Client, kclient *kcli
208 208
 			buildapi.GetBuildPodName(build),
209 209
 		),
210 210
 	}
211
-	podWatch, err := kclient.Pods(ns).Watch(watchOpt)
211
+	podWatch, err := kclientset.Core().Pods(ns).Watch(watchOpt)
212 212
 	if err != nil {
213 213
 		t.Fatalf("%v", err)
214 214
 	}
... ...
@@ -240,7 +240,7 @@ func runBuildPodAdmissionTest(t *testing.T, client *client.Client, kclient *kcli
240 240
 	return nil, nil
241 241
 }
242 242
 
243
-func setupBuildDefaultsAdmissionTest(t *testing.T, defaultsConfig *defaultsapi.BuildDefaultsConfig) (*client.Client, *kclient.Client) {
243
+func setupBuildDefaultsAdmissionTest(t *testing.T, defaultsConfig *defaultsapi.BuildDefaultsConfig) (*client.Client, *kclientset.Clientset) {
244 244
 	return setupBuildPodAdmissionTest(t, map[string]configapi.AdmissionPluginConfig{
245 245
 		"BuildDefaults": {
246 246
 			Configuration: defaultsConfig,
... ...
@@ -248,7 +248,7 @@ func setupBuildDefaultsAdmissionTest(t *testing.T, defaultsConfig *defaultsapi.B
248 248
 	})
249 249
 }
250 250
 
251
-func setupBuildOverridesAdmissionTest(t *testing.T, overridesConfig *overridesapi.BuildOverridesConfig) (*client.Client, *kclient.Client) {
251
+func setupBuildOverridesAdmissionTest(t *testing.T, overridesConfig *overridesapi.BuildOverridesConfig) (*client.Client, *kclientset.Clientset) {
252 252
 	return setupBuildPodAdmissionTest(t, map[string]configapi.AdmissionPluginConfig{
253 253
 		"BuildOverrides": {
254 254
 			Configuration: overridesConfig,
... ...
@@ -256,7 +256,7 @@ func setupBuildOverridesAdmissionTest(t *testing.T, overridesConfig *overridesap
256 256
 	})
257 257
 }
258 258
 
259
-func setupBuildPodAdmissionTest(t *testing.T, pluginConfig map[string]configapi.AdmissionPluginConfig) (*client.Client, *kclient.Client) {
259
+func setupBuildPodAdmissionTest(t *testing.T, pluginConfig map[string]configapi.AdmissionPluginConfig) (*client.Client, *kclientset.Clientset) {
260 260
 	testutil.RequireEtcd(t)
261 261
 	master, err := testserver.DefaultMasterOptions()
262 262
 	if err != nil {
... ...
@@ -272,12 +272,12 @@ func setupBuildPodAdmissionTest(t *testing.T, pluginConfig map[string]configapi.
272 272
 		t.Fatalf("%v", err)
273 273
 	}
274 274
 
275
-	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
275
+	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
276 276
 	if err != nil {
277 277
 		t.Fatalf("%v", err)
278 278
 	}
279 279
 
280
-	_, err = clusterAdminKubeClient.Namespaces().Create(&kapi.Namespace{
280
+	_, err = clusterAdminKubeClientset.Namespaces().Create(&kapi.Namespace{
281 281
 		ObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},
282 282
 	})
283 283
 	if err != nil {
... ...
@@ -285,7 +285,7 @@ func setupBuildPodAdmissionTest(t *testing.T, pluginConfig map[string]configapi.
285 285
 	}
286 286
 
287 287
 	err = testserver.WaitForServiceAccounts(
288
-		clusterAdminKubeClient,
288
+		clusterAdminKubeClientset,
289 289
 		testutil.Namespace(),
290 290
 		[]string{
291 291
 			bootstrappolicy.BuilderServiceAccountName,
... ...
@@ -295,5 +295,5 @@ func setupBuildPodAdmissionTest(t *testing.T, pluginConfig map[string]configapi.
295 295
 		t.Fatalf("%v", err)
296 296
 	}
297 297
 
298
-	return clusterAdminClient, clusterAdminKubeClient
298
+	return clusterAdminClient, clusterAdminKubeClientset
299 299
 }
... ...
@@ -9,7 +9,7 @@ import (
9 9
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
10 10
 	"k8s.io/kubernetes/pkg/api/resource"
11 11
 	"k8s.io/kubernetes/pkg/api/unversioned"
12
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
12
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
13 13
 	utilwait "k8s.io/kubernetes/pkg/util/wait"
14 14
 
15 15
 	"github.com/openshift/origin/pkg/client"
... ...
@@ -66,10 +66,10 @@ func TestClusterQuota(t *testing.T) {
66 66
 		t.Fatalf("unexpected error: %v", err)
67 67
 	}
68 68
 
69
-	if err := labelNamespace(clusterAdminKubeClient, "first"); err != nil {
69
+	if err := labelNamespace(clusterAdminKubeClient.Core(), "first"); err != nil {
70 70
 		t.Fatalf("unexpected error: %v", err)
71 71
 	}
72
-	if err := labelNamespace(clusterAdminKubeClient, "second"); err != nil {
72
+	if err := labelNamespace(clusterAdminKubeClient.Core(), "second"); err != nil {
73 73
 		t.Fatalf("unexpected error: %v", err)
74 74
 	}
75 75
 	if err := waitForQuotaLabeling(clusterAdminClient, "first"); err != nil {
... ...
@@ -81,19 +81,19 @@ func TestClusterQuota(t *testing.T) {
81 81
 
82 82
 	configmap := &kapi.ConfigMap{}
83 83
 	configmap.GenerateName = "test"
84
-	if _, err := clusterAdminKubeClient.ConfigMaps("first").Create(configmap); err != nil {
84
+	if _, err := clusterAdminKubeClient.Core().ConfigMaps("first").Create(configmap); err != nil {
85 85
 		t.Fatalf("unexpected error: %v", err)
86 86
 	}
87
-	if _, err := clusterAdminKubeClient.ConfigMaps("second").Create(configmap); err != nil {
87
+	if _, err := clusterAdminKubeClient.Core().ConfigMaps("second").Create(configmap); err != nil {
88 88
 		t.Fatalf("unexpected error: %v", err)
89 89
 	}
90
-	if _, err := clusterAdminKubeClient.ConfigMaps("second").Create(configmap); !kapierrors.IsForbidden(err) {
90
+	if _, err := clusterAdminKubeClient.Core().ConfigMaps("second").Create(configmap); !kapierrors.IsForbidden(err) {
91 91
 		list, err := clusterAdminClient.AppliedClusterResourceQuotas("second").List(kapi.ListOptions{})
92 92
 		if err == nil {
93 93
 			t.Errorf("quota is %#v", list)
94 94
 		}
95 95
 
96
-		list2, err := clusterAdminKubeClient.ConfigMaps("").List(kapi.ListOptions{})
96
+		list2, err := clusterAdminKubeClient.Core().ConfigMaps("").List(kapi.ListOptions{})
97 97
 		if err == nil {
98 98
 			t.Errorf("ConfigMaps is %#v", list2)
99 99
 		}
... ...
@@ -135,7 +135,7 @@ func waitForQuotaLabeling(clusterAdminClient client.AppliedClusterResourceQuotas
135 135
 	})
136 136
 }
137 137
 
138
-func labelNamespace(clusterAdminKubeClient kclient.NamespacesInterface, namespaceName string) error {
138
+func labelNamespace(clusterAdminKubeClient kcoreclient.NamespacesGetter, namespaceName string) error {
139 139
 	ns1, err := clusterAdminKubeClient.Namespaces().Get(namespaceName)
140 140
 	if err != nil {
141 141
 		return err
... ...
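Note: labelNamespace above now accepts the narrow kcoreclient.NamespacesGetter, and the callers pass clusterAdminKubeClient.Core(). A sketch of another helper written against the same getter interface; names are illustrative.

package example

import (
	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
)

// namespaceExists looks a namespace up through whatever implements the
// NamespacesGetter interface, e.g. clusterAdminKubeClient.Core().
func namespaceExists(c kcoreclient.NamespacesGetter, name string) (bool, error) {
	if _, err := c.Namespaces().Get(name); err != nil {
		return false, err
	}
	return true, nil
}
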
@@ -3,15 +3,16 @@ package integration
3 3
 import (
4 4
 	"testing"
5 5
 
6
+	kapi "k8s.io/kubernetes/pkg/api"
7
+	apierrors "k8s.io/kubernetes/pkg/api/errors"
8
+	"k8s.io/kubernetes/pkg/api/resource"
9
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
10
+
6 11
 	"github.com/openshift/origin/pkg/cmd/server/api"
7 12
 	"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
8 13
 	overrideapi "github.com/openshift/origin/pkg/quota/admission/clusterresourceoverride/api"
9 14
 	testutil "github.com/openshift/origin/test/util"
10 15
 	testserver "github.com/openshift/origin/test/util/server"
11
-	kapi "k8s.io/kubernetes/pkg/api"
12
-	apierrors "k8s.io/kubernetes/pkg/api/errors"
13
-	"k8s.io/kubernetes/pkg/api/resource"
14
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
15 16
 
16 17
 	_ "github.com/openshift/origin/pkg/quota/admission/clusterresourceoverride/api/install"
17 18
 )
... ...
@@ -23,8 +24,8 @@ func TestClusterResourceOverridePluginWithNoLimits(t *testing.T) {
23 23
 		CPURequestToLimitPercent:    50,
24 24
 		MemoryRequestToLimitPercent: 50,
25 25
 	}
26
-	kubeClient := setupClusterResourceOverrideTest(t, config)
27
-	podHandler := kubeClient.Pods(testutil.Namespace())
26
+	kubeClientset := setupClusterResourceOverrideTest(t, config)
27
+	podHandler := kubeClientset.Core().Pods(testutil.Namespace())
28 28
 
29 29
 	// test with no limits object present
30 30
 
... ...
@@ -50,9 +51,9 @@ func TestClusterResourceOverridePluginWithLimits(t *testing.T) {
50 50
 		CPURequestToLimitPercent:    50,
51 51
 		MemoryRequestToLimitPercent: 50,
52 52
 	}
53
-	kubeClient := setupClusterResourceOverrideTest(t, config)
54
-	podHandler := kubeClient.Pods(testutil.Namespace())
55
-	limitHandler := kubeClient.LimitRanges(testutil.Namespace())
53
+	kubeClientset := setupClusterResourceOverrideTest(t, config)
54
+	podHandler := kubeClientset.Core().Pods(testutil.Namespace())
55
+	limitHandler := kubeClientset.Core().LimitRanges(testutil.Namespace())
56 56
 
57 57
 	// test with limits object with defaults;
58 58
 	// I wanted to test with a limits object without defaults to see limits forbid an empty resource spec,
... ...
@@ -100,7 +101,7 @@ func TestClusterResourceOverridePluginWithLimits(t *testing.T) {
100 100
 	}
101 101
 }
102 102
 
103
-func setupClusterResourceOverrideTest(t *testing.T, pluginConfig *overrideapi.ClusterResourceOverrideConfig) kclient.Interface {
103
+func setupClusterResourceOverrideTest(t *testing.T, pluginConfig *overrideapi.ClusterResourceOverrideConfig) kclientset.Interface {
104 104
 	testutil.RequireEtcd(t)
105 105
 	masterConfig, err := testserver.DefaultMasterOptions()
106 106
 	if err != nil {
... ...
@@ -123,7 +124,7 @@ func setupClusterResourceOverrideTest(t *testing.T, pluginConfig *overrideapi.Cl
123 123
 	if err != nil {
124 124
 		t.Fatal(err)
125 125
 	}
126
-	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
126
+	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
127 127
 	if err != nil {
128 128
 		t.Fatal(err)
129 129
 	}
... ...
@@ -140,10 +141,10 @@ func setupClusterResourceOverrideTest(t *testing.T, pluginConfig *overrideapi.Cl
140 140
 	if err != nil {
141 141
 		t.Fatal(err)
142 142
 	}
143
-	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
143
+	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClientset, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
144 144
 		t.Fatal(err)
145 145
 	}
146
-	return clusterAdminKubeClient
146
+	return clusterAdminKubeClientset
147 147
 }
148 148
 
149 149
 func testClusterResourceOverridePod(name string, memory string, cpu string) *kapi.Pod {
... ...
@@ -40,6 +40,7 @@ func TestDeployScale(t *testing.T) {
40 40
 	}
41 41
 
42 42
 	config := deploytest.OkDeploymentConfig(0)
43
+	config.Namespace = namespace
43 44
 	config.Spec.Triggers = []deployapi.DeploymentTriggerPolicy{}
44 45
 	config.Spec.Replicas = 1
45 46
 
... ...
@@ -27,7 +27,7 @@ func TestDiagNodeConditions(t *testing.T) {
27 27
 	}
28 28
 	nodeDiag := clusterdiags.NodeDefinitions{KubeClient: client}
29 29
 	err = wait.Poll(200*time.Millisecond, 5*time.Second, func() (bool, error) {
30
-		if _, err := client.Nodes().Get(nodeConfig.NodeName); kapierror.IsNotFound(err) {
30
+		if _, err := client.Core().Nodes().Get(nodeConfig.NodeName); kapierror.IsNotFound(err) {
31 31
 			return false, nil
32 32
 		}
33 33
 		return true, err
... ...
@@ -46,12 +46,12 @@ func TestDiagNodeConditions(t *testing.T) {
46 46
 
47 47
 	// Make the node unschedulable and verify diagnostics notices
48 48
 	err = wait.Poll(200*time.Millisecond, time.Second, func() (bool, error) {
49
-		node, err := client.Nodes().Get(nodeConfig.NodeName)
49
+		node, err := client.Core().Nodes().Get(nodeConfig.NodeName)
50 50
 		if err != nil {
51 51
 			return false, err
52 52
 		}
53 53
 		node.Spec.Unschedulable = true
54
-		if _, err := client.Nodes().Update(node); kapierror.IsConflict(err) {
54
+		if _, err := client.Core().Nodes().Update(node); kapierror.IsConflict(err) {
55 55
 			return false, nil
56 56
 		}
57 57
 		return true, err
... ...
@@ -68,7 +68,7 @@ func TestDiagNodeConditions(t *testing.T) {
68 68
 	}
69 69
 
70 70
 	// delete it and check with no nodes defined; should get an error about that.
71
-	if err := client.Nodes().Delete(nodeConfig.NodeName); err != nil {
71
+	if err := client.Core().Nodes().Delete(nodeConfig.NodeName, nil); err != nil {
72 72
 		t.Errorf("unexpected error deleting node: %v", err)
73 73
 	}
74 74
 	if errors := nodeDiag.Check().Errors(); len(errors) != 1 ||
... ...
@@ -78,7 +78,7 @@ func TestDiagNodeConditions(t *testing.T) {
78 78
 
79 79
 	// Next create a node and leave it in NotReady state. Should get a warning
80 80
 	// about that, plus the previous error as there are still no nodes available.
81
-	_, err = client.Nodes().Create(&kapi.Node{ObjectMeta: kapi.ObjectMeta{Name: "test-node"}})
81
+	_, err = client.Core().Nodes().Create(&kapi.Node{ObjectMeta: kapi.ObjectMeta{Name: "test-node"}})
82 82
 	if err != nil {
83 83
 		t.Fatalf("expected no errors creating a node: %#v", err)
84 84
 	}
... ...
@@ -80,7 +80,7 @@ func TestDNS(t *testing.T) {
80 80
 	if err != nil {
81 81
 		t.Fatalf("unexpected error: %v", err)
82 82
 	}
83
-	kubernetesService, err := client.Services(kapi.NamespaceDefault).Get("kubernetes")
83
+	kubernetesService, err := client.Core().Services(kapi.NamespaceDefault).Get("kubernetes")
84 84
 	if err != nil {
85 85
 		t.Fatalf("unexpected error: %v", err)
86 86
 	}
... ...
@@ -96,7 +96,7 @@ func TestDNS(t *testing.T) {
96 96
 	}
97 97
 
98 98
 	for {
99
-		if _, err := client.Services(kapi.NamespaceDefault).Create(&kapi.Service{
99
+		if _, err := client.Core().Services(kapi.NamespaceDefault).Create(&kapi.Service{
100 100
 			ObjectMeta: kapi.ObjectMeta{
101 101
 				Name: "headless",
102 102
 			},
... ...
@@ -112,7 +112,7 @@ func TestDNS(t *testing.T) {
112 112
 			}
113 113
 			t.Fatalf("unexpected error: %v", err)
114 114
 		}
115
-		if _, err := client.Endpoints(kapi.NamespaceDefault).Create(&kapi.Endpoints{
115
+		if _, err := client.Core().Endpoints(kapi.NamespaceDefault).Create(&kapi.Endpoints{
116 116
 			ObjectMeta: kapi.ObjectMeta{
117 117
 				Name: "headless",
118 118
 			},
... ...
@@ -130,7 +130,7 @@ func TestDNS(t *testing.T) {
130 130
 	headlessIP := net.ParseIP("172.0.0.1")
131 131
 	headlessIPHash := getHash(headlessIP.String())
132 132
 
133
-	if _, err := client.Services(kapi.NamespaceDefault).Create(&kapi.Service{
133
+	if _, err := client.Core().Services(kapi.NamespaceDefault).Create(&kapi.Service{
134 134
 		ObjectMeta: kapi.ObjectMeta{
135 135
 			Name: "headless2",
136 136
 		},
... ...
@@ -141,7 +141,7 @@ func TestDNS(t *testing.T) {
141 141
 	}); err != nil {
142 142
 		t.Fatalf("unexpected error: %v", err)
143 143
 	}
144
-	if _, err := client.Endpoints(kapi.NamespaceDefault).Create(&kapi.Endpoints{
144
+	if _, err := client.Core().Endpoints(kapi.NamespaceDefault).Create(&kapi.Endpoints{
145 145
 		ObjectMeta: kapi.ObjectMeta{
146 146
 			Name: "headless2",
147 147
 		},
... ...
@@ -4,7 +4,7 @@ import (
4 4
 	"testing"
5 5
 
6 6
 	kapi "k8s.io/kubernetes/pkg/api"
7
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
7
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
8 8
 
9 9
 	configapi "github.com/openshift/origin/pkg/cmd/server/api"
10 10
 	"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
... ...
@@ -24,7 +24,7 @@ var exampleAddresses = map[string]string{
24 24
 	"external": "1.2.3.4",
25 25
 }
26 26
 
27
-func testOne(t *testing.T, client *kclient.Client, namespace, addrType string, success bool) *kapi.Endpoints {
27
+func testOne(t *testing.T, client kclientset.Interface, namespace, addrType string, success bool) *kapi.Endpoints {
28 28
 	testEndpoint := &kapi.Endpoints{}
29 29
 	testEndpoint.GenerateName = "test"
30 30
 	testEndpoint.Subsets = []kapi.EndpointSubset{
... ...
@@ -43,7 +43,7 @@ func testOne(t *testing.T, client *kclient.Client, namespace, addrType string, s
43 43
 		},
44 44
 	}
45 45
 
46
-	ep, err := client.Endpoints(namespace).Create(testEndpoint)
46
+	ep, err := client.Core().Endpoints(namespace).Create(testEndpoint)
47 47
 	if err != nil && success {
48 48
 		t.Fatalf("unexpected error creating %s network endpoint: %v", addrType, err)
49 49
 	} else if err == nil && !success {
... ...
@@ -115,12 +115,12 @@ func TestEndpointAdmission(t *testing.T) {
115 115
 	// User without restricted endpoint permission can't modify IPs but can still do other modifications
116 116
 	ep := testOne(t, clusterAdminKubeClient, "myproject", "cluster", true)
117 117
 	ep.Annotations = map[string]string{"foo": "bar"}
118
-	ep, err = projectAdminClient.Endpoints("myproject").Update(ep)
118
+	ep, err = projectAdminClient.Core().Endpoints("myproject").Update(ep)
119 119
 	if err != nil {
120 120
 		t.Fatalf("unexpected error updating endpoint annotation: %v", err)
121 121
 	}
122 122
 	ep.Subsets[0].Addresses[0].IP = exampleAddresses["service"]
123
-	ep, err = projectAdminClient.Endpoints("myproject").Update(ep)
123
+	ep, err = projectAdminClient.Core().Endpoints("myproject").Update(ep)
124 124
 	if err == nil {
125 125
 		t.Fatalf("unexpected success modifying endpoint")
126 126
 	}
... ...
@@ -78,7 +78,7 @@ func TestExtensionsAPIDeletion(t *testing.T) {
78 78
 			},
79 79
 		},
80 80
 	}
81
-	if _, err := projectAdminKubeClient.Extensions().Jobs(projName).Create(&job); err != nil {
81
+	if _, err := projectAdminKubeClient.Batch().Jobs(projName).Create(&job); err != nil {
82 82
 		t.Fatalf("unexpected error creating the job object: %v", err)
83 83
 	}
84 84
 
... ...
@@ -86,7 +86,7 @@ func TestExtensionsAPIDeletion(t *testing.T) {
86 86
 		t.Fatalf("unexpected error deleting the project: %v", err)
87 87
 	}
88 88
 	err = wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
89
-		_, err := clusterAdminKubeClient.Namespaces().Get(projName)
89
+		_, err := clusterAdminKubeClient.Core().Namespaces().Get(projName)
90 90
 		if errors.IsNotFound(err) {
91 91
 			return true, nil
92 92
 		}
... ...
@@ -101,7 +101,7 @@ func TestExtensionsAPIDeletion(t *testing.T) {
101 101
 	} else if !errors.IsNotFound(err) {
102 102
 		t.Fatalf("Error trying to get deleted HPA object (not a not-found error): %v", err)
103 103
 	}
104
-	if _, err := clusterAdminKubeClient.Extensions().Jobs(projName).Get(job.Name); err == nil {
104
+	if _, err := clusterAdminKubeClient.Batch().Jobs(projName).Get(job.Name); err == nil {
105 105
 		t.Fatalf("Job object was still present after project was deleted!")
106 106
 	} else if !errors.IsNotFound(err) {
107 107
 		t.Fatalf("Error trying to get deleted Job object (not a not-found error): %v", err)
... ...
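Note: with the generated clientset, Jobs are reached through the Batch group accessor rather than Extensions, as in the hunks above. A sketch of a job lookup under that layout; package and function names are illustrative.

package example

import (
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// jobExists reaches Jobs through the batch group of the clientset; the old
// client exposed them under Extensions().
func jobExists(c kclientset.Interface, namespace, name string) (bool, error) {
	if _, err := c.Batch().Jobs(namespace).Get(name); err != nil {
		return false, err
	}
	return true, nil
}
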
@@ -8,7 +8,7 @@ import (
8 8
 	"time"
9 9
 
10 10
 	kapi "k8s.io/kubernetes/pkg/api"
11
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
11
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
12 12
 	"k8s.io/kubernetes/pkg/probe"
13 13
 	httpprobe "k8s.io/kubernetes/pkg/probe/http"
14 14
 	"k8s.io/kubernetes/pkg/watch"
... ...
@@ -98,7 +98,7 @@ func healthzProxyTest(masterConfig *configapi.MasterConfig, t *testing.T) {
98 98
 	}
99 99
 }
100 100
 
101
-func watchProxyTest(cluster1AdminKubeClient, cluster2AdminKubeClient *kclient.Client, t *testing.T) {
101
+func watchProxyTest(cluster1AdminKubeClient, cluster2AdminKubeClient *kclientset.Clientset, t *testing.T) {
102 102
 	// list namespaces in order to determine correct resourceVersion
103 103
 	namespaces, err := cluster1AdminKubeClient.Namespaces().List(kapi.ListOptions{})
104 104
 
... ...
@@ -445,7 +445,7 @@ func TestImageStreamImportAuthenticated(t *testing.T) {
445 445
 		t.Logf("testing %s host", host)
446 446
 
447 447
 		// add secrets for subsequent checks
448
-		_, err = kc.Secrets(testutil.Namespace()).Create(&kapi.Secret{
448
+		_, err = kc.Core().Secrets(testutil.Namespace()).Create(&kapi.Secret{
449 449
 			ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("secret-%d", i+1)},
450 450
 			Type:       kapi.SecretTypeDockerConfigJson,
451 451
 			Data: map[string][]byte{
... ...
@@ -9,6 +9,8 @@ import (
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10 10
 	kapierrors "k8s.io/kubernetes/pkg/api/errors"
11 11
 	"k8s.io/kubernetes/pkg/api/resource"
12
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
13
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
12 14
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
13 15
 	"k8s.io/kubernetes/pkg/util/wait"
14 16
 
... ...
@@ -57,7 +59,7 @@ func TestImageStreamTagsAdmission(t *testing.T) {
57 57
 	}
58 58
 
59 59
 	limit := kapi.ResourceList{imageapi.ResourceImageStreamTags: resource.MustParse("0")}
60
-	lrClient := kClient.LimitRanges(testutil.Namespace())
60
+	lrClient := kClient.Core().LimitRanges(testutil.Namespace())
61 61
 	createLimitRangeOfType(t, lrClient, limitRangeName, imageapi.LimitTypeImageStream, limit)
62 62
 
63 63
 	t.Logf("trying to create ImageStreamTag referencing isimage exceeding quota %v", limit)
... ...
@@ -262,7 +264,7 @@ func TestImageStreamAdmitSpecUpdate(t *testing.T) {
262 262
 		imageapi.ResourceImageStreamTags:   resource.MustParse("0"),
263 263
 		imageapi.ResourceImageStreamImages: resource.MustParse("0"),
264 264
 	}
265
-	lrClient := kClient.LimitRanges(testutil.Namespace())
265
+	lrClient := kClient.Core().LimitRanges(testutil.Namespace())
266 266
 	createLimitRangeOfType(t, lrClient, limitRangeName, imageapi.LimitTypeImageStream, limit)
267 267
 
268 268
 	t.Logf("trying to create a new image stream with a tag exceeding limit %v", limit)
... ...
@@ -371,7 +373,7 @@ func TestImageStreamAdmitStatusUpdate(t *testing.T) {
371 371
 		imageapi.ResourceImageStreamTags:   resource.MustParse("0"),
372 372
 		imageapi.ResourceImageStreamImages: resource.MustParse("0"),
373 373
 	}
374
-	lrClient := kClient.LimitRanges(testutil.Namespace())
374
+	lrClient := kClient.Core().LimitRanges(testutil.Namespace())
375 375
 	createLimitRangeOfType(t, lrClient, limitRangeName, imageapi.LimitTypeImageStream, limit)
376 376
 
377 377
 	t.Logf("trying to create a new image stream with a tag exceeding limit %v", limit)
... ...
@@ -467,7 +469,7 @@ func TestImageStreamAdmitStatusUpdate(t *testing.T) {
467 467
 	}
468 468
 }
469 469
 
470
-func setupImageStreamAdmissionTest(t *testing.T) (*kclient.Client, *client.Client) {
470
+func setupImageStreamAdmissionTest(t *testing.T) (*kclientset.Clientset, *client.Client) {
471 471
 	testutil.RequireEtcd(t)
472 472
 
473 473
 	_, clusterAdminKubeConfig, err := testserver.StartTestMasterAPI()
... ...
@@ -517,7 +519,7 @@ func errForbiddenWithRetry(err error) bool {
517 517
 
518 518
 // createResourceQuota creates a resource quota with the given hard limits in the current namespace and waits until
519 519
 // the first usage refresh
520
-func createResourceQuota(t *testing.T, rqClient kclient.ResourceQuotaInterface, quotaName string, hard kapi.ResourceList) *kapi.ResourceQuota {
520
+func createResourceQuota(t *testing.T, rqClient kcoreclient.ResourceQuotaInterface, quotaName string, hard kapi.ResourceList) *kapi.ResourceQuota {
521 521
 	rq := &kapi.ResourceQuota{
522 522
 		ObjectMeta: kapi.ObjectMeta{
523 523
 			Name: quotaName,
... ...
@@ -540,7 +542,7 @@ func createResourceQuota(t *testing.T, rqClient kclient.ResourceQuotaInterface,
540 540
 }
541 541
 
542 542
 // bumpQuota modifies the hard spec of the quota object with the given value. It returns the modified hard spec.
543
-func bumpQuota(t *testing.T, rqs kclient.ResourceQuotaInterface, quotaName string, resourceName kapi.ResourceName, value int64) kapi.ResourceList {
543
+func bumpQuota(t *testing.T, rqs kcoreclient.ResourceQuotaInterface, quotaName string, resourceName kapi.ResourceName, value int64) kapi.ResourceList {
544 544
 	t.Logf("bump the quota %s to %s=%d", quotaName, resourceName, value)
545 545
 	rq, err := rqs.Get(quotaName)
546 546
 	if err != nil {
... ...
@@ -564,7 +566,7 @@ func bumpQuota(t *testing.T, rqs kclient.ResourceQuotaInterface, quotaName strin
564 564
 
565 565
 // createLimitRangeOfType creates a new limit range object with the given max limits set for the given limit type. The
566 566
 // object will be created in the current namespace.
567
-func createLimitRangeOfType(t *testing.T, lrClient kclient.LimitRangeInterface, limitRangeName string, limitType kapi.LimitType, maxLimits kapi.ResourceList) *kapi.LimitRange {
567
+func createLimitRangeOfType(t *testing.T, lrClient kcoreclient.LimitRangeInterface, limitRangeName string, limitType kapi.LimitType, maxLimits kapi.ResourceList) *kapi.LimitRange {
568 568
 	lr := &kapi.LimitRange{
569 569
 		ObjectMeta: kapi.ObjectMeta{
570 570
 			Name: limitRangeName,
... ...
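The helper signatures above move from the unversioned client's per-resource interfaces to the generated typed core interfaces, which callers now reach through the clientset's Core() accessor. A minimal sketch of that shape under the import paths shown in these hunks; the ensureLimitRange and wireUp names are illustrative and not part of this change:

package example // hypothetical package, for illustration only

import (
	kapi "k8s.io/kubernetes/pkg/api"
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
)

// ensureLimitRange accepts the typed core interface, so it works with whatever
// clientset.Core().LimitRanges(ns) returns.
func ensureLimitRange(lrClient kcoreclient.LimitRangeInterface, name string) (*kapi.LimitRange, error) {
	if lr, err := lrClient.Get(name); err == nil {
		return lr, nil
	}
	return lrClient.Create(&kapi.LimitRange{ObjectMeta: kapi.ObjectMeta{Name: name}})
}

// wireUp shows the caller side: the interface value comes from the Core() group client.
func wireUp(kc *kclientset.Clientset, ns string) error {
	_, err := ensureLimitRange(kc.Core().LimitRanges(ns), "limits")
	return err
}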
@@ -587,7 +589,7 @@ func createLimitRangeOfType(t *testing.T, lrClient kclient.LimitRangeInterface,
587 587
 	return lr
588 588
 }
589 589
 
590
-func bumpLimit(t *testing.T, lrClient kclient.LimitRangeInterface, limitRangeName string, resourceName kapi.ResourceName, limit string) kapi.ResourceList {
590
+func bumpLimit(t *testing.T, lrClient kcoreclient.LimitRangeInterface, limitRangeName string, resourceName kapi.ResourceName, limit string) kapi.ResourceList {
591 591
 	t.Logf("bump a limit on resource %q to %s", resourceName, limit)
592 592
 	lr, err := lrClient.Get(limitRangeName)
593 593
 	if err != nil {
... ...
@@ -290,7 +290,7 @@ func TestImageStreamTagLifecycleHook(t *testing.T) {
290 290
 		t.Fatalf("unexpected error: %v", err)
291 291
 	}
292 292
 
293
-	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
293
+	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
294 294
 	if err != nil {
295 295
 		t.Errorf("unexpected error: %v", err)
296 296
 	}
... ...
@@ -310,7 +310,7 @@ func TestImageStreamTagLifecycleHook(t *testing.T) {
310 310
 	}
311 311
 
312 312
 	// can tag to a stream that exists
313
-	exec := stratsupport.NewHookExecutor(nil, clusterAdminClient, clusterAdminKubeClient, os.Stdout, kapi.Codecs.UniversalDecoder())
313
+	exec := stratsupport.NewHookExecutor(nil, clusterAdminClient, clusterAdminKubeClientset, os.Stdout, kapi.Codecs.UniversalDecoder())
314 314
 	err = exec.Execute(
315 315
 		&deployapi.LifecycleHook{
316 316
 			TagImages: []deployapi.TagImageHook{
... ...
@@ -348,7 +348,7 @@ func TestImageStreamTagLifecycleHook(t *testing.T) {
348 348
 	}
349 349
 
350 350
 	// can execute a second time the same tag and it should work
351
-	exec = stratsupport.NewHookExecutor(nil, clusterAdminClient, clusterAdminKubeClient, os.Stdout, kapi.Codecs.UniversalDecoder())
351
+	exec = stratsupport.NewHookExecutor(nil, clusterAdminClient, clusterAdminKubeClientset, os.Stdout, kapi.Codecs.UniversalDecoder())
352 352
 	err = exec.Execute(
353 353
 		&deployapi.LifecycleHook{
354 354
 			TagImages: []deployapi.TagImageHook{
... ...
@@ -380,7 +380,7 @@ func TestImageStreamTagLifecycleHook(t *testing.T) {
380 380
 	}
381 381
 
382 382
 	// can lifecycle tag a new image stream
383
-	exec = stratsupport.NewHookExecutor(nil, clusterAdminClient, clusterAdminKubeClient, os.Stdout, kapi.Codecs.UniversalDecoder())
383
+	exec = stratsupport.NewHookExecutor(nil, clusterAdminClient, clusterAdminKubeClientset, os.Stdout, kapi.Codecs.UniversalDecoder())
384 384
 	err = exec.Execute(
385 385
 		&deployapi.LifecycleHook{
386 386
 			TagImages: []deployapi.TagImageHook{
... ...
@@ -8,7 +8,7 @@ import (
8 8
 
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10 10
 	"k8s.io/kubernetes/pkg/client/cache"
11
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
11
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
12 12
 	"k8s.io/kubernetes/pkg/controller/framework"
13 13
 	"k8s.io/kubernetes/pkg/runtime"
14 14
 	"k8s.io/kubernetes/pkg/util/sets"
... ...
@@ -37,7 +37,7 @@ func TestIngressIPAllocation(t *testing.T) {
37 37
 	if err != nil {
38 38
 		t.Fatalf("Unexpected error: %v", err)
39 39
 	}
40
-	kc, _, err := configapi.GetKubeClient(clusterAdminKubeConfig, &configapi.ClientConnectionOverrides{
40
+	_, kc, _, err := configapi.GetKubeClient(clusterAdminKubeConfig, &configapi.ClientConnectionOverrides{
41 41
 		QPS:   20,
42 42
 		Burst: 50,
43 43
 	})
... ...
@@ -55,10 +55,10 @@ func TestIngressIPAllocation(t *testing.T) {
55 55
 	_, informerController := framework.NewInformer(
56 56
 		&cache.ListWatch{
57 57
 			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
58
-				return kc.Services(kapi.NamespaceAll).List(options)
58
+				return kc.Core().Services(kapi.NamespaceAll).List(options)
59 59
 			},
60 60
 			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
61
-				return kc.Services(kapi.NamespaceAll).Watch(options)
61
+				return kc.Core().Services(kapi.NamespaceAll).Watch(options)
62 62
 			},
63 63
 		},
64 64
 		&kapi.Service{},
... ...
@@ -91,7 +91,7 @@ func TestIngressIPAllocation(t *testing.T) {
91 91
 
92 92
 	// Validate that all services of type load balancer have a unique
93 93
 	// ingress ip and corresponding external ip.
94
-	services, err := kc.Services(kapi.NamespaceDefault).List(kapi.ListOptions{})
94
+	services, err := kc.Core().Services(kapi.NamespaceDefault).List(kapi.ListOptions{})
95 95
 	if err != nil {
96 96
 		t.Fatalf("Unexpected error: %v", err)
97 97
 	}
... ...
@@ -128,7 +128,7 @@ const (
128 128
 	deleteOp
129 129
 )
130 130
 
131
-func generateServiceEvents(t *testing.T, kc kclient.Interface) {
131
+func generateServiceEvents(t *testing.T, kc kclientset.Interface) {
132 132
 	maxMillisecondInterval := 25
133 133
 	minServiceCount := 10
134 134
 	maxOperations := minServiceCount + 30
... ...
@@ -154,7 +154,7 @@ func generateServiceEvents(t *testing.T, kc kclient.Interface) {
154 154
 		case updateOp:
155 155
 			targetIndex := rand.Intn(len(services))
156 156
 			name := services[targetIndex].Name
157
-			s, err := kc.Services(kapi.NamespaceDefault).Get(name)
157
+			s, err := kc.Core().Services(kapi.NamespaceDefault).Get(name)
158 158
 			if err != nil {
159 159
 				continue
160 160
 			}
... ...
@@ -165,7 +165,7 @@ func generateServiceEvents(t *testing.T, kc kclient.Interface) {
165 165
 			} else {
166 166
 				s.Spec.Type = kapi.ServiceTypeLoadBalancer
167 167
 			}
168
-			s, err = kc.Services(kapi.NamespaceDefault).Update(s)
168
+			s, err = kc.Core().Services(kapi.NamespaceDefault).Update(s)
169 169
 			if err != nil {
170 170
 				continue
171 171
 			}
... ...
@@ -173,7 +173,7 @@ func generateServiceEvents(t *testing.T, kc kclient.Interface) {
173 173
 		case deleteOp:
174 174
 			targetIndex := rand.Intn(len(services))
175 175
 			name := services[targetIndex].Name
176
-			err := kc.Services(kapi.NamespaceDefault).Delete(name)
176
+			err := kc.Core().Services(kapi.NamespaceDefault).Delete(name, nil)
177 177
 			if err != nil {
178 178
 				continue
179 179
 			}
... ...
@@ -195,7 +195,7 @@ func generateServiceEvents(t *testing.T, kc kclient.Interface) {
195 195
 	}
196 196
 }
197 197
 
198
-func createService(kc kclient.Interface, name string, typeLoadBalancer bool) (*kapi.Service, error) {
198
+func createService(kc kclientset.Interface, name string, typeLoadBalancer bool) (*kapi.Service, error) {
199 199
 	serviceType := kapi.ServiceTypeClusterIP
200 200
 	if typeLoadBalancer {
201 201
 		serviceType = kapi.ServiceTypeLoadBalancer
... ...
@@ -213,5 +213,5 @@ func createService(kc kclient.Interface, name string, typeLoadBalancer bool) (*k
213 213
 			}},
214 214
 		},
215 215
 	}
216
-	return kc.Services(kapi.NamespaceDefault).Create(service)
216
+	return kc.Core().Services(kapi.NamespaceDefault).Create(service)
217 217
 }
... ...
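The hunks in this file all follow one mechanical pattern: the flat unversioned kclient.Client gives way to the generated internalclientset.Clientset, resource clients are reached through group accessors (Core() here; Extensions() and Batch() in later hunks), and Delete now takes an explicit options argument. A before/after sketch of that call shape; the oldStyle and newStyle helpers are illustrative only:

package example // hypothetical package, for illustration only

import (
	kapi "k8s.io/kubernetes/pkg/api"
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	kclient "k8s.io/kubernetes/pkg/client/unversioned"
)

// oldStyle: resource clients hang directly off the unversioned client and
// Delete takes only the object name.
func oldStyle(kc kclient.Interface, name string) error {
	if _, err := kc.Services(kapi.NamespaceDefault).Get(name); err != nil {
		return err
	}
	return kc.Services(kapi.NamespaceDefault).Delete(name)
}

// newStyle: the same calls against the internal clientset go through the
// Core() group client, and Delete takes *api.DeleteOptions (nil here).
func newStyle(kc kclientset.Interface, name string) error {
	if _, err := kc.Core().Services(kapi.NamespaceDefault).Get(name); err != nil {
		return err
	}
	return kc.Core().Services(kapi.NamespaceDefault).Delete(name, nil)
}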
@@ -23,20 +23,20 @@ func TestNamespaceLifecycleAdmission(t *testing.T) {
23 23
 	if err != nil {
24 24
 		t.Fatal(err)
25 25
 	}
26
-	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
26
+	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
27 27
 	if err != nil {
28 28
 		t.Fatal(err)
29 29
 	}
30 30
 
31 31
 	for _, ns := range []string{"default", "openshift", "openshift-infra"} {
32
-		if err := clusterAdminKubeClient.Namespaces().Delete(ns); err == nil {
32
+		if err := clusterAdminKubeClientset.Core().Namespaces().Delete(ns, nil); err == nil {
33 33
 			t.Fatalf("expected error deleting %q namespace, got none", ns)
34 34
 		}
35 35
 	}
36 36
 
37 37
 	// Create a namespace directly (not via a project)
38 38
 	ns := &kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: "test"}}
39
-	ns, err = clusterAdminKubeClient.Namespaces().Create(ns)
39
+	ns, err = clusterAdminKubeClientset.Core().Namespaces().Create(ns)
40 40
 	if err != nil {
41 41
 		t.Fatal(err)
42 42
 	}
... ...
@@ -65,7 +65,7 @@ func TestNamespaceLifecycleAdmission(t *testing.T) {
65 65
 	}
66 66
 
67 67
 	// Ensure the origin finalizer is added
68
-	ns, err = clusterAdminKubeClient.Namespaces().Get(ns.Name)
68
+	ns, err = clusterAdminKubeClientset.Core().Namespaces().Get(ns.Name)
69 69
 	if err != nil {
70 70
 		t.Fatal(err)
71 71
 	}
... ...
@@ -82,7 +82,7 @@ func TestNamespaceLifecycleAdmission(t *testing.T) {
82 82
 
83 83
 	// Delete the namespace
84 84
 	// We don't have to worry about racing the namespace deletion controller because we've only started the master
85
-	err = clusterAdminKubeClient.Namespaces().Delete(ns.Name)
85
+	err = clusterAdminKubeClientset.Core().Namespaces().Delete(ns.Name, nil)
86 86
 	if err != nil {
87 87
 		t.Fatal(err)
88 88
 	}
... ...
@@ -40,7 +40,7 @@ func TestOAuthDisabled(t *testing.T) {
40 40
 	}
41 41
 
42 42
 	// Make sure cert auth still works
43
-	namespaces, err := client.Namespaces().List(kapi.ListOptions{})
43
+	namespaces, err := client.Core().Namespaces().List(kapi.ListOptions{})
44 44
 	if err != nil {
45 45
 		t.Fatalf("Unexpected error %v", err)
46 46
 	}
... ...
@@ -8,7 +8,7 @@ import (
8 8
 	"k8s.io/kubernetes/pkg/api/unversioned"
9 9
 	"k8s.io/kubernetes/pkg/apis/batch"
10 10
 	"k8s.io/kubernetes/pkg/apis/extensions"
11
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
11
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
12 12
 
13 13
 	"github.com/openshift/origin/pkg/client"
14 14
 	policy "github.com/openshift/origin/pkg/cmd/admin/policy"
... ...
@@ -22,15 +22,15 @@ import (
22 22
 
23 23
 func TestPodNodeConstraintsAdmissionPluginSetNodeNameClusterAdmin(t *testing.T) {
24 24
 	defer testutil.DumpEtcdOnFailure(t)
25
-	oclient, kclient := setupClusterAdminPodNodeConstraintsTest(t, &pluginapi.PodNodeConstraintsConfig{})
26
-	testPodNodeConstraintsObjectCreationWithPodTemplate(t, "set node name, cluster admin", kclient, oclient, "nodename.example.com", nil, false)
25
+	oclient, kclientset := setupClusterAdminPodNodeConstraintsTest(t, &pluginapi.PodNodeConstraintsConfig{})
26
+	testPodNodeConstraintsObjectCreationWithPodTemplate(t, "set node name, cluster admin", kclientset, oclient, "nodename.example.com", nil, false)
27 27
 }
28 28
 
29 29
 func TestPodNodeConstraintsAdmissionPluginSetNodeNameNonAdmin(t *testing.T) {
30 30
 	defer testutil.DumpEtcdOnFailure(t)
31 31
 	config := &pluginapi.PodNodeConstraintsConfig{}
32
-	oclient, kclient := setupUserPodNodeConstraintsTest(t, config, "derples")
33
-	testPodNodeConstraintsObjectCreationWithPodTemplate(t, "set node name, regular user", kclient, oclient, "nodename.example.com", nil, true)
32
+	oclient, kclientset := setupUserPodNodeConstraintsTest(t, config, "derples")
33
+	testPodNodeConstraintsObjectCreationWithPodTemplate(t, "set node name, regular user", kclientset, oclient, "nodename.example.com", nil, true)
34 34
 }
35 35
 
36 36
 func TestPodNodeConstraintsAdmissionPluginSetNodeSelectorClusterAdmin(t *testing.T) {
... ...
@@ -38,8 +38,8 @@ func TestPodNodeConstraintsAdmissionPluginSetNodeSelectorClusterAdmin(t *testing
38 38
 	config := &pluginapi.PodNodeConstraintsConfig{
39 39
 		NodeSelectorLabelBlacklist: []string{"hostname"},
40 40
 	}
41
-	oclient, kclient := setupClusterAdminPodNodeConstraintsTest(t, config)
42
-	testPodNodeConstraintsObjectCreationWithPodTemplate(t, "set node selector, cluster admin", kclient, oclient, "", map[string]string{"hostname": "foo"}, false)
41
+	oclient, kclientset := setupClusterAdminPodNodeConstraintsTest(t, config)
42
+	testPodNodeConstraintsObjectCreationWithPodTemplate(t, "set node selector, cluster admin", kclientset, oclient, "", map[string]string{"hostname": "foo"}, false)
43 43
 }
44 44
 
45 45
 func TestPodNodeConstraintsAdmissionPluginSetNodeSelectorNonAdmin(t *testing.T) {
... ...
@@ -47,11 +47,11 @@ func TestPodNodeConstraintsAdmissionPluginSetNodeSelectorNonAdmin(t *testing.T)
47 47
 	config := &pluginapi.PodNodeConstraintsConfig{
48 48
 		NodeSelectorLabelBlacklist: []string{"hostname"},
49 49
 	}
50
-	oclient, kclient := setupUserPodNodeConstraintsTest(t, config, "derples")
51
-	testPodNodeConstraintsObjectCreationWithPodTemplate(t, "set node selector, regular user", kclient, oclient, "", map[string]string{"hostname": "foo"}, true)
50
+	oclient, kclientset := setupUserPodNodeConstraintsTest(t, config, "derples")
51
+	testPodNodeConstraintsObjectCreationWithPodTemplate(t, "set node selector, regular user", kclientset, oclient, "", map[string]string{"hostname": "foo"}, true)
52 52
 }
53 53
 
54
-func setupClusterAdminPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNodeConstraintsConfig) (*client.Client, *kclient.Client) {
54
+func setupClusterAdminPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNodeConstraintsConfig) (*client.Client, *kclientset.Clientset) {
55 55
 	testutil.RequireEtcd(t)
56 56
 	masterConfig, err := testserver.DefaultMasterOptions()
57 57
 	if err != nil {
... ...
@@ -69,7 +69,7 @@ func setupClusterAdminPodNodeConstraintsTest(t *testing.T, pluginConfig *plugina
69 69
 	if err != nil {
70 70
 		t.Fatalf("error starting server: %v", err)
71 71
 	}
72
-	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
72
+	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
73 73
 	if err != nil {
74 74
 		t.Fatalf("error getting client: %v", err)
75 75
 	}
... ...
@@ -79,17 +79,17 @@ func setupClusterAdminPodNodeConstraintsTest(t *testing.T, pluginConfig *plugina
79 79
 	}
80 80
 	ns := &kapi.Namespace{}
81 81
 	ns.Name = testutil.Namespace()
82
-	_, err = kubeClient.Namespaces().Create(ns)
82
+	_, err = kubeClientset.Core().Namespaces().Create(ns)
83 83
 	if err != nil {
84 84
 		t.Fatalf("error creating namespace: %v", err)
85 85
 	}
86
-	if err := testserver.WaitForPodCreationServiceAccounts(kubeClient, testutil.Namespace()); err != nil {
86
+	if err := testserver.WaitForPodCreationServiceAccounts(kubeClientset, testutil.Namespace()); err != nil {
87 87
 		t.Fatalf("unexpected error: %v", err)
88 88
 	}
89
-	return openShiftClient, kubeClient
89
+	return openShiftClient, kubeClientset
90 90
 }
91 91
 
92
-func setupUserPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNodeConstraintsConfig, user string) (*client.Client, *kclient.Client) {
92
+func setupUserPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNodeConstraintsConfig, user string) (*client.Client, *kclientset.Clientset) {
93 93
 	testutil.RequireEtcd(t)
94 94
 	masterConfig, err := testserver.DefaultMasterOptions()
95 95
 	if err != nil {
... ...
@@ -114,21 +114,21 @@ func setupUserPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNo
114 114
 	if err != nil {
115 115
 		t.Fatalf("unexpected error: %v", err)
116 116
 	}
117
-	userClient, userkubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, user)
117
+	userClient, userkubeClientset, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, user)
118 118
 	if err != nil {
119 119
 		t.Fatalf("error getting user/kube client: %v", err)
120 120
 	}
121
-	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
121
+	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
122 122
 	if err != nil {
123 123
 		t.Fatalf("error getting kube client: %v", err)
124 124
 	}
125 125
 	ns := &kapi.Namespace{}
126 126
 	ns.Name = testutil.Namespace()
127
-	_, err = kubeClient.Namespaces().Create(ns)
127
+	_, err = kubeClientset.Core().Namespaces().Create(ns)
128 128
 	if err != nil {
129 129
 		t.Fatalf("error creating namespace: %v", err)
130 130
 	}
131
-	if err := testserver.WaitForServiceAccounts(kubeClient, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
131
+	if err := testserver.WaitForServiceAccounts(kubeClientset, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
132 132
 		t.Fatalf("unexpected error: %v", err)
133 133
 	}
134 134
 	addUser := &policy.RoleModificationOptions{
... ...
@@ -140,7 +140,7 @@ func setupUserPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNo
140 140
 	if err := addUser.AddRole(); err != nil {
141 141
 		t.Fatalf("unexpected error: %v", err)
142 142
 	}
143
-	return userClient, userkubeClient
143
+	return userClient, userkubeClientset
144 144
 }
145 145
 
146 146
 func testPodNodeConstraintsPodSpec(nodeName string, nodeSelector map[string]string) kapi.PodSpec {
... ...
@@ -224,7 +224,7 @@ func testPodNodeConstraintsDeploymentConfig(nodeName string, nodeSelector map[st
224 224
 
225 225
 	// testPodNodeConstraintsObjectCreationWithPodTemplate attempts to create different object types that contain pod templates
226 226
 // using the passed in nodeName and nodeSelector. It will use the expectError flag to determine if an error should be returned or not
227
-func testPodNodeConstraintsObjectCreationWithPodTemplate(t *testing.T, name string, kclient kclient.Interface, client client.Interface, nodeName string, nodeSelector map[string]string, expectError bool) {
227
+func testPodNodeConstraintsObjectCreationWithPodTemplate(t *testing.T, name string, kclientset kclientset.Interface, client client.Interface, nodeName string, nodeSelector map[string]string, expectError bool) {
228 228
 
229 229
 	checkForbiddenErr := func(objType string, err error) {
230 230
 		if err == nil && expectError {
... ...
@@ -243,28 +243,28 @@ func testPodNodeConstraintsObjectCreationWithPodTemplate(t *testing.T, name stri
243 243
 
244 244
 	// Pod
245 245
 	pod := testPodNodeConstraintsPod(nodeName, nodeSelector)
246
-	_, err := kclient.Pods(testutil.Namespace()).Create(pod)
246
+	_, err := kclientset.Core().Pods(testutil.Namespace()).Create(pod)
247 247
 	checkForbiddenErr("pod", err)
248 248
 
249 249
 	// ReplicationController
250 250
 	rc := testPodNodeConstraintsReplicationController(nodeName, nodeSelector)
251
-	_, err = kclient.ReplicationControllers(testutil.Namespace()).Create(rc)
251
+	_, err = kclientset.Core().ReplicationControllers(testutil.Namespace()).Create(rc)
252 252
 	checkForbiddenErr("rc", err)
253 253
 
254 254
 	// TODO: Enable when the deployments endpoint is supported in Origin
255 255
 	// Deployment
256 256
 	// d := testPodNodeConstraintsDeployment(nodeName, nodeSelector)
257
-	// _, err = kclient.Extensions().Deployments(testutil.Namespace()).Create(d)
257
+	// _, err = kclientset.Extensions().Deployments(testutil.Namespace()).Create(d)
258 258
 	// checkForbiddenErr("deployment", err)
259 259
 
260 260
 	// ReplicaSet
261 261
 	rs := testPodNodeConstraintsReplicaSet(nodeName, nodeSelector)
262
-	_, err = kclient.Extensions().ReplicaSets(testutil.Namespace()).Create(rs)
262
+	_, err = kclientset.Extensions().ReplicaSets(testutil.Namespace()).Create(rs)
263 263
 	checkForbiddenErr("replicaset", err)
264 264
 
265 265
 	// Job
266 266
 	job := testPodNodeConstraintsJob(nodeName, nodeSelector)
267
-	_, err = kclient.Extensions().Jobs(testutil.Namespace()).Create(job)
267
+	_, err = kclientset.Batch().Jobs(testutil.Namespace()).Create(job)
268 268
 	checkForbiddenErr("job", err)
269 269
 
270 270
 	// DeploymentConfig
... ...
@@ -5,8 +5,8 @@ import (
5 5
 
6 6
 	kapi "k8s.io/kubernetes/pkg/api"
7 7
 	apierrors "k8s.io/kubernetes/pkg/api/errors"
8
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
8 9
 	"k8s.io/kubernetes/pkg/client/restclient"
9
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10 10
 	"k8s.io/kubernetes/pkg/labels"
11 11
 
12 12
 	"github.com/openshift/origin/pkg/client"
... ...
@@ -18,7 +18,7 @@ import (
18 18
 	testserver "github.com/openshift/origin/test/util/server"
19 19
 )
20 20
 
21
-func setupProjectRequestLimitTest(t *testing.T, pluginConfig *requestlimit.ProjectRequestLimitConfig) (kclient.Interface, client.Interface, *restclient.Config) {
21
+func setupProjectRequestLimitTest(t *testing.T, pluginConfig *requestlimit.ProjectRequestLimitConfig) (kclientset.Interface, client.Interface, *restclient.Config) {
22 22
 	testutil.RequireEtcd(t)
23 23
 	masterConfig, err := testserver.DefaultMasterOptions()
24 24
 	if err != nil {
... ...
@@ -60,13 +60,13 @@ func setupProjectRequestLimitUsers(t *testing.T, client client.Interface, users
60 60
 	}
61 61
 }
62 62
 
63
-func setupProjectRequestLimitNamespaces(t *testing.T, kclient kclient.Interface, namespacesByRequester map[string]int) {
63
+func setupProjectRequestLimitNamespaces(t *testing.T, kclient kclientset.Interface, namespacesByRequester map[string]int) {
64 64
 	for requester, nsCount := range namespacesByRequester {
65 65
 		for i := 0; i < nsCount; i++ {
66 66
 			ns := &kapi.Namespace{}
67 67
 			ns.GenerateName = "testns"
68 68
 			ns.Annotations = map[string]string{projectapi.ProjectRequester: requester}
69
-			_, err := kclient.Namespaces().Create(ns)
69
+			_, err := kclient.Core().Namespaces().Create(ns)
70 70
 			if err != nil {
71 71
 				t.Fatalf("Could not create namespace for requester %s: %v", requester, err)
72 72
 			}
... ...
@@ -38,7 +38,7 @@ func TestProjectRequestError(t *testing.T) {
38 38
 	if err != nil {
39 39
 		t.Fatalf("error starting server: %v", err)
40 40
 	}
41
-	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
41
+	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
42 42
 	if err != nil {
43 43
 		t.Fatalf("error getting client: %v", err)
44 44
 	}
... ...
@@ -67,7 +67,7 @@ func TestProjectRequestError(t *testing.T) {
67 67
 	}
68 68
 
69 69
 	// Watch the project, rolebindings, and configmaps
70
-	nswatch, err := kubeClient.Namespaces().Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", ns)})
70
+	nswatch, err := kubeClientset.Core().Namespaces().Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", ns)})
71 71
 	if err != nil {
72 72
 		t.Fatal(err)
73 73
 	}
... ...
@@ -75,7 +75,7 @@ func TestProjectRequestError(t *testing.T) {
75 75
 	if err != nil {
76 76
 		t.Fatal(err)
77 77
 	}
78
-	cmwatch, err := kubeClient.ConfigMaps(ns).Watch(kapi.ListOptions{})
78
+	cmwatch, err := kubeClientset.Core().ConfigMaps(ns).Watch(kapi.ListOptions{})
79 79
 	if err != nil {
80 80
 		t.Fatal(err)
81 81
 	}
... ...
@@ -130,7 +130,7 @@ func TestProjectRequestError(t *testing.T) {
130 130
 	}
131 131
 
132 132
 	// Verify project is deleted
133
-	if nsObj, err := kubeClient.Namespaces().Get(ns); !kapierrors.IsNotFound(err) {
133
+	if nsObj, err := kubeClientset.Core().Namespaces().Get(ns); !kapierrors.IsNotFound(err) {
134 134
 		t.Errorf("Expected namespace to be gone, got %#v, %#v", nsObj, err)
135 135
 	}
136 136
 }
... ...
@@ -33,7 +33,7 @@ func TestProjectIsNamespace(t *testing.T) {
33 33
 	if err != nil {
34 34
 		t.Fatalf("unexpected error: %v", err)
35 35
 	}
36
-	kubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
36
+	kubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
37 37
 	if err != nil {
38 38
 		t.Fatalf("unexpected error: %v", err)
39 39
 	}
... ...
@@ -42,7 +42,7 @@ func TestProjectIsNamespace(t *testing.T) {
42 42
 	namespace := &kapi.Namespace{
43 43
 		ObjectMeta: kapi.ObjectMeta{Name: "integration-test"},
44 44
 	}
45
-	namespaceResult, err := kubeClient.Namespaces().Create(namespace)
45
+	namespaceResult, err := kubeClientset.Core().Namespaces().Create(namespace)
46 46
 	if err != nil {
47 47
 		t.Fatalf("unexpected error: %v", err)
48 48
 	}
... ...
@@ -72,7 +72,7 @@ func TestProjectIsNamespace(t *testing.T) {
72 72
 	}
73 73
 
74 74
 	// now get the namespace for that project
75
-	namespace, err = kubeClient.Namespaces().Get(projectResult.Name)
75
+	namespace, err = kubeClientset.Core().Namespaces().Get(projectResult.Name)
76 76
 	if err != nil {
77 77
 		t.Fatalf("unexpected error: %v", err)
78 78
 	}
... ...
@@ -101,7 +101,7 @@ func TestProjectMustExist(t *testing.T) {
101 101
 		t.Fatalf("unexpected error: %v", err)
102 102
 	}
103 103
 
104
-	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
104
+	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
105 105
 	if err != nil {
106 106
 		t.Fatalf("unexpected error: %v", err)
107 107
 	}
... ...
@@ -115,7 +115,7 @@ func TestProjectMustExist(t *testing.T) {
115 115
 		},
116 116
 	}
117 117
 
118
-	_, err = clusterAdminKubeClient.Pods("test").Create(pod)
118
+	_, err = clusterAdminKubeClientset.Core().Pods("test").Create(pod)
119 119
 	if err == nil {
120 120
 		t.Errorf("Expected an error on creation of a Kubernetes resource because namespace does not exist")
121 121
 	}
... ...
@@ -7,7 +7,7 @@ import (
7 7
 	"time"
8 8
 
9 9
 	kapi "k8s.io/kubernetes/pkg/api"
10
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
10
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
11 11
 	"k8s.io/kubernetes/pkg/util/sets"
12 12
 	"k8s.io/kubernetes/pkg/watch"
13 13
 
... ...
@@ -56,7 +56,7 @@ func stressRouter(t *testing.T, namespaceCount, routesPerNamespace, routerCount,
56 56
 
57 57
 		// Create a namespace
58 58
 		namespaceProperties := createNamespaceProperties()
59
-		namespace, err := kc.Namespaces().Create(namespaceProperties)
59
+		namespace, err := kc.Core().Namespaces().Create(namespaceProperties)
60 60
 		if err != nil {
61 61
 			t.Fatalf("unexpected error: %v", err)
62 62
 		}
... ...
@@ -65,14 +65,14 @@ func stressRouter(t *testing.T, namespaceCount, routesPerNamespace, routerCount,
65 65
 
66 66
 			// Create a service for the route
67 67
 			serviceProperties := createServiceProperties()
68
-			service, err := kc.Services(namespace.Name).Create(serviceProperties)
68
+			service, err := kc.Core().Services(namespace.Name).Create(serviceProperties)
69 69
 			if err != nil {
70 70
 				t.Fatalf("unexpected error: %v", err)
71 71
 			}
72 72
 
73 73
 			// Create endpoints
74 74
 			endpointsProperties := createEndpointsProperties(service.Name)
75
-			_, err = kc.Endpoints(namespace.Name).Create(endpointsProperties)
75
+			_, err = kc.Core().Endpoints(namespace.Name).Create(endpointsProperties)
76 76
 			if err != nil {
77 77
 				t.Fatalf("unexpected error: %v", err)
78 78
 			}
... ...
@@ -194,7 +194,7 @@ func createRouteProperties(serviceName string) *routeapi.Route {
194 194
 
195 195
 // launchApi launches an API server and returns clients configured to
196 196
 // access it.
197
-func launchApi() (osclient.Interface, kclient.Interface, error) {
197
+func launchApi() (osclient.Interface, kclientset.Interface, error) {
198 198
 	_, clusterAdminKubeConfig, err := testserver.StartTestMasterAPI()
199 199
 	if err != nil {
200 200
 		return nil, nil, err
... ...
@@ -259,7 +259,7 @@ func (p *DelayPlugin) SetLastSyncProcessed(processed bool) error {
259 259
 
260 260
 // launchRouter launches a template router that communicates with the
261 261
 // api via the provided clients.
262
-func launchRouter(oc osclient.Interface, kc kclient.Interface, maxDelay int32, name string, reloadInterval int, reloadCounts map[string]int) (templatePlugin *templateplugin.TemplatePlugin) {
262
+func launchRouter(oc osclient.Interface, kc kclientset.Interface, maxDelay int32, name string, reloadInterval int, reloadCounts map[string]int) (templatePlugin *templateplugin.TemplatePlugin) {
263 263
 	r := templateplugin.NewFakeTemplateRouter()
264 264
 
265 265
 	reloadCounts[name] = 0
... ...
@@ -4,7 +4,7 @@ import (
4 4
 	"testing"
5 5
 
6 6
 	kapi "k8s.io/kubernetes/pkg/api"
7
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
7
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
8 8
 
9 9
 	configapi "github.com/openshift/origin/pkg/cmd/server/api"
10 10
 	pluginapi "github.com/openshift/origin/pkg/quota/admission/runonceduration/api"
... ...
@@ -28,9 +28,9 @@ func testRunOnceDurationPod(activeDeadlineSeconds int64) *kapi.Pod {
28 28
 	return pod
29 29
 }
30 30
 
31
-func testPodDuration(t *testing.T, name string, kclient kclient.Interface, pod *kapi.Pod, expected int64) {
31
+func testPodDuration(t *testing.T, name string, kclientset kclientset.Interface, pod *kapi.Pod, expected int64) {
32 32
 	// Pod with no duration set
33
-	pod, err := kclient.Pods(testutil.Namespace()).Create(pod)
33
+	pod, err := kclientset.Core().Pods(testutil.Namespace()).Create(pod)
34 34
 	if err != nil {
35 35
 		t.Fatalf("%s: unexpected: %v", name, err)
36 36
 	}
... ...
@@ -49,11 +49,11 @@ func TestRunOnceDurationAdmissionPlugin(t *testing.T) {
49 49
 	config := &pluginapi.RunOnceDurationConfig{
50 50
 		ActiveDeadlineSecondsLimit: &secs,
51 51
 	}
52
-	kclient := setupRunOnceDurationTest(t, config, nil)
52
+	kclientset := setupRunOnceDurationTest(t, config, nil)
53 53
 
54
-	testPodDuration(t, "global, no duration", kclient, testRunOnceDurationPod(0), 3600)
55
-	testPodDuration(t, "global, larger duration", kclient, testRunOnceDurationPod(7200), 3600)
56
-	testPodDuration(t, "global, smaller duration", kclient, testRunOnceDurationPod(100), 100)
54
+	testPodDuration(t, "global, no duration", kclientset, testRunOnceDurationPod(0), 3600)
55
+	testPodDuration(t, "global, larger duration", kclientset, testRunOnceDurationPod(7200), 3600)
56
+	testPodDuration(t, "global, smaller duration", kclientset, testRunOnceDurationPod(100), 100)
57 57
 }
58 58
 
59 59
 func TestRunOnceDurationAdmissionPluginProjectLimit(t *testing.T) {
... ...
@@ -65,13 +65,13 @@ func TestRunOnceDurationAdmissionPluginProjectLimit(t *testing.T) {
65 65
 	nsAnnotations := map[string]string{
66 66
 		pluginapi.ActiveDeadlineSecondsLimitAnnotation: "100",
67 67
 	}
68
-	kclient := setupRunOnceDurationTest(t, config, nsAnnotations)
69
-	testPodDuration(t, "project, no duration", kclient, testRunOnceDurationPod(0), 100)
70
-	testPodDuration(t, "project, larger duration", kclient, testRunOnceDurationPod(7200), 100)
71
-	testPodDuration(t, "project, smaller duration", kclient, testRunOnceDurationPod(50), 50)
68
+	kclientset := setupRunOnceDurationTest(t, config, nsAnnotations)
69
+	testPodDuration(t, "project, no duration", kclientset, testRunOnceDurationPod(0), 100)
70
+	testPodDuration(t, "project, larger duration", kclientset, testRunOnceDurationPod(7200), 100)
71
+	testPodDuration(t, "project, smaller duration", kclientset, testRunOnceDurationPod(50), 50)
72 72
 }
73 73
 
74
-func setupRunOnceDurationTest(t *testing.T, pluginConfig *pluginapi.RunOnceDurationConfig, nsAnnotations map[string]string) kclient.Interface {
74
+func setupRunOnceDurationTest(t *testing.T, pluginConfig *pluginapi.RunOnceDurationConfig, nsAnnotations map[string]string) kclientset.Interface {
75 75
 	testutil.RequireEtcd(t)
76 76
 	masterConfig, err := testserver.DefaultMasterOptions()
77 77
 	if err != nil {
... ...
@@ -86,19 +86,19 @@ func setupRunOnceDurationTest(t *testing.T, pluginConfig *pluginapi.RunOnceDurat
86 86
 	if err != nil {
87 87
 		t.Fatalf("error starting server: %v", err)
88 88
 	}
89
-	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
89
+	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
90 90
 	if err != nil {
91 91
 		t.Fatalf("error getting client: %v", err)
92 92
 	}
93 93
 	ns := &kapi.Namespace{}
94 94
 	ns.Name = testutil.Namespace()
95 95
 	ns.Annotations = nsAnnotations
96
-	_, err = kubeClient.Namespaces().Create(ns)
96
+	_, err = kubeClientset.Core().Namespaces().Create(ns)
97 97
 	if err != nil {
98 98
 		t.Fatalf("error creating namespace: %v", err)
99 99
 	}
100
-	if err := testserver.WaitForPodCreationServiceAccounts(kubeClient, testutil.Namespace()); err != nil {
100
+	if err := testserver.WaitForPodCreationServiceAccounts(kubeClientset, testutil.Namespace()); err != nil {
101 101
 		t.Errorf("unexpected error: %v", err)
102 102
 	}
103
-	return kubeClient
103
+	return kubeClientset
104 104
 }
... ...
@@ -60,7 +60,7 @@ func TestSAAsOAuthClient(t *testing.T) {
60 60
 	if err != nil {
61 61
 		t.Fatalf("unexpected error: %v", err)
62 62
 	}
63
-	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
63
+	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
64 64
 	if err != nil {
65 65
 		t.Fatalf("unexpected error: %v", err)
66 66
 	}
... ...
@@ -73,7 +73,7 @@ func TestSAAsOAuthClient(t *testing.T) {
73 73
 	if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, projectName, "harold"); err != nil {
74 74
 		t.Fatalf("unexpected error: %v", err)
75 75
 	}
76
-	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClient, projectName, []string{"default"}); err != nil {
76
+	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClientset, projectName, []string{"default"}); err != nil {
77 77
 		t.Fatalf("unexpected error: %v", err)
78 78
 	}
79 79
 
... ...
@@ -93,7 +93,7 @@ func TestSAAsOAuthClient(t *testing.T) {
93 93
 
94 94
 	// retry this a couple of times. We seem to be flaking on update conflicts and missing secrets altogether
95 95
 	err = kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
96
-		defaultSA, err = clusterAdminKubeClient.ServiceAccounts(projectName).Get("default")
96
+		defaultSA, err = clusterAdminKubeClientset.Core().ServiceAccounts(projectName).Get("default")
97 97
 		if err != nil {
98 98
 			return err
99 99
 		}
... ...
@@ -102,7 +102,7 @@ func TestSAAsOAuthClient(t *testing.T) {
102 102
 		}
103 103
 		defaultSA.Annotations[saoauth.OAuthRedirectModelAnnotationURIPrefix+"one"] = redirectURL
104 104
 		defaultSA.Annotations[saoauth.OAuthWantChallengesAnnotationPrefix] = "true"
105
-		defaultSA, err = clusterAdminKubeClient.ServiceAccounts(projectName).Update(defaultSA)
105
+		defaultSA, err = clusterAdminKubeClientset.Core().ServiceAccounts(projectName).Update(defaultSA)
106 106
 		return err
107 107
 	})
108 108
 	if err != nil {
... ...
@@ -112,7 +112,7 @@ func TestSAAsOAuthClient(t *testing.T) {
112 112
 	var oauthSecret *kapi.Secret
113 113
 	// retry this a couple of times. We seem to be flaking on update conflicts and missing secrets altogether
114 114
 	err = wait.PollImmediate(30*time.Millisecond, 10*time.Second, func() (done bool, err error) {
115
-		allSecrets, err := clusterAdminKubeClient.Secrets(projectName).List(kapi.ListOptions{})
115
+		allSecrets, err := clusterAdminKubeClientset.Core().Secrets(projectName).List(kapi.ListOptions{})
116 116
 		if err != nil {
117 117
 			return false, err
118 118
 		}
... ...
@@ -22,7 +22,7 @@ func TestPodUpdateSCCEnforcement(t *testing.T) {
22 22
 	if err != nil {
23 23
 		t.Fatalf("unexpected error: %v", err)
24 24
 	}
25
-	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
25
+	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
26 26
 	if err != nil {
27 27
 		t.Fatalf("unexpected error: %v", err)
28 28
 	}
... ...
@@ -41,7 +41,7 @@ func TestPodUpdateSCCEnforcement(t *testing.T) {
41 41
 		t.Fatalf("unexpected error: %v", err)
42 42
 	}
43 43
 
44
-	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClient, projectName, []string{"default"}); err != nil {
44
+	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClientset, projectName, []string{"default"}); err != nil {
45 45
 		t.Fatalf("unexpected error: %v", err)
46 46
 	}
47 47
 
... ...
@@ -63,7 +63,7 @@ func TestPodUpdateSCCEnforcement(t *testing.T) {
63 63
 		t.Fatalf("missing forbidden: %v", err)
64 64
 	}
65 65
 
66
-	actualPod, err := clusterAdminKubeClient.Pods(projectName).Create(privilegedPod)
66
+	actualPod, err := clusterAdminKubeClientset.Core().Pods(projectName).Create(privilegedPod)
67 67
 	if err != nil {
68 68
 		t.Fatalf("unexpected error: %v", err)
69 69
 	}
... ...
@@ -10,6 +10,7 @@ import (
10 10
 
11 11
 	"k8s.io/kubernetes/pkg/api"
12 12
 	"k8s.io/kubernetes/pkg/api/errors"
13
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
13 14
 	"k8s.io/kubernetes/pkg/client/restclient"
14 15
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
15 16
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
... ...
@@ -40,7 +41,7 @@ func TestServiceAccountAuthorization(t *testing.T) {
40 40
 	if err != nil {
41 41
 		t.Fatalf("unexpected error: %v", err)
42 42
 	}
43
-	cluster1AdminKubeClient, err := testutil.GetClusterAdminKubeClient(cluster1AdminConfigFile)
43
+	cluster1AdminKubeClientset, err := testutil.GetClusterAdminKubeClient(cluster1AdminConfigFile)
44 44
 	if err != nil {
45 45
 		t.Fatalf("unexpected error: %v", err)
46 46
 	}
... ...
@@ -50,7 +51,7 @@ func TestServiceAccountAuthorization(t *testing.T) {
50 50
 	}
51 51
 
52 52
 	// Get a service account token and build a client
53
-	saToken, err := waitForServiceAccountToken(cluster1AdminKubeClient, saNamespace, saName, 20, time.Second)
53
+	saToken, err := waitForServiceAccountToken(cluster1AdminKubeClientset, saNamespace, saName, 20, time.Second)
54 54
 	if err != nil {
55 55
 		t.Fatalf("unexpected error: %v", err)
56 56
 	}
... ...
@@ -204,7 +205,7 @@ func writeClientConfigToKubeConfig(config restclient.Config, path string) error
204 204
 	return nil
205 205
 }
206 206
 
207
-func waitForServiceAccountToken(client *kclient.Client, ns, name string, attempts int, interval time.Duration) (string, error) {
207
+func waitForServiceAccountToken(client *kclientset.Clientset, ns, name string, attempts int, interval time.Duration) (string, error) {
208 208
 	for i := 0; i <= attempts; i++ {
209 209
 		time.Sleep(interval)
210 210
 		token, err := getServiceAccountToken(client, ns, name)
... ...
@@ -218,14 +219,14 @@ func waitForServiceAccountToken(client *kclient.Client, ns, name string, attempt
218 218
 	return "", nil
219 219
 }
220 220
 
221
-func getServiceAccountToken(client *kclient.Client, ns, name string) (string, error) {
222
-	secrets, err := client.Secrets(ns).List(api.ListOptions{})
221
+func getServiceAccountToken(client *kclientset.Clientset, ns, name string) (string, error) {
222
+	secrets, err := client.Core().Secrets(ns).List(api.ListOptions{})
223 223
 	if err != nil {
224 224
 		return "", err
225 225
 	}
226 226
 	for _, secret := range secrets.Items {
227 227
 		if secret.Type == api.SecretTypeServiceAccountToken && secret.Annotations[api.ServiceAccountNameKey] == name {
228
-			sa, err := client.ServiceAccounts(ns).Get(name)
228
+			sa, err := client.Core().ServiceAccounts(ns).Get(name)
229 229
 			if err != nil {
230 230
 				return "", err
231 231
 			}
... ...
@@ -276,7 +277,7 @@ func TestAutomaticCreationOfPullSecrets(t *testing.T) {
276 276
 	}
277 277
 }
278 278
 
279
-func waitForServiceAccountPullSecret(client *kclient.Client, ns, name string, attempts int, interval time.Duration) (string, error) {
279
+func waitForServiceAccountPullSecret(client *kclientset.Clientset, ns, name string, attempts int, interval time.Duration) (string, error) {
280 280
 	for i := 0; i <= attempts; i++ {
281 281
 		time.Sleep(interval)
282 282
 		token, err := getServiceAccountPullSecret(client, ns, name)
... ...
@@ -290,8 +291,8 @@ func waitForServiceAccountPullSecret(client *kclient.Client, ns, name string, at
290 290
 	return "", nil
291 291
 }
292 292
 
293
-func getServiceAccountPullSecret(client *kclient.Client, ns, name string) (string, error) {
294
-	secrets, err := client.Secrets(ns).List(api.ListOptions{})
293
+func getServiceAccountPullSecret(client *kclientset.Clientset, ns, name string) (string, error) {
294
+	secrets, err := client.Core().Secrets(ns).List(api.ListOptions{})
295 295
 	if err != nil {
296 296
 		return "", err
297 297
 	}
... ...
@@ -10,8 +10,11 @@ import (
10 10
 
11 11
 	kapi "k8s.io/kubernetes/pkg/api"
12 12
 	kerrs "k8s.io/kubernetes/pkg/api/errors"
13
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
14
+	kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
13 15
 	"k8s.io/kubernetes/pkg/client/restclient"
14 16
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
17
+	adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
15 18
 	"k8s.io/kubernetes/pkg/fields"
16 19
 	"k8s.io/kubernetes/pkg/quota"
17 20
 	"k8s.io/kubernetes/pkg/util/wait"
... ...
@@ -35,8 +38,8 @@ func KubeConfigPath() string {
35 35
 	return filepath.Join(GetBaseDir(), "openshift.local.config", "master", "admin.kubeconfig")
36 36
 }
37 37
 
38
-func GetClusterAdminKubeClient(adminKubeConfigFile string) (*kclient.Client, error) {
39
-	c, _, err := configapi.GetKubeClient(adminKubeConfigFile, nil)
38
+func GetClusterAdminKubeClient(adminKubeConfigFile string) (*kclientset.Clientset, error) {
39
+	_, c, _, err := configapi.GetKubeClient(adminKubeConfigFile, nil)
40 40
 	if err != nil {
41 41
 		return nil, err
42 42
 	}
... ...
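The test-util helpers lean on the widened return of configapi.GetKubeClient: the assignments above take the second value as the *kclientset.Clientset and the third as the *restclient.Config, while the leading blank identifier is presumably the legacy unversioned client kept around until the remaining call sites migrate. A hedged sketch of a caller that keeps only what it needs; the adminClients name is illustrative:

package example // hypothetical package, for illustration only

import (
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"

	configapi "github.com/openshift/origin/pkg/cmd/server/api"
)

// adminClients mirrors the pattern above: ignore the first return value
// (assumed to be the legacy unversioned client), keep the clientset and the
// rest config.
func adminClients(kubeConfigFile string) (*kclientset.Clientset, *restclient.Config, error) {
	_, clientset, clientConfig, err := configapi.GetKubeClient(kubeConfigFile, nil)
	if err != nil {
		return nil, nil, err
	}
	return clientset, clientConfig, nil
}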
@@ -56,14 +59,15 @@ func GetClusterAdminClient(adminKubeConfigFile string) (*client.Client, error) {
56 56
 }
57 57
 
58 58
 func GetClusterAdminClientConfig(adminKubeConfigFile string) (*restclient.Config, error) {
59
-	_, conf, err := configapi.GetKubeClient(adminKubeConfigFile, nil)
59
+	_, _, conf, err := configapi.GetKubeClient(adminKubeConfigFile, nil)
60 60
 	if err != nil {
61 61
 		return nil, err
62 62
 	}
63 63
 	return conf, nil
64 64
 }
65 65
 
66
-func GetClientForUser(clientConfig restclient.Config, username string) (*client.Client, *kclient.Client, *restclient.Config, error) {
66
+// TODO internalclientset: get rid of oldClient after next rebase
67
+func GetClientForUser(clientConfig restclient.Config, username string) (*client.Client, *kclientset.Clientset, *restclient.Config, error) {
67 68
 	token, err := tokencmd.RequestToken(&clientConfig, nil, username, "password")
68 69
 	if err != nil {
69 70
 		return nil, nil, nil, err
... ...
@@ -76,13 +80,14 @@ func GetClientForUser(clientConfig restclient.Config, username string) (*client.
76 76
 	if err != nil {
77 77
 		return nil, nil, nil, err
78 78
 	}
79
+	kubeClientset := adapter.FromUnversionedClient(kubeClient)
79 80
 
80 81
 	osClient, err := client.New(&userClientConfig)
81 82
 	if err != nil {
82 83
 		return nil, nil, nil, err
83 84
 	}
84 85
 
85
-	return osClient, kubeClient, &userClientConfig, nil
86
+	return osClient, kubeClientset, &userClientConfig, nil
86 87
 }
87 88
 
88 89
 func GetScopedClientForUser(adminClient *client.Client, clientConfig restclient.Config, username string, scopes []string) (*client.Client, *kclient.Client, *restclient.Config, error) {
... ...
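Where a fresh client is still built from a user or service-account token (GetClientForUser above, GetClientForServiceAccount below), the change wraps the unversioned client with the internalclientset adapter rather than constructing a second client from scratch. A small sketch of that conversion, assuming a *restclient.Config is already in hand; the buildClients name is illustrative:

package example // hypothetical package, for illustration only

import (
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"
	kclient "k8s.io/kubernetes/pkg/client/unversioned"
	adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset"
)

// buildClients returns both flavours from one rest config: the legacy
// unversioned client and the clientset adapted from it, so both share a
// transport while call sites migrate.
func buildClients(cfg *restclient.Config) (*kclient.Client, *kclientset.Clientset, error) {
	kubeClient, err := kclient.New(cfg)
	if err != nil {
		return nil, nil, err
	}
	return kubeClient, adapter.FromUnversionedClient(kubeClient), nil
}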
@@ -121,15 +126,15 @@ func GetScopedClientForUser(adminClient *client.Client, clientConfig restclient.
121 121
 	return osClient, kubeClient, &scopedConfig, nil
122 122
 }
123 123
 
124
-func GetClientForServiceAccount(adminClient *kclient.Client, clientConfig restclient.Config, namespace, name string) (*client.Client, *kclient.Client, *restclient.Config, error) {
125
-	_, err := adminClient.Namespaces().Create(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: namespace}})
124
+func GetClientForServiceAccount(adminClient *kclientset.Clientset, clientConfig restclient.Config, namespace, name string) (*client.Client, *kclientset.Clientset, *restclient.Config, error) {
125
+	_, err := adminClient.Core().Namespaces().Create(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: namespace}})
126 126
 	if err != nil && !kerrs.IsAlreadyExists(err) {
127 127
 		return nil, nil, nil, err
128 128
 	}
129 129
 
130
-	sa, err := adminClient.ServiceAccounts(namespace).Create(&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: name}})
130
+	sa, err := adminClient.Core().ServiceAccounts(namespace).Create(&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: name}})
131 131
 	if kerrs.IsAlreadyExists(err) {
132
-		sa, err = adminClient.ServiceAccounts(namespace).Get(name)
132
+		sa, err = adminClient.Core().ServiceAccounts(namespace).Get(name)
133 133
 	}
134 134
 	if err != nil {
135 135
 		return nil, nil, nil, err
... ...
@@ -138,7 +143,7 @@ func GetClientForServiceAccount(adminClient *kclient.Client, clientConfig restcl
138 138
 	token := ""
139 139
 	err = wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
140 140
 		selector := fields.OneTermEqualSelector(kapi.SecretTypeField, string(kapi.SecretTypeServiceAccountToken))
141
-		secrets, err := adminClient.Secrets(namespace).List(kapi.ListOptions{FieldSelector: selector})
141
+		secrets, err := adminClient.Core().Secrets(namespace).List(kapi.ListOptions{FieldSelector: selector})
142 142
 		if err != nil {
143 143
 			return false, err
144 144
 		}
... ...
@@ -161,19 +166,20 @@ func GetClientForServiceAccount(adminClient *kclient.Client, clientConfig restcl
161 161
 	if err != nil {
162 162
 		return nil, nil, nil, err
163 163
 	}
164
+	kubeClientset := adapter.FromUnversionedClient(kubeClient)
164 165
 
165 166
 	osClient, err := client.New(&saClientConfig)
166 167
 	if err != nil {
167 168
 		return nil, nil, nil, err
168 169
 	}
169 170
 
170
-	return osClient, kubeClient, &saClientConfig, nil
171
+	return osClient, kubeClientset, &saClientConfig, nil
171 172
 }
172 173
 
173 174
 // WaitForResourceQuotaLimitSync watches the given resource quota until its hard limit is updated to match the desired
174 175
 // spec or timeout occurs.
175 176
 func WaitForResourceQuotaLimitSync(
176
-	client kclient.ResourceQuotaInterface,
177
+	client kcoreclient.ResourceQuotaInterface,
177 178
 	name string,
178 179
 	hardLimit kapi.ResourceList,
179 180
 	timeout time.Duration,
... ...
@@ -5,7 +5,7 @@ import (
5 5
 	"time"
6 6
 
7 7
 	kapi "k8s.io/kubernetes/pkg/api"
8
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
8
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
9 9
 	"k8s.io/kubernetes/pkg/watch"
10 10
 
11 11
 	"github.com/openshift/origin/pkg/cmd/util"
... ...
@@ -37,12 +37,12 @@ func CreateNamespace(clusterAdminKubeConfig, name string) (err error) {
37 37
 	return err
38 38
 }
39 39
 
40
-func DeleteAndWaitForNamespaceTermination(c *kclient.Client, name string) error {
41
-	w, err := c.Namespaces().Watch(kapi.ListOptions{})
40
+func DeleteAndWaitForNamespaceTermination(c *kclientset.Clientset, name string) error {
41
+	w, err := c.Core().Namespaces().Watch(kapi.ListOptions{})
42 42
 	if err != nil {
43 43
 		return err
44 44
 	}
45
-	if err := c.Namespaces().Delete(name); err != nil {
45
+	if err := c.Core().Namespaces().Delete(name, nil); err != nil {
46 46
 		return err
47 47
 	}
48 48
 	_, err = watch.Until(30*time.Second, w, func(event watch.Event) (bool, error) {
... ...
@@ -12,8 +12,8 @@ import (
12 12
 	"github.com/golang/glog"
13 13
 	"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
14 14
 	kapi "k8s.io/kubernetes/pkg/api"
15
+	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
15 16
 	"k8s.io/kubernetes/pkg/client/restclient"
16
-	kclient "k8s.io/kubernetes/pkg/client/unversioned"
17 17
 	"k8s.io/kubernetes/pkg/util/wait"
18 18
 
19 19
 	"github.com/openshift/origin/pkg/client"
... ...
@@ -380,7 +380,7 @@ func StartTestMasterAPI() (*configapi.MasterConfig, string, error) {
380 380
 
381 381
 // serviceAccountSecretsExist checks whether the given service account has at least a token and a dockercfg
382 382
 // secret associated with it.
383
-func serviceAccountSecretsExist(client *kclient.Client, namespace string, sa *kapi.ServiceAccount) bool {
383
+func serviceAccountSecretsExist(clientset *kclientset.Clientset, namespace string, sa *kapi.ServiceAccount) bool {
384 384
 	foundTokenSecret := false
385 385
 	foundDockercfgSecret := false
386 386
 	for _, secret := range sa.Secrets {
... ...
@@ -388,7 +388,7 @@ func serviceAccountSecretsExist(client *kclient.Client, namespace string, sa *ka
388 388
 		if len(secret.Namespace) > 0 {
389 389
 			ns = secret.Namespace
390 390
 		}
391
-		secret, err := client.Secrets(ns).Get(secret.Name)
391
+		secret, err := clientset.Core().Secrets(ns).Get(secret.Name)
392 392
 		if err == nil {
393 393
 			switch secret.Type {
394 394
 			case kapi.SecretTypeServiceAccountToken:
... ...
@@ -404,8 +404,8 @@ func serviceAccountSecretsExist(client *kclient.Client, namespace string, sa *ka
404 404
 // WaitForPodCreationServiceAccounts ensures that the service account needed for pod creation exists
405 405
 // and that the cache for the admission control that checks for pod tokens has caught up to allow
406 406
 // pod creation.
407
-func WaitForPodCreationServiceAccounts(client *kclient.Client, namespace string) error {
408
-	if err := WaitForServiceAccounts(client, namespace, []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
407
+func WaitForPodCreationServiceAccounts(clientset *kclientset.Clientset, namespace string) error {
408
+	if err := WaitForServiceAccounts(clientset, namespace, []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
409 409
 		return err
410 410
 	}
411 411
 
... ...
@@ -419,12 +419,12 @@ func WaitForPodCreationServiceAccounts(client *kclient.Client, namespace string)
419 419
 	}
420 420
 
421 421
 	return wait.PollImmediate(time.Second, PodCreationWaitTimeout, func() (bool, error) {
422
-		pod, err := client.Pods(namespace).Create(testPod)
422
+		pod, err := clientset.Core().Pods(namespace).Create(testPod)
423 423
 		if err != nil {
424 424
 			glog.Warningf("Error attempting to create test pod: %v", err)
425 425
 			return false, nil
426 426
 		}
427
-		err = client.Pods(namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))
427
+		err = clientset.Core().Pods(namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))
428 428
 		if err != nil {
429 429
 			return false, err
430 430
 		}
... ...
@@ -434,12 +434,12 @@ func WaitForPodCreationServiceAccounts(client *kclient.Client, namespace string)
434 434
 
435 435
 // WaitForServiceAccounts ensures the service accounts needed by build pods exist in the namespace
436 436
 // The extra controllers tend to starve the service account controller
437
-func WaitForServiceAccounts(client *kclient.Client, namespace string, accounts []string) error {
438
-	serviceAccounts := client.ServiceAccounts(namespace)
437
+func WaitForServiceAccounts(clientset *kclientset.Clientset, namespace string, accounts []string) error {
438
+	serviceAccounts := clientset.Core().ServiceAccounts(namespace)
439 439
 	return wait.Poll(time.Second, ServiceAccountWaitTimeout, func() (bool, error) {
440 440
 		for _, account := range accounts {
441 441
 			if sa, err := serviceAccounts.Get(account); err != nil {
442
-				if !serviceAccountSecretsExist(client, namespace, sa) {
442
+				if !serviceAccountSecretsExist(clientset, namespace, sa) {
443 443
 					continue
444 444
 				}
445 445
 				return false, nil
... ...
@@ -40,7 +40,7 @@ func WaitForAddress(pod *kapi.Pod, service *kapi.Service, ns string) (string, er
40 40
 	if err != nil {
41 41
 		return "", err
42 42
 	}
43
-	watcher, err := client.Endpoints(ns).Watch(kapi.ListOptions{})
43
+	watcher, err := client.Core().Endpoints(ns).Watch(kapi.ListOptions{})
44 44
 	if err != nil {
45 45
 		return "", fmt.Errorf("Unexpected error: %v", err)
46 46
 	}
... ...
@@ -96,7 +96,7 @@ func CreatePodFromImage(stream *imageapi.ImageStream, tag, ns string) *kapi.Pod
96 96
 			RestartPolicy: kapi.RestartPolicyNever,
97 97
 		},
98 98
 	}
99
-	if pod, err := client.Pods(ns).Create(pod); err != nil {
99
+	if pod, err := client.Core().Pods(ns).Create(pod); err != nil {
100 100
 		fmt.Printf("%v\n", err)
101 101
 		return nil
102 102
 	} else {
... ...
@@ -122,7 +122,7 @@ func CreateServiceForPod(pod *kapi.Pod, ns string) *kapi.Service {
122 122
 			}},
123 123
 		},
124 124
 	}
125
-	if service, err := client.Services(ns).Create(service); err != nil {
125
+	if service, err := client.Core().Services(ns).Create(service); err != nil {
126 126
 		fmt.Printf("%v\n", err)
127 127
 		return nil
128 128
 	} else {
... ...
@@ -136,6 +136,6 @@ func CleanupServiceAndPod(pod *kapi.Pod, service *kapi.Service, ns string) {
136 136
 	if err != nil {
137 137
 		return
138 138
 	}
139
-	client.Pods(ns).Delete(pod.Name, nil)
140
-	client.Services(ns).Delete(service.Name)
139
+	client.Core().Pods(ns).Delete(pod.Name, nil)
140
+	client.Core().Services(ns).Delete(service.Name, nil)
141 141
 }