
fixed go vet invocation and errors

Steve Kuznetsov authored on 2015/10/01 21:56:42
Showing 33 changed files
... ...
@@ -57,7 +57,7 @@ else
 verify: build
 endif
	hack/verify-gofmt.sh
-	#hack/verify-govet.sh disable until we can verify that the output is sane
+	hack/verify-govet.sh
	hack/verify-generated-deep-copies.sh
	hack/verify-generated-conversions.sh
	hack/verify-generated-completions.sh
... ...
@@ -17,12 +17,13 @@ source "${OS_ROOT}/hack/util.sh"
 cd "${OS_ROOT}"
 mkdir -p _output/govet
 
+os::build::setup_env
+
 FAILURE=false
 test_dirs=$(find_files | cut -d '/' -f 1-2 | sort -u)
 for test_dir in $test_dirs
 do
-  go tool vet -shadow=false \
-              $test_dir
+  go tool vet -shadow=false $test_dir
   if [ "$?" -ne 0 ]
   then 
     FAILURE=true
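
Re-enabling the script is what surfaced the fixes in the rest of this commit. Note the invocation keeps `-shadow=false`, so vet's variable-shadowing check stays off. A minimal sketch of what that check would flag (hypothetical `fetch` helper):

```go
package main

import (
	"errors"
	"fmt"
)

// fetch is a hypothetical stand-in for any call returning a value and an error.
func fetch() (int, error) { return 0, errors.New("not found") }

func main() {
	n, err := fetch()
	if err != nil {
		// The -shadow check would flag this := — it redeclares n and err,
		// so the outer variables are never updated by the retry.
		n, err := fetch()
		fmt.Println("retry:", n, err)
	}
	fmt.Println(n, err)
}
```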
... ...
@@ -82,7 +82,7 @@ type Edge struct {
 }
 
 func NewEdge(head, tail graph.Node, kinds ...string) Edge {
-	return Edge{concrete.Edge{head, tail}, sets.NewString(kinds...)}
+	return Edge{concrete.Edge{F: head, T: tail}, sets.NewString(kinds...)}
 }
 
 func (e Edge) Kinds() sets.String {
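
This hunk, and many below, fix vet's unkeyed-composite-literal complaint: literals of struct types defined in other packages should name their fields, so the code keeps compiling (and keeps meaning the same thing) if the upstream struct grows or reorders fields. In miniature (`edge` here only mirrors the shape of `concrete.Edge`):

```go
package main

import "fmt"

// edge stands in for an imported struct such as concrete.Edge.
type edge struct {
	F, T int
}

func main() {
	// Unkeyed: for imported types vet reports "composite literal uses
	// unkeyed fields", since an upstream field change silently
	// reassigns which value lands in which field.
	unkeyed := edge{1, 2}

	// Keyed, as the commit adopts: order-independent and self-documenting.
	keyed := edge{F: 1, T: 2}

	fmt.Println(unkeyed, keyed)
}
```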
... ...
@@ -650,7 +650,7 @@ func Fprint(out io.Writer, g Graph) {
 	}
 	for _, edge := range g.Edges() {
 		for _, edgeKind := range g.EdgeKinds(edge).List() {
-			fmt.Fprintf(out, "edge %d -> %d : %d\n", edge.From().ID(), edge.From().ID(), edgeKind)
+			fmt.Fprintf(out, "edge %d -> %d : %s\n", edge.From().ID(), edge.From().ID(), edgeKind)
 		}
 	}
 }
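
`edgeKind` is a string, so `%d` was a wrong-type argument under vet's printf check. (The fix is verb-only; `edge.From().ID()` is still passed twice, which vet cannot catch since both arguments type-check.) A minimal reproduction:

```go
package main

import "fmt"

func main() {
	kind := "ImageStreamTag"
	// vet: wrong-type argument — %d expects an integer.
	// At runtime this prints "edge kind: %!d(string=ImageStreamTag)".
	fmt.Printf("edge kind: %d\n", kind)
	// The commit's fix: match the verb to the type.
	fmt.Printf("edge kind: %s\n", kind)
}
```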
... ...
@@ -185,7 +185,7 @@ func TestNewSearchRequest(t *testing.T) {
 		case err != nil && !testCase.expectedError:
 			t.Errorf("%s: expected no error but got: %v", testCase.name, err)
 		case err == nil && testCase.expectedError:
-			t.Error("%s: expected an error but got none")
+			t.Errorf("%s: expected an error but got none", testCase.name)
 		}
 
 		if !reflect.DeepEqual(testCase.expectedRequest, request) {
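
Two distinct vet findings meet in this one-line fix: `t.Error` does no formatting, so a `%s` in its argument is almost certainly a mistake, and once switched to `Errorf` the format needs an argument to consume the verb. Sketched:

```go
package search

import "testing"

func TestVetFindings(t *testing.T) {
	name := "case-1"
	// vet: possible formatting directive in a non-formatting call;
	// t.Error would print the "%s" literally.
	t.Error("%s: expected an error but got none")
	// vet: format has one verb but the call supplies no argument.
	t.Errorf("%s: expected an error but got none")
	// Correct, as in the commit:
	t.Errorf("%s: expected an error but got none", name)
}
```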
... ...
@@ -19,12 +19,12 @@ import (
 type readOnlyClusterPolicyCache struct {
 	registry  clusterpolicyregistry.WatchingRegistry
 	indexer   cache.Indexer
-	reflector cache.Reflector
+	reflector *cache.Reflector
 
 	keyFunc cache.KeyFunc
 }
 
-func NewReadOnlyClusterPolicyCache(registry clusterpolicyregistry.WatchingRegistry) readOnlyClusterPolicyCache {
+func NewReadOnlyClusterPolicyCache(registry clusterpolicyregistry.WatchingRegistry) *readOnlyClusterPolicyCache {
 	ctx := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)
 
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
... ...
@@ -43,10 +43,10 @@ func NewReadOnlyClusterPolicyCache(registry clusterpolicyregistry.WatchingRegist
 		2*time.Minute,
 	)
 
-	return readOnlyClusterPolicyCache{
+	return &readOnlyClusterPolicyCache{
 		registry:  registry,
 		indexer:   indexer,
-		reflector: *reflector,
+		reflector: reflector,
 
 		keyFunc: cache.MetaNamespaceKeyFunc,
 	}
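
Holding the reflector as a `*cache.Reflector`, and handing the whole cache around by pointer, avoids copying the reflector's internal state; vet's copylocks check flags such copies when the struct guards state with a mutex (an assumption here about `Reflector`'s internals). The same change repeats for the three sibling caches below. A generic sketch:

```go
package main

import "sync"

// tracker stands in for a Reflector-like type with internal locking
// (the mutex-in-Reflector detail is assumed for illustration).
type tracker struct {
	mu    sync.Mutex
	count int
}

// use passes tracker by value — vet copylocks: "passes lock by value".
func use(t tracker) { _ = t.count }

func main() {
	orig := tracker{}
	copied := orig // vet copylocks: assignment copies the lock
	use(copied)

	// Sharing a pointer instead keeps one tracker and one lock.
	shared := &orig
	shared.mu.Lock()
	shared.count++
	shared.mu.Unlock()
}
```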
... ...
@@ -104,5 +104,5 @@ func (c *readOnlyClusterPolicyCache) Get(name string) (*authorizationapi.Cluster
 }
 
 func newReadOnlyClusterPolicies(cache readOnlyAuthorizationCache) client.ReadOnlyClusterPolicyInterface {
-	return &cache.readOnlyClusterPolicyCache
+	return cache.readOnlyClusterPolicyCache
 }
... ...
@@ -14,7 +14,7 @@ import (
 	testregistry "github.com/openshift/origin/pkg/authorization/registry/test"
 )
 
-func beforeTestingSetup_readonlyclusterpolicycache() (testCache readOnlyClusterPolicyCache, cacheChannel, testChannel chan struct{}) {
+func beforeTestingSetup_readonlyclusterpolicycache() (testCache *readOnlyClusterPolicyCache, cacheChannel, testChannel chan struct{}) {
 	cacheChannel = make(chan struct{})
 
 	testRegistry := testregistry.NewClusterPolicyRegistry(testClusterPolicies, nil)
... ...
@@ -19,12 +19,12 @@ import (
 type readOnlyClusterPolicyBindingCache struct {
 	registry  clusterbindingregistry.WatchingRegistry
 	indexer   cache.Indexer
-	reflector cache.Reflector
+	reflector *cache.Reflector
 
 	keyFunc cache.KeyFunc
 }
 
-func NewReadOnlyClusterPolicyBindingCache(registry clusterbindingregistry.WatchingRegistry) readOnlyClusterPolicyBindingCache {
+func NewReadOnlyClusterPolicyBindingCache(registry clusterbindingregistry.WatchingRegistry) *readOnlyClusterPolicyBindingCache {
 	ctx := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)
 
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
... ...
@@ -43,10 +43,10 @@ func NewReadOnlyClusterPolicyBindingCache(registry clusterbindingregistry.Watchi
 		2*time.Minute,
 	)
 
-	return readOnlyClusterPolicyBindingCache{
+	return &readOnlyClusterPolicyBindingCache{
 		registry:  registry,
 		indexer:   indexer,
-		reflector: *reflector,
+		reflector: reflector,
 
 		keyFunc: cache.MetaNamespaceKeyFunc,
 	}
... ...
@@ -104,5 +104,5 @@ func (c *readOnlyClusterPolicyBindingCache) Get(name string) (*authorizationapi.
 }
 
 func newReadOnlyClusterPolicyBindings(cache readOnlyAuthorizationCache) client.ReadOnlyClusterPolicyBindingInterface {
-	return &cache.readOnlyClusterPolicyBindingCache
+	return cache.readOnlyClusterPolicyBindingCache
 }
... ...
@@ -14,7 +14,7 @@ import (
 	testregistry "github.com/openshift/origin/pkg/authorization/registry/test"
 )
 
-func beforeTestingSetup_readonlyclusterpolicybindingcache() (testCache readOnlyClusterPolicyBindingCache, cacheChannel, testChannel chan struct{}) {
+func beforeTestingSetup_readonlyclusterpolicybindingcache() (testCache *readOnlyClusterPolicyBindingCache, cacheChannel, testChannel chan struct{}) {
 	cacheChannel = make(chan struct{})
 
 	testRegistry := testregistry.NewClusterPolicyBindingRegistry(testClusterPolicyBindings, nil)
... ...
@@ -19,12 +19,12 @@ import (
 type readOnlyPolicyCache struct {
 	registry  policyregistry.WatchingRegistry
 	indexer   cache.Indexer
-	reflector cache.Reflector
+	reflector *cache.Reflector
 
 	keyFunc cache.KeyFunc
 }
 
-func NewReadOnlyPolicyCache(registry policyregistry.WatchingRegistry) readOnlyPolicyCache {
+func NewReadOnlyPolicyCache(registry policyregistry.WatchingRegistry) *readOnlyPolicyCache {
 	ctx := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)
 
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
... ...
@@ -43,10 +43,10 @@ func NewReadOnlyPolicyCache(registry policyregistry.WatchingRegistry) readOnlyPo
 		2*time.Minute,
 	)
 
-	return readOnlyPolicyCache{
+	return &readOnlyPolicyCache{
 		registry:  registry,
 		indexer:   indexer,
-		reflector: *reflector,
+		reflector: reflector,
 
 		keyFunc: cache.MetaNamespaceKeyFunc,
 	}
... ...
@@ -120,7 +120,7 @@ type readOnlyPolicies struct {
 
 func newReadOnlyPolicies(cache readOnlyAuthorizationCache, namespace string) client.ReadOnlyPolicyInterface {
 	return &readOnlyPolicies{
-		readOnlyPolicyCache: &cache.readOnlyPolicyCache,
+		readOnlyPolicyCache: cache.readOnlyPolicyCache,
 		namespace:           namespace,
 	}
 }
... ...
@@ -14,7 +14,7 @@ import (
 	testregistry "github.com/openshift/origin/pkg/authorization/registry/test"
 )
 
-func beforeTestingSetup_readonlypolicycache() (testCache readOnlyPolicyCache, cacheChannel, testChannel chan struct{}) {
+func beforeTestingSetup_readonlypolicycache() (testCache *readOnlyPolicyCache, cacheChannel, testChannel chan struct{}) {
 	cacheChannel = make(chan struct{})
 
 	testRegistry := testregistry.NewPolicyRegistry(testPolicies, nil)
... ...
@@ -19,12 +19,12 @@ import (
 type readOnlyPolicyBindingCache struct {
 	registry  bindingregistry.WatchingRegistry
 	indexer   cache.Indexer
-	reflector cache.Reflector
+	reflector *cache.Reflector
 
 	keyFunc cache.KeyFunc
 }
 
-func NewReadOnlyPolicyBindingCache(registry bindingregistry.WatchingRegistry) readOnlyPolicyBindingCache {
+func NewReadOnlyPolicyBindingCache(registry bindingregistry.WatchingRegistry) *readOnlyPolicyBindingCache {
 	ctx := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)
 
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
... ...
@@ -43,10 +43,10 @@ func NewReadOnlyPolicyBindingCache(registry bindingregistry.WatchingRegistry) re
 		2*time.Minute,
 	)
 
-	return readOnlyPolicyBindingCache{
+	return &readOnlyPolicyBindingCache{
 		registry:  registry,
 		indexer:   indexer,
-		reflector: *reflector,
+		reflector: reflector,
 
 		keyFunc: cache.MetaNamespaceKeyFunc,
 	}
... ...
@@ -120,7 +120,7 @@ type readOnlyPolicyBindings struct {
 
 func newReadOnlyPolicyBindings(cache readOnlyAuthorizationCache, namespace string) client.ReadOnlyPolicyBindingInterface {
 	return &readOnlyPolicyBindings{
-		readOnlyPolicyBindingCache: &cache.readOnlyPolicyBindingCache,
+		readOnlyPolicyBindingCache: cache.readOnlyPolicyBindingCache,
 		namespace:                  namespace,
 	}
 }
... ...
@@ -14,7 +14,7 @@ import (
 	testregistry "github.com/openshift/origin/pkg/authorization/registry/test"
 )
 
-func beforeTestingSetup_readonlypolicybindingcache() (testCache readOnlyPolicyBindingCache, cacheChannel, testChannel chan struct{}) {
+func beforeTestingSetup_readonlypolicybindingcache() (testCache *readOnlyPolicyBindingCache, cacheChannel, testChannel chan struct{}) {
 	cacheChannel = make(chan struct{})
 
 	testRegistry := testregistry.NewPolicyBindingRegistry(testPolicyBindings, nil)
... ...
@@ -23,10 +23,10 @@ type ReadOnlyCache interface {
 
 // readOnlyAuthorizationCache embeds four parallel caches for policies and bindings on both the project and cluster level
 type readOnlyAuthorizationCache struct {
-	readOnlyPolicyCache               readOnlyPolicyCache
-	readOnlyClusterPolicyCache        readOnlyClusterPolicyCache
-	readOnlyPolicyBindingCache        readOnlyPolicyBindingCache
-	readOnlyClusterPolicyBindingCache readOnlyClusterPolicyBindingCache
+	readOnlyPolicyCache               *readOnlyPolicyCache
+	readOnlyClusterPolicyCache        *readOnlyClusterPolicyCache
+	readOnlyPolicyBindingCache        *readOnlyPolicyBindingCache
+	readOnlyClusterPolicyBindingCache *readOnlyClusterPolicyBindingCache
 }
 
 // Run begins watching and synchronizing the cache
... ...
@@ -52,7 +52,7 @@ func NewCmdRequestProject(name, fullName, ocLoginName, ocProjectName string, f *
 	cmd := &cobra.Command{
 		Use:     fmt.Sprintf("%s NAME [--display-name=DISPLAYNAME] [--description=DESCRIPTION]", name),
 		Short:   "Request a new project",
-		Long:    fmt.Sprintf(requestProjectLong, ocLoginName, ocProjectName),
+		Long:    requestProjectLong,
 		Example: fmt.Sprintf(requestProjectExample, fullName),
 		Run: func(cmd *cobra.Command, args []string) {
 			if err := options.complete(cmd, f); err != nil {
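
vet's printf check also runs in reverse: a `Sprintf` whose format string does not consume its arguments is flagged, and here the fix drops the wrapper entirely (evidently `requestProjectLong` has no directives for `ocLoginName` and `ocProjectName`). In miniature, with a hypothetical help string:

```go
package main

import "fmt"

// Hypothetical stand-in: help text containing no % directives.
const requestProjectLong = "Create a new project for yourself."

func main() {
	// vet: Sprintf call has arguments but no formatting directives.
	// The unconsumed arguments leak into the output:
	//   Create a new project for yourself.%!(EXTRA string=oc login, string=oc project)
	fmt.Println(fmt.Sprintf(requestProjectLong, "oc login", "oc project"))

	// The commit's fix: use the string directly.
	fmt.Println(requestProjectLong)
}
```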
... ...
@@ -394,7 +394,7 @@ func WaitForBuildComplete(c osclient.BuildInterface, name string) error {
 				return nil
 			}
 			if name != list.Items[i].Name || isFailed(&list.Items[i]) {
-				return fmt.Errorf("the build %s/%s status is %q", list.Items[i].Namespace, list.Items[i].Name, &list.Items[i].Status.Phase)
+				return fmt.Errorf("the build %s/%s status is %q", list.Items[i].Namespace, list.Items[i].Name, list.Items[i].Status.Phase)
 			}
 		}
 
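The stray `&` made the `%q` argument a pointer, so the error message would have shown an address instead of the phase; vet's printf check catches the type mismatch. Reduced:

```go
package main

import "fmt"

type buildPhase string

func main() {
	p := buildPhase("Complete")
	// vet: wrong-type argument — %q wants a string-like value, not *buildPhase.
	// Runtime output looks like: status is %!q(*main.buildPhase=0x…)
	fmt.Printf("status is %q\n", &p)
	// The commit's fix: drop the &.
	fmt.Printf("status is %q\n", p)
}
```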
... ...
@@ -729,7 +729,7 @@ func describeVolumeSource(source *kapi.VolumeSource) string {
 	case source.HostPath != nil:
 		return fmt.Sprintf("host path %s", source.HostPath.Path)
 	case source.ISCSI != nil:
-		return fmt.Sprintf("ISCSI %s target-portal=%s type=%s lun=%s%s", source.ISCSI.IQN, source.ISCSI.TargetPortal, source.ISCSI.FSType, source.ISCSI.Lun, sourceAccessMode(source.ISCSI.ReadOnly))
+		return fmt.Sprintf("ISCSI %s target-portal=%s type=%s lun=%d%s", source.ISCSI.IQN, source.ISCSI.TargetPortal, source.ISCSI.FSType, source.ISCSI.Lun, sourceAccessMode(source.ISCSI.ReadOnly))
 	case source.NFS != nil:
 		return fmt.Sprintf("NFS %s:%s%s", source.NFS.Server, source.NFS.Path, sourceAccessMode(source.NFS.ReadOnly))
 	case source.PersistentVolumeClaim != nil:
... ...
@@ -34,7 +34,7 @@ func (o DiagnosticsOptions) buildClientDiagnostics(rawConfig *clientcmdapi.Confi
 		switch diagnosticName {
 		case clientdiags.ConfigContextsName:
 			for contextName := range rawConfig.Contexts {
-				diagnostics = append(diagnostics, clientdiags.ConfigContext{rawConfig, contextName})
+				diagnostics = append(diagnostics, clientdiags.ConfigContext{RawConfig: rawConfig, ContextName: contextName})
 			}
 
 		default:
... ...
@@ -46,13 +46,13 @@ func (o DiagnosticsOptions) buildClusterDiagnostics(rawConfig *clientcmdapi.Conf
 	for _, diagnosticName := range requestedDiagnostics {
 		switch diagnosticName {
 		case clustdiags.NodeDefinitionsName:
-			diagnostics = append(diagnostics, &clustdiags.NodeDefinitions{kclusterClient, clusterClient})
+			diagnostics = append(diagnostics, &clustdiags.NodeDefinitions{KubeClient: kclusterClient, OsClient: clusterClient})
 		case clustdiags.ClusterRegistryName:
-			diagnostics = append(diagnostics, &clustdiags.ClusterRegistry{kclusterClient, clusterClient})
+			diagnostics = append(diagnostics, &clustdiags.ClusterRegistry{KubeClient: kclusterClient, OsClient: clusterClient})
 		case clustdiags.ClusterRouterName:
-			diagnostics = append(diagnostics, &clustdiags.ClusterRouter{kclusterClient, clusterClient})
+			diagnostics = append(diagnostics, &clustdiags.ClusterRouter{KubeClient: kclusterClient, OsClient: clusterClient})
 		case clustdiags.ClusterRolesName:
-			diagnostics = append(diagnostics, &clustdiags.ClusterRoles{clusterClient, clusterClient})
+			diagnostics = append(diagnostics, &clustdiags.ClusterRoles{ClusterRolesClient: clusterClient, SARClient: clusterClient})
 
 		default:
 			return nil, false, fmt.Errorf("unknown diagnostic: %v", diagnosticName)
... ...
@@ -59,19 +59,19 @@ func (o DiagnosticsOptions) buildHostDiagnostics() ([]types.Diagnostic, bool, er
 	for _, diagnosticName := range requestedDiagnostics {
 		switch diagnosticName {
 		case systemddiags.AnalyzeLogsName:
-			diagnostics = append(diagnostics, systemddiags.AnalyzeLogs{systemdUnits})
+			diagnostics = append(diagnostics, systemddiags.AnalyzeLogs{SystemdUnits: systemdUnits})
 
 		case systemddiags.UnitStatusName:
-			diagnostics = append(diagnostics, systemddiags.UnitStatus{systemdUnits})
+			diagnostics = append(diagnostics, systemddiags.UnitStatus{SystemdUnits: systemdUnits})
 
 		case hostdiags.MasterConfigCheckName:
 			if len(o.MasterConfigLocation) > 0 {
-				diagnostics = append(diagnostics, hostdiags.MasterConfigCheck{o.MasterConfigLocation})
+				diagnostics = append(diagnostics, hostdiags.MasterConfigCheck{MasterConfigFile: o.MasterConfigLocation})
 			}
 
 		case hostdiags.NodeConfigCheckName:
 			if len(o.NodeConfigLocation) > 0 {
-				diagnostics = append(diagnostics, hostdiags.NodeConfigCheck{o.NodeConfigLocation})
+				diagnostics = append(diagnostics, hostdiags.NodeConfigCheck{NodeConfigFile: o.NodeConfigLocation})
 			}
 
 		default:
... ...
@@ -183,7 +183,7 @@ func (d *Deployer) Deploy(namespace, deploymentName string) error {
 		}
 		// Scale the deployment down to zero.
 		retryWaitParams := kubectl.NewRetryParams(1*time.Second, 120*time.Second)
-		if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{-1, ""}, retryWaitParams, retryWaitParams); err != nil {
+		if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams); err != nil {
 			glog.Errorf("Couldn't scale down prior deployment %s: %v", deployutil.LabelForDeployment(&candidate), err)
 		} else {
 			glog.Infof("Scaled down prior deployment %s", deployutil.LabelForDeployment(&candidate))
... ...
@@ -110,7 +110,7 @@ func (c *AssetConfig) buildAssetHandler() (http.Handler, error) {
 	assetFunc := assets.JoinAssetFuncs(assets.Asset, java.Asset)
 	assetDirFunc := assets.JoinAssetDirFuncs(assets.AssetDir, java.AssetDir)
 
-	handler := http.FileServer(&assetfs.AssetFS{assetFunc, assetDirFunc, ""})
+	handler := http.FileServer(&assetfs.AssetFS{Asset: assetFunc, AssetDir: assetDirFunc, Prefix: ""})
 
 	// Map of context roots (no leading or trailing slash) to the asset path to serve for requests to a missing asset
 	subcontextMap := map[string]string{
... ...
@@ -143,7 +143,7 @@ func cacheControlFilter(handler http.Handler, value string) http.Handler {
 // namespacingFilter adds a filter that adds the namespace of the request to the context.  Not all requests will have namespaces,
 // but any that do will have the appropriate value added.
 func namespacingFilter(handler http.Handler, contextMapper kapi.RequestContextMapper) http.Handler {
-	infoResolver := &apiserver.APIRequestInfoResolver{sets.NewString("api", "osapi", "oapi"), latest.RESTMapper}
+	infoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: sets.NewString("api", "osapi", "oapi"), RestMapper: latest.RESTMapper}
 
 	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
 		ctx, ok := contextMapper.Get(req)
... ...
@@ -338,7 +338,7 @@ func newAuthorizer(policyClient policyclient.ReadOnlyPolicyClient, projectReques
 }
 
 func newAuthorizationAttributeBuilder(requestContextMapper kapi.RequestContextMapper) authorizer.AuthorizationAttributeBuilder {
-	authorizationAttributeBuilder := authorizer.NewAuthorizationAttributeBuilder(requestContextMapper, &apiserver.APIRequestInfoResolver{sets.NewString("api", "osapi", "oapi"), latest.RESTMapper})
+	authorizationAttributeBuilder := authorizer.NewAuthorizationAttributeBuilder(requestContextMapper, &apiserver.APIRequestInfoResolver{APIPrefixes: sets.NewString("api", "osapi", "oapi"), RestMapper: latest.RESTMapper})
 	return authorizationAttributeBuilder
 }
 
... ...
@@ -51,7 +51,7 @@ type Factory struct {
 
 // NewFactory creates an object that holds common methods across all OpenShift commands
 func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory {
-	mapper := ShortcutExpander{kubectl.ShortcutExpander{latest.RESTMapper}}
+	mapper := ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: latest.RESTMapper}}
 
 	clients := &clientCache{
 		clients: make(map[string]*client.Client),
... ...
@@ -72,7 +72,7 @@ func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory {
 
 	w.Object = func() (meta.RESTMapper, runtime.ObjectTyper) {
 		if cfg, err := clientConfig.ClientConfig(); err == nil {
-			return kubectl.OutputVersionMapper{mapper, cfg.Version}, api.Scheme
+			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cfg.Version}, api.Scheme
 		}
 		return mapper, api.Scheme
 	}
... ...
@@ -41,7 +41,7 @@ func encodeAndCreate(info *resource.Info, namespace string, obj runtime.Object)
 // event a failure occurs. The contents of list will be updated to include the
 // version from the server.
 func (b *Bulk) Create(list *kapi.List, namespace string) []error {
-	resourceMapper := &resource.Mapper{b.Typer, b.Mapper, resource.ClientMapperFunc(b.RESTClientFactory)}
+	resourceMapper := &resource.Mapper{ObjectTyper: b.Typer, RESTMapper: b.Mapper, ClientMapper: resource.ClientMapperFunc(b.RESTClientFactory)}
 	after := b.After
 	if after == nil {
 		after = func(*resource.Info, error) {}
... ...
@@ -30,7 +30,7 @@ type DeploymentConfigScaler struct {
 //  is not nil), and then optionally waits for it's replica count to reach the new value (if wait is not nil).
 func (scaler *DeploymentConfigScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, waitForReplicas *kubectl.RetryParams) error {
 	if preconditions == nil {
-		preconditions = &kubectl.ScalePrecondition{-1, ""}
+		preconditions = &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}
 	}
 	if retry == nil {
 		// Make it try only once, immediately
... ...
@@ -64,7 +64,7 @@ func (scaler *DeploymentConfigScaler) ScaleSimple(namespace, name string, precon
 	const scaled = "scaled"
 	controller, err := scaler.c.GetReplicationController(namespace, name)
 	if err != nil {
-		return "", kubectl.ControllerScaleError{kubectl.ControllerScaleGetFailure, "Unknown", err}
+		return "", kubectl.ControllerScaleError{FailureType: kubectl.ControllerScaleGetFailure, ResourceVersion: "Unknown", ActualError: err}
 	}
 	if preconditions != nil {
 		if err := preconditions.Validate(controller); err != nil {
... ...
@@ -74,7 +74,7 @@ func (scaler *DeploymentConfigScaler) ScaleSimple(namespace, name string, precon
 	controller.Spec.Replicas = int(newSize)
 	// TODO: do retry on 409 errors here?
 	if _, err := scaler.c.UpdateReplicationController(namespace, controller); err != nil {
-		return "", kubectl.ControllerScaleError{kubectl.ControllerScaleUpdateFailure, controller.ResourceVersion, err}
+		return "", kubectl.ControllerScaleError{FailureType: kubectl.ControllerScaleUpdateFailure, ResourceVersion: controller.ResourceVersion, ActualError: err}
 	}
 	// TODO: do a better job of printing objects here.
 	return scaled, nil
... ...
@@ -146,7 +146,7 @@ func (s *RecreateDeploymentStrategy) DeployWithAcceptor(from *kapi.ReplicationCo
 }
 
 func (s *RecreateDeploymentStrategy) scaleAndWait(deployment *kapi.ReplicationController, replicas int, retry *kubectl.RetryParams, wait *kubectl.RetryParams) (*kapi.ReplicationController, error) {
-	if err := s.scaler.Scale(deployment.Namespace, deployment.Name, uint(replicas), &kubectl.ScalePrecondition{-1, ""}, retry, wait); err != nil {
+	if err := s.scaler.Scale(deployment.Namespace, deployment.Name, uint(replicas), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retry, wait); err != nil {
 		return nil, err
 	}
 	updatedDeployment, err := s.getReplicationController(deployment.Namespace, deployment.Name)
... ...
@@ -72,9 +72,9 @@ func (d *NodeDefinitions) CanRun() (bool, error) {
 		Resource: "nodes",
 	})
 	if err != nil {
-		return false, types.DiagnosticError{"DClu0005", fmt.Sprintf(clientErrorGettingNodes, err), err}
+		return false, types.DiagnosticError{ID: "DClu0005", LogMessage: fmt.Sprintf(clientErrorGettingNodes, err), Cause: err}
 	} else if !can {
-		return false, types.DiagnosticError{"DClu0006", "Client does not have access to see node status", err}
+		return false, types.DiagnosticError{ID: "DClu0006", LogMessage: "Client does not have access to see node status", Cause: err}
 	}
 	return true, nil
 }
... ...
@@ -102,9 +102,9 @@ func (d *ClusterRouter) CanRun() (bool, error) {
 		ResourceName: routerName,
 	})
 	if err != nil {
-		return false, types.DiagnosticError{"DClu2010", fmt.Sprintf(clientAccessError, err), err}
+		return false, types.DiagnosticError{ID: "DClu2010", LogMessage: fmt.Sprintf(clientAccessError, err), Cause: err}
 	} else if !can {
-		return false, types.DiagnosticError{"DClu2011", "Client does not have cluster-admin access", err}
+		return false, types.DiagnosticError{ID: "DClu2011", LogMessage: "Client does not have cluster-admin access", Cause: err}
 	}
 	return true, nil
 }
... ...
@@ -2,10 +2,11 @@ package types
 
 import (
	"fmt"
-	"github.com/golang/glog"
	"runtime"
	"strings"
 
+	"github.com/golang/glog"
+
	"github.com/openshift/origin/pkg/diagnostics/log"
 )
 
... ...
@@ -122,7 +123,7 @@ func (r *diagnosticResultImpl) caller(depth int) string {
 	return "diagnostic " + r.origin
 }
 func (r *diagnosticResultImpl) logError(id string, err error, msg string) {
-	r.appendLogs(2, log.Entry{id, r.caller(2), log.ErrorLevel, msg})
+	r.appendLogs(2, log.Entry{ID: id, Origin: r.caller(2), Level: log.ErrorLevel, Message: msg})
 	if de, ok := err.(DiagnosticError); ok {
 		r.appendErrors(de)
 	} else {
... ...
@@ -130,7 +131,7 @@ func (r *diagnosticResultImpl) logError(id string, err error, msg string) {
 	}
 }
 func (r *diagnosticResultImpl) logWarning(id string, err error, msg string) {
-	r.appendLogs(2, log.Entry{id, r.caller(2), log.WarnLevel, msg})
+	r.appendLogs(2, log.Entry{ID: id, Origin: r.caller(2), Level: log.WarnLevel, Message: msg})
 	if de, ok := err.(DiagnosticError); ok {
 		r.appendWarnings(de)
 	} else {
... ...
@@ -138,7 +139,7 @@ func (r *diagnosticResultImpl) logWarning(id string, err error, msg string) {
 	}
 }
 func (r *diagnosticResultImpl) logMessage(id string, level log.Level, msg string) {
-	r.appendLogs(2, log.Entry{id, r.caller(2), level, msg})
+	r.appendLogs(2, log.Entry{ID: id, Origin: r.caller(2), Level: level, Message: msg})
 }
 
 // Public ingress functions
... ...
@@ -804,12 +804,13 @@ func TestRunAll(t *testing.T) {
 			case *kapi.Service:
 				if test.checkPort != "" {
 					if len(tp.Spec.Ports) == 0 {
-						t.Errorf("%s: did not get any ports in service")
+						t.Errorf("%s: did not get any ports in service", test.name)
 						break
 					}
 					expectedPort, _ := strconv.Atoi(test.checkPort)
 					if tp.Spec.Ports[0].Port != expectedPort {
-						t.Errorf("%s: did not get expected port in service. Expected: %d. Got %d\n", expectedPort, tp.Spec.Ports[0].Port)
+						t.Errorf("%s: did not get expected port in service. Expected: %d. Got %d\n",
+							test.name, expectedPort, tp.Spec.Ports[0].Port)
 					}
 				}
 				if test.config.Labels != nil {
... ...
@@ -61,8 +61,6 @@ func Finalize(kubeClient kclient.Interface, namespace *kapi.Namespace) (result *
 			return nil, err
 		}
 	}
-
-	return
 }
 
 // finalizeInternal will update the namespace finalizer list to either have or not have origin finalizer
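
Presumably vet's unreachable-code check: the loop above can apparently only exit through its internal returns, leaving the trailing bare `return` dead. A sketch of the shape, under that assumption:

```go
package main

import "fmt"

// finalize mirrors the structure: a retry loop whose only exits are
// returns, followed by a statement that can never execute.
func finalize() (result int, err error) {
	for i := 0; ; i++ {
		if i > 2 {
			return i, nil
		}
	}
	return // vet: unreachable code
}

func main() {
	fmt.Println(finalize())
}
```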
... ...
@@ -239,7 +239,7 @@ func TestHandleRoute(t *testing.T) {
 	// here
 	plugin := controller.NewUniqueHost(templatePlugin, controller.HostForRoute)
 
-	original := util.Time{time.Now()}
+	original := util.Time{Time: time.Now()}
 
 	//add
 	route := &routeapi.Route{