Switch to registered.RESTMapper()
Lowercase the resource before passing it to the REST mapper
Print a custom forbidden message for watch that includes the API group
ScaleStatus selector internal API change
InfoForObject: pass the preferred version
Add batch.Job to the list of resources checked in admission
Notable controller/config/policy changes
Namespace controller now discovers namespaced resources via the discovery API
PV controller configuration changes
Add batch/autoscaling groups to the bootstrap policy
Add services/proxy and replicationcontrollers/scale to the admin and edit roles
Allow the HPA controller to watch HPAs
Override the default generators for run
Add a system:discovery role and auto-reconcile it on server start
Remove the manual selector from Job, allow auto-creation
@@ -30,8 +30,6 @@ const (
 )
 
 const (
-    APIGroupExtensions = "extensions"
-
     // ResourceGroupPrefix is the prefix for indicating that a resource entry is actually a group of resources. The groups are defined in code and indicate resources that are commonly permissioned together
     ResourceGroupPrefix = "resourcegroup:"
     BuildGroupName      = ResourceGroupPrefix + "builds"
@@ -20,15 +20,15 @@ type ForbiddenMessageResolver struct {
 }
 
 func NewForbiddenMessageResolver(projectRequestForbiddenTemplate string) *ForbiddenMessageResolver {
+    apiGroupIfNotEmpty := "{{if len .Attributes.GetAPIGroup }}{{.Attributes.GetAPIGroup}}.{{end}}"
+
     messageResolver := &ForbiddenMessageResolver{
         namespacedVerbsToResourcesToForbiddenMessageMaker: map[string]map[string]ForbiddenMessageMaker{},
         rootScopedVerbsToResourcesToForbiddenMessageMaker: map[string]map[string]ForbiddenMessageMaker{},
         nonResourceURLForbiddenMessageMaker: newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot "{{.Attributes.GetVerb}}" on "{{.Attributes.GetURL}}"`),
-        defaultForbiddenMessageMaker: newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot "{{.Attributes.GetVerb}}" "{{.Attributes.GetResource}}" with name "{{.Attributes.GetResourceName}}" in project "{{.Namespace}}"`),
+        defaultForbiddenMessageMaker: newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot "{{.Attributes.GetVerb}}" "` + apiGroupIfNotEmpty + `{{.Attributes.GetResource}}" with name "{{.Attributes.GetResourceName}}" in project "{{.Namespace}}"`),
     }
 
-    apiGroupIfNotEmpty := "{{if len .Attributes.GetAPIGroup }}{{.Attributes.GetAPIGroup}}.{{end}}"
-
     // general messages
     messageResolver.addNamespacedForbiddenMessageMaker("create", authorizationapi.ResourceAll, newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot create `+apiGroupIfNotEmpty+`{{.Attributes.GetResource}} in project "{{.Namespace}}"`))
     messageResolver.addRootScopedForbiddenMessageMaker("create", authorizationapi.ResourceAll, newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot create `+apiGroupIfNotEmpty+`{{.Attributes.GetResource}} at the cluster scope`))
@@ -36,6 +36,8 @@ func NewForbiddenMessageResolver(projectRequestForbiddenTemplate string) *Forbid
     messageResolver.addRootScopedForbiddenMessageMaker("get", authorizationapi.ResourceAll, newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot get `+apiGroupIfNotEmpty+`{{.Attributes.GetResource}} at the cluster scope`))
     messageResolver.addNamespacedForbiddenMessageMaker("list", authorizationapi.ResourceAll, newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot list `+apiGroupIfNotEmpty+`{{.Attributes.GetResource}} in project "{{.Namespace}}"`))
     messageResolver.addRootScopedForbiddenMessageMaker("list", authorizationapi.ResourceAll, newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot list all `+apiGroupIfNotEmpty+`{{.Attributes.GetResource}} in the cluster`))
+    messageResolver.addNamespacedForbiddenMessageMaker("watch", authorizationapi.ResourceAll, newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot watch `+apiGroupIfNotEmpty+`{{.Attributes.GetResource}} in project "{{.Namespace}}"`))
+    messageResolver.addRootScopedForbiddenMessageMaker("watch", authorizationapi.ResourceAll, newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot watch all `+apiGroupIfNotEmpty+`{{.Attributes.GetResource}} in the cluster`))
     messageResolver.addNamespacedForbiddenMessageMaker("update", authorizationapi.ResourceAll, newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot update `+apiGroupIfNotEmpty+`{{.Attributes.GetResource}} in project "{{.Namespace}}"`))
     messageResolver.addRootScopedForbiddenMessageMaker("update", authorizationapi.ResourceAll, newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot update `+apiGroupIfNotEmpty+`{{.Attributes.GetResource}} at the cluster scope`))
     messageResolver.addNamespacedForbiddenMessageMaker("delete", authorizationapi.ResourceAll, newTemplateForbiddenMessageMaker(`User "{{.User.GetName}}" cannot delete `+apiGroupIfNotEmpty+`{{.Attributes.GetResource}} in project "{{.Namespace}}"`))
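For context, a minimal standalone sketch of how the apiGroupIfNotEmpty fragment behaves when composed into one of these verb templates. The user/attribute types below are invented stand-ins for illustration only, not the Origin authorizer interfaces:

// Illustrative only: shows that the group prefix appears for grouped
// resources ("extensions.jobs") and is omitted for the legacy "" group.
package main

import (
    "os"
    "text/template"
)

type fakeUser struct{ name string }

func (u fakeUser) GetName() string { return u.name }

type fakeAttributes struct{ verb, apiGroup, resource string }

func (a fakeAttributes) GetVerb() string     { return a.verb }
func (a fakeAttributes) GetAPIGroup() string { return a.apiGroup }
func (a fakeAttributes) GetResource() string { return a.resource }

func main() {
    apiGroupIfNotEmpty := "{{if len .Attributes.GetAPIGroup }}{{.Attributes.GetAPIGroup}}.{{end}}"
    msg := template.Must(template.New("forbidden").Parse(
        `User "{{.User.GetName}}" cannot watch ` + apiGroupIfNotEmpty + `{{.Attributes.GetResource}} in project "{{.Namespace}}"` + "\n"))

    // Grouped resource: renders "... cannot watch extensions.jobs in project ..."
    msg.Execute(os.Stdout, map[string]interface{}{
        "User": fakeUser{"bob"}, "Attributes": fakeAttributes{"watch", "extensions", "jobs"}, "Namespace": "test",
    })
    // Legacy (empty) group: the group prefix is omitted entirely.
    msg.Execute(os.Stdout, map[string]interface{}{
        "User": fakeUser{"bob"}, "Attributes": fakeAttributes{"watch", "", "pods"}, "Namespace": "test",
    })
}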
@@ -10,6 +10,7 @@ import (
     kapi "k8s.io/kubernetes/pkg/api"
     kapierrors "k8s.io/kubernetes/pkg/api/errors"
     kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+    "k8s.io/kubernetes/pkg/util/sets"
 
     authorizationapi "github.com/openshift/origin/pkg/authorization/api"
     "github.com/openshift/origin/pkg/client"
@@ -23,6 +24,10 @@ import (
 const ReconcileClusterRoleBindingsRecommendedName = "reconcile-cluster-role-bindings"
 
 type ReconcileClusterRoleBindingsOptions struct {
+    // RolesToReconcile says which roles should have their default bindings reconciled.
+    // An empty or nil slice means reconcile all of them.
+    RolesToReconcile []string
+
     Confirmed bool
     Union     bool
 
@@ -71,7 +76,7 @@ func NewCmdReconcileClusterRoleBindings(name, fullName string, f *clientcmd.Fact
     excludeGroups := []string{}
 
     cmd := &cobra.Command{
-        Use: name,
+        Use: name + " [ClusterRoleName]...",
         Short: "Replace cluster role bindings to match the recommended bootstrap policy",
         Long: reconcileBindingsLong,
         Example: fmt.Sprintf(reconcileBindingsExample, fullName),
@@ -102,10 +107,6 @@ func NewCmdReconcileClusterRoleBindings(name, fullName string, f *clientcmd.Fact
 }
 
 func (o *ReconcileClusterRoleBindingsOptions) Complete(cmd *cobra.Command, f *clientcmd.Factory, args []string, excludeUsers, excludeGroups []string) error {
-    if len(args) != 0 {
-        return kcmdutil.UsageError(cmd, "no arguments are allowed")
-    }
-
     oclient, _, err := f.Clients()
     if err != nil {
         return err
@@ -116,6 +117,22 @@ func (o *ReconcileClusterRoleBindingsOptions) Complete(cmd *cobra.Command, f *cl
 
     o.ExcludeSubjects = authorizationapi.BuildSubjects(excludeUsers, excludeGroups, uservalidation.ValidateUserName, uservalidation.ValidateGroupName)
 
+    mapper, _ := f.Object()
+    for _, resourceString := range args {
+        resource, name, err := ocmdutil.ResolveResource("clusterroles", resourceString, mapper)
+        if err != nil {
+            return err
+        }
+        if resource != "clusterroles" {
+            return fmt.Errorf("%s is not a valid resource type for this command", resource)
+        }
+        if len(name) == 0 {
+            return fmt.Errorf("%s did not contain a name", resourceString)
+        }
+
+        o.RolesToReconcile = append(o.RolesToReconcile, name)
+    }
+
     return nil
 }
 
@@ -167,9 +184,15 @@ func (o *ReconcileClusterRoleBindingsOptions) RunReconcileClusterRoleBindings(cm
 func (o *ReconcileClusterRoleBindingsOptions) ChangedClusterRoleBindings() ([]*authorizationapi.ClusterRoleBinding, error) {
     changedRoleBindings := []*authorizationapi.ClusterRoleBinding{}
 
+    rolesToReconcile := sets.NewString(o.RolesToReconcile...)
+    rolesNotFound := sets.NewString(o.RolesToReconcile...)
     bootstrapClusterRoleBindings := bootstrappolicy.GetBootstrapClusterRoleBindings()
     for i := range bootstrapClusterRoleBindings {
         expectedClusterRoleBinding := &bootstrapClusterRoleBindings[i]
+        if (len(rolesToReconcile) > 0) && !rolesToReconcile.Has(expectedClusterRoleBinding.RoleRef.Name) {
+            continue
+        }
+        rolesNotFound.Delete(expectedClusterRoleBinding.RoleRef.Name)
 
         actualClusterRoleBinding, err := o.RoleBindingClient.Get(expectedClusterRoleBinding.Name)
         if kapierrors.IsNotFound(err) {
@@ -193,6 +216,11 @@ func (o *ReconcileClusterRoleBindingsOptions) ChangedClusterRoleBindings() ([]*a
         }
     }
 
+    if len(rolesNotFound) != 0 {
+        // return the known changes and the error so that a caller can decide if he wants a partial update
+        return changedRoleBindings, fmt.Errorf("did not find requested cluster role %s", rolesNotFound.List())
+    }
+
     return changedRoleBindings, nil
 }
 
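The filtering rule above is easy to miss: an empty RolesToReconcile means "reconcile every bootstrap binding", while a non-empty list restricts reconciliation and surfaces any requested name that never matched. A minimal sketch of that behavior, using plain maps as stand-ins for the util/sets.String type in the real code:

// Illustrative sketch only; role names here are just examples.
package main

import "fmt"

func filterRoles(requested, bootstrap []string) (kept, notFound []string) {
    want := map[string]bool{}
    missing := map[string]bool{}
    for _, r := range requested {
        want[r] = true
        missing[r] = true
    }
    for _, role := range bootstrap {
        if len(want) > 0 && !want[role] {
            continue // a specific list was given and this role is not on it
        }
        delete(missing, role)
        kept = append(kept, role)
    }
    for r := range missing {
        notFound = append(notFound, r)
    }
    return kept, notFound
}

func main() {
    bootstrap := []string{"admin", "edit", "view", "system:discovery"}

    // No arguments: every bootstrap binding is reconciled.
    kept, _ := filterRoles(nil, bootstrap)
    fmt.Println(kept)

    // Explicit arguments: only those are reconciled, and typos are reported.
    kept, notFound := filterRoles([]string{"system:discovery", "bogus"}, bootstrap)
    fmt.Println(kept, notFound)
}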
@@ -143,7 +143,7 @@ func CreateEdgeRoute(f *clientcmd.Factory, out io.Writer, cmd *cobra.Command, ar
         RESTMapper: mapper,
         ClientMapper: resource.ClientMapperFunc(f.ClientForMapping),
     }
-    info, err := resourceMapper.InfoForObject(route)
+    info, err := resourceMapper.InfoForObject(route, nil)
     if err != nil {
         return err
     }
@@ -228,7 +228,7 @@ func CreatePassthroughRoute(f *clientcmd.Factory, out io.Writer, cmd *cobra.Comm
         RESTMapper: mapper,
         ClientMapper: resource.ClientMapperFunc(f.ClientForMapping),
     }
-    info, err := resourceMapper.InfoForObject(route)
+    info, err := resourceMapper.InfoForObject(route, nil)
     if err != nil {
         return err
     }
@@ -346,7 +346,7 @@ func CreateReencryptRoute(f *clientcmd.Factory, out io.Writer, cmd *cobra.Comman
         RESTMapper: mapper,
         ClientMapper: resource.ClientMapperFunc(f.ClientForMapping),
     }
-    info, err := resourceMapper.InfoForObject(route)
+    info, err := resourceMapper.InfoForObject(route, nil)
     if err != nil {
         return err
     }
@@ -10,6 +10,7 @@ import (
     "github.com/spf13/cobra"
     kcmd "k8s.io/kubernetes/pkg/kubectl/cmd"
     "k8s.io/kubernetes/pkg/kubectl/cmd/config"
+    kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
     kvalidation "k8s.io/kubernetes/pkg/util/validation"
 
     cmdconfig "github.com/openshift/origin/pkg/cmd/cli/config"
@@ -336,7 +337,8 @@ foreground for an interactive container execution. You may pass 'run-controller
 
 // NewCmdRun is a wrapper for the Kubernetes cli run command
 func NewCmdRun(fullName string, f *clientcmd.Factory, in io.Reader, out, errout io.Writer) *cobra.Command {
-    cmd := kcmd.NewCmdRun(f.Factory, in, out, errout)
+    opts := &kcmd.RunOptions{DefaultRestartAlwaysGenerator: kcmdutil.RunV1GeneratorName, DefaultGenerator: kcmdutil.RunPodV1GeneratorName}
+    cmd := kcmd.NewCmdRunWithOptions(f.Factory, opts, in, out, errout)
     cmd.Long = runLong
     cmd.Example = fmt.Sprintf(runExample, fullName)
     cmd.SuggestFor = []string{"image"}
@@ -128,7 +128,7 @@ func OverwriteBootstrapPolicy(storage storage.Interface, policyFile, createBoots
         fmt.Fprintf(out, "Performing a dry run of policy overwrite:\n\n")
     }
 
-    mapper := cmdclientcmd.ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: registered.GroupOrDie(authorizationapi.GroupName).RESTMapper}}
+    mapper := cmdclientcmd.ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()}}
     typer := kapi.Scheme
     clientMapper := resource.ClientMapperFunc(func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
         return nil, nil
@@ -38,11 +38,24 @@ var (
     // exposed externally.
     DeadOpenShiftStorageVersionLevels = []string{"v1beta1", "v1beta3"}
 
-    APIGroupKube = ""
-    APIGroupExtensions = "extensions"
+    APIGroupKube        = ""
+    APIGroupExtensions  = "extensions"
+    APIGroupAutoscaling = "autoscaling"
+    APIGroupBatch       = "batch"
+
+    // Map of group names to allowed REST API versions
     KubeAPIGroupsToAllowedVersions = map[string][]string{
-        APIGroupKube: {"v1"},
-        APIGroupExtensions: {"v1beta1"},
+        APIGroupKube:        {"v1"},
+        APIGroupExtensions:  {"v1beta1"},
+        APIGroupAutoscaling: {"v1"},
+        APIGroupBatch:       {"v1"},
+    }
+    // Map of group names to known, but disallowed REST API versions
+    KubeAPIGroupsToDeadVersions = map[string][]string{
+        APIGroupKube:        {"v1beta3"},
+        APIGroupExtensions:  {},
+        APIGroupAutoscaling: {},
+        APIGroupBatch:       {},
     }
     KnownKubeAPIGroups = sets.StringKeySet(KubeAPIGroupsToAllowedVersions)
 
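A minimal sketch of how these two maps fit together: a group/version is servable when it appears in the allowed map, and a version listed only in the dead map is recognized but refused. This is an illustration of the intent, not the actual Origin validation code; the lookup helper below is hypothetical.

package main

import "fmt"

var kubeAPIGroupsToAllowedVersions = map[string][]string{
    "":            {"v1"},
    "extensions":  {"v1beta1"},
    "autoscaling": {"v1"},
    "batch":       {"v1"},
}

var kubeAPIGroupsToDeadVersions = map[string][]string{
    "":            {"v1beta3"},
    "extensions":  {},
    "autoscaling": {},
    "batch":       {},
}

// versionStatus reports whether a group/version is allowed, known-but-dead, or unknown.
func versionStatus(group, version string) string {
    for _, v := range kubeAPIGroupsToAllowedVersions[group] {
        if v == version {
            return "allowed"
        }
    }
    for _, v := range kubeAPIGroupsToDeadVersions[group] {
        if v == version {
            return "known but dead"
        }
    }
    return "unknown"
}

func main() {
    fmt.Println(versionStatus("batch", "v1"))      // allowed
    fmt.Println(versionStatus("", "v1beta3"))      // known but dead
    fmt.Println(versionStatus("extensions", "v2")) // unknown
}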
@@ -1,10 +1,45 @@
 package api
 
 import (
+    "reflect"
     "strings"
     "testing"
+
+    "k8s.io/kubernetes/pkg/apimachinery/registered"
+    "k8s.io/kubernetes/pkg/util/sets"
 )
 
+func TestKnownAPIGroups(t *testing.T) {
+    unexposedGroups := sets.NewString("authorization.k8s.io", "componentconfig", "metrics")
+
+    enabledGroups := sets.NewString()
+    for _, enabledVersion := range registered.EnabledVersions() {
+        enabledGroups.Insert(enabledVersion.Group)
+    }
+
+    if missingKnownGroups := KnownKubeAPIGroups.Difference(enabledGroups); len(missingKnownGroups) > 0 {
+        t.Errorf("KnownKubeAPIGroups are missing from registered.EnabledVersions: %v", missingKnownGroups.List())
+    }
+    if unknownEnabledGroups := enabledGroups.Difference(KnownKubeAPIGroups).Difference(unexposedGroups); len(unknownEnabledGroups) > 0 {
+        t.Errorf("KnownKubeAPIGroups is missing groups from registered.EnabledVersions: %v", unknownEnabledGroups.List())
+    }
+}
+
+func TestAllowedAPIVersions(t *testing.T) {
+    // Make sure all versions we know about match registered versions
+    for group, versions := range KubeAPIGroupsToAllowedVersions {
+        enabled := sets.NewString()
+        for _, enabledVersion := range registered.EnabledVersionsForGroup(group) {
+            enabled.Insert(enabledVersion.Version)
+        }
+        expected := sets.NewString(versions...)
+        actual := enabled.Difference(sets.NewString(KubeAPIGroupsToDeadVersions[group]...))
+        if e, a := expected.List(), actual.List(); !reflect.DeepEqual(e, a) {
+            t.Errorf("For group %s, expected versions %#v, got %#v", group, e, a)
+        }
+    }
+}
+
 func TestFeatureListAdd(t *testing.T) {
     orderedList := []string{FeatureBuilder, FeatureWebConsole, FeatureS2I}
     fl := FeatureList{}
@@ -77,6 +77,7 @@ const (
     SDNManagerRoleName = "system:sdn-manager"
     OAuthTokenDeleterRoleName = "system:oauth-token-deleter"
     WebHooksRoleName = "system:webhook"
+    DiscoveryRoleName = "system:discovery"
 
     // NodeAdmin has full access to the API provided by the kubelet
     NodeAdminRoleName = "system:node-admin"
@@ -107,6 +108,7 @@ const (
     SDNReaderRoleBindingName = SDNReaderRoleName + "s"
     SDNManagerRoleBindingName = SDNManagerRoleName + "s"
     WebHooksRoleBindingName = WebHooksRoleName + "s"
+    DiscoveryRoleBindingName = DiscoveryRoleName + "-binding"
     RegistryAdminRoleBindingName = RegistryAdminRoleName + "s"
     RegistryViewerRoleBindingName = RegistryViewerRoleName + "s"
     RegistryEditorRoleBindingName = RegistryEditorRoleName + "s"
@@ -4,6 +4,9 @@ import (
     "fmt"
 
     kapi "k8s.io/kubernetes/pkg/api"
+    "k8s.io/kubernetes/pkg/apis/autoscaling"
+    "k8s.io/kubernetes/pkg/apis/batch"
+    "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/util/sets"
 
     authorizationapi "github.com/openshift/origin/pkg/authorization/api"
@@ -28,6 +31,9 @@ const (
     InfraHPAControllerServiceAccountName = "hpa-controller"
     HPAControllerRoleName = "system:hpa-controller"
 
+    InfraNamespaceControllerServiceAccountName = "namespace-controller"
+    NamespaceControllerRoleName = "system:namespace-controller"
+
     InfraPersistentVolumeBinderControllerServiceAccountName = "pv-binder-controller"
     PersistentVolumeBinderControllerRoleName = "system:pv-binder-controller"
 
@@ -218,13 +224,13 @@ func init() {
         Rules: []authorizationapi.PolicyRule{
             // JobController.jobController.ListWatch
             {
-                APIGroups: []string{authorizationapi.APIGroupExtensions},
+                APIGroups: []string{extensions.GroupName, batch.GroupName},
                 Verbs:     sets.NewString("list", "watch"),
                 Resources: sets.NewString("jobs"),
             },
             // JobController.syncJob() -> updateJobStatus()
             {
-                APIGroups: []string{authorizationapi.APIGroupExtensions},
+                APIGroups: []string{extensions.GroupName, batch.GroupName},
                 Verbs:     sets.NewString("update"),
                 Resources: sets.NewString("jobs/status"),
             },
@@ -259,17 +265,17 @@ func init() {
         Rules: []authorizationapi.PolicyRule{
             // HPA Controller
             {
-                APIGroups: []string{authorizationapi.APIGroupExtensions},
-                Verbs:     sets.NewString("get", "list"),
+                APIGroups: []string{extensions.GroupName, autoscaling.GroupName},
+                Verbs:     sets.NewString("get", "list", "watch"),
                 Resources: sets.NewString("horizontalpodautoscalers"),
             },
             {
-                APIGroups: []string{authorizationapi.APIGroupExtensions},
+                APIGroups: []string{extensions.GroupName, autoscaling.GroupName},
                 Verbs:     sets.NewString("update"),
                 Resources: sets.NewString("horizontalpodautoscalers/status"),
             },
             {
-                APIGroups: []string{authorizationapi.APIGroupExtensions},
+                APIGroups: []string{extensions.GroupName, kapi.GroupName},
                 Verbs:     sets.NewString("get", "update"),
                 Resources: sets.NewString("replicationcontrollers/scale"),
             },
@@ -456,7 +462,7 @@ func init() {
         Rules: []authorizationapi.PolicyRule{
             // DaemonSetsController.dsStore.ListWatch
             {
-                APIGroups: []string{authorizationapi.APIGroupExtensions},
+                APIGroups: []string{extensions.GroupName},
                 Verbs:     sets.NewString("list", "watch"),
                 Resources: sets.NewString("daemonsets"),
             },
@@ -472,7 +478,7 @@ func init() {
             },
             // DaemonSetsController.storeDaemonSetStatus
             {
-                APIGroups: []string{authorizationapi.APIGroupExtensions},
+                APIGroups: []string{extensions.GroupName},
                 Verbs:     sets.NewString("update"),
                 Resources: sets.NewString("daemonsets/status"),
             },
@@ -497,4 +503,37 @@ func init() {
     if err != nil {
         panic(err)
     }
+
+    err = InfraSAs.addServiceAccount(
+        InfraNamespaceControllerServiceAccountName,
+        authorizationapi.ClusterRole{
+            ObjectMeta: kapi.ObjectMeta{
+                Name: NamespaceControllerRoleName,
+            },
+            Rules: []authorizationapi.PolicyRule{
+                // Watching/deleting namespaces
+                {
+                    APIGroups: []string{kapi.GroupName},
+                    Verbs:     sets.NewString("get", "list", "watch", "delete"),
+                    Resources: sets.NewString("namespaces"),
+                },
+                // Updating status to terminating, updating finalizer list
+                {
+                    APIGroups: []string{kapi.GroupName},
+                    Verbs:     sets.NewString("update"),
+                    Resources: sets.NewString("namespaces/finalize", "namespaces/status"),
+                },
+
+                // Ability to delete resources
+                {
+                    APIGroups: []string{"*"},
+                    Verbs:     sets.NewString("get", "list", "delete", "deletecollection"),
+                    Resources: sets.NewString("*"),
+                },
+            },
+        },
+    )
+    if err != nil {
+        panic(err)
+    }
 }
@@ -4,8 +4,12 @@ import (
     "fmt"
 
     kapi "k8s.io/kubernetes/pkg/api"
+    "k8s.io/kubernetes/pkg/apis/autoscaling"
+    "k8s.io/kubernetes/pkg/apis/batch"
+    "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/util/sets"
 
+    "github.com/openshift/origin/pkg/api"
     authorizationapi "github.com/openshift/origin/pkg/authorization/api"
 )
 
@@ -49,9 +53,9 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
         },
         Rules: []authorizationapi.PolicyRule{
             {
+                APIGroups: []string{authorizationapi.APIGroupAll},
                 Verbs:     sets.NewString(authorizationapi.VerbAll),
                 Resources: sets.NewString(authorizationapi.ResourceAll),
-                APIGroups: []string{authorizationapi.APIGroupAll},
             },
             {
                 Verbs:     sets.NewString(authorizationapi.VerbAll),
@@ -69,9 +73,19 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
                 Resources: sets.NewString(authorizationapi.NonEscalatingResourcesGroupName),
             },
             {
+                APIGroups: []string{autoscaling.GroupName},
+                Verbs:     sets.NewString("get", "list", "watch"),
+                Resources: sets.NewString("horizontalpodautoscalers"),
+            },
+            {
+                APIGroups: []string{batch.GroupName},
+                Verbs:     sets.NewString("get", "list", "watch"),
+                Resources: sets.NewString("jobs"),
+            },
+            {
+                APIGroups: []string{extensions.GroupName},
                 Verbs:     sets.NewString("get", "list", "watch"),
                 Resources: sets.NewString("daemonsets", "jobs", "horizontalpodautoscalers", "replicationcontrollers/scale"),
-                APIGroups: []string{authorizationapi.APIGroupExtensions},
             },
             { // permissions to check access. These creates are non-mutating
                 Verbs: sets.NewString("create"),
@@ -100,11 +114,42 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
         },
         Rules: []authorizationapi.PolicyRule{
             {
+                APIGroups: []string{kapi.GroupName},
+                Verbs:     sets.NewString("get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"),
+                Resources: sets.NewString(
+                    authorizationapi.KubeExposedGroupName,
+                    "secrets",
+                    "pods/attach", "pods/proxy", "pods/exec", "pods/portforward",
+                    "services/proxy",
+                    "replicationcontrollers/scale",
+                ),
+            },
+            {
+                APIGroups: []string{api.GroupName},
+                Verbs:     sets.NewString("get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"),
+                Resources: sets.NewString(
+                    authorizationapi.OpenshiftExposedGroupName,
+                    authorizationapi.PermissionGrantingGroupName,
+                    "projects",
+                    authorizationapi.DockerBuildResource,
+                    authorizationapi.SourceBuildResource,
+                    authorizationapi.CustomBuildResource,
+                    "deploymentconfigs/scale",
+                    "imagestreams/secrets",
+                ),
+            },
+            {
+                APIGroups: []string{autoscaling.GroupName},
                 Verbs:     sets.NewString("get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"),
-                Resources: sets.NewString(authorizationapi.OpenshiftExposedGroupName, authorizationapi.PermissionGrantingGroupName, authorizationapi.KubeExposedGroupName, "projects", "secrets", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward", authorizationapi.DockerBuildResource, authorizationapi.SourceBuildResource, authorizationapi.CustomBuildResource, "deploymentconfigs/scale", "imagestreams/secrets"),
+                Resources: sets.NewString("horizontalpodautoscalers"),
             },
             {
-                APIGroups: []string{authorizationapi.APIGroupExtensions},
+                APIGroups: []string{batch.GroupName},
+                Verbs:     sets.NewString("get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"),
+                Resources: sets.NewString("jobs"),
+            },
+            {
+                APIGroups: []string{extensions.GroupName},
                 Verbs:     sets.NewString("get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"),
                 Resources: sets.NewString("daemonsets", "jobs", "horizontalpodautoscalers", "replicationcontrollers/scale"),
             },
@@ -130,11 +175,40 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
         },
         Rules: []authorizationapi.PolicyRule{
             {
+                APIGroups: []string{kapi.GroupName},
+                Verbs:     sets.NewString("get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"),
+                Resources: sets.NewString(
+                    authorizationapi.KubeExposedGroupName,
+                    "secrets",
+                    "pods/attach", "pods/proxy", "pods/exec", "pods/portforward",
+                    "services/proxy",
+                    "replicationcontrollers/scale",
+                ),
+            },
+            {
+                APIGroups: []string{api.GroupName},
                 Verbs:     sets.NewString("get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"),
-                Resources: sets.NewString(authorizationapi.OpenshiftExposedGroupName, authorizationapi.KubeExposedGroupName, "secrets", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward", authorizationapi.DockerBuildResource, authorizationapi.SourceBuildResource, authorizationapi.CustomBuildResource, "deploymentconfigs/scale", "imagestreams/secrets"),
+                Resources: sets.NewString(
+                    authorizationapi.OpenshiftExposedGroupName,
+                    authorizationapi.DockerBuildResource,
+                    authorizationapi.SourceBuildResource,
+                    authorizationapi.CustomBuildResource,
+                    "deploymentconfigs/scale",
+                    "imagestreams/secrets",
+                ),
             },
             {
-                APIGroups: []string{authorizationapi.APIGroupExtensions},
+                APIGroups: []string{autoscaling.GroupName},
+                Verbs:     sets.NewString("get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"),
+                Resources: sets.NewString("horizontalpodautoscalers"),
+            },
+            {
+                APIGroups: []string{batch.GroupName},
+                Verbs:     sets.NewString("get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"),
+                Resources: sets.NewString("jobs"),
+            },
+            {
+                APIGroups: []string{extensions.GroupName},
                 Verbs:     sets.NewString("get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"),
                 Resources: sets.NewString("daemonsets", "jobs", "horizontalpodautoscalers", "replicationcontrollers/scale"),
             },
@@ -159,7 +233,17 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
                 Resources: sets.NewString(authorizationapi.OpenshiftExposedGroupName, authorizationapi.KubeAllGroupName, authorizationapi.OpenshiftStatusGroupName, authorizationapi.KubeStatusGroupName, "projects"),
             },
             {
-                APIGroups: []string{authorizationapi.APIGroupExtensions},
+                APIGroups: []string{autoscaling.GroupName},
+                Verbs:     sets.NewString("get", "list", "watch"),
+                Resources: sets.NewString("horizontalpodautoscalers"),
+            },
+            {
+                APIGroups: []string{batch.GroupName},
+                Verbs:     sets.NewString("get", "list", "watch"),
+                Resources: sets.NewString("jobs"),
+            },
+            {
+                APIGroups: []string{extensions.GroupName},
                 Verbs:     sets.NewString("get", "list", "watch"),
                 Resources: sets.NewString("daemonsets", "jobs", "horizontalpodautoscalers"),
             },
@@ -193,12 +277,17 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
             {
                 Verbs: sets.NewString("get"),
                 NonResourceURLs: sets.NewString(
+                    // Health
                     "/healthz", "/healthz/*",
+
+                    // Server version checking
                     "/version",
-                    "/api", "/api/", "/api/v1", "/api/v1/",
-                    "/apis", "/apis/", "/apis/extensions", "/apis/extensions/", "/apis/extensions/v1beta1", "/apis/extensions/v1beta1/",
+
+                    // API discovery/negotiation
+                    "/api", "/api/*",
+                    "/apis", "/apis/*",
+                    "/oapi", "/oapi/*",
                     "/osapi", "/osapi/", // these cannot be removed until we can drop support for pre 3.1 clients
-                    "/oapi/", "/oapi", "/oapi/v1", "/oapi/v1/",
                 ),
             },
         },
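The switch from enumerating every version path to entries like "/apis/*" relies on the trailing-asterisk convention for non-resource URLs. A small hedged sketch of that matching behavior follows; it is only an illustration of the convention the rule strings assume, not the Origin authorizer implementation.

package main

import (
    "fmt"
    "strings"
)

// matchNonResourceURL treats a trailing "*" as a prefix wildcard; otherwise
// the rule must match the request path exactly.
func matchNonResourceURL(rule, path string) bool {
    if strings.HasSuffix(rule, "*") {
        return strings.HasPrefix(path, strings.TrimSuffix(rule, "*"))
    }
    return rule == path
}

func main() {
    rules := []string{"/version", "/api", "/api/*", "/apis", "/apis/*", "/oapi", "/oapi/*"}
    for _, p := range []string{"/apis/batch/v1", "/api", "/healthz"} {
        allowed := false
        for _, r := range rules {
            if matchNonResourceURL(r, p) {
                allowed = true
                break
            }
        }
        fmt.Println(p, allowed) // /apis/batch/v1 true, /api true, /healthz false
    }
}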
@@ -396,7 +485,7 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
             },
             {
                 Verbs:     sets.NewString(authorizationapi.VerbAll),
-                Resources: sets.NewString(authorizationapi.NodeMetricsResource, authorizationapi.NodeStatsResource, authorizationapi.NodeLogResource),
+                Resources: sets.NewString("nodes/proxy", authorizationapi.NodeMetricsResource, authorizationapi.NodeStatsResource, authorizationapi.NodeLogResource),
             },
         },
     },
@@ -561,6 +650,28 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
             },
         },
     },
+
+    {
+        ObjectMeta: kapi.ObjectMeta{
+            Name: DiscoveryRoleName,
+        },
+        Rules: []authorizationapi.PolicyRule{
+            {
+                Verbs: sets.NewString("get"),
+                NonResourceURLs: sets.NewString(
+                    // Server version checking
+                    "/version",
+
+                    // API discovery/negotiation
+                    "/api", "/api/*",
+                    "/apis", "/apis/*",
+                    "/oapi", "/oapi/*",
+                    "/osapi", "/osapi/", // these cannot be removed until we can drop support for pre 3.1 clients
+                ),
+            },
+        },
+    },
+
     {
         ObjectMeta: kapi.ObjectMeta{
             Name: RegistryAdminRoleName,
@@ -796,5 +907,17 @@ func GetBootstrapClusterRoleBindings() []authorizationapi.ClusterRoleBinding {
         },
         Subjects: []kapi.ObjectReference{{Kind: authorizationapi.SystemGroupKind, Name: AuthenticatedGroup}, {Kind: authorizationapi.SystemGroupKind, Name: UnauthenticatedGroup}},
     },
+    {
+        ObjectMeta: kapi.ObjectMeta{
+            Name: DiscoveryRoleBindingName,
+        },
+        RoleRef: kapi.ObjectReference{
+            Name: DiscoveryRoleName,
+        },
+        Subjects: []kapi.ObjectReference{
+            {Kind: authorizationapi.SystemGroupKind, Name: AuthenticatedGroup},
+            {Kind: authorizationapi.SystemGroupKind, Name: UnauthenticatedGroup},
+        },
+    },
 }
 }
@@ -16,6 +16,10 @@ import (
     extv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/client/record"
+    "k8s.io/kubernetes/pkg/client/typed/dynamic"
+    coreunversioned "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
+    extensionsunversioned "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
+
     client "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/daemon"
@@ -81,27 +85,24 @@ func (c *MasterConfig) InstallAPI(container *restful.Container) ([]string, error
 }
 
 // RunNamespaceController starts the Kubernetes Namespace Manager
-func (c *MasterConfig) RunNamespaceController() {
-    versions := []string{}
-    for _, version := range configapi.GetEnabledAPIVersionsForGroup(c.Options, configapi.APIGroupKube) {
-        versions = append(versions, unversioned.GroupVersion{Group: configapi.APIGroupKube, Version: version}.String())
-    }
-    for _, version := range configapi.GetEnabledAPIVersionsForGroup(c.Options, configapi.APIGroupExtensions) {
-        versions = append(versions, unversioned.GroupVersion{Group: configapi.APIGroupExtensions, Version: version}.String())
+func (c *MasterConfig) RunNamespaceController(kubeClient internalclientset.Interface, clientPool dynamic.ClientPool) {
+    // Find the list of namespaced resources via discovery that the namespace controller must manage
+    groupVersionResources, err := namespacecontroller.ServerPreferredNamespacedGroupVersionResources(kubeClient.Discovery())
+    if err != nil {
+        glog.Fatalf("Failed to get supported resources from server: %v", err)
     }
-    apiVersions := &unversioned.APIVersions{Versions: versions}
-    namespaceController := namespacecontroller.NewNamespaceController(internalclientset.FromUnversionedClient(c.KubeClient), apiVersions, c.ControllerManager.NamespaceSyncPeriod)
+    namespaceController := namespacecontroller.NewNamespaceController(kubeClient, clientPool, groupVersionResources, c.ControllerManager.NamespaceSyncPeriod.Duration, kapi.FinalizerKubernetes)
     go namespaceController.Run(c.ControllerManager.ConcurrentNamespaceSyncs, utilwait.NeverStop)
 }
 
 // RunPersistentVolumeClaimBinder starts the Kubernetes Persistent Volume Claim Binder
 func (c *MasterConfig) RunPersistentVolumeClaimBinder(client *client.Client) {
-    binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(internalclientset.FromUnversionedClient(client), c.ControllerManager.PVClaimBinderSyncPeriod)
+    binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(internalclientset.FromUnversionedClient(client), c.ControllerManager.PVClaimBinderSyncPeriod.Duration)
     binder.Run()
 }
 
 func (c *MasterConfig) RunPersistentVolumeProvisioner(client *client.Client) {
-    provisioner, err := kctrlmgr.NewVolumeProvisioner(c.CloudProvider, c.ControllerManager.VolumeConfigFlags)
+    provisioner, err := kctrlmgr.NewVolumeProvisioner(c.CloudProvider, c.ControllerManager.VolumeConfiguration)
     if err != nil {
         // a provisioner was expected but encountered an error
         glog.Fatal(err)
@@ -114,7 +115,14 @@ func (c *MasterConfig) RunPersistentVolumeProvisioner(client *client.Client) {
     allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
     allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
     controllerClient := volumeclaimbinder.NewControllerClient(internalclientset.FromUnversionedClient(client))
-    provisionerController, err := volumeclaimbinder.NewPersistentVolumeProvisionerController(controllerClient, c.ControllerManager.PVClaimBinderSyncPeriod, allPlugins, provisioner, c.CloudProvider)
+    provisionerController, err := volumeclaimbinder.NewPersistentVolumeProvisionerController(
+        controllerClient,
+        c.ControllerManager.PVClaimBinderSyncPeriod.Duration,
+        c.ControllerManager.ClusterName,
+        allPlugins,
+        provisioner,
+        c.CloudProvider,
+    )
     if err != nil {
         glog.Fatalf("Could not start Persistent Volume Provisioner: %+v", err)
     }
@@ -132,27 +140,27 @@ func (c *MasterConfig) RunPersistentVolumeClaimRecycler(recyclerImageName string
     defaultScrubPod.Spec.Containers[0].SecurityContext = &kapi.SecurityContext{RunAsUser: &uid}
     defaultScrubPod.Spec.Containers[0].ImagePullPolicy = kapi.PullIfNotPresent
 
-    volumeConfig := c.ControllerManager.VolumeConfigFlags
+    volumeConfig := c.ControllerManager.VolumeConfiguration
     hostPathConfig := volume.VolumeConfig{
-        RecyclerMinimumTimeout: volumeConfig.PersistentVolumeRecyclerMinimumTimeoutHostPath,
-        RecyclerTimeoutIncrement: volumeConfig.PersistentVolumeRecyclerIncrementTimeoutHostPath,
+        RecyclerMinimumTimeout: volumeConfig.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath,
+        RecyclerTimeoutIncrement: volumeConfig.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath,
         RecyclerPodTemplate: defaultScrubPod,
     }
 
-    if len(volumeConfig.PersistentVolumeRecyclerPodTemplateFilePathHostPath) != 0 {
-        if err := attemptToLoadRecycler(volumeConfig.PersistentVolumeRecyclerPodTemplateFilePathHostPath, &hostPathConfig); err != nil {
-            glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", volumeConfig.PersistentVolumeRecyclerPodTemplateFilePathHostPath, err)
+    if len(volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath) != 0 {
+        if err := attemptToLoadRecycler(volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil {
+            glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err)
         }
     }
     nfsConfig := volume.VolumeConfig{
-        RecyclerMinimumTimeout: volumeConfig.PersistentVolumeRecyclerMinimumTimeoutNFS,
-        RecyclerTimeoutIncrement: volumeConfig.PersistentVolumeRecyclerIncrementTimeoutNFS,
+        RecyclerMinimumTimeout: volumeConfig.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS,
+        RecyclerTimeoutIncrement: volumeConfig.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS,
        RecyclerPodTemplate: defaultScrubPod,
     }
 
-    if len(volumeConfig.PersistentVolumeRecyclerPodTemplateFilePathNFS) != 0 {
-        if err := attemptToLoadRecycler(volumeConfig.PersistentVolumeRecyclerPodTemplateFilePathNFS, &nfsConfig); err != nil {
-            glog.Fatalf("Could not create NFS recycler pod from file %s: %+v", volumeConfig.PersistentVolumeRecyclerPodTemplateFilePathNFS, err)
+    if len(volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS) != 0 {
+        if err := attemptToLoadRecycler(volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, &nfsConfig); err != nil {
+            glog.Fatalf("Could not create NFS recycler pod from file %s: %+v", volumeConfig.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err)
         }
     }
 
@@ -165,7 +173,13 @@ func (c *MasterConfig) RunPersistentVolumeClaimRecycler(recyclerImageName string
     allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
     allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
 
-    recycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(internalclientset.FromUnversionedClient(client), c.ControllerManager.PVClaimBinderSyncPeriod, volumeConfig.PersistentVolumeRecyclerMaximumRetry, allPlugins, c.CloudProvider)
+    recycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(
+        internalclientset.FromUnversionedClient(client),
+        c.ControllerManager.PVClaimBinderSyncPeriod.Duration,
+        volumeConfig.PersistentVolumeRecyclerConfiguration.MaximumRetry,
+        allPlugins,
+        c.CloudProvider,
+    )
     if err != nil {
         glog.Fatalf("Could not start Persistent Volume Recycler: %+v", err)
     }
@@ -193,7 +207,12 @@ func attemptToLoadRecycler(path string, config *volume.VolumeConfig) error {
 
 // RunReplicationController starts the Kubernetes replication controller sync loop
 func (c *MasterConfig) RunReplicationController(client *client.Client) {
-    controllerManager := replicationcontroller.NewReplicationManager(internalclientset.FromUnversionedClient(client), kctrlmgr.ResyncPeriod(c.ControllerManager), replicationcontroller.BurstReplicas)
+    controllerManager := replicationcontroller.NewReplicationManager(
+        internalclientset.FromUnversionedClient(client),
+        kctrlmgr.ResyncPeriod(c.ControllerManager),
+        replicationcontroller.BurstReplicas,
+        c.ControllerManager.LookupCacheSizeForRC,
+    )
     go controllerManager.Run(c.ControllerManager.ConcurrentRCSyncs, utilwait.NeverStop)
 }
 
| ... | ... |
@@ -206,14 +225,24 @@ func (c *MasterConfig) RunJobController(client *client.Client) {
|
| 206 | 206 |
// RunHPAController starts the Kubernetes hpa controller sync loop |
| 207 | 207 |
func (c *MasterConfig) RunHPAController(oc *osclient.Client, kc *client.Client, heapsterNamespace string) {
|
| 208 | 208 |
clientsetClient := internalclientset.FromUnversionedClient(kc) |
| 209 |
- delegScaleNamespacer := osclient.NewDelegatingScaleNamespacer(oc, kc) |
|
| 210 |
- podautoscaler := podautoscalercontroller.NewHorizontalController(clientsetClient, delegScaleNamespacer, clientsetClient, metrics.NewHeapsterMetricsClient(clientsetClient, heapsterNamespace, "https", "heapster", "")) |
|
| 211 |
- podautoscaler.Run(c.ControllerManager.HorizontalPodAutoscalerSyncPeriod) |
|
| 209 |
+ delegatingScaleNamespacer := osclient.NewDelegatingScaleNamespacer(oc, kc) |
|
| 210 |
+ podautoscaler := podautoscalercontroller.NewHorizontalController( |
|
| 211 |
+ coreunversioned.EventsGetter(clientsetClient), |
|
| 212 |
+ extensionsunversioned.ScalesGetter(delegatingScaleNamespacer), |
|
| 213 |
+ extensionsunversioned.HorizontalPodAutoscalersGetter(clientsetClient), |
|
| 214 |
+ metrics.NewHeapsterMetricsClient(clientsetClient, heapsterNamespace, "https", "heapster", ""), |
|
| 215 |
+ c.ControllerManager.HorizontalPodAutoscalerSyncPeriod.Duration, |
|
| 216 |
+ ) |
|
| 217 |
+ go podautoscaler.Run(utilwait.NeverStop) |
|
| 212 | 218 |
} |
| 213 | 219 |
|
| 214 | 220 |
func (c *MasterConfig) RunDaemonSetsController(client *client.Client) {
|
| 215 |
- controller := daemon.NewDaemonSetsController(internalclientset.FromUnversionedClient(client), kctrlmgr.ResyncPeriod(c.ControllerManager)) |
|
| 216 |
- go controller.Run(c.ControllerManager.ConcurrentDSCSyncs, utilwait.NeverStop) |
|
| 221 |
+ controller := daemon.NewDaemonSetsController( |
|
| 222 |
+ internalclientset.FromUnversionedClient(client), |
|
| 223 |
+ kctrlmgr.ResyncPeriod(c.ControllerManager), |
|
| 224 |
+ c.ControllerManager.LookupCacheSizeForDaemonSet, |
|
| 225 |
+ ) |
|
| 226 |
+ go controller.Run(c.ControllerManager.ConcurrentDaemonSetSyncs, utilwait.NeverStop) |
|
| 217 | 227 |
} |
| 218 | 228 |
|
| 219 | 229 |
// RunEndpointController starts the Kubernetes replication controller sync loop |
| ... | ... |
@@ -247,13 +276,15 @@ func (c *MasterConfig) RunResourceQuotaManager() {
|
| 247 | 247 |
kapi.Kind("ReplicationController"),
|
| 248 | 248 |
kapi.Kind("PersistentVolumeClaim"),
|
| 249 | 249 |
kapi.Kind("Secret"),
|
| 250 |
+ kapi.Kind("ConfigMap"),
|
|
| 250 | 251 |
} |
| 251 | 252 |
resourceQuotaControllerOptions := &kresourcequota.ResourceQuotaControllerOptions{
|
| 252 |
- KubeClient: client, |
|
| 253 |
- ResyncPeriod: controller.StaticResyncPeriodFunc(c.ControllerManager.ResourceQuotaSyncPeriod), |
|
| 254 |
- Registry: resourceQuotaRegistry, |
|
| 255 |
- GroupKindsToReplenish: groupKindsToReplenish, |
|
| 256 |
- ControllerFactory: kresourcequota.NewReplenishmentControllerFactory(client), |
|
| 253 |
+ KubeClient: client, |
|
| 254 |
+ ResyncPeriod: controller.StaticResyncPeriodFunc(c.ControllerManager.ResourceQuotaSyncPeriod.Duration), |
|
| 255 |
+ Registry: resourceQuotaRegistry, |
|
| 256 |
+ GroupKindsToReplenish: groupKindsToReplenish, |
|
| 257 |
+ ControllerFactory: kresourcequota.NewReplenishmentControllerFactory(client), |
|
| 258 |
+ ReplenishmentResyncPeriod: kctrlmgr.ResyncPeriod(c.ControllerManager), |
|
| 257 | 259 |
} |
| 258 | 260 |
go kresourcequota.NewResourceQuotaController(resourceQuotaControllerOptions).Run(c.ControllerManager.ConcurrentResourceQuotaSyncs, utilwait.NeverStop) |
| 259 | 261 |
} |
| ... | ... |
@@ -261,23 +292,27 @@ func (c *MasterConfig) RunResourceQuotaManager() {
|
| 261 | 261 |
// RunNodeController starts the node controller |
| 262 | 262 |
func (c *MasterConfig) RunNodeController() {
|
| 263 | 263 |
s := c.ControllerManager |
| 264 |
+ |
|
| 265 |
+ // this cidr has been validated already |
|
| 266 |
+ _, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR) |
|
| 267 |
+ |
|
| 264 | 268 |
controller := nodecontroller.NewNodeController( |
| 265 | 269 |
c.CloudProvider, |
| 266 | 270 |
internalclientset.FromUnversionedClient(c.KubeClient), |
| 267 |
- s.PodEvictionTimeout, |
|
| 271 |
+ s.PodEvictionTimeout.Duration, |
|
| 268 | 272 |
|
| 269 | 273 |
util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst), |
| 270 | 274 |
util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst), // upstream uses the same ones too |
| 271 | 275 |
|
| 272 |
- s.NodeMonitorGracePeriod, |
|
| 273 |
- s.NodeStartupGracePeriod, |
|
| 274 |
- s.NodeMonitorPeriod, |
|
| 276 |
+ s.NodeMonitorGracePeriod.Duration, |
|
| 277 |
+ s.NodeStartupGracePeriod.Duration, |
|
| 278 |
+ s.NodeMonitorPeriod.Duration, |
|
| 275 | 279 |
|
| 276 |
- (*net.IPNet)(&s.ClusterCIDR), |
|
| 280 |
+ clusterCIDR, |
|
| 277 | 281 |
s.AllocateNodeCIDRs, |
| 278 | 282 |
) |
| 279 | 283 |
|
| 280 |
- controller.Run(s.NodeSyncPeriod) |
|
| 284 |
+ controller.Run(s.NodeSyncPeriod.Duration) |
|
| 281 | 285 |
} |
| 282 | 286 |
|
| 283 | 287 |
func (c *MasterConfig) createSchedulerConfig() (*scheduler.Config, error) {
|
| ... | ... |
@@ -113,7 +113,7 @@ func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextM |
| 113 | 113 |
} |
| 114 | 114 |
|
| 115 | 115 |
cmserver := cmapp.NewCMServer() |
| 116 |
- cmserver.PodEvictionTimeout = podEvictionTimeout |
|
| 116 |
+ cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}
|
|
| 117 | 117 |
// resolve extended arguments |
| 118 | 118 |
// TODO: this should be done in config validation (along with the above) so we can provide |
| 119 | 119 |
// proper errors |
| ... | ... |
@@ -188,6 +188,7 @@ func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextM |
| 188 | 188 |
storageVersions[configapi.APIGroupKube] = options.EtcdStorageConfig.KubernetesStorageVersion |
| 189 | 189 |
} |
| 190 | 190 |
|
| 191 |
+ // TODO: also need to enable this if batch or autoscaling is enabled and doesn't have a storage version set |
|
| 191 | 192 |
enabledExtensionsVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupExtensions) |
| 192 | 193 |
if len(enabledExtensionsVersions) > 0 {
|
| 193 | 194 |
groupMeta, err := registered.Group(configapi.APIGroupExtensions) |
| ... | ... |
@@ -15,6 +15,7 @@ import (
     kapi "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/client/record"
     "k8s.io/kubernetes/pkg/kubelet/cadvisor"
+    cadvisortesting "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
     "k8s.io/kubernetes/pkg/kubelet/cm"
     "k8s.io/kubernetes/pkg/kubelet/dockertools"
     proxy "k8s.io/kubernetes/pkg/proxy"
| ... | ... |
@@ -237,7 +238,7 @@ var defaultCadvisorInterface cadvisor.Interface = nil |
| 237 | 237 |
|
| 238 | 238 |
// SetFakeCadvisorInterfaceForIntegrationTest sets a fake cadvisor implementation to allow the node to run in integration tests |
| 239 | 239 |
func SetFakeCadvisorInterfaceForIntegrationTest() {
|
| 240 |
- defaultCadvisorInterface = &cadvisor.Fake{}
|
|
| 240 |
+ defaultCadvisorInterface = &cadvisortesting.Fake{}
|
|
| 241 | 241 |
} |
| 242 | 242 |
|
| 243 | 243 |
// defaultContainerManagerInterface holds the overridden default interface |
| ... | ... |
@@ -187,8 +187,6 @@ func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error
     if value := cmdutil.Env("OPENSHIFT_DIND", ""); value == "true" {
         glog.Warningf("Using FakeOOMAdjuster for docker-in-docker compatibility")
         cfg.OOMAdjuster = oom.NewFakeOOMAdjuster()
-        glog.Warningf("Disabling cgroup manipulation of nested docker daemon for docker-in-docker compatibility")
-        cfg.DockerDaemonContainer = ""
     }
 
     // Setup auth
| ... | ... |
@@ -200,6 +200,32 @@ func (c *MasterConfig) ensureComponentAuthorizationRules() {
|
| 200 | 200 |
if err != nil {
|
| 201 | 201 |
glog.Errorf("error waiting for policy cache to initialize: %v", err)
|
| 202 | 202 |
} |
| 203 |
+ |
|
| 204 |
+ // Reconcile roles that must exist for the cluster to function |
|
| 205 |
+ // Be very judicious about what is placed in this list, since it will be enforced on every server start |
|
| 206 |
+ reconcileRoles := &policy.ReconcileClusterRolesOptions{
|
|
| 207 |
+ RolesToReconcile: []string{bootstrappolicy.DiscoveryRoleName},
|
|
| 208 |
+ Confirmed: true, |
|
| 209 |
+ Union: true, |
|
| 210 |
+ Out: ioutil.Discard, |
|
| 211 |
+ RoleClient: c.PrivilegedLoopbackOpenShiftClient.ClusterRoles(), |
|
| 212 |
+ } |
|
| 213 |
+ if err := reconcileRoles.RunReconcileClusterRoles(nil, nil); err != nil {
|
|
| 214 |
+ glog.Errorf("Could not auto reconcile roles: %v\n", err)
|
|
| 215 |
+ } |
|
| 216 |
+ |
|
| 217 |
+ // Reconcile rolebindings that must exist for the cluster to function |
|
| 218 |
+ // Be very judicious about what is placed in this list, since it will be enforced on every server start |
|
| 219 |
+ reconcileRoleBindings := &policy.ReconcileClusterRoleBindingsOptions{
|
|
| 220 |
+ RolesToReconcile: []string{bootstrappolicy.DiscoveryRoleName},
|
|
| 221 |
+ Confirmed: true, |
|
| 222 |
+ Union: true, |
|
| 223 |
+ Out: ioutil.Discard, |
|
| 224 |
+ RoleBindingClient: c.PrivilegedLoopbackOpenShiftClient.ClusterRoleBindings(), |
|
| 225 |
+ } |
|
| 226 |
+ if err := reconcileRoleBindings.RunReconcileClusterRoleBindings(nil, nil); err != nil {
|
|
| 227 |
+ glog.Errorf("Could not auto reconcile role bindings: %v\n", err)
|
|
| 228 |
+ } |
|
| 203 | 229 |
} |
| 204 | 230 |
|
| 205 | 231 |
// ensureCORSAllowedOrigins takes a string list of origins and attempts to covert them to CORS origin |
| ... | ... |
@@ -178,7 +178,7 @@ func (c *MasterConfig) Run(protected []APIInstaller, unprotected []APIInstaller) |
| 178 | 178 |
|
| 179 | 179 |
var kubeAPILevels []string |
| 180 | 180 |
if c.Options.KubernetesMasterConfig != nil {
|
| 181 |
- kubeAPILevels = configapi.GetEnabledAPIVersionsForGroup(*c.Options.KubernetesMasterConfig, configapi.APIGroupKube) |
|
| 181 |
+ kubeAPILevels = configapi.GetEnabledAPIVersionsForGroup(*c.Options.KubernetesMasterConfig, kapi.GroupName) |
|
| 182 | 182 |
} |
| 183 | 183 |
|
| 184 | 184 |
handler = indexAPIPaths(c.Options.APILevels, kubeAPILevels, handler) |
| ... | ... |
@@ -215,11 +215,12 @@ func (c *MasterConfig) Run(protected []APIInstaller, unprotected []APIInstaller)
         handler = contextHandler
     }
 
+    longRunningRequestCheck := apiserver.BasicLongRunningRequestCheck(longRunningRE, map[string]string{"watch": "true"})
     // TODO: MaxRequestsInFlight should be subdivided by intent, type of behavior, and speed of
     // execution - updates vs reads, long reads vs short reads, fat reads vs skinny reads.
     if c.Options.ServingInfo.MaxRequestsInFlight > 0 {
         sem := make(chan bool, c.Options.ServingInfo.MaxRequestsInFlight)
-        handler = apiserver.MaxInFlightLimit(sem, longRunningRE, handler)
+        handler = apiserver.MaxInFlightLimit(sem, longRunningRequestCheck, handler)
     }
 
     c.serve(handler, extra)
@@ -558,7 +559,13 @@ func (c *MasterConfig) InstallUnprotectedAPI(container *restful.Container) ([]st |
| 558 | 558 |
|
| 559 | 559 |
// initAPIVersionRoute initializes the osapi endpoint to behave similar to the upstream api endpoint |
| 560 | 560 |
func initAPIVersionRoute(root *restful.WebService, prefix string, versions ...string) {
|
| 561 |
- versionHandler := apiserver.APIVersionHandler(kapi.Codecs, versions...) |
|
| 561 |
+ versionHandler := apiserver.APIVersionHandler(kapi.Codecs, func(req *restful.Request) *unversioned.APIVersions {
|
|
| 562 |
+ apiVersionsForDiscovery := unversioned.APIVersions{
|
|
| 563 |
+ // TODO: ServerAddressByClientCIDRs: s.getServerAddressByClientCIDRs(req.Request), |
|
| 564 |
+ Versions: versions, |
|
| 565 |
+ } |
|
| 566 |
+ return &apiVersionsForDiscovery |
|
| 567 |
+ }) |
|
| 562 | 568 |
root.Route(root.GET(prefix).To(versionHandler). |
| 563 | 569 |
Doc("list supported server API versions").
|
| 564 | 570 |
Produces(restful.MIME_JSON). |
| ... | ... |
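Above, the osapi version route changes from passing a fixed version list to passing a function evaluated per request, which leaves room to later fill in ServerAddressByClientCIDRs from the incoming request. A minimal stand-alone handler in the same spirit, using plain net/http and encoding/json rather than the go-restful types the server actually uses.

    package main

    import (
    	"encoding/json"
    	"net/http"
    )

    // apiVersions mirrors the shape of the discovery payload served at the API root.
    type apiVersions struct {
    	Versions []string `json:"versions"`
    	// ServerAddressByClientCIDRs could be computed per request and added here.
    }

    // versionHandler builds the payload per request instead of capturing a fixed value.
    func versionHandler(getVersions func(r *http.Request) *apiVersions) http.Handler {
    	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		w.Header().Set("Content-Type", "application/json")
    		json.NewEncoder(w).Encode(getVersions(r))
    	})
    }

    func main() {
    	h := versionHandler(func(r *http.Request) *apiVersions {
    		return &apiVersions{Versions: []string{"v1", "v1beta3"}}
    	})
    	http.Handle("/oapi", h)
    	http.ListenAndServe(":8080", nil)
    }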
@@ -441,7 +441,7 @@ func (c *MasterConfig) BuildConfigWebHookClient() *osclient.Client {
|
| 441 | 441 |
|
| 442 | 442 |
// BuildControllerClients returns the build controller client objects |
| 443 | 443 |
func (c *MasterConfig) BuildControllerClients() (*osclient.Client, *kclient.Client) {
|
| 444 |
- osClient, kClient, err := c.GetServiceAccountClients(bootstrappolicy.InfraBuildControllerServiceAccountName) |
|
| 444 |
+ _, osClient, kClient, err := c.GetServiceAccountClients(bootstrappolicy.InfraBuildControllerServiceAccountName) |
|
| 445 | 445 |
if err != nil {
|
| 446 | 446 |
glog.Fatal(err) |
| 447 | 447 |
} |
| ... | ... |
@@ -480,7 +480,7 @@ func (c *MasterConfig) DeploymentConfigScaleClient() *kclient.Client {
|
| 480 | 480 |
|
| 481 | 481 |
// DeploymentControllerClients returns the deployment controller client objects |
| 482 | 482 |
func (c *MasterConfig) DeploymentControllerClients() (*osclient.Client, *kclient.Client) {
|
| 483 |
- osClient, kClient, err := c.GetServiceAccountClients(bootstrappolicy.InfraDeploymentControllerServiceAccountName) |
|
| 483 |
+ _, osClient, kClient, err := c.GetServiceAccountClients(bootstrappolicy.InfraDeploymentControllerServiceAccountName) |
|
| 484 | 484 |
if err != nil {
|
| 485 | 485 |
glog.Fatal(err) |
| 486 | 486 |
} |
| ... | ... |
@@ -567,9 +567,9 @@ func NewEtcdStorage(client newetcdclient.Client, version unversioned.GroupVersio |
| 567 | 567 |
|
| 568 | 568 |
// GetServiceAccountClients returns an OpenShift and Kubernetes client with the credentials of the |
| 569 | 569 |
// named service account in the infra namespace |
| 570 |
-func (c *MasterConfig) GetServiceAccountClients(name string) (*osclient.Client, *kclient.Client, error) {
|
|
| 570 |
+func (c *MasterConfig) GetServiceAccountClients(name string) (*restclient.Config, *osclient.Client, *kclient.Client, error) {
|
|
| 571 | 571 |
if len(name) == 0 {
|
| 572 |
- return nil, nil, errors.New("No service account name specified")
|
|
| 572 |
+ return nil, nil, nil, errors.New("No service account name specified")
|
|
| 573 | 573 |
} |
| 574 | 574 |
return serviceaccounts.Clients( |
| 575 | 575 |
c.PrivilegedLoopbackClientConfig, |
| ... | ... |
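GetServiceAccountClients now also returns the *restclient.Config it assembled, so existing callers discard it with the blank identifier while new callers (the namespace controller further down) can build additional clients from the same credentials. A tiny sketch of widening a constructor this way; the config and client types are placeholders, not the real ones.

    package main

    import (
    	"errors"
    	"fmt"
    )

    type config struct{ token string }
    type osClient struct{ cfg *config }
    type kubeClient struct{ cfg *config }

    // clientsFor returns the config used to build the clients alongside the clients
    // themselves, so a caller can derive further clients from the same credentials.
    func clientsFor(name string) (*config, *osClient, *kubeClient, error) {
    	if name == "" {
    		return nil, nil, nil, errors.New("no service account name specified")
    	}
    	cfg := &config{token: "token-for-" + name}
    	return cfg, &osClient{cfg}, &kubeClient{cfg}, nil
    }

    func main() {
    	// Existing callers that only need the typed clients ignore the config.
    	_, _, kc, err := clientsFor("job-controller")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(kc.cfg.token)

    	// A newer caller keeps the config to build something else from it.
    	cfg, _, _, err := clientsFor("namespace-controller")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(cfg.token)
    }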
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
|
| 9 | 9 |
"github.com/golang/glog" |
| 10 | 10 |
|
| 11 |
+ kctrlmgr "k8s.io/kubernetes/cmd/kube-controller-manager/app" |
|
| 11 | 12 |
cmapp "k8s.io/kubernetes/cmd/kube-controller-manager/app/options" |
| 12 | 13 |
"k8s.io/kubernetes/pkg/admission" |
| 13 | 14 |
kapi "k8s.io/kubernetes/pkg/api" |
| ... | ... |
@@ -54,6 +55,9 @@ import ( |
| 54 | 54 |
const ( |
| 55 | 55 |
defaultConcurrentResourceQuotaSyncs int = 5 |
| 56 | 56 |
defaultResourceQuotaSyncPeriod time.Duration = 5 * time.Minute |
| 57 |
+ |
|
| 58 |
+ // from CMServer MinResyncPeriod |
|
| 59 |
+ defaultReplenishmentSyncPeriod time.Duration = 12 * time.Hour |
|
| 57 | 60 |
) |
| 58 | 61 |
|
| 59 | 62 |
// RunProjectAuthorizationCache starts the project authorization cache |
| ... | ... |
@@ -440,20 +444,23 @@ func (c *MasterConfig) RunGroupCache() {
|
| 440 | 440 |
func (c *MasterConfig) RunResourceQuotaManager(cm *cmapp.CMServer) {
|
| 441 | 441 |
concurrentResourceQuotaSyncs := defaultConcurrentResourceQuotaSyncs |
| 442 | 442 |
resourceQuotaSyncPeriod := defaultResourceQuotaSyncPeriod |
| 443 |
+ replenishmentSyncPeriodFunc := controller.StaticResyncPeriodFunc(defaultReplenishmentSyncPeriod) |
|
| 443 | 444 |
if cm != nil {
|
| 444 | 445 |
// TODO: should these be part of os master config? |
| 445 | 446 |
concurrentResourceQuotaSyncs = cm.ConcurrentResourceQuotaSyncs |
| 446 |
- resourceQuotaSyncPeriod = cm.ResourceQuotaSyncPeriod |
|
| 447 |
+ resourceQuotaSyncPeriod = cm.ResourceQuotaSyncPeriod.Duration |
|
| 448 |
+ replenishmentSyncPeriodFunc = kctrlmgr.ResyncPeriod(cm) |
|
| 447 | 449 |
} |
| 448 | 450 |
|
| 449 | 451 |
osClient, kClient := c.ResourceQuotaManagerClients() |
| 450 | 452 |
resourceQuotaRegistry := quota.NewRegistry(osClient) |
| 451 | 453 |
resourceQuotaControllerOptions := &kresourcequota.ResourceQuotaControllerOptions{
|
| 452 |
- KubeClient: kClient, |
|
| 453 |
- ResyncPeriod: controller.StaticResyncPeriodFunc(resourceQuotaSyncPeriod), |
|
| 454 |
- Registry: resourceQuotaRegistry, |
|
| 455 |
- GroupKindsToReplenish: []unversioned.GroupKind{imageapi.Kind("ImageStream")},
|
|
| 456 |
- ControllerFactory: quotacontroller.NewReplenishmentControllerFactory(osClient), |
|
| 454 |
+ KubeClient: kClient, |
|
| 455 |
+ ResyncPeriod: controller.StaticResyncPeriodFunc(resourceQuotaSyncPeriod), |
|
| 456 |
+ Registry: resourceQuotaRegistry, |
|
| 457 |
+ GroupKindsToReplenish: []unversioned.GroupKind{imageapi.Kind("ImageStream")},
|
|
| 458 |
+ ControllerFactory: quotacontroller.NewReplenishmentControllerFactory(osClient), |
|
| 459 |
+ ReplenishmentResyncPeriod: replenishmentSyncPeriodFunc, |
|
| 457 | 460 |
} |
| 458 | 461 |
go kresourcequota.NewResourceQuotaController(resourceQuotaControllerOptions).Run(concurrentResourceQuotaSyncs, utilwait.NeverStop) |
| 459 | 462 |
} |
| ... | ... |
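The quota manager now takes its replenishment resync period from the controller manager's ResyncPeriod helper when a CMServer is available, falling back to a static 12 hour default otherwise. The difference between the two styles of period function, sketched with plain closures; the jitter factor below is illustrative, not the upstream formula.

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // resyncPeriodFunc matches the shape controllers expect: a fresh period per call.
    type resyncPeriodFunc func() time.Duration

    // staticResyncPeriod always returns the same duration.
    func staticResyncPeriod(d time.Duration) resyncPeriodFunc {
    	return func() time.Duration { return d }
    }

    // jitteredResyncPeriod spreads resyncs out so controllers do not all relist at once,
    // similar in spirit to deriving the period from the controller manager's MinResyncPeriod.
    func jitteredResyncPeriod(min time.Duration) resyncPeriodFunc {
    	return func() time.Duration {
    		factor := 1.0 + rand.Float64() // somewhere in [min, 2*min)
    		return time.Duration(float64(min.Nanoseconds()) * factor)
    	}
    }

    func main() {
    	static := staticResyncPeriod(12 * time.Hour)
    	jittered := jitteredResyncPeriod(12 * time.Hour)
    	fmt.Println(static(), jittered(), jittered())
    }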
@@ -16,7 +16,10 @@ import ( |
| 16 | 16 |
"github.com/spf13/cobra" |
| 17 | 17 |
|
| 18 | 18 |
kerrors "k8s.io/kubernetes/pkg/api/errors" |
| 19 |
+ "k8s.io/kubernetes/pkg/apis/extensions" |
|
| 19 | 20 |
"k8s.io/kubernetes/pkg/capabilities" |
| 21 |
+ "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" |
|
| 22 |
+ "k8s.io/kubernetes/pkg/client/typed/dynamic" |
|
| 20 | 23 |
kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" |
| 21 | 24 |
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" |
| 22 | 25 |
|
| ... | ... |
@@ -531,39 +534,46 @@ func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) erro |
| 531 | 531 |
oc.RunSecurityAllocationController() |
| 532 | 532 |
|
| 533 | 533 |
if kc != nil {
|
| 534 |
- _, rcClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraReplicationControllerServiceAccountName) |
|
| 534 |
+ _, _, rcClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraReplicationControllerServiceAccountName) |
|
| 535 | 535 |
if err != nil {
|
| 536 | 536 |
glog.Fatalf("Could not get client for replication controller: %v", err)
|
| 537 | 537 |
} |
| 538 |
- _, jobClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraJobControllerServiceAccountName) |
|
| 538 |
+ _, _, jobClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraJobControllerServiceAccountName) |
|
| 539 | 539 |
if err != nil {
|
| 540 | 540 |
glog.Fatalf("Could not get client for job controller: %v", err)
|
| 541 | 541 |
} |
| 542 |
- hpaOClient, hpaKClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraHPAControllerServiceAccountName) |
|
| 542 |
+ _, hpaOClient, hpaKClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraHPAControllerServiceAccountName) |
|
| 543 | 543 |
if err != nil {
|
| 544 | 544 |
glog.Fatalf("Could not get client for HPA controller: %v", err)
|
| 545 | 545 |
} |
| 546 | 546 |
|
| 547 |
- _, recyclerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeRecyclerControllerServiceAccountName) |
|
| 547 |
+ _, _, recyclerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeRecyclerControllerServiceAccountName) |
|
| 548 | 548 |
if err != nil {
|
| 549 | 549 |
glog.Fatalf("Could not get client for persistent volume recycler controller: %v", err)
|
| 550 | 550 |
} |
| 551 | 551 |
|
| 552 |
- _, binderClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeBinderControllerServiceAccountName) |
|
| 552 |
+ _, _, binderClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeBinderControllerServiceAccountName) |
|
| 553 | 553 |
if err != nil {
|
| 554 | 554 |
glog.Fatalf("Could not get client for persistent volume binder controller: %v", err)
|
| 555 | 555 |
} |
| 556 | 556 |
|
| 557 |
- _, provisionerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeProvisionerControllerServiceAccountName) |
|
| 557 |
+ _, _, provisionerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeProvisionerControllerServiceAccountName) |
|
| 558 | 558 |
if err != nil {
|
| 559 | 559 |
glog.Fatalf("Could not get client for persistent volume provisioner controller: %v", err)
|
| 560 | 560 |
} |
| 561 | 561 |
|
| 562 |
- _, daemonSetClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraDaemonSetControllerServiceAccountName) |
|
| 562 |
+ _, _, daemonSetClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraDaemonSetControllerServiceAccountName) |
|
| 563 | 563 |
if err != nil {
|
| 564 | 564 |
glog.Fatalf("Could not get client for daemonset controller: %v", err)
|
| 565 | 565 |
} |
| 566 | 566 |
|
| 567 |
+ namespaceControllerClientConfig, _, namespaceControllerKubeClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraNamespaceControllerServiceAccountName) |
|
| 568 |
+ if err != nil {
|
|
| 569 |
+ glog.Fatalf("Could not get client for namespace controller: %v", err)
|
|
| 570 |
+ } |
|
| 571 |
+ namespaceControllerClientSet := internalclientset.FromUnversionedClient(namespaceControllerKubeClient) |
|
| 572 |
+ namespaceControllerClientPool := dynamic.NewClientPool(namespaceControllerClientConfig, dynamic.LegacyAPIPathResolverFunc) |
|
| 573 |
+ |
|
| 567 | 574 |
// called by admission control |
| 568 | 575 |
kc.RunResourceQuotaManager() |
| 569 | 576 |
oc.RunResourceQuotaManager(kc.ControllerManager) |
| ... | ... |
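The namespace controller is the first consumer of the extra config return value: besides a typed clientset it needs a pool of dynamic clients, one per API group/version, so it can delete every kind of resource in a terminating namespace. A rough sketch of such a lazily built pool sharing one config; the types here are stand-ins, not the upstream dynamic client package.

    package main

    import (
    	"fmt"
    	"sync"
    )

    type config struct{ host, token string }

    // dynamicClient is a placeholder for a client that can operate on any resource
    // within one API group/version.
    type dynamicClient struct {
    	cfg          *config
    	groupVersion string
    }

    // clientPool hands out one dynamic client per group/version, all built from one config.
    type clientPool struct {
    	mu      sync.Mutex
    	cfg     *config
    	clients map[string]*dynamicClient
    }

    func newClientPool(cfg *config) *clientPool {
    	return &clientPool{cfg: cfg, clients: map[string]*dynamicClient{}}
    }

    func (p *clientPool) clientFor(groupVersion string) *dynamicClient {
    	p.mu.Lock()
    	defer p.mu.Unlock()
    	if c, ok := p.clients[groupVersion]; ok {
    		return c
    	}
    	c := &dynamicClient{cfg: p.cfg, groupVersion: groupVersion}
    	p.clients[groupVersion] = c
    	return c
    }

    func main() {
    	pool := newClientPool(&config{host: "https://localhost:8443", token: "sa-token"})
    	for _, gv := range []string{"v1", "extensions/v1beta1", "batch/v1"} {
    		fmt.Printf("client for %s -> %v\n", gv, pool.clientFor(gv) != nil)
    	}
    }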
@@ -572,13 +582,25 @@ func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) erro |
| 572 | 572 |
kc.RunNodeController() |
| 573 | 573 |
kc.RunScheduler() |
| 574 | 574 |
kc.RunReplicationController(rcClient) |
| 575 |
- if len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, configapi.APIGroupExtensions)) > 0 {
|
|
| 575 |
+ |
|
| 576 |
+ extensionsEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, extensions.GroupName)) > 0 |
|
| 577 |
+ |
|
| 578 |
+ // TODO: enable this check once the job controller can use the batch API if the extensions API is disabled |
|
| 579 |
+ // batchEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, batch.GroupName)) > 0 |
|
| 580 |
+ if extensionsEnabled /*|| batchEnabled*/ {
|
|
| 576 | 581 |
kc.RunJobController(jobClient) |
| 582 |
+ } |
|
| 583 |
+ // TODO: enable this check once the HPA controller can use the autoscaling API if the extensions API is disabled |
|
| 584 |
+ // autoscalingEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, autoscaling.GroupName)) > 0 |
|
| 585 |
+ if extensionsEnabled /*|| autoscalingEnabled*/ {
|
|
| 577 | 586 |
kc.RunHPAController(hpaOClient, hpaKClient, oc.Options.PolicyConfig.OpenShiftInfrastructureNamespace) |
| 587 |
+ } |
|
| 588 |
+ if extensionsEnabled {
|
|
| 578 | 589 |
kc.RunDaemonSetsController(daemonSetClient) |
| 579 | 590 |
} |
| 591 |
+ |
|
| 580 | 592 |
kc.RunEndpointController() |
| 581 |
- kc.RunNamespaceController() |
|
| 593 |
+ kc.RunNamespaceController(namespaceControllerClientSet, namespaceControllerClientPool) |
|
| 582 | 594 |
kc.RunPersistentVolumeClaimBinder(binderClient) |
| 583 | 595 |
kc.RunPersistentVolumeProvisioner(provisionerClient) |
| 584 | 596 |
kc.RunPersistentVolumeClaimRecycler(oc.ImageFor("recycler"), recyclerClient, oc.Options.PolicyConfig.OpenShiftInfrastructureNamespace)
|
| ... | ... |
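startControllers now gates the job, HPA, and daemon set controllers on which API groups are enabled, with TODOs for the day those controllers can run purely off the batch and autoscaling groups. A compact sketch of that gating logic; GetEnabledAPIVersionsForGroup is approximated by a plain map lookup.

    package main

    import "fmt"

    // enabledVersions stands in for the master config's per-group enabled API versions.
    var enabledVersions = map[string][]string{
    	"":           {"v1"},
    	"extensions": {"v1beta1"},
    	"batch":      {"v1"},
    	// "autoscaling" intentionally absent to show a disabled group.
    }

    func groupEnabled(group string) bool {
    	return len(enabledVersions[group]) > 0
    }

    func main() {
    	extensionsEnabled := groupEnabled("extensions")

    	// TODO(sketch): once the job controller can use batch alone, this becomes
    	// extensionsEnabled || groupEnabled("batch").
    	if extensionsEnabled {
    		fmt.Println("starting job controller")
    	}
    	// TODO(sketch): likewise groupEnabled("autoscaling") for the HPA controller.
    	if extensionsEnabled {
    		fmt.Println("starting HPA controller")
    	}
    	if extensionsEnabled {
    		fmt.Println("starting daemon set controller")
    	}
    }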
@@ -32,7 +32,6 @@ import ( |
| 32 | 32 |
"k8s.io/kubernetes/pkg/labels" |
| 33 | 33 |
"k8s.io/kubernetes/pkg/runtime" |
| 34 | 34 |
"k8s.io/kubernetes/pkg/util/homedir" |
| 35 |
- "k8s.io/kubernetes/pkg/util/sets" |
|
| 36 | 35 |
|
| 37 | 36 |
"github.com/openshift/origin/pkg/api/latest" |
| 38 | 37 |
authorizationapi "github.com/openshift/origin/pkg/authorization/api" |
| ... | ... |
@@ -145,8 +144,8 @@ type Factory struct {
|
| 145 | 145 |
func DefaultGenerators(cmdName string) map[string]kubectl.Generator {
|
| 146 | 146 |
generators := map[string]map[string]kubectl.Generator{}
|
| 147 | 147 |
generators["run"] = map[string]kubectl.Generator{
|
| 148 |
- "run/v1": deploygen.BasicDeploymentConfigController{},
|
|
| 149 |
- "run-controller/v1": kubectl.BasicReplicationController{},
|
|
| 148 |
+ cmdutil.RunV1GeneratorName: deploygen.BasicDeploymentConfigController{},
|
|
| 149 |
+ "run-controller/v1": kubectl.BasicReplicationController{},
|
|
| 150 | 150 |
} |
| 151 | 151 |
generators["expose"] = map[string]kubectl.Generator{
|
| 152 | 152 |
"route/v1": routegen.RouteGenerator{},
|
| ... | ... |
@@ -157,20 +156,7 @@ func DefaultGenerators(cmdName string) map[string]kubectl.Generator {
|
| 157 | 157 |
|
| 158 | 158 |
// NewFactory creates an object that holds common methods across all OpenShift commands |
| 159 | 159 |
func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory {
|
| 160 |
- var restMapper meta.MultiRESTMapper |
|
| 161 |
- seenGroups := sets.String{}
|
|
| 162 |
- for _, gv := range registered.EnabledVersions() {
|
|
| 163 |
- if seenGroups.Has(gv.Group) {
|
|
| 164 |
- continue |
|
| 165 |
- } |
|
| 166 |
- seenGroups.Insert(gv.Group) |
|
| 167 |
- |
|
| 168 |
- groupMeta, err := registered.Group(gv.Group) |
|
| 169 |
- if err != nil {
|
|
| 170 |
- continue |
|
| 171 |
- } |
|
| 172 |
- restMapper = meta.MultiRESTMapper(append(restMapper, groupMeta.RESTMapper)) |
|
| 173 |
- } |
|
| 160 |
+ restMapper := registered.RESTMapper() |
|
| 174 | 161 |
|
| 175 | 162 |
clients := &clientCache{
|
| 176 | 163 |
clients: make(map[string]*client.Client), |
| ... | ... |
@@ -304,6 +290,10 @@ func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory {
|
| 304 | 304 |
for k, v := range kubeGenerators {
|
| 305 | 305 |
ret[k] = v |
| 306 | 306 |
} |
| 307 |
+ // TODO: enable once deployments are supported in origin |
|
| 308 |
+ if cmdName == "run" {
|
|
| 309 |
+ delete(ret, cmdutil.DeploymentV1Beta1GeneratorName) |
|
| 310 |
+ } |
|
| 307 | 311 |
for k, v := range originGenerators {
|
| 308 | 312 |
ret[k] = v |
| 309 | 313 |
} |
| ... | ... |
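The factory change above merges the upstream kubectl generators with Origin's own and then removes the upstream Deployment generator from `oc run` until deployments are supported; Origin entries override upstream ones on key collisions. In map terms (generator values reduced to strings for brevity, and the key names only illustrative of the upstream constants):

    package main

    import "fmt"

    func generatorsFor(cmdName string) map[string]string {
    	kubeGenerators := map[string]string{
    		"run/v1":             "BasicReplicationController",
    		"run-pod/v1":         "BasicPod",
    		"deployment/v1beta1": "DeploymentBasicGenerator",
    		"job/v1":             "JobV1",
    		"job/v1beta1":        "JobV1Beta1",
    	}
    	originGenerators := map[string]string{
    		"run/v1":            "BasicDeploymentConfigController",
    		"run-controller/v1": "BasicReplicationController",
    	}

    	ret := map[string]string{}
    	for k, v := range kubeGenerators {
    		ret[k] = v
    	}
    	// Drop generators whose output types oc cannot handle yet.
    	if cmdName == "run" {
    		delete(ret, "deployment/v1beta1")
    	}
    	// Origin generators win on collisions, e.g. run/v1 produces a DeploymentConfig.
    	for k, v := range originGenerators {
    		ret[k] = v
    	}
    	return ret
    }

    func main() {
    	fmt.Println(generatorsFor("run"))
    }

The TestRunGenerators test in the next hunk pins the resulting key set, so a new upstream generator shows up as a test failure rather than silently leaking into `oc run`.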
@@ -3,15 +3,33 @@ package clientcmd |
| 3 | 3 |
import ( |
| 4 | 4 |
"net/http" |
| 5 | 5 |
"net/http/httptest" |
| 6 |
+ "reflect" |
|
| 6 | 7 |
"testing" |
| 7 | 8 |
|
| 8 | 9 |
"k8s.io/kubernetes/pkg/client/restclient" |
| 10 |
+ "k8s.io/kubernetes/pkg/util/sets" |
|
| 9 | 11 |
|
| 10 | 12 |
"github.com/openshift/origin/pkg/api/v1" |
| 11 | 13 |
"github.com/openshift/origin/pkg/api/v1beta3" |
| 12 | 14 |
"github.com/openshift/origin/pkg/client" |
| 13 | 15 |
) |
| 14 | 16 |
|
| 17 |
+// TestRunGenerators makes sure we catch new generators added to `oc run` |
|
| 18 |
+func TestRunGenerators(t *testing.T) {
|
|
| 19 |
+ f := NewFactory(nil) |
|
| 20 |
+ |
|
| 21 |
+ // Contains the run generators we expect to see |
|
| 22 |
+ // If new generators appear from upstream, make sure we support the underlying types |
|
| 23 |
+ // If we do (like Job, Pod, etc), add them to the expected list here |
|
| 24 |
+ // If we do not support in oc yet (like upstream Deployments), remove them in our factory's Generators function in factory.go |
|
| 25 |
+ expectedRunGenerators := sets.NewString("job/v1", "job/v1beta1", "run-controller/v1", "run-pod/v1", "run/v1").List()
|
|
| 26 |
+ |
|
| 27 |
+ runGenerators := sets.StringKeySet(f.Generators("run")).List()
|
|
| 28 |
+ if !reflect.DeepEqual(expectedRunGenerators, runGenerators) {
|
|
| 29 |
+ t.Errorf("Expected run generators:%#v, got:\n%#v", expectedRunGenerators, runGenerators)
|
|
| 30 |
+ } |
|
| 31 |
+} |
|
| 32 |
+ |
|
| 15 | 33 |
func TestClientConfigForVersion(t *testing.T) {
|
| 16 | 34 |
called := 0 |
| 17 | 35 |
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
| ... | ... |
@@ -57,7 +57,7 @@ func ResolveResource(defaultResource, resourceString string, mapper meta.RESTMap |
| 57 | 57 |
case 1: |
| 58 | 58 |
name = parts[0] |
| 59 | 59 |
case 2: |
| 60 |
- partialResource := unversioned.GroupVersionResource{Resource: parts[0]}
|
|
| 60 |
+ partialResource := unversioned.GroupVersionResource{Resource: strings.ToLower(parts[0])}
|
|
| 61 | 61 |
gvrs, err := mapper.ResourcesFor(partialResource) |
| 62 | 62 |
if err != nil {
|
| 63 | 63 |
return "", "", err |
| ... | ... |
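ResolveResource now lowercases the user-supplied resource before asking the RESTMapper for matches, so `oc get Pod/foo` behaves like `oc get pod/foo`. The essential transformation, with the mapper lookup stubbed out by a map for the sake of a runnable sketch:

    package main

    import (
    	"fmt"
    	"strings"
    )

    var knownResources = map[string]bool{"pods": true, "jobs": true}

    // resolveResource splits RESOURCE/NAME input, normalizing the resource to lower case
    // before it is looked up (the real code hands it to a RESTMapper instead of a map).
    func resolveResource(defaultResource, resourceString string) (string, string, error) {
    	parts := strings.SplitN(resourceString, "/", 2)
    	switch len(parts) {
    	case 1:
    		return defaultResource, parts[0], nil
    	case 2:
    		resource := strings.ToLower(parts[0])
    		if !knownResources[resource] {
    			return "", "", fmt.Errorf("no resource %q", parts[0])
    		}
    		return resource, parts[1], nil
    	}
    	return "", "", fmt.Errorf("invalid input %q", resourceString)
    }

    func main() {
    	fmt.Println(resolveResource("pods", "Pods/mypod"))
    }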
@@ -73,7 +73,7 @@ func (b *Bulk) Create(list *kapi.List, namespace string) []error {
|
| 73 | 73 |
|
| 74 | 74 |
errs := []error{}
|
| 75 | 75 |
for i, item := range list.Items {
|
| 76 |
- info, err := resourceMapper.InfoForObject(item) |
|
| 76 |
+ info, err := resourceMapper.InfoForObject(item, nil) |
|
| 77 | 77 |
if err != nil {
|
| 78 | 78 |
errs = append(errs, err) |
| 79 | 79 |
if after(info, err) {
|
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
kapi "k8s.io/kubernetes/pkg/api" |
| 7 | 7 |
"k8s.io/kubernetes/pkg/api/errors" |
| 8 | 8 |
"k8s.io/kubernetes/pkg/api/rest" |
| 9 |
+ "k8s.io/kubernetes/pkg/api/unversioned" |
|
| 9 | 10 |
"k8s.io/kubernetes/pkg/apis/extensions" |
| 10 | 11 |
kclient "k8s.io/kubernetes/pkg/client/unversioned" |
| 11 | 12 |
"k8s.io/kubernetes/pkg/fields" |
| ... | ... |
@@ -102,7 +103,7 @@ func (r *ScaleREST) Get(ctx kapi.Context, name string) (runtime.Object, error) {
|
| 102 | 102 |
}, |
| 103 | 103 |
Status: extensions.ScaleStatus{
|
| 104 | 104 |
Replicas: totalReplicas, |
| 105 |
- Selector: deploymentConfig.Spec.Selector, |
|
| 105 |
+ Selector: &unversioned.LabelSelector{MatchLabels: deploymentConfig.Spec.Selector},
|
|
| 106 | 106 |
}, |
| 107 | 107 |
}, nil |
| 108 | 108 |
} |
| ... | ... |
@@ -136,7 +137,7 @@ func (r *ScaleREST) Update(ctx kapi.Context, obj runtime.Object) (runtime.Object |
| 136 | 136 |
Replicas: scale.Spec.Replicas, |
| 137 | 137 |
}, |
| 138 | 138 |
Status: extensions.ScaleStatus{
|
| 139 |
- Selector: deploymentConfig.Spec.Selector, |
|
| 139 |
+ Selector: &unversioned.LabelSelector{MatchLabels: deploymentConfig.Spec.Selector},
|
|
| 140 | 140 |
}, |
| 141 | 141 |
} |
| 142 | 142 |
|
| ... | ... |
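The scale subresource changes reflect the internal ScaleStatus API moving from a plain map selector to a pointer to a LabelSelector with MatchLabels, so the deployment config's map now has to be wrapped. A stripped-down version of that conversion using local stand-in types:

    package main

    import "fmt"

    // labelSelector mirrors the shape of unversioned.LabelSelector (MatchExpressions omitted).
    type labelSelector struct {
    	MatchLabels map[string]string
    }

    type scaleStatus struct {
    	Replicas int32
    	Selector *labelSelector
    }

    // statusFor wraps a deployment config's map selector in the new selector type.
    func statusFor(replicas int32, selector map[string]string) scaleStatus {
    	return scaleStatus{
    		Replicas: replicas,
    		Selector: &labelSelector{MatchLabels: selector},
    	}
    }

    func main() {
    	st := statusFor(3, map[string]string{"deploymentconfig": "frontend"})
    	fmt.Printf("%d replicas selected by %v\n", st.Replicas, st.Selector.MatchLabels)
    }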
@@ -9,6 +9,7 @@ import ( |
| 9 | 9 |
kapi "k8s.io/kubernetes/pkg/api" |
| 10 | 10 |
kapierrors "k8s.io/kubernetes/pkg/api/errors" |
| 11 | 11 |
"k8s.io/kubernetes/pkg/api/unversioned" |
| 12 |
+ "k8s.io/kubernetes/pkg/apis/batch" |
|
| 12 | 13 |
"k8s.io/kubernetes/pkg/apis/extensions" |
| 13 | 14 |
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" |
| 14 | 15 |
"k8s.io/kubernetes/pkg/util/sets" |
| ... | ... |
@@ -59,6 +60,7 @@ var resourcesToCheck = map[unversioned.GroupResource]unversioned.GroupKind{
|
| 59 | 59 |
kapi.Resource("pods"): kapi.Kind("Pod"),
|
| 60 | 60 |
kapi.Resource("podtemplates"): kapi.Kind("PodTemplate"),
|
| 61 | 61 |
kapi.Resource("replicationcontrollers"): kapi.Kind("ReplicationController"),
|
| 62 |
+ batch.Resource("jobs"): batch.Kind("Job"),
|
|
| 62 | 63 |
extensions.Resource("deployments"): extensions.Kind("Deployment"),
|
| 63 | 64 |
extensions.Resource("replicasets"): extensions.Kind("ReplicaSet"),
|
| 64 | 65 |
extensions.Resource("jobs"): extensions.Kind("Job"),
|
| ... | ... |
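Because jobs are now reachable through both the extensions and batch groups, the admission plugin's checked-resource map gains a second entry for the same kind under batch. A small illustration of keying such a table by group plus resource rather than by resource name alone; the struct types below are simplified stand-ins for GroupResource and GroupKind.

    package main

    import "fmt"

    type groupResource struct{ Group, Resource string }
    type groupKind struct{ Group, Kind string }

    // resourcesToCheck maps each group-qualified resource to the kind whose pod spec
    // the admission plugin needs to inspect; the same kind can appear under two groups.
    var resourcesToCheck = map[groupResource]groupKind{
    	{"", "pods"}:                   {"", "Pod"},
    	{"", "replicationcontrollers"}: {"", "ReplicationController"},
    	{"batch", "jobs"}:              {"batch", "Job"},
    	{"extensions", "jobs"}:         {"extensions", "Job"},
    	{"extensions", "deployments"}:  {"extensions", "Deployment"},
    }

    func shouldCheck(group, resource string) (groupKind, bool) {
    	k, ok := resourcesToCheck[groupResource{group, resource}]
    	return k, ok
    }

    func main() {
    	fmt.Println(shouldCheck("batch", "jobs"))
    	fmt.Println(shouldCheck("autoscaling", "horizontalpodautoscalers"))
    }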
@@ -17,6 +17,7 @@ import ( |
| 17 | 17 |
kapi "k8s.io/kubernetes/pkg/api" |
| 18 | 18 |
"k8s.io/kubernetes/pkg/api/unversioned" |
| 19 | 19 |
"k8s.io/kubernetes/pkg/apimachinery/registered" |
| 20 |
+ "k8s.io/kubernetes/pkg/apis/batch" |
|
| 20 | 21 |
"k8s.io/kubernetes/pkg/apis/extensions" |
| 21 | 22 |
"k8s.io/kubernetes/pkg/auth/user" |
| 22 | 23 |
"k8s.io/kubernetes/pkg/runtime" |
| ... | ... |
@@ -195,6 +196,12 @@ func TestPodNodeConstraintsResources(t *testing.T) {
|
| 195 | 195 |
prefix: "Job", |
| 196 | 196 |
}, |
| 197 | 197 |
{
|
| 198 |
+ resource: job, |
|
| 199 |
+ kind: batch.Kind("Job"),
|
|
| 200 |
+ groupresource: batch.Resource("jobs"),
|
|
| 201 |
+ prefix: "Job", |
|
| 202 |
+ }, |
|
| 203 |
+ {
|
|
| 198 | 204 |
resource: deploymentConfig, |
| 199 | 205 |
kind: deployapi.Kind("DeploymentConfig"),
|
| 200 | 206 |
groupresource: deployapi.Resource("deploymentconfigs"),
|
| ... | ... |
@@ -334,7 +341,7 @@ func deployment(setNodeSelector bool) runtime.Object {
|
| 334 | 334 |
|
| 335 | 335 |
func replicaSet(setNodeSelector bool) runtime.Object {
|
| 336 | 336 |
rs := &extensions.ReplicaSet{}
|
| 337 |
- rs.Spec.Template = podTemplateSpec(setNodeSelector) |
|
| 337 |
+ rs.Spec.Template = *podTemplateSpec(setNodeSelector) |
|
| 338 | 338 |
return rs |
| 339 | 339 |
} |
| 340 | 340 |
|
| ... | ... |
@@ -55,7 +55,7 @@ func (s *ClientLookupTokenRetriever) GetToken(namespace, name string) (string, e |
| 55 | 55 |
|
| 56 | 56 |
// Clients returns an OpenShift and Kubernetes client with the credentials of the named service account |
| 57 | 57 |
// TODO: change return types to client.Interface/kclient.Interface to allow auto-reloading credentials |
| 58 |
-func Clients(config restclient.Config, tokenRetriever TokenRetriever, namespace, name string) (*client.Client, *kclient.Client, error) {
|
|
| 58 |
+func Clients(config restclient.Config, tokenRetriever TokenRetriever, namespace, name string) (*restclient.Config, *client.Client, *kclient.Client, error) {
|
|
| 59 | 59 |
// Clear existing auth info |
| 60 | 60 |
config.Username = "" |
| 61 | 61 |
config.Password = "" |
| ... | ... |
@@ -63,26 +63,32 @@ func Clients(config restclient.Config, tokenRetriever TokenRetriever, namespace, |
| 63 | 63 |
config.CertData = []byte{}
|
| 64 | 64 |
config.KeyFile = "" |
| 65 | 65 |
config.KeyData = []byte{}
|
| 66 |
+ config.BearerToken = "" |
|
| 67 |
+ |
|
| 68 |
+ if len(config.UserAgent) > 0 {
|
|
| 69 |
+ config.UserAgent += " " |
|
| 70 |
+ } |
|
| 71 |
+ config.UserAgent += fmt.Sprintf("system:serviceaccount:%s:%s", namespace, name)
|
|
| 66 | 72 |
|
| 67 | 73 |
// For now, just initialize the token once |
| 68 | 74 |
// TODO: refetch the token if the client encounters 401 errors |
| 69 | 75 |
token, err := tokenRetriever.GetToken(namespace, name) |
| 70 | 76 |
if err != nil {
|
| 71 |
- return nil, nil, err |
|
| 77 |
+ return nil, nil, nil, err |
|
| 72 | 78 |
} |
| 73 | 79 |
config.BearerToken = token |
| 74 | 80 |
|
| 75 | 81 |
c, err := client.New(&config) |
| 76 | 82 |
if err != nil {
|
| 77 |
- return nil, nil, err |
|
| 83 |
+ return nil, nil, nil, err |
|
| 78 | 84 |
} |
| 79 | 85 |
|
| 80 | 86 |
kc, err := kclient.New(&config) |
| 81 | 87 |
if err != nil {
|
| 82 |
- return nil, nil, err |
|
| 88 |
+ return nil, nil, nil, err |
|
| 83 | 89 |
} |
| 84 | 90 |
|
| 85 |
- return c, kc, nil |
|
| 91 |
+ return &config, c, kc, nil |
|
| 86 | 92 |
} |
| 87 | 93 |
|
| 88 | 94 |
// IsValidServiceAccountToken returns true if the given secret contains a service account token valid for the given service account |
| ... | ... |
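serviceaccounts.Clients now clears any bearer token inherited from the loopback config, appends a system:serviceaccount:<namespace>:<name> suffix to the user agent, and returns the finished config along with both clients. The credential-scrubbing and user-agent part, sketched on a placeholder config struct rather than restclient.Config:

    package main

    import "fmt"

    type clientConfig struct {
    	Username, Password string
    	BearerToken        string
    	UserAgent          string
    }

    // configForServiceAccount copies the base config, strips any existing credentials,
    // tags the user agent with the service account identity, and installs its token.
    func configForServiceAccount(base clientConfig, namespace, name, token string) clientConfig {
    	cfg := base // value copy, so the caller's config is untouched
    	cfg.Username = ""
    	cfg.Password = ""
    	cfg.BearerToken = ""
    	if len(cfg.UserAgent) > 0 {
    		cfg.UserAgent += " "
    	}
    	cfg.UserAgent += fmt.Sprintf("system:serviceaccount:%s:%s", namespace, name)
    	cfg.BearerToken = token
    	return cfg
    }

    func main() {
    	base := clientConfig{UserAgent: "openshift-master/v1.1", BearerToken: "loopback-token"}
    	cfg := configForServiceAccount(base, "openshift-infra", "namespace-controller", "sa-token")
    	fmt.Printf("%+v\n", cfg)
    }

Tagging the user agent per service account makes it much easier to attribute API traffic from the infra controllers in the apiserver logs.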
@@ -199,6 +199,13 @@ os::cmd::expect_success_and_not_text 'oc get clusterrolebindings/basic-users -o |
| 199 | 199 |
# Ensure --additive-only=false removes customized users from the binding |
| 200 | 200 |
os::cmd::expect_success 'oadm policy reconcile-cluster-role-bindings --additive-only=false --confirm' |
| 201 | 201 |
os::cmd::expect_success_and_not_text 'oc get clusterrolebindings/basic-users -o json' 'custom-user' |
| 202 |
+# check the reconcile again with a specific cluster role name |
|
| 203 |
+os::cmd::expect_success 'oc delete clusterrolebinding/basic-users' |
|
| 204 |
+os::cmd::expect_failure 'oc get clusterrolebinding/basic-users' |
|
| 205 |
+os::cmd::expect_success 'oadm policy reconcile-cluster-role-bindings cluster-admin --confirm' |
|
| 206 |
+os::cmd::expect_failure 'oc get clusterrolebinding/basic-users' |
|
| 207 |
+os::cmd::expect_success 'oadm policy reconcile-cluster-role-bindings basic-user --confirm' |
|
| 208 |
+os::cmd::expect_success 'oc get clusterrolebinding/basic-users' |
|
| 202 | 209 |
echo "admin-reconcile-cluster-role-bindings: ok" |
| 203 | 210 |
|
| 204 | 211 |
os::cmd::expect_success "oc create -f test/extended/fixtures/roles/policy-roles.yaml" |
| ... | ... |
@@ -232,6 +232,16 @@ os::cmd::expect_success 'oc run --image=openshift/hello-openshift --restart=Neve |
| 232 | 232 |
os::cmd::expect_success 'oc run --image=openshift/hello-openshift --generator=job/v1beta1 --restart=Never test4' |
| 233 | 233 |
os::cmd::expect_success 'oc delete dc/test rc/test2 pod/test3 job/test4' |
| 234 | 234 |
|
| 235 |
+os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o "go-template={{.kind}} {{.apiVersion}}"' 'DeploymentConfig v1'
|
|
| 236 |
+os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o "go-template={{.kind}} {{.apiVersion}}" --restart=Always' 'DeploymentConfig v1'
|
|
| 237 |
+os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o "go-template={{.kind}} {{.apiVersion}}" --restart=Never' 'Pod v1'
|
|
| 238 |
+os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o "go-template={{.kind}} {{.apiVersion}}" --generator=job/v1beta1' 'Job extensions/v1beta1'
|
|
| 239 |
+os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o "go-template={{.kind}} {{.apiVersion}}" --generator=job/v1' 'Job batch/v1'
|
|
| 240 |
+os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o "go-template={{.kind}} {{.apiVersion}}" --generator=run/v1' 'DeploymentConfig v1'
|
|
| 241 |
+os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o "go-template={{.kind}} {{.apiVersion}}" --generator=run-controller/v1' 'ReplicationController v1'
|
|
| 242 |
+os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o "go-template={{.kind}} {{.apiVersion}}" --generator=run-pod/v1' 'Pod v1'
|
|
| 243 |
+os::cmd::expect_failure_and_text 'oc run --dry-run foo --image=bar -o "go-template={{.kind}} {{.apiVersion}}" --generator=deployment/v1beta1' 'not found'
|
|
| 244 |
+ |
|
| 235 | 245 |
os::cmd::expect_success 'oc process -f examples/sample-app/application-template-stibuild.json -l name=mytemplate | oc create -f -' |
| 236 | 246 |
os::cmd::expect_success 'oc delete all -l name=mytemplate' |
| 237 | 247 |
os::cmd::expect_success 'oc new-app https://github.com/openshift/ruby-hello-world' |
| ... | ... |
@@ -194,5 +194,21 @@ items: |
| 194 | 194 |
- kind: SystemGroup |
| 195 | 195 |
name: system:unauthenticated |
| 196 | 196 |
userNames: null |
| 197 |
+- apiVersion: v1 |
|
| 198 |
+ groupNames: |
|
| 199 |
+ - system:authenticated |
|
| 200 |
+ - system:unauthenticated |
|
| 201 |
+ kind: ClusterRoleBinding |
|
| 202 |
+ metadata: |
|
| 203 |
+ creationTimestamp: null |
|
| 204 |
+ name: system:discovery-binding |
|
| 205 |
+ roleRef: |
|
| 206 |
+ name: system:discovery |
|
| 207 |
+ subjects: |
|
| 208 |
+ - kind: SystemGroup |
|
| 209 |
+ name: system:authenticated |
|
| 210 |
+ - kind: SystemGroup |
|
| 211 |
+ name: system:unauthenticated |
|
| 212 |
+ userNames: null |
|
| 197 | 213 |
kind: List |
| 198 | 214 |
metadata: {}
|
| ... | ... |
@@ -102,6 +102,24 @@ items: |
| 102 | 102 |
- list |
| 103 | 103 |
- watch |
| 104 | 104 |
- apiGroups: |
| 105 |
+ - autoscaling |
|
| 106 |
+ attributeRestrictions: null |
|
| 107 |
+ resources: |
|
| 108 |
+ - horizontalpodautoscalers |
|
| 109 |
+ verbs: |
|
| 110 |
+ - get |
|
| 111 |
+ - list |
|
| 112 |
+ - watch |
|
| 113 |
+ - apiGroups: |
|
| 114 |
+ - batch |
|
| 115 |
+ attributeRestrictions: null |
|
| 116 |
+ resources: |
|
| 117 |
+ - jobs |
|
| 118 |
+ verbs: |
|
| 119 |
+ - get |
|
| 120 |
+ - list |
|
| 121 |
+ - watch |
|
| 122 |
+ - apiGroups: |
|
| 105 | 123 |
- extensions |
| 106 | 124 |
attributeRestrictions: null |
| 107 | 125 |
resources: |
| ... | ... |
@@ -146,7 +164,36 @@ items: |
| 146 | 146 |
creationTimestamp: null |
| 147 | 147 |
name: admin |
| 148 | 148 |
rules: |
| 149 |
- - apiGroups: null |
|
| 149 |
+ - apiGroups: |
|
| 150 |
+ - "" |
|
| 151 |
+ attributeRestrictions: null |
|
| 152 |
+ resources: |
|
| 153 |
+ - configmaps |
|
| 154 |
+ - endpoints |
|
| 155 |
+ - persistentvolumeclaims |
|
| 156 |
+ - pods |
|
| 157 |
+ - pods/attach |
|
| 158 |
+ - pods/exec |
|
| 159 |
+ - pods/log |
|
| 160 |
+ - pods/portforward |
|
| 161 |
+ - pods/proxy |
|
| 162 |
+ - replicationcontrollers |
|
| 163 |
+ - replicationcontrollers/scale |
|
| 164 |
+ - secrets |
|
| 165 |
+ - serviceaccounts |
|
| 166 |
+ - services |
|
| 167 |
+ - services/proxy |
|
| 168 |
+ verbs: |
|
| 169 |
+ - create |
|
| 170 |
+ - delete |
|
| 171 |
+ - deletecollection |
|
| 172 |
+ - get |
|
| 173 |
+ - list |
|
| 174 |
+ - patch |
|
| 175 |
+ - update |
|
| 176 |
+ - watch |
|
| 177 |
+ - apiGroups: |
|
| 178 |
+ - "" |
|
| 150 | 179 |
attributeRestrictions: null |
| 151 | 180 |
resources: |
| 152 | 181 |
- buildconfigs |
| ... | ... |
@@ -160,13 +207,11 @@ items: |
| 160 | 160 |
- builds/docker |
| 161 | 161 |
- builds/log |
| 162 | 162 |
- builds/source |
| 163 |
- - configmaps |
|
| 164 | 163 |
- deploymentconfigrollbacks |
| 165 | 164 |
- deploymentconfigs |
| 166 | 165 |
- deploymentconfigs/log |
| 167 | 166 |
- deploymentconfigs/scale |
| 168 | 167 |
- deployments |
| 169 |
- - endpoints |
|
| 170 | 168 |
- generatedeploymentconfigs |
| 171 | 169 |
- imagestreamimages |
| 172 | 170 |
- imagestreamimports |
| ... | ... |
@@ -176,23 +221,12 @@ items: |
| 176 | 176 |
- imagestreamtags |
| 177 | 177 |
- localresourceaccessreviews |
| 178 | 178 |
- localsubjectaccessreviews |
| 179 |
- - persistentvolumeclaims |
|
| 180 |
- - pods |
|
| 181 |
- - pods/attach |
|
| 182 |
- - pods/exec |
|
| 183 |
- - pods/log |
|
| 184 |
- - pods/portforward |
|
| 185 |
- - pods/proxy |
|
| 186 | 179 |
- processedtemplates |
| 187 | 180 |
- projects |
| 188 |
- - replicationcontrollers |
|
| 189 | 181 |
- resourceaccessreviews |
| 190 | 182 |
- rolebindings |
| 191 | 183 |
- roles |
| 192 | 184 |
- routes |
| 193 |
- - secrets |
|
| 194 |
- - serviceaccounts |
|
| 195 |
- - services |
|
| 196 | 185 |
- subjectaccessreviews |
| 197 | 186 |
- templateconfigs |
| 198 | 187 |
- templates |
| ... | ... |
@@ -206,6 +240,34 @@ items: |
| 206 | 206 |
- update |
| 207 | 207 |
- watch |
| 208 | 208 |
- apiGroups: |
| 209 |
+ - autoscaling |
|
| 210 |
+ attributeRestrictions: null |
|
| 211 |
+ resources: |
|
| 212 |
+ - horizontalpodautoscalers |
|
| 213 |
+ verbs: |
|
| 214 |
+ - create |
|
| 215 |
+ - delete |
|
| 216 |
+ - deletecollection |
|
| 217 |
+ - get |
|
| 218 |
+ - list |
|
| 219 |
+ - patch |
|
| 220 |
+ - update |
|
| 221 |
+ - watch |
|
| 222 |
+ - apiGroups: |
|
| 223 |
+ - batch |
|
| 224 |
+ attributeRestrictions: null |
|
| 225 |
+ resources: |
|
| 226 |
+ - jobs |
|
| 227 |
+ verbs: |
|
| 228 |
+ - create |
|
| 229 |
+ - delete |
|
| 230 |
+ - deletecollection |
|
| 231 |
+ - get |
|
| 232 |
+ - list |
|
| 233 |
+ - patch |
|
| 234 |
+ - update |
|
| 235 |
+ - watch |
|
| 236 |
+ - apiGroups: |
|
| 209 | 237 |
- extensions |
| 210 | 238 |
attributeRestrictions: null |
| 211 | 239 |
resources: |
| ... | ... |
@@ -274,7 +336,36 @@ items: |
| 274 | 274 |
creationTimestamp: null |
| 275 | 275 |
name: edit |
| 276 | 276 |
rules: |
| 277 |
- - apiGroups: null |
|
| 277 |
+ - apiGroups: |
|
| 278 |
+ - "" |
|
| 279 |
+ attributeRestrictions: null |
|
| 280 |
+ resources: |
|
| 281 |
+ - configmaps |
|
| 282 |
+ - endpoints |
|
| 283 |
+ - persistentvolumeclaims |
|
| 284 |
+ - pods |
|
| 285 |
+ - pods/attach |
|
| 286 |
+ - pods/exec |
|
| 287 |
+ - pods/log |
|
| 288 |
+ - pods/portforward |
|
| 289 |
+ - pods/proxy |
|
| 290 |
+ - replicationcontrollers |
|
| 291 |
+ - replicationcontrollers/scale |
|
| 292 |
+ - secrets |
|
| 293 |
+ - serviceaccounts |
|
| 294 |
+ - services |
|
| 295 |
+ - services/proxy |
|
| 296 |
+ verbs: |
|
| 297 |
+ - create |
|
| 298 |
+ - delete |
|
| 299 |
+ - deletecollection |
|
| 300 |
+ - get |
|
| 301 |
+ - list |
|
| 302 |
+ - patch |
|
| 303 |
+ - update |
|
| 304 |
+ - watch |
|
| 305 |
+ - apiGroups: |
|
| 306 |
+ - "" |
|
| 278 | 307 |
attributeRestrictions: null |
| 279 | 308 |
resources: |
| 280 | 309 |
- buildconfigs |
| ... | ... |
@@ -288,13 +379,11 @@ items: |
| 288 | 288 |
- builds/docker |
| 289 | 289 |
- builds/log |
| 290 | 290 |
- builds/source |
| 291 |
- - configmaps |
|
| 292 | 291 |
- deploymentconfigrollbacks |
| 293 | 292 |
- deploymentconfigs |
| 294 | 293 |
- deploymentconfigs/log |
| 295 | 294 |
- deploymentconfigs/scale |
| 296 | 295 |
- deployments |
| 297 |
- - endpoints |
|
| 298 | 296 |
- generatedeploymentconfigs |
| 299 | 297 |
- imagestreamimages |
| 300 | 298 |
- imagestreamimports |
| ... | ... |
@@ -302,19 +391,8 @@ items: |
| 302 | 302 |
- imagestreams |
| 303 | 303 |
- imagestreams/secrets |
| 304 | 304 |
- imagestreamtags |
| 305 |
- - persistentvolumeclaims |
|
| 306 |
- - pods |
|
| 307 |
- - pods/attach |
|
| 308 |
- - pods/exec |
|
| 309 |
- - pods/log |
|
| 310 |
- - pods/portforward |
|
| 311 |
- - pods/proxy |
|
| 312 | 305 |
- processedtemplates |
| 313 |
- - replicationcontrollers |
|
| 314 | 306 |
- routes |
| 315 |
- - secrets |
|
| 316 |
- - serviceaccounts |
|
| 317 |
- - services |
|
| 318 | 307 |
- templateconfigs |
| 319 | 308 |
- templates |
| 320 | 309 |
verbs: |
| ... | ... |
@@ -327,6 +405,34 @@ items: |
| 327 | 327 |
- update |
| 328 | 328 |
- watch |
| 329 | 329 |
- apiGroups: |
| 330 |
+ - autoscaling |
|
| 331 |
+ attributeRestrictions: null |
|
| 332 |
+ resources: |
|
| 333 |
+ - horizontalpodautoscalers |
|
| 334 |
+ verbs: |
|
| 335 |
+ - create |
|
| 336 |
+ - delete |
|
| 337 |
+ - deletecollection |
|
| 338 |
+ - get |
|
| 339 |
+ - list |
|
| 340 |
+ - patch |
|
| 341 |
+ - update |
|
| 342 |
+ - watch |
|
| 343 |
+ - apiGroups: |
|
| 344 |
+ - batch |
|
| 345 |
+ attributeRestrictions: null |
|
| 346 |
+ resources: |
|
| 347 |
+ - jobs |
|
| 348 |
+ verbs: |
|
| 349 |
+ - create |
|
| 350 |
+ - delete |
|
| 351 |
+ - deletecollection |
|
| 352 |
+ - get |
|
| 353 |
+ - list |
|
| 354 |
+ - patch |
|
| 355 |
+ - update |
|
| 356 |
+ - watch |
|
| 357 |
+ - apiGroups: |
|
| 330 | 358 |
- extensions |
| 331 | 359 |
attributeRestrictions: null |
| 332 | 360 |
resources: |
| ... | ... |
@@ -444,6 +550,24 @@ items: |
| 444 | 444 |
- list |
| 445 | 445 |
- watch |
| 446 | 446 |
- apiGroups: |
| 447 |
+ - autoscaling |
|
| 448 |
+ attributeRestrictions: null |
|
| 449 |
+ resources: |
|
| 450 |
+ - horizontalpodautoscalers |
|
| 451 |
+ verbs: |
|
| 452 |
+ - get |
|
| 453 |
+ - list |
|
| 454 |
+ - watch |
|
| 455 |
+ - apiGroups: |
|
| 456 |
+ - batch |
|
| 457 |
+ attributeRestrictions: null |
|
| 458 |
+ resources: |
|
| 459 |
+ - jobs |
|
| 460 |
+ verbs: |
|
| 461 |
+ - get |
|
| 462 |
+ - list |
|
| 463 |
+ - watch |
|
| 464 |
+ - apiGroups: |
|
| 447 | 465 |
- extensions |
| 448 | 466 |
attributeRestrictions: null |
| 449 | 467 |
resources: |
| ... | ... |
@@ -518,21 +642,13 @@ items: |
| 518 | 518 |
attributeRestrictions: null |
| 519 | 519 |
nonResourceURLs: |
| 520 | 520 |
- /api |
| 521 |
- - /api/ |
|
| 522 |
- - /api/v1 |
|
| 523 |
- - /api/v1/ |
|
| 521 |
+ - /api/* |
|
| 524 | 522 |
- /apis |
| 525 |
- - /apis/ |
|
| 526 |
- - /apis/extensions |
|
| 527 |
- - /apis/extensions/ |
|
| 528 |
- - /apis/extensions/v1beta1 |
|
| 529 |
- - /apis/extensions/v1beta1/ |
|
| 523 |
+ - /apis/* |
|
| 530 | 524 |
- /healthz |
| 531 | 525 |
- /healthz/* |
| 532 | 526 |
- /oapi |
| 533 |
- - /oapi/ |
|
| 534 |
- - /oapi/v1 |
|
| 535 |
- - /oapi/v1/ |
|
| 527 |
+ - /oapi/* |
|
| 536 | 528 |
- /osapi |
| 537 | 529 |
- /osapi/ |
| 538 | 530 |
- /version |
| ... | ... |
@@ -780,6 +896,7 @@ items: |
| 780 | 780 |
resources: |
| 781 | 781 |
- nodes/log |
| 782 | 782 |
- nodes/metrics |
| 783 |
+ - nodes/proxy |
|
| 783 | 784 |
- nodes/stats |
| 784 | 785 |
verbs: |
| 785 | 786 |
- '*' |
| ... | ... |
@@ -998,6 +1115,27 @@ items: |
| 998 | 998 |
kind: ClusterRole |
| 999 | 999 |
metadata: |
| 1000 | 1000 |
creationTimestamp: null |
| 1001 |
+ name: system:discovery |
|
| 1002 |
+ rules: |
|
| 1003 |
+ - apiGroups: null |
|
| 1004 |
+ attributeRestrictions: null |
|
| 1005 |
+ nonResourceURLs: |
|
| 1006 |
+ - /api |
|
| 1007 |
+ - /api/* |
|
| 1008 |
+ - /apis |
|
| 1009 |
+ - /apis/* |
|
| 1010 |
+ - /oapi |
|
| 1011 |
+ - /oapi/* |
|
| 1012 |
+ - /osapi |
|
| 1013 |
+ - /osapi/ |
|
| 1014 |
+ - /version |
|
| 1015 |
+ resources: [] |
|
| 1016 |
+ verbs: |
|
| 1017 |
+ - get |
|
| 1018 |
+- apiVersion: v1 |
|
| 1019 |
+ kind: ClusterRole |
|
| 1020 |
+ metadata: |
|
| 1021 |
+ creationTimestamp: null |
|
| 1001 | 1022 |
name: registry-admin |
| 1002 | 1023 |
rules: |
| 1003 | 1024 |
- apiGroups: null |
| ... | ... |
@@ -1277,14 +1415,17 @@ items: |
| 1277 | 1277 |
rules: |
| 1278 | 1278 |
- apiGroups: |
| 1279 | 1279 |
- extensions |
| 1280 |
+ - autoscaling |
|
| 1280 | 1281 |
attributeRestrictions: null |
| 1281 | 1282 |
resources: |
| 1282 | 1283 |
- horizontalpodautoscalers |
| 1283 | 1284 |
verbs: |
| 1284 | 1285 |
- get |
| 1285 | 1286 |
- list |
| 1287 |
+ - watch |
|
| 1286 | 1288 |
- apiGroups: |
| 1287 | 1289 |
- extensions |
| 1290 |
+ - autoscaling |
|
| 1288 | 1291 |
attributeRestrictions: null |
| 1289 | 1292 |
resources: |
| 1290 | 1293 |
- horizontalpodautoscalers/status |
| ... | ... |
@@ -1292,6 +1433,7 @@ items: |
| 1292 | 1292 |
- update |
| 1293 | 1293 |
- apiGroups: |
| 1294 | 1294 |
- extensions |
| 1295 |
+ - "" |
|
| 1295 | 1296 |
attributeRestrictions: null |
| 1296 | 1297 |
resources: |
| 1297 | 1298 |
- replicationcontrollers/scale |
| ... | ... |
@@ -1335,6 +1477,7 @@ items: |
| 1335 | 1335 |
rules: |
| 1336 | 1336 |
- apiGroups: |
| 1337 | 1337 |
- extensions |
| 1338 |
+ - batch |
|
| 1338 | 1339 |
attributeRestrictions: null |
| 1339 | 1340 |
resources: |
| 1340 | 1341 |
- jobs |
| ... | ... |
@@ -1343,6 +1486,7 @@ items: |
| 1343 | 1343 |
- watch |
| 1344 | 1344 |
- apiGroups: |
| 1345 | 1345 |
- extensions |
| 1346 |
+ - batch |
|
| 1346 | 1347 |
attributeRestrictions: null |
| 1347 | 1348 |
resources: |
| 1348 | 1349 |
- jobs/status |
| ... | ... |
@@ -1374,6 +1518,40 @@ items: |
| 1374 | 1374 |
kind: ClusterRole |
| 1375 | 1375 |
metadata: |
| 1376 | 1376 |
creationTimestamp: null |
| 1377 |
+ name: system:namespace-controller |
|
| 1378 |
+ rules: |
|
| 1379 |
+ - apiGroups: |
|
| 1380 |
+ - "" |
|
| 1381 |
+ attributeRestrictions: null |
|
| 1382 |
+ resources: |
|
| 1383 |
+ - namespaces |
|
| 1384 |
+ verbs: |
|
| 1385 |
+ - delete |
|
| 1386 |
+ - get |
|
| 1387 |
+ - list |
|
| 1388 |
+ - watch |
|
| 1389 |
+ - apiGroups: |
|
| 1390 |
+ - "" |
|
| 1391 |
+ attributeRestrictions: null |
|
| 1392 |
+ resources: |
|
| 1393 |
+ - namespaces/finalize |
|
| 1394 |
+ - namespaces/status |
|
| 1395 |
+ verbs: |
|
| 1396 |
+ - update |
|
| 1397 |
+ - apiGroups: |
|
| 1398 |
+ - '*' |
|
| 1399 |
+ attributeRestrictions: null |
|
| 1400 |
+ resources: |
|
| 1401 |
+ - '*' |
|
| 1402 |
+ verbs: |
|
| 1403 |
+ - delete |
|
| 1404 |
+ - deletecollection |
|
| 1405 |
+ - get |
|
| 1406 |
+ - list |
|
| 1407 |
+- apiVersion: v1 |
|
| 1408 |
+ kind: ClusterRole |
|
| 1409 |
+ metadata: |
|
| 1410 |
+ creationTimestamp: null |
|
| 1377 | 1411 |
name: system:pv-binder-controller |
| 1378 | 1412 |
rules: |
| 1379 | 1413 |
- apiGroups: null |
| ... | ... |
@@ -8,6 +8,7 @@ import ( |
| 8 | 8 |
"strings" |
| 9 | 9 |
"testing" |
| 10 | 10 |
|
| 11 |
+ kapi "k8s.io/kubernetes/pkg/api" |
|
| 11 | 12 |
"k8s.io/kubernetes/pkg/api/unversioned" |
| 12 | 13 |
"k8s.io/kubernetes/pkg/client/restclient" |
| 13 | 14 |
"k8s.io/kubernetes/pkg/util/sets" |
| ... | ... |
@@ -59,7 +60,7 @@ func TestRootAPIPaths(t *testing.T) {
|
| 59 | 59 |
// We need to make sure that any APILevels specified in the config are present in the RootPaths, and that |
| 60 | 60 |
// any not specified are not |
| 61 | 61 |
expectedOpenShiftAPILevels := sets.NewString(masterConfig.APILevels...) |
| 62 |
- expectedKubeAPILevels := sets.NewString(configapi.GetEnabledAPIVersionsForGroup(*masterConfig.KubernetesMasterConfig, configapi.APIGroupKube)...) |
|
| 62 |
+ expectedKubeAPILevels := sets.NewString(configapi.GetEnabledAPIVersionsForGroup(*masterConfig.KubernetesMasterConfig, kapi.GroupName)...) |
|
| 63 | 63 |
actualOpenShiftAPILevels := sets.String{}
|
| 64 | 64 |
actualKubeAPILevels := sets.String{}
|
| 65 | 65 |
for _, route := range broadcastRootPaths.Paths {
|
| ... | ... |
@@ -178,6 +178,10 @@ func TestAuthorizationResolution(t *testing.T) {
|
| 178 | 178 |
var globalClusterAdminUsers = sets.NewString() |
| 179 | 179 |
var globalClusterAdminGroups = sets.NewString("system:cluster-admins", "system:masters")
|
| 180 | 180 |
|
| 181 |
+// This list includes the admins from above, plus users or groups known to have global view access |
|
| 182 |
+var globalClusterReaderUsers = sets.NewString("system:serviceaccount:openshift-infra:namespace-controller")
|
|
| 183 |
+var globalClusterReaderGroups = sets.NewString("system:cluster-readers", "system:cluster-admins", "system:masters")
|
|
| 184 |
+ |
|
| 181 | 185 |
type resourceAccessReviewTest struct {
|
| 182 | 186 |
description string |
| 183 | 187 |
clientInterface client.ResourceAccessReviewInterface |
| ... | ... |
@@ -339,12 +343,12 @@ func TestAuthorizationResourceAccessReview(t *testing.T) {
|
| 339 | 339 |
review: localRequestWhoCanViewDeployments, |
| 340 | 340 |
response: authorizationapi.ResourceAccessReviewResponse{
|
| 341 | 341 |
Users: sets.NewString("harold", "valerie"),
|
| 342 |
- Groups: globalClusterAdminGroups, |
|
| 342 |
+ Groups: sets.NewString(), |
|
| 343 | 343 |
Namespace: "hammer-project", |
| 344 | 344 |
}, |
| 345 | 345 |
} |
| 346 |
- test.response.Users.Insert(globalClusterAdminUsers.List()...) |
|
| 347 |
- test.response.Groups.Insert("system:cluster-readers")
|
|
| 346 |
+ test.response.Users.Insert(globalClusterReaderUsers.List()...) |
|
| 347 |
+ test.response.Groups.Insert(globalClusterReaderGroups.List()...) |
|
| 348 | 348 |
test.run(t) |
| 349 | 349 |
} |
| 350 | 350 |
{
|
| ... | ... |
@@ -354,12 +358,12 @@ func TestAuthorizationResourceAccessReview(t *testing.T) {
|
| 354 | 354 |
review: localRequestWhoCanViewDeployments, |
| 355 | 355 |
response: authorizationapi.ResourceAccessReviewResponse{
|
| 356 | 356 |
Users: sets.NewString("mark", "edgar"),
|
| 357 |
- Groups: globalClusterAdminGroups, |
|
| 357 |
+ Groups: sets.NewString(), |
|
| 358 | 358 |
Namespace: "mallet-project", |
| 359 | 359 |
}, |
| 360 | 360 |
} |
| 361 |
- test.response.Users.Insert(globalClusterAdminUsers.List()...) |
|
| 362 |
- test.response.Groups.Insert("system:cluster-readers")
|
|
| 361 |
+ test.response.Users.Insert(globalClusterReaderUsers.List()...) |
|
| 362 |
+ test.response.Groups.Insert(globalClusterReaderGroups.List()...) |
|
| 363 | 363 |
test.run(t) |
| 364 | 364 |
} |
| 365 | 365 |
|
| ... | ... |
@@ -381,11 +385,12 @@ func TestAuthorizationResourceAccessReview(t *testing.T) {
|
| 381 | 381 |
clientInterface: clusterAdminClient.ResourceAccessReviews(), |
| 382 | 382 |
review: requestWhoCanViewDeployments, |
| 383 | 383 |
response: authorizationapi.ResourceAccessReviewResponse{
|
| 384 |
- Users: globalClusterAdminUsers, |
|
| 385 |
- Groups: globalClusterAdminGroups, |
|
| 384 |
+ Users: sets.NewString(), |
|
| 385 |
+ Groups: sets.NewString(), |
|
| 386 | 386 |
}, |
| 387 | 387 |
} |
| 388 |
- test.response.Groups.Insert("system:cluster-readers")
|
|
| 388 |
+ test.response.Users.Insert(globalClusterReaderUsers.List()...) |
|
| 389 |
+ test.response.Groups.Insert(globalClusterReaderGroups.List()...) |
|
| 389 | 390 |
test.run(t) |
| 390 | 391 |
} |
| 391 | 392 |
|
| ... | ... |
@@ -399,12 +404,12 @@ func TestAuthorizationResourceAccessReview(t *testing.T) {
|
| 399 | 399 |
review: localRequestWhoCanViewDeployments, |
| 400 | 400 |
response: authorizationapi.ResourceAccessReviewResponse{
|
| 401 | 401 |
Users: sets.NewString("edgar"),
|
| 402 |
- Groups: globalClusterAdminGroups, |
|
| 402 |
+ Groups: sets.NewString(), |
|
| 403 | 403 |
Namespace: "mallet-project", |
| 404 | 404 |
}, |
| 405 | 405 |
} |
| 406 |
- test.response.Users.Insert(globalClusterAdminUsers.List()...) |
|
| 407 |
- test.response.Groups.Insert("system:cluster-readers")
|
|
| 406 |
+ test.response.Users.Insert(globalClusterReaderUsers.List()...) |
|
| 407 |
+ test.response.Groups.Insert(globalClusterReaderGroups.List()...) |
|
| 408 | 408 |
test.run(t) |
| 409 | 409 |
} |
| 410 | 410 |
} |
| ... | ... |
@@ -1047,7 +1052,7 @@ func TestOldLocalResourceAccessReviewEndpoint(t *testing.T) {
|
| 1047 | 1047 |
|
| 1048 | 1048 |
expectedResponse := &authorizationapi.ResourceAccessReviewResponse{
|
| 1049 | 1049 |
Namespace: namespace, |
| 1050 |
- Users: sets.NewString("harold", "system:serviceaccount:hammer-project:builder"),
|
|
| 1050 |
+ Users: sets.NewString("harold", "system:serviceaccount:hammer-project:builder", "system:serviceaccount:openshift-infra:namespace-controller"),
|
|
| 1051 | 1051 |
Groups: sets.NewString("system:cluster-admins", "system:masters", "system:serviceaccounts:hammer-project"),
|
| 1052 | 1052 |
} |
| 1053 | 1053 |
if (actualResponse.Namespace != expectedResponse.Namespace) || |
| ... | ... |
@@ -1074,7 +1079,7 @@ func TestOldLocalResourceAccessReviewEndpoint(t *testing.T) {
|
| 1074 | 1074 |
|
| 1075 | 1075 |
expectedResponse := &authorizationapi.ResourceAccessReviewResponse{
|
| 1076 | 1076 |
Namespace: namespace, |
| 1077 |
- Users: sets.NewString("harold", "system:serviceaccount:hammer-project:builder"),
|
|
| 1077 |
+ Users: sets.NewString("harold", "system:serviceaccount:hammer-project:builder", "system:serviceaccount:openshift-infra:namespace-controller"),
|
|
| 1078 | 1078 |
Groups: sets.NewString("system:cluster-admins", "system:masters", "system:serviceaccounts:hammer-project"),
|
| 1079 | 1079 |
} |
| 1080 | 1080 |
if (actualResponse.Namespace != expectedResponse.Namespace) || |
| ... | ... |
@@ -11,7 +11,6 @@ import ( |
| 11 | 11 |
|
| 12 | 12 |
kapi "k8s.io/kubernetes/pkg/api" |
| 13 | 13 |
"k8s.io/kubernetes/pkg/api/errors" |
| 14 |
- "k8s.io/kubernetes/pkg/api/unversioned" |
|
| 15 | 14 |
expapi "k8s.io/kubernetes/pkg/apis/extensions" |
| 16 | 15 |
"k8s.io/kubernetes/pkg/util/wait" |
| 17 | 16 |
) |
| ... | ... |
@@ -68,7 +67,6 @@ func TestExtensionsAPIDeletion(t *testing.T) {
|
| 68 | 68 |
job := expapi.Job{
|
| 69 | 69 |
ObjectMeta: kapi.ObjectMeta{Name: "test-job"},
|
| 70 | 70 |
Spec: expapi.JobSpec{
|
| 71 |
- Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
|
| 72 | 71 |
Template: kapi.PodTemplateSpec{
|
| 73 | 72 |
ObjectMeta: kapi.ObjectMeta{Labels: map[string]string{"foo": "bar"}},
|
| 74 | 73 |
Spec: kapi.PodSpec{
|
| ... | ... |
@@ -25,7 +25,7 @@ func TestExtensionsAPIDisabled(t *testing.T) {
|
| 25 | 25 |
} |
| 26 | 26 |
|
| 27 | 27 |
// Disable all extensions API versions |
| 28 |
- masterConfig.KubernetesMasterConfig.DisabledAPIGroupVersions = map[string][]string{"extensions": {"*"}}
|
|
| 28 |
+ masterConfig.KubernetesMasterConfig.DisabledAPIGroupVersions = map[string][]string{"extensions": {"*"}, "autoscaling": {"*"}, "batch": {"*"}}
|
|
| 29 | 29 |
|
| 30 | 30 |
clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterConfig) |
| 31 | 31 |
if err != nil {
|
| 32 | 32 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,164 @@ |
| 0 |
+// +build integration |
|
| 1 |
+ |
|
| 2 |
+package integration |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "path" |
|
| 6 |
+ "testing" |
|
| 7 |
+ |
|
| 8 |
+ "golang.org/x/net/context" |
|
| 9 |
+ |
|
| 10 |
+ etcd "github.com/coreos/etcd/client" |
|
| 11 |
+ testutil "github.com/openshift/origin/test/util" |
|
| 12 |
+ testserver "github.com/openshift/origin/test/util/server" |
|
| 13 |
+ |
|
| 14 |
+ kapi "k8s.io/kubernetes/pkg/api" |
|
| 15 |
+ "k8s.io/kubernetes/pkg/api/unversioned" |
|
| 16 |
+ "k8s.io/kubernetes/pkg/apis/extensions" |
|
| 17 |
+ extensions_v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" |
|
| 18 |
+ kclient "k8s.io/kubernetes/pkg/client/unversioned" |
|
| 19 |
+ "k8s.io/kubernetes/pkg/runtime" |
|
| 20 |
+) |
|
| 21 |
+ |
|
| 22 |
+// TODO: enable once storage is separable |
|
| 23 |
+// func TestStorageVersionsSeparated(t *testing.T) {
|
|
| 24 |
+// runStorageTest(t, "separated", |
|
| 25 |
+// autoscaling_v1.SchemeGroupVersion, |
|
| 26 |
+// batch_v1.SchemeGroupVersion, |
|
| 27 |
+// extensions_v1beta1.SchemeGroupVersion, |
|
| 28 |
+// ) |
|
| 29 |
+// } |
|
| 30 |
+ |
|
| 31 |
+func TestStorageVersionsUnified(t *testing.T) {
|
|
| 32 |
+ runStorageTest(t, "unified", |
|
| 33 |
+ extensions_v1beta1.SchemeGroupVersion, |
|
| 34 |
+ extensions_v1beta1.SchemeGroupVersion, |
|
| 35 |
+ extensions_v1beta1.SchemeGroupVersion, |
|
| 36 |
+ ) |
|
| 37 |
+} |
|
| 38 |
+ |
|
| 39 |
+func runStorageTest(t *testing.T, ns string, autoscalingVersion, batchVersion, extensionsVersion unversioned.GroupVersion) {
|
|
| 40 |
+ etcdServer := testutil.RequireEtcd(t) |
|
| 41 |
+ |
|
| 42 |
+ masterConfig, err := testserver.DefaultMasterOptions() |
|
| 43 |
+ if err != nil {
|
|
| 44 |
+ t.Fatalf("unexpected error: %v", err)
|
|
| 45 |
+ } |
|
| 46 |
+ |
|
| 47 |
+ keys := etcd.NewKeysAPI(etcdServer.Client) |
|
| 48 |
+ getGVKFromEtcd := func(prefix, name string) (*unversioned.GroupVersionKind, error) {
|
|
| 49 |
+ key := path.Join(masterConfig.EtcdStorageConfig.KubernetesStoragePrefix, prefix, ns, name) |
|
| 50 |
+ resp, err := keys.Get(context.TODO(), key, nil) |
|
| 51 |
+ if err != nil {
|
|
| 52 |
+ return nil, err |
|
| 53 |
+ } |
|
| 54 |
+ _, gvk, err := runtime.UnstructuredJSONScheme.Decode([]byte(resp.Node.Value), nil, nil) |
|
| 55 |
+ return gvk, err |
|
| 56 |
+ } |
|
| 57 |
+ |
|
| 58 |
+ // TODO: Set storage versions for API groups |
|
| 59 |
+ // masterConfig.EtcdStorageConfig.StorageVersions[autoscaling.GroupName] = autoscalingVersion.String() |
|
| 60 |
+ // masterConfig.EtcdStorageConfig.StorageVersions[batch.GroupName] = batchVersion.String() |
|
| 61 |
+ // masterConfig.EtcdStorageConfig.StorageVersions[extensions.GroupName] = extensionsVersion.String() |
|
| 62 |
+ |
|
| 63 |
+ clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterConfig) |
|
| 64 |
+ if err != nil {
|
|
| 65 |
+ t.Fatalf("unexpected error: %v", err)
|
|
| 66 |
+ } |
|
| 67 |
+ clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig) |
|
| 68 |
+ if err != nil {
|
|
| 69 |
+ t.Fatalf("unexpected error: %v", err)
|
|
| 70 |
+ } |
|
| 71 |
+ clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig) |
|
| 72 |
+ if err != nil {
|
|
| 73 |
+ t.Fatalf("unexpected error: %v", err)
|
|
| 74 |
+ } |
|
| 75 |
+ |
|
| 76 |
+ // create the containing project |
|
| 77 |
+ if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, ns, "admin"); err != nil {
|
|
| 78 |
+ t.Fatalf("unexpected error creating the project: %v", err)
|
|
| 79 |
+ } |
|
| 80 |
+ projectAdminClient, projectAdminKubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "admin") |
|
| 81 |
+ if err != nil {
|
|
| 82 |
+ t.Fatalf("unexpected error getting project admin client: %v", err)
|
|
| 83 |
+ } |
|
| 84 |
+ if err := testutil.WaitForPolicyUpdate(projectAdminClient, ns, "get", extensions.Resource("horizontalpodautoscalers"), true); err != nil {
|
|
| 85 |
+ t.Fatalf("unexpected error waiting for policy update: %v", err)
|
|
| 86 |
+ } |
|
| 87 |
+ |
|
| 88 |
+ jobTestcases := map[string]struct {
|
|
| 89 |
+ creator kclient.JobInterface |
|
| 90 |
+ }{
|
|
| 91 |
+ "batch": {creator: projectAdminKubeClient.Batch().Jobs(ns)},
|
|
| 92 |
+ "extensions": {creator: projectAdminKubeClient.Extensions().Jobs(ns)},
|
|
| 93 |
+ } |
|
| 94 |
+ for name, testcase := range jobTestcases {
|
|
| 95 |
+ job := extensions.Job{
|
|
| 96 |
+ ObjectMeta: kapi.ObjectMeta{Name: name + "-job"},
|
|
| 97 |
+ Spec: extensions.JobSpec{
|
|
| 98 |
+ Template: kapi.PodTemplateSpec{
|
|
| 99 |
+ Spec: kapi.PodSpec{
|
|
| 100 |
+ RestartPolicy: kapi.RestartPolicyNever, |
|
| 101 |
+ Containers: []kapi.Container{{Name: "containername", Image: "containerimage"}},
|
|
| 102 |
+ }, |
|
| 103 |
+ }, |
|
| 104 |
+ }, |
|
| 105 |
+ } |
|
| 106 |
+ |
|
| 107 |
+ // Create a Job |
|
| 108 |
+ if _, err := testcase.creator.Create(&job); err != nil {
|
|
| 109 |
+ t.Fatalf("%s: unexpected error creating Job: %v", name, err)
|
|
| 110 |
+ } |
|
| 111 |
+ |
|
| 112 |
+ // Ensure it is persisted correctly |
|
| 113 |
+ if gvk, err := getGVKFromEtcd("jobs", job.Name); err != nil {
|
|
| 114 |
+ t.Fatalf("%s: unexpected error reading Job: %v", name, err)
|
|
| 115 |
+ } else if *gvk != batchVersion.WithKind("Job") {
|
|
| 116 |
+ t.Fatalf("%s: expected api version %s in etcd, got %s reading Job", name, batchVersion, gvk)
|
|
| 117 |
+ } |
|
| 118 |
+ |
|
| 119 |
+ // Ensure it is accessible from both APIs |
|
| 120 |
+ if _, err := projectAdminKubeClient.Batch().Jobs(ns).Get(job.Name); err != nil {
|
|
| 121 |
+ t.Errorf("%s: Error reading Job from the batch client: %#v", name, err)
|
|
| 122 |
+ } |
|
| 123 |
+ if _, err := projectAdminKubeClient.Extensions().Jobs(ns).Get(job.Name); err != nil {
|
|
| 124 |
+ t.Errorf("%s: Error reading Job from the extensions client: %#v", name, err)
|
|
| 125 |
+ } |
|
| 126 |
+ } |
|
| 127 |
+ |
|
| 128 |
+ hpaTestcases := map[string]struct {
|
|
| 129 |
+ creator kclient.HorizontalPodAutoscalerInterface |
|
| 130 |
+ }{
|
|
| 131 |
+ "autoscaling": {creator: projectAdminKubeClient.Autoscaling().HorizontalPodAutoscalers(ns)},
|
|
| 132 |
+ "extensions": {creator: projectAdminKubeClient.Extensions().HorizontalPodAutoscalers(ns)},
|
|
| 133 |
+ } |
|
| 134 |
+ for name, testcase := range hpaTestcases {
|
|
| 135 |
+ hpa := extensions.HorizontalPodAutoscaler{
|
|
| 136 |
+ ObjectMeta: kapi.ObjectMeta{Name: name + "-hpa"},
|
|
| 137 |
+ Spec: extensions.HorizontalPodAutoscalerSpec{
|
|
| 138 |
+ MaxReplicas: 1, |
|
| 139 |
+ ScaleRef: extensions.SubresourceReference{Kind: "ReplicationController", Name: "myrc", Subresource: "scale"},
|
|
| 140 |
+ }, |
|
| 141 |
+ } |
|
| 142 |
+ |
|
| 143 |
+ // Create an HPA |
|
| 144 |
+ if _, err := testcase.creator.Create(&hpa); err != nil {
|
|
| 145 |
+ t.Fatalf("%s: unexpected error creating HPA: %v", name, err)
|
|
| 146 |
+ } |
|
| 147 |
+ |
|
| 148 |
+ // Make sure it is persisted correctly |
|
| 149 |
+ if gvk, err := getGVKFromEtcd("horizontalpodautoscalers", hpa.Name); err != nil {
|
|
| 150 |
+ t.Fatalf("%s: unexpected error reading HPA: %v", name, err)
|
|
| 151 |
+ } else if *gvk != autoscalingVersion.WithKind("HorizontalPodAutoscaler") {
|
|
| 152 |
+ t.Fatalf("%s: expected api version %s in etcd, got %s reading HPA", name, autoscalingVersion, gvk)
|
|
| 153 |
+ } |
|
| 154 |
+ |
|
| 155 |
+ // Make sure it is available from both APIs |
|
| 156 |
+ if _, err := projectAdminKubeClient.Autoscaling().HorizontalPodAutoscalers(ns).Get(hpa.Name); err != nil {
|
|
| 157 |
+ t.Errorf("%s: Error reading HPA.autoscaling from the autoscaling/v1 API: %#v", name, err)
|
|
| 158 |
+ } |
|
| 159 |
+ if _, err := projectAdminKubeClient.Extensions().HorizontalPodAutoscalers(ns).Get(hpa.Name); err != nil {
|
|
| 160 |
+ t.Errorf("%s: Error reading HPA.extensions from the extensions/v1beta1 API: %#v", name, err)
|
|
| 161 |
+ } |
|
| 162 |
+ } |
|
| 163 |
+} |