Browse code

Add preliminary quota support for emptyDir volumes on XFS.

Introduces a volume plugin which simply wraps the k8s emptyDir volume plugin,
and layers in functionality to apply quotas to an XFS filesystem. This approach
allows us to avoid carrying a patch to k8s to accomplish the goal. Composition is
used over inheritance to ensure we don't suddenly no longer implement the correct
interface after a k8s rebase.

The behavior is triggered via a new optional setting in node-config.yaml:

volumeConfig:
  localQuota:
    perFSGroup: 512Mi

The implementation will use FSGroup as the gid for the quota, if no FSGroup is
specified (indicating the request matched an SCC with RunAsAny) the quota
application is quietly skipped.

Errors will be thrown if the config option is specified, but the volume dir
does not reside on an XFS filesystem, or if it is mounted without the gquota option.
XFS cannot remove a quota without unmounting the filesystem and re-creating all
other quotas, so unused quotas are left dangling. Should the same FSGroup ever
appear on the node the quota will be re-initialized to whatever it should be at
that point in time.

Devan Goodwin authored on 2016/02/04 00:30:51
Showing 15 changed files
... ...
@@ -1,6 +1,7 @@
1 1
 package api
2 2
 
3 3
 import (
4
+	"k8s.io/kubernetes/pkg/api/resource"
4 5
 	"k8s.io/kubernetes/pkg/api/unversioned"
5 6
 	"k8s.io/kubernetes/pkg/runtime"
6 7
 	"k8s.io/kubernetes/pkg/util/sets"
... ...
@@ -116,6 +117,23 @@ type NodeConfig struct {
116 116
 
117 117
 	// IPTablesSyncPeriod is how often iptable rules are refreshed
118 118
 	IPTablesSyncPeriod string
119
+
120
+	// VolumeConfig contains options for configuring volumes on the node.
121
+	VolumeConfig VolumeConfig
122
+}
123
+
124
+// VolumeConfig contains options for configuring volumes on the node.
125
+type VolumeConfig struct {
126
+	// LocalQuota contains options for controlling local volume quota on the node.
127
+	LocalQuota LocalQuota
128
+}
129
+
130
+// LocalQuota contains options for controlling local volume quota on the node.
131
+type LocalQuota struct {
132
+	// PerFSGroup can be specified to enable a quota on local storage use per unique FSGroup ID.
133
+	// At present this is only implemented for emptyDir volumes, and if the underlying
134
+	// volumeDirectory is on an XFS filesystem.
135
+	PerFSGroup *resource.Quantity
119 136
 }
120 137
 
121 138
 // NodeNetworkConfig provides network options for the node
... ...
@@ -1,6 +1,7 @@
1 1
 package v1
2 2
 
3 3
 import (
4
+	"k8s.io/kubernetes/pkg/api"
4 5
 	"k8s.io/kubernetes/pkg/conversion"
5 6
 	"k8s.io/kubernetes/pkg/runtime"
6 7
 	"k8s.io/kubernetes/pkg/runtime/serializer"
... ...
@@ -306,6 +307,7 @@ func addConversionFuncs(scheme *runtime.Scheme) {
306 306
 			out.Location = in.Location
307 307
 			return nil
308 308
 		},
309
+		api.Convert_resource_Quantity_To_resource_Quantity,
309 310
 	)
310 311
 	if err != nil {
311 312
 		// If one of the conversion functions is malformed, detect it immediately.
... ...
@@ -362,6 +362,15 @@ func (LDAPSyncConfig) SwaggerDoc() map[string]string {
362 362
 	return map_LDAPSyncConfig
363 363
 }
364 364
 
365
+var map_LocalQuota = map[string]string{
366
+	"":           "LocalQuota contains options for controlling local volume quota on the node.",
367
+	"perFSGroup": "FSGroup can be specified to enable a quota on local storage use per unique FSGroup ID. At present this is only implemented for emptyDir volumes, and if the underlying volumeDirectory is on an XFS filesystem.",
368
+}
369
+
370
+func (LocalQuota) SwaggerDoc() map[string]string {
371
+	return map_LocalQuota
372
+}
373
+
365 374
 var map_MasterClients = map[string]string{
366 375
 	"": "MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes",
367 376
 	"openshiftLoopbackKubeConfig":  "OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master",
... ...
@@ -457,6 +466,7 @@ var map_NodeConfig = map[string]string{
457 457
 	"kubeletArguments":    "KubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's command line arguments.  These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.",
458 458
 	"proxyArguments":      "ProxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's command line arguments.  These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.",
459 459
 	"iptablesSyncPeriod":  "IPTablesSyncPeriod is how often iptable rules are refreshed",
460
+	"volumeConfig":        "VolumeConfig contains options for configuring volumes on the node.",
460 461
 }
461 462
 
462 463
 func (NodeConfig) SwaggerDoc() map[string]string {
... ...
@@ -750,3 +760,12 @@ var map_UserAgentMatchingConfig = map[string]string{
750 750
 func (UserAgentMatchingConfig) SwaggerDoc() map[string]string {
751 751
 	return map_UserAgentMatchingConfig
752 752
 }
753
+
754
+var map_VolumeConfig = map[string]string{
755
+	"":           "VolumeConfig contains options for configuring volumes on the node.",
756
+	"localQuota": "LocalQuota contains options for controlling local volume quota on the node.",
757
+}
758
+
759
+func (VolumeConfig) SwaggerDoc() map[string]string {
760
+	return map_VolumeConfig
761
+}
... ...
@@ -1,6 +1,7 @@
1 1
 package v1
2 2
 
3 3
 import (
4
+	"k8s.io/kubernetes/pkg/api/resource"
4 5
 	"k8s.io/kubernetes/pkg/api/unversioned"
5 6
 	"k8s.io/kubernetes/pkg/runtime"
6 7
 )
... ...
@@ -68,6 +69,23 @@ type NodeConfig struct {
68 68
 
69 69
 	// IPTablesSyncPeriod is how often iptable rules are refreshed
70 70
 	IPTablesSyncPeriod string `json:"iptablesSyncPeriod"`
71
+
72
+	// VolumeConfig contains options for configuring volumes on the node.
73
+	VolumeConfig VolumeConfig `json:"volumeConfig"`
74
+}
75
+
76
+// VolumeConfig contains options for configuring volumes on the node.
77
+type VolumeConfig struct {
78
+	// LocalQuota contains options for controlling local volume quota on the node.
79
+	LocalQuota LocalQuota `json:"localQuota"`
80
+}
81
+
82
// LocalQuota contains options for controlling local volume quota on the node.
type LocalQuota struct {
	// PerFSGroup can be specified to enable a quota on local storage use per unique FSGroup ID.
	// At present this is only implemented for emptyDir volumes, and if the underlying
	// volumeDirectory is on an XFS filesystem.
	PerFSGroup *resource.Quantity `json:"perFSGroup"`
}
72 89
 
73 90
 // NodeAuthConfig holds authn/authz configuration options
... ...
@@ -4,6 +4,7 @@ import (
4 4
 	"testing"
5 5
 
6 6
 	"github.com/ghodss/yaml"
7
+	"speter.net/go/exp/math/dec/inf"
7 8
 
8 9
 	"k8s.io/kubernetes/pkg/api/unversioned"
9 10
 	"k8s.io/kubernetes/pkg/runtime"
... ...
@@ -11,6 +12,7 @@ import (
11 11
 	"k8s.io/kubernetes/pkg/util"
12 12
 
13 13
 	internal "github.com/openshift/origin/pkg/cmd/server/api"
14
+	"github.com/openshift/origin/pkg/cmd/server/api/latest"
14 15
 	"github.com/openshift/origin/pkg/cmd/server/api/v1"
15 16
 
16 17
 	// install all APIs
... ...
@@ -55,6 +57,9 @@ servingInfo:
55 55
   clientCA: ""
56 56
   keyFile: ""
57 57
   namedCertificates: null
58
+volumeConfig:
59
+  localQuota:
60
+    perFSGroup: null
58 61
 volumeDirectory: ""
59 62
 `
60 63
 
... ...
@@ -455,7 +460,7 @@ servingInfo:
455 455
 `
456 456
 )
457 457
 
458
-func TestNodeConfig(t *testing.T) {
458
+func TestSerializeNodeConfig(t *testing.T) {
459 459
 	config := &internal.NodeConfig{
460 460
 		PodManifestConfig: &internal.PodManifestConfig{},
461 461
 	}
... ...
@@ -468,6 +473,135 @@ func TestNodeConfig(t *testing.T) {
468 468
 	}
469 469
 }
470 470
 
471
+func TestReadNodeConfigLocalVolumeDirQuota(t *testing.T) {
472
+
473
+	tests := map[string]struct {
474
+		config   string
475
+		expected string
476
+	}{
477
+		"null quota": {
478
+			config: `
479
+apiVersion: v1
480
+volumeConfig:
481
+  localQuota:
482
+    perFSGroup: null
483
+`,
484
+			expected: "",
485
+		},
486
+		"missing quota": {
487
+			config: `
488
+apiVersion: v1
489
+volumeConfig:
490
+  localQuota:
491
+`,
492
+			expected: "",
493
+		},
494
+		"missing localQuota": {
495
+			config: `
496
+apiVersion: v1
497
+volumeConfig:
498
+`,
499
+			expected: "",
500
+		},
501
+		"missing volumeConfig": {
502
+			config: `
503
+apiVersion: v1
504
+`,
505
+			expected: "",
506
+		},
507
+		"no unit (bytes) quota": {
508
+			config: `
509
+apiVersion: v1
510
+volumeConfig:
511
+  localQuota:
512
+    perFSGroup: 200000
513
+`,
514
+			expected: "200000",
515
+		},
516
+		"Kb quota": {
517
+			config: `
518
+apiVersion: v1
519
+volumeConfig:
520
+  localQuota:
521
+    perFSGroup: 200Ki
522
+`,
523
+			expected: "204800",
524
+		},
525
+		"Mb quota": {
526
+			config: `
527
+apiVersion: v1
528
+volumeConfig:
529
+  localQuota:
530
+    perFSGroup: 512Mi
531
+`,
532
+			expected: "536870912",
533
+		},
534
+		"Gb quota": {
535
+			config: `
536
+apiVersion: v1
537
+volumeConfig:
538
+  localQuota:
539
+    perFSGroup: 2Gi
540
+`,
541
+			expected: "2147483648",
542
+		},
543
+		"Tb quota": {
544
+			config: `
545
+apiVersion: v1
546
+volumeConfig:
547
+  localQuota:
548
+    perFSGroup: 2Ti
549
+`,
550
+			expected: "2199023255552",
551
+		},
552
+		// This is invalid config, would be caught by validation but just
553
+		// testing it parses ok:
554
+		"negative quota": {
555
+			config: `
556
+apiVersion: v1
557
+volumeConfig:
558
+  localQuota:
559
+    perFSGroup: -512Mi
560
+`,
561
+			expected: "-536870912",
562
+		},
563
+		"zero quota": {
564
+			config: `
565
+apiVersion: v1
566
+volumeConfig:
567
+  localQuota:
568
+    perFSGroup: 0
569
+`,
570
+			expected: "0",
571
+		},
572
+	}
573
+
574
+	for name, test := range tests {
575
+		t.Logf("Running test: %s", name)
576
+		nodeConfig := &internal.NodeConfig{}
577
+		if err := latest.ReadYAMLInto([]byte(test.config), nodeConfig); err != nil {
578
+			t.Errorf("Error reading yaml: %s", err.Error())
579
+		}
580
+		if test.expected == "" && nodeConfig.VolumeConfig.LocalQuota.PerFSGroup != nil {
581
+			t.Errorf("Expected empty quota but got: %s", *nodeConfig.VolumeConfig.LocalQuota.PerFSGroup)
582
+		}
583
+		if test.expected != "" {
584
+			if nodeConfig.VolumeConfig.LocalQuota.PerFSGroup == nil {
585
+				t.Errorf("Expected quota: %s, got: nil", test.expected)
586
+			} else {
587
+				amount := nodeConfig.VolumeConfig.LocalQuota.PerFSGroup.Amount
588
+				t.Logf("%s", amount.String())
589
+				rounded := new(inf.Dec)
590
+				rounded.Round(amount, 0, inf.RoundUp)
591
+				t.Logf("%s", rounded.String())
592
+				if test.expected != rounded.String() {
593
+					t.Errorf("Expected quota: %s, got: %s", test.expected, rounded.String())
594
+				}
595
+			}
596
+		}
597
+	}
598
+}
599
+
471 600
 type AdmissionPluginTestConfig struct {
472 601
 	unversioned.TypeMeta
473 602
 	Data string `json:"data"`
... ...
@@ -50,6 +50,8 @@ func ValidateNodeConfig(config *api.NodeConfig, fldPath *field.Path) ValidationR
50 50
 		validationResults.AddErrors(field.Invalid(fldPath.Child("iptablesSyncPeriod"), config.IPTablesSyncPeriod, fmt.Sprintf("unable to parse iptablesSyncPeriod: %v. Examples with correct format: '5s', '1m', '2h22m'", err)))
51 51
 	}
52 52
 
53
+	validationResults.AddErrors(ValidateVolumeConfig(config.VolumeConfig, fldPath.Child("volumeConfig"))...)
54
+
53 55
 	return validationResults
54 56
 }
55 57
 
... ...
@@ -113,3 +115,13 @@ func ValidateDockerConfig(config api.DockerConfig, fldPath *field.Path) field.Er
113 113
 func ValidateKubeletExtendedArguments(config api.ExtendedArguments, fldPath *field.Path) field.ErrorList {
114 114
 	return ValidateExtendedArguments(config, kubeletoptions.NewKubeletServer().AddFlags, fldPath)
115 115
 }
116
+
117
+func ValidateVolumeConfig(config api.VolumeConfig, fldPath *field.Path) field.ErrorList {
118
+	allErrs := field.ErrorList{}
119
+
120
+	if config.LocalQuota.PerFSGroup != nil && config.LocalQuota.PerFSGroup.Value() < 0 {
121
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("localQuota", "perFSGroup"), config.LocalQuota.PerFSGroup,
122
+			"must be a positive integer"))
123
+	}
124
+	return allErrs
125
+}
... ...
@@ -3,6 +3,7 @@ package validation
3 3
 import (
4 4
 	"testing"
5 5
 
6
+	"k8s.io/kubernetes/pkg/api/resource"
6 7
 	"k8s.io/kubernetes/pkg/util/validation/field"
7 8
 
8 9
 	configapi "github.com/openshift/origin/pkg/cmd/server/api"
... ...
@@ -61,3 +62,30 @@ func TestFailingKubeletArgs(t *testing.T) {
61 61
 		t.Errorf("expected %v, got %v", e, a)
62 62
 	}
63 63
 }
64
+
65
+func TestInvalidProjectEmptyDirQuota(t *testing.T) {
66
+	negQuota := resource.MustParse("-1000Mi")
67
+	nodeCfg := configapi.NodeConfig{
68
+		VolumeConfig: configapi.VolumeConfig{
69
+			LocalQuota: configapi.LocalQuota{
70
+				PerFSGroup: &negQuota,
71
+			},
72
+		},
73
+	}
74
+	errs := ValidateNodeConfig(&nodeCfg, nil)
75
+	// This will result in several errors, one of them should be related to the
76
+	// project empty dir quota:
77
+	var emptyDirQuotaError *field.Error
78
+	for _, err := range errs.Errors {
79
+		t.Logf("Found error: %s", err.Field)
80
+		if err.Field == "volumeConfig.localQuota.perFSGroup" {
81
+			emptyDirQuotaError = err
82
+		}
83
+	}
84
+	if emptyDirQuotaError == nil {
85
+		t.Fatalf("expected volumeConfig.localQuota.perFSGroup error but got none")
86
+	}
87
+	if emptyDirQuotaError.Type != field.ErrorTypeInvalid {
88
+		t.Errorf("unexpected error for negative volumeConfig.localQuota.perFSGroup: %s", emptyDirQuotaError.Detail)
89
+	}
90
+}
... ...
@@ -2,6 +2,7 @@ package kubernetes
2 2
 
3 3
 import (
4 4
 	"crypto/tls"
5
+	"errors"
5 6
 	"fmt"
6 7
 	"net"
7 8
 	"strconv"
... ...
@@ -20,8 +21,9 @@ import (
20 20
 	kubeletserver "k8s.io/kubernetes/pkg/kubelet/server"
21 21
 	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
22 22
 	"k8s.io/kubernetes/pkg/util"
23
-	"k8s.io/kubernetes/pkg/util/errors"
23
+	kerrors "k8s.io/kubernetes/pkg/util/errors"
24 24
 	"k8s.io/kubernetes/pkg/util/oom"
25
+	"k8s.io/kubernetes/pkg/volume"
25 26
 
26 27
 	osdnapi "github.com/openshift/openshift-sdn/plugins/osdn/api"
27 28
 	"github.com/openshift/openshift-sdn/plugins/osdn/factory"
... ...
@@ -31,6 +33,7 @@ import (
31 31
 	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
32 32
 	cmdflags "github.com/openshift/origin/pkg/cmd/util/flags"
33 33
 	"github.com/openshift/origin/pkg/cmd/util/variable"
34
+	"github.com/openshift/origin/pkg/volume/empty_dir"
34 35
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
35 36
 )
36 37
 
... ...
@@ -156,7 +159,7 @@ func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error
156 156
 	// TODO: this should be done in config validation (along with the above) so we can provide
157 157
 	// proper errors
158 158
 	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
159
-		return nil, errors.NewAggregate(err)
159
+		return nil, kerrors.NewAggregate(err)
160 160
 	}
161 161
 
162 162
 	proxyconfig, err := buildKubeProxyConfig(options)
... ...
@@ -169,6 +172,49 @@ func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error
169 169
 		return nil, err
170 170
 	}
171 171
 
172
+	// Replace the standard k8s emptyDir volume plugin with a wrapper version
173
+	// which offers XFS quota functionality, but only if the node config
174
+	// specifies an empty dir quota to apply to projects:
175
+	if options.VolumeConfig.LocalQuota.PerFSGroup != nil {
176
+		glog.V(2).Info("Replacing empty-dir volume plugin with quota wrapper")
177
+		wrappedEmptyDirPlugin := false
178
+
179
+		quotaApplicator, err := empty_dir.NewQuotaApplicator(options.VolumeDirectory)
180
+		if err != nil {
181
+			return nil, err
182
+		}
183
+
184
+		// Create a volume spec with emptyDir we can use to search for the
185
+		// emptyDir plugin with CanSupport:
186
+		emptyDirSpec := &volume.Spec{
187
+			Volume: &kapi.Volume{
188
+				VolumeSource: kapi.VolumeSource{
189
+					EmptyDir: &kapi.EmptyDirVolumeSource{},
190
+				},
191
+			},
192
+		}
193
+
194
+		for idx, plugin := range cfg.VolumePlugins {
195
+			// Can't really do type checking or use a constant here as they are not exported:
196
+			if plugin.CanSupport(emptyDirSpec) {
197
+				wrapper := empty_dir.EmptyDirQuotaPlugin{
198
+					Wrapped:         plugin,
199
+					Quota:           *options.VolumeConfig.LocalQuota.PerFSGroup,
200
+					QuotaApplicator: quotaApplicator,
201
+				}
202
+				cfg.VolumePlugins[idx] = &wrapper
203
+				wrappedEmptyDirPlugin = true
204
+			}
205
+		}
206
+		// Because we can't look for the k8s emptyDir plugin by any means that would
207
+		// survive a refactor, error out if we couldn't find it:
208
+		if !wrappedEmptyDirPlugin {
209
+			return nil, errors.New("unable to wrap emptyDir volume plugin for quota support")
210
+		}
211
+	} else {
212
+		glog.V(2).Info("Skipping replacement of empty-dir volume plugin with quota wrapper, no local fsGroup quota specified")
213
+	}
214
+
172 215
 	// provide any config overrides
173 216
 	cfg.NodeName = options.NodeName
174 217
 	cfg.KubeClient = internalclientset.FromUnversionedClient(kubeClient)
... ...
@@ -354,7 +400,7 @@ func buildKubeProxyConfig(options configapi.NodeConfig) (*proxyoptions.ProxyServ
354 354
 
355 355
 	// Resolve cmd flags to add any user overrides
356 356
 	if err := cmdflags.Resolve(options.ProxyArguments, proxyconfig.AddFlags); len(err) > 0 {
357
-		return nil, errors.NewAggregate(err)
357
+		return nil, kerrors.NewAggregate(err)
358 358
 	}
359 359
 
360 360
 	return proxyconfig, nil
361 361
new file mode 100644
... ...
@@ -0,0 +1,104 @@
0
+package empty_dir
1
+
2
+import (
3
+	"k8s.io/kubernetes/pkg/api"
4
+	"k8s.io/kubernetes/pkg/api/resource"
5
+	"k8s.io/kubernetes/pkg/types"
6
+	"k8s.io/kubernetes/pkg/volume"
7
+)
8
+
9
+var _ volume.VolumePlugin = &EmptyDirQuotaPlugin{}
10
+var _ volume.Builder = &emptyDirQuotaBuilder{}
11
+
12
// EmptyDirQuotaPlugin is a simple wrapper for the k8s empty dir plugin builder.
type EmptyDirQuotaPlugin struct {
	// Wrapped is the actual k8s emptyDir volume plugin we will pass method calls to.
	Wrapped volume.VolumePlugin

	// Quota is the quota to apply for each FSGroup using an emptyDir volume
	// (populated from the node config localQuota.perFSGroup setting).
	Quota resource.Quantity

	// QuotaApplicator is passed to actual volume builders so they can apply
	// quota for the supported filesystem.
	QuotaApplicator QuotaApplicator
}
24
+
25
// NewBuilder returns a volume.Builder for the given emptyDir spec, wrapping
// the builder produced by the underlying k8s emptyDir plugin so that a
// filesystem quota can be applied after the volume is set up.
func (plugin *EmptyDirQuotaPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
	volBuilder, err := plugin.Wrapped.NewBuilder(spec, pod, opts)
	if err != nil {
		return volBuilder, err
	}

	// Because we cannot access several fields on the k8s emptyDir struct, and
	// we do not wish to modify k8s code for this, we have to grab a reference
	// to them ourselves.
	// This logic is the same as k8s.io/kubernetes/pkg/volume/empty_dir:
	medium := api.StorageMediumDefault
	if spec.Volume.EmptyDir != nil { // Support a non-specified source as EmptyDir.
		medium = spec.Volume.EmptyDir.Medium
	}

	// Wrap the builder object with our own to add quota functionality:
	wrapperEmptyDir := &emptyDirQuotaBuilder{
		wrapped:         volBuilder,
		pod:             pod,
		medium:          medium,
		quota:           plugin.Quota,
		quotaApplicator: plugin.QuotaApplicator,
	}
	return wrapperEmptyDir, err
}
50
+
51
// Init delegates plugin initialization to the wrapped emptyDir plugin.
func (plugin *EmptyDirQuotaPlugin) Init(host volume.VolumeHost) error {
	return plugin.Wrapped.Init(host)
}

// Name returns the wrapped plugin's name, so this wrapper is registered
// under the same plugin name as the k8s emptyDir plugin it replaces.
func (plugin *EmptyDirQuotaPlugin) Name() string {
	return plugin.Wrapped.Name()
}

// CanSupport delegates the volume-spec support check to the wrapped plugin.
func (plugin *EmptyDirQuotaPlugin) CanSupport(spec *volume.Spec) bool {
	return plugin.Wrapped.CanSupport(spec)
}

// NewCleaner delegates cleaner construction to the wrapped plugin; teardown
// is unchanged by the quota wrapper.
func (plugin *EmptyDirQuotaPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
	return plugin.Wrapped.NewCleaner(volName, podUID)
}
66
+
67
// emptyDirQuotaBuilder is a wrapper plugin builder for the k8s empty dir builder itself.
// This plugin just extends and adds the functionality to apply a
// quota for the pods FSGroup on an XFS filesystem.
type emptyDirQuotaBuilder struct {
	wrapped         volume.Builder    // the real emptyDir builder we delegate to
	pod             *api.Pod          // pod the volume belongs to
	medium          api.StorageMedium // memory-backed volumes skip quota application
	quota           resource.Quantity // quota to apply for the pod's FSGroup
	quotaApplicator QuotaApplicator   // applies the quota after SetUpAt succeeds
}
77
+
78
// SetUp sets up the volume at its default path, then applies the quota.
// Must implement SetUp as well, otherwise the internal Builder.SetUp calls its
// own SetUpAt method, not the one we need.
func (edq *emptyDirQuotaBuilder) SetUp(fsGroup *int64) error {
	return edq.SetUpAt(edq.GetPath(), fsGroup)
}
84
+
85
// SetUpAt delegates volume setup to the wrapped emptyDir builder and, only if
// that succeeds, applies the quota to the resulting directory.
func (edq *emptyDirQuotaBuilder) SetUpAt(dir string, fsGroup *int64) error {
	err := edq.wrapped.SetUpAt(dir, fsGroup)
	if err == nil {
		err = edq.quotaApplicator.Apply(dir, edq.medium, edq.pod, fsGroup, edq.quota)
	}
	return err
}
92
+
93
// GetAttributes delegates to the wrapped emptyDir builder.
func (edq *emptyDirQuotaBuilder) GetAttributes() volume.Attributes {
	return edq.wrapped.GetAttributes()
}

// GetMetrics delegates to the wrapped emptyDir builder.
func (edq *emptyDirQuotaBuilder) GetMetrics() (*volume.Metrics, error) {
	return edq.wrapped.GetMetrics()
}

// GetPath delegates to the wrapped emptyDir builder.
func (edq *emptyDirQuotaBuilder) GetPath() string {
	return edq.wrapped.GetPath()
}
0 104
new file mode 100644
... ...
@@ -0,0 +1,192 @@
0
+package empty_dir
1
+
2
+import (
3
+	"bytes"
4
+	"fmt"
5
+	"os/exec"
6
+	"strings"
7
+
8
+	"github.com/golang/glog"
9
+	"k8s.io/kubernetes/pkg/api"
10
+	"k8s.io/kubernetes/pkg/api/resource"
11
+)
12
+
13
// QuotaApplicator is used to apply quota to an emptyDir volume.
type QuotaApplicator interface {
	// Apply the quota to the given EmptyDir path. Implementations may
	// quietly no-op (e.g. for memory-backed volumes, or when fsGroup is nil).
	Apply(dir string, medium api.StorageMedium, pod *api.Pod, fsGroup *int64, quota resource.Quantity) error
}
18
+
19
// xfsQuotaApplicator implements QuotaApplicator using xfs_quota group quotas.
type xfsQuotaApplicator struct {
	// cmdRunner abstracts the stat/df/xfs_quota shell commands for testing.
	cmdRunner quotaCommandRunner
}
22
+
23
+// NewQuotaApplicator checks the filesystem type for the configured volume directory
24
+// and returns an appropriate implementation of the quota applicator. If the filesystem
25
+// does not appear to be a type we support quotas on, an error is returned.
26
+func NewQuotaApplicator(volumeDirectory string) (QuotaApplicator, error) {
27
+
28
+	cmdRunner := &realQuotaCommandRunner{}
29
+	isXFS, err := isXFS(cmdRunner, volumeDirectory)
30
+	if err != nil {
31
+		return nil, err
32
+	}
33
+	if isXFS {
34
+		// Make sure xfs_quota is on the PATH, otherwise we're not going to get very far:
35
+		_, pathErr := exec.LookPath("xfs_quota")
36
+		if pathErr != nil {
37
+			return nil, pathErr
38
+		}
39
+
40
+		return &xfsQuotaApplicator{
41
+			cmdRunner: cmdRunner,
42
+		}, nil
43
+	}
44
+
45
+	// If we were unable to find a quota supported filesystem type, return an error:
46
+	return nil, fmt.Errorf("%s is not on a supported filesystem for local volume quota", volumeDirectory)
47
+}
48
+
49
// quotaCommandRunner interface is used to abstract the actual running of
// commands so we can unit test more behavior.
type quotaCommandRunner interface {
	// RunFSTypeCommand reports the filesystem type for dir (stdout, stderr, err).
	RunFSTypeCommand(dir string) (string, string, error)
	// RunFSDeviceCommand reports the backing device for dir (stdout, stderr, err).
	RunFSDeviceCommand(dir string) (string, string, error)
	// RunApplyQuotaCommand sets the group quota for fsGroup on fsDevice.
	RunApplyQuotaCommand(fsDevice string, quota resource.Quantity, fsGroup int64) (string, string, error)
}

// realQuotaCommandRunner runs the actual stat/df/xfs_quota commands.
type realQuotaCommandRunner struct {
}
59
+
60
// RunFSTypeCommand runs `stat -f -c %T <dir>` to report the filesystem type
// name for dir. Returns (stdout, stderr, error); stderr is always empty here.
func (cr *realQuotaCommandRunner) RunFSTypeCommand(dir string) (string, string, error) {
	args := []string{"-f", "-c", "%T", dir}
	outBytes, err := exec.Command("stat", args...).Output()
	return string(outBytes), "", err
}
65
+
66
// RunFSDeviceCommand runs `df --output=source <dir>` to report the device the
// directory resides on. Returns (stdout, stderr, error); stderr is always empty here.
func (cr *realQuotaCommandRunner) RunFSDeviceCommand(dir string) (string, string, error) {
	outBytes, err := exec.Command("df", "--output=source", dir).Output()
	return string(outBytes), "", err
}
70
+
71
// RunApplyQuotaCommand shells out to xfs_quota in expert mode (-x) to set the
// group quota (soft and hard block limits, both set to quota) for fsGroup on
// fsDevice. stderr is captured and returned separately because xfs_quota can
// fail while still exiting with a success code (see applyQuota).
func (cr *realQuotaCommandRunner) RunApplyQuotaCommand(fsDevice string, quota resource.Quantity, fsGroup int64) (string, string, error) {
	args := []string{"-x", "-c",
		fmt.Sprintf("limit -g bsoft=%d bhard=%d %d", quota.Value(), quota.Value(), fsGroup),
		fsDevice,
	}

	cmd := exec.Command("xfs_quota", args...)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr

	err := cmd.Run()
	glog.V(5).Infof("Ran: xfs_quota %s", args)
	return "", stderr.String(), err
}
85
+
86
// Apply sets the actual quota on a device for an emptyDir volume if possible. Will return an error
// if anything goes wrong during the process. (not an XFS filesystem, etc) If the volume medium is set
// to memory, or no FSGroup is provided (indicating the request matched an SCC set to RunAsAny), this
// method will effectively no-op.
func (xqa *xfsQuotaApplicator) Apply(dir string, medium api.StorageMedium, pod *api.Pod, fsGroup *int64, quota resource.Quantity) error {

	// Memory-backed (tmpfs) emptyDirs are not on the XFS device; skip quietly.
	if medium == api.StorageMediumMemory {
		glog.V(5).Infof("Skipping quota application due to memory storage medium.")
		return nil
	}
	isXFS, err := isXFS(xqa.cmdRunner, dir)
	if err != nil {
		return err
	}
	if !isXFS {
		return fmt.Errorf("unable to apply quota: %s is not on an XFS filesystem", dir)
	}
	if fsGroup == nil {
		// This indicates the operation matched an SCC with FSGroup strategy RunAsAny.
		// Not an error condition.
		glog.V(5).Infof("Unable to apply XFS quota, no FSGroup specified.")
		return nil
	}

	// Resolve the device backing the volume dir, then apply the group quota.
	volDevice, err := xqa.getFSDevice(dir)
	if err != nil {
		return err
	}

	err = xqa.applyQuota(volDevice, quota, *fsGroup)
	if err != nil {
		return err
	}

	return nil
}
122
+
123
// applyQuota runs the xfs_quota limit command for the given device and FSGroup
// ID, treating any stderr output as a failure even when the exit code is 0.
func (xqa *xfsQuotaApplicator) applyQuota(volDevice string, quota resource.Quantity, fsGroupID int64) error {
	_, stderr, err := xqa.cmdRunner.RunApplyQuotaCommand(volDevice, quota, fsGroupID)
	if err != nil {
		return err
	}
	// xfs_quota is very happy to fail but return a success code, likely due to its
	// interactive shell approach. Grab stderr, if we see anything written to it we'll
	// consider this an error.
	if len(stderr) > 0 {
		return fmt.Errorf("xfs_quota wrote to stderr: %s", stderr)
	}

	glog.V(4).Infof("XFS quota applied: device=%s, quota=%d, fsGroup=%d", volDevice, quota.Value(), fsGroupID)
	return nil
}
138
+
139
// getFSDevice returns the filesystem device backing dir, using this
// applicator's command runner (mockable in tests).
func (xqa *xfsQuotaApplicator) getFSDevice(dir string) (string, error) {
	return getFSDevice(dir, xqa.cmdRunner)
}
142
+
143
// GetFSDevice returns the filesystem device for a given path. To do this we
// run df on the path, returning a header line and the line we're
// interested in. The first string token in that line will be the device name.
// This exported variant always uses the real command runner.
func GetFSDevice(dir string) (string, error) {
	return getFSDevice(dir, &realQuotaCommandRunner{})
}
149
+
150
+func getFSDevice(dir string, cmdRunner quotaCommandRunner) (string, error) {
151
+	out, _, err := cmdRunner.RunFSDeviceCommand(dir)
152
+	if err != nil {
153
+		return "", fmt.Errorf("unable to find filesystem device for emptyDir volume %s: %s", dir, err)
154
+	}
155
+	fsDevice, parseErr := parseFSDevice(out)
156
+	return fsDevice, parseErr
157
+}
158
+
159
+func parseFSDevice(dfOutput string) (string, error) {
160
+	// Need to skip the df header line starting with "Filesystem", and grab the first
161
+	// word of the following line which will be our device path.
162
+	lines := strings.Split(dfOutput, "\n")
163
+	if len(lines) < 2 {
164
+		return "", fmt.Errorf("%s: %s", unexpectedLineCountError, dfOutput)
165
+	}
166
+
167
+	fsDevice := strings.Split(lines[1], " ")[0]
168
+	// Make sure it looks like a device:
169
+	if !strings.HasPrefix(fsDevice, "/") {
170
+		return "", fmt.Errorf("%s: %s", invalidFilesystemError, fsDevice)
171
+	}
172
+
173
+	return fsDevice, nil
174
+}
175
+
176
+// isXFS checks if the empty dir is on an XFS filesystem.
177
+func isXFS(cmdRunner quotaCommandRunner, dir string) (bool, error) {
178
+	out, _, err := cmdRunner.RunFSTypeCommand(dir)
179
+	if err != nil {
180
+		return false, fmt.Errorf("unable to check filesystem type for emptydir volume %s: %s", dir, err)
181
+	}
182
+	if strings.TrimSpace(out) == "xfs" {
183
+		return true, nil
184
+	}
185
+	return false, nil
186
+}
187
+
188
// Error-message prefixes used by parseFSDevice; the unit tests match on these
// substrings.
const (
	invalidFilesystemError   = "found invalid filesystem device"
	unexpectedLineCountError = "unexpected line count in df output"
)
0 192
new file mode 100644
... ...
@@ -0,0 +1,249 @@
0
+package empty_dir
1
+
2
+import (
3
+	"errors"
4
+	"strings"
5
+	"testing"
6
+
7
+	kapi "k8s.io/kubernetes/pkg/api"
8
+	"k8s.io/kubernetes/pkg/api/resource"
9
+)
10
+
11
// expectedDevice is the device the mocked df output names in these tests.
const expectedDevice = "/dev/sdb2"
12
+
13
+func TestParseFSDevice(t *testing.T) {
14
+	tests := map[string]struct {
15
+		dfOutput  string
16
+		expDevice string
17
+		expError  string
18
+	}{
19
+		"happy path": {
20
+			dfOutput:  "Filesystem\n/dev/sdb2",
21
+			expDevice: expectedDevice,
22
+		},
23
+		"happy path multi-token": {
24
+			dfOutput:  "Filesystem\n/dev/sdb2           16444592     8  16444584   1% /var/openshift.local.volumes/",
25
+			expDevice: expectedDevice,
26
+		},
27
+		"invalid tmpfs": {
28
+			dfOutput: "Filesystem\ntmpfs",
29
+			expError: invalidFilesystemError,
30
+		},
31
+		"invalid empty": {
32
+			dfOutput: "",
33
+			expError: unexpectedLineCountError,
34
+		},
35
+		"invalid one line": {
36
+			dfOutput: "Filesystem\n",
37
+			expError: invalidFilesystemError,
38
+		},
39
+		"invalid blank second line": {
40
+			dfOutput: "Filesystem\n\n",
41
+			expError: invalidFilesystemError,
42
+		},
43
+		"invalid too many lines": {
44
+			dfOutput:  "Filesystem\n/dev/sdb2\ntmpfs\nwhatisgoingon",
45
+			expDevice: expectedDevice,
46
+		},
47
+	}
48
+	for name, test := range tests {
49
+		t.Logf("running TestParseFSDevice: %s", name)
50
+		device, err := parseFSDevice(test.dfOutput)
51
+		if test.expDevice != "" && test.expDevice != device {
52
+			t.Errorf("Unexpected filesystem device, expected: %s, got: %s", test.expDevice, device)
53
+		}
54
+		if test.expError != "" && (err == nil || !strings.Contains(err.Error(), test.expError)) {
55
+			t.Errorf("Unexpected filesystem error, expected: %s, got: %s", test.expError, err)
56
+		}
57
+	}
58
+}
59
+
60
// Avoid running actual commands to manage XFS quota:
// mockQuotaCommandRunner is a test double for quotaCommandRunner. Each
// Run*CommandResponse field, when non-nil, overrides the default canned
// reply for that command; the RanApplyQuota* fields capture the arguments
// of the last apply call so tests can assert on them.
type mockQuotaCommandRunner struct {
	RunFSDeviceCommandResponse *cmdResponse
	RunFSTypeCommandResponse   *cmdResponse

	// Arguments recorded by RunApplyQuotaCommand for assertions:
	RanApplyQuotaFSDevice string
	RanApplyQuota         *resource.Quantity
	RanApplyQuotaFSGroup  int64
}
69
+
70
+func (m *mockQuotaCommandRunner) RunFSTypeCommand(dir string) (string, string, error) {
71
+	if m.RunFSTypeCommandResponse != nil {
72
+		return m.RunFSTypeCommandResponse.Stdout, m.RunFSTypeCommandResponse.Stderr, m.RunFSTypeCommandResponse.Error
73
+	}
74
+	return "xfs", "", nil
75
+}
76
+
77
+func (m *mockQuotaCommandRunner) RunFSDeviceCommand(dir string) (string, string, error) {
78
+	if m.RunFSDeviceCommandResponse != nil {
79
+		return m.RunFSDeviceCommandResponse.Stdout, m.RunFSDeviceCommandResponse.Stderr, m.RunFSDeviceCommandResponse.Error
80
+	}
81
+	return "Filesystem\n/dev/sdb2", "", nil
82
+}
83
+
84
+func (m *mockQuotaCommandRunner) RunApplyQuotaCommand(fsDevice string, quota resource.Quantity, fsGroup int64) (string, string, error) {
85
+	// Store these for assertions in tests:
86
+	m.RanApplyQuotaFSDevice = fsDevice
87
+	m.RanApplyQuota = &quota
88
+	m.RanApplyQuotaFSGroup = fsGroup
89
+	return "", "", nil
90
+}
91
+
92
// Small struct for specifying how we want the various quota command runners to
// respond in tests:
type cmdResponse struct {
	Stdout string // canned standard output
	Stderr string // canned standard error
	Error  error  // canned error (would typically be an exit error in real life)
}
99
+
100
+func TestApplyQuota(t *testing.T) {
101
+
102
+	var defaultFSGroup int64
103
+	defaultFSGroup = 1000050000
104
+
105
+	tests := map[string]struct {
106
+		FSGroupID *int64
107
+		Quota     string
108
+
109
+		FSTypeCmdResponse     *cmdResponse
110
+		FSDeviceCmdResponse   *cmdResponse
111
+		ApplyQuotaCmdResponse *cmdResponse
112
+
113
+		ExpFSDevice string
114
+		ExpError    string // sub-string to be searched for in error message
115
+		ExpSkipped  bool
116
+	}{
117
+		"happy path": {
118
+			Quota:     "512",
119
+			FSGroupID: &defaultFSGroup,
120
+		},
121
+		"zero quota": {
122
+			Quota:     "0",
123
+			FSGroupID: &defaultFSGroup,
124
+		},
125
+		"invalid filesystem device": {
126
+			Quota:     "512",
127
+			FSGroupID: &defaultFSGroup,
128
+			FSDeviceCmdResponse: &cmdResponse{
129
+				Stdout: "Filesystem\ntmpfs",
130
+				Stderr: "",
131
+				Error:  nil,
132
+			},
133
+			ExpError:   invalidFilesystemError,
134
+			ExpSkipped: true,
135
+		},
136
+		"error checking filesystem device": {
137
+			Quota:     "512",
138
+			FSGroupID: &defaultFSGroup,
139
+			FSDeviceCmdResponse: &cmdResponse{
140
+				Stdout: "",
141
+				Stderr: "no such file or directory",
142
+				Error:  errors.New("no such file or directory"), // Would be exit error in real life
143
+			},
144
+			ExpError:   "no such file or directory",
145
+			ExpSkipped: true,
146
+		},
147
+		"non-xfs filesystem type": {
148
+			Quota:     "512",
149
+			FSGroupID: &defaultFSGroup,
150
+			FSTypeCmdResponse: &cmdResponse{
151
+				Stdout: "ext4",
152
+				Stderr: "",
153
+				Error:  nil,
154
+			},
155
+			ExpError:   "not on an XFS filesystem",
156
+			ExpSkipped: true,
157
+		},
158
+		"error checking filesystem type": {
159
+			Quota:     "512",
160
+			FSGroupID: &defaultFSGroup,
161
+			FSTypeCmdResponse: &cmdResponse{
162
+				Stdout: "",
163
+				Stderr: "no such file or directory",
164
+				Error:  errors.New("no such file or directory"), // Would be exit error in real life
165
+			},
166
+			ExpError:   "unable to check filesystem type",
167
+			ExpSkipped: true,
168
+		},
169
+		// Should result in success, but no quota actually gets applied:
170
+		"no FSGroup": {
171
+			Quota:      "512",
172
+			ExpSkipped: true,
173
+		},
174
+	}
175
+
176
+	for name, test := range tests {
177
+		t.Logf("running TestApplyQuota: %s", name)
178
+		quotaApplicator := xfsQuotaApplicator{}
179
+		// Replace the real command runner with our mock:
180
+		mockCmdRunner := mockQuotaCommandRunner{}
181
+		quotaApplicator.cmdRunner = &mockCmdRunner
182
+		fakeDir := "/var/lib/origin/openshift.local.volumes/pods/d71f6949-cb3f-11e5-aedf-989096de63cb"
183
+
184
+		// Configure the default happy path command responses if nothing was specified
185
+		// by the test:
186
+		if test.FSTypeCmdResponse == nil {
187
+			// Configure the default happy path response:
188
+			test.FSTypeCmdResponse = &cmdResponse{
189
+				Stdout: "xfs",
190
+				Stderr: "",
191
+				Error:  nil,
192
+			}
193
+		}
194
+		if test.FSDeviceCmdResponse == nil {
195
+			test.FSDeviceCmdResponse = &cmdResponse{
196
+				Stdout: "Filesystem\n/dev/sdb2",
197
+				Stderr: "",
198
+				Error:  nil,
199
+			}
200
+		}
201
+
202
+		if test.ApplyQuotaCmdResponse == nil {
203
+			test.ApplyQuotaCmdResponse = &cmdResponse{
204
+				Stdout: "",
205
+				Stderr: "",
206
+				Error:  nil,
207
+			}
208
+		}
209
+
210
+		mockCmdRunner.RunFSDeviceCommandResponse = test.FSDeviceCmdResponse
211
+		mockCmdRunner.RunFSTypeCommandResponse = test.FSTypeCmdResponse
212
+
213
+		quota := resource.MustParse(test.Quota)
214
+		err := quotaApplicator.Apply(fakeDir, kapi.StorageMediumDefault, &kapi.Pod{}, test.FSGroupID, quota)
215
+		if test.ExpError == "" && !test.ExpSkipped {
216
+			// Expecting success case:
217
+			if mockCmdRunner.RanApplyQuotaFSDevice != "/dev/sdb2" {
218
+				t.Errorf("failed: '%s', expected quota applied to: %s, got: %s", name, "/dev/sdb2", mockCmdRunner.RanApplyQuotaFSDevice)
219
+			}
220
+			if mockCmdRunner.RanApplyQuota.Value() != quota.Value() {
221
+				t.Errorf("failed: '%s', expected quota: %d, got: %d", name, quota.Value(),
222
+					mockCmdRunner.RanApplyQuota.Value())
223
+			}
224
+			if mockCmdRunner.RanApplyQuotaFSGroup != *test.FSGroupID {
225
+				t.Errorf("failed: '%s', expected FSGroup: %d, got: %d", name, test.FSGroupID, mockCmdRunner.RanApplyQuotaFSGroup)
226
+			}
227
+		} else if test.ExpError != "" {
228
+			// Expecting error case:
229
+			if err == nil {
230
+				t.Errorf("failed: '%s', expected error but got none", name)
231
+			} else if !strings.Contains(err.Error(), test.ExpError) {
232
+				t.Errorf("failed: '%s', expected error containing '%s', got: '%s'", name, test.ExpError, err)
233
+			}
234
+		}
235
+
236
+		if test.ExpSkipped {
237
+			if mockCmdRunner.RanApplyQuota != nil {
238
+				t.Errorf("failed: '%s', expected error but quota was applied", name)
239
+			}
240
+			if mockCmdRunner.RanApplyQuotaFSGroup != 0 {
241
+				t.Errorf("failed: '%s', expected error but quota was applied", name)
242
+			}
243
+			if mockCmdRunner.RanApplyQuotaFSDevice != "" {
244
+				t.Errorf("failed: '%s', expected error but quota was applied", name)
245
+			}
246
+		}
247
+	}
248
+}
... ...
@@ -30,4 +30,4 @@ for test_script in $test_scripts; do
30 30
 	ENDTIME=$(date +%s); echo "${test_script} took $(($ENDTIME - $STARTTIME)) seconds and returned with ${CURR_RETURN}";
31 31
 done
32 32
 
33
-exit ${OVERALL_RETURN}
34 33
\ No newline at end of file
34
+exit ${OVERALL_RETURN}
... ...
@@ -51,6 +51,15 @@ if [[ -z ${TEST_ONLY+x} ]]; then
51 51
   os::util::environment::use_sudo
52 52
   reset_tmp_dir
53 53
 
54
+  # If the current system has the XFS volume dir mount point we configure
55
+  # in the test images, assume to use it which will allow the local storage
56
+  # quota tests to pass.
57
+  if [ -d "/mnt/openshift-xfs-vol-dir" ]; then
58
+    export VOLUME_DIR="/mnt/openshift-xfs-vol-dir"
59
+  else
60
+    echo "[WARN] /mnt/openshift-xfs-vol-dir does not exist, local storage quota tests may fail."
61
+  fi
62
+
54 63
   os::log::start_system_logger
55 64
 
56 65
   # when selinux is enforcing, the volume dir selinux label needs to be
... ...
@@ -62,6 +71,14 @@ if [[ -z ${TEST_ONLY+x} ]]; then
62 62
          sudo chcon -t svirt_sandbox_file_t ${VOLUME_DIR}
63 63
   fi
64 64
   configure_os_server
65
+
66
+  # Similar to above check, if the XFS volume dir mount point exists enable
67
+  # local storage quota in node-config.yaml so these tests can pass:
68
+  if [ -d "/mnt/openshift-xfs-vol-dir" ]; then
69
+    sed -i 's/perFSGroup: null/perFSGroup: 256Mi/' $NODE_CONFIG_DIR/node-config.yaml
70
+  fi
71
+  echo "[INFO] Using VOLUME_DIR=${VOLUME_DIR}"
72
+
65 73
   start_os_server
66 74
 
67 75
   export KUBECONFIG="${ADMIN_KUBECONFIG}"
... ...
@@ -9,6 +9,7 @@ import (
9 9
 	_ "github.com/openshift/origin/test/extended/images"
10 10
 	_ "github.com/openshift/origin/test/extended/jenkins"
11 11
 	_ "github.com/openshift/origin/test/extended/jobs"
12
+	_ "github.com/openshift/origin/test/extended/localquota"
12 13
 	_ "github.com/openshift/origin/test/extended/router"
13 14
 	_ "github.com/openshift/origin/test/extended/security"
14 15
 
15 16
new file mode 100644
... ...
@@ -0,0 +1,151 @@
0
+package localquota
1
+
2
+import (
3
+	"bytes"
4
+	"fmt"
5
+	"os"
6
+	"os/exec"
7
+	"strconv"
8
+	"strings"
9
+	"time"
10
+
11
+	g "github.com/onsi/ginkgo"
12
+	o "github.com/onsi/gomega"
13
+
14
+	"github.com/openshift/origin/pkg/volume/empty_dir"
15
+	exutil "github.com/openshift/origin/test/extended/util"
16
+)
17
+
18
const (
	// volDirEnvVar names the env var holding the node's volume directory.
	volDirEnvVar       = "VOLUME_DIR"
	podCreationTimeout = 120    // seconds
	expectedQuotaKb    = 262144 // launcher script sets 256Mi, xfs_quota reports in Kb.
)
23
+
24
+func lookupFSGroup(oc *exutil.CLI, project string) (int, error) {
25
+	gidRange, err := oc.Run("get").Args("project", project,
26
+		"--template='{{ index .metadata.annotations \"openshift.io/sa.scc.supplemental-groups\" }}'").Output()
27
+	if err != nil {
28
+		return 0, err
29
+	}
30
+
31
+	// gidRange will be something like: 1000030000/10000
32
+	fsGroupStr := strings.Split(gidRange, "/")[0]
33
+	fsGroupStr = strings.Replace(fsGroupStr, "'", "", -1)
34
+
35
+	fsGroup, err := strconv.Atoi(fsGroupStr)
36
+	if err != nil {
37
+		return 0, err
38
+	}
39
+
40
+	return fsGroup, nil
41
+}
42
+
43
+// lookupXFSQuota runs an xfs_quota report and parses the output
44
+// looking for the given fsGroup ID's hard quota.
45
+//
46
+// Will return -1 if no quota was found for the fsGroup, and return
47
+// an error if something legitimately goes wrong in parsing the output.
48
+//
49
+// Output from this command looks like:
50
+//
51
+// $ xfs_quota -x -c 'report -n  -L 1000030000 -U 1000030000' /tmp/openshift/xfs-vol-dir
52
+// Group quota on /tmp/openshift/xfs-vol-dir (/dev/sdb2)
53
+//                                Blocks
54
+// Group ID         Used       Soft       Hard    Warn/Grace
55
+// ---------- --------------------------------------------------
56
+// #1000030000          0     524288     524288     00 [--------]
57
+func lookupXFSQuota(oc *exutil.CLI, fsGroup int, volDir string) (int, error) {
58
+
59
+	// First lookup the filesystem device the volumeDir resides on:
60
+	fsDevice, err := empty_dir.GetFSDevice(volDir)
61
+	if err != nil {
62
+		return 0, err
63
+	}
64
+
65
+	args := []string{"xfs_quota", "-x", "-c", fmt.Sprintf("report -n -L %d -U %d", fsGroup, fsGroup), fsDevice}
66
+	cmd := exec.Command("sudo", args...)
67
+	var stderr bytes.Buffer
68
+	cmd.Stderr = &stderr
69
+	outBytes, reportErr := cmd.Output()
70
+	if reportErr != nil {
71
+		return 0, reportErr
72
+	}
73
+	quotaReport := string(outBytes)
74
+
75
+	// Parse output looking for lines starting with a #, which are the lines with
76
+	// group IDs and their quotas:
77
+	lines := strings.Split(quotaReport, "\n")
78
+	for _, l := range lines {
79
+		if strings.HasPrefix(l, fmt.Sprintf("#%d", fsGroup)) {
80
+			words := strings.Fields(l)
81
+			if len(words) != 6 {
82
+				return 0, fmt.Errorf("expected 6 words in quota line: %s", l)
83
+			}
84
+			quota, err := strconv.Atoi(words[3])
85
+			if err != nil {
86
+				return 0, err
87
+			}
88
+			return quota, nil
89
+		}
90
+	}
91
+
92
+	// We repeat this check until the quota shows up or we time out, so not
93
+	// being able to find the GID in the output does not imply an error, just
94
+	// that we haven't found it yet.
95
+	return -1, nil
96
+}
97
+
98
+// waitForQuotaToBeApplied will check for the expected quota, and wait a short interval if
99
+// not found until we reach the timeout. If we were unable to find the quota we expected,
100
+// an error will be returned. If we found the expected quota in time we will return nil.
101
+func waitForQuotaToBeApplied(oc *exutil.CLI, fsGroup int, volDir string) error {
102
+	secondsWaited := 0
103
+	for secondsWaited < podCreationTimeout {
104
+		quotaFound, quotaErr := lookupXFSQuota(oc, fsGroup, volDir)
105
+		o.Expect(quotaErr).NotTo(o.HaveOccurred())
106
+		if quotaFound == expectedQuotaKb {
107
+			return nil
108
+		}
109
+
110
+		time.Sleep(1 * time.Second)
111
+		secondsWaited = secondsWaited + 1
112
+	}
113
+
114
+	return fmt.Errorf("expected quota was not applied in time")
115
+}
116
+
117
+var _ = g.Describe("[volumes] Test local storage quota", func() {
118
+	defer g.GinkgoRecover()
119
+	var (
120
+		oc                 = exutil.NewCLI("local-quota", exutil.KubeConfigPath())
121
+		emptyDirPodFixture = exutil.FixturePath("..", "..", "examples", "hello-openshift", "hello-pod.json")
122
+	)
123
+
124
+	g.Describe("FSGroup local storage quota", func() {
125
+		g.It("should be applied to XFS filesystem when a pod is created", func() {
126
+			oc.SetOutputDir(exutil.TestContext.OutputDir)
127
+			project := oc.Namespace()
128
+
129
+			// Verify volDir is on XFS, if not this test can't pass:
130
+			volDir := os.Getenv(volDirEnvVar)
131
+			g.By(fmt.Sprintf("make sure volume directory (%s) is on an XFS filesystem", volDir))
132
+			o.Expect(volDir).NotTo(o.Equal(""))
133
+			args := []string{"-f", "-c", "'%T'", volDir}
134
+			outBytes, _ := exec.Command("stat", args...).Output()
135
+			o.Expect(strings.Contains(string(outBytes), "xfs")).To(o.BeTrue())
136
+
137
+			g.By("lookup test projects fsGroup ID")
138
+			fsGroup, err := lookupFSGroup(oc, project)
139
+			o.Expect(err).NotTo(o.HaveOccurred())
140
+
141
+			g.By("create hello-openshift pod with emptyDir volume")
142
+			_, createPodErr := oc.Run("create").Args("-f", emptyDirPodFixture).Output()
143
+			o.Expect(createPodErr).NotTo(o.HaveOccurred())
144
+
145
+			g.By("wait for XFS quota to be applied and verify")
146
+			lookupQuotaErr := waitForQuotaToBeApplied(oc, fsGroup, volDir)
147
+			o.Expect(lookupQuotaErr).NotTo(o.HaveOccurred())
148
+		})
149
+	})
150
+})