diff --no-dereference -uNr kubernetes-original/api/swagger-spec/apps_v1alpha1.json kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/apps_v1alpha1.json
--- kubernetes-original/api/swagger-spec/apps_v1alpha1.json 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/apps_v1alpha1.json 2018-03-23 21:44:53.000000000 +0000
@@ -1459,6 +1459,10 @@
"photonPersistentDisk": {
"$ref": "v1.PhotonPersistentDiskVolumeSource",
"description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
+ },
+ "cascadeDisk": {
+ "$ref": "v1.CascadeDiskVolumeSource",
+ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
}
}
},
@@ -2105,6 +2109,23 @@
},
"fsType": {
"type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+ },
+ "v1.CascadeDiskVolumeSource": {
+ "id": "v1.CascadeDiskVolumeSource",
+ "description": "Represents a Cascade persistent disk resource.",
+ "required": [
+ "diskID"
+ ],
+ "properties": {
+ "diskID": {
+ "type": "string",
+ "description": "ID that identifies Cascade persistent disk"
+ },
+ "fsType": {
+ "type": "string",
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
}
}
diff --no-dereference -uNr kubernetes-original/api/swagger-spec/apps_v1beta1.json kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/apps_v1beta1.json
--- kubernetes-original/api/swagger-spec/apps_v1beta1.json 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/apps_v1beta1.json 2018-03-23 21:44:53.000000000 +0000
@@ -4479,6 +4479,10 @@
"$ref": "v1.PhotonPersistentDiskVolumeSource",
"description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
},
+ "cascadeDisk": {
+ "$ref": "v1.CascadeDiskVolumeSource",
+ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+ },
"projected": {
"$ref": "v1.ProjectedVolumeSource",
"description": "Items for all in one resources secrets, configmaps, and downward API"
@@ -5202,6 +5206,23 @@
},
"fsType": {
"type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+ },
+ "v1.CascadeDiskVolumeSource": {
+ "id": "v1.CascadeDiskVolumeSource",
+ "description": "Represents a Cascade persistent disk resource.",
+ "required": [
+ "diskID"
+ ],
+ "properties": {
+ "diskID": {
+ "type": "string",
+ "description": "ID that identifies Cascade persistent disk"
+ },
+ "fsType": {
+ "type": "string",
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
}
}
diff --no-dereference -uNr kubernetes-original/api/swagger-spec/apps_v1beta2.json kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/apps_v1beta2.json
--- kubernetes-original/api/swagger-spec/apps_v1beta2.json 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/apps_v1beta2.json 2018-03-23 21:44:53.000000000 +0000
@@ -6845,6 +6845,10 @@
"$ref": "v1.PhotonPersistentDiskVolumeSource",
"description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
},
+ "cascadeDisk": {
+ "$ref": "v1.CascadeDiskVolumeSource",
+ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+ },
"projected": {
"$ref": "v1.ProjectedVolumeSource",
"description": "Items for all in one resources secrets, configmaps, and downward API"
@@ -7568,6 +7572,23 @@
},
"fsType": {
"type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+ },
+ "v1.CascadeDiskVolumeSource": {
+ "id": "v1.CascadeDiskVolumeSource",
+ "description": "Represents a Cascade persistent disk resource.",
+ "required": [
+ "diskID"
+ ],
+ "properties": {
+ "diskID": {
+ "type": "string",
+ "description": "ID that identifies Cascade persistent disk"
+ },
+ "fsType": {
+ "type": "string",
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
}
}
diff --no-dereference -uNr kubernetes-original/api/swagger-spec/batch_v1beta1.json kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/batch_v1beta1.json
--- kubernetes-original/api/swagger-spec/batch_v1beta1.json 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/batch_v1beta1.json 2018-03-23 21:44:53.000000000 +0000
@@ -1874,6 +1874,10 @@
"$ref": "v1.PhotonPersistentDiskVolumeSource",
"description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
},
+ "cascadeDisk": {
+ "$ref": "v1.CascadeDiskVolumeSource",
+ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+ },
"projected": {
"$ref": "v1.ProjectedVolumeSource",
"description": "Items for all in one resources secrets, configmaps, and downward API"
@@ -2597,6 +2601,23 @@
},
"fsType": {
"type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+ },
+ "v1.CascadeDiskVolumeSource": {
+ "id": "v1.CascadeDiskVolumeSource",
+ "description": "Represents a Cascade persistent disk resource.",
+ "required": [
+ "diskID"
+ ],
+ "properties": {
+ "diskID": {
+ "type": "string",
+ "description": "ID that identifies Cascade persistent disk"
+ },
+ "fsType": {
+ "type": "string",
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
}
}
diff --no-dereference -uNr kubernetes-original/api/swagger-spec/batch_v1.json kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/batch_v1.json
--- kubernetes-original/api/swagger-spec/batch_v1.json 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/batch_v1.json 2018-03-23 21:44:53.000000000 +0000
@@ -1819,6 +1819,10 @@
"$ref": "v1.PhotonPersistentDiskVolumeSource",
"description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
},
+ "cascadeDisk": {
+ "$ref": "v1.CascadeDiskVolumeSource",
+ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+ },
"projected": {
"$ref": "v1.ProjectedVolumeSource",
"description": "Items for all in one resources secrets, configmaps, and downward API"
@@ -2542,6 +2546,23 @@
},
"fsType": {
"type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+ },
+ "v1.CascadeDiskVolumeSource": {
+ "id": "v1.CascadeDiskVolumeSource",
+ "description": "Represents a Cascade persistent disk resource.",
+ "required": [
+ "diskID"
+ ],
+ "properties": {
+ "diskID": {
+ "type": "string",
+ "description": "ID that identifies Cascade persistent disk"
+ },
+ "fsType": {
+ "type": "string",
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
}
}
diff --no-dereference -uNr kubernetes-original/api/swagger-spec/batch_v2alpha1.json kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/batch_v2alpha1.json
--- kubernetes-original/api/swagger-spec/batch_v2alpha1.json 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/batch_v2alpha1.json 2018-03-23 21:44:53.000000000 +0000
@@ -1889,6 +1889,10 @@
"storageos": {
"$ref": "v1.StorageOSVolumeSource",
"description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
+ },
+ "cascadeDisk": {
+ "$ref": "v1.CascadeDiskVolumeSource",
+ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
}
}
},
@@ -2793,6 +2797,23 @@
}
}
},
+ "v1.CascadeDiskVolumeSource": {
+ "id": "v1.CascadeDiskVolumeSource",
+ "description": "Represents a Cascade persistent disk resource.",
+ "required": [
+ "diskID"
+ ],
+ "properties": {
+ "diskID": {
+ "type": "string",
+ "description": "ID that identifies Cascade persistent disk"
+ },
+ "fsType": {
+ "type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+ },
"v1.Container": {
"id": "v1.Container",
"description": "A single application container that you want to run within a pod.",
diff --no-dereference -uNr kubernetes-original/api/swagger-spec/extensions_v1beta1.json kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/extensions_v1beta1.json
--- kubernetes-original/api/swagger-spec/extensions_v1beta1.json 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/extensions_v1beta1.json 2018-03-23 21:44:53.000000000 +0000
@@ -7502,6 +7502,10 @@
"storageos": {
"$ref": "v1.StorageOSVolumeSource",
"description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
+ },
+ "cascadeDisk": {
+ "$ref": "v1.CascadeDiskVolumeSource",
+ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
}
}
},
@@ -8210,6 +8214,23 @@
},
"fsType": {
"type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+ },
+ "v1.CascadeDiskVolumeSource": {
+ "id": "v1.CascadeDiskVolumeSource",
+ "description": "Represents a Cascade persistent disk resource.",
+ "required": [
+ "diskID"
+ ],
+ "properties": {
+ "diskID": {
+ "type": "string",
+ "description": "ID that identifies Cascade persistent disk"
+ },
+ "fsType": {
+ "type": "string",
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
}
}
diff --no-dereference -uNr kubernetes-original/api/swagger-spec/settings.k8s.io_v1alpha1.json kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/settings.k8s.io_v1alpha1.json
--- kubernetes-original/api/swagger-spec/settings.k8s.io_v1alpha1.json 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/settings.k8s.io_v1alpha1.json 2018-03-23 21:44:53.000000000 +0000
@@ -1676,6 +1676,10 @@
"storageos": {
"$ref": "v1.StorageOSVolumeSource",
"description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
+ },
+ "cascadeDisk": {
+ "$ref": "v1.CascadeDiskVolumeSource",
+ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
}
}
},
@@ -2346,6 +2350,23 @@
},
"fsType": {
"type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+ },
+ "v1.CascadeDiskVolumeSource": {
+ "id": "v1.CascadeDiskVolumeSource",
+ "description": "Represents a Cascade persistent disk resource.",
+ "required": [
+ "diskID"
+ ],
+ "properties": {
+ "diskID": {
+ "type": "string",
+ "description": "ID that identifies Cascade persistent disk"
+ },
+ "fsType": {
+ "type": "string",
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
}
}
diff --no-dereference -uNr kubernetes-original/api/swagger-spec/v1.json kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/v1.json
--- kubernetes-original/api/swagger-spec/v1.json 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/api/swagger-spec/v1.json 2018-03-23 21:44:53.000000000 +0000
@@ -20629,6 +20629,10 @@
"$ref": "v1.PhotonPersistentDiskVolumeSource",
"description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
},
+ "cascadeDisk": {
+ "$ref": "v1.CascadeDiskVolumeSource",
+ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+ },
"portworxVolume": {
"$ref": "v1.PortworxVolumeSource",
"description": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine"
@@ -21200,6 +21204,23 @@
}
}
},
+ "v1.CascadeDiskVolumeSource": {
+ "id": "v1.CascadeDiskVolumeSource",
+ "description": "Represents a Cascade persistent disk resource.",
+ "required": [
+ "diskID"
+ ],
+ "properties": {
+ "diskID": {
+ "type": "string",
+ "description": "ID that identifies Cascade persistent disk"
+ },
+ "fsType": {
+ "type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+ },
"v1.PortworxVolumeSource": {
"id": "v1.PortworxVolumeSource",
"description": "PortworxVolumeSource represents a Portworx volume resource.",
@@ -21657,6 +21678,10 @@
"storageos": {
"$ref": "v1.StorageOSVolumeSource",
"description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
+ },
+ "cascadeDisk": {
+ "$ref": "v1.CascadeDiskVolumeSource",
+ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
}
}
},
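
The swagger changes above extend only the wire schema. Assuming the generated k8s.io/api/core/v1 types gain a matching CascadeDisk field (those generated files are not part of this hunk, so the Go names below are assumptions), consuming the new source from Go code would look roughly like this:

    // Hypothetical usage sketch; import v1 "k8s.io/api/core/v1".
    // v1.CascadeDiskVolumeSource is assumed to mirror the swagger definition added above.
    func cascadeVolume(diskID string) v1.Volume {
        return v1.Volume{
            Name: "data",
            VolumeSource: v1.VolumeSource{
                CascadeDisk: &v1.CascadeDiskVolumeSource{
                    DiskID: diskID, // required
                    FSType: "ext4", // optional; implicitly "ext4" when unspecified
                },
            },
        }
    }
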
diff --no-dereference -uNr kubernetes-original/cmd/kube-controller-manager/app/BUILD kubernetes-modified/src/k8s.io/kubernetes/cmd/kube-controller-manager/app/BUILD
--- kubernetes-original/cmd/kube-controller-manager/app/BUILD 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/cmd/kube-controller-manager/app/BUILD 2018-03-23 21:44:53.000000000 +0000
@@ -86,6 +86,7 @@
"//pkg/volume/aws_ebs:go_default_library",
"//pkg/volume/azure_dd:go_default_library",
"//pkg/volume/azure_file:go_default_library",
+ "//pkg/volume/cascade_disk:go_default_library",
"//pkg/volume/cinder:go_default_library",
"//pkg/volume/csi:go_default_library",
"//pkg/volume/fc:go_default_library",
diff --no-dereference -uNr kubernetes-original/cmd/kube-controller-manager/app/plugins.go kubernetes-modified/src/k8s.io/kubernetes/cmd/kube-controller-manager/app/plugins.go
--- kubernetes-original/cmd/kube-controller-manager/app/plugins.go 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/cmd/kube-controller-manager/app/plugins.go 2018-03-23 21:44:53.000000000 +0000
@@ -34,6 +34,7 @@
"k8s.io/kubernetes/pkg/volume/aws_ebs"
"k8s.io/kubernetes/pkg/volume/azure_dd"
"k8s.io/kubernetes/pkg/volume/azure_file"
+ "k8s.io/kubernetes/pkg/volume/cascade_disk"
"k8s.io/kubernetes/pkg/volume/cinder"
"k8s.io/kubernetes/pkg/volume/csi"
"k8s.io/kubernetes/pkg/volume/fc"
@@ -77,6 +78,7 @@
allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
}
@@ -106,6 +108,7 @@
allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
}
@@ -165,6 +168,7 @@
allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, photon_pd.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
return allPlugins
}
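
Both the provisioning and attach/detach controllers now probe the new plugin alongside the existing in-tree plugins. The pkg/volume/cascade_disk package referenced here is introduced elsewhere in this change; its entry point has to satisfy the same contract as the other plugins, roughly:

    // Sketch of the entry point wired in above; the concrete plugin type
    // (which must implement volume.VolumePlugin) is elided.
    package cascade_disk

    import "k8s.io/kubernetes/pkg/volume"

    func ProbeVolumePlugins() []volume.VolumePlugin {
        return []volume.VolumePlugin{ /* &cascadeDiskPlugin{} */ }
    }
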
diff --no-dereference -uNr kubernetes-original/cmd/kubelet/app/BUILD kubernetes-modified/src/k8s.io/kubernetes/cmd/kubelet/app/BUILD
--- kubernetes-original/cmd/kubelet/app/BUILD 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/cmd/kubelet/app/BUILD 2018-03-23 21:44:53.000000000 +0000
@@ -74,6 +74,7 @@
"//pkg/volume/aws_ebs:go_default_library",
"//pkg/volume/azure_dd:go_default_library",
"//pkg/volume/azure_file:go_default_library",
+ "//pkg/volume/cascade_disk:go_default_library",
"//pkg/volume/cephfs:go_default_library",
"//pkg/volume/cinder:go_default_library",
"//pkg/volume/configmap:go_default_library",
diff --no-dereference -uNr kubernetes-original/cmd/kubelet/app/plugins.go kubernetes-modified/src/k8s.io/kubernetes/cmd/kubelet/app/plugins.go
--- kubernetes-original/cmd/kubelet/app/plugins.go 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/cmd/kubelet/app/plugins.go 2018-03-23 21:44:53.000000000 +0000
@@ -32,6 +32,7 @@
"k8s.io/kubernetes/pkg/volume/aws_ebs"
"k8s.io/kubernetes/pkg/volume/azure_dd"
"k8s.io/kubernetes/pkg/volume/azure_file"
+ "k8s.io/kubernetes/pkg/volume/cascade_disk"
"k8s.io/kubernetes/pkg/volume/cephfs"
"k8s.io/kubernetes/pkg/volume/cinder"
"k8s.io/kubernetes/pkg/volume/configmap"
@@ -100,6 +101,7 @@
allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
}
diff --no-dereference -uNr kubernetes-original/pkg/apis/core/types.go kubernetes-modified/src/k8s.io/kubernetes/pkg/apis/core/types.go
--- kubernetes-original/pkg/apis/core/types.go 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/apis/core/types.go 2018-03-23 21:44:53.000000000 +0000
@@ -316,6 +316,8 @@
// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
// +optional
StorageOS *StorageOSVolumeSource
+ // CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
+ CascadeDisk *CascadeDiskVolumeSource
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@@ -394,6 +396,8 @@
// CSI (Container Storage Interface) represents storage that handled by an external CSI driver
// +optional
CSI *CSIPersistentVolumeSource
+ // CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
+ CascadeDisk *CascadeDiskVolumeSource
}
type PersistentVolumeClaimVolumeSource struct {
@@ -1471,6 +1475,16 @@
SecretRef *ObjectReference
}
+// Represents a Cascade persistent disk resource.
+type CascadeDiskVolumeSource struct {
+ // ID that identifies Cascade persistent disk
+ DiskID string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType string
+}
+
// Adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
diff --no-dereference -uNr kubernetes-original/pkg/apis/core/validation/validation.go kubernetes-modified/src/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
--- kubernetes-original/pkg/apis/core/validation/validation.go 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/apis/core/validation/validation.go 2018-03-23 21:44:53.000000000 +0000
@@ -681,6 +681,14 @@
allErrs = append(allErrs, validateScaleIOVolumeSource(source.ScaleIO, fldPath.Child("scaleIO"))...)
}
}
+ if source.CascadeDisk != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("cascadeDisk"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateCascadeDiskVolumeSource(source.CascadeDisk, fldPath.Child("cascadeDisk"))...)
+ }
+ }
if numVolumes == 0 {
allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
@@ -1440,6 +1448,14 @@
return allErrs
}
+func validateCascadeDiskVolumeSource(cd *core.CascadeDiskVolumeSource, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if len(cd.DiskID) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("diskID"), ""))
+ }
+ return allErrs
+}
+
// ValidatePersistentVolumeName checks that a name is appropriate for a
// PersistentVolumeName object.
var ValidatePersistentVolumeName = NameIsDNSSubdomain
@@ -1674,6 +1690,15 @@
}
}
+ if pv.Spec.CascadeDisk != nil {
+ if numVolumes > 0 {
+ allErrs = append(allErrs, field.Forbidden(specPath.Child("cascadeDisk"), "may not specify more than 1 volume type"))
+ } else {
+ numVolumes++
+ allErrs = append(allErrs, validateCascadeDiskVolumeSource(pv.Spec.CascadeDisk, specPath.Child("cascadeDisk"))...)
+ }
+ }
+
if numVolumes == 0 {
allErrs = append(allErrs, field.Required(specPath, "must specify a volume type"))
}
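
The new checks follow the existing single-source pattern: at most one volume source may be set, and diskID is required. A minimal same-package test sketch against the helper added above (using the core and field packages already imported by validation.go) could read:

    func TestValidateCascadeDiskVolumeSource(t *testing.T) {
        // An empty DiskID must yield a Required error on the diskID child path.
        errs := validateCascadeDiskVolumeSource(&core.CascadeDiskVolumeSource{}, field.NewPath("cascadeDisk"))
        if len(errs) != 1 {
            t.Fatalf("expected 1 error for empty diskID, got %d", len(errs))
        }
        // A populated DiskID passes.
        errs = validateCascadeDiskVolumeSource(&core.CascadeDiskVolumeSource{DiskID: "disk-1"}, field.NewPath("cascadeDisk"))
        if len(errs) != 0 {
            t.Fatalf("expected no errors, got %v", errs)
        }
    }
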
diff --no-dereference -uNr kubernetes-original/pkg/apis/extensions/types.go kubernetes-modified/src/k8s.io/kubernetes/pkg/apis/extensions/types.go
--- kubernetes-original/pkg/apis/extensions/types.go 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/apis/extensions/types.go 2018-03-23 21:44:53.000000000 +0000
@@ -925,6 +925,7 @@
PortworxVolume FSType = "portworxVolume"
ScaleIO FSType = "scaleIO"
CSI FSType = "csi"
+ CascadeDisk FSType = "cascadeDisk"
All FSType = "*"
)
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/BUILD kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/BUILD
--- kubernetes-original/pkg/cloudprovider/providers/BUILD 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/BUILD 2018-03-23 21:44:53.000000000 +0000
@@ -12,6 +12,7 @@
deps = [
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
+ "//pkg/cloudprovider/providers/cascade:go_default_library",
"//pkg/cloudprovider/providers/cloudstack:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/cloudprovider/providers/openstack:go_default_library",
@@ -34,6 +35,7 @@
":package-srcs",
"//pkg/cloudprovider/providers/aws:all-srcs",
"//pkg/cloudprovider/providers/azure:all-srcs",
+ "//pkg/cloudprovider/providers/cascade:all-srcs",
"//pkg/cloudprovider/providers/cloudstack:all-srcs",
"//pkg/cloudprovider/providers/fake:all-srcs",
"//pkg/cloudprovider/providers/gce:all-srcs",
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/cascade/apitypes.go kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/apitypes.go
--- kubernetes-original/pkg/cloudprovider/providers/cascade/apitypes.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/apitypes.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,227 @@
+package cascade
+
+import "fmt"
+
+const (
+ NotFoundError = 1408
+ VMNotFoundError = 2006
+ DiskNotFoundError = 3011
+ DiskInUseError = 3012
+)
+
+// Represents APIError returned by the API in case of an error.
+type APIError struct {
+ Code *string `json:"code"`
+ Data map[string]string `json:"data"`
+ ErrorCode int32 `json:"errorCode,omitempty"`
+ Message *string `json:"message"`
+ HttpStatusCode int `json:"-"` // Not part of API contract
+}
+
+// Implement Go error interface for APIError.
+func (e APIError) Error() string {
+ return fmt.Sprintf(
+ "Cascade: { HTTP status: '%d', code: '%s', message: '%s', data: '%v', errorcode: '%d' }",
+ e.HttpStatusCode, StringVal(e.Code), StringVal(e.Message), e.Data, e.ErrorCode)
+}
+
+// Used to represent a generic HTTP error, e.g. an unexpected HTTP 500.
+type HttpError struct {
+ StatusCode int
+ Message string
+}
+
+// Implementation of error interface for HttpError.
+func (e HttpError) Error() string {
+ return fmt.Sprintf("Cascade: HTTP %d: %v", e.StatusCode, e.Message)
+}
+
+// Represents a task which gets returned for long running API calls.
+type Task struct {
+ EndTime int64 `json:"endTime,omitempty"`
+ Entity *Entity `json:"entity,omitempty"`
+ ID *string `json:"id"`
+ Operation string `json:"operation,omitempty"`
+ QueuedTime *int64 `json:"queuedTime"`
+ ResourceProperties interface{} `json:"resourceProperties,omitempty"`
+ SelfLink string `json:"selfLink,omitempty"`
+ StartedTime *int64 `json:"startedTime"`
+ State *string `json:"state"`
+ Steps []*Step `json:"steps"`
+}
+
+// Represents the entity associated with the task.
+type Entity struct {
+ ID *string `json:"id"`
+ Kind *string `json:"kind"`
+}
+
+// Represents a task that has entered an error state. Task errors can be caught and type-checked with the
+// usual Go idiom.
+type TaskError struct {
+ ID string `json:"id"`
+ Step Step `json:"step,omitempty"`
+}
+
+// Implement Go error interface for TaskError.
+func (e TaskError) Error() string {
+ return fmt.Sprintf("Cascade: Task '%s' is in error state: {@step==%s}", e.ID, GetStep(e.Step))
+}
+
+// An error representing a timeout while waiting for a task to complete.
+type TaskTimeoutError struct {
+ ID string
+}
+
+// Implement Go error interface for TaskTimeoutError.
+func (e TaskTimeoutError) Error() string {
+ return fmt.Sprintf("Cascade: Timed out waiting for task '%s'. "+
+ "Task may not be in error state, examine task for full details.", e.ID)
+}
+
+// Represents a step in a task.
+type Step struct {
+ EndTime int64 `json:"endTime,omitempty"`
+ Errors []*APIError `json:"errors"`
+ Operation string `json:"operation,omitempty"`
+ Options map[string]string `json:"options,omitempty"`
+ QueuedTime *int64 `json:"queuedTime"`
+ Sequence int32 `json:"sequence,omitempty"`
+ StartedTime *int64 `json:"startedTime"`
+ State *string `json:"state"`
+ Warnings []*APIError `json:"warnings"`
+}
+
+// GetStep returns a printable summary of a task step, used when formatting TaskError messages.
+func GetStep(s Step) string {
+ return fmt.Sprintf("{\"operation\"=>\"%s\",\"state\"=>\"%s\"}", s.Operation, StringVal(s.State))
+}
+
+// Represents the VM response returned by the API.
+type VM struct {
+ AttachedDisks []*AttachedDisk `json:"attachedDisks"`
+ Cost []*QuotaLineItem `json:"cost"`
+ Flavor *string `json:"flavor"`
+ FloatingIP string `json:"floatingIp,omitempty"`
+ HighAvailableVMGroupID string `json:"highAvailableVMGroupID,omitempty"`
+ ID *string `json:"id"`
+ Kind string `json:"kind"`
+ Name *string `json:"name"`
+ SelfLink string `json:"selfLink,omitempty"`
+ SourceImageID string `json:"sourceImageId,omitempty"`
+ State *string `json:"state"`
+ Subnets []string `json:"subnets"`
+ Tags []string `json:"tags"`
+}
+
+// Represents the listVMs response returned by the API.
+type VMList struct {
+ Items []*VM `json:"items"`
+ NextPageLink string `json:"nextPageLink,omitempty"`
+ PreviousPageLink string `json:"previousPageLink,omitempty"`
+}
+
+// Represents multiple VMs returned by the API.
+type VMs struct {
+ Items []VM `json:"items"`
+}
+
+// Represents the disks attached to the VMs.
+type AttachedDisk struct {
+ BootDisk *bool `json:"bootDisk"`
+ CapacityGb *int32 `json:"capacityGb"`
+ Flavor *string `json:"flavor"`
+ ID *string `json:"id"`
+ Kind *string `json:"kind"`
+ Name *string `json:"name"`
+ State *string `json:"state"`
+}
+
+// Represents an attach disk operation request.
+type VMDiskOperation struct {
+ Arguments map[string]string `json:"arguments,omitempty"`
+ DiskID *string `json:"diskId"`
+}
+
+// Represents the quota line items for the VM.
+type QuotaLineItem struct {
+ Key *string `json:"key"`
+ Unit *string `json:"unit"`
+ Value *float64 `json:"value"`
+}
+
+// Represents a persistent disk
+type PersistentDisk struct {
+ CapacityGB int32 `json:"capacityGb,omitempty"`
+ Cost []*QuotaLineItem `json:"cost"`
+ Datastore string `json:"datastore,omitempty"`
+ Flavor *string `json:"flavor"`
+ ID *string `json:"id"`
+ Kind string `json:"kind"`
+ Name *string `json:"name"`
+ SelfLink string `json:"selfLink,omitempty"`
+ State *string `json:"state"`
+ Tags []string `json:"tags"`
+ VM string `json:"vm"`
+ MountDevice string `json:"mountDevice,omitempty"`
+ Zone *string `json:"zone"`
+}
+
+// Represents the spec for creating a disk.
+type DiskCreateSpec struct {
+ Affinities []*LocalitySpec `json:"affinities"`
+ CapacityGB *int32 `json:"capacityGb"`
+ Flavor *string `json:"flavor"`
+ Kind *string `json:"kind"`
+ Name *string `json:"name"`
+ Tags []string `json:"tags"`
+ Zone *string `json:"zone"`
+}
+
+// Represents the spec for specifying affinity for a disk with another entity.
+type LocalitySpec struct {
+ ID *string `json:"id"`
+ Kind *string `json:"kind"`
+}
+
+// Represents the LoadBalancer response returned by the API.
+type LoadBalancer struct {
+ Endpoint *string `json:"endpoint"`
+}
+
+// Represents the spec for creating a LoadBalancer.
+type LoadBalancerCreateSpec struct {
+ HealthCheck *LoadBalancerHealthCheck `json:"healthCheck"`
+ Name *string `json:"name"`
+ PortMaps []*LoadBalancerPortMap `json:"portMaps"`
+ Type *string `json:"type"`
+ SubDomain *string `json:"subDomain"`
+}
+
+// Represents the health check spec for a load balancer.
+type LoadBalancerHealthCheck struct {
+ HealthyThreshold int64 `json:"healthyThreshold,omitempty"`
+ IntervalInSeconds int64 `json:"intervalInSeconds,omitempty"`
+ Path *string `json:"path,omitempty"`
+ Port *int64 `json:"port"`
+ Protocol *string `json:"protocol"`
+}
+
+// Represents a port mapping spec for a load balancer.
+type LoadBalancerPortMap struct {
+ AllowedCidrs []*string `json:"allowedCidrs"`
+ InstancePort *int64 `json:"instancePort"`
+ InstanceProtocol *string `json:"instanceProtocol"`
+ LoadBalancerPort *int64 `json:"loadBalancerPort"`
+ LoadBalancerProtocol *string `json:"loadBalancerProtocol"`
+}
+
+// Represents a VM to be registered with or deregistered from the load balancer.
+type LoadBalancerVM struct {
+ ID *string `json:"id"`
+}
+
+// Represents a list of VMs to be registered with or deregistered from the load balancer.
+type LoadBalancerVMUpdate struct {
+ VMIds []*LoadBalancerVM `json:"vmIds"`
+}
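
The error codes and the APIError/TaskError types above are what callers are expected to switch on. For example, a helper that treats a missing disk as already deleted (the same idiom used in cascade_disks.go later in this patch) would look like:

    // Sketch of the error-handling idiom enabled by the types above (package cascade).
    func errIsDiskGone(err error) bool {
        if apiErr, ok := err.(APIError); ok {
            return apiErr.ErrorCode == DiskNotFoundError
        }
        return false
    }
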
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/cascade/auth.go kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/auth.go
--- kubernetes-original/pkg/cloudprovider/providers/cascade/auth.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/auth.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,145 @@
+package cascade
+
+import (
+ "fmt"
+ "strings"
+ "github.com/golang/glog"
+ "os/exec"
+)
+
+const (
+ tScope = "openid offline_access rs_admin_server at_groups rs_vmdir"
+
+ afdCli = "/opt/vmware/bin/vmafd-cli"
+ afdCliMachineAccountCmd = "get-machine-account-info"
+ afdCliPasswordPrefix = "Password: "
+ afdCliSeparator = "\n"
+)
+
+// AuthConfig contains configuration information for the authentication client.
+type AuthConfig struct {
+ tenantName string
+ authEndpoint string
+ machineAccountName string
+}
+
+// AuthClient defines functions related to authentication.
+type AuthClient struct {
+ cfg *AuthConfig
+}
+
+// NewAuthClient creates a new authentication client
+func NewAuthClient(cascadeCfg *CascadeConfig) (*AuthClient, error) {
+ return &AuthClient{
+ cfg: &AuthConfig{
+ tenantName: cascadeCfg.Global.TenantName,
+ authEndpoint: cascadeCfg.Global.AuthEndpoint,
+ machineAccountName: fmt.Sprintf("%s@%s", cascadeCfg.Global.DNSName, cascadeCfg.Global.DomainName),
+ },
+ }, nil
+}
+
+func (c *AuthClient) GetTokensByMachineAccount() (*TokenOptions, error) {
+ // Use the VMAFD CLI to get the machine account password
+ cmd := exec.Command(afdCli, afdCliMachineAccountCmd)
+ output, err := cmd.Output()
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to get machine account credentials. Cannot create Client.")
+ return nil, fmt.Errorf("Failed to get machine account credentials, err: %v", err)
+ }
+
+ password, err := parseMachineAccountInfo(output)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to parse machine account credentials. Cannot create Client.")
+ return nil, fmt.Errorf("Failed to parse machine account credentials, err: %v", err)
+ }
+
+ return c.GetTokensByCredentials(c.cfg.machineAccountName, password)
+}
+
+// GetTokensByPassword gets tokens using username and password
+func (c *AuthClient) GetTokensByCredentials(username, password string) (*TokenOptions, error) {
+ // Parse tenant part from username
+ parts := strings.Split(username, "@")
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("Invalid full user name '%s': expected user@tenant", username)
+ }
+ tenant := parts[1]
+
+ oidcClient, err := buildOIDCClient(c.cfg.authEndpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ tokenResponse, err := oidcClient.GetTokenByPasswordGrant(tenant, username, password)
+ if err != nil {
+ return nil, err
+ }
+
+ return toTokenOptions(tokenResponse), nil
+}
+
+// GetTokensByRefreshToken gets tokens using refresh token
+func (c *AuthClient) GetTokensByRefreshToken(refreshtoken string) (*TokenOptions, error) {
+ oidcClient, err := buildOIDCClient(c.cfg.authEndpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ tokenResponse, err := oidcClient.GetTokenByRefreshTokenGrant(c.cfg.tenantName, refreshtoken)
+ if err != nil {
+ return nil, err
+ }
+
+ return toTokenOptions(tokenResponse), nil
+}
+
+func buildOIDCClient(authEndpoint string) (*OIDCClient, error) {
+ options := &OIDCClientOptions{
+ IgnoreCertificate: true,
+ RootCAs: nil,
+ TokenScope: tScope,
+ }
+
+ return NewOIDCClient(authEndpoint, options, nil), nil
+}
+
+func toTokenOptions(response *OIDCTokenResponse) *TokenOptions {
+ return &TokenOptions{
+ AccessToken: response.AccessToken,
+ ExpiresIn: response.ExpiresIn,
+ RefreshToken: response.RefreshToken,
+ IDToken: response.IDToken,
+ TokenType: response.TokenType,
+ }
+}
+
+// parseMachineAccountInfo parses the machine account password from the machine-account-info output which looks like
+// this:
+//MachineAccount: photon-8rwdscr1.lw-testdom.com
+//Password: FT`])}]d/3\EPwRpz9k1
+func parseMachineAccountInfo(output []byte) (string, error) {
+ if len(output) <= 0 {
+ return "", fmt.Errorf("account info is not specified")
+ }
+
+ strOut := string(output)
+ strOutLen := len(strOut)
+
+ pwdStart := strings.Index(strOut, afdCliPasswordPrefix)
+ if pwdStart < 0 {
+ return "", fmt.Errorf("account info is not in expected format")
+ }
+ pwdStart = pwdStart + len(afdCliPasswordPrefix)
+ if pwdStart >= strOutLen {
+ return "", fmt.Errorf("account info is not in expected format")
+ }
+ pwdEnd := strings.LastIndex(strOut, afdCliSeparator)
+ if pwdEnd < 0 || pwdEnd <= pwdStart || pwdEnd >= strOutLen {
+ return "", fmt.Errorf("account info is not in expected format")
+ }
+
+ pwd := strOut[pwdStart:pwdEnd]
+
+ return pwd, nil
+}
\ No newline at end of file
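
parseMachineAccountInfo keeps everything between the "Password: " prefix and the last newline, so the sample output shown in its comment round-trips as follows (same-package test sketch):

    func TestParseMachineAccountInfo(t *testing.T) {
        out := []byte("MachineAccount: photon-8rwdscr1.lw-testdom.com\nPassword: FT`])}]d/3\\EPwRpz9k1\n")
        pwd, err := parseMachineAccountInfo(out)
        if err != nil || pwd != "FT`])}]d/3\\EPwRpz9k1" {
            t.Fatalf("unexpected result: %q, %v", pwd, err)
        }
        // Output without the "Password: " prefix is rejected.
        if _, err := parseMachineAccountInfo([]byte("MachineAccount: only")); err == nil {
            t.Fatal("expected an error for malformed account info")
        }
    }
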
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/cascade/BUILD kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/BUILD
--- kubernetes-original/pkg/cloudprovider/providers/cascade/BUILD 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/BUILD 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,44 @@
+package(default_visibility = ["//visibility:public"])
+
+load(
+ "@io_bazel_rules_go//go:def.bzl",
+ "go_library",
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "apitypes.go",
+ "auth.go",
+ "cascade.go",
+ "cascade_disks.go",
+ "cascade_instances.go",
+ "cascade_loadbalancer.go",
+ "client.go",
+ "oidcclient.go",
+ "restclient.go",
+ "utils.go"
+ ],
+ deps = [
+ "//pkg/api/v1/helper:go_default_library",
+ "//pkg/cloudprovider:go_default_library",
+ "//pkg/controller:go_default_library",
+ "//vendor/github.com/golang/glog:go_default_library",
+ "//vendor/gopkg.in/gcfg.v1:go_default_library",
+ "//vendor/k8s.io/api/core/v1:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+ ],
+)
+
+filegroup(
+ name = "package-srcs",
+ srcs = glob(["**"]),
+ tags = ["automanaged"],
+ visibility = ["//visibility:private"],
+)
+
+filegroup(
+ name = "all-srcs",
+ srcs = [":package-srcs"],
+ tags = ["automanaged"],
+)
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/cascade/cascade_disks.go kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/cascade_disks.go
--- kubernetes-original/pkg/cloudprovider/providers/cascade/cascade_disks.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/cascade_disks.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,225 @@
+package cascade
+
+import (
+ "github.com/golang/glog"
+ k8stypes "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/kubernetes/pkg/kubelet/apis"
+ "k8s.io/kubernetes/pkg/volume"
+)
+
+// Attaches given virtual disk volume to the node running kubelet.
+func (cc *CascadeCloud) AttachDisk(diskID string, nodeName k8stypes.NodeName) (string, error) {
+ // Check if disk is already attached to that node.
+ attached, err := cc.DiskIsAttached(diskID, nodeName)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: cc.DiskIsAttached failed during AttachDisk. Error[%v]", err)
+ return "", err
+ }
+
+ // If not already attached, attach the disk.
+ if !attached {
+ operation := &VMDiskOperation{
+ DiskID: StringPtr(diskID),
+ }
+
+ vmID, err := cc.InstanceID(nodeName)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for AttachDisk. Error[%v]", err)
+ return "", err
+ }
+
+ task, err := cc.apiClient.AttachDisk(vmID, operation)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to attach disk with ID %s. Error[%v]", diskID, err)
+ return "", err
+ }
+
+ _, err = cc.apiClient.WaitForTask(StringVal(task.ID))
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to wait for task to attach disk with ID %s. Error[%v]",
+ diskID, err)
+ return "", err
+ }
+ }
+
+ // Get mount device of the attached disk.
+ disk, err := cc.apiClient.GetDisk(diskID)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to Get disk with diskID %s. Error[%v]", diskID, err)
+ return "", err
+ }
+
+ return disk.MountDevice, nil
+}
+
+// Detaches given virtual disk volume from the node running kubelet.
+func (cc *CascadeCloud) DetachDisk(diskID string, nodeName k8stypes.NodeName) error {
+ operation := &VMDiskOperation{
+ DiskID: StringPtr(diskID),
+ }
+
+ vmID, err := cc.InstanceID(nodeName)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for DetachDisk. Error[%v]", err)
+ return err
+ }
+
+ task, err := cc.apiClient.DetachDisk(vmID, operation)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to detach disk with pdID %s. Error[%v]", diskID, err)
+ return err
+ }
+
+ _, err = cc.apiClient.WaitForTask(StringVal(task.ID))
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to wait for task to detach disk with pdID %s. Error[%v]",
+ diskID, err)
+ return err
+ }
+
+ return nil
+}
+
+// DiskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
+func (cc *CascadeCloud) DiskIsAttached(diskID string, nodeName k8stypes.NodeName) (bool, error) {
+ vmID, err := cc.InstanceID(nodeName)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for DiskIsAttached. Error[%v]", err)
+ return false, err
+ }
+
+ _, err = cc.apiClient.GetVM(vmID)
+ if err != nil {
+ switch err.(type) {
+ case APIError:
+ if err.(APIError).ErrorCode == VMNotFoundError {
+ // If instance no longer exists, we will assume that the volume is not attached.
+ glog.Warningf("Cascade Cloud Provider: Instance %s does not exist. DiskIsAttached will assume"+
+ " disk %s is not attached to it.", nodeName, diskID)
+ return false, nil
+ }
+ }
+ return false, err
+ }
+
+ disk, err := cc.apiClient.GetDisk(diskID)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to Get disk with diskID %s. Error[%v]", diskID, err)
+ return false, err
+ }
+
+ if disk.VM == vmID {
+ return true, nil
+ }
+
+ return false, nil
+}
+
+// DisksAreAttached returns if disks are attached to the VM using controllers supported by the plugin.
+func (cc *CascadeCloud) DisksAreAttached(diskIDs []string, nodeName k8stypes.NodeName) (map[string]bool, error) {
+ attached := make(map[string]bool)
+ for _, diskID := range diskIDs {
+ attached[diskID] = false
+ }
+
+ vmID, err := cc.InstanceID(nodeName)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for DisksAreAttached. Error[%v]", err)
+ return attached, err
+ }
+
+ for _, diskID := range diskIDs {
+ disk, err := cc.apiClient.GetDisk(diskID)
+ if err != nil {
+ glog.Warningf("Cascade Cloud Provider: failed to get persistent disk %s, err [%v]",
+ diskID, err)
+ } else {
+ if disk.VM == vmID {
+ attached[diskID] = true
+ }
+ }
+ }
+
+ return attached, nil
+}
+
+// Create a volume of given size (in GB).
+func (cc *CascadeCloud) CreateDisk(volumeOptions *VolumeOptions) (diskID string, err error) {
+ // Get Zones for the cluster
+ zones, err := cc.apiClient.GetZones()
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to Get zones for the cluster. Error[%v]", err)
+ return "", err
+ }
+
+ // Pick a zone to place the disk in.
+ zoneSet := sets.NewString()
+ for _, zone := range zones {
+ zoneSet.Insert(zone)
+ }
+ zone := volume.ChooseZoneForVolume(zoneSet, volumeOptions.Name)
+
+ diskSpec := DiskCreateSpec{}
+ diskSpec.Name = StringPtr(volumeOptions.Name)
+ diskSpec.Flavor = StringPtr(volumeOptions.Flavor)
+ diskSpec.CapacityGB = Int32Ptr(int32(volumeOptions.CapacityGB))
+ diskSpec.Kind = StringPtr(DiskSpecKind)
+ diskSpec.Zone = StringPtr(zone)
+
+ task, err := cc.apiClient.CreateDisk(&diskSpec)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to CreateDisk. Error[%v]", err)
+ return "", err
+ }
+
+ waitTask, err := cc.apiClient.WaitForTask(StringVal(task.ID))
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to wait for task to CreateDisk. Error[%v]", err)
+ return "", err
+ }
+
+ return StringVal(waitTask.Entity.ID), nil
+}
+
+// Deletes a volume given volume name.
+func (cc *CascadeCloud) DeleteDisk(diskID string) error {
+ task, err := cc.apiClient.DeleteDisk(diskID)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to DeleteDisk. Error[%v]", err)
+ // If we get a DiskNotFound error, we assume that the disk is already deleted. So we don't return an error here.
+ switch err.(type) {
+ case APIError:
+ if err.(APIError).ErrorCode == DiskNotFoundError {
+ return nil
+ }
+ if err.(APIError).ErrorCode == DiskInUseError {
+ return volume.NewDeletedVolumeInUseError(err.Error())
+ }
+ }
+ return err
+ }
+
+ _, err = cc.apiClient.WaitForTask(StringVal(task.ID))
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to wait for task to DeleteDisk. Error[%v]", err)
+ return err
+ }
+
+ return nil
+}
+
+// Gets the zone and region for the volume.
+func (cc *CascadeCloud) GetVolumeLabels(diskID string) (map[string]string, error) {
+ disk, err := cc.apiClient.GetDisk(diskID)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to GetDisk for GetVolumeLabels. Error[%v]", err)
+ return nil, err
+ }
+
+ labels := make(map[string]string)
+ labels[apis.LabelZoneFailureDomain] = StringVal(disk.Zone)
+ labels[apis.LabelZoneRegion] = cc.cfg.Global.Region
+
+ return labels, nil
+}
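
Taken together, the dynamic-provisioning path drives these methods in the order CreateDisk, then AttachDisk on the target node, then mounting by the returned device path. A compressed sketch (the volume plugin that actually performs these calls lives in pkg/volume/cascade_disk; the flavor value is illustrative):

    // Illustrative flow only; error handling mirrors the methods above.
    func provisionAndAttach(cc *CascadeCloud, node k8stypes.NodeName) (devicePath string, err error) {
        diskID, err := cc.CreateDisk(&VolumeOptions{
            Name:       "pvc-0000",
            CapacityGB: 10,
            Flavor:     "standard", // assumed flavor name
        })
        if err != nil {
            return "", err
        }
        return cc.AttachDisk(diskID, node) // returns the disk's mount device
    }
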
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/cascade/cascade.go kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/cascade.go
--- kubernetes-original/pkg/cloudprovider/providers/cascade/cascade.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/cascade.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,216 @@
+// The use of Cascade cloud provider requires the kubelet, kube-apiserver, and kube-controller-manager to be started
+// with config flag: '--cloud-provider=cascade --cloud-config=[path_to_config_file]'.
+package cascade
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "github.com/golang/glog"
+ "gopkg.in/gcfg.v1"
+ k8stypes "k8s.io/apimachinery/pkg/types"
+ "k8s.io/kubernetes/pkg/cloudprovider"
+ "k8s.io/kubernetes/pkg/controller"
+ "strings"
+)
+
+const (
+ ProviderName = "cascade"
+ DiskSpecKind = "persistent-disk"
+ MasterPrefix = "master"
+)
+
+// CascadeCloud is an implementation of the cloud provider interface for Cascade Controller.
+type CascadeCloud struct {
+ cfg *CascadeConfig
+ // Authentication client to get token for Cascade API calls
+ authClient *AuthClient
+ // API Client to make Cascade API calls
+ apiClient *Client
+ // local $HOSTNAME
+ localHostname string
+ // hostname from K8S, could be overridden
+ localK8sHostname string
+}
+
+// CascadeConfig represents the Cascade cloud provider's configuration.
+type CascadeConfig struct {
+ Global struct {
+ // the Cascade Controller endpoint
+ CloudTarget string `gcfg:"target"`
+ // Cascade Controller tenantName name
+ TenantName string `gcfg:"tenantName"`
+ // Cascade Controller cluster ID
+ ClusterID string `gcfg:"clusterID"`
+ // Authentication server endpoint for Cascade Controller
+ AuthEndpoint string `gcfg:"authEndpoint"`
+ // Lightwave domain name for the node
+ DomainName string `gcfg:"domainName"`
+ // DNS name of the node.
+ DNSName string `gcfg:"dnsName"`
+ // Region in which the cluster is in
+ Region string `gcfg:"region"`
+ // Availability zone in which the cluster is in
+ Zone string `gcfg:"zone"`
+ }
+}
+
+// Disks is an interface for managing Cascade Controller persistent disks.
+type Disks interface {
+ // AttachDisk attaches given disk to given node. Current node
+ // is used when nodeName is empty string.
+ AttachDisk(diskID string, nodeName k8stypes.NodeName) (string, error)
+
+ // DetachDisk detaches given disk from given node. Current node
+ // is used when nodeName is empty string.
+ DetachDisk(diskID string, nodeName k8stypes.NodeName) error
+
+ // DiskIsAttached checks if a disk is attached to the given node.
+ DiskIsAttached(diskID string, nodeName k8stypes.NodeName) (bool, error)
+
+ // DisksAreAttached is a batch function to check if a list of disks are attached
+ // to the node with the specified NodeName.
+ DisksAreAttached(diskID []string, nodeName k8stypes.NodeName) (map[string]bool, error)
+
+ // CreateDisk creates a new PD with given properties.
+ CreateDisk(volumeOptions *VolumeOptions) (diskID string, err error)
+
+ // DeleteDisk deletes PD.
+ DeleteDisk(diskID string) error
+
+ // Get labels to apply to volume on creation.
+ GetVolumeLabels(diskID string) (map[string]string, error)
+}
+
+// VolumeOptions specifies capacity, tags, name and flavor for a volume.
+type VolumeOptions struct {
+ CapacityGB int
+ Tags map[string]string
+ Name string
+ Flavor string
+}
+
+func readConfig(config io.Reader) (*CascadeConfig, error) {
+ if config == nil {
+ err := fmt.Errorf("Cascade Cloud Provider: config file is missing. Please restart with " +
+ "--cloud-provider=cascade --cloud-config=[path_to_config_file]")
+ return nil, err
+ }
+
+ var cfg CascadeConfig
+ err := gcfg.ReadInto(&cfg, config)
+ return &cfg, err
+}
+
+func init() {
+ cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
+ cfg, err := readConfig(config)
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: failed to read in cloud provider config file. Error[%v]", err)
+ return nil, err
+ }
+ return newCascadeCloud(cfg)
+ })
+}
+
+func newCascadeCloud(cfg *CascadeConfig) (*CascadeCloud, error) {
+ if len(cfg.Global.CloudTarget) == 0 {
+ return nil, fmt.Errorf("Cascade Controller endpoint was not specified.")
+ }
+
+ // Get local hostname
+ hostname, err := os.Hostname()
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: get hostname failed. Error[%v]", err)
+ return nil, err
+ }
+
+ cc := CascadeCloud{
+ cfg: cfg,
+ localHostname: hostname,
+ localK8sHostname: "",
+ }
+
+ // Instantiate the auth and API clients only on the master nodes. Kubelets running on the workers don't need them as
+ // they are used primarily for making API calls to Cascade.
+ if strings.HasPrefix(hostname, MasterPrefix) {
+ if cc.authClient, err = NewAuthClient(cfg); err != nil {
+ return nil, err
+ }
+
+ if cc.apiClient, err = NewClient(cfg, cc.authClient); err != nil {
+ return nil, err
+ }
+ }
+
+ return &cc, nil
+}
+
+// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
+func (cc *CascadeCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
+
+// Instances returns an implementation of Instances for Cascade Controller.
+func (cc *CascadeCloud) Instances() (cloudprovider.Instances, bool) {
+ return cc, true
+}
+
+// List is an implementation of Instances.List.
+func (cc *CascadeCloud) List(filter string) ([]k8stypes.NodeName, error) {
+ return nil, errors.New("unimplemented")
+}
+
+func (cc *CascadeCloud) Clusters() (cloudprovider.Clusters, bool) {
+ return nil, true
+}
+
+// ProviderName returns the cloud provider ID.
+func (cc *CascadeCloud) ProviderName() string {
+ return ProviderName
+}
+
+// LoadBalancer returns an implementation of LoadBalancer for Cascade Controller.
+func (cc *CascadeCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
+ return cc, true
+}
+
+// Zones returns an implementation of Zones for Cascade Controller.
+func (cc *CascadeCloud) Zones() (cloudprovider.Zones, bool) {
+ return cc, true
+}
+
+func (cc *CascadeCloud) GetZone() (cloudprovider.Zone, error) {
+ return cloudprovider.Zone{
+ Region: cc.cfg.Global.Region,
+ FailureDomain: cc.cfg.Global.Zone,
+ }, nil
+}
+
+// GetZoneByProviderID implements Zones.GetZoneByProviderID
+// This is particularly useful in external cloud providers where the kubelet
+// does not initialize node data.
+func (cc *CascadeCloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
+ return cloudprovider.Zone{}, errors.New("unimplemented")
+}
+
+// GetZoneByNodeName implements Zones.GetZoneByNodeName
+// This is particularly useful in external cloud providers where the kubelet
+// does not initialize node data.
+func (cc *CascadeCloud) GetZoneByNodeName(nodeName k8stypes.NodeName) (cloudprovider.Zone, error) {
+ return cloudprovider.Zone{}, errors.New("unimplemented")
+}
+
+// Routes returns a false since the interface is not supported for Cascade controller.
+func (cc *CascadeCloud) Routes() (cloudprovider.Routes, bool) {
+ return nil, false
+}
+
+// ScrubDNS filters DNS settings for pods.
+func (cc *CascadeCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
+ return nameservers, searches
+}
+
+// HasClusterID returns true if the cluster has a clusterID
+func (cc *CascadeCloud) HasClusterID() bool {
+ return true
+}
\ No newline at end of file
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/cascade/cascade_instances.go kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/cascade_instances.go
--- kubernetes-original/pkg/cloudprovider/providers/cascade/cascade_instances.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/cascade_instances.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,90 @@
+package cascade
+
+import (
+ "k8s.io/api/core/v1"
+ k8stypes "k8s.io/apimachinery/pkg/types"
+ "errors"
+ "strings"
+)
+
+// NodeAddresses is an implementation of Instances.NodeAddresses. In the future, private IP address, external IP, etc.
+// will be added based on need.
+func (cc *CascadeCloud) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) {
+ addresses := []v1.NodeAddress{}
+ addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: cc.cfg.Global.DNSName})
+ return addresses, nil
+}
+
+// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
+// This method will not be called from the node that is requesting this ID. i.e. metadata service
+// and other local methods cannot be used here
+func (cc *CascadeCloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
+ // Get the name of the VM using the ID and generate the DNS name based on the VM name.
+ vm, err := cc.apiClient.GetVM(providerID)
+ if err != nil {
+ return nil, err
+ }
+ // Get the DNS name for the master VM and replace the VM name portion with the requested VM name.
+ dnsNameParts := strings.SplitN(cc.cfg.Global.DNSName, ".", 2)
+ if len(dnsNameParts) != 2 {
+ return nil, errors.New("Cascade cloud provider: Invalid DNS name specified in the configuration. " +
+ "Cannot get NodeAddressesByProviderID.")
+ }
+ dnsAddress := StringVal(vm.Name) + "." + dnsNameParts[1]
+ addresses := []v1.NodeAddress{}
+ addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: dnsAddress})
+ return addresses, nil
+}
+
+func (cc *CascadeCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
+ return errors.New("unimplemented")
+}
+
+// CurrentNodeName returns the node name based on the host name. For Cascade Kubernetes nodes, the host name is used
+// as the node name.
+func (cc *CascadeCloud) CurrentNodeName(hostname string) (k8stypes.NodeName, error) {
+ cc.localK8sHostname = hostname
+ return k8stypes.NodeName(hostname), nil
+}
+
+// ExternalID returns the cloud provider ID of the specified instance (deprecated).
+// Note: We do not call Cascade Controller here to check whether the instance is alive, because that would require the
+// worker nodes to also log in to Cascade Controller. Kubernetes uses that check to proactively remove nodes that
+// the cloud provider believes are no longer available. Even without the check, Kubernetes will remove those nodes
+// eventually, so we are not losing much by skipping it.
+func (cc *CascadeCloud) ExternalID(nodeName k8stypes.NodeName) (string, error) {
+ return getInstanceIDFromNodeName(nodeName)
+}
+
+// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
+// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
+func (cc *CascadeCloud) InstanceExistsByProviderID(providerID string) (bool, error) {
+ return false, errors.New("unimplemented")
+}
+
+// InstanceID returns the cloud provider ID of the specified instance.
+func (cc *CascadeCloud) InstanceID(nodeName k8stypes.NodeName) (string, error) {
+ return getInstanceIDFromNodeName(nodeName)
+}
+
+// This gets the Cascade VM ID from the Kubernetes node name.
+func getInstanceIDFromNodeName(nodeName k8stypes.NodeName) (string, error) {
+	// nodeName is of the format master-<instance-id> or worker-<instance-id>. The instance ID is the portion after
+	// the first "-".
+ nodeParts := strings.SplitN(string(nodeName), "-", 2)
+ if len(nodeParts) != 2 {
+ return "", errors.New("Cascade cloud provider: Invalid node name. Cannot fetch instance ID.")
+ }
+ return nodeParts[1], nil
+}
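+
+// For example (illustrative), a node named "worker-9f3c2a" yields the instance ID "9f3c2a", while a node name
+// without a "-" separator results in an error.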
+
+// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
+// This method will not be called from the node that is requesting this ID. i.e. metadata service
+// and other local methods cannot be used here
+func (cc *CascadeCloud) InstanceTypeByProviderID(providerID string) (string, error) {
+ return "", errors.New("unimplemented")
+}
+
+func (cc *CascadeCloud) InstanceType(nodeName k8stypes.NodeName) (string, error) {
+ return "", nil
+}
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go
--- kubernetes-original/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,284 @@
+package cascade
+
+import (
+ "fmt"
+ "github.com/golang/glog"
+ "k8s.io/api/core/v1"
+ "k8s.io/kubernetes/pkg/api/v1/service"
+ "k8s.io/kubernetes/pkg/cloudprovider"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+const TCP_PROTOCOL = "TCP"
+
+const HTTP_PROTOCOL = "HTTP"
+
+// EnsureLoadBalancer creates or updates a Cascade load balancer
+func (cc *CascadeCloud) EnsureLoadBalancer(clusterName string, k8sService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
+ logger := newLoadBalancerLogger(clusterName, k8sService, "EnsureLoadBalancer")
+
+ loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
+ logger.Infof("Load balancer name: %s", loadBalancerName)
+
+ // Sanity checks
+ if k8sService.Spec.SessionAffinity != v1.ServiceAffinityNone {
+ logger.Errorf("Unsupported load balancer session affinity: %+v", k8sService.Spec.SessionAffinity)
+ return nil, fmt.Errorf("Unsupported load balancer session affinity: %+v", k8sService.Spec.SessionAffinity)
+ }
+
+ if len(k8sService.Spec.Ports) == 0 {
+ logger.Errorf("No port mapping is specified")
+ return nil, fmt.Errorf("No port mapping is specified")
+ }
+
+ // Create load balancer port maps
+ portMaps := []*LoadBalancerPortMap{}
+ for _, port := range k8sService.Spec.Ports {
+ if port.Protocol != v1.ProtocolTCP {
+ logger.Warningf("Ignoring port that does not use TCP protocol: %+v", port)
+ continue
+ }
+
+ if port.NodePort == 0 {
+ logger.Warningf("Ignoring port without node port defined: %+v", port)
+ continue
+ }
+
+ // TODO: For now we only support SSL pass through. All port mappings are using TCP protocol.
+ // Also note that we allow all external traffic to access the ports.
+ portMap := &LoadBalancerPortMap{
+ InstancePort: Int64Ptr(int64(port.NodePort)),
+ InstanceProtocol: StringPtr(TCP_PROTOCOL),
+ LoadBalancerPort: Int64Ptr(int64(port.Port)),
+ LoadBalancerProtocol: StringPtr(TCP_PROTOCOL),
+ }
+ portMaps = append(portMaps, portMap)
+ }
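+
+	// For example (illustrative), a Service port 80 with NodePort 30080 becomes a port map from load balancer
+	// port 80 to instance port 30080, both using the TCP protocol.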
+
+	// If every port was skipped above, there is nothing to map. Fail early instead of indexing an empty
+	// portMaps slice when setting up the health check below.
+	if len(portMaps) == 0 {
+		logger.Errorf("No TCP port mapping with a node port is specified")
+		return nil, fmt.Errorf("No TCP port mapping with a node port is specified")
+	}
+ // Create load balancer health check
+ healthCheck := &LoadBalancerHealthCheck{
+ HealthyThreshold: 5,
+ IntervalInSeconds: 10,
+ }
+ if healthCheckPath, healthCheckNodePort := service.GetServiceHealthCheckPathPort(k8sService); healthCheckPath != "" {
+ logger.Infof("HTTP health checks on: %s:%d", healthCheckPath, healthCheckNodePort)
+ healthCheck.Path = StringPtr(healthCheckPath)
+ healthCheck.Port = Int64Ptr(int64(healthCheckNodePort))
+ healthCheck.Protocol = StringPtr(HTTP_PROTOCOL)
+ } else {
+ logger.Infof("TCP health check on port: %d", Int64Val(portMaps[0].InstancePort))
+ healthCheck.Port = portMaps[0].InstancePort
+ healthCheck.Protocol = StringPtr(TCP_PROTOCOL)
+ }
+
+ // Create load balancer
+ createSpec := &LoadBalancerCreateSpec{
+ Name: StringPtr(loadBalancerName),
+ Type: StringPtr("PUBLIC"),
+ PortMaps: portMaps,
+ HealthCheck: healthCheck,
+ SubDomain: StringPtr(k8sService.Name),
+ }
+ logger.Infof("Load balancer create spec: %+v", *createSpec)
+
+ task, err := cc.apiClient.CreateOrUpdateLoadBalancer(createSpec)
+ if err != nil {
+ logger.Errorf("Failed to create or update load balancer. Error: [%v]", err)
+ return nil, err
+ }
+
+ _, err = cc.apiClient.WaitForTask(StringVal(task.ID))
+ if err != nil {
+ logger.Errorf("Failed to poll task status of creating or updating load balancer. Error: [%v]", err)
+ return nil, err
+ }
+
+ // Apply VM update to load balancer
+ err = cc.updateLoadBalancerVMs(nodes, loadBalancerName, logger)
+ if err != nil {
+ // The private function already did logging. No need to log again.
+ return nil, err
+ }
+
+ // Get load balancer
+ loadBalancer, err := cc.apiClient.GetLoadBalancer(StringPtr(loadBalancerName))
+ if err != nil {
+ glog.Errorf("Failed to get load balancer. Error: [%v]", err)
+ return nil, err
+ }
+
+ return toLoadBalancerStatus(loadBalancer), nil
+}
+
+// GetLoadBalancer returns the information about a Cascade load balancer
+func (cc *CascadeCloud) GetLoadBalancer(clusterName string, k8sService *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
+ logger := newLoadBalancerLogger(clusterName, k8sService, "GetLoadBalancer")
+
+ loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
+ logger.Infof("Load balancer name: %s", loadBalancerName)
+
+ // Get load balancer
+ loadBalancer, err := cc.apiClient.GetLoadBalancer(StringPtr(loadBalancerName))
+ if err != nil {
+ logger.Errorf("Failed to get load balancer. Error: [%v]", err)
+ // Do not return error here because we want the caller of this function to determine
+ // what to do with the not-found situation.
+ switch err.(type) {
+ case APIError:
+ if err.(APIError).ErrorCode == NotFoundError {
+ return nil, false, nil
+ }
+ }
+ return nil, false, err
+ }
+
+ return toLoadBalancerStatus(loadBalancer), true, nil
+}
+
+// UpdateLoadBalancer updates the node information of a Cascade load balancer
+func (cc *CascadeCloud) UpdateLoadBalancer(clusterName string, k8sService *v1.Service, nodes []*v1.Node) error {
+ logger := newLoadBalancerLogger(clusterName, k8sService, "UpdateLoadBalancer")
+
+ loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
+ logger.Infof("Load balancer name: %s", loadBalancerName)
+
+ err := cc.updateLoadBalancerVMs(nodes, loadBalancerName, logger)
+ if err != nil {
+ // The private function already did logging. No need to log again.
+ return err
+ }
+
+ return nil
+}
+
+// EnsureLoadBalancerDeleted deletes a Cascade load balancer
+func (cc *CascadeCloud) EnsureLoadBalancerDeleted(clusterName string, k8sService *v1.Service) error {
+ logger := newLoadBalancerLogger(clusterName, k8sService, "EnsureLoadBalancerDeleted")
+
+ loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
+ logger.Infof("Load balancer name: %s", loadBalancerName)
+
+ task, err := cc.apiClient.DeleteLoadBalancer(StringPtr(loadBalancerName))
+ if err != nil {
+ logger.Errorf("Failed to delete load balancer. Error: [%v]", err)
+ // If we get a NotFound error, we assume that the load balancer is already deleted. So we don't return an error
+ // here.
+ switch err.(type) {
+ case APIError:
+ if err.(APIError).ErrorCode == NotFoundError {
+ return nil
+ }
+ }
+ return err
+ }
+
+ _, err = cc.apiClient.WaitForTask(StringVal(task.ID))
+ if err != nil {
+ logger.Errorf("Failed to poll task status of deleting load balancer. Error: [%v]", err)
+ return err
+ }
+
+ return nil
+}
+
+func (cc *CascadeCloud) updateLoadBalancerVMs(
+ nodes []*v1.Node, loadBalancerName string, logger *loadBalancerLogger) error {
+
+ // Apply VM update to the load balancer
+ loadBalancerVMs := make([]*LoadBalancerVM, 0)
+
+ for _, node := range(nodes) {
+ // If the node does not have a name, we cannot derive its instance ID. Therefore we skip this node.
+ if len(node.Name) == 0 {
+ logger.Warningf("Node %s does not have a name. Skip updating this VM for load balancer", node.UID)
+ continue
+ }
+
+ // If we cannot get the instance ID, something is wrong on the Cascade Controller side.
+ // However, we should tolerate such failure and continue the load balancer VM update
+ // by skipping this VM.
+ instanceID, err := cc.InstanceID(types.NodeName(node.Name))
+ if err != nil {
+ logger.Warningf("Unable to get instance ID for node %s, skip updating this VM for load balancer. Error [%v]", node.Name, err)
+ continue
+ }
+
+ loadBalancerVMs = append(loadBalancerVMs, &LoadBalancerVM{
+ ID: StringPtr(instanceID),
+ })
+ }
+
+ if len(loadBalancerVMs) == 0 {
+ logger.Infof("No nodes to be added to the load balancer. Skip updating load balancer VMs")
+ return nil
+ }
+
+ vmUpdate := &LoadBalancerVMUpdate{
+ VMIds: loadBalancerVMs,
+ }
+ logger.Infof("Load balancer VM update spec: %+v", vmUpdate.VMIds)
+
+ task, err := cc.apiClient.ApplyVMsToLoadBalancer(StringPtr(loadBalancerName), vmUpdate)
+ if err != nil {
+ logger.Errorf("Failed to update load balancer VMs. Error: [%v]", err)
+ return err
+ }
+
+ _, err = cc.apiClient.WaitForTask(StringVal(task.ID))
+ if err != nil {
+ logger.Errorf("Failed to poll task status of updating load balancer VMs. Error: [%v]", err)
+ return err
+ }
+
+ return nil
+}
+
+func toLoadBalancerStatus(lb *LoadBalancer) *v1.LoadBalancerStatus {
+ var endpoint string
+ if lb != nil && lb.Endpoint != nil {
+ endpoint = StringVal(lb.Endpoint)
+ }
+
+ return &v1.LoadBalancerStatus{
+ Ingress: []v1.LoadBalancerIngress{
+ {
+ Hostname: endpoint,
+ },
+ },
+ }
+}
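+
+// For example (illustrative), a load balancer whose Endpoint is "lb-1234.example.com" is reported as a
+// LoadBalancerStatus with a single ingress whose Hostname is that endpoint; a nil load balancer or endpoint results
+// in an empty Hostname.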
+
+type loadBalancerLogger struct {
+ clusterName string
+ k8sService *v1.Service
+ callingFunc string
+}
+
+func newLoadBalancerLogger(clusterName string, k8sService *v1.Service, callingFunc string) *loadBalancerLogger {
+ return &loadBalancerLogger{
+ clusterName: clusterName,
+ k8sService: k8sService,
+ callingFunc: callingFunc,
+ }
+}
+
+func (l *loadBalancerLogger) getLogMsg(
+	msgTemplate string, args ...interface{}) string {
+
+	prefixedTemplate := fmt.Sprintf("Cascade Cloud Provider::%s::Cluster [%s] Service [%s]: %s",
+		l.callingFunc, l.clusterName, l.k8sService.Name,
+		msgTemplate)
+	return fmt.Sprintf(prefixedTemplate, args...)
+}
+
+func (l *loadBalancerLogger) Errorf(msgTemplate string, args ...interface{}) {
+	glog.Errorln(l.getLogMsg(msgTemplate, args...))
+}
+
+func (l *loadBalancerLogger) Warningf(msgTemplate string, args ...interface{}) {
+	glog.Warningln(l.getLogMsg(msgTemplate, args...))
+}
+
+func (l *loadBalancerLogger) Infof(msgTemplate string, args ...interface{}) {
+	glog.Infoln(l.getLogMsg(msgTemplate, args...))
+}
\ No newline at end of file
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/cascade/client.go kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/client.go
--- kubernetes-original/pkg/cloudprovider/providers/cascade/client.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/client.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,394 @@
+package cascade
+
+import (
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "fmt"
+ "github.com/golang/glog"
+ "net/http"
+ "strings"
+ "time"
+)
+
+// Client represents the stateless context needed to call Cascade APIs.
+// Note that we implement the Cascade APIs manually instead of using the Swagger-generated code because Swagger uses
+// a different version of the OpenAPI library than Kubernetes does, and it is difficult to resolve that version
+// conflict and make it compile.
+type Client struct {
+ cfg *ClientConfig
+ options ClientOptions
+ restClient *restClient
+}
+
+type ClientConfig struct {
+ tenantName string
+ clusterID string
+ region string
+ endpoint string
+}
+
+// Represents Tokens
+type TokenOptions struct {
+ AccessToken string `json:"access_token"`
+ ExpiresIn int `json:"expires_in"`
+ RefreshToken string `json:"refresh_token,omitempty"`
+ IDToken string `json:"id_token"`
+ TokenType string `json:"token_type"`
+}
+
+type TokenCallback func(string)
+
+// Options for Client
+type ClientOptions struct {
+ // When using the Tasks.Wait APIs, defines the duration of how long
+ // we should continue to poll the server. Default is 30 minutes.
+ // TasksAPI.WaitTimeout() can be used to specify timeout on
+ // individual calls.
+ TaskPollTimeout time.Duration
+
+ // Whether or not to ignore any TLS errors when talking to Cascade,
+ // false by default.
+ IgnoreCertificate bool
+
+ // List of root CA's to use for server validation
+ // nil by default.
+ RootCAs *x509.CertPool
+
+ // For tasks APIs, defines the number of retries to make in the event
+ // of an error. Default is 3.
+ TaskRetryCount int
+
+ // Tokens for user authentication. Default is empty.
+ TokenOptions *TokenOptions
+}
+
+const minimumTaskPollDelay = 500 * time.Millisecond
+
+// Creates a new Cascade client which can be used to make API calls to Cascade.
+func NewClient(cfg *CascadeConfig, authClient *AuthClient) (c *Client, err error) {
+ tokenOptions, err := authClient.GetTokensByMachineAccount()
+ if err != nil {
+ glog.Errorf("Cascade Cloud Provider: Failed to create new client due to error: %+v", err)
+ return
+ }
+
+ options := &ClientOptions{
+ TaskPollTimeout: 30 * time.Minute,
+ TaskRetryCount: 3,
+ TokenOptions: tokenOptions,
+ IgnoreCertificate: false,
+ RootCAs: nil,
+ }
+
+ tr := &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: options.IgnoreCertificate,
+ RootCAs: options.RootCAs},
+ }
+
+ tokenCallback := func(newToken string) {
+ c.options.TokenOptions.AccessToken = newToken
+ }
+
+ restClient := &restClient{
+ authClient: authClient,
+ httpClient: &http.Client{Transport: tr},
+ UpdateAccessTokenCallback: tokenCallback,
+ }
+
+ clientConfig := &ClientConfig{
+ tenantName: cfg.Global.TenantName,
+ clusterID: cfg.Global.ClusterID,
+ region: cfg.Global.Region,
+ endpoint: strings.TrimRight(cfg.Global.CloudTarget, "/"),
+ }
+
+ c = &Client{
+ cfg: clientConfig,
+ restClient: restClient,
+ // Ensure a copy of options is made, rather than using a pointer
+ // which may change out from underneath if misused by the caller.
+ options: *options,
+ }
+
+ return
+}
+
+// Gets VM with the specified ID.
+func (api *Client) GetVM(vmID string) (vm *VM, err error) {
+ uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/vms/%s", api.cfg.endpoint, api.cfg.tenantName,
+ api.cfg.clusterID, vmID)
+ res, err := api.restClient.Get(uri, api.options.TokenOptions)
+ if err != nil {
+ return
+ }
+ defer res.Body.Close()
+ res, err = getError(res)
+ if err != nil {
+ return
+ }
+ vm = &VM{}
+ err = json.NewDecoder(res.Body).Decode(vm)
+ return
+}
+
+// Gets disk with the specified ID.
+func (api *Client) GetDisk(diskID string) (disk *PersistentDisk, err error) {
+ uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/disks/%s", api.cfg.endpoint, api.cfg.tenantName,
+ api.cfg.clusterID, diskID)
+ res, err := api.restClient.Get(uri, api.options.TokenOptions)
+ if err != nil {
+ return
+ }
+ defer res.Body.Close()
+ res, err = getError(res)
+ if err != nil {
+ return
+ }
+ disk = &PersistentDisk{}
+ err = json.NewDecoder(res.Body).Decode(disk)
+ return
+}
+
+// Creates a disk under the cluster.
+func (api *Client) CreateDisk(spec *DiskCreateSpec) (task *Task, err error) {
+ body, err := json.Marshal(spec)
+ if err != nil {
+ return
+ }
+ uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/disks", api.cfg.endpoint, api.cfg.tenantName,
+ api.cfg.clusterID)
+ res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
+ if err != nil {
+ return
+ }
+ defer res.Body.Close()
+ task, err = getTask(getError(res))
+ return
+}
+
+// Deletes a disk with the specified ID.
+func (api *Client) DeleteDisk(diskID string) (task *Task, err error) {
+ uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/disks/%s", api.cfg.endpoint, api.cfg.tenantName,
+ api.cfg.clusterID, diskID)
+ res, err := api.restClient.Delete(uri, api.options.TokenOptions)
+ if err != nil {
+ return
+ }
+ defer res.Body.Close()
+ task, err = getTask(getError(res))
+ return
+}
+
+// Attaches a disk to the specified VM.
+func (api *Client) AttachDisk(vmID string, op *VMDiskOperation) (task *Task, err error) {
+ body, err := json.Marshal(op)
+ if err != nil {
+ return
+ }
+ uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/vms/%s/attach_disk", api.cfg.endpoint, api.cfg.tenantName,
+ api.cfg.clusterID, vmID)
+ res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
+ if err != nil {
+ return
+ }
+ defer res.Body.Close()
+ task, err = getTask(getError(res))
+ return
+}
+
+// Detaches a disk from the specified VM.
+func (api *Client) DetachDisk(vmID string, op *VMDiskOperation) (task *Task, err error) {
+ body, err := json.Marshal(op)
+ if err != nil {
+ return
+ }
+ uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/vms/%s/detach_disk", api.cfg.endpoint, api.cfg.tenantName,
+ api.cfg.clusterID, vmID)
+ res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
+ if err != nil {
+ return
+ }
+ defer res.Body.Close()
+ task, err = getTask(getError(res))
+ return
+}
+
+// Gets a task by ID.
+func (api *Client) GetTask(taskID string) (task *Task, err error) {
+ uri := fmt.Sprintf("%s/v1/tenants/%s/tasks/%s?region=%s", api.cfg.endpoint, api.cfg.tenantName,
+ taskID, api.cfg.region)
+ res, err := api.restClient.Get(uri, api.options.TokenOptions)
+ if err != nil {
+ return
+ }
+ defer res.Body.Close()
+ result, err := getTask(getError(res))
+ return result, err
+}
+
+// Waits for a task to complete by polling the tasks API until a task returns with the state COMPLETED or ERROR.
+func (api *Client) WaitForTask(taskID string) (task *Task, err error) {
+ start := time.Now()
+ numErrors := 0
+ maxErrors := api.options.TaskRetryCount
+ backoffMultiplier := 1
+
+ for time.Since(start) < api.options.TaskPollTimeout {
+ task, err = api.GetTask(taskID)
+ if err != nil {
+ switch err.(type) {
+ // If an ApiError comes back, something is wrong, return the error to the caller
+ case APIError:
+ return
+ // For other errors, retry before giving up
+ default:
+ numErrors++
+ if numErrors > maxErrors {
+ return
+ }
+ }
+ } else {
+ // Reset the error count any time a successful call is made
+ numErrors = 0
+ if StringVal(task.State) == "COMPLETED" {
+ return
+ }
+ if StringVal(task.State) == "ERROR" {
+ err = TaskError{StringVal(task.ID), getFailedStep(task)}
+ return
+ }
+ }
+
+		// Perform backoff based on how long we have been polling. The logic is as follows:
+		// For the first 10 seconds, poll every 500 milliseconds.
+		// From 10 seconds to 1 minute, poll every 1 second.
+		// From 1 minute to 10 minutes, poll every 5 seconds.
+		// From 10 minutes until the timeout (30 minutes by default), poll every 10 seconds.
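+		// For example (illustrative), a poll made 45 seconds after the start uses multiplier 2 and sleeps for
+		// 1 second, while a poll made 5 minutes in uses multiplier 10 and sleeps for 5 seconds.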
+ elapsedTime := time.Since(start)
+ if elapsedTime > 10*time.Second && elapsedTime <= 60*time.Second {
+ backoffMultiplier = 2
+ } else if elapsedTime > 60*time.Second && elapsedTime <= 600*time.Second {
+ backoffMultiplier = 10
+ } else if elapsedTime > 600*time.Second && elapsedTime <= api.options.TaskPollTimeout {
+ backoffMultiplier = 20
+ }
+ time.Sleep(time.Duration(backoffMultiplier) * minimumTaskPollDelay)
+ }
+ err = TaskTimeoutError{taskID}
+ return
+}
+
+// CreateOrUpdateLoadBalancer creates a load balancer if one does not exist, or updates the existing one otherwise.
+func (api *Client) CreateOrUpdateLoadBalancer(spec *LoadBalancerCreateSpec) (*Task, error) {
+ body, err := json.Marshal(spec)
+ if err != nil {
+ return nil, err
+ }
+ uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers", api.cfg.endpoint, api.cfg.tenantName,
+ api.cfg.clusterID)
+ res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ return getTask(getError(res))
+}
+
+// GetLoadBalancer returns a load balancer by name
+func (api *Client) GetLoadBalancer(loadBalancerName *string) (*LoadBalancer, error) {
+ uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers/%s", api.cfg.endpoint, api.cfg.tenantName,
+ api.cfg.clusterID, StringVal(loadBalancerName))
+ res, err := api.restClient.Get(uri, api.options.TokenOptions)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ res, err = getError(res)
+ if err != nil {
+ return nil, err
+ }
+ loadBalancer := &LoadBalancer{}
+ err = json.NewDecoder(res.Body).Decode(loadBalancer)
+ return loadBalancer, err
+}
+
+// DeleteLoadBalancer deletes a load balancer by name
+func (api *Client) DeleteLoadBalancer(loadBalancerName *string) (*Task, error) {
+ uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers/%s", api.cfg.endpoint, api.cfg.tenantName,
+ api.cfg.clusterID, StringVal(loadBalancerName))
+	res, err := api.restClient.Delete(uri, api.options.TokenOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	return getTask(getError(res))
+}
+
+// ApplyVMsToLoadBalancer updates the instances that are registered with the load balancer
+func (api *Client) ApplyVMsToLoadBalancer(loadBalancerName *string, update *LoadBalancerVMUpdate) (*Task, error) {
+ body, err := json.Marshal(update)
+ if err != nil {
+ return nil, err
+ }
+ uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers/%s/update_vms", api.cfg.endpoint, api.cfg.tenantName,
+ api.cfg.clusterID, StringVal(loadBalancerName))
+ res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ return getTask(getError(res))
+}
+
+// GetZones returns all the zones in which the cluster has VMs.
+func (api *Client) GetZones() (zones []string, err error) {
+ uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/zones", api.cfg.endpoint, api.cfg.tenantName,
+ api.cfg.clusterID)
+ res, err := api.restClient.Get(uri, api.options.TokenOptions)
+ if err != nil {
+ return
+ }
+ defer res.Body.Close()
+ res, err = getError(res)
+ if err != nil {
+ return
+ }
+ err = json.NewDecoder(res.Body).Decode(&zones)
+ return
+}
+
+// Reads a task object out of the HTTP response. Takes an error argument
+// so that callers can easily wrap getError. This function does nothing
+// if e is not nil.
+// e.g. task, err := getTask(getError(someApi.Get()))
+func getTask(res *http.Response, e error) (*Task, error) {
+ if e != nil {
+ return nil, e
+ }
+ var task Task
+ err := json.NewDecoder(res.Body).Decode(&task)
+ if err != nil {
+ return nil, err
+ }
+ if StringVal(task.State) == "ERROR" {
+ // Critical: return task as well, so that it can be examined
+ // for error details.
+ return &task, TaskError{StringVal(task.ID), getFailedStep(&task)}
+ }
+ return &task, nil
+}
+
+// Gets the failed step in the task to get error details for failed task.
+func getFailedStep(task *Task) (step Step) {
+ var errorStep Step
+ for _, s := range task.Steps {
+ if StringVal(s.State) == "ERROR" {
+ errorStep = *s
+ break
+ }
+ }
+
+ return errorStep
+}
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/cascade/oidcclient.go kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/oidcclient.go
--- kubernetes-original/pkg/cloudprovider/providers/cascade/oidcclient.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/oidcclient.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,297 @@
+package cascade
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+const tokenScope string = "openid offline_access"
+
+// OIDCClient is client for OIDC
+type OIDCClient struct {
+ httpClient *http.Client
+ logger *log.Logger
+
+ Endpoint string
+ Options *OIDCClientOptions
+}
+
+// OIDCClientOptions is OIDC client options
+type OIDCClientOptions struct {
+ // Whether or not to ignore any TLS errors when talking to Cascade,
+ // false by default.
+ IgnoreCertificate bool
+
+ // List of root CA's to use for server validation
+ // nil by default.
+ RootCAs *x509.CertPool
+
+ // The scope values to use when requesting tokens
+ TokenScope string
+}
+
+// NewOIDCClient creates an instance of OIDCClient
+func NewOIDCClient(endpoint string, options *OIDCClientOptions, logger *log.Logger) (c *OIDCClient) {
+ if logger == nil {
+ logger = log.New(ioutil.Discard, "", log.LstdFlags)
+ }
+
+ options = buildOptions(options)
+ tr := &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: options.IgnoreCertificate,
+ RootCAs: options.RootCAs},
+ }
+
+ c = &OIDCClient{
+ httpClient: &http.Client{Transport: tr},
+ logger: logger,
+ Endpoint: strings.TrimRight(endpoint, "/"),
+ Options: options,
+ }
+ return
+}
+
+func buildOptions(options *OIDCClientOptions) (result *OIDCClientOptions) {
+ result = &OIDCClientOptions{
+ TokenScope: tokenScope,
+ }
+
+ if options == nil {
+ return
+ }
+
+ result.IgnoreCertificate = options.IgnoreCertificate
+
+ if options.RootCAs != nil {
+ result.RootCAs = options.RootCAs
+ }
+
+ if options.TokenScope != "" {
+ result.TokenScope = options.TokenScope
+ }
+
+ return
+}
+
+func (client *OIDCClient) buildURL(path string) (url string) {
+ return fmt.Sprintf("%s%s", client.Endpoint, path)
+}
+
+// Cert download helper
+
+const certDownloadPath string = "/afd/vecs/ssl"
+
+type lightWaveCert struct {
+ Value string `json:"encoded"`
+}
+
+// GetRootCerts gets root certs
+func (client *OIDCClient) GetRootCerts() (certList []*x509.Certificate, err error) {
+	// Turn TLS verification off while downloading the certificates from the auth server.
+ originalTr := client.httpClient.Transport
+ defer client.setTransport(originalTr)
+
+ tr := &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: true,
+ },
+ }
+ client.setTransport(tr)
+
+ // get the certs
+ resp, err := client.httpClient.Get(client.buildURL(certDownloadPath))
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ err = fmt.Errorf("Unexpected error retrieving auth server certs: %v %s", resp.StatusCode, resp.Status)
+ return
+ }
+
+ // parse the certs
+ certsData := &[]lightWaveCert{}
+ err = json.NewDecoder(resp.Body).Decode(certsData)
+ if err != nil {
+ return
+ }
+
+ certList = make([]*x509.Certificate, len(*certsData))
+ for idx, cert := range *certsData {
+ block, _ := pem.Decode([]byte(cert.Value))
+ if block == nil {
+ err = fmt.Errorf("Unexpected response format: %v", certsData)
+ return nil, err
+ }
+
+ decodedCert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ certList[idx] = decodedCert
+ }
+
+ return
+}
+
+func (client *OIDCClient) setTransport(tr http.RoundTripper) {
+ client.httpClient.Transport = tr
+}
+
+// Metadata request helpers
+const metadataPathFormat string = "/openidconnect/%s/.well-known/openid-configuration"
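+
+// For example (illustrative), a domain of "example.com" produces the metadata path
+// "/openidconnect/example.com/.well-known/openid-configuration", which getMetadata resolves relative to the
+// client endpoint.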
+
+// OIDCMetadataResponse is the response for Metadata request
+type OIDCMetadataResponse struct {
+ TokenEndpoint string `json:"token_endpoint"`
+ AuthorizationEndpoint string `json:"authorization_endpoint"`
+ EndSessionEndpoint string `json:"end_session_endpoint"`
+}
+
+func (client *OIDCClient) getMetadata(domain string) (metadata *OIDCMetadataResponse, err error) {
+ metadataPath := fmt.Sprintf(metadataPathFormat, domain)
+ request, err := http.NewRequest("GET", client.buildURL(metadataPath), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := client.httpClient.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ err = client.checkResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ metadata = &OIDCMetadataResponse{}
+ err = json.NewDecoder(resp.Body).Decode(metadata)
+ if err != nil {
+ return nil, err
+ }
+
+ return
+}
+
+// Token request helpers
+
+const passwordGrantFormatString = "grant_type=password&username=%s&password=%s&scope=%s"
+const refreshTokenGrantFormatString = "grant_type=refresh_token&refresh_token=%s"
+const clientGrantFormatString = "grant_type=password&username=%s&password=%s&scope=%s&client_id=%s"
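+
+// For example (illustrative), GetTokenByPasswordGrant sends a form body of the shape
+// "grant_type=password&username=<escaped-user>&password=<escaped-password>&scope=openid offline_access",
+// where the scope comes from the client options (tokenScope by default).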
+
+// OIDCTokenResponse is the response for OIDC request
+type OIDCTokenResponse struct {
+ AccessToken string `json:"access_token"`
+ ExpiresIn int `json:"expires_in"`
+ RefreshToken string `json:"refresh_token,omitempty"`
+ IDToken string `json:"id_token"`
+ TokenType string `json:"token_type"`
+}
+
+// GetTokenByPasswordGrant gets OIDC tokens by password
+func (client *OIDCClient) GetTokenByPasswordGrant(domain, username, password string) (tokens *OIDCTokenResponse, err error) {
+ metadata, err := client.getMetadata(domain)
+ if err != nil {
+ return nil, err
+ }
+
+ username = url.QueryEscape(username)
+ password = url.QueryEscape(password)
+ body := fmt.Sprintf(passwordGrantFormatString, username, password, client.Options.TokenScope)
+ return client.getToken(metadata.TokenEndpoint, body)
+}
+
+// GetClientTokenByPasswordGrant gets OIDC tokens by password
+func (client *OIDCClient) GetClientTokenByPasswordGrant(domain, username, password, clientID string) (tokens *OIDCTokenResponse, err error) {
+ metadata, err := client.getMetadata(domain)
+ if err != nil {
+ return nil, err
+ }
+
+ username = url.QueryEscape(username)
+ password = url.QueryEscape(password)
+ clientID = url.QueryEscape(clientID)
+ body := fmt.Sprintf(clientGrantFormatString, username, password, client.Options.TokenScope, clientID)
+ return client.getToken(metadata.TokenEndpoint, body)
+}
+
+// GetTokenByRefreshTokenGrant gets OIDC tokens by refresh token
+func (client *OIDCClient) GetTokenByRefreshTokenGrant(domain, refreshToken string) (tokens *OIDCTokenResponse, err error) {
+ metadata, err := client.getMetadata(domain)
+ if err != nil {
+ return nil, err
+ }
+
+ body := fmt.Sprintf(refreshTokenGrantFormatString, refreshToken)
+ return client.getToken(metadata.TokenEndpoint, body)
+}
+
+func (client *OIDCClient) getToken(tokenEndpoint, body string) (tokens *OIDCTokenResponse, err error) {
+ request, err := http.NewRequest("POST", tokenEndpoint, strings.NewReader(body))
+ if err != nil {
+ return nil, err
+ }
+ request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+
+ resp, err := client.httpClient.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ err = client.checkResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ tokens = &OIDCTokenResponse{}
+ err = json.NewDecoder(resp.Body).Decode(tokens)
+ if err != nil {
+ return nil, err
+ }
+
+ return
+}
+
+// OIDCError is OIDC error
+type OIDCError struct {
+ Code string `json:"error"`
+ Message string `json:"error_description"`
+}
+
+func (e OIDCError) Error() string {
+ return fmt.Sprintf("%v: %v", e.Code, e.Message)
+}
+
+func (client *OIDCClient) checkResponse(response *http.Response) (err error) {
+ if response.StatusCode/100 == 2 {
+ return
+ }
+
+ respBody, readErr := ioutil.ReadAll(response.Body)
+ if readErr != nil {
+ return fmt.Errorf(
+ "Status: %v, Body: %v [%v]", response.Status, string(respBody[:]), readErr)
+ }
+
+ var oidcErr OIDCError
+ err = json.Unmarshal(respBody, &oidcErr)
+ if err != nil || oidcErr.Code == "" {
+ return fmt.Errorf(
+ "Status: %v, Body: %v [%v]", response.Status, string(respBody[:]), readErr)
+ }
+
+ return oidcErr
+}
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/cascade/OWNERS kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/OWNERS
--- kubernetes-original/pkg/cloudprovider/providers/cascade/OWNERS 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/OWNERS 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,3 @@
+maintainers:
+- ashokc
+- ysheng
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/cascade/restclient.go kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/restclient.go
--- kubernetes-original/pkg/cloudprovider/providers/cascade/restclient.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/restclient.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,262 @@
+package cascade
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+type restClient struct {
+ httpClient *http.Client
+ authClient *AuthClient
+ UpdateAccessTokenCallback TokenCallback
+}
+
+type request struct {
+ Method string
+ URL string
+ ContentType string
+ Body io.Reader
+ Tokens *TokenOptions
+}
+
+type page struct {
+ Items []interface{} `json:"items"`
+ NextPageLink string `json:"nextPageLink"`
+ PreviousPageLink string `json:"previousPageLink"`
+}
+
+type documentList struct {
+ Items []interface{}
+}
+
+type bodyRewinder func() io.Reader
+
+const appJson string = "application/json"
+const expiredAuthToken int32 = 1904
+
+func (client *restClient) AppendSlice(origSlice []interface{}, dataToAppend []interface{}) []interface{} {
+ origLen := len(origSlice)
+ newLen := origLen + len(dataToAppend)
+
+ if newLen > cap(origSlice) {
+ newSlice := make([]interface{}, (newLen+1)*2)
+ copy(newSlice, origSlice)
+ origSlice = newSlice
+ }
+
+ origSlice = origSlice[0:newLen]
+ copy(origSlice[origLen:newLen], dataToAppend)
+
+ return origSlice
+}
+
+func (client *restClient) Get(url string, tokens *TokenOptions) (res *http.Response, err error) {
+ req := request{"GET", url, "", nil, tokens}
+ res, err = client.SendRequest(&req, nil)
+ return
+}
+
+func (client *restClient) GetList(endpoint string, url string, tokens *TokenOptions) (result []byte, err error) {
+ req := request{"GET", url, "", nil, tokens}
+ res, err := client.SendRequest(&req, nil)
+ if err != nil {
+ return
+ }
+ res, err = getError(res)
+ if err != nil {
+ return
+ }
+
+ decoder := json.NewDecoder(res.Body)
+ decoder.UseNumber()
+
+ page := &page{}
+ err = decoder.Decode(page)
+ if err != nil {
+ return
+ }
+
+ documentList := &documentList{}
+ documentList.Items = client.AppendSlice(documentList.Items, page.Items)
+
+ for page.NextPageLink != "" {
+ req = request{"GET", endpoint + page.NextPageLink, "", nil, tokens}
+ res, err = client.SendRequest(&req, nil)
+ if err != nil {
+ return
+ }
+ res, err = getError(res)
+ if err != nil {
+ return
+ }
+
+ decoder = json.NewDecoder(res.Body)
+ decoder.UseNumber()
+
+ page.NextPageLink = ""
+ page.PreviousPageLink = ""
+
+ err = decoder.Decode(page)
+ if err != nil {
+ return
+ }
+
+ documentList.Items = client.AppendSlice(documentList.Items, page.Items)
+ }
+
+ result, err = json.Marshal(documentList)
+
+ return
+}
+
+func (client *restClient) Post(url string, contentType string, body io.ReadSeeker, tokens *TokenOptions) (res *http.Response, err error) {
+ if contentType == "" {
+ contentType = appJson
+ }
+
+ req := request{"POST", url, contentType, body, tokens}
+ rewinder := func() io.Reader {
+ body.Seek(0, 0)
+ return body
+ }
+ res, err = client.SendRequest(&req, rewinder)
+ return
+}
+
+func (client *restClient) Patch(url string, contentType string, body io.ReadSeeker, tokens *TokenOptions) (res *http.Response, err error) {
+ if contentType == "" {
+ contentType = appJson
+ }
+
+ req := request{"PATCH", url, contentType, body, tokens}
+ rewinder := func() io.Reader {
+ body.Seek(0, 0)
+ return body
+ }
+ res, err = client.SendRequest(&req, rewinder)
+ return
+}
+
+func (client *restClient) Put(url string, contentType string, body io.ReadSeeker, tokens *TokenOptions) (res *http.Response, err error) {
+ if contentType == "" {
+ contentType = appJson
+ }
+
+ req := request{"PUT", url, contentType, body, tokens}
+ rewinder := func() io.Reader {
+ body.Seek(0, 0)
+ return body
+ }
+ res, err = client.SendRequest(&req, rewinder)
+ return
+}
+
+func (client *restClient) Delete(url string, tokens *TokenOptions) (res *http.Response, err error) {
+ req := request{"DELETE", url, "", nil, tokens}
+ res, err = client.SendRequest(&req, nil)
+ return
+}
+
+func (client *restClient) SendRequest(req *request, bodyRewinder bodyRewinder) (res *http.Response, err error) {
+ res, err = client.sendRequestHelper(req)
+ // In most cases, we'll return immediately
+ // If the operation succeeded, but we got a 401 response and if we're using
+ // authentication, then we'll look into the body to see if the token expired
+ if err != nil {
+ return res, err
+ }
+ if res.StatusCode != 401 {
+ // It's not a 401, so the token didn't expire
+ return res, err
+ }
+ if req.Tokens == nil || req.Tokens.AccessToken == "" {
+ // We don't have a token, so we can't renew the token, no need to proceed
+ return res, err
+ }
+
+ // We're going to look in the body to see if it failed because the token expired
+ // This means we need to read the body, but the functions that call us also
+ // expect to read the body. So we read the body, then create a new reader
+ // so they can read the body as normal.
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return res, err
+ }
+ res.Body = ioutil.NopCloser(bytes.NewReader(body))
+
+ // Now see if we had an expired token or not
+ var apiError APIError
+ err = json.Unmarshal(body, &apiError)
+ if err != nil {
+ return res, err
+ }
+ if apiError.ErrorCode != expiredAuthToken {
+ return res, nil
+ }
+
+ // We were told that the access token expired, so we acquire a new token using the refresh token.
+ newTokens, err := client.authClient.GetTokensByRefreshToken(req.Tokens.RefreshToken)
+ // If there is an error during token refresh, we assume that the refresh token also expired. So we login again using
+ // the machine account.
+ if err != nil {
+ newTokens, err = client.authClient.GetTokensByMachineAccount()
+ if err != nil {
+ return res, err
+ }
+ }
+ req.Tokens.AccessToken = newTokens.AccessToken
+ if client.UpdateAccessTokenCallback != nil {
+ client.UpdateAccessTokenCallback(newTokens.AccessToken)
+ }
+ if req.Body != nil && bodyRewinder != nil {
+ req.Body = bodyRewinder()
+ }
+ res, err = client.sendRequestHelper(req)
+	return res, err
+}
+
+func (client *restClient) sendRequestHelper(req *request) (res *http.Response, err error) {
+ r, err := http.NewRequest(req.Method, req.URL, req.Body)
+ if err != nil {
+ return
+ }
+ if req.ContentType != "" {
+ r.Header.Add("Content-Type", req.ContentType)
+ }
+ if req.Tokens != nil && req.Tokens.AccessToken != "" {
+ r.Header.Add("Authorization", "Bearer "+req.Tokens.AccessToken)
+ }
+ res, err = client.httpClient.Do(r)
+ if err != nil {
+ return
+ }
+
+ return
+}
+
+// Reads an error out of the HTTP response, or does nothing if
+// no error occurred.
+func getError(res *http.Response) (*http.Response, error) {
+ // Do nothing if the response is a successful 2xx
+ if res.StatusCode/100 == 2 {
+ return res, nil
+ }
+ var apiError APIError
+ // ReadAll is usually a bad practice, but here we need to read the response all
+ // at once because we may attempt to use the data twice. It's preferable to use
+ // methods that take io.Reader, e.g. json.NewDecoder
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal(body, &apiError)
+ if err != nil {
+ // If deserializing into ApiError fails, return a generic HttpError instead
+ return nil, HttpError{res.StatusCode, string(body[:])}
+ }
+ apiError.HttpStatusCode = res.StatusCode
+ return nil, apiError
+}
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/cascade/utils.go kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/utils.go
--- kubernetes-original/pkg/cloudprovider/providers/cascade/utils.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/cascade/utils.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,25 @@
+package cascade
+
+func StringPtr(s string) *string {
+ return &s
+}
+
+// StringVal returns the string value from a string pointer; a nil pointer returns "".
+func StringVal(p *string) (s string) {
+ if p != nil {
+ s = *p
+ }
+ return
+}
+
+func Int64Ptr(s int64) *int64 {
+ return &s
+}
+
+// Int64Val returns the int64 value from an int64 pointer; a nil pointer returns 0.
+func Int64Val(s *int64) (i int64) {
+	if s != nil {
+		i = *s
+	}
+	return
+}
+
+func Int32Ptr(s int32) *int32 {
+ return &s
+}
\ No newline at end of file
diff --no-dereference -uNr kubernetes-original/pkg/cloudprovider/providers/providers.go kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/providers.go
--- kubernetes-original/pkg/cloudprovider/providers/providers.go 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/cloudprovider/providers/providers.go 2018-03-23 21:44:53.000000000 +0000
@@ -20,6 +20,7 @@
// Cloud providers
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
+ _ "k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
_ "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
Binary files kubernetes-original/pkg/._.DS_Store and kubernetes-modified/src/k8s.io/kubernetes/pkg/._.DS_Store differ
Binary files kubernetes-original/pkg/.DS_Store and kubernetes-modified/src/k8s.io/kubernetes/pkg/.DS_Store differ
Binary files kubernetes-original/pkg/printers/._.DS_Store and kubernetes-modified/src/k8s.io/kubernetes/pkg/printers/._.DS_Store differ
Binary files kubernetes-original/pkg/printers/.DS_Store and kubernetes-modified/src/k8s.io/kubernetes/pkg/printers/.DS_Store differ
diff --no-dereference -uNr kubernetes-original/pkg/printers/internalversion/describe.go kubernetes-modified/src/k8s.io/kubernetes/pkg/printers/internalversion/describe.go
--- kubernetes-original/pkg/printers/internalversion/describe.go 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/printers/internalversion/describe.go 2018-03-23 21:44:53.000000000 +0000
@@ -751,6 +751,8 @@
printFlexVolumeSource(volume.VolumeSource.FlexVolume, w)
case volume.VolumeSource.Flocker != nil:
printFlockerVolumeSource(volume.VolumeSource.Flocker, w)
+ case volume.VolumeSource.CascadeDisk != nil:
+ printCascadeDiskVolumeSource(volume.VolumeSource.CascadeDisk, w)
default:
w.Write(LEVEL_1, "<unknown>\n")
}
@@ -1101,6 +1103,13 @@
csi.Driver, csi.VolumeHandle, csi.ReadOnly)
}
+func printCascadeDiskVolumeSource(cascade *api.CascadeDiskVolumeSource, w PrefixWriter) {
+ w.Write(LEVEL_2, "Type:\tCascadeDisk (a Persistent Disk resource in Cascade)\n"+
+ " DiskID:\t%v\n"+
+ " FSType:\t%v\n",
+ cascade.DiskID, cascade.FSType)
+}
+
type PersistentVolumeDescriber struct {
clientset.Interface
}
@@ -1189,6 +1198,8 @@
printFlockerVolumeSource(pv.Spec.Flocker, w)
case pv.Spec.CSI != nil:
printCSIPersistentVolumeSource(pv.Spec.CSI, w)
+ case pv.Spec.CascadeDisk != nil:
+ printCascadeDiskVolumeSource(pv.Spec.CascadeDisk, w)
default:
w.Write(LEVEL_1, "<unknown>\n")
}
Binary files kubernetes-original/pkg/security/._.DS_Store and kubernetes-modified/src/k8s.io/kubernetes/pkg/security/._.DS_Store differ
Binary files kubernetes-original/pkg/security/.DS_Store and kubernetes-modified/src/k8s.io/kubernetes/pkg/security/.DS_Store differ
Binary files kubernetes-original/pkg/security/podsecuritypolicy/._.DS_Store and kubernetes-modified/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/._.DS_Store differ
Binary files kubernetes-original/pkg/security/podsecuritypolicy/.DS_Store and kubernetes-modified/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/.DS_Store differ
diff --no-dereference -uNr kubernetes-original/pkg/security/podsecuritypolicy/util/util.go kubernetes-modified/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go
--- kubernetes-original/pkg/security/podsecuritypolicy/util/util.go 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go 2018-03-23 21:44:53.000000000 +0000
@@ -68,6 +68,7 @@
string(extensions.PortworxVolume),
string(extensions.ScaleIO),
string(extensions.CSI),
+ string(extensions.CascadeDisk),
)
return fstypes
}
@@ -129,6 +130,8 @@
return extensions.PortworxVolume, nil
case v.ScaleIO != nil:
return extensions.ScaleIO, nil
+ case v.CascadeDisk != nil:
+ return extensions.CascadeDisk, nil
}
return "", fmt.Errorf("unknown volume type for volume: %#v", v)
diff --no-dereference -uNr kubernetes-original/pkg/volume/cascade_disk/attacher.go kubernetes-modified/src/k8s.io/kubernetes/pkg/volume/cascade_disk/attacher.go
--- kubernetes-original/pkg/volume/cascade_disk/attacher.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/volume/cascade_disk/attacher.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,269 @@
+package cascade_disk
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "time"
+
+ "github.com/golang/glog"
+ "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
+ "k8s.io/kubernetes/pkg/util/mount"
+ "k8s.io/kubernetes/pkg/volume"
+ volumeutil "k8s.io/kubernetes/pkg/volume/util"
+ "k8s.io/kubernetes/pkg/volume/util/volumehelper"
+ "strings"
+)
+
+type cascadeDiskAttacher struct {
+ host volume.VolumeHost
+ cascadeDisks cascade.Disks
+}
+
+var _ volume.Attacher = &cascadeDiskAttacher{}
+var _ volume.AttachableVolumePlugin = &cascadeDiskPlugin{}
+
+func (plugin *cascadeDiskPlugin) NewAttacher() (volume.Attacher, error) {
+ cascadeCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
+ if err != nil {
+ glog.Errorf("Cascade attacher: NewAttacher failed to get cloud provider")
+ return nil, err
+ }
+
+ return &cascadeDiskAttacher{
+ host: plugin.host,
+ cascadeDisks: cascadeCloud,
+ }, nil
+}
+
+// Attach attaches the volume specified by the given spec to the given host. On success, returns the device path where
+// the device was attached on the node.
+func (attacher *cascadeDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
+ hostName := string(nodeName)
+ volumeSource, _, err := getVolumeSource(spec)
+ if err != nil {
+ glog.Errorf("Cascade attacher: Attach failed to get volume source")
+ return "", err
+ }
+
+ // cascadeDisks.AttachDisk checks if disk is already attached to the node. So we don't have to do that separately
+ // here.
+ glog.V(4).Infof("Cascade: Attach disk called for host %s", hostName)
+ devicePath, err := attacher.cascadeDisks.AttachDisk(volumeSource.DiskID, nodeName)
+ if err != nil {
+ glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.DiskID, nodeName, err)
+ return "", err
+ }
+
+	// Cascade uses device names of the format /dev/sdX, but newer Linux kernels expose them as /dev/xvdX
+	// (as seen, for example, in the AWS console). So we rename the first occurrence of "sd" to "xvd".
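+	// For example (illustrative), a reported device path of "/dev/sdb" becomes "/dev/xvdb".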
+ devicePath = strings.Replace(devicePath, "sd", "xvd", 1)
+ return devicePath, nil
+}
+
+// VolumesAreAttached verifies whether the volumes specified in the spec are attached to the specified node.
+func (attacher *cascadeDiskAttacher) VolumesAreAttached(specs []*volume.Spec,
+ nodeName types.NodeName) (map[*volume.Spec]bool, error) {
+ volumesAttachedCheck := make(map[*volume.Spec]bool)
+ volumeSpecMap := make(map[string]*volume.Spec)
+ diskIDList := []string{}
+ for _, spec := range specs {
+ volumeSource, _, err := getVolumeSource(spec)
+ if err != nil {
+ glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
+ continue
+ }
+
+ diskIDList = append(diskIDList, volumeSource.DiskID)
+ volumesAttachedCheck[spec] = true
+ volumeSpecMap[volumeSource.DiskID] = spec
+ }
+ attachedResult, err := attacher.cascadeDisks.DisksAreAttached(diskIDList, nodeName)
+ if err != nil {
+ glog.Errorf(
+ "Error checking if volumes (%v) are attached to current node (%q). err=%v",
+ diskIDList, nodeName, err)
+ return volumesAttachedCheck, err
+ }
+
+ for diskID, attached := range attachedResult {
+ if !attached {
+ spec := volumeSpecMap[diskID]
+ volumesAttachedCheck[spec] = false
+ glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached",
+ diskID, spec.Name())
+ }
+ }
+ return volumesAttachedCheck, nil
+}
+
+// WaitForAttach waits until the devicePath returned by the Attach call is available.
+func (attacher *cascadeDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod,
+ timeout time.Duration) (string, error) {
+ volumeSource, _, err := getVolumeSource(spec)
+ if err != nil {
+ glog.Errorf("Cascade attacher: WaitForAttach failed to get volume source")
+ return "", err
+ }
+
+ if devicePath == "" {
+ return "", fmt.Errorf("WaitForAttach failed for disk %s: devicePath is empty.", volumeSource.DiskID)
+ }
+
+ ticker := time.NewTicker(checkSleepDuration)
+ defer ticker.Stop()
+
+ timer := time.NewTimer(timeout)
+ defer timer.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ glog.V(4).Infof("Checking disk %s is attached", volumeSource.DiskID)
+ checkPath, err := verifyDevicePath(devicePath)
+ if err != nil {
+ // Log error, if any, and continue checking periodically. See issue #11321
+ glog.Warningf("Cascade attacher: WaitForAttach with devicePath %s Checking PD %s Error verify "+
+ "path", devicePath, volumeSource.DiskID)
+ } else if checkPath != "" {
+ // A device path has successfully been created for the disk
+ glog.V(4).Infof("Successfully found attached disk %s.", volumeSource.DiskID)
+ return devicePath, nil
+ }
+ case <-timer.C:
+ return "", fmt.Errorf("Could not find attached disk %s. Timeout waiting for mount paths to be "+
+ "created.", volumeSource.DiskID)
+ }
+ }
+}
+
+// GetDeviceMountPath returns a path where the device should point which should be bind mounted for individual volumes.
+func (attacher *cascadeDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
+ volumeSource, _, err := getVolumeSource(spec)
+ if err != nil {
+ glog.Errorf("Cascade attacher: GetDeviceMountPath failed to get volume source")
+ return "", err
+ }
+
+ return makeGlobalPDPath(attacher.host, volumeSource.DiskID), nil
+}
+
+// GetDeviceMountRefs finds all other references to the device referenced by deviceMountPath; returns a list of paths.
+func (plugin *cascadeDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
+ mounter := plugin.host.GetMounter(plugin.GetPluginName())
+ return mount.GetMountRefs(mounter, deviceMountPath)
+}
+
+// MountDevice mounts device to global mount point.
+func (attacher *cascadeDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+ mounter := attacher.host.GetMounter(cascadeDiskPluginName)
+ notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
+ glog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err)
+ return err
+ }
+ notMnt = true
+ } else {
+ return err
+ }
+ }
+
+ volumeSource, _, err := getVolumeSource(spec)
+ if err != nil {
+ glog.Errorf("Cascade attacher: MountDevice failed to get volume source. err: %s", err)
+ return err
+ }
+
+ options := []string{}
+
+ if notMnt {
+ diskMounter := volumehelper.NewSafeFormatAndMountFromHost(cascadeDiskPluginName, attacher.host)
+ mountOptions := volume.MountOptionFromSpec(spec)
+ err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
+ if err != nil {
+ os.Remove(deviceMountPath)
+ return err
+ }
+ glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v",
+ spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options)
+ }
+ return nil
+}
+
+type cascadeDiskDetacher struct {
+ mounter mount.Interface
+ cascadeDisks cascade.Disks
+}
+
+var _ volume.Detacher = &cascadeDiskDetacher{}
+
+// NewDetacher returns the detacher associated with the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) NewDetacher() (volume.Detacher, error) {
+ cascadeCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
+ if err != nil {
+ glog.Errorf("Cascade attacher: NewDetacher failed to get cloud provider. err: %s", err)
+ return nil, err
+ }
+
+ return &cascadeDiskDetacher{
+ mounter: plugin.host.GetMounter(plugin.GetPluginName()),
+ cascadeDisks: cascadeCloud,
+ }, nil
+}
+
+// Detach detaches the given device from the given host.
+func (detacher *cascadeDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
+ hostName := string(nodeName)
+ diskID := path.Base(deviceMountPath)
+ attached, err := detacher.cascadeDisks.DiskIsAttached(diskID, nodeName)
+ if err != nil {
+ // Log error and continue with detach
+ glog.Errorf(
+ "Error checking if persistent disk (%q) is already attached to current node (%q). "+
+ "Will continue and try detach anyway. err=%v", diskID, hostName, err)
+ }
+
+ if err == nil && !attached {
+ // Volume is already detached from node.
+ glog.V(4).Infof("detach operation was successful. persistent disk %q is already detached "+
+ "from node %q.", diskID, hostName)
+ return nil
+ }
+
+ if err := detacher.cascadeDisks.DetachDisk(diskID, nodeName); err != nil {
+ glog.Errorf("Error detaching volume %q: %v", diskID, err)
+ return err
+ }
+ return nil
+}
+
+// WaitForDetach waits for the devicePath to become unavailable.
+func (detacher *cascadeDiskDetacher) WaitForDetach(devicePath string, timeout time.Duration) error {
+ ticker := time.NewTicker(checkSleepDuration)
+ defer ticker.Stop()
+ timer := time.NewTimer(timeout)
+ defer timer.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ glog.V(4).Infof("Checking device %q is detached.", devicePath)
+ if pathExists, err := volumeutil.PathExists(devicePath); err != nil {
+ return fmt.Errorf("Error checking if device path exists: %v", err)
+ } else if !pathExists {
+ return nil
+ }
+ case <-timer.C:
+ return fmt.Errorf("Timeout reached; Device %v is still attached", devicePath)
+ }
+ }
+}
+
+// UnmountDevice unmounts the disk specified by the device mount path.
+func (detacher *cascadeDiskDetacher) UnmountDevice(deviceMountPath string) error {
+ return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
+}
diff --no-dereference -uNr kubernetes-original/pkg/volume/cascade_disk/BUILD kubernetes-modified/src/k8s.io/kubernetes/pkg/volume/cascade_disk/BUILD
--- kubernetes-original/pkg/volume/cascade_disk/BUILD 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/volume/cascade_disk/BUILD 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,43 @@
+package(default_visibility = ["//visibility:public"])
+
+load(
+ "@io_bazel_rules_go//go:def.bzl",
+ "go_library",
+ "go_test",
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "attacher.go",
+ "cascade_disk.go",
+ "cascade_util.go",
+ ],
+ deps = [
+ "//pkg/cloudprovider:go_default_library",
+ "//pkg/cloudprovider/providers/cascade:go_default_library",
+ "//pkg/util/mount:go_default_library",
+ "//pkg/util/strings:go_default_library",
+ "//pkg/volume:go_default_library",
+ "//pkg/volume/util:go_default_library",
+ "//pkg/volume/util/volumehelper:go_default_library",
+ "//vendor/github.com/golang/glog:go_default_library",
+ "//vendor/k8s.io/api/core/v1:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+ "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+ ],
+)
+
+filegroup(
+ name = "package-srcs",
+ srcs = glob(["**"]),
+ tags = ["automanaged"],
+ visibility = ["//visibility:private"],
+)
+
+filegroup(
+ name = "all-srcs",
+ srcs = [":package-srcs"],
+ tags = ["automanaged"],
+)
diff --no-dereference -uNr kubernetes-original/pkg/volume/cascade_disk/cascade_disk.go kubernetes-modified/src/k8s.io/kubernetes/pkg/volume/cascade_disk/cascade_disk.go
--- kubernetes-original/pkg/volume/cascade_disk/cascade_disk.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/volume/cascade_disk/cascade_disk.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,391 @@
+package cascade_disk
+
+import (
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/golang/glog"
+ "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/kubernetes/pkg/util/mount"
+ utilstrings "k8s.io/kubernetes/pkg/util/strings"
+ "k8s.io/kubernetes/pkg/volume"
+ "k8s.io/kubernetes/pkg/volume/util"
+ "k8s.io/kubernetes/pkg/volume/util/volumehelper"
+)
+
+// This is the primary entrypoint for volume plugins.
+func ProbeVolumePlugins() []volume.VolumePlugin {
+ return []volume.VolumePlugin{&cascadeDiskPlugin{}}
+}
+
+type cascadeDiskPlugin struct {
+ host volume.VolumeHost
+}
+
+var _ volume.VolumePlugin = &cascadeDiskPlugin{}
+var _ volume.PersistentVolumePlugin = &cascadeDiskPlugin{}
+var _ volume.DeletableVolumePlugin = &cascadeDiskPlugin{}
+var _ volume.ProvisionableVolumePlugin = &cascadeDiskPlugin{}
+
+const (
+ cascadeDiskPluginName = "kubernetes.io/cascade-disk"
+)
+
+// Init initializes the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) Init(host volume.VolumeHost) error {
+ plugin.host = host
+ return nil
+}
+
+// GetPluginName returns the name of the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) GetPluginName() string {
+ return cascadeDiskPluginName
+}
+
+// GetVolumeName returns the name of the volume, which for this plugin is the diskID.
+func (plugin *cascadeDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
+ volumeSource, _, err := getVolumeSource(spec)
+ if err != nil {
+ glog.Errorf("Cascade volume plugin: GetVolumeName failed to get volume source")
+ return "", err
+ }
+
+ return volumeSource.DiskID, nil
+}
+
+// CanSupport reports whether the Cascade volume plugin can handle the given volume spec.
+// The plugin only supports persistent volumes and volumes that specify a Cascade disk volume source.
+func (plugin *cascadeDiskPlugin) CanSupport(spec *volume.Spec) bool {
+ return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CascadeDisk != nil) ||
+ (spec.Volume != nil && spec.Volume.CascadeDisk != nil)
+}
+
+// RequiresRemount specifies whether remount is required for the disk.
+func (plugin *cascadeDiskPlugin) RequiresRemount() bool {
+ return false
+}
+
+// SupportsMountOption specifies whether the Cascade volume plugin supports mount options.
+func (plugin *cascadeDiskPlugin) SupportsMountOption() bool {
+ return true
+}
+
+// SupportsBulkVolumeVerification specifies whether bulk volume verification is supported.
+func (plugin *cascadeDiskPlugin) SupportsBulkVolumeVerification() bool {
+ return false
+}
+
+// NewMounter returns the mounter associated with the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod,
+ _ volume.VolumeOptions) (volume.Mounter, error) {
+ return plugin.newMounterInternal(spec, pod.UID, &CascadeDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
+}
+
+// NewUnmounter returns the unmounter associated with the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
+ return plugin.newUnmounterInternal(volName, podUID, &CascadeDiskUtil{},
+ plugin.host.GetMounter(plugin.GetPluginName()))
+}
+
+func (plugin *cascadeDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager,
+ mounter mount.Interface) (volume.Mounter, error) {
+ volumeSource, _, err := getVolumeSource(spec)
+ if err != nil {
+ glog.Errorf("Cascade volume plugin: newMounterInternal failed to get volume source")
+ return nil, err
+ }
+
+ diskID := volumeSource.DiskID
+ fsType := volumeSource.FSType
+
+ return &cascadeDiskMounter{
+ cascadeDisk: &cascadeDisk{
+ podUID: podUID,
+ volName: spec.Name(),
+ diskID: diskID,
+ manager: manager,
+ mounter: mounter,
+ plugin: plugin,
+ },
+ fsType: fsType,
+ diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
+}
+
+func (plugin *cascadeDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager,
+ mounter mount.Interface) (volume.Unmounter, error) {
+ return &cascadeDiskUnmounter{
+ &cascadeDisk{
+ podUID: podUID,
+ volName: volName,
+ manager: manager,
+ mounter: mounter,
+ plugin: plugin,
+ }}, nil
+}
+
+// ConstructVolumeSpec constructs a Cascade volume spec based on the name and mount path.
+func (plugin *cascadeDiskPlugin) ConstructVolumeSpec(volumeSpecName, mountPath string) (*volume.Spec, error) {
+ mounter := plugin.host.GetMounter(plugin.GetPluginName())
+ pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
+ diskID, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
+ if err != nil {
+ return nil, err
+ }
+
+ cascadeDisk := &v1.Volume{
+ Name: volumeSpecName,
+ VolumeSource: v1.VolumeSource{
+ CascadeDisk: &v1.CascadeDiskVolumeSource{
+ DiskID: diskID,
+ },
+ },
+ }
+ return volume.NewSpecFromVolume(cascadeDisk), nil
+}
+
+// Abstract interface to disk operations.
+type diskManager interface {
+ // Creates a volume
+ CreateVolume(provisioner *cascadeDiskProvisioner) (diskID string, volumeSizeGB int, fstype string, err error)
+ // Deletes a volume
+ DeleteVolume(deleter *cascadeDiskDeleter) error
+}
+
+// cascadeDisk volumes are disk resources attached to the kubelet's host machine and exposed to the pod.
+type cascadeDisk struct {
+ volName string
+ podUID types.UID
+ diskID string
+ fsType string
+ manager diskManager
+ mounter mount.Interface
+ plugin *cascadeDiskPlugin
+ volume.MetricsNil
+}
+
+var _ volume.Mounter = &cascadeDiskMounter{}
+
+type cascadeDiskMounter struct {
+ *cascadeDisk
+ fsType string
+ diskMounter *mount.SafeFormatAndMount
+}
+
+// GetAttributes returns the attributes associated with a Cascade disk.
+func (b *cascadeDiskMounter) GetAttributes() volume.Attributes {
+ return volume.Attributes{
+ SupportsSELinux: true,
+ }
+}
+
+// CanMount checks prior to mount operations to verify that the required components (binaries, etc.) to mount the
+// volume are available on the underlying node. If not, it returns an error.
+func (b *cascadeDiskMounter) CanMount() error {
+ return nil
+}
+
+// SetUp attaches the disk and bind mounts to the volume path.
+func (b *cascadeDiskMounter) SetUp(fsGroup *int64) error {
+ return b.SetUpAt(b.GetPath(), fsGroup)
+}
+
+// SetUpAt attaches the disk and bind mounts to the volume path.
+func (b *cascadeDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
+ glog.V(4).Infof("Cascade Persistent Disk setup %s to %s", b.diskID, dir)
+
+ // TODO: handle failed mounts here.
+ notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
+ if err != nil && !os.IsNotExist(err) {
+ glog.Errorf("cannot validate mount point: %s %v", dir, err)
+ return err
+ }
+ if !notmnt {
+ return nil
+ }
+
+ if err := os.MkdirAll(dir, 0750); err != nil {
+ glog.Errorf("mkdir failed on disk %s (%v)", dir, err)
+ return err
+ }
+
+ options := []string{"bind"}
+
+ // Perform a bind mount to the full path to allow duplicate mounts of the same PD.
+ globalPDPath := makeGlobalPDPath(b.plugin.host, b.diskID)
+ glog.V(4).Infof("attempting to mount %s", dir)
+
+ err = b.mounter.Mount(globalPDPath, dir, "", options)
+ if err != nil {
+ notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
+ if mntErr != nil {
+ glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
+ return err
+ }
+ if !notmnt {
+ if mntErr = b.mounter.Unmount(dir); mntErr != nil {
+ glog.Errorf("Failed to unmount: %v", mntErr)
+ return err
+ }
+ notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
+ if mntErr != nil {
+ glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
+ return err
+ }
+ if !notmnt {
+ glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.",
+ b.GetPath())
+ return err
+ }
+ }
+ os.Remove(dir)
+ glog.Errorf("Mount of disk %s failed: %v", dir, err)
+ return err
+ }
+ volume.SetVolumeOwnership(b, fsGroup)
+
+ return nil
+}
+
+var _ volume.Unmounter = &cascadeDiskUnmounter{}
+
+type cascadeDiskUnmounter struct {
+ *cascadeDisk
+}
+
+// TearDown unmounts the bind mount, and detaches the disk only if the disk resource was the last reference to that
+// disk on the kubelet.
+func (c *cascadeDiskUnmounter) TearDown() error {
+ return c.TearDownAt(c.GetPath())
+}
+
+// TearDownAt unmounts the bind mount, and detaches the disk only if the disk resource was the last reference to that
+// disk on the kubelet.
+func (c *cascadeDiskUnmounter) TearDownAt(dir string) error {
+ return util.UnmountPath(dir, c.mounter)
+}
+
+func makeGlobalPDPath(host volume.VolumeHost, diskID string) string {
+ return path.Join(host.GetPluginDir(cascadeDiskPluginName), mount.MountsInGlobalPDPath, diskID)
+}
+
+func (cd *cascadeDisk) GetPath() string {
+ name := cascadeDiskPluginName
+ return cd.plugin.host.GetPodVolumeDir(cd.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cd.volName)
+}
+
+func (plugin *cascadeDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
+ return []v1.PersistentVolumeAccessMode{
+ v1.ReadWriteOnce,
+ }
+}
+
+type cascadeDiskDeleter struct {
+ *cascadeDisk
+}
+
+var _ volume.Deleter = &cascadeDiskDeleter{}
+
+// NewDeleter returns the deleter associated with the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
+ return plugin.newDeleterInternal(spec, &CascadeDiskUtil{})
+}
+
+func (plugin *cascadeDiskPlugin) newDeleterInternal(spec *volume.Spec, manager diskManager) (volume.Deleter, error) {
+ if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CascadeDisk == nil {
+ return nil, fmt.Errorf("spec.PersistentVolumeSource.CascadeDisk is nil")
+ }
+ return &cascadeDiskDeleter{
+ &cascadeDisk{
+ volName: spec.Name(),
+ diskID: spec.PersistentVolume.Spec.CascadeDisk.DiskID,
+ manager: manager,
+ plugin: plugin,
+ }}, nil
+}
+
+func (r *cascadeDiskDeleter) Delete() error {
+ return r.manager.DeleteVolume(r)
+}
+
+type cascadeDiskProvisioner struct {
+ *cascadeDisk
+ options volume.VolumeOptions
+}
+
+var _ volume.Provisioner = &cascadeDiskProvisioner{}
+
+// NewProvisioner returns the provisioner associated with the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
+ return plugin.newProvisionerInternal(options, &CascadeDiskUtil{})
+}
+
+func (plugin *cascadeDiskPlugin) newProvisionerInternal(options volume.VolumeOptions,
+ manager diskManager) (volume.Provisioner, error) {
+ return &cascadeDiskProvisioner{
+ cascadeDisk: &cascadeDisk{
+ manager: manager,
+ plugin: plugin,
+ },
+ options: options,
+ }, nil
+}
+
+// Provision provisions the persistent volume by making a CreateDisk call to the Cascade controller.
+func (p *cascadeDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
+ if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
+ return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported",
+ p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
+ }
+
+ diskID, sizeGB, fstype, err := p.manager.CreateVolume(p)
+ if err != nil {
+ return nil, err
+ }
+
+ if fstype == "" {
+ fstype = "ext4"
+ }
+
+ pv := &v1.PersistentVolume{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: p.options.PVName,
+ Labels: map[string]string{},
+ Annotations: map[string]string{
+ volumehelper.VolumeDynamicallyCreatedByKey: "cascade-volume-dynamic-provisioner",
+ },
+ },
+ Spec: v1.PersistentVolumeSpec{
+ PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
+ AccessModes: p.options.PVC.Spec.AccessModes,
+ Capacity: v1.ResourceList{
+ v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
+ },
+ PersistentVolumeSource: v1.PersistentVolumeSource{
+ CascadeDisk: &v1.CascadeDiskVolumeSource{
+ DiskID: diskID,
+ FSType: fstype,
+ },
+ },
+ MountOptions: p.options.MountOptions,
+ },
+ }
+ if len(p.options.PVC.Spec.AccessModes) == 0 {
+ pv.Spec.AccessModes = p.plugin.GetAccessModes()
+ }
+
+ return pv, nil
+}
+
+func getVolumeSource(spec *volume.Spec) (*v1.CascadeDiskVolumeSource, bool, error) {
+ if spec.Volume != nil && spec.Volume.CascadeDisk != nil {
+ return spec.Volume.CascadeDisk, spec.ReadOnly, nil
+ } else if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CascadeDisk != nil {
+ return spec.PersistentVolume.Spec.CascadeDisk, spec.ReadOnly, nil
+ }
+
+ return nil, false, fmt.Errorf("Spec does not reference a Cascade disk type")
+}
\ No newline at end of file
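
Once this patch is applied, the CascadeDisk volume source can be referenced like any other persistent volume source. The following is a minimal sketch, assuming a client vendored from this modified tree, of a PersistentVolume equivalent to what Provision returns; the object name, disk ID, and capacity are placeholder values.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Placeholder values; a real PV would carry the diskID returned by CreateDisk.
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "cascade-pv-example"},
		Spec: v1.PersistentVolumeSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Capacity: v1.ResourceList{
				v1.ResourceStorage: resource.MustParse("10Gi"),
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				CascadeDisk: &v1.CascadeDiskVolumeSource{
					DiskID: "disk-0123456789abcdef",
					FSType: "ext4",
				},
			},
		},
	}
	fmt.Printf("%s -> %s\n", pv.Name, pv.Spec.PersistentVolumeSource.CascadeDisk.DiskID)
}
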
diff --no-dereference -uNr kubernetes-original/pkg/volume/cascade_disk/cascade_util.go kubernetes-modified/src/k8s.io/kubernetes/pkg/volume/cascade_disk/cascade_util.go
--- kubernetes-original/pkg/volume/cascade_disk/cascade_util.go 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/volume/cascade_disk/cascade_util.go 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,107 @@
+package cascade_disk
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/golang/glog"
+ "k8s.io/api/core/v1"
+ "k8s.io/kubernetes/pkg/cloudprovider"
+ "k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
+ "k8s.io/kubernetes/pkg/volume"
+ volumeutil "k8s.io/kubernetes/pkg/volume/util"
+)
+
+const (
+ checkSleepDuration = time.Second
+)
+
+type CascadeDiskUtil struct{}
+
+func verifyDevicePath(path string) (string, error) {
+ if pathExists, err := volumeutil.PathExists(path); err != nil {
+ return "", fmt.Errorf("Error checking if path exists: %v", err)
+ } else if pathExists {
+ return path, nil
+ }
+
+ glog.V(4).Infof("verifyDevicePath: path does not exist yet")
+ return "", nil
+}
+
+// CreateVolume creates a Cascade persistent disk.
+func (util *CascadeDiskUtil) CreateVolume(p *cascadeDiskProvisioner) (diskID string, capacityGB int, fstype string,
+ err error) {
+ cloud, err := getCloudProvider(p.plugin.host.GetCloudProvider())
+ if err != nil {
+ glog.Errorf("Cascade Util: CreateVolume failed to get cloud provider. Error [%v]", err)
+ return "", 0, "", err
+ }
+
+ capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
+ volSizeBytes := capacity.Value()
+ // Cascade works in GiB; round the requested size in bytes up to the next GiB.
+ volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
+ name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 255)
+ volumeOptions := &cascade.VolumeOptions{
+ CapacityGB: volSizeGB,
+ Tags: *p.options.CloudTags,
+ Name: name,
+ }
+
+ for parameter, value := range p.options.Parameters {
+ switch strings.ToLower(parameter) {
+ case "flavor":
+ volumeOptions.Flavor = value
+ case volume.VolumeParameterFSType:
+ fstype = value
+ glog.V(4).Infof("Cascade Util: Setting fstype to %s", fstype)
+ default:
+ glog.Errorf("Cascade Util: invalid option %s for volume plugin %s.", parameter,
+ p.plugin.GetPluginName())
+ return "", 0, "", fmt.Errorf("Cascade Util: invalid option %s for volume plugin %s.", parameter,
+ p.plugin.GetPluginName())
+ }
+ }
+
+ diskID, err = cloud.CreateDisk(volumeOptions)
+ if err != nil {
+ glog.Errorf("Cascade Util: failed to CreateDisk. Error [%v]", err)
+ return "", 0, "", err
+ }
+
+ glog.V(4).Infof("Successfully created Cascade persistent disk %s", name)
+ return diskID, volSizeGB, fstype, nil
+}
+
+// DeleteVolume deletes a Cascade volume.
+func (util *CascadeDiskUtil) DeleteVolume(disk *cascadeDiskDeleter) error {
+ cloud, err := getCloudProvider(disk.plugin.host.GetCloudProvider())
+ if err != nil {
+ glog.Errorf("Cascade Util: DeleteVolume failed to get cloud provider. Error [%v]", err)
+ return err
+ }
+
+ if err = cloud.DeleteDisk(disk.diskID); err != nil {
+ glog.Errorf("Cascade Util: failed to DeleteDisk for diskID %s. Error [%v]", disk.diskID, err)
+ return err
+ }
+
+ glog.V(4).Infof("Successfully deleted Cascade persistent disk %s", disk.diskID)
+ return nil
+}
+
+func getCloudProvider(cloud cloudprovider.Interface) (*cascade.CascadeCloud, error) {
+ if cloud == nil {
+ glog.Errorf("Cascade Util: Cloud provider not initialized properly")
+ return nil, fmt.Errorf("Cascade Util: Cloud provider not initialized properly")
+ }
+
+ cc, ok := cloud.(*cascade.CascadeCloud)
+ if !ok || cc == nil {
+ glog.Errorf("Invalid cloud provider: expected Cascade")
+ return nil, fmt.Errorf("Invalid cloud provider: expected Cascade")
+ }
+ return cc, nil
+}
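
CreateVolume above converts the PVC's byte request into whole GiB before calling CreateDisk. The following standalone sketch reproduces that round-up arithmetic; roundUpSize is a local stand-in for volume.RoundUpSize, and the sample sizes are arbitrary.

package main

import "fmt"

// roundUpSize reproduces the GiB round-up used in CreateVolume above:
// any partial allocation unit is rounded up to a full unit.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	const giB = int64(1024 * 1024 * 1024)
	fmt.Println(roundUpSize(5*giB, giB))   // 5 (exactly 5Gi stays 5)
	fmt.Println(roundUpSize(5*giB+1, giB)) // 6 (one extra byte rounds up)
}
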
diff --no-dereference -uNr kubernetes-original/pkg/volume/cascade_disk/OWNERS kubernetes-modified/src/k8s.io/kubernetes/pkg/volume/cascade_disk/OWNERS
--- kubernetes-original/pkg/volume/cascade_disk/OWNERS 1970-01-01 00:00:00.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/pkg/volume/cascade_disk/OWNERS 2018-03-23 21:44:53.000000000 +0000
@@ -0,0 +1,2 @@
+approvers:
+- ashokc
diff --no-dereference -uNr kubernetes-original/plugin/pkg/admission/persistentvolume/label/admission.go kubernetes-modified/src/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go
--- kubernetes-original/plugin/pkg/admission/persistentvolume/label/admission.go 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go 2018-03-23 21:44:53.000000000 +0000
@@ -27,6 +27,7 @@
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
+ "k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@@ -50,6 +51,7 @@
ebsVolumes aws.Volumes
cloudConfig []byte
gceCloudProvider *gce.GCECloud
+ cascadeDisks cascade.Disks
}
var _ admission.MutationInterface = &persistentVolumeLabel{}
@@ -102,6 +104,13 @@
}
volumeLabels = labels
}
+ if volume.Spec.CascadeDisk != nil {
+ labels, err := l.findCascadeDiskLabels(volume)
+ if err != nil {
+ return admission.NewForbidden(a, fmt.Errorf("error querying Cascade volume %s: %v", volume.Spec.CascadeDisk.DiskID, err))
+ }
+ volumeLabels = labels
+ }
if len(volumeLabels) != 0 {
if volume.Labels == nil {
@@ -214,3 +223,48 @@
}
return l.gceCloudProvider, nil
}
+
+func (l *persistentVolumeLabel) findCascadeDiskLabels(volume *api.PersistentVolume) (map[string]string, error) {
+ // Ignore any volumes that are being provisioned
+ if volume.Spec.CascadeDisk.DiskID == vol.ProvisionedVolumeName {
+ return nil, nil
+ }
+ cascadeDisks, err := l.getCascadeDisks()
+ if err != nil {
+ return nil, err
+ }
+ if cascadeDisks == nil {
+ return nil, fmt.Errorf("unable to build Cascade cloud provider for volumes")
+ }
+
+ labels, err := cascadeDisks.GetVolumeLabels(volume.Spec.CascadeDisk.DiskID)
+ if err != nil {
+ return nil, err
+ }
+
+ return labels, nil
+}
+
+// getCascadeDisks returns the Cascade Disks interface
+func (l *persistentVolumeLabel) getCascadeDisks() (cascade.Disks, error) {
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ if l.cascadeDisks == nil {
+ var cloudConfigReader io.Reader
+ if len(l.cloudConfig) > 0 {
+ cloudConfigReader = bytes.NewReader(l.cloudConfig)
+ }
+ cloudProvider, err := cloudprovider.GetCloudProvider("cascade", cloudConfigReader)
+ if err != nil || cloudProvider == nil {
+ return nil, err
+ }
+ provider, ok := cloudProvider.(*cascade.CascadeCloud)
+ if !ok {
+ // GetCloudProvider has gone very wrong
+ return nil, fmt.Errorf("error retrieving Cascade cloud provider")
+ }
+ l.cascadeDisks = provider
+ }
+ return l.cascadeDisks, nil
+}
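
getCascadeDisks builds the Cascade cloud-provider handle lazily under a mutex, so only the first admission that needs labels pays the construction cost. The sketch below isolates that pattern; diskLister, fakeDisks, and labeler are stand-ins invented for the example and are not part of the cascade package, although GetVolumeLabels mirrors the call made above.

package main

import (
	"fmt"
	"sync"
)

// diskLister is a stand-in for the cascade.Disks dependency used above.
type diskLister interface {
	GetVolumeLabels(diskID string) (map[string]string, error)
}

type fakeDisks struct{}

func (fakeDisks) GetVolumeLabels(diskID string) (map[string]string, error) {
	return map[string]string{"zone": "zone-a"}, nil
}

type labeler struct {
	mutex sync.Mutex
	disks diskLister
}

// getDisks mirrors the mutex-guarded lazy initialization in getCascadeDisks:
// the provider is constructed once and cached for later calls.
func (l *labeler) getDisks(build func() (diskLister, error)) (diskLister, error) {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	if l.disks == nil {
		d, err := build()
		if err != nil {
			return nil, err
		}
		l.disks = d
	}
	return l.disks, nil
}

func main() {
	l := &labeler{}
	disks, _ := l.getDisks(func() (diskLister, error) { return fakeDisks{}, nil })
	labels, _ := disks.GetVolumeLabels("disk-0123456789abcdef")
	fmt.Println(labels)
}
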
diff --no-dereference -uNr kubernetes-original/staging/src/k8s.io/api/core/v1/generated.pb.go kubernetes-modified/src/k8s.io/kubernetes/staging/src/k8s.io/api/core/v1/generated.pb.go
--- kubernetes-original/staging/src/k8s.io/api/core/v1/generated.pb.go 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/staging/src/k8s.io/api/core/v1/generated.pb.go 2018-03-23 21:44:53.000000000 +0000
@@ -35,6 +35,7 @@
Binding
CSIPersistentVolumeSource
Capabilities
+ CascadeDiskVolumeSource
CephFSPersistentVolumeSource
CephFSVolumeSource
CinderVolumeSource
@@ -260,9 +261,11 @@
func (*AvoidPods) ProtoMessage() {}
func (*AvoidPods) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} }
-func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} }
-func (*AzureDiskVolumeSource) ProtoMessage() {}
-func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} }
+func (m *CascadeDiskVolumeSource) Reset() { *m = CascadeDiskVolumeSource{} }
+func (*CascadeDiskVolumeSource) ProtoMessage() {}
+func (*CascadeDiskVolumeSource) Descriptor() ([]byte, []int) {
+ return fileDescriptorGenerated, []int{4}
+}
func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} }
func (*AzureFilePersistentVolumeSource) ProtoMessage() {}
@@ -1040,6 +1043,11 @@
return fileDescriptorGenerated, []int{185}
}
+func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} }
+func (*AzureDiskVolumeSource) ProtoMessage() {}
+func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{186} }
+
+
func init() {
proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource")
proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity")
@@ -1051,6 +1059,7 @@
proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding")
proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource")
proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities")
+ proto.RegisterType((*CascadeDiskVolumeSource)(nil), "k8s.io.api.core.v1.CascadeDiskVolumeSource")
proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource")
proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource")
proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource")
@@ -1613,6 +1622,32 @@
return i, nil
}
+func (m *CascadeDiskVolumeSource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CascadeDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskID)))
+ i += copy(dAtA[i:], m.DiskID)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+ i += copy(dAtA[i:], m.FSType)
+ return i, nil
+}
+
func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -6283,13 +6318,13 @@
}
i += n120
}
- if m.AzureDisk != nil {
+ if m.CascadeDisk != nil {
dAtA[i] = 0x82
i++
dAtA[i] = 0x1
i++
- i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
- n121, err := m.AzureDisk.MarshalTo(dAtA[i:])
+ i = encodeVarintGenerated(dAtA, i, uint64(m.CascadeDisk.Size()))
+ n121, err := m.CascadeDisk.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
@@ -6367,6 +6402,18 @@
}
i += n127
}
+ if m.AzureDisk != nil {
+ dAtA[i] = 0xba
+ i++
+ dAtA[i] = 0x1
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
+ n128, err := m.AzureDisk.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n128
+ }
return i, nil
}
@@ -10316,13 +10363,13 @@
}
i += n223
}
- if m.AzureDisk != nil {
+ if m.CascadeDisk != nil {
dAtA[i] = 0xb2
i++
dAtA[i] = 0x1
i++
- i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
- n224, err := m.AzureDisk.MarshalTo(dAtA[i:])
+ i = encodeVarintGenerated(dAtA, i, uint64(m.CascadeDisk.Size()))
+ n224, err := m.CascadeDisk.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
@@ -10388,6 +10435,18 @@
}
i += n229
}
+ if m.AzureDisk != nil {
+ dAtA[i] = 0xe2
+ i++
+ dAtA[i] = 0x1
+ i++
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
+ n230, err := m.AzureDisk.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n230
+ }
return i, nil
}
@@ -10623,6 +10682,16 @@
return n
}
+func (m *CascadeDiskVolumeSource) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.DiskID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FSType)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *CephFSPersistentVolumeSource) Size() (n int) {
var l int
_ = l
@@ -12331,8 +12400,8 @@
l = m.Quobyte.Size()
n += 1 + l + sovGenerated(uint64(l))
}
- if m.AzureDisk != nil {
- l = m.AzureDisk.Size()
+ if m.CascadeDisk != nil {
+ l = m.CascadeDisk.Size()
n += 2 + l + sovGenerated(uint64(l))
}
if m.PhotonPersistentDisk != nil {
@@ -12359,6 +12428,10 @@
l = m.CSI.Size()
n += 2 + l + sovGenerated(uint64(l))
}
+ if m.AzureDisk != nil {
+ l = m.AzureDisk.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -13788,8 +13861,8 @@
l = m.Quobyte.Size()
n += 2 + l + sovGenerated(uint64(l))
}
- if m.AzureDisk != nil {
- l = m.AzureDisk.Size()
+ if m.CascadeDisk != nil {
+ l = m.CascadeDisk.Size()
n += 2 + l + sovGenerated(uint64(l))
}
if m.PhotonPersistentDisk != nil {
@@ -13812,6 +13885,10 @@
l = m.StorageOS.Size()
n += 2 + l + sovGenerated(uint64(l))
}
+ if m.AzureDisk != nil {
+ l = m.AzureDisk.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -13971,6 +14048,17 @@
}, "")
return s
}
+func (this *CascadeDiskVolumeSource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CascadeDiskVolumeSource{`,
+ `DiskID:` + fmt.Sprintf("%v", this.DiskID) + `,`,
+ `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *CephFSPersistentVolumeSource) String() string {
if this == nil {
return "nil"
@@ -15335,13 +15423,14 @@
`AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`,
`VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`,
`Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`,
- `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
+ `CascadeDisk:` + strings.Replace(fmt.Sprintf("%v", this.CascadeDisk), "CascadeDiskVolumeSource", "CascadeDiskVolumeSource", 1) + `,`,
`PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`,
`PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`,
`ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`,
`Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`,
`StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`,
`CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`,
+ `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
`}`,
}, "")
return s
@@ -16468,12 +16557,13 @@
`ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`,
`VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`,
`Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`,
- `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
+ `CascadeDisk:` + strings.Replace(fmt.Sprintf("%v", this.CascadeDisk), "CascadeDiskVolumeSource", "CascadeDiskVolumeSource", 1) + `,`,
`PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`,
`PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`,
`ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`,
`Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`,
`StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`,
+ `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
`}`,
}, "")
return s
@@ -34322,7 +34412,7 @@
iNdEx = postIndex
case 16:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CascadeDisk", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -34346,10 +34436,10 @@
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.AzureDisk == nil {
- m.AzureDisk = &AzureDiskVolumeSource{}
+ if m.CascadeDisk == nil {
+ m.CascadeDisk = &CascadeDiskVolumeSource{}
}
- if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.CascadeDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -34551,6 +34641,39 @@
return err
}
iNdEx = postIndex
+ case 23:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AzureDisk == nil {
+ m.AzureDisk = &AzureDiskVolumeSource{}
+ }
+ if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -35089,6 +35212,114 @@
}
return nil
}
+func (m *CascadeDiskVolumeSource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CascadeDiskVolumeSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CascadeDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DiskID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DiskID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FSType = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -48522,7 +48753,7 @@
iNdEx = postIndex
case 22:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CascadeDisk", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -48546,10 +48777,10 @@
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.AzureDisk == nil {
- m.AzureDisk = &AzureDiskVolumeSource{}
+ if m.CascadeDisk == nil {
+ m.CascadeDisk = &CascadeDiskVolumeSource{}
}
- if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.CascadeDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -48718,6 +48949,39 @@
return err
}
iNdEx = postIndex
+ case 28:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AzureDisk == nil {
+ m.AzureDisk = &AzureDiskVolumeSource{}
+ }
+ if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --no-dereference -uNr kubernetes-original/staging/src/k8s.io/api/core/v1/types.go kubernetes-modified/src/k8s.io/kubernetes/staging/src/k8s.io/api/core/v1/types.go
--- kubernetes-original/staging/src/k8s.io/api/core/v1/types.go 2018-03-20 19:21:10.000000000 +0000
+++ kubernetes-modified/src/k8s.io/kubernetes/staging/src/k8s.io/api/core/v1/types.go 2018-03-23 21:44:53.000000000 +0000
@@ -333,9 +333,8 @@
// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// +optional
Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
- // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
- // +optional
- AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
+ // CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
+ CascadeDisk *CascadeDiskVolumeSource `json:"cascadeDisk,omitempty" protobuf:"bytes,22,opt,name=cascadeDisk"`
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
// Items for all in one resources secrets, configmaps, and downward API
@@ -349,6 +348,9 @@
// StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
// +optional
StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
+ // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ // +optional
+ AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,28,opt,name=azureDisk"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@@ -428,9 +430,8 @@
// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// +optional
Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
- // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
- // +optional
- AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
+ // CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
+ CascadeDisk *CascadeDiskVolumeSource `json:"cascadeDisk,omitempty" protobuf:"bytes,16,opt,name=cascadeDisk"`
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
@@ -449,6 +450,9 @@
// CSI represents storage that handled by an external CSI driver
// +optional
CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
+ // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ // +optional
+ AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,23,opt,name=azureDisk"`
}
const (
@@ -1578,6 +1582,16 @@
SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
+// Represents a Cascade persistent disk resource.
+type CascadeDiskVolumeSource struct {
+ // ID that identifies Cascade persistent disk
+ DiskID string `json:"diskID" protobuf:"bytes,1,opt,name=diskID"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+}
+
// Adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
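
The json struct tags above define the wire names clients see for the new source. As a quick check, this sketch, again assuming a client vendored from this modified tree, marshals a CascadeDiskVolumeSource and prints the resulting keys; the disk ID is a placeholder.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/api/core/v1"
)

func main() {
	src := v1.CascadeDiskVolumeSource{DiskID: "disk-0123456789abcdef", FSType: "xfs"}
	out, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	// Prints: {"diskID":"disk-0123456789abcdef","fsType":"xfs"}
	fmt.Println(string(out))
}
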