From 6de826b35ced3b7cadc809d7ea778ce6a50aff43 Mon Sep 17 00:00:00 2001
From: DheerajSShetty <dheerajs@vmware.com>
Date: Wed, 22 Aug 2018 16:30:37 -0700
Subject: [PATCH] VKE patch for k8s version 1.9 (fbdcc5c)

---
 api/swagger-spec/apps_v1alpha1.json                | Bin 135734 -> 136495 bytes
 api/swagger-spec/apps_v1beta1.json                 | Bin 311661 -> 312422 bytes
 api/swagger-spec/apps_v1beta2.json                 | Bin 425385 -> 426146 bytes
 api/swagger-spec/batch_v1.json                     | Bin 182308 -> 183069 bytes
 api/swagger-spec/batch_v1beta1.json                | Bin 185598 -> 186359 bytes
 api/swagger-spec/batch_v2alpha1.json               | Bin 185625 -> 186386 bytes
 api/swagger-spec/extensions_v1beta1.json           | Bin 473155 -> 473916 bytes
 api/swagger-spec/settings.k8s.io_v1alpha1.json     | Bin 128260 -> 129021 bytes
 api/swagger-spec/v1.json                           | Bin 988673 -> 989618 bytes
 cmd/kube-apiserver/app/options/plugins.go          |   2 +
 cmd/kube-controller-manager/app/BUILD              |   1 +
 cmd/kube-controller-manager/app/plugins.go         |   4 +
 cmd/kubelet/app/BUILD                              |   1 +
 cmd/kubelet/app/plugins.go                         |   2 +
 pkg/apis/core/types.go                             |  14 +
 pkg/apis/core/validation/validation.go             |  25 +
 pkg/apis/extensions/types.go                       |   1 +
 pkg/cloudprovider/providers/BUILD                  |   2 +
 pkg/cloudprovider/providers/cascade/BUILD          |  56 ++
 pkg/cloudprovider/providers/cascade/OWNERS         |   3 +
 pkg/cloudprovider/providers/cascade/apitypes.go    | 230 +++++
 pkg/cloudprovider/providers/cascade/auth.go        | 145 ++++
 pkg/cloudprovider/providers/cascade/cascade.go     | 219 +++++
 .../providers/cascade/cascade_disks.go             | 226 +++++
 .../providers/cascade/cascade_instances.go         |  91 ++
 .../providers/cascade/cascade_instances_test.go    |  43 +
 .../providers/cascade/cascade_loadbalancer.go      | 284 +++++++
 pkg/cloudprovider/providers/cascade/client.go      | 399 +++++++++
 pkg/cloudprovider/providers/cascade/oidcclient.go  | 297 +++++++
 pkg/cloudprovider/providers/cascade/restclient.go  | 262 ++++++
 pkg/cloudprovider/providers/cascade/tests_owed     |   5 +
 pkg/cloudprovider/providers/cascade/utils.go       |  29 +
 pkg/cloudprovider/providers/providers.go           |   1 +
 pkg/kubeapiserver/authorizer/config.go             |   8 +-
 pkg/kubeapiserver/authorizer/modes/modes.go        |   3 +-
 pkg/printers/internalversion/describe.go           |  11 +
 pkg/security/podsecuritypolicy/util/util.go        |   3 +
 pkg/volume/cascade_disk/BUILD                      |  43 +
 pkg/volume/cascade_disk/OWNERS                     |   2 +
 pkg/volume/cascade_disk/attacher.go                | 265 ++++++
 pkg/volume/cascade_disk/azure_disk_util.go         | 136 +++
 pkg/volume/cascade_disk/cascade_disk.go            | 391 +++++++++
 pkg/volume/cascade_disk/cascade_util.go            | 201 +++++
 .../admission/persistentvolume/label/admission.go  |  54 ++
 plugin/pkg/admission/vke/BUILD                     |  61 ++
 plugin/pkg/admission/vke/admission.go              | 587 +++++++++++++
 plugin/pkg/admission/vke/admission_test.go         | 941 +++++++++++++++++++++
 plugin/pkg/auth/authorizer/vke/BUILD               |  40 +
 plugin/pkg/auth/authorizer/vke/OWNERS              |   3 +
 plugin/pkg/auth/authorizer/vke/vke_authorizer.go   | 123 +++
 .../pkg/auth/authorizer/vke/vke_authorizer_test.go | 230 +++++
 staging/src/k8s.io/api/core/v1/generated.pb.go     | Bin 1241955 -> 1248240 bytes
 staging/src/k8s.io/api/core/v1/types.go            |  26 +-
 53 files changed, 5462 insertions(+), 8 deletions(-)

diff --git a/api/swagger-spec/apps_v1alpha1.json b/api/swagger-spec/apps_v1alpha1.json
index aa3fbdc..0189f38 100644
--- a/api/swagger-spec/apps_v1alpha1.json
+++ b/api/swagger-spec/apps_v1alpha1.json
@@ -1459,6 +1459,10 @@
      "photonPersistentDisk": {
       "$ref": "v1.PhotonPersistentDiskVolumeSource",
       "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
+     },
+     "vkeDisk": {
+      "$ref": "v1.CascadeDiskVolumeSource",
+      "description": "vkeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
      }
     }
    },
@@ -2109,6 +2113,23 @@
      }
     }
    },
+   "v1.CascadeDiskVolumeSource": {
+    "id": "v1.CascadeDiskVolumeSource",
+    "description": "Represents a Cascade persistent disk resource.",
+    "required": [
+     "diskID"
+    ],
+    "properties": {
+     "diskID": {
+      "type": "string",
+      "description": "ID that identifies Cascade persistent disk"
+     },
+     "fsType": {
+      "type": "string",
+      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+     }
+    }
+   },
    "v1.Container": {
     "id": "v1.Container",
     "description": "A single application container that you want to run within a pod.",
diff --git a/api/swagger-spec/apps_v1beta1.json b/api/swagger-spec/apps_v1beta1.json
index e253317..c1fa812 100644
--- a/api/swagger-spec/apps_v1beta1.json
+++ b/api/swagger-spec/apps_v1beta1.json
@@ -4479,6 +4479,10 @@
       "$ref": "v1.PhotonPersistentDiskVolumeSource",
       "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
      },
+     "vkeDisk": {
+      "$ref": "v1.CascadeDiskVolumeSource",
+      "description": "vkeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+     },
      "projected": {
       "$ref": "v1.ProjectedVolumeSource",
       "description": "Items for all in one resources secrets, configmaps, and downward API"
@@ -5206,6 +5210,23 @@
      }
     }
    },
+   "v1.CascadeDiskVolumeSource": {
+    "id": "v1.CascadeDiskVolumeSource",
+    "description": "Represents a Cascade persistent disk resource.",
+    "required": [
+     "diskID"
+    ],
+    "properties": {
+     "diskID": {
+      "type": "string",
+      "description": "ID that identifies Cascade persistent disk"
+     },
+     "fsType": {
+      "type": "string",
+      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+     }
+    }
+   },
    "v1.ProjectedVolumeSource": {
     "id": "v1.ProjectedVolumeSource",
     "description": "Represents a projected volume source",
diff --git a/api/swagger-spec/apps_v1beta2.json b/api/swagger-spec/apps_v1beta2.json
index be42788..5abb9f5 100644
--- a/api/swagger-spec/apps_v1beta2.json
+++ b/api/swagger-spec/apps_v1beta2.json
@@ -6845,6 +6845,10 @@
       "$ref": "v1.PhotonPersistentDiskVolumeSource",
       "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
      },
+     "vkeDisk": {
+      "$ref": "v1.CascadeDiskVolumeSource",
+      "description": "vkeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+     },
      "projected": {
       "$ref": "v1.ProjectedVolumeSource",
       "description": "Items for all in one resources secrets, configmaps, and downward API"
@@ -7572,6 +7576,23 @@
      }
     }
    },
+   "v1.CascadeDiskVolumeSource": {
+    "id": "v1.CascadeDiskVolumeSource",
+    "description": "Represents a Cascade persistent disk resource.",
+    "required": [
+     "diskID"
+    ],
+    "properties": {
+     "diskID": {
+      "type": "string",
+      "description": "ID that identifies Cascade persistent disk"
+     },
+     "fsType": {
+      "type": "string",
+      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+     }
+    }
+   },
    "v1.ProjectedVolumeSource": {
     "id": "v1.ProjectedVolumeSource",
     "description": "Represents a projected volume source",
diff --git a/api/swagger-spec/batch_v1.json b/api/swagger-spec/batch_v1.json
index 28787d8..28fcb65 100644
--- a/api/swagger-spec/batch_v1.json
+++ b/api/swagger-spec/batch_v1.json
@@ -1819,6 +1819,10 @@
       "$ref": "v1.PhotonPersistentDiskVolumeSource",
       "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
      },
+     "vkeDisk": {
+      "$ref": "v1.CascadeDiskVolumeSource",
+      "description": "vkeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+     },
      "projected": {
       "$ref": "v1.ProjectedVolumeSource",
       "description": "Items for all in one resources secrets, configmaps, and downward API"
@@ -2546,6 +2550,23 @@
      }
     }
    },
+   "v1.CascadeDiskVolumeSource": {
+    "id": "v1.CascadeDiskVolumeSource",
+    "description": "Represents a Cascade persistent disk resource.",
+    "required": [
+     "diskID"
+    ],
+    "properties": {
+     "diskID": {
+      "type": "string",
+      "description": "ID that identifies Cascade persistent disk"
+     },
+     "fsType": {
+      "type": "string",
+      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+     }
+    }
+   },
    "v1.ProjectedVolumeSource": {
     "id": "v1.ProjectedVolumeSource",
     "description": "Represents a projected volume source",
diff --git a/api/swagger-spec/batch_v1beta1.json b/api/swagger-spec/batch_v1beta1.json
index bb9b870..f67a014 100644
--- a/api/swagger-spec/batch_v1beta1.json
+++ b/api/swagger-spec/batch_v1beta1.json
@@ -1874,6 +1874,10 @@
       "$ref": "v1.PhotonPersistentDiskVolumeSource",
       "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
      },
+     "vkeDisk": {
+      "$ref": "v1.CascadeDiskVolumeSource",
+      "description": "vkeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+     },
      "projected": {
       "$ref": "v1.ProjectedVolumeSource",
       "description": "Items for all in one resources secrets, configmaps, and downward API"
@@ -2601,6 +2605,23 @@
      }
     }
    },
+   "v1.CascadeDiskVolumeSource": {
+    "id": "v1.CascadeDiskVolumeSource",
+    "description": "Represents a Cascade persistent disk resource.",
+    "required": [
+     "diskID"
+    ],
+    "properties": {
+     "diskID": {
+      "type": "string",
+      "description": "ID that identifies Cascade persistent disk"
+     },
+     "fsType": {
+      "type": "string",
+      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+     }
+    }
+   },
    "v1.ProjectedVolumeSource": {
     "id": "v1.ProjectedVolumeSource",
     "description": "Represents a projected volume source",
diff --git a/api/swagger-spec/batch_v2alpha1.json b/api/swagger-spec/batch_v2alpha1.json
index cde6619..78888be 100644
--- a/api/swagger-spec/batch_v2alpha1.json
+++ b/api/swagger-spec/batch_v2alpha1.json
@@ -1889,6 +1889,10 @@
      "storageos": {
       "$ref": "v1.StorageOSVolumeSource",
       "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
+     },
+     "vkeDisk": {
+      "$ref": "v1.CascadeDiskVolumeSource",
+      "description": "vkeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
      }
     }
    },
@@ -2793,6 +2797,23 @@
      }
     }
    },
+   "v1.CascadeDiskVolumeSource": {
+    "id": "v1.CascadeDiskVolumeSource",
+    "description": "Represents a Cascade persistent disk resource.",
+    "required": [
+     "diskID"
+    ],
+    "properties": {
+     "diskID": {
+      "type": "string",
+      "description": "ID that identifies Cascade persistent disk"
+     },
+     "fsType": {
+      "type": "string",
+      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+     }
+    }
+   },
    "v1.Container": {
     "id": "v1.Container",
     "description": "A single application container that you want to run within a pod.",
diff --git a/api/swagger-spec/extensions_v1beta1.json b/api/swagger-spec/extensions_v1beta1.json
index d8b20a3..73342a1 100644
--- a/api/swagger-spec/extensions_v1beta1.json
+++ b/api/swagger-spec/extensions_v1beta1.json
@@ -7502,6 +7502,10 @@
      "storageos": {
       "$ref": "v1.StorageOSVolumeSource",
       "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
+     },
+     "vkeDisk": {
+      "$ref": "v1.CascadeDiskVolumeSource",
+      "description": "vkeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
      }
     }
    },
@@ -8214,6 +8218,23 @@
      }
     }
    },
+   "v1.CascadeDiskVolumeSource": {
+    "id": "v1.CascadeDiskVolumeSource",
+    "description": "Represents a Cascade persistent disk resource.",
+    "required": [
+     "diskID"
+    ],
+    "properties": {
+     "diskID": {
+      "type": "string",
+      "description": "ID that identifies Cascade persistent disk"
+     },
+     "fsType": {
+      "type": "string",
+      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+     }
+    }
+   },
    "v1.ProjectedVolumeSource": {
     "id": "v1.ProjectedVolumeSource",
     "description": "Represents a projected volume source",
diff --git a/api/swagger-spec/settings.k8s.io_v1alpha1.json b/api/swagger-spec/settings.k8s.io_v1alpha1.json
index dc442a8..8c1f100 100644
--- a/api/swagger-spec/settings.k8s.io_v1alpha1.json
+++ b/api/swagger-spec/settings.k8s.io_v1alpha1.json
@@ -1676,6 +1676,10 @@
      "storageos": {
       "$ref": "v1.StorageOSVolumeSource",
       "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
+     },
+     "vkeDisk": {
+      "$ref": "v1.CascadeDiskVolumeSource",
+      "description": "vkeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
      }
     }
    },
@@ -2350,6 +2354,23 @@
      }
     }
    },
+   "v1.CascadeDiskVolumeSource": {
+    "id": "v1.CascadeDiskVolumeSource",
+    "description": "Represents a Cascade persistent disk resource.",
+    "required": [
+     "diskID"
+    ],
+    "properties": {
+     "diskID": {
+      "type": "string",
+      "description": "ID that identifies Cascade persistent disk"
+     },
+     "fsType": {
+      "type": "string",
+      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+     }
+    }
+   },
    "v1.ProjectedVolumeSource": {
     "id": "v1.ProjectedVolumeSource",
     "description": "Represents a projected volume source",
diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json
index 97be62b..df544c8 100644
--- a/api/swagger-spec/v1.json
+++ b/api/swagger-spec/v1.json
@@ -20629,6 +20629,10 @@
       "$ref": "v1.PhotonPersistentDiskVolumeSource",
       "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
      },
+     "vkeDisk": {
+      "$ref": "v1.CascadeDiskVolumeSource",
+      "description": "vkeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+     },
      "portworxVolume": {
       "$ref": "v1.PortworxVolumeSource",
       "description": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine"
@@ -21200,6 +21204,23 @@
      }
     }
    },
+   "v1.CascadeDiskVolumeSource": {
+    "id": "v1.CascadeDiskVolumeSource",
+    "description": "Represents a Cascade persistent disk resource.",
+    "required": [
+     "diskID"
+    ],
+    "properties": {
+     "diskID": {
+      "type": "string",
+      "description": "ID that identifies Cascade persistent disk"
+     },
+     "fsType": {
+      "type": "string",
+      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+     }
+    }
+   },
    "v1.PortworxVolumeSource": {
     "id": "v1.PortworxVolumeSource",
     "description": "PortworxVolumeSource represents a Portworx volume resource.",
@@ -21657,6 +21678,10 @@
      "storageos": {
       "$ref": "v1.StorageOSVolumeSource",
       "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
+     },
+     "vkeDisk": {
+      "$ref": "v1.CascadeDiskVolumeSource",
+      "description": "vkeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
      }
     }
    },
diff --git a/cmd/kube-apiserver/app/options/plugins.go b/cmd/kube-apiserver/app/options/plugins.go
index a0d2502..4fe32e4 100644
--- a/cmd/kube-apiserver/app/options/plugins.go
+++ b/cmd/kube-apiserver/app/options/plugins.go
@@ -52,6 +52,7 @@ import (
 	"k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny"
 	"k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
 	"k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault"
+	"k8s.io/kubernetes/plugin/pkg/admission/vke"
 )
 
 // RegisterAllAdmissionPlugins registers all admission plugins
@@ -83,4 +84,5 @@ func RegisterAllAdmissionPlugins(plugins *admission.Plugins) {
 	setdefault.Register(plugins)
 	resize.Register(plugins)
 	pvcprotection.Register(plugins)
+	vke.Register(plugins)
 }
diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD
index c518b36..f1a91f6 100644
--- a/cmd/kube-controller-manager/app/BUILD
+++ b/cmd/kube-controller-manager/app/BUILD
@@ -86,6 +86,7 @@ go_library(
         "//pkg/volume/aws_ebs:go_default_library",
         "//pkg/volume/azure_dd:go_default_library",
         "//pkg/volume/azure_file:go_default_library",
+        "//pkg/volume/cascade_disk:go_default_library",
         "//pkg/volume/cinder:go_default_library",
         "//pkg/volume/csi:go_default_library",
         "//pkg/volume/fc:go_default_library",
diff --git a/cmd/kube-controller-manager/app/plugins.go b/cmd/kube-controller-manager/app/plugins.go
index 170c366..5fa1cb1 100644
--- a/cmd/kube-controller-manager/app/plugins.go
+++ b/cmd/kube-controller-manager/app/plugins.go
@@ -34,6 +34,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume/aws_ebs"
 	"k8s.io/kubernetes/pkg/volume/azure_dd"
 	"k8s.io/kubernetes/pkg/volume/azure_file"
+	"k8s.io/kubernetes/pkg/volume/cascade_disk"
 	"k8s.io/kubernetes/pkg/volume/cinder"
 	"k8s.io/kubernetes/pkg/volume/csi"
 	"k8s.io/kubernetes/pkg/volume/fc"
@@ -77,6 +78,7 @@ func ProbeAttachableVolumePlugins() []volume.VolumePlugin {
 	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
+	allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
 	if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
 		allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
 	}
@@ -106,6 +108,7 @@ func ProbeExpandableVolumePlugins(config componentconfig.VolumeConfiguration) []
 	allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
+	allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
 	if !utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
 		allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
 	}
@@ -165,6 +168,7 @@ func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config componen
 	allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, photon_pd.ProbeVolumePlugins()...)
+	allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
 
 	return allPlugins
 }
diff --git a/cmd/kubelet/app/BUILD b/cmd/kubelet/app/BUILD
index c7e482f..0d3645a 100644
--- a/cmd/kubelet/app/BUILD
+++ b/cmd/kubelet/app/BUILD
@@ -74,6 +74,7 @@ go_library(
         "//pkg/volume/aws_ebs:go_default_library",
         "//pkg/volume/azure_dd:go_default_library",
         "//pkg/volume/azure_file:go_default_library",
+        "//pkg/volume/cascade_disk:go_default_library",
         "//pkg/volume/cephfs:go_default_library",
         "//pkg/volume/cinder:go_default_library",
         "//pkg/volume/configmap:go_default_library",
diff --git a/cmd/kubelet/app/plugins.go b/cmd/kubelet/app/plugins.go
index ef41bb8..c9806f7 100644
--- a/cmd/kubelet/app/plugins.go
+++ b/cmd/kubelet/app/plugins.go
@@ -32,6 +32,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume/aws_ebs"
 	"k8s.io/kubernetes/pkg/volume/azure_dd"
 	"k8s.io/kubernetes/pkg/volume/azure_file"
+	"k8s.io/kubernetes/pkg/volume/cascade_disk"
 	"k8s.io/kubernetes/pkg/volume/cephfs"
 	"k8s.io/kubernetes/pkg/volume/cinder"
 	"k8s.io/kubernetes/pkg/volume/configmap"
@@ -100,6 +101,7 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
 	allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
 	allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
+	allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
 	if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
 		allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
 	}
diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go
index b6b570b..1d9db5e 100644
--- a/pkg/apis/core/types.go
+++ b/pkg/apis/core/types.go
@@ -316,6 +316,8 @@ type VolumeSource struct {
 	// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
 	// +optional
 	StorageOS *StorageOSVolumeSource
+	// CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
+	CascadeDisk *CascadeDiskVolumeSource
 }
 
 // Similar to VolumeSource but meant for the administrator who creates PVs.
@@ -394,6 +396,8 @@ type PersistentVolumeSource struct {
 	// CSI (Container Storage Interface) represents storage that handled by an external CSI driver
 	// +optional
 	CSI *CSIPersistentVolumeSource
+	// CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
+	CascadeDisk *CascadeDiskVolumeSource
 }
 
 type PersistentVolumeClaimVolumeSource struct {
@@ -1471,6 +1475,16 @@ type StorageOSPersistentVolumeSource struct {
 	SecretRef *ObjectReference
 }
 
+// Represents a Cascade persistent disk resource.
+type CascadeDiskVolumeSource struct {
+	// ID that identifies Cascade persistent disk
+	DiskID string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	FSType string
+}
+
 // Adapts a ConfigMap into a volume.
 //
 // The contents of the target ConfigMap's Data field will be presented in a
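
For reference, a minimal sketch of how the new internal field is populated (illustrative only, not part of the patch;
the disk ID and filesystem type are placeholders):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
)

// Illustrative only: a PersistentVolumeSource backed by a Cascade persistent
// disk, using the CascadeDiskVolumeSource type added in this patch.
func main() {
	pvSource := core.PersistentVolumeSource{
		CascadeDisk: &core.CascadeDiskVolumeSource{
			DiskID: "disk-1234", // hypothetical disk ID
			FSType: "ext4",
		},
	}
	fmt.Printf("%+v\n", pvSource.CascadeDisk)
}
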
diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go
index f6ab55f..2fa447d 100644
--- a/pkg/apis/core/validation/validation.go
+++ b/pkg/apis/core/validation/validation.go
@@ -681,6 +681,14 @@ func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volNam
 			allErrs = append(allErrs, validateScaleIOVolumeSource(source.ScaleIO, fldPath.Child("scaleIO"))...)
 		}
 	}
+	if source.CascadeDisk != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("cascadeDisk"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateCascadeDiskVolumeSource(source.CascadeDisk, fldPath.Child("cascadeDisk"))...)
+		}
+	}
 
 	if numVolumes == 0 {
 		allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
@@ -1440,6 +1448,14 @@ func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldP
 	return allErrs
 }
 
+func validateCascadeDiskVolumeSource(cd *core.CascadeDiskVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(cd.DiskID) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("diskID"), ""))
+	}
+	return allErrs
+}
+
 // ValidatePersistentVolumeName checks that a name is appropriate for a
 // PersistentVolumeName object.
 var ValidatePersistentVolumeName = NameIsDNSSubdomain
@@ -1674,6 +1690,15 @@ func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList {
 		}
 	}
 
+	if pv.Spec.CascadeDisk != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("cascadeDisk"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateCascadeDiskVolumeSource(pv.Spec.CascadeDisk, specPath.Child("cascadeDisk"))...)
+		}
+	}
+
 	if numVolumes == 0 {
 		allErrs = append(allErrs, field.Required(specPath, "must specify a volume type"))
 	}
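
A minimal, hypothetical test sketch for the new validation helper (package-internal, not part of the patch; it only
exercises the required-diskID rule added above):

package validation

import (
	"testing"

	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kubernetes/pkg/apis/core"
)

// Hypothetical sketch: an empty diskID should yield a Required error, while a
// populated CascadeDiskVolumeSource should validate cleanly.
func TestValidateCascadeDiskVolumeSource(t *testing.T) {
	if errs := validateCascadeDiskVolumeSource(&core.CascadeDiskVolumeSource{}, field.NewPath("cascadeDisk")); len(errs) == 0 {
		t.Error("expected a Required error for empty diskID")
	}
	valid := &core.CascadeDiskVolumeSource{DiskID: "disk-1234", FSType: "ext4"}
	if errs := validateCascadeDiskVolumeSource(valid, field.NewPath("cascadeDisk")); len(errs) != 0 {
		t.Errorf("unexpected errors: %v", errs)
	}
}
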
diff --git a/pkg/apis/extensions/types.go b/pkg/apis/extensions/types.go
index e369728..a5406ab 100644
--- a/pkg/apis/extensions/types.go
+++ b/pkg/apis/extensions/types.go
@@ -925,6 +925,7 @@ var (
 	PortworxVolume        FSType = "portworxVolume"
 	ScaleIO               FSType = "scaleIO"
 	CSI                   FSType = "csi"
+	CascadeDisk           FSType = "cascadeDisk"
 	All                   FSType = "*"
 )
 
diff --git a/pkg/cloudprovider/providers/BUILD b/pkg/cloudprovider/providers/BUILD
index aeccfa1..4313576 100644
--- a/pkg/cloudprovider/providers/BUILD
+++ b/pkg/cloudprovider/providers/BUILD
@@ -12,6 +12,7 @@ go_library(
     deps = [
         "//pkg/cloudprovider/providers/aws:go_default_library",
         "//pkg/cloudprovider/providers/azure:go_default_library",
+        "//pkg/cloudprovider/providers/cascade:go_default_library",
         "//pkg/cloudprovider/providers/cloudstack:go_default_library",
         "//pkg/cloudprovider/providers/gce:go_default_library",
         "//pkg/cloudprovider/providers/openstack:go_default_library",
@@ -34,6 +35,7 @@ filegroup(
         ":package-srcs",
         "//pkg/cloudprovider/providers/aws:all-srcs",
         "//pkg/cloudprovider/providers/azure:all-srcs",
+        "//pkg/cloudprovider/providers/cascade:all-srcs",
         "//pkg/cloudprovider/providers/cloudstack:all-srcs",
         "//pkg/cloudprovider/providers/fake:all-srcs",
         "//pkg/cloudprovider/providers/gce:all-srcs",
diff --git a/pkg/cloudprovider/providers/cascade/BUILD b/pkg/cloudprovider/providers/cascade/BUILD
new file mode 100644
index 0000000..4089166
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/BUILD
@@ -0,0 +1,56 @@
+package(default_visibility = ["//visibility:public"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+    "go_test",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "apitypes.go",
+        "auth.go",
+        "cascade.go",
+        "cascade_disks.go",
+        "cascade_instances.go",
+        "cascade_loadbalancer.go",
+        "client.go",
+        "oidcclient.go",
+        "restclient.go",
+        "utils.go",
+    ],
+    deps = [
+        "//pkg/api/v1/helper:go_default_library",
+        "//pkg/cloudprovider:go_default_library",
+        "//pkg/controller:go_default_library",
+        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/gopkg.in/gcfg.v1:go_default_library",
+        "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = [
+        "cascade_instances_test.go",
+    ],
+    embed = [":go_default_library"],
+    deps = [
+        "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/github.com/stretchr/testify/assert:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+)
diff --git a/pkg/cloudprovider/providers/cascade/OWNERS b/pkg/cloudprovider/providers/cascade/OWNERS
new file mode 100644
index 0000000..70efc9d
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/OWNERS
@@ -0,0 +1,3 @@
+maintainers:
+- ashokc
+- ysheng
diff --git a/pkg/cloudprovider/providers/cascade/apitypes.go b/pkg/cloudprovider/providers/cascade/apitypes.go
new file mode 100644
index 0000000..d437394
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/apitypes.go
@@ -0,0 +1,230 @@
+package cascade
+
+import "fmt"
+
+const (
+	NotFoundError     = 1408
+	VMNotFoundError   = 2006
+	DiskNotFoundError = 3011
+	DiskInUseError    = 3012
+
+	DiskStateAttached = "ATTACHED"
+)
+
+// Represents APIError returned by the API in case of an error.
+type APIError struct {
+	Code           *string           `json:"code"`
+	Data           map[string]string `json:"data"`
+	ErrorCode      int32             `json:"errorCode,omitempty"`
+	Message        *string           `json:"message"`
+	HttpStatusCode int               `json:"-"` // Not part of API contract
+}
+
+// Implement Go error interface for APIError.
+func (e APIError) Error() string {
+	return fmt.Sprintf(
+		"Cascade: { HTTP status: '%d', code: '%s', message: '%s', data: '%v', errorcode: '%d' }",
+		e.HttpStatusCode, StringVal(e.Code), StringVal(e.Message), e.Data, e.ErrorCode)
+}
+
+// Used to represent a generic HTTP error, e.g. an unexpected HTTP 500.
+type HttpError struct {
+	StatusCode int
+	Message    string
+}
+
+// Implementation of error interface for HttpError.
+func (e HttpError) Error() string {
+	return fmt.Sprintf("Cascade: HTTP %d: %v", e.StatusCode, e.Message)
+}
+
+// Represents a task which gets returned for long running API calls.
+type Task struct {
+	EndTime            int64       `json:"endTime,omitempty"`
+	Entity             *Entity     `json:"entity,omitempty"`
+	ID                 *string     `json:"id"`
+	Operation          string      `json:"operation,omitempty"`
+	QueuedTime         *int64      `json:"queuedTime"`
+	ResourceProperties interface{} `json:"resourceProperties,omitempty"`
+	SelfLink           string      `json:"selfLink,omitempty"`
+	StartedTime        *int64      `json:"startedTime"`
+	State              *string     `json:"state"`
+	Steps              []*Step     `json:"steps"`
+}
+
+// Represents the entity associated with the task.
+type Entity struct {
+	ID   *string `json:"id"`
+	Kind *string `json:"kind"`
+}
+
+// Represents a task that has entered into an error state. Task errors can be caught and type-checked against with the
+// usual Go idiom.
+type TaskError struct {
+	ID   string `json:"id"`
+	Step Step   `json:"step,omitempty"`
+}
+
+// Implement Go error interface for TaskError.
+func (e TaskError) Error() string {
+	return fmt.Sprintf("Cascade: Task '%s' is in error state: {@step==%s}", e.ID, GetStep(e.Step))
+}
+
+// An error representing a timeout while waiting for a task to complete.
+type TaskTimeoutError struct {
+	ID string
+}
+
+// Implement Go error interface for TaskTimeoutError.
+func (e TaskTimeoutError) Error() string {
+	return fmt.Sprintf("Cascade: Timed out waiting for task '%s'. "+
+		"Task may not be in error state, examine task for full details.", e.ID)
+}
+
+// Represents a step in a task.
+type Step struct {
+	EndTime     int64             `json:"endTime,omitempty"`
+	Errors      []*APIError       `json:"errors"`
+	Operation   string            `json:"operation,omitempty"`
+	Options     map[string]string `json:"options,omitempty"`
+	QueuedTime  *int64            `json:"queuedTime"`
+	Sequence    int32             `json:"sequence,omitempty"`
+	StartedTime *int64            `json:"startedTime"`
+	State       *string           `json:"state"`
+	Warnings    []*APIError       `json:"warnings"`
+}
+
+// GetStep returns a string representation of a step, used in task error messages.
+func GetStep(s Step) string {
+	return fmt.Sprintf("{\"operation\"=>\"%s\",\"state\"=>\"%s\"}", s.Operation, StringVal(s.State))
+}
+
+// Represents the VM response returned by the API.
+type VM struct {
+	AttachedDisks          []*AttachedDisk  `json:"attachedDisks"`
+	Cost                   []*QuotaLineItem `json:"cost"`
+	Flavor                 *string          `json:"flavor"`
+	FloatingIP             string           `json:"floatingIp,omitempty"`
+	HighAvailableVMGroupID string           `json:"highAvailableVMGroupID,omitempty"`
+	ID                     *string          `json:"id"`
+	Kind                   string           `json:"kind"`
+	Name                   *string          `json:"name"`
+	SelfLink               string           `json:"selfLink,omitempty"`
+	SourceImageID          string           `json:"sourceImageId,omitempty"`
+	State                  *string          `json:"state"`
+	Subnets                []string         `json:"subnets"`
+	Tags                   []string         `json:"tags"`
+}
+
+// Represents the listVMs response returned by the API.
+type VMList struct {
+	Items            []*VM  `json:"items"`
+	NextPageLink     string `json:"nextPageLink,omitempty"`
+	PreviousPageLink string `json:"previousPageLink,omitempty"`
+}
+
+// Represents multiple VMs returned by the API.
+type VMs struct {
+	Items []VM `json:"items"`
+}
+
+// Represents the disks attached to the VMs.
+type AttachedDisk struct {
+	BootDisk   *bool   `json:"bootDisk"`
+	CapacityGb *int32  `json:"capacityGb"`
+	Flavor     *string `json:"flavor"`
+	ID         *string `json:"id"`
+	Kind       *string `json:"kind"`
+	Name       *string `json:"name"`
+	State      *string `json:"state"`
+}
+
+// Represents an attach disk operation request.
+type VMDiskOperation struct {
+	Arguments map[string]string `json:"arguments,omitempty"`
+	DiskID    *string           `json:"diskId"`
+}
+
+// Represents the quota line items for the VM.
+type QuotaLineItem struct {
+	Key   *string  `json:"key"`
+	Unit  *string  `json:"unit"`
+	Value *float64 `json:"value"`
+}
+
+// Represents a persistent disk
+type PersistentDisk struct {
+	CapacityGB  int32            `json:"capacityGb,omitempty"`
+	Cost        []*QuotaLineItem `json:"cost"`
+	Datastore   string           `json:"datastore,omitempty"`
+	Flavor      *string          `json:"flavor"`
+	ID          *string          `json:"id"`
+	Kind        string           `json:"kind"`
+	Name        *string          `json:"name"`
+	SelfLink    string           `json:"selfLink,omitempty"`
+	State       *string          `json:"state"`
+	Tags        []string         `json:"tags"`
+	VM          string           `json:"vm"`
+	MountDevice string           `json:"mountDevice,omitempty"`
+	Zone        *string          `json:"zone"`
+}
+
+// Represents the spec for creating a disk.
+type DiskCreateSpec struct {
+	Affinities []*LocalitySpec `json:"affinities"`
+	CapacityGB *int32          `json:"capacityGb"`
+	Flavor     *string         `json:"flavor"`
+	Kind       *string         `json:"kind"`
+	Name       *string         `json:"name"`
+	Tags       []string        `json:"tags"`
+	Zone       *string         `json:"zone"`
+	Encrypted  *bool           `json:"encrypted"`
+}
+
+// Represents the spec for specifying affinity for a disk with another entity.
+type LocalitySpec struct {
+	ID   *string `json:"id"`
+	Kind *string `json:"kind"`
+}
+
+// Represents the LoadBalancer response returned by the API.
+type LoadBalancer struct {
+	Endpoint *string `json:"endpoint"`
+}
+
+// Represents the spec for creating a LoadBalancer.
+type LoadBalancerCreateSpec struct {
+	HealthCheck *LoadBalancerHealthCheck `json:"healthCheck"`
+	Name        *string                  `json:"name"`
+	PortMaps    []*LoadBalancerPortMap   `json:"portMaps"`
+	Type        *string                  `json:"type"`
+	SubDomain   *string                  `json:"subDomain"`
+}
+
+// Represents the health check spec for a load balancer.
+type LoadBalancerHealthCheck struct {
+	HealthyThreshold  int64   `json:"healthyThreshold,omitempty"`
+	IntervalInSeconds int64   `json:"intervalInSeconds,omitempty"`
+	Path              *string `json:"path,omitempty"`
+	Port              *int64  `json:"port"`
+	Protocol          *string `json:"protocol"`
+}
+
+// Represents a port mapping spec for a load balancer.
+type LoadBalancerPortMap struct {
+	AllowedCidrs         []*string `json:"allowedCidrs"`
+	InstancePort         *int64    `json:"instancePort"`
+	InstanceProtocol     *string   `json:"instanceProtocol"`
+	LoadBalancerPort     *int64    `json:"loadBalancerPort"`
+	LoadBalancerProtocol *string   `json:"loadBalancerProtocol"`
+}
+
+// Represents a VM to be registered with or deregistered from the load balancer.
+type LoadBalancerVM struct {
+	ID *string `json:"id"`
+}
+
+// Represents a list of VMs to be registered with or deregistered from the load balancer.
+type LoadBalancerVMUpdate struct {
+	VMIds []*LoadBalancerVM `json:"vmIds"`
+}
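
Because APIError implements the error interface, callers can recover the Cascade error code with a type assertion.
A hypothetical caller sketch (not part of the patch; the wiring below is illustrative only):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
)

// classify distinguishes "disk not found" from other failures by inspecting
// the ErrorCode carried on cascade.APIError (hypothetical caller code).
func classify(err error) string {
	if apiErr, ok := err.(cascade.APIError); ok && apiErr.ErrorCode == cascade.DiskNotFoundError {
		return "disk not found"
	}
	return fmt.Sprintf("other failure: %v", err)
}

func main() {
	fmt.Println(classify(cascade.APIError{ErrorCode: cascade.DiskNotFoundError}))
}
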
diff --git a/pkg/cloudprovider/providers/cascade/auth.go b/pkg/cloudprovider/providers/cascade/auth.go
new file mode 100644
index 0000000..fc92377
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/auth.go
@@ -0,0 +1,145 @@
+package cascade
+
+import (
+	"fmt"
+	"strings"
+	"github.com/golang/glog"
+	"os/exec"
+)
+
+const (
+	tScope = "openid offline_access rs_admin_server at_groups rs_vmdir"
+
+	afdCli                     = "/opt/vmware/bin/vmafd-cli"
+	afdCliMachineAccountCmd    = "get-machine-account-info"
+	afdCliPasswordPrefix       = "Password: "
+	afdCliSeparator            = "\n"
+)
+
+// AuthConfig contains configuration information for the authentication client.
+type AuthConfig struct {
+	tenantName string
+	authEndpoint string
+	machineAccountName string
+}
+
+// AuthClient defines functions related to authentication.
+type AuthClient struct {
+	cfg *AuthConfig
+}
+
+// NewAuthClient creates a new authentication client
+func NewAuthClient(cascadeCfg *CascadeConfig) (*AuthClient, error) {
+	return &AuthClient{
+		cfg: &AuthConfig{
+			tenantName: cascadeCfg.Global.TenantName,
+			authEndpoint: cascadeCfg.Global.AuthEndpoint,
+			machineAccountName: fmt.Sprintf("%s@%s", cascadeCfg.Global.DNSName, cascadeCfg.Global.DomainName),
+		},
+	}, nil
+}
+
+func (c *AuthClient) GetTokensByMachineAccount() (*TokenOptions, error) {
+	// Use the VMAFD CLI to get the machine account password
+	cmd := exec.Command(afdCli, afdCliMachineAccountCmd)
+	output, err := cmd.Output()
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to get machine account credentials. Cannot create Client.")
+		return nil, fmt.Errorf("Failed to get machine account credentials, err: %v", err)
+	}
+
+	password, err := parseMachineAccountInfo(output)
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to parse machine account credentials. Cannot create Client.")
+		return nil, fmt.Errorf("Failed to parse machine account credentials, err: %v", err)
+	}
+
+	return c.GetTokensByCredentials(c.cfg.machineAccountName, password)
+}
+
+// GetTokensByCredentials gets tokens using username and password
+func (c *AuthClient) GetTokensByCredentials(username, password string) (*TokenOptions, error) {
+	// Parse tenant part from username
+	parts := strings.Split(username, "@")
+	if len(parts) != 2 {
+		return nil, fmt.Errorf("Invalid full user name '%s': expected user@tenant", username)
+	}
+	tenant := parts[1]
+
+	oidcClient, err := buildOIDCClient(c.cfg.authEndpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	tokenResponse, err := oidcClient.GetTokenByPasswordGrant(tenant, username, password)
+	if err != nil {
+		return nil, err
+	}
+
+	return toTokenOptions(tokenResponse), nil
+}
+
+// GetTokensByRefreshToken gets tokens using refresh token
+func (c *AuthClient) GetTokensByRefreshToken(refreshtoken string) (*TokenOptions, error) {
+	oidcClient, err := buildOIDCClient(c.cfg.authEndpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	tokenResponse, err := oidcClient.GetTokenByRefreshTokenGrant(c.cfg.tenantName, refreshtoken)
+	if err != nil {
+		return nil, err
+	}
+
+	return toTokenOptions(tokenResponse), nil
+}
+
+func buildOIDCClient(authEndpoint string) (*OIDCClient, error) {
+	options := &OIDCClientOptions{
+		IgnoreCertificate: false,
+		RootCAs:           nil,
+		TokenScope:        tScope,
+	}
+
+	return NewOIDCClient(authEndpoint, options, nil), nil
+}
+
+func toTokenOptions(response *OIDCTokenResponse) *TokenOptions {
+	return &TokenOptions{
+		AccessToken:  response.AccessToken,
+		ExpiresIn:    response.ExpiresIn,
+		RefreshToken: response.RefreshToken,
+		IDToken:      response.IDToken,
+		TokenType:    response.TokenType,
+	}
+}
+
+// parseMachineAccountInfo parses the machine account password from the machine-account-info output, which looks
+// like this:
+//   MachineAccount: photon-8rwdscr1.lw-testdom.com
+//   Password: FT`])}]d/3\EPwRpz9k1
+func parseMachineAccountInfo(output []byte) (string, error) {
+	if len(output) <= 0 {
+		return "", fmt.Errorf("account info is not specified")
+	}
+
+	strOut := string(output)
+	strOutLen := len(strOut)
+
+	pwdStart := strings.Index(strOut, afdCliPasswordPrefix)
+	if pwdStart < 0 {
+		return "", fmt.Errorf("account info is not in expected format")
+	}
+	pwdStart = pwdStart + len(afdCliPasswordPrefix)
+	if pwdStart >= strOutLen {
+		return "", fmt.Errorf("account info is not in expected format")
+	}
+	pwdEnd := strings.LastIndex(strOut, afdCliSeparator)
+	if pwdEnd < 0 || pwdEnd <= pwdStart || pwdEnd >= strOutLen {
+		return "", fmt.Errorf("account info is not in expected format")
+	}
+
+	pwd := strOut[pwdStart:pwdEnd]
+
+	return pwd, nil
+}
\ No newline at end of file
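
On master nodes the authentication flow is: read the machine account password via the VMAFD CLI, then exchange it for
tokens against the Lightwave OIDC endpoint. A hypothetical wiring sketch (not part of the patch; all configuration
values are placeholders):

package main

import (
	"fmt"
	"log"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
)

// Hypothetical wiring: obtain tokens for the node's machine account using the
// auth client added in this file. All values below are placeholders.
func main() {
	cfg := &cascade.CascadeConfig{}
	cfg.Global.TenantName = "example-tenant"
	cfg.Global.AuthEndpoint = "https://auth.example.com"
	cfg.Global.DomainName = "example.com"
	cfg.Global.DNSName = "master-0.example.com"

	authClient, err := cascade.NewAuthClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	tokens, err := authClient.GetTokensByMachineAccount()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("got tokens:", tokens != nil)
}
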
diff --git a/pkg/cloudprovider/providers/cascade/cascade.go b/pkg/cloudprovider/providers/cascade/cascade.go
new file mode 100644
index 0000000..ba42576
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/cascade.go
@@ -0,0 +1,219 @@
+// Use of the Cascade cloud provider requires the kubelet, kube-apiserver, and kube-controller-manager to be started
+// with the flags: '--cloud-provider=vke --cloud-config=[path_to_config_file]'.
+package cascade
+
+import (
+	"errors"
+	"fmt"
+	"github.com/golang/glog"
+	"gopkg.in/gcfg.v1"
+	"io"
+	k8stypes "k8s.io/apimachinery/pkg/types"
+	"k8s.io/kubernetes/pkg/cloudprovider"
+	"k8s.io/kubernetes/pkg/controller"
+	"os"
+	"strings"
+)
+
+const (
+	ProviderName = "vke"
+	DiskSpecKind = "persistent-disk"
+	MasterPrefix = "master"
+)
+
+// CascadeCloud is an implementation of the cloud provider interface for Cascade Controller.
+type CascadeCloud struct {
+	cfg *CascadeConfig
+	// Authentication client to get token for Cascade API calls
+	authClient *AuthClient
+	// API Client to make Cascade API calls
+	apiClient *Client
+	// local $HOSTNAME
+	localHostname string
+	// hostname from K8S, could be overridden
+	localK8sHostname string
+}
+
+// CascadeConfig represents the Cascade cloud provider's configuration.
+type CascadeConfig struct {
+	Global struct {
+		// the Cascade Controller endpoint
+		CloudTarget string `gcfg:"target"`
+		// Cascade Controller tenantName name
+		TenantName string `gcfg:"tenantName"`
+		// Cascade Controller cluster ID
+		ClusterID string `gcfg:"clusterID"`
+		// Authentication server endpoint for Cascade Controller
+		AuthEndpoint string `gcfg:"authEndpoint"`
+		// Lightwave domain name for the node
+		DomainName string `gcfg:"domainName"`
+		// DNS name of the node.
+		DNSName string `gcfg:"dnsName"`
+		// Region in which the cluster is in
+		Region string `gcfg:"region"`
+		// Availability zone in which the cluster is in
+		Zone string `gcfg:"zone"`
+		// IP address of the node.
+		IPAddress string `gcfg:"ipAddress"`
+	}
+}
+
+// Disks is interface for manipulation with Cascade Controller Persistent Disks.
+type Disks interface {
+	// AttachDisk attaches the given disk to the given node. The current node
+	// is used when nodeName is an empty string.
+	AttachDisk(diskID string, nodeName k8stypes.NodeName) (string, error)
+
+	// DetachDisk detaches the given disk from the given node. The current node
+	// is used when nodeName is an empty string.
+	DetachDisk(diskID string, nodeName k8stypes.NodeName) error
+
+	// DiskIsAttached checks if a disk is attached to the given node.
+	DiskIsAttached(diskID string, nodeName k8stypes.NodeName) (bool, error)
+
+	// DisksAreAttached is a batch function to check if a list of disks are attached
+	// to the node with the specified NodeName.
+	DisksAreAttached(diskID []string, nodeName k8stypes.NodeName) (map[string]bool, error)
+
+	// CreateDisk creates a new PD with given properties.
+	CreateDisk(volumeOptions *VolumeOptions) (diskID string, err error)
+
+	// DeleteDisk deletes PD.
+	DeleteDisk(diskID string) error
+
+	// Get labels to apply to volume on creation.
+	GetVolumeLabels(diskID string) (map[string]string, error)
+}
+
+// VolumeOptions specifies capacity, tags, name and flavorID for a volume.
+type VolumeOptions struct {
+	CapacityGB int
+	Tags       map[string]string
+	Name       string
+	Flavor     string
+	Encrypted  bool
+}
+
+func readConfig(config io.Reader) (*CascadeConfig, error) {
+	if config == nil {
+		err := fmt.Errorf("Cascade Cloud Provider: config file is missing. Please restart with " +
+			"--cloud-provider=vke --cloud-config=[path_to_config_file]")
+		return nil, err
+	}
+
+	var cfg CascadeConfig
+	err := gcfg.ReadInto(&cfg, config)
+	return &cfg, err
+}
+
+func init() {
+	cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
+		cfg, err := readConfig(config)
+		if err != nil {
+			glog.Errorf("Cascade Cloud Provider: failed to read in cloud provider config file. Error[%v]", err)
+			return nil, err
+		}
+		return newCascadeCloud(cfg)
+	})
+}
+
+func newCascadeCloud(cfg *CascadeConfig) (*CascadeCloud, error) {
+	if len(cfg.Global.CloudTarget) == 0 {
+		return nil, fmt.Errorf("Cascade Controller endpoint was not specified.")
+	}
+
+	// Get local hostname
+	hostname, err := os.Hostname()
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: get hostname failed. Error[%v]", err)
+		return nil, err
+	}
+
+	cc := CascadeCloud{
+		cfg:              cfg,
+		localHostname:    hostname,
+		localK8sHostname: "",
+	}
+
+	// Instantiate the auth and API clients only on the master nodes. Kubelets running on the workers don't need them as
+	// they are used primarily for making API calls to Cascade.
+	if strings.HasPrefix(hostname, MasterPrefix) {
+		if cc.authClient, err = NewAuthClient(cfg); err != nil {
+			return nil, err
+		}
+
+		if cc.apiClient, err = NewClient(cfg, cc.authClient); err != nil {
+			return nil, err
+		}
+	}
+
+	return &cc, nil
+}
+
+// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
+func (cc *CascadeCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
+
+// Instances returns an implementation of Instances for Cascade Controller.
+func (cc *CascadeCloud) Instances() (cloudprovider.Instances, bool) {
+	return cc, true
+}
+
+// List is an implementation of Instances.List.
+func (cc *CascadeCloud) List(filter string) ([]k8stypes.NodeName, error) {
+	return nil, errors.New("unimplemented")
+}
+
+func (cc *CascadeCloud) Clusters() (cloudprovider.Clusters, bool) {
+	return nil, true
+}
+
+// ProviderName returns the cloud provider ID.
+func (cc *CascadeCloud) ProviderName() string {
+	return ProviderName
+}
+
+// LoadBalancer returns an implementation of LoadBalancer for Cascade Controller.
+func (cc *CascadeCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
+	return cc, true
+}
+
+// Zones returns an implementation of Zones for Cascade Controller.
+func (cc *CascadeCloud) Zones() (cloudprovider.Zones, bool) {
+	return cc, true
+}
+
+func (cc *CascadeCloud) GetZone() (cloudprovider.Zone, error) {
+	return cloudprovider.Zone{
+		Region:        cc.cfg.Global.Region,
+		FailureDomain: cc.cfg.Global.Zone,
+	}, nil
+}
+
+// GetZoneByProviderID implements Zones.GetZoneByProviderID
+// This is particularly useful in external cloud providers where the kubelet
+// does not initialize node data.
+func (cc *CascadeCloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
+	return cloudprovider.Zone{}, errors.New("unimplemented")
+}
+
+// GetZoneByNodeName implements Zones.GetZoneByNodeName
+// This is particularly useful in external cloud providers where the kubelet
+// does not initialize node data.
+func (cc *CascadeCloud) GetZoneByNodeName(nodeName k8stypes.NodeName) (cloudprovider.Zone, error) {
+	return cloudprovider.Zone{}, errors.New("unimplemented")
+}
+
+// Routes returns false since the Routes interface is not supported by the Cascade controller.
+func (cc *CascadeCloud) Routes() (cloudprovider.Routes, bool) {
+	return nil, false
+}
+
+// ScrubDNS filters DNS settings for pods.
+func (cc *CascadeCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
+	return nameservers, searches
+}
+
+// HasClusterID returns true if the cluster has a clusterID
+func (cc *CascadeCloud) HasClusterID() bool {
+	return true
+}
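
The cloud config read by readConfig is gcfg/INI formatted, with keys matching the gcfg tags on CascadeConfig. A
hypothetical package-internal test sketch (not part of the patch; all values are placeholders):

package cascade

import (
	"strings"
	"testing"
)

// Hypothetical sketch of the file passed via --cloud-config; values are
// placeholders and only the target field is checked here.
func TestReadConfigSketch(t *testing.T) {
	contents := `[Global]
target = https://cascade.example.com
tenantName = example-tenant
clusterID = cluster-1234
authEndpoint = https://auth.example.com
domainName = example.com
dnsName = master-0.example.com
region = example-region
zone = example-zone-1
ipAddress = 10.0.0.4
`
	cfg, err := readConfig(strings.NewReader(contents))
	if err != nil {
		t.Fatal(err)
	}
	if cfg.Global.CloudTarget != "https://cascade.example.com" {
		t.Errorf("unexpected target: %q", cfg.Global.CloudTarget)
	}
}
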
diff --git a/pkg/cloudprovider/providers/cascade/cascade_disks.go b/pkg/cloudprovider/providers/cascade/cascade_disks.go
new file mode 100644
index 0000000..4df1ab9
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/cascade_disks.go
@@ -0,0 +1,226 @@
+package cascade
+
+import (
+	"github.com/golang/glog"
+	k8stypes "k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/kubernetes/pkg/kubelet/apis"
+	"k8s.io/kubernetes/pkg/volume"
+)
+
+// Attaches given virtual disk volume to the node running kubelet.
+func (cc *CascadeCloud) AttachDisk(diskID string, nodeName k8stypes.NodeName) (string, error) {
+	// Check if disk is already attached to that node.
+	attached, err := cc.DiskIsAttached(diskID, nodeName)
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: cc.DiskIsAttached failed during AttachDisk. Error[%v]", err)
+		return "", err
+	}
+
+	// If not already attached, attach the disk.
+	if !attached {
+		operation := &VMDiskOperation{
+			DiskID: StringPtr(diskID),
+		}
+
+		vmID, err := cc.InstanceID(nodeName)
+		if err != nil {
+			glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for AttachDisk. Error[%v]", err)
+			return "", err
+		}
+
+		task, err := cc.apiClient.AttachDisk(vmID, operation)
+		if err != nil {
+			glog.Errorf("Cascade Cloud Provider: Failed to attach disk with ID %s. Error[%v]", diskID, err)
+			return "", err
+		}
+
+		_, err = cc.apiClient.WaitForTask(StringVal(task.ID))
+		if err != nil {
+			glog.Errorf("Cascade Cloud Provider: Failed to wait for task to attach disk with ID %s. Error[%v]",
+				diskID, err)
+			return "", err
+		}
+	}
+
+	// Get mount device of the attached disk.
+	disk, err := cc.apiClient.GetDisk(diskID)
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to Get disk with diskID %s. Error[%v]", diskID, err)
+		return "", err
+	}
+
+	return disk.MountDevice, nil
+}
+
+// Detaches given virtual disk volume from the node running kubelet.
+func (cc *CascadeCloud) DetachDisk(diskID string, nodeName k8stypes.NodeName) error {
+	operation := &VMDiskOperation{
+		DiskID: StringPtr(diskID),
+	}
+
+	vmID, err := cc.InstanceID(nodeName)
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for DetachDisk. Error[%v]", err)
+		return err
+	}
+
+	task, err := cc.apiClient.DetachDisk(vmID, operation)
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to detach disk with pdID %s. Error[%v]", diskID, err)
+		return err
+	}
+
+	_, err = cc.apiClient.WaitForTask(StringVal(task.ID))
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to wait for task to detach disk with pdID %s. Error[%v]",
+			diskID, err)
+		return err
+	}
+
+	return nil
+}
+
+// DiskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
+func (cc *CascadeCloud) DiskIsAttached(diskID string, nodeName k8stypes.NodeName) (bool, error) {
+	vmID, err := cc.InstanceID(nodeName)
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for DiskIsAttached. Error[%v]", err)
+		return false, err
+	}
+
+	_, err = cc.apiClient.GetVM(vmID)
+	if err != nil {
+		switch err.(type) {
+		case APIError:
+			if err.(APIError).ErrorCode == VMNotFoundError {
+				// If instance no longer exists, we will assume that the volume is not attached.
+				glog.Warningf("Cascade Cloud Provider: Instance %s does not exist. DiskIsAttached will assume"+
+					" disk %s is not attached to it.", nodeName, diskID)
+				return false, nil
+			}
+		}
+		return false, err
+	}
+
+	disk, err := cc.apiClient.GetDisk(diskID)
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to Get disk with diskID %s. Error[%v]", diskID, err)
+		return false, err
+	}
+
+	if disk.VM == vmID && StringVal(disk.State) == DiskStateAttached {
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// DisksAreAttached returns if disks are attached to the VM using controllers supported by the plugin.
+func (cc *CascadeCloud) DisksAreAttached(diskIDs []string, nodeName k8stypes.NodeName) (map[string]bool, error) {
+	attached := make(map[string]bool)
+	for _, diskID := range diskIDs {
+		attached[diskID] = false
+	}
+
+	vmID, err := cc.InstanceID(nodeName)
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for DisksAreAttached. Error[%v]", err)
+		return attached, err
+	}
+
+	for _, diskID := range diskIDs {
+		disk, err := cc.apiClient.GetDisk(diskID)
+		if err != nil {
+			glog.Warningf("Cascade Cloud Provider: failed to get persistent disk %s, err [%v]",
+				diskID, err)
+		} else {
+			if disk.VM == vmID && StringVal(disk.State) == DiskStateAttached {
+				attached[diskID] = true
+			}
+		}
+	}
+
+	return attached, nil
+}
+
+// Create a volume of given size (in GB).
+func (cc *CascadeCloud) CreateDisk(volumeOptions *VolumeOptions) (diskID string, err error) {
+	// Get Zones for the cluster
+	zones, err := cc.apiClient.GetZones()
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to Get zones for the cluster. Error[%v]", err)
+		return "", err
+	}
+
+	// Pick a zone to place the disk in.
+	zoneSet := sets.NewString()
+	for _, zone := range zones {
+		zoneSet.Insert(zone)
+	}
+	zone := volume.ChooseZoneForVolume(zoneSet, volumeOptions.Name)
+
+	diskSpec := DiskCreateSpec{}
+	diskSpec.Name = StringPtr(volumeOptions.Name)
+	diskSpec.Flavor = StringPtr(volumeOptions.Flavor)
+	diskSpec.CapacityGB = Int32Ptr(int32(volumeOptions.CapacityGB))
+	diskSpec.Kind = StringPtr(DiskSpecKind)
+	diskSpec.Zone = StringPtr(zone)
+	diskSpec.Encrypted = BoolPtr(volumeOptions.Encrypted)
+
+	task, err := cc.apiClient.CreateDisk(&diskSpec)
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to CreateDisk. Error[%v]", err)
+		return "", err
+	}
+
+	waitTask, err := cc.apiClient.WaitForTask(StringVal(task.ID))
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to wait for task to CreateDisk. Error[%v]", err)
+		return "", err
+	}
+
+	return StringVal(waitTask.Entity.ID), nil
+}
+
+// Deletes a volume given volume name.
+func (cc *CascadeCloud) DeleteDisk(diskID string) error {
+	task, err := cc.apiClient.DeleteDisk(diskID)
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to DeleteDisk. Error[%v]", err)
+		// If we get a DiskNotFound error, we assume that the disk is already deleted. So we don't return an error here.
+		switch err.(type) {
+		case APIError:
+			if err.(APIError).ErrorCode == DiskNotFoundError {
+				return nil
+			}
+			if err.(APIError).ErrorCode == DiskInUseError {
+				return volume.NewDeletedVolumeInUseError(err.Error())
+			}
+		}
+		return err
+	}
+
+	_, err = cc.apiClient.WaitForTask(StringVal(task.ID))
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to wait for task to DeleteDisk. Error[%v]", err)
+		return err
+	}
+
+	return nil
+}
+
+// Gets the zone and region for the volume.
+func (cc *CascadeCloud) GetVolumeLabels(diskID string) (map[string]string, error) {
+	disk, err := cc.apiClient.GetDisk(diskID)
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to GetDisk for GetVolumeLabels. Error[%v]", err)
+		return nil, err
+	}
+
+	labels := make(map[string]string)
+	labels[apis.LabelZoneFailureDomain] = StringVal(disk.Zone)
+	labels[apis.LabelZoneRegion] = cc.cfg.Global.Region
+
+	return labels, nil
+}
diff --git a/pkg/cloudprovider/providers/cascade/cascade_instances.go b/pkg/cloudprovider/providers/cascade/cascade_instances.go
new file mode 100644
index 0000000..0172151
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/cascade_instances.go
@@ -0,0 +1,91 @@
+package cascade
+
+import (
+	"k8s.io/api/core/v1"
+	k8stypes "k8s.io/apimachinery/pkg/types"
+	"errors"
+	"strings"
+)
+
+// NodeAddresses is an implementation of Instances.NodeAddresses. In the future, private IP address, external IP, etc.
+// will be added based on need.
+func (cc *CascadeCloud) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) {
+	return []v1.NodeAddress{
+		{Type: v1.NodeInternalIP, Address: cc.cfg.Global.IPAddress},
+		{Type: v1.NodeInternalDNS, Address: cc.cfg.Global.DNSName},
+	}, nil
+}
+
+// NodeAddressesByProviderID returns the node addresses of the instance with the specified unique providerID.
+// This method will not be called from the node that is requesting this ID. i.e. metadata service
+// and other local methods cannot be used here
+func (cc *CascadeCloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
+	// Get the name of the VM using the ID and generate the DNS name based on the VM name.
+	vm, err := cc.apiClient.GetVM(providerID)
+	if err != nil {
+		return nil, err
+	}
+	// Get the DNS name for the master VM and replace the VM name portion with the requested VM name.
+	dnsNameParts := strings.SplitN(cc.cfg.Global.DNSName, ".", 2)
+	if len(dnsNameParts) != 2 {
+		return nil, errors.New("Cascade cloud provider: Invalid DNS name specified in the configuration. " +
+			"Cannot get NodeAddressesByProviderID.")
+	}
+	dnsAddress := StringVal(vm.Name) + "." + dnsNameParts[1]
+	addresses := []v1.NodeAddress{}
+	addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: dnsAddress})
+	return addresses, nil
+}
+
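+// AddSSHKeyToAllInstances is not supported by the Cascade cloud provider.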
+func (cc *CascadeCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
+	return errors.New("unimplemented")
+}
+
+// CurrentNodeName returns the node name based on the host name. For Cascade Kubernetes nodes, we use the host name
+// as the node name.
+func (cc *CascadeCloud) CurrentNodeName(hostname string) (k8stypes.NodeName, error) {
+	cc.localK8sHostname = hostname
+	return k8stypes.NodeName(hostname), nil
+}
+
+// ExternalID returns the cloud provider ID of the specified instance (deprecated).
+// Note: We do not call Cascade Controller here to check if the instance is alive or not because that requires the
+// worker nodes to also login to Cascade Controller. That check is used by Kubernetes to proactively remove nodes that
+// the cloud provider believes are no longer available. Even without that check, Kubernetes will remove those nodes
+// eventually, so we are not losing much by skipping it.
+func (cc *CascadeCloud) ExternalID(nodeName k8stypes.NodeName) (string, error) {
+	return getInstanceIDFromNodeName(nodeName)
+}
+
+// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
+// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
+func (cc *CascadeCloud) InstanceExistsByProviderID(providerID string) (bool, error) {
+	return false, errors.New("unimplemented")
+}
+
+// InstanceID returns the cloud provider ID of the specified instance.
+func (cc *CascadeCloud) InstanceID(nodeName k8stypes.NodeName) (string, error) {
+	return getInstanceIDFromNodeName(nodeName)
+}
+
+// getInstanceIDFromNodeName gets the Cascade VM ID from the Kubernetes node name.
+func getInstanceIDFromNodeName(nodeName k8stypes.NodeName) (string, error) {
+	// nodeName is of the format master-instance-id or worker-instance-id. To compute the instance ID, we just need to
+	// take the portion after master- or worker-, which is what we do below.
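+	// For example, a node named "worker-1234" yields the instance ID "1234".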
+	nodeParts := strings.SplitN(string(nodeName), "-", 2)
+	if len(nodeParts) != 2 {
+		return "", errors.New("Cascade cloud provider: Invalid node name. Cannot fetch instance ID.")
+	}
+	return nodeParts[1], nil
+}
+
+// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
+// This method will not be called from the node that is requesting this ID. i.e. metadata service
+// and other local methods cannot be used here
+func (cc *CascadeCloud) InstanceTypeByProviderID(providerID string) (string, error) {
+	return "", errors.New("unimplemented")
+}
+
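+// InstanceType returns the type of the specified instance. The Cascade cloud provider does not report instance
+// types, so this always returns an empty string.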
+func (cc *CascadeCloud) InstanceType(nodeName k8stypes.NodeName) (string, error) {
+	return "", nil
+}
diff --git a/pkg/cloudprovider/providers/cascade/cascade_instances_test.go b/pkg/cloudprovider/providers/cascade/cascade_instances_test.go
new file mode 100644
index 0000000..bec5491
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/cascade_instances_test.go
@@ -0,0 +1,43 @@
+package cascade
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"k8s.io/api/core/v1"
+)
+
+const configFile = `
+[Global]
+target = target
+tenantName = tenant
+clusterID = cluster-id
+authEndpoint = auth-endpoint
+domainName = domain.test
+dnsName = node.domain.test
+region = region
+zone = zone
+ipAddress = 1.1.1.1
+`
+
+func getCascadeInstance() (*CascadeCloud, error) {
+	cfg, err := readConfig(strings.NewReader(configFile))
+	return &CascadeCloud{
+		cfg:              cfg,
+		localHostname:    "hostname",
+		localK8sHostname: "",
+	}, err
+}
+
+func TestNodeAddresses(t *testing.T) {
+	cc, err := getCascadeInstance()
+	assert.Nil(t, err)
+	expectedNodeAddresses := []v1.NodeAddress{
+		{Type: v1.NodeInternalIP, Address: "1.1.1.1"},
+		{Type: v1.NodeInternalDNS, Address: "node.domain.test"},
+	}
+	actualNodeAddresses, err := cc.NodeAddresses("node")
+	assert.Nil(t, err)
+	assert.Equal(t, expectedNodeAddresses, actualNodeAddresses)
+}
diff --git a/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go b/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go
new file mode 100644
index 0000000..fac37e5
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go
@@ -0,0 +1,284 @@
+package cascade
+
+import (
+	"fmt"
+	"github.com/golang/glog"
+	"k8s.io/api/core/v1"
+	"k8s.io/kubernetes/pkg/api/v1/service"
+	"k8s.io/kubernetes/pkg/cloudprovider"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+const TCP_PROTOCOL = "TCP"
+
+const HTTP_PROTOCOL = "HTTP"
+
+// EnsureLoadBalancer creates or updates a Cascade load balancer
+func (cc *CascadeCloud) EnsureLoadBalancer(clusterName string, k8sService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
+	logger := newLoadBalancerLogger(clusterName, k8sService, "EnsureLoadBalancer")
+
+	loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
+	logger.Infof("Load balancer name: %s", loadBalancerName)
+
+	// Sanity checks
+	if k8sService.Spec.SessionAffinity != v1.ServiceAffinityNone {
+		logger.Errorf("Unsupported load balancer session affinity: %+v", k8sService.Spec.SessionAffinity)
+		return nil, fmt.Errorf("Unsupported load balancer session affinity: %+v", k8sService.Spec.SessionAffinity)
+	}
+
+	if len(k8sService.Spec.Ports) == 0 {
+		logger.Errorf("No port mapping is specified")
+		return nil, fmt.Errorf("No port mapping is specified")
+	}
+
+	// Create load balancer port maps
+	portMaps := []*LoadBalancerPortMap{}
+	for _, port := range k8sService.Spec.Ports {
+		if port.Protocol != v1.ProtocolTCP {
+			logger.Warningf("Ignoring port that does not use TCP protocol: %+v", port)
+			continue
+		}
+
+		if port.NodePort == 0 {
+			logger.Warningf("Ignoring port without node port defined: %+v", port)
+			continue
+		}
+
+		// TODO: For now we only support SSL pass through. All port mappings are using TCP protocol.
+		//       Also note that we allow all external traffic to access the ports.
+		portMap := &LoadBalancerPortMap{
+			InstancePort:         Int64Ptr(int64(port.NodePort)),
+			InstanceProtocol:     StringPtr(TCP_PROTOCOL),
+			LoadBalancerPort:     Int64Ptr(int64(port.Port)),
+			LoadBalancerProtocol: StringPtr(TCP_PROTOCOL),
+		}
+		portMaps = append(portMaps, portMap)
+	}
+
+	if len(portMaps) == 0 {
+		logger.Errorf("No TCP port mapping with a node port is specified")
+		return nil, fmt.Errorf("No TCP port mapping with a node port is specified")
+	}
+
+	// Create load balancer health check
+	healthCheck := &LoadBalancerHealthCheck{
+		HealthyThreshold:  5,
+		IntervalInSeconds: 10,
+	}
+	if healthCheckPath, healthCheckNodePort := service.GetServiceHealthCheckPathPort(k8sService); healthCheckPath != "" {
+		logger.Infof("HTTP health checks on: %s:%d", healthCheckPath, healthCheckNodePort)
+		healthCheck.Path = StringPtr(healthCheckPath)
+		healthCheck.Port = Int64Ptr(int64(healthCheckNodePort))
+		healthCheck.Protocol = StringPtr(HTTP_PROTOCOL)
+	} else {
+		logger.Infof("TCP health check on port: %d", Int64Val(portMaps[0].InstancePort))
+		healthCheck.Port = portMaps[0].InstancePort
+		healthCheck.Protocol = StringPtr(TCP_PROTOCOL)
+	}
+
+	// Create load balancer
+	createSpec := &LoadBalancerCreateSpec{
+		Name:        StringPtr(loadBalancerName),
+		Type:        StringPtr("PUBLIC"),
+		PortMaps:    portMaps,
+		HealthCheck: healthCheck,
+		SubDomain:   StringPtr(k8sService.Name),
+	}
+	logger.Infof("Load balancer create spec: %+v", *createSpec)
+
+	task, err := cc.apiClient.CreateOrUpdateLoadBalancer(createSpec)
+	if err != nil {
+		logger.Errorf("Failed to create or update load balancer. Error: [%v]", err)
+		return nil, err
+	}
+
+	_, err = cc.apiClient.WaitForTask(StringVal(task.ID))
+	if err != nil {
+		logger.Errorf("Failed to poll task status of creating or updating load balancer. Error: [%v]", err)
+		return nil, err
+	}
+
+	// Apply VM update to load balancer
+	err = cc.updateLoadBalancerVMs(nodes, loadBalancerName, logger)
+	if err != nil {
+		// The private function already did logging. No need to log again.
+		return nil, err
+	}
+
+	// Get load balancer
+	loadBalancer, err := cc.apiClient.GetLoadBalancer(StringPtr(loadBalancerName))
+	if err != nil {
+		logger.Errorf("Failed to get load balancer. Error: [%v]", err)
+		return nil, err
+	}
+
+	return toLoadBalancerStatus(loadBalancer), nil
+}
+
+// GetLoadBalancer returns the information about a Cascade load balancer
+func (cc *CascadeCloud) GetLoadBalancer(clusterName string, k8sService *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
+	logger := newLoadBalancerLogger(clusterName, k8sService, "GetLoadBalancer")
+
+	loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
+	logger.Infof("Load balancer name: %s", loadBalancerName)
+
+	// Get load balancer
+	loadBalancer, err := cc.apiClient.GetLoadBalancer(StringPtr(loadBalancerName))
+	if err != nil {
+		logger.Errorf("Failed to get load balancer. Error: [%v]", err)
+		// Do not return error here because we want the caller of this function to determine
+		// what to do with the not-found situation.
+		switch err.(type) {
+		case APIError:
+			if err.(APIError).ErrorCode == NotFoundError {
+				return nil, false, nil
+			}
+		}
+		return nil, false, err
+	}
+
+	return toLoadBalancerStatus(loadBalancer), true, nil
+}
+
+// UpdateLoadBalancer updates the node information of a Cascade load balancer
+func (cc *CascadeCloud) UpdateLoadBalancer(clusterName string, k8sService *v1.Service, nodes []*v1.Node) error {
+	logger := newLoadBalancerLogger(clusterName, k8sService, "UpdateLoadBalancer")
+
+	loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
+	logger.Infof("Load balancer name: %s", loadBalancerName)
+
+	err := cc.updateLoadBalancerVMs(nodes, loadBalancerName, logger)
+	if err != nil {
+		// The private function already did logging. No need to log again.
+		return err
+	}
+
+	return nil
+}
+
+// EnsureLoadBalancerDeleted deletes a Cascade load balancer
+func (cc *CascadeCloud) EnsureLoadBalancerDeleted(clusterName string, k8sService *v1.Service) error {
+	logger := newLoadBalancerLogger(clusterName, k8sService, "EnsureLoadBalancerDeleted")
+
+	loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
+	logger.Infof("Load balancer name: %s", loadBalancerName)
+
+	task, err := cc.apiClient.DeleteLoadBalancer(StringPtr(loadBalancerName), k8sService.Name)
+	if err != nil {
+		logger.Errorf("Failed to delete load balancer. Error: [%v]", err)
+		// If we get a NotFound error, we assume that the load balancer is already deleted. So we don't return an error
+		// here.
+		switch err.(type) {
+		case APIError:
+			if err.(APIError).ErrorCode == NotFoundError {
+				return nil
+			}
+		}
+		return err
+	}
+
+	_, err = cc.apiClient.WaitForTask(StringVal(task.ID))
+	if err != nil {
+		logger.Errorf("Failed to poll task status of deleting load balancer. Error: [%v]", err)
+		return err
+	}
+
+	return nil
+}
+
+func (cc *CascadeCloud) updateLoadBalancerVMs(
+	nodes []*v1.Node, loadBalancerName string, logger *loadBalancerLogger) error {
+
+	// Apply VM update to the load balancer
+	loadBalancerVMs := make([]*LoadBalancerVM, 0)
+
+	for _, node := range nodes {
+		// If the node does not have a name, we cannot derive its instance ID. Therefore we skip this node.
+		if len(node.Name) == 0 {
+			logger.Warningf("Node %s does not have a name. Skip updating this VM for load balancer", node.UID)
+			continue
+		}
+
+		// If we cannot get the instance ID, something is wrong on the Cascade Controller side.
+		// However, we should tolerate such failure and continue the load balancer VM update
+		// by skipping this VM.
+		instanceID, err := cc.InstanceID(types.NodeName(node.Name))
+		if err != nil {
+			logger.Warningf("Unable to get instance ID for node %s, skip updating this VM for load balancer. Error [%v]", node.Name, err)
+			continue
+		}
+
+		loadBalancerVMs = append(loadBalancerVMs, &LoadBalancerVM{
+			ID: StringPtr(instanceID),
+		})
+	}
+
+	if len(loadBalancerVMs) == 0 {
+		logger.Infof("No nodes to be added to the load balancer. Skip updating load balancer VMs")
+		return nil
+	}
+
+	vmUpdate := &LoadBalancerVMUpdate{
+		VMIds: loadBalancerVMs,
+	}
+	logger.Infof("Load balancer VM update spec: %+v", vmUpdate.VMIds)
+
+	task, err := cc.apiClient.ApplyVMsToLoadBalancer(StringPtr(loadBalancerName), vmUpdate)
+	if err != nil {
+		logger.Errorf("Failed to update load balancer VMs. Error: [%v]", err)
+		return err
+	}
+
+	_, err = cc.apiClient.WaitForTask(StringVal(task.ID))
+	if err != nil {
+		logger.Errorf("Failed to poll task status of updating load balancer VMs. Error: [%v]", err)
+		return err
+	}
+
+	return nil
+}
+
+func toLoadBalancerStatus(lb *LoadBalancer) *v1.LoadBalancerStatus {
+	var endpoint string
+	if lb != nil && lb.Endpoint != nil {
+		endpoint = StringVal(lb.Endpoint)
+	}
+
+	return &v1.LoadBalancerStatus{
+		Ingress: []v1.LoadBalancerIngress{
+			{
+				Hostname: endpoint,
+			},
+		},
+	}
+}
+
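+// loadBalancerLogger prefixes log messages with the calling function, cluster, and service so that load balancer
+// operations can be traced in the logs.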
+type loadBalancerLogger struct {
+	clusterName string
+	k8sService  *v1.Service
+	callingFunc string
+}
+
+func newLoadBalancerLogger(clusterName string, k8sService *v1.Service, callingFunc string) *loadBalancerLogger {
+	return &loadBalancerLogger{
+		clusterName: clusterName,
+		k8sService:  k8sService,
+		callingFunc: callingFunc,
+	}
+}
+
+func (l *loadBalancerLogger) getLogMsg(
+	msgTemplate string, args ...interface{}) string {
+
+	errorMsg := fmt.Sprintf("Cascade Cloud Provider::%s::Cluster [%s] Service [%s]: %s",
+		l.callingFunc, l.clusterName, l.k8sService.Name,
+		msgTemplate)
+	return fmt.Sprintf(errorMsg, args...)
+}
+
+func (l *loadBalancerLogger) Errorf(msgTemplate string, args ...interface{}) {
+	glog.Errorln(l.getLogMsg(msgTemplate, args...))
+}
+
+func (l *loadBalancerLogger) Warningf(msgTemplate string, args ...interface{}) {
+	glog.Warningln(l.getLogMsg(msgTemplate, args...))
+}
+
+func (l *loadBalancerLogger) Infof(msgTemplate string, args ...interface{}) {
+	glog.Infoln(l.getLogMsg(msgTemplate, args...))
+}
\ No newline at end of file
diff --git a/pkg/cloudprovider/providers/cascade/client.go b/pkg/cloudprovider/providers/cascade/client.go
new file mode 100644
index 0000000..e4494e4
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/client.go
@@ -0,0 +1,399 @@
+package cascade
+
+import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+	"github.com/golang/glog"
+	"net/http"
+	"strings"
+	"time"
+)
+
+// Client represents the stateless context needed to call Cascade APIs.
+// Note that we implement the Cascade APIs manually instead of using the swagger-generated code
+// because swagger uses a different version of the OpenAPI library than Kubernetes, and the version
+// conflict is difficult to resolve.
+type Client struct {
+	cfg        *ClientConfig
+	options    ClientOptions
+	restClient *restClient
+}
+
+type ClientConfig struct {
+	tenantName string
+	clusterID  string
+	region     string
+	endpoint   string
+}
+
+// TokenOptions represents the authentication tokens used for Cascade API calls.
+type TokenOptions struct {
+	AccessToken  string `json:"access_token"`
+	ExpiresIn    int    `json:"expires_in"`
+	RefreshToken string `json:"refresh_token,omitempty"`
+	IDToken      string `json:"id_token"`
+	TokenType    string `json:"token_type"`
+}
+
+type TokenCallback func(string)
+
+// Options for Client
+type ClientOptions struct {
+	// When using the Tasks.Wait APIs, defines the duration of how long
+	// we should continue to poll the server. Default is 30 minutes.
+	// TasksAPI.WaitTimeout() can be used to specify timeout on
+	// individual calls.
+	TaskPollTimeout time.Duration
+
+	// Whether or not to ignore any TLS errors when talking to Cascade,
+	// false by default.
+	IgnoreCertificate bool
+
+	// List of root CA's to use for server validation
+	// nil by default.
+	RootCAs *x509.CertPool
+
+	// For tasks APIs, defines the number of retries to make in the event
+	// of an error. Default is 3.
+	TaskRetryCount int
+
+	// Tokens for user authentication. Default is empty.
+	TokenOptions *TokenOptions
+}
+
+const minimumTaskPollDelay = 500 * time.Millisecond
+
+// NewClient creates a new Cascade client which can be used to make API calls to Cascade.
+func NewClient(cfg *CascadeConfig, authClient *AuthClient) (c *Client, err error) {
+	tokenOptions, err := authClient.GetTokensByMachineAccount()
+	if err != nil {
+		glog.Errorf("Cascade Cloud Provider: Failed to create new client due to error: %+v", err)
+		return
+	}
+
+	options := &ClientOptions{
+		TaskPollTimeout:   30 * time.Minute,
+		TaskRetryCount:    3,
+		TokenOptions:      tokenOptions,
+		IgnoreCertificate: false,
+		RootCAs:           nil,
+	}
+
+	tr := &http.Transport{
+		TLSClientConfig: &tls.Config{
+			InsecureSkipVerify: options.IgnoreCertificate,
+			RootCAs:            options.RootCAs},
+	}
+
+	tokenCallback := func(newToken string) {
+		c.options.TokenOptions.AccessToken = newToken
+	}
+
+	restClient := &restClient{
+		authClient:                authClient,
+		httpClient:                &http.Client{Transport: tr},
+		UpdateAccessTokenCallback: tokenCallback,
+	}
+
+	clientConfig := &ClientConfig{
+		tenantName: cfg.Global.TenantName,
+		clusterID:  cfg.Global.ClusterID,
+		region:     cfg.Global.Region,
+		endpoint:   strings.TrimRight(cfg.Global.CloudTarget, "/"),
+	}
+
+	c = &Client{
+		cfg:        clientConfig,
+		restClient: restClient,
+		// Ensure a copy of options is made, rather than using a pointer
+		// which may change out from underneath if misused by the caller.
+		options: *options,
+	}
+
+	return
+}
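+
+// A minimal usage sketch (not part of the provider flow), assuming a parsed CascadeConfig and an AuthClient are
+// already available; the disk name and size below are purely illustrative:
+//
+//	client, err := NewClient(cfg, authClient)
+//	if err != nil {
+//		return err
+//	}
+//	task, err := client.CreateDisk(&DiskCreateSpec{Name: StringPtr("my-disk"), CapacityGB: Int32Ptr(10)})
+//	if err != nil {
+//		return err
+//	}
+//	task, err = client.WaitForTask(StringVal(task.ID))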
+
+// Gets VM with the specified ID.
+func (api *Client) GetVM(vmID string) (vm *VM, err error) {
+	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/vms/%s", api.cfg.endpoint, api.cfg.tenantName,
+		api.cfg.clusterID, vmID)
+	res, err := api.restClient.Get(uri, api.options.TokenOptions)
+	if err != nil {
+		return
+	}
+	defer res.Body.Close()
+	res, err = getError(res)
+	if err != nil {
+		return
+	}
+	vm = &VM{}
+	err = json.NewDecoder(res.Body).Decode(vm)
+	return
+}
+
+// Gets disk with the specified ID.
+func (api *Client) GetDisk(diskID string) (disk *PersistentDisk, err error) {
+	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/disks/%s", api.cfg.endpoint, api.cfg.tenantName,
+		api.cfg.clusterID, diskID)
+	res, err := api.restClient.Get(uri, api.options.TokenOptions)
+	if err != nil {
+		return
+	}
+	defer res.Body.Close()
+	res, err = getError(res)
+	if err != nil {
+		return
+	}
+	disk = &PersistentDisk{}
+	err = json.NewDecoder(res.Body).Decode(disk)
+	return
+}
+
+// Creates a disk under the cluster.
+func (api *Client) CreateDisk(spec *DiskCreateSpec) (task *Task, err error) {
+	body, err := json.Marshal(spec)
+	if err != nil {
+		return
+	}
+	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/disks", api.cfg.endpoint, api.cfg.tenantName,
+		api.cfg.clusterID)
+	res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
+	if err != nil {
+		return
+	}
+	defer res.Body.Close()
+	task, err = getTask(getError(res))
+	return
+}
+
+// Deletes a disk with the specified ID.
+func (api *Client) DeleteDisk(diskID string) (task *Task, err error) {
+	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/disks/%s", api.cfg.endpoint, api.cfg.tenantName,
+		api.cfg.clusterID, diskID)
+	res, err := api.restClient.Delete(uri, api.options.TokenOptions)
+	if err != nil {
+		return
+	}
+	defer res.Body.Close()
+	task, err = getTask(getError(res))
+	return
+}
+
+// Attaches a disk to the specified VM.
+func (api *Client) AttachDisk(vmID string, op *VMDiskOperation) (task *Task, err error) {
+	body, err := json.Marshal(op)
+	if err != nil {
+		return
+	}
+	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/vms/%s/attach_disk", api.cfg.endpoint, api.cfg.tenantName,
+		api.cfg.clusterID, vmID)
+	res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
+	if err != nil {
+		return
+	}
+	defer res.Body.Close()
+	task, err = getTask(getError(res))
+	return
+}
+
+// Detaches a disk from the specified VM.
+func (api *Client) DetachDisk(vmID string, op *VMDiskOperation) (task *Task, err error) {
+	body, err := json.Marshal(op)
+	if err != nil {
+		return
+	}
+	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/vms/%s/detach_disk", api.cfg.endpoint, api.cfg.tenantName,
+		api.cfg.clusterID, vmID)
+	res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
+	if err != nil {
+		return
+	}
+	defer res.Body.Close()
+	task, err = getTask(getError(res))
+	return
+}
+
+// Gets a task by ID.
+func (api *Client) GetTask(taskID string) (task *Task, err error) {
+	uri := fmt.Sprintf("%s/v1/tenants/%s/tasks/%s?region=%s", api.cfg.endpoint, api.cfg.tenantName,
+		taskID, api.cfg.region)
+	res, err := api.restClient.Get(uri, api.options.TokenOptions)
+	if err != nil {
+		return
+	}
+	defer res.Body.Close()
+	result, err := getTask(getError(res))
+	return result, err
+}
+
+// Waits for a task to complete by polling the tasks API until a task returns with the state COMPLETED or ERROR.
+func (api *Client) WaitForTask(taskID string) (task *Task, err error) {
+	start := time.Now()
+	numErrors := 0
+	maxErrors := api.options.TaskRetryCount
+	backoffMultiplier := 1
+
+	for time.Since(start) < api.options.TaskPollTimeout {
+		task, err = api.GetTask(taskID)
+		if err != nil {
+			switch err.(type) {
+			// If an APIError comes back, something is wrong; return the error to the caller.
+			case APIError:
+				return
+			// For other errors, retry before giving up.
+			default:
+				numErrors++
+				if numErrors > maxErrors {
+					return
+				}
+			}
+		} else {
+			// Reset the error count any time a successful call is made
+			numErrors = 0
+			if StringVal(task.State) == "COMPLETED" {
+				return
+			}
+			if StringVal(task.State) == "ERROR" {
+				err = TaskError{StringVal(task.ID), getFailedStep(task)}
+				return
+			}
+		}
+
+		// Perform backoff based on how long it has been since we started polling. The logic is as follows:
+		// For the first 10 seconds, poll every 500 milliseconds.
+		// From then until the 1 minute mark, poll every 1 second.
+		// From then until the 10 minute mark, poll every 5 seconds.
+		// From then until the timeout (30 minutes), poll every 10 seconds.
+		elapsedTime := time.Since(start)
+		if elapsedTime > 10*time.Second && elapsedTime <= 60*time.Second {
+			backoffMultiplier = 2
+		} else if elapsedTime > 60*time.Second && elapsedTime <= 600*time.Second {
+			backoffMultiplier = 10
+		} else if elapsedTime > 600*time.Second && elapsedTime <= api.options.TaskPollTimeout {
+			backoffMultiplier = 20
+		}
+		time.Sleep(time.Duration(backoffMultiplier) * minimumTaskPollDelay)
+	}
+	err = TaskTimeoutError{taskID}
+	return
+}
+
+// CreateOrUpdateLoadBalancer creates a load balancer if it does not exist, or updates it otherwise.
+func (api *Client) CreateOrUpdateLoadBalancer(spec *LoadBalancerCreateSpec) (*Task, error) {
+	body, err := json.Marshal(spec)
+	if err != nil {
+		return nil, err
+	}
+	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers", api.cfg.endpoint, api.cfg.tenantName,
+		api.cfg.clusterID)
+	res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	return getTask(getError(res))
+}
+
+// GetLoadBalancer returns a load balancer by name
+func (api *Client) GetLoadBalancer(loadBalancerName *string) (*LoadBalancer, error) {
+	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers/%s", api.cfg.endpoint, api.cfg.tenantName,
+		api.cfg.clusterID, StringVal(loadBalancerName))
+	res, err := api.restClient.Get(uri, api.options.TokenOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	res, err = getError(res)
+	if err != nil {
+		return nil, err
+	}
+	loadBalancer := &LoadBalancer{}
+	err = json.NewDecoder(res.Body).Decode(loadBalancer)
+	return loadBalancer, err
+}
+
+// DeleteLoadBalancer deletes a load balancer by name
+func (api *Client) DeleteLoadBalancer(loadBalancerName *string, subDomain string) (*Task, error) {
+	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers/%s", api.cfg.endpoint, api.cfg.tenantName,
+		api.cfg.clusterID, StringVal(loadBalancerName))
+
+	if len(subDomain) > 0 {
+		uri = fmt.Sprintf("%s?sub-domain=%s", uri, subDomain)
+	}
+
+	res, err := api.restClient.Delete(uri, api.options.TokenOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	return getTask(getError(res))
+}
+
+// ApplyVMsToLoadBalancer updates the instances that are registered with the load balancer
+func (api *Client) ApplyVMsToLoadBalancer(loadBalancerName *string, update *LoadBalancerVMUpdate) (*Task, error) {
+	body, err := json.Marshal(update)
+	if err != nil {
+		return nil, err
+	}
+	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers/%s/update_vms", api.cfg.endpoint, api.cfg.tenantName,
+		api.cfg.clusterID, StringVal(loadBalancerName))
+	res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	return getTask(getError(res))
+}
+
+// GetZones gets all the zones in which the cluster has VMs.
+func (api *Client) GetZones() (zones []string, err error) {
+	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/zones", api.cfg.endpoint, api.cfg.tenantName,
+		api.cfg.clusterID)
+	res, err := api.restClient.Get(uri, api.options.TokenOptions)
+	if err != nil {
+		return
+	}
+	defer res.Body.Close()
+	res, err = getError(res)
+	if err != nil {
+		return
+	}
+	err = json.NewDecoder(res.Body).Decode(&zones)
+	return
+}
+
+// Reads a task object out of the HTTP response. Takes an error argument
+// so that callers like GetTask can easily wrap getError. This function will do nothing
+// if e is not nil.
+// e.g. res, err := getTask(getError(someApi.Get()))
+func getTask(res *http.Response, e error) (*Task, error) {
+	if e != nil {
+		return nil, e
+	}
+	var task Task
+	err := json.NewDecoder(res.Body).Decode(&task)
+	if err != nil {
+		return nil, err
+	}
+	if StringVal(task.State) == "ERROR" {
+		// Critical: return task as well, so that it can be examined
+		// for error details.
+		return &task, TaskError{StringVal(task.ID), getFailedStep(&task)}
+	}
+	return &task, nil
+}
+
+// getFailedStep returns the failed step in the task so that error details can be extracted for a failed task.
+func getFailedStep(task *Task) (step Step) {
+	var errorStep Step
+	for _, s := range task.Steps {
+		if StringVal(s.State) == "ERROR" {
+			errorStep = *s
+			break
+		}
+	}
+
+	return errorStep
+}
diff --git a/pkg/cloudprovider/providers/cascade/oidcclient.go b/pkg/cloudprovider/providers/cascade/oidcclient.go
new file mode 100644
index 0000000..6a71cc1
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/oidcclient.go
@@ -0,0 +1,297 @@
+package cascade
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+const tokenScope string = "openid offline_access"
+
+// OIDCClient is client for OIDC
+type OIDCClient struct {
+	httpClient *http.Client
+	logger     *log.Logger
+
+	Endpoint string
+	Options  *OIDCClientOptions
+}
+
+// OIDCClientOptions is OIDC client options
+type OIDCClientOptions struct {
+	// Whether or not to ignore any TLS errors when talking to Cascade,
+	// false by default.
+	IgnoreCertificate bool
+
+	// List of root CA's to use for server validation
+	// nil by default.
+	RootCAs *x509.CertPool
+
+	// The scope values to use when requesting tokens
+	TokenScope string
+}
+
+// NewOIDCClient creates an instance of OIDCClient
+func NewOIDCClient(endpoint string, options *OIDCClientOptions, logger *log.Logger) (c *OIDCClient) {
+	if logger == nil {
+		logger = log.New(ioutil.Discard, "", log.LstdFlags)
+	}
+
+	options = buildOptions(options)
+	tr := &http.Transport{
+		TLSClientConfig: &tls.Config{
+			InsecureSkipVerify: options.IgnoreCertificate,
+			RootCAs:            options.RootCAs},
+	}
+
+	c = &OIDCClient{
+		httpClient: &http.Client{Transport: tr},
+		logger:     logger,
+		Endpoint:   strings.TrimRight(endpoint, "/"),
+		Options:    options,
+	}
+	return
+}
+
+func buildOptions(options *OIDCClientOptions) (result *OIDCClientOptions) {
+	result = &OIDCClientOptions{
+		TokenScope: tokenScope,
+	}
+
+	if options == nil {
+		return
+	}
+
+	result.IgnoreCertificate = options.IgnoreCertificate
+
+	if options.RootCAs != nil {
+		result.RootCAs = options.RootCAs
+	}
+
+	if options.TokenScope != "" {
+		result.TokenScope = options.TokenScope
+	}
+
+	return
+}
+
+func (client *OIDCClient) buildURL(path string) (url string) {
+	return fmt.Sprintf("%s%s", client.Endpoint, path)
+}
+
+// Cert download helper
+
+const certDownloadPath string = "/afd/vecs/ssl"
+
+type lightWaveCert struct {
+	Value string `json:"encoded"`
+}
+
+// GetRootCerts gets root certs
+func (client *OIDCClient) GetRootCerts() (certList []*x509.Certificate, err error) {
+	// Use a transport with standard TLS verification for the cert download, and restore the original transport when done.
+	originalTr := client.httpClient.Transport
+	defer client.setTransport(originalTr)
+
+	tr := &http.Transport{
+		TLSClientConfig: &tls.Config{
+			InsecureSkipVerify: false,
+		},
+	}
+	client.setTransport(tr)
+
+	// get the certs
+	resp, err := client.httpClient.Get(client.buildURL(certDownloadPath))
+	if err != nil {
+		return
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		err = fmt.Errorf("Unexpected error retrieving auth server certs: %v %s", resp.StatusCode, resp.Status)
+		return
+	}
+
+	// parse the certs
+	certsData := &[]lightWaveCert{}
+	err = json.NewDecoder(resp.Body).Decode(certsData)
+	if err != nil {
+		return
+	}
+
+	certList = make([]*x509.Certificate, len(*certsData))
+	for idx, cert := range *certsData {
+		block, _ := pem.Decode([]byte(cert.Value))
+		if block == nil {
+			err = fmt.Errorf("Unexpected response format: %v", certsData)
+			return nil, err
+		}
+
+		decodedCert, err := x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+
+		certList[idx] = decodedCert
+	}
+
+	return
+}
+
+func (client *OIDCClient) setTransport(tr http.RoundTripper) {
+	client.httpClient.Transport = tr
+}
+
+// Metadata request helpers
+const metadataPathFormat string = "/openidconnect/%s/.well-known/openid-configuration"
+
+// OIDCMetadataResponse is the response for Metadata request
+type OIDCMetadataResponse struct {
+	TokenEndpoint         string `json:"token_endpoint"`
+	AuthorizationEndpoint string `json:"authorization_endpoint"`
+	EndSessionEndpoint    string `json:"end_session_endpoint"`
+}
+
+func (client *OIDCClient) getMetadata(domain string) (metadata *OIDCMetadataResponse, err error) {
+	metadataPath := fmt.Sprintf(metadataPathFormat, domain)
+	request, err := http.NewRequest("GET", client.buildURL(metadataPath), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := client.httpClient.Do(request)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	err = client.checkResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	metadata = &OIDCMetadataResponse{}
+	err = json.NewDecoder(resp.Body).Decode(metadata)
+	if err != nil {
+		return nil, err
+	}
+
+	return
+}
+
+// Token request helpers
+
+const passwordGrantFormatString = "grant_type=password&username=%s&password=%s&scope=%s"
+const refreshTokenGrantFormatString = "grant_type=refresh_token&refresh_token=%s"
+const clientGrantFormatString = "grant_type=password&username=%s&password=%s&scope=%s&client_id=%s"
+
+// OIDCTokenResponse is the response for OIDC request
+type OIDCTokenResponse struct {
+	AccessToken  string `json:"access_token"`
+	ExpiresIn    int    `json:"expires_in"`
+	RefreshToken string `json:"refresh_token,omitempty"`
+	IDToken      string `json:"id_token"`
+	TokenType    string `json:"token_type"`
+}
+
+// GetTokenByPasswordGrant gets OIDC tokens by password
+func (client *OIDCClient) GetTokenByPasswordGrant(domain, username, password string) (tokens *OIDCTokenResponse, err error) {
+	metadata, err := client.getMetadata(domain)
+	if err != nil {
+		return nil, err
+	}
+
+	username = url.QueryEscape(username)
+	password = url.QueryEscape(password)
+	body := fmt.Sprintf(passwordGrantFormatString, username, password, client.Options.TokenScope)
+	return client.getToken(metadata.TokenEndpoint, body)
+}
+
+// GetClientTokenByPasswordGrant gets OIDC tokens by password for the given client ID.
+func (client *OIDCClient) GetClientTokenByPasswordGrant(domain, username, password, clientID string) (tokens *OIDCTokenResponse, err error) {
+	metadata, err := client.getMetadata(domain)
+	if err != nil {
+		return nil, err
+	}
+
+	username = url.QueryEscape(username)
+	password = url.QueryEscape(password)
+	clientID = url.QueryEscape(clientID)
+	body := fmt.Sprintf(clientGrantFormatString, username, password, client.Options.TokenScope, clientID)
+	return client.getToken(metadata.TokenEndpoint, body)
+}
+
+// GetTokenByRefreshTokenGrant gets OIDC tokens by refresh token
+func (client *OIDCClient) GetTokenByRefreshTokenGrant(domain, refreshToken string) (tokens *OIDCTokenResponse, err error) {
+	metadata, err := client.getMetadata(domain)
+	if err != nil {
+		return nil, err
+	}
+
+	body := fmt.Sprintf(refreshTokenGrantFormatString, refreshToken)
+	return client.getToken(metadata.TokenEndpoint, body)
+}
+
+func (client *OIDCClient) getToken(tokenEndpoint, body string) (tokens *OIDCTokenResponse, err error) {
+	request, err := http.NewRequest("POST", tokenEndpoint, strings.NewReader(body))
+	if err != nil {
+		return nil, err
+	}
+	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+
+	resp, err := client.httpClient.Do(request)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	err = client.checkResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	tokens = &OIDCTokenResponse{}
+	err = json.NewDecoder(resp.Body).Decode(tokens)
+	if err != nil {
+		return nil, err
+	}
+
+	return
+}
+
+// OIDCError is OIDC error
+type OIDCError struct {
+	Code    string `json:"error"`
+	Message string `json:"error_description"`
+}
+
+func (e OIDCError) Error() string {
+	return fmt.Sprintf("%v: %v", e.Code, e.Message)
+}
+
+func (client *OIDCClient) checkResponse(response *http.Response) (err error) {
+	if response.StatusCode/100 == 2 {
+		return
+	}
+
+	respBody, readErr := ioutil.ReadAll(response.Body)
+	if readErr != nil {
+		return fmt.Errorf(
+			"Status: %v, Body: %v [%v]", response.Status, string(respBody[:]), readErr)
+	}
+
+	var oidcErr OIDCError
+	err = json.Unmarshal(respBody, &oidcErr)
+	if err != nil || oidcErr.Code == "" {
+		return fmt.Errorf(
+			"Status: %v, Body: %v [%v]", response.Status, string(respBody[:]), readErr)
+	}
+
+	return oidcErr
+}
diff --git a/pkg/cloudprovider/providers/cascade/restclient.go b/pkg/cloudprovider/providers/cascade/restclient.go
new file mode 100644
index 0000000..71d8d1c
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/restclient.go
@@ -0,0 +1,262 @@
+package cascade
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+type restClient struct {
+	httpClient                *http.Client
+	authClient                *AuthClient
+	UpdateAccessTokenCallback TokenCallback
+}
+
+type request struct {
+	Method      string
+	URL         string
+	ContentType string
+	Body        io.Reader
+	Tokens      *TokenOptions
+}
+
+type page struct {
+	Items            []interface{} `json:"items"`
+	NextPageLink     string        `json:"nextPageLink"`
+	PreviousPageLink string        `json:"previousPageLink"`
+}
+
+type documentList struct {
+	Items []interface{}
+}
+
+type bodyRewinder func() io.Reader
+
+const appJson string = "application/json"
+const expiredAuthToken int32 = 1904
+
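+// AppendSlice appends dataToAppend to origSlice, growing the slice's capacity when needed, and returns the result.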
+func (client *restClient) AppendSlice(origSlice []interface{}, dataToAppend []interface{}) []interface{} {
+	origLen := len(origSlice)
+	newLen := origLen + len(dataToAppend)
+
+	if newLen > cap(origSlice) {
+		newSlice := make([]interface{}, (newLen+1)*2)
+		copy(newSlice, origSlice)
+		origSlice = newSlice
+	}
+
+	origSlice = origSlice[0:newLen]
+	copy(origSlice[origLen:newLen], dataToAppend)
+
+	return origSlice
+}
+
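+// Get issues a GET request with the given auth tokens and returns the raw HTTP response.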
+func (client *restClient) Get(url string, tokens *TokenOptions) (res *http.Response, err error) {
+	req := request{"GET", url, "", nil, tokens}
+	res, err = client.SendRequest(&req, nil)
+	return
+}
+
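+// GetList issues a GET request and follows nextPageLink pagination, accumulating the items from every page into a
+// single JSON document.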
+func (client *restClient) GetList(endpoint string, url string, tokens *TokenOptions) (result []byte, err error) {
+	req := request{"GET", url, "", nil, tokens}
+	res, err := client.SendRequest(&req, nil)
+	if err != nil {
+		return
+	}
+	res, err = getError(res)
+	if err != nil {
+		return
+	}
+
+	decoder := json.NewDecoder(res.Body)
+	decoder.UseNumber()
+
+	page := &page{}
+	err = decoder.Decode(page)
+	if err != nil {
+		return
+	}
+
+	documentList := &documentList{}
+	documentList.Items = client.AppendSlice(documentList.Items, page.Items)
+
+	for page.NextPageLink != "" {
+		req = request{"GET", endpoint + page.NextPageLink, "", nil, tokens}
+		res, err = client.SendRequest(&req, nil)
+		if err != nil {
+			return
+		}
+		res, err = getError(res)
+		if err != nil {
+			return
+		}
+
+		decoder = json.NewDecoder(res.Body)
+		decoder.UseNumber()
+
+		page.NextPageLink = ""
+		page.PreviousPageLink = ""
+
+		err = decoder.Decode(page)
+		if err != nil {
+			return
+		}
+
+		documentList.Items = client.AppendSlice(documentList.Items, page.Items)
+	}
+
+	result, err = json.Marshal(documentList)
+
+	return
+}
+
+func (client *restClient) Post(url string, contentType string, body io.ReadSeeker, tokens *TokenOptions) (res *http.Response, err error) {
+	if contentType == "" {
+		contentType = appJson
+	}
+
+	req := request{"POST", url, contentType, body, tokens}
+	rewinder := func() io.Reader {
+		body.Seek(0, 0)
+		return body
+	}
+	res, err = client.SendRequest(&req, rewinder)
+	return
+}
+
+func (client *restClient) Patch(url string, contentType string, body io.ReadSeeker, tokens *TokenOptions) (res *http.Response, err error) {
+	if contentType == "" {
+		contentType = appJson
+	}
+
+	req := request{"PATCH", url, contentType, body, tokens}
+	rewinder := func() io.Reader {
+		body.Seek(0, 0)
+		return body
+	}
+	res, err = client.SendRequest(&req, rewinder)
+	return
+}
+
+func (client *restClient) Put(url string, contentType string, body io.ReadSeeker, tokens *TokenOptions) (res *http.Response, err error) {
+	if contentType == "" {
+		contentType = appJson
+	}
+
+	req := request{"PUT", url, contentType, body, tokens}
+	rewinder := func() io.Reader {
+		body.Seek(0, 0)
+		return body
+	}
+	res, err = client.SendRequest(&req, rewinder)
+	return
+}
+
+func (client *restClient) Delete(url string, tokens *TokenOptions) (res *http.Response, err error) {
+	req := request{"DELETE", url, "", nil, tokens}
+	res, err = client.SendRequest(&req, nil)
+	return
+}
+
+func (client *restClient) SendRequest(req *request, bodyRewinder bodyRewinder) (res *http.Response, err error) {
+	res, err = client.sendRequestHelper(req)
+	// In most cases, we'll return immediately. If the operation succeeded but we got a 401 response and we're
+	// using authentication, then we'll look into the body to see if the token expired.
+	if err != nil {
+		return res, err
+	}
+	if res.StatusCode != 401 {
+		// It's not a 401, so the token didn't expire
+		return res, err
+	}
+	if req.Tokens == nil || req.Tokens.AccessToken == "" {
+		// We don't have a token, so we can't renew the token, no need to proceed
+		return res, err
+	}
+
+	// We're going to look in the body to see if it failed because the token expired
+	// This means we need to read the body, but the functions that call us also
+	// expect to read the body. So we read the body, then create a new reader
+	// so they can read the body as normal.
+	body, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return res, err
+	}
+	res.Body = ioutil.NopCloser(bytes.NewReader(body))
+
+	// Now see if we had an expired token or not
+	var apiError APIError
+	err = json.Unmarshal(body, &apiError)
+	if err != nil {
+		return res, err
+	}
+	if apiError.ErrorCode != expiredAuthToken {
+		return res, nil
+	}
+
+	// We were told that the access token expired, so we acquire a new token using the refresh token.
+	newTokens, err := client.authClient.GetTokensByRefreshToken(req.Tokens.RefreshToken)
+	// If there is an error during token refresh, we assume that the refresh token also expired. So we login again using
+	// the machine account.
+	if err != nil {
+		newTokens, err = client.authClient.GetTokensByMachineAccount()
+		if err != nil {
+			return res, err
+		}
+	}
+	req.Tokens.AccessToken = newTokens.AccessToken
+	if client.UpdateAccessTokenCallback != nil {
+		client.UpdateAccessTokenCallback(newTokens.AccessToken)
+	}
+	if req.Body != nil && bodyRewinder != nil {
+		req.Body = bodyRewinder()
+	}
+	res, err = client.sendRequestHelper(req)
+	return res, err
+}
+
+func (client *restClient) sendRequestHelper(req *request) (res *http.Response, err error) {
+	r, err := http.NewRequest(req.Method, req.URL, req.Body)
+	if err != nil {
+		return
+	}
+	if req.ContentType != "" {
+		r.Header.Add("Content-Type", req.ContentType)
+	}
+	if req.Tokens != nil && req.Tokens.AccessToken != "" {
+		r.Header.Add("Authorization", "Bearer "+req.Tokens.AccessToken)
+	}
+	res, err = client.httpClient.Do(r)
+	if err != nil {
+		return
+	}
+
+	return
+}
+
+// Reads an error out of the HTTP response, or does nothing if
+// no error occurred.
+func getError(res *http.Response) (*http.Response, error) {
+	// Do nothing if the response is a successful 2xx
+	if res.StatusCode/100 == 2 {
+		return res, nil
+	}
+	var apiError APIError
+	// ReadAll is usually a bad practice, but here we need to read the response all
+	// at once because we may attempt to use the data twice. It's preferable to use
+	// methods that take io.Reader, e.g. json.NewDecoder
+	body, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(body, &apiError)
+	if err != nil {
+		// If deserializing into APIError fails, return a generic HttpError instead.
+		return nil, HttpError{res.StatusCode, string(body[:])}
+	}
+	apiError.HttpStatusCode = res.StatusCode
+	return nil, apiError
+}
diff --git a/pkg/cloudprovider/providers/cascade/tests_owed b/pkg/cloudprovider/providers/cascade/tests_owed
new file mode 100644
index 0000000..dff5ab1
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/tests_owed
@@ -0,0 +1,5 @@
+
+Yu Sheng
+Change-Id: Ifc11818f65a3e018aeea6988d9e2c0719b592920
+
+
diff --git a/pkg/cloudprovider/providers/cascade/utils.go b/pkg/cloudprovider/providers/cascade/utils.go
new file mode 100644
index 0000000..866f853
--- /dev/null
+++ b/pkg/cloudprovider/providers/cascade/utils.go
@@ -0,0 +1,29 @@
+package cascade
+
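+// StringPtr returns a pointer to the given string.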
+func StringPtr(s string) *string {
+	return &s
+}
+
+// StringVal returns the string value from a string pointer; a nil pointer yields "".
+func StringVal(p *string) (s string) {
+	if p != nil {
+		s = *p
+	}
+	return
+}
+
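+// Int64Ptr returns a pointer to the given int64.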
+func Int64Ptr(s int64) *int64 {
+	return &s
+}
+
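+// Int64Val returns the int64 value from an int64 pointer. The pointer must not be nil.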
+func Int64Val(s *int64) int64 {
+	return *s
+}
+
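+// Int32Ptr returns a pointer to the given int32.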
+func Int32Ptr(s int32) *int32 {
+	return &s
+}
+
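+// BoolPtr returns a pointer to the given bool.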
+func BoolPtr(s bool) *bool {
+	return &s
+}
diff --git a/pkg/cloudprovider/providers/providers.go b/pkg/cloudprovider/providers/providers.go
index 7de9ca9..6d8a1d2 100644
--- a/pkg/cloudprovider/providers/providers.go
+++ b/pkg/cloudprovider/providers/providers.go
@@ -20,6 +20,7 @@ import (
 	// Cloud providers
 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
+	_ "k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack"
 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
diff --git a/pkg/kubeapiserver/authorizer/config.go b/pkg/kubeapiserver/authorizer/config.go
index 659f2ae..ed1f5f1 100644
--- a/pkg/kubeapiserver/authorizer/config.go
+++ b/pkg/kubeapiserver/authorizer/config.go
@@ -32,6 +32,7 @@ import (
 	"k8s.io/kubernetes/plugin/pkg/auth/authorizer/node"
 	"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
 	"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
+	"k8s.io/kubernetes/plugin/pkg/auth/authorizer/vke"
 )
 
 type AuthorizationConfig struct {
@@ -82,7 +83,12 @@ func (config AuthorizationConfig) New() (authorizer.Authorizer, authorizer.RuleR
 			)
 			nodeAuthorizer := node.NewAuthorizer(graph, nodeidentifier.NewDefaultNodeIdentifier(), bootstrappolicy.NodeRules())
 			authorizers = append(authorizers, nodeAuthorizer)
-
+		case modes.ModeVKE:
+			vkeAuthorizer, err := vke.NewAuthorizer()
+			if err != nil {
+				return nil, nil, err
+			}
+			authorizers = append(authorizers, vkeAuthorizer)
 		case modes.ModeAlwaysAllow:
 			alwaysAllowAuthorizer := authorizerfactory.NewAlwaysAllowAuthorizer()
 			authorizers = append(authorizers, alwaysAllowAuthorizer)
diff --git a/pkg/kubeapiserver/authorizer/modes/modes.go b/pkg/kubeapiserver/authorizer/modes/modes.go
index 56a708a..6eb920f 100644
--- a/pkg/kubeapiserver/authorizer/modes/modes.go
+++ b/pkg/kubeapiserver/authorizer/modes/modes.go
@@ -23,9 +23,10 @@ const (
 	ModeWebhook     string = "Webhook"
 	ModeRBAC        string = "RBAC"
 	ModeNode        string = "Node"
+	ModeVKE         string = "VKE"
 )
 
-var AuthorizationModeChoices = []string{ModeAlwaysAllow, ModeAlwaysDeny, ModeABAC, ModeWebhook, ModeRBAC, ModeNode}
+var AuthorizationModeChoices = []string{ModeAlwaysAllow, ModeAlwaysDeny, ModeABAC, ModeWebhook, ModeRBAC, ModeNode, ModeVKE}
 
 // IsValidAuthorizationMode returns true if the given authorization mode is a valid one for the apiserver
 func IsValidAuthorizationMode(authzMode string) bool {
diff --git a/pkg/printers/internalversion/describe.go b/pkg/printers/internalversion/describe.go
index c33b1c6..ef39b3b 100644
--- a/pkg/printers/internalversion/describe.go
+++ b/pkg/printers/internalversion/describe.go
@@ -751,6 +751,8 @@ func describeVolumes(volumes []api.Volume, w PrefixWriter, space string) {
 			printFlexVolumeSource(volume.VolumeSource.FlexVolume, w)
 		case volume.VolumeSource.Flocker != nil:
 			printFlockerVolumeSource(volume.VolumeSource.Flocker, w)
+		case volume.VolumeSource.CascadeDisk != nil:
+			printCascadeDiskVolumeSource(volume.VolumeSource.CascadeDisk, w)
 		default:
 			w.Write(LEVEL_1, "<unknown>\n")
 		}
@@ -1101,6 +1103,13 @@ func printCSIPersistentVolumeSource(csi *api.CSIPersistentVolumeSource, w Prefix
 		csi.Driver, csi.VolumeHandle, csi.ReadOnly)
 }
 
+func printCascadeDiskVolumeSource(cascade *api.CascadeDiskVolumeSource, w PrefixWriter) {
+	w.Write(LEVEL_2, "Type:\tVKEDisk (a Persistent Disk resource in VKE)\n"+
+		"    DiskID:\t%v\n"+
+		"    FSType:\t%v\n",
+		cascade.DiskID, cascade.FSType)
+}
+
 type PersistentVolumeDescriber struct {
 	clientset.Interface
 }
@@ -1189,6 +1198,8 @@ func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) (
 			printFlockerVolumeSource(pv.Spec.Flocker, w)
 		case pv.Spec.CSI != nil:
 			printCSIPersistentVolumeSource(pv.Spec.CSI, w)
+		case pv.Spec.CascadeDisk != nil:
+			printCascadeDiskVolumeSource(pv.Spec.CascadeDisk, w)
 		default:
 			w.Write(LEVEL_1, "<unknown>\n")
 		}
diff --git a/pkg/security/podsecuritypolicy/util/util.go b/pkg/security/podsecuritypolicy/util/util.go
index d654f88..422fe0d 100644
--- a/pkg/security/podsecuritypolicy/util/util.go
+++ b/pkg/security/podsecuritypolicy/util/util.go
@@ -68,6 +68,7 @@ func GetAllFSTypesAsSet() sets.String {
 		string(extensions.PortworxVolume),
 		string(extensions.ScaleIO),
 		string(extensions.CSI),
+		string(extensions.CascadeDisk),
 	)
 	return fstypes
 }
@@ -129,6 +130,8 @@ func GetVolumeFSType(v api.Volume) (extensions.FSType, error) {
 		return extensions.PortworxVolume, nil
 	case v.ScaleIO != nil:
 		return extensions.ScaleIO, nil
+	case v.CascadeDisk != nil:
+		return extensions.CascadeDisk, nil
 	}
 
 	return "", fmt.Errorf("unknown volume type for volume: %#v", v)
diff --git a/pkg/volume/cascade_disk/BUILD b/pkg/volume/cascade_disk/BUILD
new file mode 100644
index 0000000..3386612
--- /dev/null
+++ b/pkg/volume/cascade_disk/BUILD
@@ -0,0 +1,43 @@
+package(default_visibility = ["//visibility:public"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+    "go_test",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "attacher.go",
+        "cascade_disk.go",
+        "cascade_util.go",
+    ],
+    deps = [
+        "//pkg/cloudprovider:go_default_library",
+        "//pkg/cloudprovider/providers/cascade:go_default_library",
+        "//pkg/util/mount:go_default_library",
+        "//pkg/util/strings:go_default_library",
+        "//pkg/volume:go_default_library",
+        "//pkg/volume/util:go_default_library",
+        "//pkg/volume/util/volumehelper:go_default_library",
+        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+)
diff --git a/pkg/volume/cascade_disk/OWNERS b/pkg/volume/cascade_disk/OWNERS
new file mode 100644
index 0000000..c3a4ed7
--- /dev/null
+++ b/pkg/volume/cascade_disk/OWNERS
@@ -0,0 +1,2 @@
+maintainers:
+- ashokc
diff --git a/pkg/volume/cascade_disk/attacher.go b/pkg/volume/cascade_disk/attacher.go
new file mode 100644
index 0000000..66b5836
--- /dev/null
+++ b/pkg/volume/cascade_disk/attacher.go
@@ -0,0 +1,265 @@
+package cascade_disk
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
+	"k8s.io/kubernetes/pkg/util/mount"
+	"k8s.io/kubernetes/pkg/volume"
+	volumeutil "k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+)
+
+type cascadeDiskAttacher struct {
+	host         volume.VolumeHost
+	cascadeDisks cascade.Disks
+}
+
+var _ volume.Attacher = &cascadeDiskAttacher{}
+var _ volume.AttachableVolumePlugin = &cascadeDiskPlugin{}
+
+func (plugin *cascadeDiskPlugin) NewAttacher() (volume.Attacher, error) {
+	cascadeCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
+	if err != nil {
+		glog.Errorf("Cascade attacher: NewAttacher failed to get cloud provider")
+		return nil, err
+	}
+
+	return &cascadeDiskAttacher{
+		host:         plugin.host,
+		cascadeDisks: cascadeCloud,
+	}, nil
+}
+
+// Attach attaches the volume specified by the given spec to the given host. On success, returns the device path where
+// the device was attached on the node.
+func (attacher *cascadeDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
+	hostName := string(nodeName)
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		glog.Errorf("Cascade attacher: Attach failed to get volume source")
+		return "", err
+	}
+
+	// cascadeDisks.AttachDisk checks whether the disk is already attached to the node, so we don't have to do that
+	// separately here.
+	glog.V(4).Infof("Cascade: Attach disk called for host %s", hostName)
+	devicePath, err := attacher.cascadeDisks.AttachDisk(volumeSource.DiskID, nodeName)
+	if err != nil {
+		glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.DiskID, nodeName, err)
+		return "", err
+	}
+	return devicePath, nil
+}
+
+// VolumesAreAttached verifies whether the volumes specified in the spec are attached to the specified node.
+func (attacher *cascadeDiskAttacher) VolumesAreAttached(specs []*volume.Spec,
+	nodeName types.NodeName) (map[*volume.Spec]bool, error) {
+	volumesAttachedCheck := make(map[*volume.Spec]bool)
+	volumeSpecMap := make(map[string]*volume.Spec)
+	diskIDList := []string{}
+	for _, spec := range specs {
+		volumeSource, _, err := getVolumeSource(spec)
+		if err != nil {
+			glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
+			continue
+		}
+
+		diskIDList = append(diskIDList, volumeSource.DiskID)
+		volumesAttachedCheck[spec] = true
+		volumeSpecMap[volumeSource.DiskID] = spec
+	}
+	attachedResult, err := attacher.cascadeDisks.DisksAreAttached(diskIDList, nodeName)
+	if err != nil {
+		glog.Errorf(
+			"Error checking if volumes (%v) are attached to current node (%q). err=%v",
+			diskIDList, nodeName, err)
+		return volumesAttachedCheck, err
+	}
+
+	for diskID, attached := range attachedResult {
+		if !attached {
+			spec := volumeSpecMap[diskID]
+			volumesAttachedCheck[spec] = false
+			glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached",
+				diskID, spec.Name())
+		}
+	}
+	return volumesAttachedCheck, nil
+}
+
+// WaitForAttach waits until the devicePath returned by the Attach call is available.
+func (attacher *cascadeDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod,
+	timeout time.Duration) (string, error) {
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		glog.Errorf("Cascade attacher: WaitForAttach failed to get volume source")
+		return "", err
+	}
+
+	if devicePath == "" {
+		return "", fmt.Errorf("WaitForAttach failed for disk %s: devicePath is empty.", volumeSource.DiskID)
+	}
+
+	ticker := time.NewTicker(checkSleepDuration)
+	defer ticker.Stop()
+
+	timer := time.NewTimer(timeout)
+	defer timer.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			glog.V(4).Infof("Checking disk %s is attached", volumeSource.DiskID)
+			devicePath := getDiskByIdPath(devicePath)
+			checkPath, err := verifyDevicePath(devicePath)
+			if err != nil {
+				// Log error, if any, and continue checking periodically. See issue #11321
+				glog.Warningf("VKE attacher: WaitForAttach with devicePath %s Checking PD %s Error verifying "+
+					"path", devicePath, volumeSource.DiskID)
+			} else if checkPath != "" {
+				// A device path has successfully been created for the disk
+				glog.V(4).Infof("Successfully found attached disk %s.", volumeSource.DiskID)
+				return checkPath, nil
+			}
+		case <-timer.C:
+			return "", fmt.Errorf("Could not find attached disk %s. Timeout waiting for mount paths to be "+
+				"created.", volumeSource.DiskID)
+		}
+	}
+}
+
+// GetDeviceMountPath returns a path where the device should point which should be bind mounted for individual volumes.
+func (attacher *cascadeDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		glog.Errorf("Cascade attacher: GetDeviceMountPath failed to get volume source")
+		return "", err
+	}
+
+	return makeGlobalPDPath(attacher.host, volumeSource.DiskID), nil
+}
+
+// GetMountDeviceRefs finds all other references to the device referenced by deviceMountPath; returns a list of paths.
+func (plugin *cascadeDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
+	mounter := plugin.host.GetMounter(plugin.GetPluginName())
+	return mount.GetMountRefs(mounter, deviceMountPath)
+}
+
+// MountDevice mounts device to global mount point.
+func (attacher *cascadeDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+	mounter := attacher.host.GetMounter(cascadeDiskPluginName)
+	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
+				glog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err)
+				return err
+			}
+			notMnt = true
+		} else {
+			return err
+		}
+	}
+
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		glog.Errorf("Cascade attacher: MountDevice failed to get volume source. err: %s", err)
+		return err
+	}
+
+	options := []string{}
+
+	if notMnt {
+		diskMounter := volumehelper.NewSafeFormatAndMountFromHost(cascadeDiskPluginName, attacher.host)
+		mountOptions := volume.MountOptionFromSpec(spec)
+		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
+		if err != nil {
+			os.Remove(deviceMountPath)
+			return err
+		}
+		glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v",
+			spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options)
+	}
+	return nil
+}
+
+type cascadeDiskDetacher struct {
+	mounter      mount.Interface
+	cascadeDisks cascade.Disks
+}
+
+var _ volume.Detacher = &cascadeDiskDetacher{}
+
+// NewDetacher returns the detacher associated with the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) NewDetacher() (volume.Detacher, error) {
+	cascadeCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
+	if err != nil {
+		glog.Errorf("Cascade attacher: NewDetacher failed to get cloud provider. err: %s", err)
+		return nil, err
+	}
+
+	return &cascadeDiskDetacher{
+		mounter:      plugin.host.GetMounter(plugin.GetPluginName()),
+		cascadeDisks: cascadeCloud,
+	}, nil
+}
+
+// Detach detaches the given device from the given host.
+func (detacher *cascadeDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
+	hostName := string(nodeName)
+	diskID := path.Base(deviceMountPath)
+	attached, err := detacher.cascadeDisks.DiskIsAttached(diskID, nodeName)
+	if err != nil {
+		// Log error and continue with detach
+		glog.Errorf(
+			"Error checking if persistent disk (%q) is already attached to current node (%q). "+
+				"Will continue and try detach anyway. err=%v", diskID, hostName, err)
+	}
+
+	if err == nil && !attached {
+		// Volume is already detached from node.
+		glog.V(4).Infof("detach operation was successful. persistent disk %q is already detached "+
+			"from node %q.", diskID, hostName)
+		return nil
+	}
+
+	if err := detacher.cascadeDisks.DetachDisk(diskID, nodeName); err != nil {
+		glog.Errorf("Error detaching volume %q: %v", diskID, err)
+		return err
+	}
+	return nil
+}
+
+// WaitForDetach waits for the devicePath to become unavailable.
+func (detacher *cascadeDiskDetacher) WaitForDetach(devicePath string, timeout time.Duration) error {
+	ticker := time.NewTicker(checkSleepDuration)
+	defer ticker.Stop()
+	timer := time.NewTimer(timeout)
+	defer timer.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			glog.V(4).Infof("Checking device %q is detached.", devicePath)
+			if pathExists, err := volumeutil.PathExists(devicePath); err != nil {
+				return fmt.Errorf("Error checking if device path exists: %v", err)
+			} else if !pathExists {
+				return nil
+			}
+		case <-timer.C:
+			return fmt.Errorf("Timeout reached; Device %v is still attached", devicePath)
+		}
+	}
+}
+
+// UnmountDevice unmounts the disk specified by the device mount path.
+func (detacher *cascadeDiskDetacher) UnmountDevice(deviceMountPath string) error {
+	return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
+}
diff --git a/pkg/volume/cascade_disk/azure_disk_util.go b/pkg/volume/cascade_disk/azure_disk_util.go
new file mode 100644
index 0000000..036b420
--- /dev/null
+++ b/pkg/volume/cascade_disk/azure_disk_util.go
@@ -0,0 +1,136 @@
+package cascade_disk
+
+import (
+	"io/ioutil"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+
+	"github.com/golang/glog"
+)
+
+const (
+	environmentFilePath = "/etc/kubernetes/environment"
+	diskPath            = "/dev/disk/cloud/"
+	sysPath             = "/sys/bus/scsi/devices"
+	lunIndex            = 3
+	maxOsScsiHostNo     = 3
+	vendor              = "vendor"
+	msftVendor          = "MSFT"
+	model               = "model"
+	vhdModel            = "VIRTUAL DISK"
+	block               = "block"
+)
+
+
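+// findAzureVolume maps an Azure LUN to the device path of the corresponding disk attached to this node.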
+func findAzureVolume(lun int) (device string, err error) {
+	azureDisks := listAzureDiskPath()
+	return findDiskByLunWithConstraint(lun, azureDisks)
+}
+
+// List all the devices that are used by azure.
+// All these devices are the symbolic link under /dev/disk/cloud/
+// eg. azure_resource -> ../../sdb
+func listAzureDiskPath() []string {
+	var azureDiskList []string
+	if dirs, err := ioutil.ReadDir(diskPath); err == nil {
+		for _, f := range dirs {
+			name := f.Name()
+			diskPath := diskPath + name
+			if link, linkErr := os.Readlink(diskPath); linkErr == nil {
+				sd := link[(strings.LastIndex(link, "/") + 1):]
+				azureDiskList = append(azureDiskList, sd)
+			}
+		}
+	}
+	glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList)
+	return azureDiskList
+}
+
+// The Azure attach/detach APIs do not return the device path name of the disk (e.g. /dev/sde);
+// instead they return the LUN of the disk being attached/detached.
+// This function maps that LUN to the device path name of the device attached to the current node.
+// Reference: https://github.com/kubernetes/kubernetes/blob/master/pkg/volume/azure_dd/azure_common_linux.go#L69
+//            https://docs.microsoft.com/en-us/azure/virtual-machines/linux/troubleshoot-device-names-problems
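+// For example (illustrative, assuming the layout described above): a data disk attached at LUN 1 may appear as
+// /sys/bus/scsi/devices/5:0:0:1, whose block/ subdirectory names the device (for example sdc), which this
+// function then returns as /dev/sdc.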
+func findDiskByLunWithConstraint(lun int, azureDisks []string) (string, error) {
+	var err error
+	if dirs, err := ioutil.ReadDir(sysPath); err == nil {
+		for _, f := range dirs {
+			name := f.Name()
+			// look for path like /sys/bus/scsi/devices/3:0:0:1
+			arr := strings.Split(name, ":")
+			if len(arr) < 4 {
+				continue
+			}
+			if len(azureDisks) == 0 {
+				glog.V(4).Infof("/dev/disk/cloud is not populated, now try to parse %v directly", name)
+				target, err := strconv.Atoi(arr[0])
+				if err != nil {
+					glog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err)
+					continue
+				}
+				// as observed, targets 0-3 are used by OS disks. Skip them
+				if target <= maxOsScsiHostNo {
+					continue
+				}
+			}
+
+			// extract LUN from the path.
+			// LUN is the last index of the array, i.e. 1 in /sys/bus/scsi/devices/3:0:0:1
+			l, err := strconv.Atoi(arr[lunIndex])
+			if err != nil {
+				// unknown path format, continue to read the next one
+				glog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[lunIndex], name, err)
+				continue
+			}
+			if lun == l {
+				// find the matching LUN
+				// read vendor and model to ensure it is a VHD disk
+				vendorPath := path.Join(sysPath, name, vendor)
+				vendorBytes, err := ioutil.ReadFile(vendorPath)
+				if err != nil {
+					glog.Errorf("failed to read device vendor, err: %v", err)
+					continue
+				}
+				vendor := strings.TrimSpace(string(vendorBytes))
+				if strings.ToUpper(vendor) != msftVendor {
+					glog.V(4).Infof("vendor doesn't match VHD, got %s", vendor)
+					continue
+				}
+
+				modelPath := path.Join(sysPath, name, model)
+				modelBytes, err := ioutil.ReadFile(modelPath)
+				if err != nil {
+					glog.Errorf("failed to read device model, err: %v", err)
+					continue
+				}
+				model := strings.TrimSpace(string(modelBytes))
+				if strings.ToUpper(model) != vhdModel {
+					glog.V(4).Infof("model doesn't match VHD, got %s", model)
+					continue
+				}
+
+				// find a disk, validate name
+				dir := path.Join(sysPath, name, block)
+				if dev, err := ioutil.ReadDir(dir); err == nil {
+					found := false
+					devName := dev[0].Name()
+					for _, diskName := range azureDisks {
+						glog.V(12).Infof("azureDisk - validating disk %q with sys disk %q", devName, diskName)
+						if devName == diskName {
+							found = true
+							break
+						}
+					}
+					if !found {
+						return "/dev/" + devName, nil
+					}
+				}
+			}
+		}
+	}
+	return "", err
+}
diff --git a/pkg/volume/cascade_disk/cascade_disk.go b/pkg/volume/cascade_disk/cascade_disk.go
new file mode 100644
index 0000000..a25f224
--- /dev/null
+++ b/pkg/volume/cascade_disk/cascade_disk.go
@@ -0,0 +1,391 @@
+package cascade_disk
+
+import (
+	"fmt"
+	"os"
+	"path"
+
+	"github.com/golang/glog"
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/kubernetes/pkg/util/mount"
+	utilstrings "k8s.io/kubernetes/pkg/util/strings"
+	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
+	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+)
+
+// This is the primary entrypoint for volume plugins.
+func ProbeVolumePlugins() []volume.VolumePlugin {
+	return []volume.VolumePlugin{&cascadeDiskPlugin{}}
+}
+
+type cascadeDiskPlugin struct {
+	host volume.VolumeHost
+}
+
+var _ volume.VolumePlugin = &cascadeDiskPlugin{}
+var _ volume.PersistentVolumePlugin = &cascadeDiskPlugin{}
+var _ volume.DeletableVolumePlugin = &cascadeDiskPlugin{}
+var _ volume.ProvisionableVolumePlugin = &cascadeDiskPlugin{}
+
+const (
+	cascadeDiskPluginName = "kubernetes.io/vke-disk"
+)
+
+// Init initializes the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) Init(host volume.VolumeHost) error {
+	plugin.host = host
+	return nil
+}
+
+// GetPluginName returns the name of the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) GetPluginName() string {
+	return cascadeDiskPluginName
+}
+
+// GetVolumeName returns the name of the volume which is the diskID in our case.
+func (plugin *cascadeDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		glog.Errorf("Cascade volume plugin: GetVolumeName failed to get volume source")
+		return "", err
+	}
+
+	return volumeSource.DiskID, nil
+}
+
+// CanSupport specifies whether the Cascade volume plugin can support the specified resource type.
+// The Cascade plugin only supports persistent volumes and volumes that specify a Cascade disk source.
+func (plugin *cascadeDiskPlugin) CanSupport(spec *volume.Spec) bool {
+	return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CascadeDisk != nil) ||
+		(spec.Volume != nil && spec.Volume.CascadeDisk != nil)
+}
+
+// RequiresRemount specifies whether remount is required for the disk.
+func (plugin *cascadeDiskPlugin) RequiresRemount() bool {
+	return false
+}
+
+// SupportsMountOption specifies whether the Cascade volume plugin supports the mount operation.
+func (plugin *cascadeDiskPlugin) SupportsMountOption() bool {
+	return true
+}
+
+// SupportsBulkVolumeVerification specifies whether bulk volume verification is supported.
+func (plugin *cascadeDiskPlugin) SupportsBulkVolumeVerification() bool {
+	return false
+}
+
+// NewMounter returns the mounter associated with the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod,
+	_ volume.VolumeOptions) (volume.Mounter, error) {
+	return plugin.newMounterInternal(spec, pod.UID, &CascadeDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
+}
+
+// NewUnmounter returns the unmounter associated with the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
+	return plugin.newUnmounterInternal(volName, podUID, &CascadeDiskUtil{},
+		plugin.host.GetMounter(plugin.GetPluginName()))
+}
+
+func (plugin *cascadeDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager,
+	mounter mount.Interface) (volume.Mounter, error) {
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		glog.Errorf("Cascade volume plugin: newMounterInternal failed to get volume source")
+		return nil, err
+	}
+
+	diskID := volumeSource.DiskID
+	fsType := volumeSource.FSType
+
+	return &cascadeDiskMounter{
+		cascadeDisk: &cascadeDisk{
+			podUID:  podUID,
+			volName: spec.Name(),
+			diskID:  diskID,
+			manager: manager,
+			mounter: mounter,
+			plugin:  plugin,
+		},
+		fsType:      fsType,
+		diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
+}
+
+func (plugin *cascadeDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager,
+	mounter mount.Interface) (volume.Unmounter, error) {
+	return &cascadeDiskUnmounter{
+		&cascadeDisk{
+			podUID:  podUID,
+			volName: volName,
+			manager: manager,
+			mounter: mounter,
+			plugin:  plugin,
+		}}, nil
+}
+
+// ConstructVolumeSpec constructs a Cascade volume spec based on the name and mount path.
+func (plugin *cascadeDiskPlugin) ConstructVolumeSpec(volumeSpecName, mountPath string) (*volume.Spec, error) {
+	mounter := plugin.host.GetMounter(plugin.GetPluginName())
+	pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
+	diskID, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
+	if err != nil {
+		return nil, err
+	}
+
+	cascadeDisk := &v1.Volume{
+		Name: volumeSpecName,
+		VolumeSource: v1.VolumeSource{
+			CascadeDisk: &v1.CascadeDiskVolumeSource{
+				DiskID: diskID,
+			},
+		},
+	}
+	return volume.NewSpecFromVolume(cascadeDisk), nil
+}
+
+// Abstract interface to disk operations.
+type diskManager interface {
+	// CreateVolume provisions a new Cascade persistent disk and returns its ID, size in GB, and filesystem type.
+	CreateVolume(provisioner *cascadeDiskProvisioner) (diskID string, volumeSizeGB int, fstype string, err error)
+	// DeleteVolume deletes the Cascade persistent disk referenced by the deleter.
+	DeleteVolume(deleter *cascadeDiskDeleter) error
+}
+
+// cascadeDisk volumes are disk resources attached to the kubelet's host machine and exposed to the pod.
+type cascadeDisk struct {
+	volName string
+	podUID  types.UID
+	diskID  string
+	fsType  string
+	manager diskManager
+	mounter mount.Interface
+	plugin  *cascadeDiskPlugin
+	volume.MetricsNil
+}
+
+var _ volume.Mounter = &cascadeDiskMounter{}
+
+type cascadeDiskMounter struct {
+	*cascadeDisk
+	fsType      string
+	diskMounter *mount.SafeFormatAndMount
+}
+
+// GetAttributes returns the attributes associated with a Cascade disk.
+func (b *cascadeDiskMounter) GetAttributes() volume.Attributes {
+	return volume.Attributes{
+		SupportsSELinux: true,
+	}
+}
+
+// CanMount checks prior to mount operations to verify that the required components (binaries, etc.) to mount the
+// volume are available on the underlying node. If not, it returns an error.
+func (b *cascadeDiskMounter) CanMount() error {
+	return nil
+}
+
+// SetUp attaches the disk and bind mounts to the volume path.
+func (b *cascadeDiskMounter) SetUp(fsGroup *int64) error {
+	return b.SetUpAt(b.GetPath(), fsGroup)
+}
+
+// SetUpAt attaches the disk and bind mounts to the volume path.
+func (b *cascadeDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
+	glog.V(4).Infof("Cascade Persistent Disk setup %s to %s", b.diskID, dir)
+
+	// TODO: handle failed mounts here.
+	notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
+	if err != nil && !os.IsNotExist(err) {
+		glog.Errorf("cannot validate mount point: %s %v", dir, err)
+		return err
+	}
+	if !notmnt {
+		return nil
+	}
+
+	if err := os.MkdirAll(dir, 0750); err != nil {
+		glog.Errorf("mkdir failed on disk %s (%v)", dir, err)
+		return err
+	}
+
+	options := []string{"bind"}
+
+	// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
+	globalPDPath := makeGlobalPDPath(b.plugin.host, b.diskID)
+	glog.V(4).Infof("attempting to mount %s", dir)
+
+	err = b.mounter.Mount(globalPDPath, dir, "", options)
+	if err != nil {
+		notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
+		if mntErr != nil {
+			glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
+			return err
+		}
+		if !notmnt {
+			if mntErr = b.mounter.Unmount(dir); mntErr != nil {
+				glog.Errorf("Failed to unmount: %v", mntErr)
+				return err
+			}
+			notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
+			if mntErr != nil {
+				glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
+				return err
+			}
+			if !notmnt {
+				glog.Errorf("%s is still mounted, despite call to unmount().  Will try again next sync loop.",
+					b.GetPath())
+				return err
+			}
+		}
+		os.Remove(dir)
+		glog.Errorf("Mount of disk %s failed: %v", dir, err)
+		return err
+	}
+	volume.SetVolumeOwnership(b, fsGroup)
+
+	return nil
+}
+
+var _ volume.Unmounter = &cascadeDiskUnmounter{}
+
+type cascadeDiskUnmounter struct {
+	*cascadeDisk
+}
+
+// TearDown unmounts the bind mount, and detaches the disk only if the disk resource was the last reference to that
+// disk on the kubelet.
+func (c *cascadeDiskUnmounter) TearDown() error {
+	return c.TearDownAt(c.GetPath())
+}
+
+// TearDownAt unmounts the bind mount, and detaches the disk only if the disk resource was the last reference to that
+// disk on the kubelet.
+func (c *cascadeDiskUnmounter) TearDownAt(dir string) error {
+	return util.UnmountPath(dir, c.mounter)
+}
+
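+// makeGlobalPDPath returns the global mount path for a disk, typically
+// <kubelet plugin dir>/kubernetes.io/vke-disk/mounts/<diskID>; the exact root depends on the kubelet configuration.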
+func makeGlobalPDPath(host volume.VolumeHost, diskID string) string {
+	return path.Join(host.GetPluginDir(cascadeDiskPluginName), mount.MountsInGlobalPDPath, diskID)
+}
+
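+// GetPath returns the per-pod directory under which this volume is mounted.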
+func (cd *cascadeDisk) GetPath() string {
+	name := cascadeDiskPluginName
+	return cd.plugin.host.GetPodVolumeDir(cd.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cd.volName)
+}
+
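+// GetAccessModes returns the access modes supported by Cascade persistent disks; only ReadWriteOnce is supported.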
+func (plugin *cascadeDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
+	return []v1.PersistentVolumeAccessMode{
+		v1.ReadWriteOnce,
+	}
+}
+
+type cascadeDiskDeleter struct {
+	*cascadeDisk
+}
+
+var _ volume.Deleter = &cascadeDiskDeleter{}
+
+// NewDeleter returns the deleter associated with the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
+	return plugin.newDeleterInternal(spec, &CascadeDiskUtil{})
+}
+
+func (plugin *cascadeDiskPlugin) newDeleterInternal(spec *volume.Spec, manager diskManager) (volume.Deleter, error) {
+	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.CascadeDisk == nil {
+		return nil, fmt.Errorf("spec.PersistentVolumeSource.CascadeDisk is nil")
+	}
+	return &cascadeDiskDeleter{
+		&cascadeDisk{
+			volName: spec.Name(),
+			diskID:  spec.PersistentVolume.Spec.CascadeDisk.DiskID,
+			manager: manager,
+			plugin:  plugin,
+		}}, nil
+}
+
+func (r *cascadeDiskDeleter) Delete() error {
+	return r.manager.DeleteVolume(r)
+}
+
+type cascadeDiskProvisioner struct {
+	*cascadeDisk
+	options volume.VolumeOptions
+}
+
+var _ volume.Provisioner = &cascadeDiskProvisioner{}
+
+// NewProvisioner returns the provisioner associated with the Cascade volume plugin.
+func (plugin *cascadeDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
+	return plugin.newProvisionerInternal(options, &CascadeDiskUtil{})
+}
+
+func (plugin *cascadeDiskPlugin) newProvisionerInternal(options volume.VolumeOptions,
+	manager diskManager) (volume.Provisioner, error) {
+	return &cascadeDiskProvisioner{
+		cascadeDisk: &cascadeDisk{
+			manager: manager,
+			plugin:  plugin,
+		},
+		options: options,
+	}, nil
+}
+
+// Provision provisions the persistent volume by making a CreateDisk call to Cascade Controller.
+func (p *cascadeDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
+	if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
+		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported",
+			p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
+	}
+
+	diskID, sizeGB, fstype, err := p.manager.CreateVolume(p)
+	if err != nil {
+		return nil, err
+	}
+
+	if fstype == "" {
+		fstype = "ext4"
+	}
+
+	pv := &v1.PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   p.options.PVName,
+			Labels: map[string]string{},
+			Annotations: map[string]string{
+				volumehelper.VolumeDynamicallyCreatedByKey: "vke-volume-dynamic-provisioner",
+			},
+		},
+		Spec: v1.PersistentVolumeSpec{
+			PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
+			AccessModes:                   p.options.PVC.Spec.AccessModes,
+			Capacity: v1.ResourceList{
+				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
+			},
+			PersistentVolumeSource: v1.PersistentVolumeSource{
+				CascadeDisk: &v1.CascadeDiskVolumeSource{
+					DiskID: diskID,
+					FSType: fstype,
+				},
+			},
+			MountOptions: p.options.MountOptions,
+		},
+	}
+	if len(p.options.PVC.Spec.AccessModes) == 0 {
+		pv.Spec.AccessModes = p.plugin.GetAccessModes()
+	}
+
+	return pv, nil
+}
+
+func getVolumeSource(spec *volume.Spec) (*v1.CascadeDiskVolumeSource, bool, error) {
+	if spec.Volume != nil && spec.Volume.CascadeDisk != nil {
+		return spec.Volume.CascadeDisk, spec.ReadOnly, nil
+	} else if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CascadeDisk != nil {
+		return spec.PersistentVolume.Spec.CascadeDisk, spec.ReadOnly, nil
+	}
+
+	return nil, false, fmt.Errorf("Spec does not reference a Cascade disk type")
+}
\ No newline at end of file
diff --git a/pkg/volume/cascade_disk/cascade_util.go b/pkg/volume/cascade_disk/cascade_util.go
new file mode 100644
index 0000000..2604b89
--- /dev/null
+++ b/pkg/volume/cascade_disk/cascade_util.go
@@ -0,0 +1,201 @@
+package cascade_disk
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/api/core/v1"
+	"k8s.io/kubernetes/pkg/cloudprovider"
+	"k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
+	"k8s.io/kubernetes/pkg/volume"
+	volumeutil "k8s.io/kubernetes/pkg/volume/util"
+)
+
+const (
+	checkSleepDuration = time.Second
+)
+
+type CascadeDiskUtil struct{}
+
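+// verifyDevicePath returns the given path if it already exists on the node, or an empty string if it does not
+// exist yet so that the caller can retry.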
+func verifyDevicePath(path string) (string, error) {
+	if pathExists, err := volumeutil.PathExists(path); err != nil {
+		return "", fmt.Errorf("Error checking if path exists: %v", err)
+	} else if pathExists {
+		return path, nil
+	}
+
+	glog.V(4).Infof("verifyDevicePath: path does not exist yet")
+	return "", nil
+}
+
+// getDiskByIdPath maps the device path reported for a VKE disk to the actual device path on this node.
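+// For example (illustrative only): on Azure the attach call reports a LUN such as "2", which is resolved to a
+// device path like /dev/sdc; on other providers a /dev/disk/by-id symlink is resolved to its target, such as
+// /dev/nvme1n1.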
+func getDiskByIdPath(devicePath string) string {
+	provider, err := getProvider()
+	if err != nil {
+		glog.Warningf("error getting provider name from %s: %v", environmentFilePath, err)
+		return devicePath
+	}
+
+	switch provider {
+	case "azure":
+		// If devicePath is an integer, it is the LUN returned by the Azure attach disk call.
+		if diskLun, err := strconv.Atoi(devicePath); err == nil {
+			azDevicePath, err := findAzureVolume(diskLun)
+			if err != nil {
+				glog.Warningf("error looking for azure volume %q: %v", devicePath, err)
+			} else if azDevicePath != "" {
+				devicePath = azDevicePath
+			}
+		} else {
+			glog.Warningf("The device path for azure expected to be an integer but it is %s. Error : %v", devicePath, err)
+		}
+	default:
+		nvmePath, err := findNvmeVolume(devicePath)
+		if err != nil {
+			glog.Warningf("error looking for nvme volume %q: %v", devicePath, err)
+		} else if nvmePath != "" {
+			devicePath = nvmePath
+		}
+	}
+	return devicePath
+}
+
+// CreateVolume creates a Cascade persistent disk.
+func (util *CascadeDiskUtil) CreateVolume(p *cascadeDiskProvisioner) (diskID string, capacityGB int, fstype string,
+	err error) {
+	cloud, err := getCloudProvider(p.plugin.host.GetCloudProvider())
+	if err != nil {
+		glog.Errorf("Cascade Util: CreateVolume failed to get cloud provider. Error [%v]", err)
+		return "", 0, "", err
+	}
+
+	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
+	volSizeBytes := capacity.Value()
+	// Cascade works with GiB; convert the requested size to GiB, rounding up.
+	volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
+	name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 255)
+	volumeOptions := &cascade.VolumeOptions{
+		CapacityGB: volSizeGB,
+		Tags:       *p.options.CloudTags,
+		Name:       name,
+	}
+
+	// Enable encryption by default if it is not specified in the storage class parameters.
+	volumeOptions.Encrypted = true
+	for parameter, value := range p.options.Parameters {
+		switch strings.ToLower(parameter) {
+		case "flavor":
+			volumeOptions.Flavor = value
+		case "encrypted":
+			volumeOptions.Encrypted, err = strconv.ParseBool(value)
+			if err != nil {
+				glog.Errorf("Cascade Util: invalid value %q for encryption of volume plugin %s.", value,
+					p.plugin.GetPluginName())
+				return "", 0, "", fmt.Errorf("invalid encrypted boolean value %q, must be true or false: %v", value, err)
+			}
+		case volume.VolumeParameterFSType:
+			fstype = value
+			glog.V(4).Infof("Cascade Util: Setting fstype to %s", fstype)
+		default:
+			glog.Errorf("Cascade Util: invalid option %s for volume plugin %s.", parameter,
+				p.plugin.GetPluginName())
+			return "", 0, "", fmt.Errorf("Cascade Util: invalid option %s for volume plugin %s.", parameter,
+				p.plugin.GetPluginName())
+		}
+	}
+
+	diskID, err = cloud.CreateDisk(volumeOptions)
+	if err != nil {
+		glog.Errorf("Cascade Util: failed to CreateDisk. Error [%v]", err)
+		return "", 0, "", err
+	}
+
+	glog.V(4).Infof("Successfully created Cascade persistent disk %s", name)
+	return diskID, volSizeGB, "", nil
+}
+
+// DeleteVolume deletes a Cascade volume.
+func (util *CascadeDiskUtil) DeleteVolume(disk *cascadeDiskDeleter) error {
+	cloud, err := getCloudProvider(disk.plugin.host.GetCloudProvider())
+	if err != nil {
+		glog.Errorf("Cascade Util: DeleteVolume failed to get cloud provider. Error [%v]", err)
+		return err
+	}
+
+	if err = cloud.DeleteDisk(disk.diskID); err != nil {
+		glog.Errorf("Cascade Util: failed to DeleteDisk for diskID %s. Error [%v]", disk.diskID, err)
+		return err
+	}
+
+	glog.V(4).Infof("Successfully deleted Cascade persistent disk %s", disk.diskID)
+	return nil
+}
+
+func getCloudProvider(cloud cloudprovider.Interface) (*cascade.CascadeCloud, error) {
+	if cloud == nil {
+		glog.Errorf("Cascade Util: Cloud provider not initialized properly")
+		return nil, fmt.Errorf("Cascade Util: Cloud provider not initialized properly")
+	}
+
+	cc, ok := cloud.(*cascade.CascadeCloud)
+	if !ok {
+		glog.Errorf("Invalid cloud provider: expected Cascade")
+		return nil, fmt.Errorf("Invalid cloud provider: expected Cascade")
+	}
+	return cc, nil
+}
+
+// findNvmeVolume looks for the nvme volume with the specified name
+// It follows the symlink (if it exists) and returns the absolute path to the device
+func findNvmeVolume(findName string) (device string, err error) {
+	stat, err := os.Lstat(findName)
+	if err != nil {
+		if os.IsNotExist(err) {
+			glog.V(6).Infof("nvme path not found %q", findName)
+			return "", nil
+		}
+		return "", fmt.Errorf("error getting stat of %q: %v", findName, err)
+	}
+
+	if stat.Mode()&os.ModeSymlink != os.ModeSymlink {
+		glog.Warningf("nvme file %q found, but was not a symlink", findName)
+		return "", nil
+	}
+
+	// Find the target, resolving to an absolute path
+	// For example, /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23 -> ../../nvme2n1
+	resolved, err := filepath.EvalSymlinks(findName)
+	if err != nil {
+		return "", fmt.Errorf("error reading target of symlink %q: %v", findName, err)
+	}
+
+	if !strings.HasPrefix(resolved, "/dev") {
+		return "", fmt.Errorf("resolved symlink for %q was unexpected: %q", findName, resolved)
+	}
+
+	return resolved, nil
+}
+
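+// getProvider reads the cloud provider name from the VKE environment file. The file is expected to contain a
+// line of the form PROVIDER=<name> (for example, PROVIDER=azure); if no such line is found, an empty provider
+// name is returned and the caller falls back to the default (non-Azure) behavior.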
+func getProvider() (provider string, err error) {
+	file, err := os.Open(environmentFilePath)
+	if err != nil {
+		return "", err
+	}
+	defer file.Close()
+
+	scanner := bufio.NewScanner(file)
+
+	for scanner.Scan() {
+		lineContent := scanner.Text()
+		if strings.Contains(lineContent, "PROVIDER=") {
+			return strings.SplitAfter(lineContent, "=")[1], err
+		}
+	}
+	return "", err
+}
diff --git a/plugin/pkg/admission/persistentvolume/label/admission.go b/plugin/pkg/admission/persistentvolume/label/admission.go
index 86e1921..bf2912b 100644
--- a/plugin/pkg/admission/persistentvolume/label/admission.go
+++ b/plugin/pkg/admission/persistentvolume/label/admission.go
@@ -27,6 +27,7 @@ import (
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
+	"k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
 	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@@ -50,6 +51,7 @@ type persistentVolumeLabel struct {
 	ebsVolumes       aws.Volumes
 	cloudConfig      []byte
 	gceCloudProvider *gce.GCECloud
+	cascadeDisks     cascade.Disks
 }
 
 var _ admission.MutationInterface = &persistentVolumeLabel{}
@@ -102,6 +104,13 @@ func (l *persistentVolumeLabel) Admit(a admission.Attributes) (err error) {
 		}
 		volumeLabels = labels
 	}
+	if volume.Spec.CascadeDisk != nil {
+		labels, err := l.findCascadeDiskLabels(volume)
+		if err != nil {
+			return admission.NewForbidden(a, fmt.Errorf("error querying Cascade volume %s: %v", volume.Spec.CascadeDisk.DiskID, err))
+		}
+		volumeLabels = labels
+	}
 
 	if len(volumeLabels) != 0 {
 		if volume.Labels == nil {
@@ -214,3 +223,48 @@ func (l *persistentVolumeLabel) getGCECloudProvider() (*gce.GCECloud, error) {
 	}
 	return l.gceCloudProvider, nil
 }
+
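+// findCascadeDiskLabels returns the labels for a Cascade persistent disk by querying the Cascade cloud provider.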
+func (l *persistentVolumeLabel) findCascadeDiskLabels(volume *api.PersistentVolume) (map[string]string, error) {
+	// Ignore any volumes that are being provisioned
+	if volume.Spec.CascadeDisk.DiskID == vol.ProvisionedVolumeName {
+		return nil, nil
+	}
+	cascadeDisks, err := l.getCascadeDisks()
+	if err != nil {
+		return nil, err
+	}
+	if cascadeDisks == nil {
+		return nil, fmt.Errorf("unable to build Cascade cloud provider for volumes")
+	}
+
+	labels, err := cascadeDisks.GetVolumeLabels(volume.Spec.CascadeDisk.DiskID)
+	if err != nil {
+		return nil, err
+	}
+
+	return labels, nil
+}
+
+// getCascadeDisks returns the Cascade Disks interface
+func (l *persistentVolumeLabel) getCascadeDisks() (cascade.Disks, error) {
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+
+	if l.cascadeDisks == nil {
+		var cloudConfigReader io.Reader
+		if len(l.cloudConfig) > 0 {
+			cloudConfigReader = bytes.NewReader(l.cloudConfig)
+		}
+		cloudProvider, err := cloudprovider.GetCloudProvider("vke", cloudConfigReader)
+		if err != nil || cloudProvider == nil {
+			return nil, err
+		}
+		provider, ok := cloudProvider.(*cascade.CascadeCloud)
+		if !ok {
+			// GetCloudProvider has gone very wrong
+			return nil, fmt.Errorf("error retrieving Cascade cloud provider")
+		}
+		l.cascadeDisks = provider
+	}
+	return l.cascadeDisks, nil
+}
diff --git a/plugin/pkg/admission/vke/BUILD b/plugin/pkg/admission/vke/BUILD
new file mode 100644
index 0000000..7d66036
--- /dev/null
+++ b/plugin/pkg/admission/vke/BUILD
@@ -0,0 +1,61 @@
+package(default_visibility = ["//visibility:public"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+    "go_test",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = ["admission.go"],
+    importpath = "k8s.io/kubernetes/plugin/pkg/admission/vke",
+    deps = [
+        "//pkg/apis/core:go_default_library",
+        "//pkg/apis/extensions:go_default_library",
+        "//pkg/apis/extensions/v1beta1:go_default_library",
+        "//pkg/apis/rbac:go_default_library",
+        "//pkg/registry/rbac:go_default_library",
+        "//pkg/security/podsecuritypolicy:go_default_library",
+        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library"
+        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["admission_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//pkg/apis/core:go_default_library",
+        "//pkg/apis/extensions:go_default_library",
+        "//pkg/apis/rbac:go_default_library",
+        "//pkg/registry/rbac:go_default_library",
+        "//pkg/security/podsecuritypolicy:go_default_library",
+        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/github.com/stretchr/testify/assert:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+)
\ No newline at end of file
diff --git a/plugin/pkg/admission/vke/admission.go b/plugin/pkg/admission/vke/admission.go
new file mode 100644
index 0000000..192f384
--- /dev/null
+++ b/plugin/pkg/admission/vke/admission.go
@@ -0,0 +1,587 @@
+package vke
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/golang/glog"
+	"k8s.io/api/extensions/v1beta1"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/apiserver/pkg/admission"
+	api "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/kubernetes/pkg/apis/extensions"
+	policybeta "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+	"k8s.io/kubernetes/pkg/apis/rbac"
+	rbacregistry "k8s.io/kubernetes/pkg/registry/rbac"
+	"k8s.io/kubernetes/pkg/security/podsecuritypolicy"
+)
+
+const (
+	// PluginName indicates name of admission plugin.
+	PluginName = "VMwareAdmissionController"
+
+	systemUnsecuredUser      = "system:unsecured"
+	systemNodesGroup         = "system:nodes"
+	privilegedNamespace      = "vke-system"
+	privilegedServiceAccount = "system:serviceaccount:" + privilegedNamespace + ":"
+	reservedPrefix           = "vke"
+	reservedTolerationKey    = "Dedicated"
+	reservedTolerationValue  = "Master"
+	masterNodePrefix         = "master"
+	etcSslCerts              = "/etc/ssl/certs"
+)
+
+// Register registers a plugin.
+func Register(plugins *admission.Plugins) {
+	plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) {
+		return NewVMwareAdmissionController(config)
+	})
+}
+
+// vmwareAdmissionController is an implementation of admission.Interface.
+// It restricts access to VKE namespace for users and prevents the users from creating privileged pods.
+type vmwareAdmissionController struct {
+	psp             *extensions.PodSecurityPolicy
+	strategyFactory podsecuritypolicy.StrategyFactory
+	privilegedGroup string
+	clusterID       string
+}
+
+// vmwareAdmissionControllerConfig holds config data for VMwareAdmissionController.
+type vmwareAdmissionControllerConfig struct {
+	ClusterID             string `yaml:"clusterID"`
+	PrivilegedGroup       string `yaml:"privilegedGroup"`
+	PodSecurityPolicyFile string `yaml:"podSecurityPolicyFile"`
+}
+
+// AdmissionConfig holds config data for admission controllers.
+type AdmissionConfig struct {
+	VMwareAdmissionController vmwareAdmissionControllerConfig `yaml:"vmwareAdmissionController"`
+}
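+
+// An illustrative admission configuration for this plugin (all values below are examples only):
+//
+//   vmwareAdmissionController:
+//     privilegedGroup: example-privileged-group
+//     podSecurityPolicyFile: /path/to/psp.yaml
+//     clusterID: example-cluster-id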
+
+var _ admission.MutationInterface = &vmwareAdmissionController{}
+
+// Admit makes an admission decision based on the request attributes.
+// Note: We implement the mutation interface for this admission controller, which means we have the ability to modify
+// the resources. But we do not modify them because we just need to validate the requests to either let them through or
+// block them. We have implemented this as a mutation interface because we need access to oldObjects during update
+// operations to find out what values are being modified. For node update operation, the old and new objects are always
+// identical in the validation phase (possibly due to some initializer modifying it). So, we are performing our
+// validation during the mutation phase.
+func (vac *vmwareAdmissionController) Admit(a admission.Attributes) (err error) {
+	if isPrivilegedUser(vac, a) {
+		return nil
+	}
+
+	if isSystemUnsecuredUser(a) {
+		return validateSystemUnsecuredUser(vac, a)
+	}
+
+	if isCertificateFromNode(a) {
+		return validateCertificateFromNode(a)
+	}
+
+	if isPrivilegedServiceAccount(a) {
+		return validatePrivilegedServiceAccount(a)
+	}
+
+	if isPrivilegedNamespace(a) {
+		return admission.NewForbidden(a,
+			fmt.Errorf("%s validation failed: cannot modify resources in namespace %s", PluginName, a.GetNamespace()))
+	}
+
+	switch a.GetResource().GroupResource() {
+	case api.Resource("pods"):
+		err = validatePods(vac, a)
+	case api.Resource("nodes"):
+		err = validateNodes(a)
+	case rbac.Resource("clusterroles"):
+		err = validateClusterRoles(a)
+	case rbac.Resource("clusterrolebindings"):
+		err = validateClusterRoleBindings(a)
+	}
+
+	return err
+}
+
+// Handles returns true if this admission controller can handle the given operation
+// where operation can be one of CREATE, UPDATE, DELETE, or CONNECT.
+func (vac *vmwareAdmissionController) Handles(operation admission.Operation) bool {
+	return true
+}
+
+// NewVMwareAdmissionController creates a new VMwareAdmissionController.
+func NewVMwareAdmissionController(configFile io.Reader) (*vmwareAdmissionController, error) {
+	glog.V(2).Infof("%s is enabled", PluginName)
+	if configFile == nil {
+		glog.Warningf("No config specified for %s. Using default configuration", PluginName)
+		return nil, fmt.Errorf("no config file specified for %s", PluginName)
+	}
+
+	var config AdmissionConfig
+	d := yaml.NewYAMLOrJSONDecoder(configFile, 4096)
+	err := d.Decode(&config)
+	if err != nil {
+		return nil, err
+	}
+
+	// Load PSP from file. If it fails, use default.
+	psp := getPSPFromFile(config.VMwareAdmissionController.PodSecurityPolicyFile)
+	if psp == nil {
+		psp = getDefaultPSP()
+	}
+
+	return &vmwareAdmissionController{
+		psp:             psp,
+		strategyFactory: podsecuritypolicy.NewSimpleStrategyFactory(),
+		privilegedGroup: config.VMwareAdmissionController.PrivilegedGroup,
+		clusterID:       config.VMwareAdmissionController.ClusterID,
+	}, nil
+}
+
+func getDefaultPSP() *extensions.PodSecurityPolicy {
+	return &extensions.PodSecurityPolicy{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "PodSecurityPolicy",
+			APIVersion: "extensions/v1beta1",
+		},
+		Spec: extensions.PodSecurityPolicySpec{
+			Privileged:               false,
+			HostNetwork:              false,
+			HostIPC:                  false,
+			HostPID:                  false,
+			AllowPrivilegeEscalation: false,
+			HostPorts: []extensions.HostPortRange{
+				{1, 65535},
+			},
+			Volumes: []extensions.FSType{
+				"emptyDir",
+				"secret",
+				"downwardAPI",
+				"configMap",
+				"persistentVolumeClaim",
+				"projected",
+				"hostPath",
+			},
+			// We allow /etc/ssl/certs to be mounted in read only mode as a hack to allow Wavefront pods to be deployed.
+			// TODO(ashokc): Once we have support for users to create pods using privileged mode and host path, remove this.
+			AllowedHostPaths: []extensions.AllowedHostPath{
+				{
+					etcSslCerts,
+				},
+			},
+			FSGroup: extensions.FSGroupStrategyOptions{
+				Rule: extensions.FSGroupStrategyRunAsAny,
+			},
+			RunAsUser: extensions.RunAsUserStrategyOptions{
+				Rule: extensions.RunAsUserStrategyRunAsAny,
+			},
+			SELinux: extensions.SELinuxStrategyOptions{
+				Rule: extensions.SELinuxStrategyRunAsAny,
+			},
+			SupplementalGroups: extensions.SupplementalGroupsStrategyOptions{
+				Rule: extensions.SupplementalGroupsStrategyRunAsAny,
+			},
+		},
+	}
+}
+
+func getPSPFromFile(pspFile string) *extensions.PodSecurityPolicy {
+	pspBeta := v1beta1.PodSecurityPolicy{}
+	pspExtensions := extensions.PodSecurityPolicy{}
+
+	if pspFile == "" {
+		glog.V(2).Infof("%s: PSP file not specified, using default PSP", PluginName)
+		return nil
+	}
+
+	pspConfig, err := os.Open(pspFile)
+	if err != nil {
+		glog.V(2).Infof("%s: cannot open PSP file, using default PSP: %v", PluginName, err)
+		return nil
+	}
+
+	// We load the PSP that we read from file into pspBeta because this is the struct to which we can decode yaml to.
+	d := yaml.NewYAMLOrJSONDecoder(pspConfig, 4096)
+	err = d.Decode(&pspBeta)
+	if err != nil {
+		glog.V(2).Infof("%s: cannot decode PSP file, using default PSP: %v", PluginName, err)
+		return nil
+	}
+
+	// We convert pspBeta object into pspExtensions object because this is the one that pod validation uses.
+	err = policybeta.Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(&pspBeta, &pspExtensions, nil)
+	if err != nil {
+		glog.V(2).Infof("%s: cannot convert v1beta1.PSP to extensions.PSP, using default PSP: %v", PluginName, err)
+		return nil
+	}
+
+	return &pspExtensions
+}
+
+func isPrivilegedUser(vac *vmwareAdmissionController, a admission.Attributes) bool {
+	// If the request comes from a user belonging to a privileged group, then we allow it. Only calls from Cascade
+	// controller will belong to this privileged group.
+	groups := a.GetUserInfo().GetGroups()
+	for _, group := range groups {
+		if group == vac.privilegedGroup {
+			return true
+		}
+	}
+
+	return false
+}
+
+func isSystemUnsecuredUser(a admission.Attributes) bool {
+	return a.GetUserInfo().GetName() == systemUnsecuredUser
+}
+
+func validateSystemUnsecuredUser(vac *vmwareAdmissionController, a admission.Attributes) (err error) {
+	// Currently the insecure port 8080 is exposed to only localhost inside the Kubernetes master VMs. So it can be used
+	// only by kube-controller-manager, kube-scheduler and cloud-init script which creates our pods and other resources.
+	// When a call comes on insecure port 8080, Kubernetes assigns them system:unsecured user name. We need to allow
+	// this so that our master components can be started successfully and kube-controller-manager and kube-scheduler can
+	// work as expected.
+	// But this needs to be allowed only inside our privileged namespace. If the request comes to any other namespace,
+	// we need to make it go through our pod validation. This is needed because a user can create a deployment or
+	// replica set which has a privileged pod. Since our admission controller does not look at deployments or replica
+	// sets, we will allow it. The actual pod inside the deployment or replica set will be created by the
+	// controller-manager and if we allow it to create pods in any namespace, then a user can create a privileged pod
+	// totally bypassing our security checks.
+	if !isPrivilegedNamespace(a) && (a.GetResource().GroupResource() == api.Resource("pods")) {
+		return validatePods(vac, a)
+	}
+
+	// For all other resources, allow.
+	return nil
+}
+
+func isCertificateFromNode(a admission.Attributes) bool {
+	// If the request came from a user with group = systemNodesGroup, then we assume that the request comes from a node
+	// which uses a certificate for authentication.
+	groups := a.GetUserInfo().GetGroups()
+	for _, group := range groups {
+		if group == systemNodesGroup {
+			return true
+		}
+	}
+	return false
+}
+
+func validateCertificateFromNode(a admission.Attributes) error {
+	// Block exec operations into pods for nodes. This is needed to block someone from using Kubelet's certificate to
+	// exec into privileged pods running on the master. Other operations with the node certificate like modifying master
+	// node, creating pods on master node, etc. are blocked by the node restriction admission controller.
+	if a.GetResource().GroupResource() == api.Resource("pods") && a.GetOperation() == admission.Connect {
+		return admission.NewForbidden(a,
+			fmt.Errorf("%s validation failed: cannot modify pods in namespace %s", PluginName, a.GetNamespace()))
+	}
+	return nil
+}
+
+func isPrivilegedNamespace(a admission.Attributes) bool {
+	// If the namespace mentioned in the resource is privileged, return true. We will hit this for calls made to all
+	// resources in this namespace and during delete and update operation on the namespace itself.
+	if a.GetNamespace() == privilegedNamespace {
+		return true
+	}
+
+	// If the resource is a namespace and if its name matched the privileged namespace, return true. We will hit this
+	// during creation of the namespace.
+	if a.GetResource().GroupResource() == api.Resource("namespaces") {
+		if namespace, ok := a.GetObject().(*api.Namespace); ok {
+			if namespace.Name == privilegedNamespace {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+func isPrivilegedServiceAccount(a admission.Attributes) bool {
+	user := a.GetUserInfo().GetName()
+	return strings.HasPrefix(user, privilegedServiceAccount)
+}
+
+func validatePrivilegedServiceAccount(a admission.Attributes) error {
+	// If the privileged service account performs any operation on the pods resource like create, update or connect,
+	// we block it. This is needed so that we can block exec access into privileged pods running on the master. Also,
+	// privileged service account does not need to perform these operations. So, just to be extra cautious we also block
+	// off create and update pods.
+	if a.GetResource().GroupResource() == api.Resource("pods") {
+		// Allow Delete operation on pods
+		if a.GetOperation() == admission.Delete {
+			return nil
+		}
+		// Allow subresources other than exec on pods
+		if len(a.GetSubresource()) != 0 && a.GetSubresource() != "exec" {
+			return nil
+		}
+		// Block all other operations
+		if a.GetOperation() == admission.Create || a.GetOperation() == admission.Update || a.GetOperation() == admission.Connect {
+			return admission.NewForbidden(a,
+				fmt.Errorf("%s validation failed: cannot modify pods in namespace %s", PluginName, a.GetNamespace()))
+		}
+	}
+
+	// If the privileged service account tries to update taints on a node, we block. We need to do this so that a user
+	// cannot use a privileged service account to untaint the node and run pods on a master.
+	if a.GetResource().GroupResource() == api.Resource("nodes") {
+		if a.GetOperation() == admission.Update {
+			node, ok := a.GetObject().(*api.Node)
+			if !ok {
+				return admission.NewForbidden(a,
+					fmt.Errorf("%s validation failed: unexpected type %T", PluginName, a.GetObject()))
+			}
+			oldNode, ok := a.GetOldObject().(*api.Node)
+			if !ok {
+				return admission.NewForbidden(a,
+					fmt.Errorf("%s validation failed: unexpected type %T", PluginName, a.GetOldObject()))
+			}
+
+			if !apiequality.Semantic.DeepEqual(node.Spec.Taints, oldNode.Spec.Taints) {
+				return admission.NewForbidden(a,
+					fmt.Errorf("%s validation failed: cannot modify taints on a node", PluginName))
+			}
+		}
+	}
+
+	return nil
+}
+
+func validateNodes(a admission.Attributes) error {
+	// If it is a connect operation on the sub resource, allow it. Sub resources of nodes are nodes/nodename/proxy and
+	// nodes/nodename/status. Prometheus needs read access to nodes/nodename/proxy/metrics. To support that and other
+	// pods which might need read access to node metrics, we need to allow this. But the VKE authorizer blocks off any
+	// access to perform exec, attach or run on the proxy resource on a master which will prevent the users from getting
+	// access to the privileged pods on master.
+	if a.GetOperation() == admission.Connect && len(a.GetSubresource()) != 0 {
+		return nil
+	}
+
+	// If the operation is Delete, fail. Deleting a node is not something that is useful to the user. Also, by deleting
+	// a node, they can potentially make their cluster useless.
+	if a.GetOperation() == admission.Delete {
+		return admission.NewForbidden(a, fmt.Errorf("%s validation failed: cannot delete nodes", PluginName))
+	}
+
+	// If the operation is on a master node, fail. We do not want to allow the users to modify labels and taints on the
+	// master node because it can compromise the security of the cluster.
+	if strings.HasPrefix(a.GetName(), masterNodePrefix) {
+		return admission.NewForbidden(a, fmt.Errorf("%s validation failed: cannot modify master nodes", PluginName))
+	}
+
+	return nil
+}
+
+func validateClusterRoles(a admission.Attributes) error {
+	// If the name in the request is not empty and has the reserved prefix, then fail. We will hit this during delete
+	// and update operations on the cluster roles. If it does not have the reserved prefix, allow it. If the name is
+	// empty then proceed to read it from the object in the request.
+	if a.GetName() != "" {
+		return checkReservedPrefix(a.GetName(), a)
+	}
+
+	clusterRole, ok := a.GetObject().(*rbac.ClusterRole)
+	// If we cannot get the cluster role binding object, fail.
+	if !ok {
+		return admission.NewForbidden(a,
+			fmt.Errorf("%s validation failed: unexpected type %T", PluginName, a.GetObject()))
+	}
+	// If we get the object and the name has the reserved prefix, fail. We will hit this when someone tries to create a
+	// cluster role with the reserved prefix.
+	return checkReservedPrefix(clusterRole.Name, a)
+}
+
+func validateClusterRoleBindings(a admission.Attributes) error {
+	// If the name in the request is not empty and has the reserved prefix, then fail. We will hit this during delete
+	// and update operations on the cluster role bindings. If it does not have the reserved prefix, allow it. If the
+	// name is empty then proceed to read it from the object in the request.
+	if a.GetName() != "" {
+		return checkReservedPrefix(a.GetName(), a)
+	}
+
+	clusterRoleBinding, ok := a.GetObject().(*rbac.ClusterRoleBinding)
+	// If we cannot get the cluster role binding object, fail.
+	if !ok {
+		return admission.NewForbidden(a,
+			fmt.Errorf("%s validation failed: unexpected type %T", PluginName, a.GetObject()))
+	}
+	// If we get the object and the name has the reserved prefix, fail. We will hit this when someone tries to create a
+	// cluster role binding with the reserved prefix.
+	return checkReservedPrefix(clusterRoleBinding.Name, a)
+}
+
+func validatePods(vac *vmwareAdmissionController, a admission.Attributes) error {
+	// If the request is acting on a sub resource of a pod then allow it. This request is not directly coming to a pod,
+	// but to a sub-resource like pods/foo/status. So, this does not have to be blocked.
+	if len(a.GetSubresource()) != 0 {
+		return nil
+	}
+
+	// If it is a Connect or Delete operation, allow it. We restrict access to connect to any pods in the vke-system
+	// namespace. So it is OK to allow this.
+	if a.GetOperation() == admission.Connect || a.GetOperation() == admission.Delete {
+		return nil
+	}
+
+	// If we cannot get the pod object, fail.
+	if _, ok := a.GetObject().(*api.Pod); !ok {
+		return admission.NewForbidden(a,
+			fmt.Errorf("%s validation failed: unexpected type %T", PluginName, a.GetObject()))
+	}
+
+	// If this is an update, see if we are only updating the ownerRef/finalizers.  Garbage collection does this
+	// and we should allow it in general, since you had the power to update and the power to delete.
+	// The worst that happens is that you delete something, but you aren't controlling the privileged object itself
+	if a.GetOperation() == admission.Update &&
+		rbacregistry.IsOnlyMutatingGCFields(a.GetObject(), a.GetOldObject(), apiequality.Semantic) {
+		return nil
+	}
+
+	errs := field.ErrorList{}
+	originalPod := a.GetObject().(*api.Pod)
+
+	// Generate a copy of the pod object because we are not allowed to mutate the pod object.
+	pod := originalPod.DeepCopy()
+
+	provider, err := podsecuritypolicy.NewSimpleProvider(vac.psp, pod.Namespace, vac.strategyFactory)
+	if err != nil {
+		return admission.NewForbidden(a, fmt.Errorf("%s validation failed: %v", PluginName, err))
+	}
+
+	// Set default security context for the pod. This fills in the defaults for the security context values that are not
+	// provided. This is needed to validate the security context correctly.
+	pod.Spec.SecurityContext, _, err = provider.CreatePodSecurityContext(pod)
+	if err != nil {
+		errs = append(errs, field.Invalid(field.NewPath("spec", "securityContext"),
+			pod.Spec.SecurityContext, err.Error()))
+	}
+
+	// Validate the pod.
+	errs = append(errs, provider.ValidatePodSecurityContext(pod, field.NewPath("spec", "securityContext"))...)
+
+	// Validate the pod's tolerations.
+	fieldErr := validatePodToleration(pod)
+	if fieldErr != nil {
+		errs = append(errs, fieldErr)
+	}
+
+	// Validate the initContainers that are part of the pod.
+	for i := range pod.Spec.InitContainers {
+		pod.Spec.InitContainers[i].SecurityContext, _, err = provider.CreateContainerSecurityContext(pod, &pod.Spec.InitContainers[i])
+		if err != nil {
+			errs = append(errs, field.Invalid(field.NewPath("spec", "initContainers").Index(i).
+				Child("securityContext"), "", err.Error()))
+			continue
+		}
+		errs = append(errs, provider.ValidateContainerSecurityContext(pod, &pod.Spec.InitContainers[i],
+			field.NewPath("spec", "initContainers").Index(i).Child("securityContext"))...)
+	}
+
+	// Validate the containers that are part of the pod.
+	for i := range pod.Spec.Containers {
+		pod.Spec.Containers[i].SecurityContext, _, err = provider.CreateContainerSecurityContext(pod, &pod.Spec.Containers[i])
+		if err != nil {
+			errs = append(errs, field.Invalid(field.NewPath("spec", "containers").Index(i).
+				Child("securityContext"), "", err.Error()))
+			continue
+		}
+		errs = append(errs, provider.ValidateContainerSecurityContext(pod, &pod.Spec.Containers[i],
+			field.NewPath("spec", "containers").Index(i).Child("securityContext"))...)
+	}
+
+	// Validate that /etc/ssl/certs, if mounted using a hostPath volume, is mounted read-only.
+	fieldErr = validateEtcSslCertsHostPath(pod)
+	if fieldErr != nil {
+		errs = append(errs, fieldErr)
+	}
+
+	if len(errs) > 0 {
+		return admission.NewForbidden(a,
+			fmt.Errorf("%s validation failed: %v", PluginName, errs))
+	}
+
+	return nil
+}
+
+func validatePodToleration(pod *api.Pod) *field.Error {
+	// Master nodes are tainted with "Dedicated=Master:NoSchedule". Only vke-system pods are allowed to tolerate
+	// this taint and to run on master nodes. A user's pod will be rejected if its spec has toleration for this taint.
+	for _, t := range pod.Spec.Tolerations {
+		reject := false
+
+		if t.Key == reservedTolerationKey && t.Value == reservedTolerationValue {
+			// Reject pod that has the reserved toleration "Dedicated=Master"
+			reject = true
+		} else if t.Operator == api.TolerationOpExists && (t.Key == reservedTolerationKey || t.Key == "") {
+			// Reject pod that has wildcard toleration matching the reserved toleration
+			reject = true
+		}
+
+		if reject {
+			return field.Invalid(field.NewPath("spec", "toleration"), fmt.Sprintf("%+v", t),
+				fmt.Sprintf("%s validation failed: should not tolerate master node taint", PluginName))
+		}
+	}
+	return nil
+}
+
+// Validate that /etc/ssl/certs, if mounted using a hostPath volume, is mounted read-only. If not, fail.
+// This is a hack that is needed to get Wavefront pods to work.
+// TODO(ashokc): Once we have support for users to create pods using privileged mode and host path, remove this.
+func validateEtcSslCertsHostPath(pod *api.Pod) *field.Error {
+	// Get volumes which mount /etc/ssl/certs and put them in a map.
+	volumes := map[string]struct{}{}
+	for _, vol := range pod.Spec.Volumes {
+		if vol.HostPath != nil && strings.HasPrefix(vol.HostPath.Path, etcSslCerts) {
+			volumes[vol.Name] = struct{}{}
+		}
+	}
+
+	// For every initContainer, get all volumeMounts and verify if it matches any of the volumes in the volumes map.
+	// If yes, then check if they are read only. If not, return an error.
+	err := checkVolumeReadOnly(pod.Spec.InitContainers, volumes, "initContainers")
+	if err != nil {
+		return err
+	}
+
+	// For every container, check whether any of its volumeMounts refers to a volume in the volumes map.
+	// If so, the mount must be read-only; otherwise return an error.
+	err = checkVolumeReadOnly(pod.Spec.Containers, volumes, "containers")
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// checkVolumeReadOnly checks whether any of the given containers has a volumeMount that refers to a volume in the
+// volumes map. If so, that mount must be read-only; otherwise an error is returned.
+func checkVolumeReadOnly(containers []api.Container, volumes map[string]struct{}, containerType string) *field.Error {
+	for i, container := range containers {
+		for _, vol := range container.VolumeMounts {
+			if _, ok := volumes[vol.Name]; ok {
+				if !vol.ReadOnly {
+					return field.Invalid(field.NewPath("spec", containerType).Index(i).Child("volumeMounts"),
+						fmt.Sprintf("%+v", vol), fmt.Sprintf("%s must be mounted as readOnly", etcSslCerts))
+				}
+			}
+		}
+	}
+	return nil
+}
+
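+// checkReservedPrefix returns a Forbidden error for resources whose name starts with the reserved prefix
+// (the test cases exercise names such as "vke:clusterrole" and "vke:clusterrolebinding").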
+func checkReservedPrefix(resourceName string, a admission.Attributes) error {
+	if strings.HasPrefix(resourceName, reservedPrefix) {
+		return admission.NewForbidden(a,
+			fmt.Errorf("%s validation failed: cannot modify resources with prefix %s", PluginName, reservedPrefix))
+	}
+	return nil
+}
diff --git a/plugin/pkg/admission/vke/admission_test.go b/plugin/pkg/admission/vke/admission_test.go
new file mode 100644
index 0000000..3fb4674
--- /dev/null
+++ b/plugin/pkg/admission/vke/admission_test.go
@@ -0,0 +1,941 @@
+package vke
+
+import (
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	kadmission "k8s.io/apiserver/pkg/admission"
+	"k8s.io/apiserver/pkg/authentication/user"
+	kapi "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/kubernetes/pkg/apis/rbac"
+)
+
+const (
+	testServiceAccountsGroup = "system.test\\cascade-controller-service-accounts"
+	clusterID                = "cluster-id"
+	defaultConfigFileFormat  = `
+vmwareAdmissionController:
+  privilegedGroup: %s
+  podSecurityPolicyFile: %s
+  clusterID: %s
+`
+	pspFileName   = "/tmp/psp.yaml"
+	pspConfigFile = `
+apiVersion: extensions/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: vmware-pod-security-policy-restricted
+spec:
+  privileged: true
+  fsGroup:
+    rule: RunAsAny
+  runAsUser:
+    rule: RunAsAny
+  seLinux:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  volumes:
+  - 'emptyDir'
+  - 'secret'
+  - 'downwardAPI'
+  - 'configMap'
+  - 'persistentVolumeClaim'
+  - 'projected'
+  - 'hostPath'
+  hostPID: false
+  hostIPC: false
+  hostNetwork: true
+  hostPorts:
+  - min: 1
+    max: 65536
+`
+)
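+
+// For reference, testPodValidation and testResourceValidation below render the config via fmt.Sprintf,
+// producing roughly:
+//
+//	vmwareAdmissionController:
+//	  privilegedGroup: system.test\cascade-controller-service-accounts
+//	  podSecurityPolicyFile: /tmp/psp.yaml
+//	  clusterID: cluster-id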
+
+func TestAdmitPrivileged(t *testing.T) {
+	tests := map[string]struct {
+		operation          kadmission.Operation
+		pod                *kapi.Pod
+		name               string
+		userInfo           user.Info
+		shouldPassValidate bool
+	}{
+		"create pod with Privileged=nil allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with Privileged=false allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withPrivileged(false).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with Privileged=true denied": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withPrivileged(true).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"create pod with multiple containers, one has Privileged=true denied": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withPrivileged(true).withInitContainer().withContainer().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"update pod with Privileged=true denied": {
+			operation:          kadmission.Update,
+			pod:                newTestPodBuilder().withPrivileged(true).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"create pod with HostNetwork=true denied": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostNetwork(true).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"create pod with HostIPC=true denied": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostIPC(true).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"create pod with HostPID=true denied": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostPID(true).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"create pod with HostPort allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostPort().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with HostVolume denied": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostVolume("/", false).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"create pod with HostVolume /etc/ssl/certs in read-only mode allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostVolume("/etc/ssl/certs", true).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with HostVolume /etc/ssl/certs in read-write mode denied": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostVolume("/etc/ssl/certs", false).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"create pod with CascadeDisk allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withCascadeDisk().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with HostVolume and CascadeDisk denied": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostVolume("/", false).withCascadeDisk().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"connect pod allowed": {
+			operation:          kadmission.Connect,
+			pod:                newTestPodBuilder().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"delete pod allowed": {
+			operation:          kadmission.Delete,
+			pod:                nil,
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+	}
+
+	for k, v := range tests {
+		testPodValidation(k, v.operation, v.pod, v.name, v.userInfo, v.shouldPassValidate, t)
+	}
+}
+
+func TestAdmitPrivilegedWithCustomPSP(t *testing.T) {
+	tests := map[string]struct {
+		operation          kadmission.Operation
+		pod                *kapi.Pod
+		name               string
+		userInfo           user.Info
+		shouldPassValidate bool
+	}{
+		"create pod with Privileged=nil allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with Privileged=false allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withPrivileged(false).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with Privileged=true allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withPrivileged(true).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with multiple containers, one has Privileged=true allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withPrivileged(true).withInitContainer().withContainer().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"update pod with Privileged=true allowed": {
+			operation:          kadmission.Update,
+			pod:                newTestPodBuilder().withPrivileged(true).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with HostNetwork=true allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostNetwork(true).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with HostIPC=true denied": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostIPC(true).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"create pod with HostPID=true denied": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostPID(true).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"create pod with HostPort allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostPort().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with HostVolume allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostVolume("/", false).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with HostVolume /etc/ssl/certs in read-only mode allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostVolume("/etc/ssl/certs", true).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with HostVolume /etc/ssl/certs in read-write mode denied": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostVolume("/etc/ssl/certs", false).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"create pod with CascadeDisk allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withCascadeDisk().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"create pod with HostVolume and CascadeDisk allowed": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withHostVolume("/", false).withCascadeDisk().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"connect pod allowed": {
+			operation:          kadmission.Connect,
+			pod:                newTestPodBuilder().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"delete pod allowed": {
+			operation:          kadmission.Delete,
+			pod:                nil,
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+	}
+
+	// Setup custom PSP file.
+	file, err := os.Create(pspFileName)
+	if err != nil {
+		t.Errorf("TestAdmitPrivilegedWithCustomPSP: failed to create custom PSP file %v", err)
+		return
+	}
+	_, err = file.WriteString(pspConfigFile)
+	if err != nil {
+		t.Errorf("TestAdmitPrivilegedWithCustomPSP: failed to write to custom PSP file %v", err)
+		return
+	}
+
+	for k, v := range tests {
+		testPodValidation(k, v.operation, v.pod, v.name, v.userInfo, v.shouldPassValidate, t)
+	}
+
+	// Delete custom PSP file.
+	err = os.Remove(pspFileName)
+	if err != nil {
+		t.Errorf("TestAdmitPrivilegedWithCustomPSP: failed to delete custom PSP file %v", err)
+	}
+}
+
+func TestPrivilegedNamespace(t *testing.T) {
+	tests := map[string]struct {
+		operation          kadmission.Operation
+		pod                *kapi.Pod
+		name               string
+		userInfo           user.Info
+		shouldPassValidate bool
+	}{
+		"denied: regular lightwave user creates pod in vke-system namespace": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withNamespace(privilegedNamespace).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: regular lightwave user cannot escalate privilege using service account": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withNamespace(privilegedNamespace).withServiceAccount(privilegedServiceAccount + "default").build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: regular service account creates pod in vke-system namespace": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withNamespace(privilegedNamespace).build(),
+			userInfo:           newTestUserBuilder().withName("system:serviceaccount:kube-system:default").build(),
+			shouldPassValidate: false,
+		},
+		"allowed: privileged service account deletes a pod in privileged namespace": {
+			operation:          kadmission.Delete,
+			pod:                newTestPodBuilder().withNamespace(privilegedNamespace).build(),
+			userInfo:           newTestUserBuilder().withName(privilegedServiceAccount + "default").build(),
+			shouldPassValidate: true,
+		},
+		"denied: privileged service account creates a pod in privileged namespace": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withNamespace(privilegedNamespace).build(),
+			userInfo:           newTestUserBuilder().withName(privilegedServiceAccount + "default").build(),
+			shouldPassValidate: false,
+		},
+		"denied: privileged service account execs into a pod in privileged namespace": {
+			operation:          kadmission.Connect,
+			pod:                newTestPodBuilder().withNamespace(privilegedNamespace).build(),
+			userInfo:           newTestUserBuilder().withName(privilegedServiceAccount + "default").build(),
+			shouldPassValidate: false,
+		},
+		"denied: privileged service account updates a pod in privileged namespace": {
+			operation:          kadmission.Update,
+			pod:                newTestPodBuilder().withNamespace(privilegedNamespace).build(),
+			userInfo:           newTestUserBuilder().withName(privilegedServiceAccount + "default").build(),
+			shouldPassValidate: false,
+		},
+		"allowed: regular user creates pod in other namespace": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withNamespace("default").build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"allowed: Cascade Controller Service Account creates pod in vke-system namespace": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withNamespace(privilegedNamespace).build(),
+			userInfo:           newTestUserBuilder().withGroup(testServiceAccountsGroup).build(),
+			shouldPassValidate: true,
+		},
+		"allowed: systemUnsecuredUser creates pod in vke-system namespace": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withNamespace(privilegedNamespace).build(),
+			userInfo:           newTestUserBuilder().withName(systemUnsecuredUser).build(),
+			shouldPassValidate: true,
+		},
+		"allowed: kubelet group creates pod in vke-system namespace": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withNamespace(privilegedNamespace).build(),
+			userInfo:           newTestUserBuilder().withGroup(systemNodesGroup).withName("system:node:worker").build(),
+			shouldPassValidate: true,
+		},
+		"denied: regular lightwave group does not grant privileged access": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withNamespace(privilegedNamespace).build(),
+			userInfo:           newTestUserBuilder().withGroup("test1\\group1").build(),
+			shouldPassValidate: false,
+		},
+		"allowed: if user has multiple groups, any privileged group can grant privileged access": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withNamespace(privilegedNamespace).build(),
+			userInfo:           newTestUserBuilder().withGroup("test1\\group1").withGroup(testServiceAccountsGroup).build(),
+			shouldPassValidate: true,
+		},
+		"denied: kubelet exec into pod": {
+			operation:          kadmission.Connect,
+			pod:                newTestPodBuilder().build(),
+			userInfo:           newTestUserBuilder().withGroup("system:nodes").build(),
+			shouldPassValidate: false,
+		},
+	}
+	for k, v := range tests {
+		testPodValidation(k, v.operation, v.pod, v.name, v.userInfo, v.shouldPassValidate, t)
+	}
+}
+
+func TestToleration(t *testing.T) {
+	tests := map[string]struct {
+		operation          kadmission.Operation
+		pod                *kapi.Pod
+		name               string
+		userInfo           user.Info
+		shouldPassValidate bool
+	}{
+		"allowed: create pod with no toleration": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"allowed: create pod with normal toleration key": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withToleration("mykey", reservedTolerationValue, kapi.TolerationOpEqual, kapi.TaintEffectNoSchedule).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"allowed: create pod with normal toleration value": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withToleration(reservedTolerationKey, "myval", kapi.TolerationOpEqual, kapi.TaintEffectNoSchedule).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"denied: create pod with reserved toleration": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withToleration(reservedTolerationKey, reservedTolerationValue, kapi.TolerationOpEqual, kapi.TaintEffectNoSchedule).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: create pod with wildcard toleration": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withToleration("", "", kapi.TolerationOpExists, "").build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: create pod with value wildcard toleration": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withToleration(reservedTolerationKey, "", kapi.TolerationOpExists, kapi.TaintEffectNoSchedule).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"allowed: create pod with value wildcard and normal key": {
+			operation:          kadmission.Create,
+			pod:                newTestPodBuilder().withToleration("mykey", "", kapi.TolerationOpExists, kapi.TaintEffectNoSchedule).build(),
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+	}
+
+	for k, v := range tests {
+		testPodValidation(k, v.operation, v.pod, v.name, v.userInfo, v.shouldPassValidate, t)
+	}
+}
+
+func TestClusterLevelResources(t *testing.T) {
+	tests := map[string]struct {
+		operation          kadmission.Operation
+		resource           string
+		subresource        string
+		name               string
+		namespace          string
+		userInfo           user.Info
+		object             runtime.Object
+		oldObject          runtime.Object
+		shouldPassValidate bool
+	}{
+		"denied: regular lightwave user update configmaps in vke-system namespace": {
+			operation:          kadmission.Update,
+			resource:           "configmaps",
+			namespace:          privilegedNamespace,
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: regular lightwave user delete daemonsets in vke-system namespace": {
+			operation:          kadmission.Delete,
+			resource:           "daemonsets",
+			namespace:          privilegedNamespace,
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: regular lightwave user create deployments in vke-system namespace": {
+			operation:          kadmission.Create,
+			resource:           "deployments",
+			namespace:          privilegedNamespace,
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: regular lightwave user create rolebindings in vke-system namespace": {
+			operation:          kadmission.Create,
+			resource:           "rolebindings",
+			namespace:          privilegedNamespace,
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"allowed: regular lightwave user create rolebindings in other namespace": {
+			operation:          kadmission.Create,
+			resource:           "rolebindings",
+			namespace:          "default",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"allowed: regular lightwave user create clusterroles": {
+			operation:          kadmission.Create,
+			resource:           "clusterroles",
+			name:               "cluster-role",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"denied: regular lightwave user create clusterroles with vke: prefix": {
+			operation:          kadmission.Create,
+			resource:           "clusterroles",
+			name:               "vke:clusterrole",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: regular lightwave user delete clusterroles with vke: prefix": {
+			operation:          kadmission.Delete,
+			resource:           "clusterroles",
+			name:               "vke:clusterrole",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"allowed: systemUnsecuredUser update clusterroles with vke: prefix": {
+			operation:          kadmission.Update,
+			resource:           "clusterroles",
+			name:               "vke:clusterrole",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().withName(systemUnsecuredUser).build(),
+			shouldPassValidate: true,
+		},
+		"allowed: regular lightwave user create clusterrolebindings": {
+			operation:          kadmission.Create,
+			resource:           "clusterrolebindings",
+			name:               "cluster-role-binding",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"denied: regular lightwave user create clusterrolebindings with vke: prefix": {
+			operation:          kadmission.Create,
+			resource:           "clusterrolebindings",
+			name:               "vke:clusterrolebinding",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: regular lightwave user delete clusterrolebindings with vke: prefix": {
+			operation:          kadmission.Delete,
+			resource:           "clusterrolebindings",
+			name:               "vke:clusterrolebinding",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"allowed: systemUnsecuredUser update clusterrolebindings with vke: prefix": {
+			operation:          kadmission.Update,
+			resource:           "clusterrolebindings",
+			name:               "vke:clusterrolebinding",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().withName(systemUnsecuredUser).build(),
+			shouldPassValidate: true,
+		},
+		"allowed: regular lightwave user update worker nodes": {
+			operation:          kadmission.Update,
+			resource:           "nodes",
+			name:               "worker-guid",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"allowed: regular lightwave user get worker nodes proxy subresource": {
+			operation:          kadmission.Connect,
+			resource:           "nodes",
+			subresource:        "proxy",
+			name:               "worker-guid",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"allowed: regular lightwave user patch worker nodes proxy subresource": {
+			operation:          kadmission.Update,
+			resource:           "nodes",
+			subresource:        "proxy",
+			name:               "worker-guid",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"allowed: regular lightwave user create worker nodes proxy subresource": {
+			operation:          kadmission.Create,
+			resource:           "nodes",
+			subresource:        "proxy",
+			name:               "worker-guid",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"allowed: regular lightwave user get master nodes proxy subresource": {
+			operation:          kadmission.Connect,
+			resource:           "nodes",
+			subresource:        "proxy",
+			name:               "master-guid",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: true,
+		},
+		"denied: regular lightwave user patch master nodes proxy subresource": {
+			operation:          kadmission.Update,
+			resource:           "nodes",
+			subresource:        "proxy",
+			name:               "master-guid",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: regular lightwave user create master nodes proxy subresource": {
+			operation:          kadmission.Create,
+			resource:           "nodes",
+			subresource:        "proxy",
+			name:               "master-guid",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: regular lightwave user update master nodes": {
+			operation:          kadmission.Update,
+			resource:           "nodes",
+			name:               "master-guid",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: regular lightwave user delete master nodes": {
+			operation:          kadmission.Delete,
+			resource:           "nodes",
+			name:               "master-guid",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"denied: regular lightwave user delete worker nodes": {
+			operation:          kadmission.Delete,
+			resource:           "nodes",
+			name:               "worker-guid",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().build(),
+			shouldPassValidate: false,
+		},
+		"allowed: systemUnsecuredUser update nodes": {
+			operation:          kadmission.Update,
+			resource:           "nodes",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().withName(systemUnsecuredUser).build(),
+			shouldPassValidate: true,
+		},
+		"allowed: kubelet update node": {
+			operation:          kadmission.Update,
+			resource:           "nodes",
+			namespace:          "",
+			userInfo:           newTestUserBuilder().withGroup(systemNodesGroup).build(),
+			shouldPassValidate: true,
+		},
+		"denied: privileged service account update node taint": {
+			operation:          kadmission.Update,
+			resource:           "nodes",
+			namespace:          "",
+			oldObject:          newTestNodeBuilder().build(),
+			object:             newTestNodeBuilder().withTaint(nil).build(),
+			userInfo:           newTestUserBuilder().withName(privilegedServiceAccount + "default").build(),
+			shouldPassValidate: false,
+		},
+	}
+	for k, v := range tests {
+		testResourceValidation(k, v.operation, v.resource, v.subresource, v.name, v.namespace, v.userInfo, v.object,
+			v.oldObject, v.shouldPassValidate, t)
+	}
+}
+
+func testPodValidation(testCaseName string, op kadmission.Operation, pod *kapi.Pod, name string, userInfo user.Info,
+	shouldPassValidate bool, t *testing.T) {
+
+	defaultConfigFile := fmt.Sprintf(defaultConfigFileFormat, testServiceAccountsGroup, pspFileName, clusterID)
+	configFile := strings.NewReader(defaultConfigFile)
+	plugin, err := NewVMwareAdmissionController(configFile)
+	if err != nil {
+		t.Errorf("%s: failed to create admission controller %v", testCaseName, err)
+	}
+
+	namespace := "default"
+	if pod != nil {
+		namespace = pod.Namespace
+	}
+
+	attrs := kadmission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"),
+		namespace, name, kapi.Resource("pods").WithVersion("version"), "", op, userInfo)
+
+	err = plugin.Admit(attrs)
+	if shouldPassValidate && err != nil {
+		t.Errorf("%s: expected no errors on Admit but received %v", testCaseName, err)
+	} else if !shouldPassValidate && err == nil {
+		t.Errorf("%s: expected errors on Admit but received none", testCaseName)
+	}
+}
+
+func testResourceValidation(testCaseName string, op kadmission.Operation, resource, subresource, name, namespace string,
+	userInfo user.Info, object runtime.Object, oldObject runtime.Object, shouldPassValidate bool, t *testing.T) {
+
+	defaultConfigFile := fmt.Sprintf(defaultConfigFileFormat, testServiceAccountsGroup, pspFileName, clusterID)
+	configFile := strings.NewReader(defaultConfigFile)
+	plugin, err := NewVMwareAdmissionController(configFile)
+	if err != nil {
+		t.Errorf("%s: failed to create admission controller %v", testCaseName, err)
+	}
+
+	groupResource := kapi.Resource(resource).WithVersion("version")
+	if resource == "clusterroles" || resource == "clusterrolebindings" {
+		groupResource = rbac.Resource(resource).WithVersion("version")
+	}
+
+	attrs := kadmission.NewAttributesRecord(object, oldObject, kapi.Kind("kind").WithVersion("version"),
+		namespace, name, groupResource, subresource, op, userInfo)
+
+	err = plugin.Admit(attrs)
+	if shouldPassValidate && err != nil {
+		t.Errorf("%s: expected no errors on Admit but received %v", testCaseName, err)
+	} else if !shouldPassValidate && err == nil {
+		t.Errorf("%s: expected errors on Admit but received none", testCaseName)
+	}
+}
+
+// testPodBuilder
+type testPodBuilder struct {
+	pod kapi.Pod
+}
+
+func newTestPodBuilder() *testPodBuilder {
+	builder := new(testPodBuilder)
+	return builder.init()
+}
+
+func (p *testPodBuilder) init() *testPodBuilder {
+	p.pod = kapi.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        "pod",
+			Namespace:   "namespace",
+			Annotations: map[string]string{},
+		},
+		Spec: kapi.PodSpec{
+			ServiceAccountName: "default",
+			SecurityContext:    &kapi.PodSecurityContext{},
+			Containers: []kapi.Container{
+				{
+					Name:            "test-container-1",
+					SecurityContext: &kapi.SecurityContext{},
+				},
+			},
+		},
+	}
+	return p
+}
+
+func (p *testPodBuilder) build() *kapi.Pod {
+	return &p.pod
+}
+
+func (p *testPodBuilder) withNamespace(namespace string) *testPodBuilder {
+	p.pod.ObjectMeta.Namespace = namespace
+	return p
+}
+
+func (p *testPodBuilder) withServiceAccount(sa string) *testPodBuilder {
+	p.pod.Spec.ServiceAccountName = sa
+	return p
+}
+
+func (p *testPodBuilder) withPrivileged(v bool) *testPodBuilder {
+	p.pod.Spec.Containers[0].SecurityContext.Privileged = &v
+	return p
+}
+
+func (p *testPodBuilder) withHostNetwork(v bool) *testPodBuilder {
+	p.pod.Spec.SecurityContext.HostNetwork = v
+	return p
+}
+
+func (p *testPodBuilder) withHostIPC(v bool) *testPodBuilder {
+	p.pod.Spec.SecurityContext.HostIPC = v
+	return p
+}
+
+func (p *testPodBuilder) withHostPID(v bool) *testPodBuilder {
+	p.pod.Spec.SecurityContext.HostPID = v
+	return p
+}
+
+func (p *testPodBuilder) withHostPort() *testPodBuilder {
+	containerPorts := []kapi.ContainerPort{
+		{
+			HostPort:      3000,
+			ContainerPort: 80,
+		},
+	}
+
+	p.pod.Spec.Containers[0].Ports = containerPorts
+	return p
+}
+
+func (p *testPodBuilder) withHostVolume(hostPath string, readOnly bool) *testPodBuilder {
+	volume := kapi.Volume{
+		Name: "host",
+		VolumeSource: kapi.VolumeSource{
+			HostPath: &kapi.HostPathVolumeSource{
+				Path: hostPath,
+			},
+		},
+	}
+	volumeMount := kapi.VolumeMount{Name: "host", MountPath: "/data", ReadOnly: readOnly}
+
+	p.pod.Spec.Volumes = append(p.pod.Spec.Volumes, volume)
+	p.pod.Spec.Containers[0].VolumeMounts = append(p.pod.Spec.Containers[0].VolumeMounts, volumeMount)
+	return p
+}
+
+func (p *testPodBuilder) withCascadeDisk() *testPodBuilder {
+	volume := kapi.Volume{
+		Name: "cascadeDisk",
+		VolumeSource: kapi.VolumeSource{
+			PersistentVolumeClaim: &kapi.PersistentVolumeClaimVolumeSource{
+				ClaimName: "00000000-0000-0000-0000-000000000001",
+				ReadOnly:  false,
+			},
+		},
+	}
+	device := kapi.VolumeDevice{Name: "cascadeDisk", DevicePath: "/cascadeDisk"}
+
+	p.pod.Spec.Volumes = append(p.pod.Spec.Volumes, volume)
+	p.pod.Spec.Containers[0].VolumeDevices = append(p.pod.Spec.Containers[0].VolumeDevices, device)
+	return p
+}
+
+func (p *testPodBuilder) withContainer() *testPodBuilder {
+	container := kapi.Container{
+		Name:            "test-container-2",
+		SecurityContext: &kapi.SecurityContext{},
+	}
+
+	p.pod.Spec.Containers = append(p.pod.Spec.Containers, container)
+	return p
+}
+
+func (p *testPodBuilder) withInitContainer() *testPodBuilder {
+	container := kapi.Container{
+		Name:            "test-init-container",
+		SecurityContext: &kapi.SecurityContext{},
+	}
+
+	p.pod.Spec.InitContainers = append(p.pod.Spec.InitContainers, container)
+	return p
+}
+
+func (p *testPodBuilder) withToleration(key, value string, operator kapi.TolerationOperator, effect kapi.TaintEffect) *testPodBuilder {
+	p.pod.Spec.Tolerations = append(p.pod.Spec.Tolerations, kapi.Toleration{
+		Key:      key,
+		Value:    value,
+		Operator: operator,
+		Effect:   effect,
+	})
+	return p
+}
+
+// testUserBuilder
+type testUserBuilder struct {
+	user *user.DefaultInfo
+}
+
+func newTestUserBuilder() *testUserBuilder {
+	builder := new(testUserBuilder)
+	return builder.init()
+}
+
+func (p *testUserBuilder) init() *testUserBuilder {
+	p.user = &user.DefaultInfo{
+		Name:   "https://lightwave.cascade-cloud.com/openidconnect/00000000-0000-0000-0000-000000000001#joe@vmware.com",
+		UID:    "10001",
+		Groups: []string{},
+	}
+	return p
+}
+
+func (p *testUserBuilder) build() *user.DefaultInfo {
+	return p.user
+}
+
+func (p *testUserBuilder) withName(name string) *testUserBuilder {
+	p.user.Name = name
+	return p
+}
+
+func (p *testUserBuilder) withGroup(group string) *testUserBuilder {
+	p.user.Groups = append(p.user.Groups, group)
+	return p
+}
+
+// testNodeBuilder
+type testNodeBuilder struct {
+	node kapi.Node
+}
+
+func newTestNodeBuilder() *testNodeBuilder {
+	builder := new(testNodeBuilder)
+	return builder.init()
+}
+
+func (n *testNodeBuilder) init() *testNodeBuilder {
+	n.node = kapi.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        "node",
+			Namespace:   "",
+			Annotations: nil,
+		},
+		Spec: kapi.NodeSpec{
+			Taints: []kapi.Taint{
+				{
+					Key:    "Dedicated",
+					Value:  "Master",
+					Effect: kapi.TaintEffectNoSchedule,
+				},
+			},
+		},
+	}
+	return n
+}
+
+func (n *testNodeBuilder) build() *kapi.Node {
+	return &n.node
+}
+
+func (n *testNodeBuilder) withTaint(taints []kapi.Taint) *testNodeBuilder {
+	n.node.Spec.Taints = taints
+	return n
+}
diff --git a/plugin/pkg/auth/authorizer/vke/BUILD b/plugin/pkg/auth/authorizer/vke/BUILD
new file mode 100644
index 0000000..4b984f1
--- /dev/null
+++ b/plugin/pkg/auth/authorizer/vke/BUILD
@@ -0,0 +1,40 @@
+package(default_visibility = ["//visibility:public"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+    "go_test",
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["vke_authorizer_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
+        "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
+    ],
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "vke_authorizer.go",
+    ],
+    importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/vke",
+    deps = [
+        "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+)
diff --git a/plugin/pkg/auth/authorizer/vke/OWNERS b/plugin/pkg/auth/authorizer/vke/OWNERS
new file mode 100644
index 0000000..48f5944
--- /dev/null
+++ b/plugin/pkg/auth/authorizer/vke/OWNERS
@@ -0,0 +1,3 @@
+maintainers:
+- ashokc
+- vivekgoyal
\ No newline at end of file
diff --git a/plugin/pkg/auth/authorizer/vke/vke_authorizer.go b/plugin/pkg/auth/authorizer/vke/vke_authorizer.go
new file mode 100644
index 0000000..5f3103b
--- /dev/null
+++ b/plugin/pkg/auth/authorizer/vke/vke_authorizer.go
@@ -0,0 +1,123 @@
+package vke
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/golang/glog"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
+)
+
+const (
+	systemClusterPrefix = "system:clusterID:"
+	systemNodePrefix    = "system:node:"
+	systemWorkerGroup   = "system:worker"
+	masterPrefix        = "master-"
+)
+
+// VKEAuthorizer authorizes requests that come from nodes using certificates.
+// If a request is from a node that is not part of the cluster, it is rejected.
+type VKEAuthorizer struct {
+	clusterID string
+}
+
+// NewAuthorizer returns a new VKE authorizer.
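+// The cluster ID is read from /etc/kubernetes/cc_cloud.config, which is expected to contain a line of the form
+// "clusterID = <id>" (for example, "clusterID = 00000000-0000-0000-0000-000000000001").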
+func NewAuthorizer() (authorizer.Authorizer, error) {
+	file, err := os.Open("/etc/kubernetes/cc_cloud.config")
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	scanner := bufio.NewScanner(file)
+
+	for scanner.Scan() {
+		lineContent := scanner.Text()
+		if strings.Contains(lineContent, "clusterID = ") {
+			clusterID := strings.SplitAfter(lineContent, "= ")[1]
+			return &VKEAuthorizer{clusterID: clusterID}, nil
+		}
+	}
+
+	return nil, fmt.Errorf("cluster ID could not be parsed from the config file")
+}
+
+// Authorize authorizes requests based on the VKE authorizer.
+func (v *VKEAuthorizer) Authorize(attrs authorizer.Attributes) (authorizer.Decision, string, error) {
+	// If the request comes from a node that is not part of this cluster, deny the request.
+	if isNonClusterNode(attrs, v.clusterID) {
+		glog.V(2).Infof("VKE authorizer: DENY the request because it is from a different cluster")
+		return authorizer.DecisionDeny, "", nil
+	}
+
+	// If a worker node's user name does not have the node prefix, deny the request. The prefix is required for the
+	// request to go through the node authorizer and the NodeRestriction admission controller; without it, a user
+	// could bypass both and modify the master node.
+	if isWorkerWithoutNodeNameRestriction(attrs) {
+		glog.V(2).Infof("VKE authorizer: DENY the request because the node name restriction is not met")
+		return authorizer.DecisionDeny, "", nil
+	}
+
+	// If it is a proxy request to the master node to exec, run or attach to a container, then deny.
+	if isProxyRequestToMasterNode(attrs) {
+		glog.V(2).Infof("VKE authorizer: DENY the request because it tried to execute commands on master pods")
+		return authorizer.DecisionDeny, "", nil
+	}
+
+	return authorizer.DecisionNoOpinion, "", nil
+}
+
+// isNonClusterNode reports whether the request comes from a node that does not belong to this cluster. This check
+// prevents cross-cluster attacks where a user reuses the kubelet certificate of one cluster to access resources in
+// another cluster. When a certificate is presented for authentication, Kubernetes only verifies that it is signed by
+// a CA that the cluster trusts; since the CA is the same for all clusters in our case, certificate authentication
+// alone is not enough. Lightwave ensures that any certificate generated on a worker always carries the
+// "system:clusterID:<id>" group, so we check the cluster ID in that group and reject the request if it does not
+// match this cluster's ID.
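+// For example, a kubelet in this cluster is expected to present groups such as
+// ["system:nodes", "system:clusterID:<this cluster's ID>", "system:worker"].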
+func isNonClusterNode(attrs authorizer.Attributes, clusterID string) bool {
+	groups := attrs.GetUser().GetGroups()
+	for _, group := range groups {
+		if strings.HasPrefix(group, systemClusterPrefix) {
+			groupParts := strings.Split(group, ":")
+			if clusterID != "" && groupParts[len(groupParts)-1] != clusterID {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// isWorkerWithoutNodeNameRestriction verifies that the certificate presented by a worker node also carries the
+// appropriate name "system:node:<hostname>". We need this check because a user can generate certificates whose CN is
+// not "system:node:<hostname>": Lightwave only validates the group, and the CN can be any of system:node:<hostname>,
+// kubernetes-master, or etcd. Such a certificate would bypass the NodeRestriction admission controller, which would
+// allow the holder to modify the master node and create pods on it. To prevent that, we verify that a request coming
+// from a worker node carries the expected name so that the NodeRestriction admission controller is enforced.
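+// For example, a certificate carrying the "system:worker" group but a CN of "kubernetes-master" (instead of
+// "system:node:<hostname>") would be denied here.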
+func isWorkerWithoutNodeNameRestriction(attrs authorizer.Attributes) bool {
+	groups := attrs.GetUser().GetGroups()
+	name := attrs.GetUser().GetName()
+	for _, group := range groups {
+		if group == systemWorkerGroup && !strings.HasPrefix(name, systemNodePrefix) {
+			return true
+		}
+	}
+	return false
+}
+
+// isProxyRequestToMasterNode checks if the request is made to exec, run or attach to a container on the master node. We
+// need this check because we do not want to allow a user to exec into a privileged pod on the master node.
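+// For example, a request path such as "/api/v1/nodes/master-0/proxy/exec/<namespace>/<pod>/<container>" is denied,
+// while the same path against a worker node is left to the other authorizers.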
+func isProxyRequestToMasterNode(attrs authorizer.Attributes) bool {
+	if attrs.GetResource() == "nodes" && strings.HasPrefix(attrs.GetName(), masterPrefix) {
+		if strings.Contains(attrs.GetPath(), "proxy/run") ||
+			strings.Contains(attrs.GetPath(), "proxy/exec") ||
+			strings.Contains(attrs.GetPath(), "proxy/attach") {
+			return true
+		}
+	}
+	return false
+}
diff --git a/plugin/pkg/auth/authorizer/vke/vke_authorizer_test.go b/plugin/pkg/auth/authorizer/vke/vke_authorizer_test.go
new file mode 100644
index 0000000..6aba9ec
--- /dev/null
+++ b/plugin/pkg/auth/authorizer/vke/vke_authorizer_test.go
@@ -0,0 +1,230 @@
+package vke
+
+import (
+	"testing"
+
+	"k8s.io/apiserver/pkg/authentication/user"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
+)
+
+const (
+	masterProxyAttachPath = "/api/v1/nodes/master-0/proxy/attach/vke-system/pod-name/container-name"
+	masterProxyExecPath   = "/api/v1/nodes/master-0/proxy/exec/vke-system/pod-name/container-name"
+	masterProxyRunPath    = "/api/v1/nodes/master-0/proxy/run/vke-system/pod-name/container-name"
+	workerProxyAttachPath = "/api/v1/nodes/worker-0/proxy/attach/vke-system/pod-name/container-name"
+	workerProxyExecPath   = "/api/v1/nodes/worker-0/proxy/exec/vke-system/pod-name/container-name"
+	workerProxyRunPath    = "/api/v1/nodes/worker-0/proxy/run/vke-system/pod-name/container-name"
+)
+
+func TestAuthorizer(t *testing.T) {
+	authz := &VKEAuthorizer{clusterID: "cluster-id"}
+
+	clusterNode := &user.DefaultInfo{Name: "system:node:worker-0", Groups: []string{"system:nodes", "system:clusterID:cluster-id", "system:worker"}}
+	crossClusterNode := &user.DefaultInfo{Name: "system:node:worker-0", Groups: []string{"system:nodes", "system:clusterID:invalid", "system:worker"}}
+	invalidWorker := &user.DefaultInfo{Name: "worker-0", Groups: []string{"system:nodes", "system:clusterID:cluster-id", "system:worker"}}
+
+	tests := []struct {
+		name   string
+		attrs  authorizer.AttributesRecord
+		expect authorizer.Decision
+	}{
+		// Do not deny requests from a normal cluster node user.
+		{
+			name:   "allowed: cluster node get pod",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "get", Resource: "pods", Name: "pod", Namespace: "ns"},
+			expect: authorizer.DecisionNoOpinion,
+		},
+		{
+			name:   "allowed: cluster node list pod",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "list", Resource: "pods", Namespace: "ns"},
+			expect: authorizer.DecisionNoOpinion,
+		},
+		{
+			name:   "allowed: cluster node create pod",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "create", Resource: "pods", Name: "pod", Namespace: "ns"},
+			expect: authorizer.DecisionNoOpinion,
+		},
+		{
+			name:   "allowed: cluster node update pod",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "update", Resource: "pods", Name: "pod", Namespace: "ns"},
+			expect: authorizer.DecisionNoOpinion,
+		},
+		{
+			name:   "allowed: cluster node delete pod",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "delete", Resource: "pods", Name: "pod", Namespace: "ns"},
+			expect: authorizer.DecisionNoOpinion,
+		},
+		{
+			name:   "allowed: cluster node get node",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "get", Resource: "nodes", Name: "node"},
+			expect: authorizer.DecisionNoOpinion,
+		},
+		{
+			name:   "allowed: cluster node list nodes",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "list", Resource: "nodes"},
+			expect: authorizer.DecisionNoOpinion,
+		},
+		{
+			name:   "allowed: cluster node create nodes",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "create", Resource: "nodes", Name: "node"},
+			expect: authorizer.DecisionNoOpinion,
+		},
+		{
+			name:   "allowed: cluster node update nodes",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "update", Resource: "nodes", Name: "node"},
+			expect: authorizer.DecisionNoOpinion,
+		},
+		{
+			name:   "allowed: cluster node delete nodes",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "delete", Resource: "nodes", Name: "node"},
+			expect: authorizer.DecisionNoOpinion,
+		},
+
+		// Deny requests from another cluster node.
+		{
+			name:   "denied: cross cluster node get pod",
+			attrs:  authorizer.AttributesRecord{User: crossClusterNode, ResourceRequest: true, Verb: "get", Resource: "pods", Name: "pod", Namespace: "ns"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: cross cluster node list pod",
+			attrs:  authorizer.AttributesRecord{User: crossClusterNode, ResourceRequest: true, Verb: "list", Resource: "pods", Namespace: "ns"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: cross cluster node create pod",
+			attrs:  authorizer.AttributesRecord{User: crossClusterNode, ResourceRequest: true, Verb: "create", Resource: "pods", Name: "pod", Namespace: "ns"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: cross cluster node update pod",
+			attrs:  authorizer.AttributesRecord{User: crossClusterNode, ResourceRequest: true, Verb: "update", Resource: "pods", Name: "pod", Namespace: "ns"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: cross cluster node delete pod",
+			attrs:  authorizer.AttributesRecord{User: crossClusterNode, ResourceRequest: true, Verb: "delete", Resource: "pods", Name: "pod", Namespace: "ns"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: cross cluster node get node",
+			attrs:  authorizer.AttributesRecord{User: crossClusterNode, ResourceRequest: true, Verb: "get", Resource: "nodes", Name: "node"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: cross cluster node list nodes",
+			attrs:  authorizer.AttributesRecord{User: crossClusterNode, ResourceRequest: true, Verb: "list", Resource: "nodes"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: cross cluster node create nodes",
+			attrs:  authorizer.AttributesRecord{User: crossClusterNode, ResourceRequest: true, Verb: "create", Resource: "nodes", Name: "node"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: cross cluster node update nodes",
+			attrs:  authorizer.AttributesRecord{User: crossClusterNode, ResourceRequest: true, Verb: "update", Resource: "nodes", Name: "node"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: cross cluster node delete nodes",
+			attrs:  authorizer.AttributesRecord{User: crossClusterNode, ResourceRequest: true, Verb: "delete", Resource: "nodes", Name: "node"},
+			expect: authorizer.DecisionDeny,
+		},
+
+		// Deny requests from invalid node.
+		{
+			name:   "denied: invalid worker get pod",
+			attrs:  authorizer.AttributesRecord{User: invalidWorker, ResourceRequest: true, Verb: "get", Resource: "pods", Name: "pod", Namespace: "ns"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: invalid worker list pod",
+			attrs:  authorizer.AttributesRecord{User: invalidWorker, ResourceRequest: true, Verb: "list", Resource: "pods", Namespace: "ns"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: invalid worker create pod",
+			attrs:  authorizer.AttributesRecord{User: invalidWorker, ResourceRequest: true, Verb: "create", Resource: "pods", Name: "pod", Namespace: "ns"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: invalid worker update pod",
+			attrs:  authorizer.AttributesRecord{User: invalidWorker, ResourceRequest: true, Verb: "update", Resource: "pods", Name: "pod", Namespace: "ns"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: invalid worker delete pod",
+			attrs:  authorizer.AttributesRecord{User: invalidWorker, ResourceRequest: true, Verb: "delete", Resource: "pods", Name: "pod", Namespace: "ns"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: invalid worker get node",
+			attrs:  authorizer.AttributesRecord{User: invalidWorker, ResourceRequest: true, Verb: "get", Resource: "nodes", Name: "node"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: invalid worker list nodes",
+			attrs:  authorizer.AttributesRecord{User: invalidWorker, ResourceRequest: true, Verb: "list", Resource: "nodes"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: invalid worker create nodes",
+			attrs:  authorizer.AttributesRecord{User: invalidWorker, ResourceRequest: true, Verb: "create", Resource: "nodes", Name: "node"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: invalid worker update nodes",
+			attrs:  authorizer.AttributesRecord{User: invalidWorker, ResourceRequest: true, Verb: "update", Resource: "nodes", Name: "node"},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: invalid worker delete nodes",
+			attrs:  authorizer.AttributesRecord{User: invalidWorker, ResourceRequest: true, Verb: "delete", Resource: "nodes", Name: "node"},
+			expect: authorizer.DecisionDeny,
+		},
+
+		// Deny exec, run and attach operation on master nodes using proxy resource.
+		{
+			name:   "denied: attach using proxy resource on master",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "create", Resource: "nodes", Name: "master-0", Path: masterProxyAttachPath},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: exec using proxy resource on master",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "create", Resource: "nodes", Name: "master-0", Path: masterProxyExecPath},
+			expect: authorizer.DecisionDeny,
+		},
+		{
+			name:   "denied: run using proxy resource on master",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "create", Resource: "nodes", Name: "master-0", Path: masterProxyRunPath},
+			expect: authorizer.DecisionDeny,
+		},
+
+		// Do not deny exec, run and attach operation on worker nodes using proxy resource.
+		{
+			name:   "allowed: attach using proxy resource on worker",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "create", Resource: "nodes", Name: "worker-0", Path: workerProxyAttachPath},
+			expect: authorizer.DecisionNoOpinion,
+		},
+		{
+			name:   "allowed: exec using proxy resource on worker",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "create", Resource: "nodes", Name: "worker-0", Path: workerProxyExecPath},
+			expect: authorizer.DecisionNoOpinion,
+		},
+		{
+			name:   "allowed: run using proxy resource on worker",
+			attrs:  authorizer.AttributesRecord{User: clusterNode, ResourceRequest: true, Verb: "create", Resource: "nodes", Name: "worker-0", Path: workerProxyRunPath},
+			expect: authorizer.DecisionNoOpinion,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			decision, _, _ := authz.Authorize(tc.attrs)
+			if decision != tc.expect {
+				t.Errorf("expected %v, got %v", tc.expect, decision)
+			}
+		})
+	}
+}
diff --git a/staging/src/k8s.io/api/core/v1/generated.pb.go b/staging/src/k8s.io/api/core/v1/generated.pb.go
index 5aeae2c..a7d5b12 100644
--- a/staging/src/k8s.io/api/core/v1/generated.pb.go
+++ b/staging/src/k8s.io/api/core/v1/generated.pb.go
@@ -35,6 +35,7 @@ limitations under the License.
 		Binding
 		CSIPersistentVolumeSource
 		Capabilities
+		CascadeDiskVolumeSource
 		CephFSPersistentVolumeSource
 		CephFSVolumeSource
 		CinderVolumeSource
@@ -260,9 +261,11 @@ func (m *AvoidPods) Reset()                    { *m = AvoidPods{} }
 func (*AvoidPods) ProtoMessage()               {}
 func (*AvoidPods) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} }
 
-func (m *AzureDiskVolumeSource) Reset()                    { *m = AzureDiskVolumeSource{} }
-func (*AzureDiskVolumeSource) ProtoMessage()               {}
-func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} }
+func (m *CascadeDiskVolumeSource) Reset()      { *m = CascadeDiskVolumeSource{} }
+func (*CascadeDiskVolumeSource) ProtoMessage() {}
+func (*CascadeDiskVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptorGenerated, []int{4}
+}
 
 func (m *AzureFilePersistentVolumeSource) Reset()      { *m = AzureFilePersistentVolumeSource{} }
 func (*AzureFilePersistentVolumeSource) ProtoMessage() {}
@@ -1040,6 +1043,11 @@ func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
 	return fileDescriptorGenerated, []int{185}
 }
 
+func (m *AzureDiskVolumeSource) Reset()                    { *m = AzureDiskVolumeSource{} }
+func (*AzureDiskVolumeSource) ProtoMessage()               {}
+func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{186} }
+
+
 func init() {
 	proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource")
 	proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity")
@@ -1051,6 +1059,7 @@ func init() {
 	proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding")
 	proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource")
 	proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities")
+	proto.RegisterType((*CascadeDiskVolumeSource)(nil), "k8s.io.api.core.v1.CascadeDiskVolumeSource")
 	proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource")
 	proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource")
 	proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource")
@@ -1613,6 +1622,32 @@ func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }
 
+func (m *CascadeDiskVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CascadeDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskID)))
+	i += copy(dAtA[i:], m.DiskID)
+	dAtA[i] = 0x12
+	i++
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i += copy(dAtA[i:], m.FSType)
+	return i, nil
+}
+
 func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -6283,13 +6318,13 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
 		}
 		i += n120
 	}
-	if m.AzureDisk != nil {
+	if m.CascadeDisk != nil {
 		dAtA[i] = 0x82
 		i++
 		dAtA[i] = 0x1
 		i++
-		i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
-		n121, err := m.AzureDisk.MarshalTo(dAtA[i:])
+		i = encodeVarintGenerated(dAtA, i, uint64(m.CascadeDisk.Size()))
+		n121, err := m.CascadeDisk.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
@@ -6367,6 +6402,18 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
 		}
 		i += n127
 	}
+	if m.AzureDisk != nil {
+		dAtA[i] = 0xba
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
+		n128, err := m.AzureDisk.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n128
+	}
 	return i, nil
 }
 
@@ -10316,13 +10363,13 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
 		}
 		i += n223
 	}
-	if m.AzureDisk != nil {
+	if m.CascadeDisk != nil {
 		dAtA[i] = 0xb2
 		i++
 		dAtA[i] = 0x1
 		i++
-		i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
-		n224, err := m.AzureDisk.MarshalTo(dAtA[i:])
+		i = encodeVarintGenerated(dAtA, i, uint64(m.CascadeDisk.Size()))
+		n224, err := m.CascadeDisk.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
@@ -10388,6 +10435,18 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
 		}
 		i += n229
 	}
+	if m.AzureDisk != nil {
+		dAtA[i] = 0xe2
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
+		n230, err := m.AzureDisk.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n230
+	}
 	return i, nil
 }
 
@@ -10623,6 +10682,16 @@ func (m *Capabilities) Size() (n int) {
 	return n
 }
 
+func (m *CascadeDiskVolumeSource) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.DiskID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
 func (m *CephFSPersistentVolumeSource) Size() (n int) {
 	var l int
 	_ = l
@@ -12331,8 +12400,8 @@ func (m *PersistentVolumeSource) Size() (n int) {
 		l = m.Quobyte.Size()
 		n += 1 + l + sovGenerated(uint64(l))
 	}
-	if m.AzureDisk != nil {
-		l = m.AzureDisk.Size()
+	if m.CascadeDisk != nil {
+		l = m.CascadeDisk.Size()
 		n += 2 + l + sovGenerated(uint64(l))
 	}
 	if m.PhotonPersistentDisk != nil {
@@ -12359,6 +12428,10 @@ func (m *PersistentVolumeSource) Size() (n int) {
 		l = m.CSI.Size()
 		n += 2 + l + sovGenerated(uint64(l))
 	}
+	if m.AzureDisk != nil {
+		l = m.AzureDisk.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -13788,8 +13861,8 @@ func (m *VolumeSource) Size() (n int) {
 		l = m.Quobyte.Size()
 		n += 2 + l + sovGenerated(uint64(l))
 	}
-	if m.AzureDisk != nil {
-		l = m.AzureDisk.Size()
+	if m.CascadeDisk != nil {
+		l = m.CascadeDisk.Size()
 		n += 2 + l + sovGenerated(uint64(l))
 	}
 	if m.PhotonPersistentDisk != nil {
@@ -13812,6 +13885,10 @@ func (m *VolumeSource) Size() (n int) {
 		l = m.StorageOS.Size()
 		n += 2 + l + sovGenerated(uint64(l))
 	}
+	if m.AzureDisk != nil {
+		l = m.AzureDisk.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -13971,6 +14048,17 @@ func (this *Capabilities) String() string {
 	}, "")
 	return s
 }
+func (this *CascadeDiskVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CascadeDiskVolumeSource{`,
+		`DiskID:` + fmt.Sprintf("%v", this.DiskID) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *CephFSPersistentVolumeSource) String() string {
 	if this == nil {
 		return "nil"
@@ -15335,13 +15423,14 @@ func (this *PersistentVolumeSource) String() string {
 		`AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`,
 		`VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`,
 		`Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`,
-		`AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
+		`CascadeDisk:` + strings.Replace(fmt.Sprintf("%v", this.CascadeDisk), "CascadeDiskVolumeSource", "CascadeDiskVolumeSource", 1) + `,`,
 		`PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`,
 		`PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`,
 		`ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`,
 		`Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`,
 		`StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`,
 		`CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`,
+		`AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -16468,12 +16557,13 @@ func (this *VolumeSource) String() string {
 		`ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`,
 		`VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`,
 		`Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`,
-		`AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
+		`CascadeDisk:` + strings.Replace(fmt.Sprintf("%v", this.CascadeDisk), "CascadeDiskVolumeSource", "CascadeDiskVolumeSource", 1) + `,`,
 		`PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`,
 		`PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`,
 		`ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`,
 		`Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`,
 		`StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`,
+		`AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -34322,7 +34412,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error {
 			iNdEx = postIndex
 		case 16:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field CascadeDisk", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -34346,10 +34436,10 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.AzureDisk == nil {
-				m.AzureDisk = &AzureDiskVolumeSource{}
+			if m.CascadeDisk == nil {
+				m.CascadeDisk = &CascadeDiskVolumeSource{}
 			}
-			if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.CascadeDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -34551,6 +34641,39 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 23:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AzureDisk == nil {
+				m.AzureDisk = &AzureDiskVolumeSource{}
+			}
+			if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -35089,6 +35212,114 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *CascadeDiskVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CascadeDiskVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CascadeDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DiskID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DiskID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -48522,7 +48753,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error {
 			iNdEx = postIndex
 		case 22:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field CascadeDisk", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -48546,10 +48777,10 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.AzureDisk == nil {
-				m.AzureDisk = &AzureDiskVolumeSource{}
+			if m.CascadeDisk == nil {
+				m.CascadeDisk = &CascadeDiskVolumeSource{}
 			}
-			if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.CascadeDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -48718,6 +48949,39 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 28:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AzureDisk == nil {
+				m.AzureDisk = &AzureDiskVolumeSource{}
+			}
+			if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
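
The marshal and unmarshal hunks above hard-code protobuf key bytes: a key is the varint encoding of fieldNumber<<3 | wireType, and wire type 2 marks a length-delimited field (string or embedded message). The standalone snippet below is not part of the patch; it just recomputes the keys for the field numbers chosen in types.go so the literal bytes in the generated code (0xa, 0x12, 0x82 0x1, 0xba 0x1, 0xb2 0x1, 0xe2 0x1) can be sanity-checked:

package main

import "fmt"

// encodeVarint performs protobuf base-128 varint encoding of a key.
func encodeVarint(v uint64) []byte {
	var out []byte
	for v >= 0x80 {
		out = append(out, byte(v)|0x80)
		v >>= 7
	}
	return append(out, byte(v))
}

func main() {
	for _, f := range []struct {
		name  string
		field uint64
	}{
		{"CascadeDiskVolumeSource.DiskID", 1},      // expect 0x0a
		{"CascadeDiskVolumeSource.FSType", 2},      // expect 0x12
		{"PersistentVolumeSource.CascadeDisk", 16}, // expect 0x82 0x01
		{"PersistentVolumeSource.AzureDisk", 23},   // expect 0xba 0x01
		{"VolumeSource.CascadeDisk", 22},           // expect 0xb2 0x01
		{"VolumeSource.AzureDisk", 28},             // expect 0xe2 0x01
	} {
		key := f.field<<3 | 2 // wire type 2: length-delimited
		fmt.Printf("%-40s % x\n", f.name, encodeVarint(key))
	}
}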
diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go
index 728cbd5..87ba6a4 100644
--- a/staging/src/k8s.io/api/core/v1/types.go
+++ b/staging/src/k8s.io/api/core/v1/types.go
@@ -333,9 +333,8 @@ type VolumeSource struct {
 	// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
 	// +optional
 	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
-	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
-	// +optional
-	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
+	// CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
+	CascadeDisk *CascadeDiskVolumeSource `json:"vkeDisk,omitempty" protobuf:"bytes,22,opt,name=cascadeDisk"`
 	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
 	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
 	// Items for all in one resources secrets, configmaps, and downward API
@@ -349,6 +348,9 @@ type VolumeSource struct {
 	// StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
 	// +optional
 	StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
+	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+	// +optional
+	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,28,opt,name=azureDisk"`
 }
 
 // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@@ -428,9 +430,8 @@ type PersistentVolumeSource struct {
 	// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
 	// +optional
 	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
-	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
-	// +optional
-	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
+	// CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
+	CascadeDisk *CascadeDiskVolumeSource `json:"vkeDisk,omitempty" protobuf:"bytes,16,opt,name=cascadeDisk"`
 	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
 	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
 	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
@@ -449,6 +450,9 @@ type PersistentVolumeSource struct {
 	// CSI represents storage that handled by an external CSI driver
 	// +optional
 	CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
+	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+	// +optional
+	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,23,opt,name=azureDisk"`
 }
 
 const (
@@ -1578,6 +1582,16 @@ type StorageOSPersistentVolumeSource struct {
 	SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
 }
 
+// Represents a Cascade persistent disk resource.
+type CascadeDiskVolumeSource struct {
+	// ID that identifies the Cascade persistent disk.
+	DiskID string `json:"diskID" protobuf:"bytes,1,opt,name=diskID"`
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+}
+
 // Adapts a ConfigMap into a volume.
 //
 // The contents of the target ConfigMap's Data field will be presented in a
-- 
2.7.4
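
For reference, a minimal illustration (not part of the patch) of how a client built against this patched tree could populate the new volume source. The wire name "vkeDisk" comes from the json struct tag added in types.go above; the disk ID and volume name are hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	vol := v1.Volume{
		Name: "data",
		VolumeSource: v1.VolumeSource{
			CascadeDisk: &v1.CascadeDiskVolumeSource{
				DiskID: "disk-1234", // hypothetical disk ID
				FSType: "ext4",
			},
		},
	}
	out, _ := json.Marshal(vol)
	// Prints something like: {"name":"data","vkeDisk":{"diskID":"disk-1234","fsType":"ext4"}}
	fmt.Println(string(out))
}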