
Adding service patch (Cascade cloud provider and cascadeDisk volume support for Kubernetes).

Change-Id: I70b661ed7a09ed976edbe76b8d9d93f92f7ddcbc
Reviewed-on: http://photon-jenkins.eng.vmware.com:8082/5164
Reviewed-by: Ashok Chandrasekar <ashokc@vmware.com>
Reviewed-by: Sharath George
Tested-by: Sharath George

sharathjg authored on 2018/05/18 02:40:06
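The patch below wires a new "cascadeDisk" volume source and a "cascade" cloud provider into Kubernetes: it extends the swagger specs and the internal core API with v1.CascadeDiskVolumeSource (diskID required, fsType defaulting to "ext4"), validates the new source in the apiserver, registers the cascade_disk volume plugin with the kube-controller-manager and kubelet, and adds the pkg/cloudprovider/providers/cascade package. As a minimal sketch (not part of this change), a pod volume built against the patched k8s.io/api/core/v1 types might look like the following; the CascadeDisk field on the external v1 types and the disk ID are illustrative assumptions:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Hypothetical volume referencing the new cascadeDisk source.
	// DiskID is the only required field; per the spec added below,
	// fsType is implicitly "ext4" when unspecified.
	vol := v1.Volume{
		Name: "data",
		VolumeSource: v1.VolumeSource{
			CascadeDisk: &v1.CascadeDiskVolumeSource{
				DiskID: "disk-1234", // placeholder ID
				FSType: "ext4",
			},
		},
	}
	fmt.Printf("%+v\n", vol)
}

Per the header comment in cascade.go below, the provider is enabled by starting the kubelet, kube-apiserver, and kube-controller-manager with '--cloud-provider=cascade --cloud-config=[path_to_config_file]'. A sketch of exercising the config parser from inside the cascade package, with every value a made-up placeholder, could be:

package cascade

import (
	"strings"
	"testing"
)

// Hypothetical unit test: readConfig and the gcfg field tags come from
// this patch; the endpoint, tenant, and zone values are placeholders.
func TestReadConfigSketch(t *testing.T) {
	cfg, err := readConfig(strings.NewReader(`[Global]
target = https://cascade-controller.example.com
tenantName = dev-tenant
clusterID = cluster-1
authEndpoint = https://lightwave.example.com
domainName = example.com
dnsName = master-0
region = region-1
zone = zone-a
`))
	if err != nil {
		t.Fatal(err)
	}
	if cfg.Global.CloudTarget != "https://cascade-controller.example.com" {
		t.Errorf("unexpected target: %s", cfg.Global.CloudTarget)
	}
}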
Showing 2 changed files
new file mode 100644
@@ -0,0 +1,4379 @@
+diff -uNr --no-dereference kubernetes-orig/api/swagger-spec/apps_v1alpha1.json kubernetes/api/swagger-spec/apps_v1alpha1.json
+--- kubernetes-orig/api/swagger-spec/apps_v1alpha1.json	2018-04-26 12:17:57.000000000 +0000
+@@ -1459,6 +1459,10 @@
+      "photonPersistentDisk": {
+       "$ref": "v1.PhotonPersistentDiskVolumeSource",
+       "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
++     },
++     "cascadeDisk": {
++      "$ref": "v1.CascadeDiskVolumeSource",
++      "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+      }
+     }
+    },
+@@ -2105,6 +2109,23 @@
+      },
+      "fsType": {
+       "type": "string",
++      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++     }
++    }
++   },
++   "v1.CascadeDiskVolumeSource": {
++    "id": "v1.CascadeDiskVolumeSource",
++    "description": "Represents a Cascade persistent disk resource.",
++    "required": [
++     "diskID"
++    ],
++    "properties": {
++     "diskID": {
++      "type": "string",
++      "description": "ID that identifies Cascade persistent disk"
++     },
++     "fsType": {
++      "type": "string",
+       "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+      }
+     }
+diff -uNr --no-dereference kubernetes-orig/api/swagger-spec/apps_v1beta1.json kubernetes/api/swagger-spec/apps_v1beta1.json
+--- kubernetes-orig/api/swagger-spec/apps_v1beta1.json	2018-04-26 12:17:57.000000000 +0000
+@@ -4483,6 +4483,10 @@
+       "$ref": "v1.PhotonPersistentDiskVolumeSource",
+       "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
+      },
++     "cascadeDisk": {
++      "$ref": "v1.CascadeDiskVolumeSource",
++      "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
++     },
+      "projected": {
+       "$ref": "v1.ProjectedVolumeSource",
+       "description": "Items for all in one resources secrets, configmaps, and downward API"
+@@ -5206,6 +5210,23 @@
+      },
+      "fsType": {
+       "type": "string",
++      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++     }
++    }
++   },
++   "v1.CascadeDiskVolumeSource": {
++    "id": "v1.CascadeDiskVolumeSource",
++    "description": "Represents a Cascade persistent disk resource.",
++    "required": [
++     "diskID"
++    ],
++    "properties": {
++     "diskID": {
++      "type": "string",
++      "description": "ID that identifies Cascade persistent disk"
++     },
++     "fsType": {
++      "type": "string",
+       "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+      }
+     }
+diff -uNr --no-dereference kubernetes-orig/api/swagger-spec/apps_v1beta2.json kubernetes/api/swagger-spec/apps_v1beta2.json
+--- kubernetes-orig/api/swagger-spec/apps_v1beta2.json	2018-04-26 12:17:57.000000000 +0000
+@@ -6849,6 +6849,10 @@
+       "$ref": "v1.PhotonPersistentDiskVolumeSource",
+       "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
+      },
++     "cascadeDisk": {
++      "$ref": "v1.CascadeDiskVolumeSource",
++      "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
++     },
+      "projected": {
+       "$ref": "v1.ProjectedVolumeSource",
+       "description": "Items for all in one resources secrets, configmaps, and downward API"
+@@ -7572,6 +7576,23 @@
+      },
+      "fsType": {
+       "type": "string",
++      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++     }
++    }
++   },
++   "v1.CascadeDiskVolumeSource": {
++    "id": "v1.CascadeDiskVolumeSource",
++    "description": "Represents a Cascade persistent disk resource.",
++    "required": [
++     "diskID"
++    ],
++    "properties": {
++     "diskID": {
++      "type": "string",
++      "description": "ID that identifies Cascade persistent disk"
++     },
++     "fsType": {
++      "type": "string",
+       "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+      }
+     }
+diff -uNr --no-dereference kubernetes-orig/api/swagger-spec/batch_v1beta1.json kubernetes/api/swagger-spec/batch_v1beta1.json
+--- kubernetes-orig/api/swagger-spec/batch_v1beta1.json	2018-04-26 12:17:57.000000000 +0000
+@@ -1878,6 +1878,10 @@
+       "$ref": "v1.PhotonPersistentDiskVolumeSource",
+       "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
+      },
++     "cascadeDisk": {
++      "$ref": "v1.CascadeDiskVolumeSource",
++      "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
++     },
+      "projected": {
+       "$ref": "v1.ProjectedVolumeSource",
+       "description": "Items for all in one resources secrets, configmaps, and downward API"
+@@ -2601,6 +2605,23 @@
+      },
+      "fsType": {
+       "type": "string",
++      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++     }
++    }
++   },
++   "v1.CascadeDiskVolumeSource": {
++    "id": "v1.CascadeDiskVolumeSource",
++    "description": "Represents a Cascade persistent disk resource.",
++    "required": [
++     "diskID"
++    ],
++    "properties": {
++     "diskID": {
++      "type": "string",
++      "description": "ID that identifies Cascade persistent disk"
++     },
++     "fsType": {
++      "type": "string",
+       "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+      }
+     }
+diff -uNr --no-dereference kubernetes-orig/api/swagger-spec/batch_v1.json kubernetes/api/swagger-spec/batch_v1.json
+--- kubernetes-orig/api/swagger-spec/batch_v1.json	2018-04-26 12:17:57.000000000 +0000
+@@ -1823,6 +1823,10 @@
+       "$ref": "v1.PhotonPersistentDiskVolumeSource",
+       "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
+      },
++     "cascadeDisk": {
++      "$ref": "v1.CascadeDiskVolumeSource",
++      "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
++     },
+      "projected": {
+       "$ref": "v1.ProjectedVolumeSource",
+       "description": "Items for all in one resources secrets, configmaps, and downward API"
+@@ -2546,6 +2550,23 @@
+      },
+      "fsType": {
+       "type": "string",
++      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++     }
++    }
++   },
++   "v1.CascadeDiskVolumeSource": {
++    "id": "v1.CascadeDiskVolumeSource",
++    "description": "Represents a Cascade persistent disk resource.",
++    "required": [
++     "diskID"
++    ],
++    "properties": {
++     "diskID": {
++      "type": "string",
++      "description": "ID that identifies Cascade persistent disk"
++     },
++     "fsType": {
++      "type": "string",
+       "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+      }
+     }
+diff -uNr --no-dereference kubernetes-orig/api/swagger-spec/batch_v2alpha1.json kubernetes/api/swagger-spec/batch_v2alpha1.json
+--- kubernetes-orig/api/swagger-spec/batch_v2alpha1.json	2018-04-26 12:17:57.000000000 +0000
+@@ -1893,6 +1893,10 @@
+      "storageos": {
+       "$ref": "v1.StorageOSVolumeSource",
+       "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
++     },
++     "cascadeDisk": {
++      "$ref": "v1.CascadeDiskVolumeSource",
++      "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+      }
+     }
+    },
+@@ -2797,6 +2801,23 @@
+      }
+     }
+    },
++   "v1.CascadeDiskVolumeSource": {
++    "id": "v1.CascadeDiskVolumeSource",
++    "description": "Represents a Cascade persistent disk resource.",
++    "required": [
++     "diskID"
++    ],
++    "properties": {
++     "diskID": {
++      "type": "string",
++      "description": "ID that identifies Cascade persistent disk"
++     },
++     "fsType": {
++      "type": "string",
++      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++     }
++    }
++   },
+    "v1.Container": {
+     "id": "v1.Container",
+     "description": "A single application container that you want to run within a pod.",
+diff -uNr --no-dereference kubernetes-orig/api/swagger-spec/extensions_v1beta1.json kubernetes/api/swagger-spec/extensions_v1beta1.json
+--- kubernetes-orig/api/swagger-spec/extensions_v1beta1.json	2018-04-26 12:17:57.000000000 +0000
+@@ -7506,6 +7506,10 @@
+      "storageos": {
+       "$ref": "v1.StorageOSVolumeSource",
+       "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
++     },
++     "cascadeDisk": {
++      "$ref": "v1.CascadeDiskVolumeSource",
++      "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+      }
+     }
+    },
+@@ -8214,6 +8218,23 @@
+      },
+      "fsType": {
+       "type": "string",
++      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++     }
++    }
++   },
++   "v1.CascadeDiskVolumeSource": {
++    "id": "v1.CascadeDiskVolumeSource",
++    "description": "Represents a Cascade persistent disk resource.",
++    "required": [
++     "diskID"
++    ],
++    "properties": {
++     "diskID": {
++      "type": "string",
++      "description": "ID that identifies Cascade persistent disk"
++     },
++     "fsType": {
++      "type": "string",
+       "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+      }
+     }
+diff -uNr --no-dereference kubernetes-orig/api/swagger-spec/settings.k8s.io_v1alpha1.json kubernetes/api/swagger-spec/settings.k8s.io_v1alpha1.json
+--- kubernetes-orig/api/swagger-spec/settings.k8s.io_v1alpha1.json	2018-04-26 12:17:57.000000000 +0000
+@@ -1676,6 +1676,10 @@
+      "storageos": {
+       "$ref": "v1.StorageOSVolumeSource",
+       "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
++     },
++     "cascadeDisk": {
++      "$ref": "v1.CascadeDiskVolumeSource",
++      "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+      }
+     }
+    },
+@@ -2346,6 +2350,23 @@
+      },
+      "fsType": {
+       "type": "string",
++      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++     }
++    }
++   },
++   "v1.CascadeDiskVolumeSource": {
++    "id": "v1.CascadeDiskVolumeSource",
++    "description": "Represents a Cascade persistent disk resource.",
++    "required": [
++     "diskID"
++    ],
++    "properties": {
++     "diskID": {
++      "type": "string",
++      "description": "ID that identifies Cascade persistent disk"
++     },
++     "fsType": {
++      "type": "string",
+       "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+      }
+     }
+diff -uNr --no-dereference kubernetes-orig/api/swagger-spec/v1.json kubernetes/api/swagger-spec/v1.json
+--- kubernetes-orig/api/swagger-spec/v1.json	2018-04-26 12:17:57.000000000 +0000
+@@ -19310,6 +19310,10 @@
+       "$ref": "v1.PhotonPersistentDiskVolumeSource",
+       "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
+      },
++     "cascadeDisk": {
++      "$ref": "v1.CascadeDiskVolumeSource",
++      "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
++     },
+      "portworxVolume": {
+       "$ref": "v1.PortworxVolumeSource",
+       "description": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine"
+@@ -19875,6 +19879,23 @@
+      }
+     }
+    },
++   "v1.CascadeDiskVolumeSource": {
++    "id": "v1.CascadeDiskVolumeSource",
++    "description": "Represents a Cascade persistent disk resource.",
++    "required": [
++     "diskID"
++    ],
++    "properties": {
++     "diskID": {
++      "type": "string",
++      "description": "ID that identifies Cascade persistent disk"
++     },
++     "fsType": {
++      "type": "string",
++      "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++     }
++    }
++   },
+    "v1.PortworxVolumeSource": {
+     "id": "v1.PortworxVolumeSource",
+     "description": "PortworxVolumeSource represents a Portworx volume resource.",
+@@ -20423,6 +20444,10 @@
+      "storageos": {
+       "$ref": "v1.StorageOSVolumeSource",
+       "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
++     },
++     "cascadeDisk": {
++      "$ref": "v1.CascadeDiskVolumeSource",
++      "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+      }
+     }
+    },
+diff -uNr --no-dereference kubernetes-orig/cmd/kube-controller-manager/app/BUILD kubernetes/cmd/kube-controller-manager/app/BUILD
+--- kubernetes-orig/cmd/kube-controller-manager/app/BUILD	2018-04-26 12:17:57.000000000 +0000
+@@ -88,6 +88,7 @@
+         "//pkg/volume/aws_ebs:go_default_library",
+         "//pkg/volume/azure_dd:go_default_library",
+         "//pkg/volume/azure_file:go_default_library",
++        "//pkg/volume/cascade_disk:go_default_library",
+         "//pkg/volume/cinder:go_default_library",
+         "//pkg/volume/csi:go_default_library",
+         "//pkg/volume/fc:go_default_library",
+diff -uNr --no-dereference kubernetes-orig/cmd/kube-controller-manager/app/plugins.go kubernetes/cmd/kube-controller-manager/app/plugins.go
+--- kubernetes-orig/cmd/kube-controller-manager/app/plugins.go	2018-04-26 12:17:57.000000000 +0000
+@@ -34,6 +34,7 @@
+ 	"k8s.io/kubernetes/pkg/volume/aws_ebs"
+ 	"k8s.io/kubernetes/pkg/volume/azure_dd"
+ 	"k8s.io/kubernetes/pkg/volume/azure_file"
++	"k8s.io/kubernetes/pkg/volume/cascade_disk"
+ 	"k8s.io/kubernetes/pkg/volume/cinder"
+ 	"k8s.io/kubernetes/pkg/volume/csi"
+ 	"k8s.io/kubernetes/pkg/volume/fc"
+@@ -77,6 +78,7 @@
+ 	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
++	allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
+ 	if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
+ 		allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
+ 	}
+@@ -107,6 +109,7 @@
+ 	allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
++	allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
+ 	return allPlugins
+ }
+ 
+@@ -163,6 +166,7 @@
+ 	allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, photon_pd.ProbeVolumePlugins()...)
++	allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
+ 
+ 	return allPlugins
+ }
+diff -uNr --no-dereference kubernetes-orig/cmd/kubelet/app/BUILD kubernetes/cmd/kubelet/app/BUILD
+--- kubernetes-orig/cmd/kubelet/app/BUILD	2018-04-26 12:17:57.000000000 +0000
+@@ -117,6 +117,7 @@
+         "//pkg/volume/aws_ebs:go_default_library",
+         "//pkg/volume/azure_dd:go_default_library",
+         "//pkg/volume/azure_file:go_default_library",
++        "//pkg/volume/cascade_disk:go_default_library",
+         "//pkg/volume/cephfs:go_default_library",
+         "//pkg/volume/cinder:go_default_library",
+         "//pkg/volume/configmap:go_default_library",
+diff -uNr --no-dereference kubernetes-orig/cmd/kubelet/app/plugins.go kubernetes/cmd/kubelet/app/plugins.go
+--- kubernetes-orig/cmd/kubelet/app/plugins.go	2018-04-26 12:17:57.000000000 +0000
+@@ -32,6 +32,7 @@
+ 	"k8s.io/kubernetes/pkg/volume/aws_ebs"
+ 	"k8s.io/kubernetes/pkg/volume/azure_dd"
+ 	"k8s.io/kubernetes/pkg/volume/azure_file"
++	"k8s.io/kubernetes/pkg/volume/cascade_disk"
+ 	"k8s.io/kubernetes/pkg/volume/cephfs"
+ 	"k8s.io/kubernetes/pkg/volume/cinder"
+ 	"k8s.io/kubernetes/pkg/volume/configmap"
+@@ -100,6 +101,7 @@
+ 	allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
+ 	allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
++	allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
+ 	if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
+ 		allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
+ 	}
+diff -uNr --no-dereference kubernetes-orig/pkg/apis/core/types.go kubernetes/pkg/apis/core/types.go
+--- kubernetes-orig/pkg/apis/core/types.go	2018-04-26 12:17:57.000000000 +0000
+@@ -316,6 +316,8 @@
+ 	// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
+ 	// +optional
+ 	StorageOS *StorageOSVolumeSource
++	// CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
++	CascadeDisk *CascadeDiskVolumeSource
+ }
+ 
+ // Similar to VolumeSource but meant for the administrator who creates PVs.
+@@ -394,6 +396,8 @@
+ 	// CSI (Container Storage Interface) represents storage that handled by an external CSI driver (Beta feature).
+ 	// +optional
+ 	CSI *CSIPersistentVolumeSource
++	// CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
++	CascadeDisk *CascadeDiskVolumeSource
+ }
+ 
+ type PersistentVolumeClaimVolumeSource struct {
+@@ -1510,6 +1514,16 @@
+ 	SecretRef *ObjectReference
+ }
+ 
++// Represents a Cascade persistent disk resource.
++type CascadeDiskVolumeSource struct {
++	// ID that identifies Cascade persistent disk
++	DiskID string
++	// Filesystem type to mount.
++	// Must be a filesystem type supported by the host operating system.
++	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
++	FSType string
++}
++
+ // Adapts a ConfigMap into a volume.
+ //
+ // The contents of the target ConfigMap's Data field will be presented in a
+diff -uNr --no-dereference kubernetes-orig/pkg/apis/core/validation/validation.go kubernetes/pkg/apis/core/validation/validation.go
+--- kubernetes-orig/pkg/apis/core/validation/validation.go	2018-04-26 12:17:57.000000000 +0000
+@@ -664,6 +664,14 @@
+ 			allErrs = append(allErrs, validateScaleIOVolumeSource(source.ScaleIO, fldPath.Child("scaleIO"))...)
+ 		}
+ 	}
++	if source.CascadeDisk != nil {
++		if numVolumes > 0 {
++			allErrs = append(allErrs, field.Forbidden(fldPath.Child("cascadeDisk"), "may not specify more than 1 volume type"))
++		} else {
++			numVolumes++
++			allErrs = append(allErrs, validateCascadeDiskVolumeSource(source.CascadeDisk, fldPath.Child("cascadeDisk"))...)
++		}
++	}
+ 
+ 	if numVolumes == 0 {
+ 		allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
+@@ -1494,6 +1502,14 @@
+ 	return allErrs
+ }
+ 
++func validateCascadeDiskVolumeSource(cd *core.CascadeDiskVolumeSource, fldPath *field.Path) field.ErrorList {
++	allErrs := field.ErrorList{}
++	if len(cd.DiskID) == 0 {
++		allErrs = append(allErrs, field.Required(fldPath.Child("diskID"), ""))
++	}
++	return allErrs
++}
++
+ // ValidatePersistentVolumeName checks that a name is appropriate for a
+ // PersistentVolumeName object.
+ var ValidatePersistentVolumeName = NameIsDNSSubdomain
+@@ -1737,6 +1753,15 @@
+ 		}
+ 	}
+ 
++	if pv.Spec.CascadeDisk != nil {
++		if numVolumes > 0 {
++			allErrs = append(allErrs, field.Forbidden(specPath.Child("cascadeDisk"), "may not specify more than 1 volume type"))
++		} else {
++			numVolumes++
++			allErrs = append(allErrs, validateCascadeDiskVolumeSource(pv.Spec.CascadeDisk, specPath.Child("cascadeDisk"))...)
++		}
++	}
++
+ 	if numVolumes == 0 {
+ 		allErrs = append(allErrs, field.Required(specPath, "must specify a volume type"))
+ 	}
+@@ -4370,7 +4395,7 @@
+ 			allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(core.ServiceAccountNameKey), ""))
+ 		}
+ 	case core.SecretTypeOpaque, "":
+-	// no-op
++		// no-op
+ 	case core.SecretTypeDockercfg:
+ 		dockercfgBytes, exists := secret.Data[core.DockerConfigKey]
+ 		if !exists {
+@@ -4416,7 +4441,7 @@
+ 		if _, exists := secret.Data[core.TLSPrivateKeyKey]; !exists {
+ 			allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSPrivateKeyKey), ""))
+ 		}
+-	// TODO: Verify that the key matches the cert.
++		// TODO: Verify that the key matches the cert.
+ 	default:
+ 		// no-op
+ 	}
+diff -uNr --no-dereference kubernetes-orig/pkg/apis/extensions/types.go kubernetes/pkg/apis/extensions/types.go
+--- kubernetes-orig/pkg/apis/extensions/types.go	2018-04-26 12:17:57.000000000 +0000
+@@ -925,6 +925,7 @@
+ 	PortworxVolume        FSType = "portworxVolume"
+ 	ScaleIO               FSType = "scaleIO"
+ 	CSI                   FSType = "csi"
++	CascadeDisk           FSType = "cascadeDisk"
+ 	All                   FSType = "*"
+ )
+ 
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/BUILD kubernetes/pkg/cloudprovider/providers/BUILD
+--- kubernetes-orig/pkg/cloudprovider/providers/BUILD	2018-04-26 12:17:57.000000000 +0000
+@@ -12,6 +12,7 @@
+     deps = [
+         "//pkg/cloudprovider/providers/aws:go_default_library",
+         "//pkg/cloudprovider/providers/azure:go_default_library",
++        "//pkg/cloudprovider/providers/cascade:go_default_library",
+         "//pkg/cloudprovider/providers/cloudstack:go_default_library",
+         "//pkg/cloudprovider/providers/gce:go_default_library",
+         "//pkg/cloudprovider/providers/openstack:go_default_library",
+@@ -34,6 +35,7 @@
+         ":package-srcs",
+         "//pkg/cloudprovider/providers/aws:all-srcs",
+         "//pkg/cloudprovider/providers/azure:all-srcs",
++        "//pkg/cloudprovider/providers/cascade:all-srcs",
+         "//pkg/cloudprovider/providers/cloudstack:all-srcs",
+         "//pkg/cloudprovider/providers/fake:all-srcs",
+         "//pkg/cloudprovider/providers/gce:all-srcs",
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/apitypes.go kubernetes/pkg/cloudprovider/providers/cascade/apitypes.go
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/apitypes.go	1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,227 @@
++package cascade
++
++import "fmt"
++
++const (
++	NotFoundError     = 1408
++	VMNotFoundError   = 2006
++	DiskNotFoundError = 3011
++	DiskInUseError    = 3012
++)
++
++// Represents APIError returned by the API in case of an error.
++type APIError struct {
++	Code           *string           `json:"code"`
++	Data           map[string]string `json:"data"`
++	ErrorCode      int32             `json:"errorCode,omitempty"`
++	Message        *string           `json:"message"`
++	HttpStatusCode int               `json:"-"` // Not part of API contract
++}
++
++// Implement Go error interface for APIError.
++func (e APIError) Error() string {
++	return fmt.Sprintf(
++		"Cascade: { HTTP status: '%d', code: '%s', message: '%s', data: '%v', errorcode: '%d' }",
++		e.HttpStatusCode, StringVal(e.Code), StringVal(e.Message), e.Data, e.ErrorCode)
++}
++
++// Used to represent a generic HTTP error, i.e. an unexpected HTTP 500.
++type HttpError struct {
++	StatusCode int
++	Message    string
++}
++
++// Implementation of error interface for HttpError.
++func (e HttpError) Error() string {
++	return fmt.Sprintf("Cascade: HTTP %d: %v", e.StatusCode, e.Message)
++}
++
++// Represents a task which gets returned for long running API calls.
++type Task struct {
++	EndTime            int64       `json:"endTime,omitempty"`
++	Entity             *Entity     `json:"entity,omitempty"`
++	ID                 *string     `json:"id"`
++	Operation          string      `json:"operation,omitempty"`
++	QueuedTime         *int64      `json:"queuedTime"`
++	ResourceProperties interface{} `json:"resourceProperties,omitempty"`
++	SelfLink           string      `json:"selfLink,omitempty"`
++	StartedTime        *int64      `json:"startedTime"`
++	State              *string     `json:"state"`
++	Steps              []*Step     `json:"steps"`
++}
++
++// Represents the entity associated with the task.
++type Entity struct {
++	ID   *string `json:"id"`
++	Kind *string `json:"kind"`
++}
++
++// Represents a task that has entered into an error state. Task errors can be caught and type-checked against with the
++// usual Go idiom.
++type TaskError struct {
++	ID   string `json:"id"`
++	Step Step   `json:"step,omitempty"`
++}
++
++// Implement Go error interface for TaskError.
++func (e TaskError) Error() string {
++	return fmt.Sprintf("Cascade: Task '%s' is in error state: {@step==%s}", e.ID, GetStep(e.Step))
++}
++
++// An error representing a timeout while waiting for a task to complete.
++type TaskTimeoutError struct {
++	ID string
++}
++
++// Implement Go error interface for TaskTimeoutError.
++func (e TaskTimeoutError) Error() string {
++	return fmt.Sprintf("Cascade: Timed out waiting for task '%s'. "+
++		"Task may not be in error state, examine task for full details.", e.ID)
++}
++
++// Represents a step in a task.
++type Step struct {
++	EndTime     int64             `json:"endTime,omitempty"`
++	Errors      []*APIError       `json:"errors"`
++	Operation   string            `json:"operation,omitempty"`
++	Options     map[string]string `json:"options,omitempty"`
++	QueuedTime  *int64            `json:"queuedTime"`
++	Sequence    int32             `json:"sequence,omitempty"`
++	StartedTime *int64            `json:"startedTime"`
++	State       *string           `json:"state"`
++	Warnings    []*APIError       `json:"warnings"`
++}
++
++// GetStep returns a string representation of a step, for use in error messages.
++func GetStep(s Step) string {
++	return fmt.Sprintf("{\"operation\"=>\"%s\",\"state\"=>\"%s\"}", s.Operation, StringVal(s.State))
++}
++
++// Represents the VM response returned by the API.
++type VM struct {
++	AttachedDisks          []*AttachedDisk  `json:"attachedDisks"`
++	Cost                   []*QuotaLineItem `json:"cost"`
++	Flavor                 *string          `json:"flavor"`
++	FloatingIP             string           `json:"floatingIp,omitempty"`
++	HighAvailableVMGroupID string           `json:"highAvailableVMGroupID,omitempty"`
++	ID                     *string          `json:"id"`
++	Kind                   string           `json:"kind"`
++	Name                   *string          `json:"name"`
++	SelfLink               string           `json:"selfLink,omitempty"`
++	SourceImageID          string           `json:"sourceImageId,omitempty"`
++	State                  *string          `json:"state"`
++	Subnets                []string         `json:"subnets"`
++	Tags                   []string         `json:"tags"`
++}
++
++// Represents the listVMs response returned by the API.
++type VMList struct {
++	Items            []*VM  `json:"items"`
++	NextPageLink     string `json:"nextPageLink,omitempty"`
++	PreviousPageLink string `json:"previousPageLink,omitempty"`
++}
++
++// Represents multiple VMs returned by the API.
++type VMs struct {
++	Items []VM `json:"items"`
++}
++
++// Represents the disks attached to the VMs.
++type AttachedDisk struct {
++	BootDisk   *bool   `json:"bootDisk"`
++	CapacityGb *int32  `json:"capacityGb"`
++	Flavor     *string `json:"flavor"`
++	ID         *string `json:"id"`
++	Kind       *string `json:"kind"`
++	Name       *string `json:"name"`
++	State      *string `json:"state"`
++}
++
++// Represents an attach disk operation request.
++type VMDiskOperation struct {
++	Arguments map[string]string `json:"arguments,omitempty"`
++	DiskID    *string           `json:"diskId"`
++}
++
++// Represents the quota line items for the VM.
++type QuotaLineItem struct {
++	Key   *string  `json:"key"`
++	Unit  *string  `json:"unit"`
++	Value *float64 `json:"value"`
++}
++
++// Represents a persistent disk.
++type PersistentDisk struct {
++	CapacityGB  int32            `json:"capacityGb,omitempty"`
++	Cost        []*QuotaLineItem `json:"cost"`
++	Datastore   string           `json:"datastore,omitempty"`
++	Flavor      *string          `json:"flavor"`
++	ID          *string          `json:"id"`
++	Kind        string           `json:"kind"`
++	Name        *string          `json:"name"`
++	SelfLink    string           `json:"selfLink,omitempty"`
++	State       *string          `json:"state"`
++	Tags        []string         `json:"tags"`
++	VM          string           `json:"vm"`
++	MountDevice string           `json:"mountDevice,omitempty"`
++	Zone        *string          `json:"zone"`
++}
++
++// Represents the spec for creating a disk.
++type DiskCreateSpec struct {
++	Affinities []*LocalitySpec `json:"affinities"`
++	CapacityGB *int32          `json:"capacityGb"`
++	Flavor     *string         `json:"flavor"`
++	Kind       *string         `json:"kind"`
++	Name       *string         `json:"name"`
++	Tags       []string        `json:"tags"`
++	Zone       *string         `json:"zone"`
++}
++
++// Represents the spec for specifying affinity for a disk with another entity.
++type LocalitySpec struct {
++	ID   *string `json:"id"`
++	Kind *string `json:"kind"`
++}
++
++// Represents the LoadBalancer response returned by the API.
++type LoadBalancer struct {
++	Endpoint *string `json:"endpoint"`
++}
++
++// Represents the spec for creating a LoadBalancer.
++type LoadBalancerCreateSpec struct {
++	HealthCheck *LoadBalancerHealthCheck `json:"healthCheck"`
++	Name        *string                  `json:"name"`
++	PortMaps    []*LoadBalancerPortMap   `json:"portMaps"`
++	Type        *string                  `json:"type"`
++	SubDomain   *string                  `json:"subDomain"`
++}
++
++// Represents the health check spec for a load balancer.
++type LoadBalancerHealthCheck struct {
++	HealthyThreshold  int64   `json:"healthyThreshold,omitempty"`
++	IntervalInSeconds int64   `json:"intervalInSeconds,omitempty"`
++	Path              *string `json:"path,omitempty"`
++	Port              *int64  `json:"port"`
++	Protocol          *string `json:"protocol"`
++}
++
++// Represents a port mapping spec for a load balancer.
++type LoadBalancerPortMap struct {
++	AllowedCidrs         []*string `json:"allowedCidrs"`
++	InstancePort         *int64    `json:"instancePort"`
++	InstanceProtocol     *string   `json:"instanceProtocol"`
++	LoadBalancerPort     *int64    `json:"loadBalancerPort"`
++	LoadBalancerProtocol *string   `json:"loadBalancerProtocol"`
++}
++
++// Represents a VM to be registered with or deregistered from the load balancer.
++type LoadBalancerVM struct {
++	ID *string `json:"id"`
++}
++
++// Represents a list of VMs to be registered with or deregistered from the load balancer.
++type LoadBalancerVMUpdate struct {
++	VMIds []*LoadBalancerVM `json:"vmIds"`
++}
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/auth.go kubernetes/pkg/cloudprovider/providers/cascade/auth.go
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/auth.go	1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,145 @@
++package cascade
++
++import (
++	"fmt"
++	"strings"
++	"github.com/golang/glog"
++	"os/exec"
++)
++
++const (
++	tScope = "openid offline_access rs_admin_server at_groups rs_vmdir"
++
++	afdCli                     = "/opt/vmware/bin/vmafd-cli"
++	afdCliMachineAccountCmd    = "get-machine-account-info"
++	afdCliPasswordPrefix       = "Password: "
++	afdCliSeparator            = "\n"
++)
++
++// AuthConfig contains configuration information for the authentication client.
++type AuthConfig struct {
++	tenantName string
++	authEndpoint string
++	machineAccountName string
++}
++
++// AuthClient defines functions related to authentication.
++type AuthClient struct {
++	cfg *AuthConfig
++}
++
++// NewAuthClient creates a new authentication client.
++func NewAuthClient(cascadeCfg *CascadeConfig) (*AuthClient, error) {
++	return &AuthClient{
++		cfg: &AuthConfig{
++			tenantName: cascadeCfg.Global.TenantName,
++			authEndpoint: cascadeCfg.Global.AuthEndpoint,
++			machineAccountName: fmt.Sprintf("%s@%s", cascadeCfg.Global.DNSName, cascadeCfg.Global.DomainName),
++		},
++	}, nil
++}
++
++func (c *AuthClient) GetTokensByMachineAccount() (*TokenOptions, error) {
++	// Use the VMAFD CLI to get the machine account password
++	cmd := exec.Command(afdCli, afdCliMachineAccountCmd)
++	output, err := cmd.Output()
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: Failed to get machine account credentials. Cannot create Client.")
++		return nil, fmt.Errorf("Failed to get machine account credentials, err: %v", err)
++	}
++
++	password, err := parseMachineAccountInfo(output)
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: Failed to parse machine account credentials. Cannot create Client.")
++		return nil, fmt.Errorf("Failed to parse machine account credentials, err: %v", err)
++	}
++
++	return c.GetTokensByCredentials(c.cfg.machineAccountName, password)
++}
++
++// GetTokensByCredentials gets tokens using username and password.
++func (c *AuthClient) GetTokensByCredentials(username, password string) (*TokenOptions, error) {
++	// Parse tenant part from username
++	parts := strings.Split(username, "@")
++	if len(parts) != 2 {
++		return nil, fmt.Errorf("Invalid full user name '%s': expected user@tenant", username)
++	}
++	tenant := parts[1]
++
++	oidcClient, err := buildOIDCClient(c.cfg.authEndpoint)
++	if err != nil {
++		return nil, err
++	}
++
++	tokenResponse, err := oidcClient.GetTokenByPasswordGrant(tenant, username, password)
++	if err != nil {
++		return nil, err
++	}
++
++	return toTokenOptions(tokenResponse), nil
++}
++
++// GetTokensByRefreshToken gets tokens using refresh token.
++func (c *AuthClient) GetTokensByRefreshToken(refreshtoken string) (*TokenOptions, error) {
++	oidcClient, err := buildOIDCClient(c.cfg.authEndpoint)
++	if err != nil {
++		return nil, err
++	}
++
++	tokenResponse, err := oidcClient.GetTokenByRefreshTokenGrant(c.cfg.tenantName, refreshtoken)
++	if err != nil {
++		return nil, err
++	}
++
++	return toTokenOptions(tokenResponse), nil
++}
++
++func buildOIDCClient(authEndpoint string) (*OIDCClient, error) {
++	options := &OIDCClientOptions{
++		IgnoreCertificate: false,
++		RootCAs:           nil,
++		TokenScope:        tScope,
++	}
++
++	return NewOIDCClient(authEndpoint, options, nil), nil
++}
++
++func toTokenOptions(response *OIDCTokenResponse) *TokenOptions {
++	return &TokenOptions{
++		AccessToken:  response.AccessToken,
++		ExpiresIn:    response.ExpiresIn,
++		RefreshToken: response.RefreshToken,
++		IDToken:      response.IDToken,
++		TokenType:    response.TokenType,
++	}
++}
++
++// parseMachineAccountInfo parses the machine account password from the machine-account-info output which looks like
++// this:
++//MachineAccount: photon-8rwdscr1.lw-testdom.com
++//Password: FT`])}]d/3\EPwRpz9k1
++func parseMachineAccountInfo(output []byte) (string, error) {
++	if len(output) <= 0 {
++		return "", fmt.Errorf("account info is not specified")
++	}
++
++	strOut := string(output)
++	strOutLen := len(strOut)
++
++	pwdStart := strings.Index(strOut, afdCliPasswordPrefix)
++	if pwdStart < 0 {
++		return "", fmt.Errorf("account info is not in expected format")
++	}
++	pwdStart = pwdStart + len(afdCliPasswordPrefix)
++	if pwdStart >= strOutLen {
++		return "", fmt.Errorf("account info is not in expected format")
++	}
++	pwdEnd := strings.LastIndex(strOut, afdCliSeparator)
++	if pwdEnd < 0 || pwdEnd <= pwdStart || pwdEnd >= strOutLen {
++		return "", fmt.Errorf("account info is not in expected format")
++	}
++
++	pwd := strOut[pwdStart:pwdEnd]
++
++	return pwd, nil
++}
+\ No newline at end of file
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/BUILD kubernetes/pkg/cloudprovider/providers/cascade/BUILD
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/BUILD	1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,44 @@
++package(default_visibility = ["//visibility:public"])
++
++load(
++    "@io_bazel_rules_go//go:def.bzl",
++    "go_library",
++)
++
++go_library(
++    name = "go_default_library",
++    srcs = [
++        "apitypes.go",
++        "auth.go",
++        "cascade.go",
++        "cascade_disks.go",
++        "cascade_instances.go",
++        "cascade_loadbalancer.go",
++        "client.go",
++        "oidcclient.go",
++        "restclient.go",
++        "utils.go"
++        ],
++    deps = [
++        "//pkg/api/v1/helper:go_default_library",
++        "//pkg/cloudprovider:go_default_library",
++        "//pkg/controller:go_default_library",
++        "//vendor/github.com/golang/glog:go_default_library",
++        "//vendor/gopkg.in/gcfg.v1:go_default_library",
++        "//vendor/k8s.io/api/core/v1:go_default_library",
++        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
++    ],
++)
++
++filegroup(
++    name = "package-srcs",
++    srcs = glob(["**"]),
++    tags = ["automanaged"],
++    visibility = ["//visibility:private"],
++)
++
++filegroup(
++    name = "all-srcs",
++    srcs = [":package-srcs"],
++    tags = ["automanaged"],
++)
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/cascade_disks.go kubernetes/pkg/cloudprovider/providers/cascade/cascade_disks.go
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/cascade_disks.go	1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,227 @@
++package cascade
++
++import (
++	"context"
++	"github.com/golang/glog"
++	k8stypes "k8s.io/apimachinery/pkg/types"
++	"k8s.io/apimachinery/pkg/util/sets"
++	"k8s.io/kubernetes/pkg/kubelet/apis"
++	"k8s.io/kubernetes/pkg/volume"
++	volumeutil "k8s.io/kubernetes/pkg/volume/util"
++)
++
++// Attaches given virtual disk volume to the node running kubelet.
++func (cc *CascadeCloud) AttachDisk(diskID string, nodeName k8stypes.NodeName) (string, error) {
++	// Check if disk is already attached to that node.
++	attached, err := cc.DiskIsAttached(diskID, nodeName)
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: cc.DiskIsAttached failed during AttachDisk. Error[%v]", err)
++		return "", err
++	}
++
++	// If not already attached, attach the disk.
++	if !attached {
++		operation := &VMDiskOperation{
++			DiskID: StringPtr(diskID),
++		}
++
++		vmID, err := cc.InstanceID(context.TODO(), nodeName)
++		if err != nil {
++			glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for AttachDisk. Error[%v]", err)
++			return "", err
++		}
++
++		task, err := cc.apiClient.AttachDisk(vmID, operation)
++		if err != nil {
++			glog.Errorf("Cascade Cloud Provider: Failed to attach disk with ID %s. Error[%v]", diskID, err)
++			return "", err
++		}
++
++		_, err = cc.apiClient.WaitForTask(StringVal(task.ID))
++		if err != nil {
++			glog.Errorf("Cascade Cloud Provider: Failed to wait for task to attach disk with ID %s. Error[%v]",
++				diskID, err)
++			return "", err
++		}
++	}
++
++	// Get mount device of the attached disk.
++	disk, err := cc.apiClient.GetDisk(diskID)
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: Failed to Get disk with diskID %s. Error[%v]", diskID, err)
++		return "", err
++	}
++
++	return disk.MountDevice, nil
++}
++
++// Detaches given virtual disk volume from the node running kubelet.
++func (cc *CascadeCloud) DetachDisk(diskID string, nodeName k8stypes.NodeName) error {
++	operation := &VMDiskOperation{
++		DiskID: StringPtr(diskID),
++	}
++
++	vmID, err := cc.InstanceID(context.TODO(), nodeName)
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for DetachDisk. Error[%v]", err)
++		return err
++	}
++
++	task, err := cc.apiClient.DetachDisk(vmID, operation)
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: Failed to detach disk with pdID %s. Error[%v]", diskID, err)
++		return err
++	}
++
++	_, err = cc.apiClient.WaitForTask(StringVal(task.ID))
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: Failed to wait for task to detach disk with pdID %s. Error[%v]",
++			diskID, err)
++		return err
++	}
++
++	return nil
++}
++
++// DiskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
++func (cc *CascadeCloud) DiskIsAttached(diskID string, nodeName k8stypes.NodeName) (bool, error) {
++	vmID, err := cc.InstanceID(context.TODO(), nodeName)
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for DiskIsAttached. Error[%v]", err)
++		return false, err
++	}
++
++	_, err = cc.apiClient.GetVM(vmID)
++	if err != nil {
++		switch err.(type) {
++		case APIError:
++			if err.(APIError).ErrorCode == VMNotFoundError {
++				// If instance no longer exists, we will assume that the volume is not attached.
++				glog.Warningf("Cascade Cloud Provider: Instance %s does not exist. DiskIsAttached will assume"+
++					" disk %s is not attached to it.", nodeName, diskID)
++				return false, nil
++			}
++		}
++		return false, err
++	}
++
++	disk, err := cc.apiClient.GetDisk(diskID)
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: Failed to Get disk with diskID %s. Error[%v]", diskID, err)
++		return false, err
++	}
++
++	if disk.VM == vmID {
++		return true, nil
++	}
++
++	return false, nil
++}
++
++// DisksAreAttached returns if disks are attached to the VM using controllers supported by the plugin.
++func (cc *CascadeCloud) DisksAreAttached(diskIDs []string, nodeName k8stypes.NodeName) (map[string]bool, error) {
++	attached := make(map[string]bool)
++	for _, diskID := range diskIDs {
++		attached[diskID] = false
++	}
++
++	vmID, err := cc.InstanceID(context.TODO(), nodeName)
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for DisksAreAttached. Error[%v]", err)
++		return attached, err
++	}
++
++	for _, diskID := range diskIDs {
++		disk, err := cc.apiClient.GetDisk(diskID)
++		if err != nil {
++			glog.Warningf("Cascade Cloud Provider: failed to get VMs for persistent disk %s, err [%v]",
++				diskID, err)
++		} else {
++			if disk.VM == vmID {
++				attached[diskID] = true
++			}
++		}
++	}
++
++	return attached, nil
++}
++
++// Create a volume of given size (in GB).
++func (cc *CascadeCloud) CreateDisk(volumeOptions *VolumeOptions) (diskID string, err error) {
++	// Get Zones for the cluster
++	zones, err := cc.apiClient.GetZones()
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: Failed to Get zones for the cluster. Error[%v]", err)
++		return "", err
++	}
++
++	// Pick a zone to place the disk in.
++	zoneSet := sets.NewString()
++	for _, zone := range zones {
++		zoneSet.Insert(zone)
++	}
++	zone := volumeutil.ChooseZoneForVolume(zoneSet, volumeOptions.Name)
++
++	diskSpec := DiskCreateSpec{}
++	diskSpec.Name = StringPtr(volumeOptions.Name)
++	diskSpec.Flavor = StringPtr(volumeOptions.Flavor)
++	diskSpec.CapacityGB = Int32Ptr(int32(volumeOptions.CapacityGB))
++	diskSpec.Kind = StringPtr(DiskSpecKind)
++	diskSpec.Zone = StringPtr(zone)
++
++	task, err := cc.apiClient.CreateDisk(&diskSpec)
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: Failed to CreateDisk. Error[%v]", err)
++		return "", err
++	}
++
++	waitTask, err := cc.apiClient.WaitForTask(StringVal(task.ID))
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: Failed to wait for task to CreateDisk. Error[%v]", err)
++		return "", err
++	}
++
++	return StringVal(waitTask.Entity.ID), nil
++}
++
++// Deletes a volume given volume name.
++func (cc *CascadeCloud) DeleteDisk(diskID string) error {
++	task, err := cc.apiClient.DeleteDisk(diskID)
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: Failed to DeleteDisk. Error[%v]", err)
++		// If we get a DiskNotFound error, we assume that the disk is already deleted. So we don't return an error here.
++		switch err.(type) {
++		case APIError:
++			if err.(APIError).ErrorCode == DiskNotFoundError {
++				return nil
++			}
++			if err.(APIError).ErrorCode == DiskInUseError {
++				return volume.NewDeletedVolumeInUseError(err.Error())
++			}
++		}
++		return err
++	}
++
++	_, err = cc.apiClient.WaitForTask(StringVal(task.ID))
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: Failed to wait for task to DeleteDisk. Error[%v]", err)
++		return err
++	}
++
++	return nil
++}
++
++// Gets the zone and region for the volume.
++func (cc *CascadeCloud) GetVolumeLabels(diskID string) (map[string]string, error) {
++	disk, err := cc.apiClient.GetDisk(diskID)
++	if err != nil {
++		glog.Errorf("Cascade Cloud Provider: Failed to GetDisk for GetVolumeLabels. Error[%v]", err)
++		return nil, err
++	}
++
++	labels := make(map[string]string)
++	labels[apis.LabelZoneFailureDomain] = StringVal(disk.Zone)
++	labels[apis.LabelZoneRegion] = cc.cfg.Global.Region
++
++	return labels, nil
++}
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/cascade.go kubernetes/pkg/cloudprovider/providers/cascade/cascade.go
1204
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/cascade.go	1970-01-01 00:00:00.000000000 +0000
1205
+@@ -0,0 +1,212 @@
1206
++// The use of Cascade cloud provider requires the kubelet, kube-apiserver, and kube-controller-manager to be started
1207
++// with config flag: '--cloud-provider=cascade --cloud-config=[path_to_config_file]'.
1208
++package cascade
1209
++
1210
++import (
1211
++	"context"
1212
++	"errors"
1213
++	"fmt"
1214
++	"github.com/golang/glog"
1215
++	"gopkg.in/gcfg.v1"
1216
++	"io"
1217
++	k8stypes "k8s.io/apimachinery/pkg/types"
1218
++	"k8s.io/kubernetes/pkg/cloudprovider"
1219
++	"k8s.io/kubernetes/pkg/controller"
1220
++	"os"
1221
++	"strings"
1222
++)
1223
++
1224
++const (
1225
++	ProviderName = "cascade"
1226
++	DiskSpecKind = "persistent-disk"
1227
++	MasterPrefix = "master"
1228
++)
1229
++
1230
++// CascadeCloud is an implementation of the cloud provider interface for Cascade Controller.
1231
++type CascadeCloud struct {
1232
++	cfg *CascadeConfig
1233
++	// Authentication client to get token for Cascade API calls
1234
++	authClient *AuthClient
1235
++	// API Client to make Cascade API calls
1236
++	apiClient *Client
1237
++	// local $HOSTNAME
1238
++	localHostname string
1239
++	// hostname from K8S, could be overridden
1240
++	localK8sHostname string
1241
++}
1242
++
1243
++// CascadeCloud represents Cascade cloud provider's configuration.
1244
++type CascadeConfig struct {
1245
++	Global struct {
1246
++		// the Cascade Controller endpoint
1247
++		CloudTarget string `gcfg:"target"`
1248
++		// Cascade Controller tenantName name
1249
++		TenantName string `gcfg:"tenantName"`
1250
++		// Cascade Controller cluster ID
1251
++		ClusterID string `gcfg:"clusterID"`
1252
++		// Authentication server endpoint for Cascade Controller
1253
++		AuthEndpoint string `gcfg:"authEndpoint"`
1254
++		// Lightwave domain name for the node
1255
++		DomainName string `gcfg:"domainName"`
1256
++		// DNS name of the node.
1257
++		DNSName string `gcfg:"dnsName"`
1258
++		// Region in which the cluster is in
1259
++		Region string `gcfg:"region"`
1260
++		// Availability zone in which the cluster is in
1261
++		Zone string `gcfg:"zone"`
1262
++	}
1263
++}
1264
++
1265
++// Disks is interface for manipulation with Cascade Controller Persistent Disks.
1266
++type Disks interface {
1267
++	// AttachDisk attaches given disk to given node. Current node
1268
++	// is used when nodeName is empty string.
1269
++	AttachDisk(diskID string, nodeName k8stypes.NodeName) (string, error)
1270
++
1271
++	// DetachDisk detaches given disk to given node. Current node
1272
++	// is used when nodeName is empty string.
1273
++	DetachDisk(diskID string, nodeName k8stypes.NodeName) error
1274
++
1275
++	// DiskIsAttached checks if a disk is attached to the given node.
1276
++	DiskIsAttached(diskID string, nodeName k8stypes.NodeName) (bool, error)
1277
++
1278
++	// DisksAreAttached is a batch function to check if a list of disks are attached
1279
++	// to the node with the specified NodeName.
1280
++	DisksAreAttached(diskID []string, nodeName k8stypes.NodeName) (map[string]bool, error)
1281
++
1282
++	// CreateDisk creates a new PD with given properties.
1283
++	CreateDisk(volumeOptions *VolumeOptions) (diskID string, err error)
1284
++
1285
++	// DeleteDisk deletes PD.
1286
++	DeleteDisk(diskID string) error
1287
++
1288
++	// Get labels to apply to volume on creation.
1289
++	GetVolumeLabels(diskID string) (map[string]string, error)
1290
++}
1291
++
1292
++// VolumeOptions specifies capacity, tags, name and flavorID for a volume.
1293
++type VolumeOptions struct {
1294
++	CapacityGB int
1295
++	Tags       map[string]string
1296
++	Name       string
1297
++	Flavor     string
1298
++}
1299
++
1300
++func readConfig(config io.Reader) (*CascadeConfig, error) {
1301
++	if config == nil {
1302
++		err := fmt.Errorf("Cascade Cloud Provider: config file is missing. Please restart with " +
1303
++			"--cloud-provider=cascade --cloud-config=[path_to_config_file]")
1304
++		return nil, err
1305
++	}
1306
++
1307
++	var cfg CascadeConfig
1308
++	err := gcfg.ReadInto(&cfg, config)
1309
++	return &cfg, err
1310
++}
1311
++
1312
++func init() {
1313
++	cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
1314
++		cfg, err := readConfig(config)
1315
++		if err != nil {
1316
++			glog.Errorf("Cascade Cloud Provider: failed to read in cloud provider config file. Error[%v]", err)
1317
++			return nil, err
1318
++		}
1319
++		return newCascadeCloud(cfg)
1320
++	})
1321
++}
1322
++
1323
++func newCascadeCloud(cfg *CascadeConfig) (*CascadeCloud, error) {
1324
++	if len(cfg.Global.CloudTarget) == 0 {
1325
++		return nil, fmt.Errorf("Cascade Controller endpoint was not specified.")
1326
++	}
1327
++
1328
++	// Get local hostname
1329
++	hostname, err := os.Hostname()
1330
++	if err != nil {
1331
++		glog.Errorf("Cascade Cloud Provider: get hostname failed. Error[%v]", err)
1332
++		return nil, err
1333
++	}
1334
++
1335
++	cc := CascadeCloud{
1336
++		cfg:              cfg,
1337
++		localHostname:    hostname,
1338
++		localK8sHostname: "",
1339
++	}
1340
++
1341
++	// Instantiate the auth and API clients only on the master nodes. Kubelets running on the workers don't need them as
1342
++	// they are used primarily for making API calls to Cascade.
1343
++	if strings.HasPrefix(hostname, MasterPrefix) {
1344
++		if cc.authClient, err = NewAuthClient(cfg); err != nil {
1345
++			return nil, err
1346
++		}
1347
++
1348
++		if cc.apiClient, err = NewClient(cfg, cc.authClient); err != nil {
1349
++			return nil, err
1350
++		}
1351
++	}
1352
++
1353
++	return &cc, nil
1354
++}
1355
++
1356
++// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
1357
++func (cc *CascadeCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
1358
++
1359
++// Instances returns an implementation of Instances for Cascade Controller.
1360
++func (cc *CascadeCloud) Instances() (cloudprovider.Instances, bool) {
1361
++	return cc, true
1362
++}
1363
++
1364
++func (cc *CascadeCloud) Clusters() (cloudprovider.Clusters, bool) {
1365
++	return nil, true
1366
++}
1367
++
1368
++// ProviderName returns the cloud provider ID.
1369
++func (cc *CascadeCloud) ProviderName() string {
1370
++	return ProviderName
1371
++}
1372
++
1373
++// LoadBalancer returns an implementation of LoadBalancer for Cascade Controller.
1374
++func (cc *CascadeCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
1375
++	return cc, true
1376
++}
1377
++
1378
++// Zones returns an implementation of Zones for Cascade Controller.
1379
++func (cc *CascadeCloud) Zones() (cloudprovider.Zones, bool) {
1380
++	return cc, true
1381
++}
1382
++
1383
++func (cc *CascadeCloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
1384
++	return cloudprovider.Zone{
1385
++		Region:        cc.cfg.Global.Region,
1386
++		FailureDomain: cc.cfg.Global.Zone,
1387
++	}, nil
1388
++}
1389
++
1390
++// GetZoneByProviderID implements Zones.GetZoneByProviderID
1391
++// This is particularly useful in external cloud providers where the kubelet
1392
++// does not initialize node data.
1393
++func (cc *CascadeCloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
1394
++	return cloudprovider.Zone{}, errors.New("unimplemented")
1395
++}
1396
++
1397
++// GetZoneByNodeName implements Zones.GetZoneByNodeName
1398
++// This is particularly useful in external cloud providers where the kubelet
1399
++// does not initialize node data.
1400
++func (cc *CascadeCloud) GetZoneByNodeName(ctx context.Context, nodeName k8stypes.NodeName) (cloudprovider.Zone, error) {
1401
++	return cloudprovider.Zone{}, errors.New("unimeplemented")
1402
++}
1403
++
1404
++// Routes returns false since the interface is not supported for Cascade Controller.
1405
++func (cc *CascadeCloud) Routes() (cloudprovider.Routes, bool) {
1406
++	return nil, false
1407
++}
1408
++
1409
++// ScrubDNS filters DNS settings for pods.
1410
++func (cc *CascadeCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
1411
++	return nameservers, searches
1412
++}
1413
++
1414
++// HasClusterID returns true if the cluster has a clusterID
1415
++func (cc *CascadeCloud) HasClusterID() bool {
1416
++	return true
1417
++}
1418
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/cascade_instances.go kubernetes/pkg/cloudprovider/providers/cascade/cascade_instances.go
1419
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/cascade_instances.go	1970-01-01 00:00:00.000000000 +0000
1420
+@@ -0,0 +1,91 @@
1421
++package cascade
1422
++
1423
++import (
1424
++	"context"
1425
++	"errors"
1426
++	"k8s.io/api/core/v1"
1427
++	k8stypes "k8s.io/apimachinery/pkg/types"
1428
++	"strings"
1429
++)
1430
++
1431
++// NodeAddresses is an implementation of Instances.NodeAddresses. In the future, private IP addresses, external IPs, etc.
1432
++// will be added based on need.
1433
++func (cc *CascadeCloud) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) {
1434
++	addresses := []v1.NodeAddress{}
1435
++	addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: cc.cfg.Global.DNSName})
1436
++	return addresses, nil
1437
++}
1438
++
1439
++// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
1440
++// This method will not be called from the node that is requesting this ID, i.e. metadata service
1441
++// and other local methods cannot be used here
1442
++func (cc *CascadeCloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
1443
++	// Get the name of the VM using the ID and generate the DNS name based on the VM name.
1444
++	vm, err := cc.apiClient.GetVM(providerID)
1445
++	if err != nil {
1446
++		return nil, err
1447
++	}
1448
++	// Get the DNS name for the master VM and replace the VM name portion with the requested VM name.
1449
++	dnsNameParts := strings.SplitN(cc.cfg.Global.DNSName, ".", 2)
1450
++	if len(dnsNameParts) != 2 {
1451
++		return nil, errors.New("Cascade cloud provider: Invalid DNS name specified in the configuation. " +
1452
++			"Cannot get NodeAddressByProviderID.")
1453
++	}
1454
++	dnsAddress := StringVal(vm.Name) + "." + dnsNameParts[1]
1455
++	addresses := []v1.NodeAddress{}
1456
++	addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: dnsAddress})
1457
++	return addresses, nil
1458
++}
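Concretely, the derivation above works out as follows (placeholder values):

	cc.cfg.Global.DNSName  // "master-1234.k8s.example.local"
	dnsNameParts           // ["master-1234", "k8s.example.local"]
	StringVal(vm.Name)     // "worker-5678"
	dnsAddress             // "worker-5678.k8s.example.local"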
1459
++
1460
++func (cc *CascadeCloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
1461
++	return errors.New("unimplemented")
1462
++}
1463
++
1464
++// CurrentNodeName returns the node name based on the host name. For Cascade Kubernetes nodes, we use the host name as the
1465
++// node name.
1466
++func (cc *CascadeCloud) CurrentNodeName(ctx context.Context, hostname string) (k8stypes.NodeName, error) {
1467
++	cc.localK8sHostname = hostname
1468
++	return k8stypes.NodeName(hostname), nil
1469
++}
1470
++
1471
++// ExternalID returns the cloud provider ID of the specified instance (deprecated).
1472
++// Note: We do not call Cascade Controller here to check if the instance is alive or not because that requires the
1473
++// worker nodes to also login to Cascade Controller. That check is used by Kubernetes to proactively remove nodes that
1474
++// the cloud provider believes are no longer available. Even without that check, Kubernetes will remove those nodes eventually.
1475
++// So we are not losing much by not doing that check.
1476
++func (cc *CascadeCloud) ExternalID(ctx context.Context, nodeName k8stypes.NodeName) (string, error) {
1477
++	return getInstanceIDFromNodeName(nodeName)
1478
++}
1479
++
1480
++// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
1481
++// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
1482
++func (cc *CascadeCloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
1483
++	return false, errors.New("unimplemented")
1484
++}
1485
++
1486
++// InstanceID returns the cloud provider ID of the specified instance.
1487
++func (cc *CascadeCloud) InstanceID(ctx context.Context, nodeName k8stypes.NodeName) (string, error) {
1488
++	return getInstanceIDFromNodeName(nodeName)
1489
++}
1490
++
1491
++// getInstanceIDFromNodeName gets the Cascade VM ID from the Kubernetes node name.
1492
++func getInstanceIDFromNodeName(nodeName k8stypes.NodeName) (string, error) {
1493
++	// nodeName is of the format master-instance-id or worker-instance-id. To compute the instance ID, we just need to
1494
++	// get the portion after master- or worker-. That is what we do below.
1495
++	nodeParts := strings.SplitN(string(nodeName), "-", 2)
1496
++	if len(nodeParts) != 2 {
1497
++		return "", errors.New("Cascade cloud provider: Invalid node name. Cannot fetch instance ID.")
1498
++	}
1499
++	return nodeParts[1], nil
1500
++}
1501
++
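A few worked examples of the mapping (instance IDs are placeholders):

	getInstanceIDFromNodeName("master-4f6e2a")  // -> "4f6e2a", nil
	getInstanceIDFromNodeName("worker-9c81d0")  // -> "9c81d0", nil
	getInstanceIDFromNodeName("standalone")     // -> "", error (no "-" separator)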
1502
++// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
1503
++// This method will not be called from the node that is requesting this ID, i.e. metadata service
1504
++// and other local methods cannot be used here
1505
++func (cc *CascadeCloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
1506
++	return "", errors.New("unimplemented")
1507
++}
1508
++
1509
++func (cc *CascadeCloud) InstanceType(ctx context.Context, nodeName k8stypes.NodeName) (string, error) {
1510
++	return "", nil
1511
++}
1512
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go kubernetes/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go
1513
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go	1970-01-01 00:00:00.000000000 +0000
1514
+@@ -0,0 +1,292 @@
1515
++package cascade
1516
++
1517
++import (
1518
++	"context"
1519
++	"fmt"
1520
++	"github.com/golang/glog"
1521
++	"k8s.io/api/core/v1"
1522
++	"k8s.io/apimachinery/pkg/types"
1523
++	"k8s.io/kubernetes/pkg/api/v1/service"
1524
++	"k8s.io/kubernetes/pkg/cloudprovider"
1525
++)
1526
++
1527
++const TCP_PROTOCOL = "TCP"
1528
++
1529
++const HTTP_PROTOCOL = "HTTP"
1530
++
1531
++// EnsureLoadBalancer creates or updates a Cascade load balancer
1532
++func (cc *CascadeCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, k8sService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
1533
++	logger := newLoadBalancerLogger(clusterName, k8sService, "EnsureLoadBalancer")
1534
++
1535
++	loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
1536
++	logger.Infof("Load balancer name: %s", loadBalancerName)
1537
++
1538
++	// Sanity checks
1539
++	if k8sService.Spec.SessionAffinity != v1.ServiceAffinityNone {
1540
++		logger.Errorf("Unsupported load balancer session affinity: %+v", k8sService.Spec.SessionAffinity)
1541
++		return nil, fmt.Errorf("Unsupported load balancer session affinity: %+v", k8sService.Spec.SessionAffinity)
1542
++	}
1543
++
1544
++	if len(k8sService.Spec.Ports) == 0 {
1545
++		logger.Errorf("No port mapping is specified")
1546
++		return nil, fmt.Errorf("No port mapping is specified")
1547
++	}
1548
++
1549
++	// Create load balancer port maps
1550
++	portMaps := []*LoadBalancerPortMap{}
1551
++	for _, port := range k8sService.Spec.Ports {
1552
++		if port.Protocol != v1.ProtocolTCP {
1553
++			logger.Warningf("Ignoring port that does not use TCP protocol: %+v", port)
1554
++			continue
1555
++		}
1556
++
1557
++		if port.NodePort == 0 {
1558
++			logger.Warningf("Ignoring port without node port defined: %+v", port)
1559
++			continue
1560
++		}
1561
++
1562
++		// TODO: For now we only support SSL pass through. All port mappings are using TCP protocol.
1563
++		//       Also note that we allow all external traffic to access the ports.
1564
++		portMap := &LoadBalancerPortMap{
1565
++			InstancePort:         Int64Ptr(int64(port.NodePort)),
1566
++			InstanceProtocol:     StringPtr(TCP_PROTOCOL),
1567
++			LoadBalancerPort:     Int64Ptr(int64(port.Port)),
1568
++			LoadBalancerProtocol: StringPtr(TCP_PROTOCOL),
1569
++		}
1570
++		portMaps = append(portMaps, portMap)
1571
++	}
1572
++
++	// Guard against every port having been filtered out above; indexing portMaps[0]
++	// for the default health check below would otherwise panic.
++	if len(portMaps) == 0 {
++		logger.Errorf("No usable TCP port mapping is specified")
++		return nil, fmt.Errorf("No usable TCP port mapping is specified")
++	}
++
1573
++	// Create load balancer health check
1574
++	healthCheck := &LoadBalancerHealthCheck{
1575
++		HealthyThreshold:  5,
1576
++		IntervalInSeconds: 10,
1577
++	}
1578
++	if healthCheckPath, healthCheckNodePort := service.GetServiceHealthCheckPathPort(k8sService); healthCheckPath != "" {
1579
++		logger.Infof("HTTP health checks on: %s:%d", healthCheckPath, healthCheckNodePort)
1580
++		healthCheck.Path = StringPtr(healthCheckPath)
1581
++		healthCheck.Port = Int64Ptr(int64(healthCheckNodePort))
1582
++		healthCheck.Protocol = StringPtr(HTTP_PROTOCOL)
1583
++	} else {
1584
++		logger.Infof("TCP health check on port: %d", Int64Val(portMaps[0].InstancePort))
1585
++		healthCheck.Port = portMaps[0].InstancePort
1586
++		healthCheck.Protocol = StringPtr(TCP_PROTOCOL)
1587
++	}
1588
++
1589
++	// Create load balancer
1590
++	createSpec := &LoadBalancerCreateSpec{
1591
++		Name:        StringPtr(loadBalancerName),
1592
++		Type:        StringPtr("PUBLIC"),
1593
++		PortMaps:    portMaps,
1594
++		HealthCheck: healthCheck,
1595
++		SubDomain:   StringPtr(k8sService.Name),
1596
++	}
1597
++	logger.Infof("Load balancer create spec: %+v", *createSpec)
1598
++
1599
++	task, err := cc.apiClient.CreateOrUpdateLoadBalancer(createSpec)
1600
++	if err != nil {
1601
++		logger.Errorf("Failed to create or update load balancer. Error: [%v]", err)
1602
++		return nil, err
1603
++	}
1604
++
1605
++	_, err = cc.apiClient.WaitForTask(StringVal(task.ID))
1606
++	if err != nil {
1607
++		logger.Errorf("Failed to poll task status of creating or updating load balancer. Error: [%v]", err)
1608
++		return nil, err
1609
++	}
1610
++
1611
++	// Apply VM update to load balancer
1612
++	err = cc.updateLoadBalancerVMs(nodes, loadBalancerName, logger)
1613
++	if err != nil {
1614
++		// The private function already did logging. No need to log again.
1615
++		return nil, err
1616
++	}
1617
++
1618
++	// Get load balancer
1619
++	loadBalancer, err := cc.apiClient.GetLoadBalancer(StringPtr(loadBalancerName))
1620
++	if err != nil {
1621
++		glog.Errorf("Failed to get load balancer. Error: [%v]", err)
1622
++		return nil, err
1623
++	}
1624
++
1625
++	return toLoadBalancerStatus(loadBalancer), nil
1626
++}
1627
++
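To make the translation above concrete (placeholder numbers): a Service port {Port: 443, NodePort: 30443, Protocol: TCP} becomes

	&LoadBalancerPortMap{
		InstancePort:         Int64Ptr(30443),
		InstanceProtocol:     StringPtr(TCP_PROTOCOL),
		LoadBalancerPort:     Int64Ptr(443),
		LoadBalancerProtocol: StringPtr(TCP_PROTOCOL),
	}

and, when the Service carries no HTTP health-check path, the default TCP health check targets the first instance port (30443 here).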
1628
++// GetLoadBalancer returns the information about a Cascade load balancer
1629
++func (cc *CascadeCloud) GetLoadBalancer(ctx context.Context, clusterName string, k8sService *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
1630
++	logger := newLoadBalancerLogger(clusterName, k8sService, "GetLoadBalancer")
1631
++
1632
++	loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
1633
++	logger.Infof("Load balancer name: %s", loadBalancerName)
1634
++
1635
++	// Get load balancer
1636
++	loadBalancer, err := cc.apiClient.GetLoadBalancer(StringPtr(loadBalancerName))
1637
++	if err != nil {
1638
++		logger.Errorf("Failed to get load balancer. Error: [%v]", err)
1639
++		// Do not return error here because we want the caller of this function to determine
1640
++		// what to do with the not-found situation.
1641
++		switch err.(type) {
1642
++		case APIError:
1643
++			if err.(APIError).ErrorCode == NotFoundError {
1644
++				return nil, false, nil
1645
++			}
1646
++		}
1647
++		return nil, false, err
1648
++	}
1649
++
1650
++	return toLoadBalancerStatus(loadBalancer), true, nil
1651
++}
1652
++
1653
++// UpdateLoadBalancer updates the node information of a Cascade load balancer
1654
++func (cc *CascadeCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, k8sService *v1.Service, nodes []*v1.Node) error {
1655
++	logger := newLoadBalancerLogger(clusterName, k8sService, "UpdateLoadBalancer")
1656
++
1657
++	loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
1658
++	logger.Infof("Load balancer name: %s", loadBalancerName)
1659
++
1660
++	err := cc.updateLoadBalancerVMs(nodes, loadBalancerName, logger)
1661
++	if err != nil {
1662
++		// The private function already did logging. No need to log again.
1663
++		return err
1664
++	}
1665
++
1666
++	return nil
1667
++}
1668
++
1669
++// EnsureLoadBalancerDeleted deletes a Cascade load balancer
1670
++func (cc *CascadeCloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, k8sService *v1.Service) error {
1671
++	logger := newLoadBalancerLogger(clusterName, k8sService, "EnsureLoadBalancerDeleted")
1672
++
1673
++	loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
1674
++	logger.Infof("Load balancer name: %s", loadBalancerName)
1675
++
1676
++	task, err := cc.apiClient.DeleteLoadBalancer(StringPtr(loadBalancerName))
1677
++	if err != nil {
1678
++		logger.Errorf("Failed to delete load balancer. Error: [%v]", err)
1679
++		// If we get a NotFound error, we assume that the load balancer is already deleted. So we don't return an error
1680
++		// here.
1681
++		switch err.(type) {
1682
++		case APIError:
1683
++			if err.(APIError).ErrorCode == NotFoundError {
1684
++				return nil
1685
++			}
1686
++		}
1687
++		return err
1688
++	}
1689
++
1690
++	_, err = cc.apiClient.WaitForTask(StringVal(task.ID))
1691
++	if err != nil {
1692
++		logger.Errorf("Failed to poll task status of deleting load balancer. Error: [%v]", err)
1693
++		return err
1694
++	}
1695
++
1696
++	return nil
1697
++}
1698
++
1699
++func (cc *CascadeCloud) updateLoadBalancerVMs(
1700
++	nodes []*v1.Node, loadBalancerName string, logger *loadBalancerLogger) error {
1701
++
1702
++	// Apply VM update to the load balancer
1703
++	loadBalancerVMs := make([]*LoadBalancerVM, 0)
1704
++
1705
++	for _, node := range nodes {
1706
++		// If the node does not have a name, we cannot derive its instance ID. Therefore we skip this node.
1707
++		if len(node.Name) == 0 {
1708
++			logger.Warningf("Node %s does not have a name. Skip updating this VM for load balancer", node.UID)
1709
++			continue
1710
++		}
1711
++
1712
++		// If we cannot get the instance ID, something is wrong on the Cascade Controller side.
1713
++		// However, we should tolerate such failure and continue the load balancer VM update
1714
++		// by skipping this VM.
1715
++		instanceID, err := cc.InstanceID(context.TODO(), types.NodeName(node.Name))
1716
++		if err != nil {
1717
++			logger.Warningf("Unable to get instance ID for node %s, skip updating this VM for load balancer. Error [%v]", node.Name, err)
1718
++			continue
1719
++		}
1720
++
1721
++		loadBalancerVMs = append(loadBalancerVMs, &LoadBalancerVM{
1722
++			ID: StringPtr(instanceID),
1723
++		})
1724
++	}
1725
++
1726
++	if len(loadBalancerVMs) == 0 {
1727
++		logger.Infof("No nodes to be added to the load balancer. Skip updating load balancer VMs")
1728
++		return nil
1729
++	}
1730
++
1731
++	vmUpdate := &LoadBalancerVMUpdate{
1732
++		VMIds: loadBalancerVMs,
1733
++	}
1734
++	logger.Infof("Load balancer VM update spec: %+v", vmUpdate.VMIds)
1735
++
1736
++	task, err := cc.apiClient.ApplyVMsToLoadBalancer(StringPtr(loadBalancerName), vmUpdate)
1737
++	if err != nil {
1738
++		logger.Errorf("Failed to update load balancer VMs. Error: [%v]", err)
1739
++		return err
1740
++	}
1741
++
1742
++	_, err = cc.apiClient.WaitForTask(StringVal(task.ID))
1743
++	if err != nil {
1744
++		logger.Errorf("Failed to poll task status of updating load balancer VMs. Error: [%v]", err)
1745
++		return err
1746
++	}
1747
++
1748
++	return nil
1749
++}
1750
++
1751
++func toLoadBalancerStatus(lb *LoadBalancer) *v1.LoadBalancerStatus {
1752
++	var endpoint string
1753
++	if lb != nil && lb.Endpoint != nil {
1754
++		endpoint = StringVal(lb.Endpoint)
1755
++	}
1756
++
1757
++	return &v1.LoadBalancerStatus{
1758
++		Ingress: []v1.LoadBalancerIngress{
1759
++			{
1760
++				Hostname: endpoint,
1761
++			},
1762
++		},
1763
++	}
1764
++}
1765
++
1766
++type loadBalancerLogger struct {
1767
++	clusterName string
1768
++	k8sService  *v1.Service
1769
++	callingFunc string
1770
++}
1771
++
1772
++func newLoadBalancerLogger(clusterName string, k8sService *v1.Service, callingFunc string) *loadBalancerLogger {
1773
++	return &loadBalancerLogger{
1774
++		clusterName: clusterName,
1775
++		k8sService:  k8sService,
1776
++		callingFunc: callingFunc,
1777
++	}
1778
++}
1779
++
1780
++func (l *loadBalancerLogger) getLogMsg(
1781
++	msgTemplate string, args ...interface{}) string {
1782
++
1783
++	errorMsg := fmt.Sprintf("Cascade Cloud Provider::%s::Cluster [%s] Service [%s]: %s",
1784
++		l.callingFunc, l.clusterName, l.k8sService.Name,
1785
++		msgTemplate)
1786
++	return fmt.Sprintf(errorMsg, args...)
1787
++}
1788
++
1789
++func (l *loadBalancerLogger) Errorf(msgTemplate string, args ...interface{}) {
1790
++	glog.Errorln(l.getLogMsg(msgTemplate, args...))
1791
++}
1792
++
1793
++func (l *loadBalancerLogger) Warningf(msgTemplate string, args ...interface{}) {
1794
++	glog.Warningln(l.getLogMsg(msgTemplate, args...))
1795
++}
1796
++
1797
++func (l *loadBalancerLogger) Infof(msgTemplate string, args ...interface{}) {
1798
++	glog.Infoln(l.getLogMsg(msgTemplate, args...))
1799
++}
1800
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/client.go kubernetes/pkg/cloudprovider/providers/cascade/client.go
1801
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/client.go	1970-01-01 00:00:00.000000000 +0000
1802
+@@ -0,0 +1,395 @@
1803
++package cascade
1804
++
1805
++import (
1806
++	"bytes"
1807
++	"crypto/tls"
1808
++	"crypto/x509"
1809
++	"encoding/json"
1810
++	"fmt"
1811
++	"github.com/golang/glog"
1812
++	"net/http"
1813
++	"strings"
1814
++	"time"
1815
++)
1816
++
1817
++// Represents stateless context needed to call Cascade APIs.
1818
++// Note that we are implementing the Cascade APIs manually instead of using the swagger-generated code
1820
++// because swagger uses a different version of the openapi library than Kubernetes. It is difficult to
1820
++// address the version conflict to make it compile.
1821
++type Client struct {
1822
++	cfg        *ClientConfig
1823
++	options    ClientOptions
1824
++	restClient *restClient
1825
++}
1826
++
1827
++type ClientConfig struct {
1828
++	tenantName string
1829
++	clusterID  string
1830
++	region     string
1831
++	endpoint   string
1832
++}
1833
++
1834
++// TokenOptions represents the authentication tokens.
1835
++type TokenOptions struct {
1836
++	AccessToken  string `json:"access_token"`
1837
++	ExpiresIn    int    `json:"expires_in"`
1838
++	RefreshToken string `json:"refresh_token,omitempty"`
1839
++	IDToken      string `json:"id_token"`
1840
++	TokenType    string `json:"token_type"`
1841
++}
1842
++
1843
++type TokenCallback func(string)
1844
++
1845
++// ClientOptions specifies the options for Client.
1846
++type ClientOptions struct {
1847
++	// When using the Tasks.Wait APIs, defines the duration of how long
1848
++	// we should continue to poll the server. Default is 30 minutes.
1849
++	// TasksAPI.WaitTimeout() can be used to specify timeout on
1850
++	// individual calls.
1851
++	TaskPollTimeout time.Duration
1852
++
1853
++	// Whether or not to ignore any TLS errors when talking to Cascade,
1854
++	// false by default.
1855
++	IgnoreCertificate bool
1856
++
1857
++	// List of root CA's to use for server validation
1858
++	// nil by default.
1859
++	RootCAs *x509.CertPool
1860
++
1861
++	// For tasks APIs, defines the number of retries to make in the event
1862
++	// of an error. Default is 3.
1863
++	TaskRetryCount int
1864
++
1865
++	// Tokens for user authentication. Default is empty.
1866
++	TokenOptions *TokenOptions
1867
++}
1868
++
1869
++const minimumTaskPollDelay = 500 * time.Millisecond
1870
++
1871
++// NewClient creates a new Cascade client which can be used to make API calls to Cascade.
1872
++func NewClient(cfg *CascadeConfig, authClient *AuthClient) (c *Client, err error) {
1873
++	tokenOptions, err := authClient.GetTokensByMachineAccount()
1874
++	if err != nil {
1875
++		glog.Errorf("Cascade Cloud Provider: Failed to create new client due to error: %+v", err)
1876
++		return
1877
++	}
1878
++
1879
++	options := &ClientOptions{
1880
++		TaskPollTimeout:   30 * time.Minute,
1881
++		TaskRetryCount:    3,
1882
++		TokenOptions:      tokenOptions,
1883
++		IgnoreCertificate: false,
1884
++		RootCAs:           nil,
1885
++	}
1886
++
1887
++	tr := &http.Transport{
1888
++		TLSClientConfig: &tls.Config{
1889
++			InsecureSkipVerify: options.IgnoreCertificate,
1890
++			RootCAs:            options.RootCAs},
1891
++	}
1892
++
1893
++	tokenCallback := func(newToken string) {
1894
++		c.options.TokenOptions.AccessToken = newToken
1895
++	}
1896
++
1897
++	restClient := &restClient{
1898
++		authClient:                authClient,
1899
++		httpClient:                &http.Client{Transport: tr},
1900
++		UpdateAccessTokenCallback: tokenCallback,
1901
++	}
1902
++
1903
++	clientConfig := &ClientConfig{
1904
++		tenantName: cfg.Global.TenantName,
1905
++		clusterID:  cfg.Global.ClusterID,
1906
++		region:     cfg.Global.Region,
1907
++		endpoint:   strings.TrimRight(cfg.Global.CloudTarget, "/"),
1908
++	}
1909
++
1910
++	c = &Client{
1911
++		cfg:        clientConfig,
1912
++		restClient: restClient,
1913
++		// Ensure a copy of options is made, rather than using a pointer
1914
++		// which may change out from underneath if misused by the caller.
1915
++		options: *options,
1916
++	}
1917
++
1918
++	return
1919
++}
1920
++
1921
++// Gets VM with the specified ID.
1922
++func (api *Client) GetVM(vmID string) (vm *VM, err error) {
1923
++	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/vms/%s", api.cfg.endpoint, api.cfg.tenantName,
1924
++		api.cfg.clusterID, vmID)
1925
++	res, err := api.restClient.Get(uri, api.options.TokenOptions)
1926
++	if err != nil {
1927
++		return
1928
++	}
1929
++	defer res.Body.Close()
1930
++	res, err = getError(res)
1931
++	if err != nil {
1932
++		return
1933
++	}
1934
++	vm = &VM{}
1935
++	err = json.NewDecoder(res.Body).Decode(vm)
1936
++	return
1937
++}
1938
++
1939
++// Gets disk with the specified ID.
1940
++func (api *Client) GetDisk(diskID string) (disk *PersistentDisk, err error) {
1941
++	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/disks/%s", api.cfg.endpoint, api.cfg.tenantName,
1942
++		api.cfg.clusterID, diskID)
1943
++	res, err := api.restClient.Get(uri, api.options.TokenOptions)
1944
++	if err != nil {
1945
++		return
1946
++	}
1947
++	defer res.Body.Close()
1948
++	res, err = getError(res)
1949
++	if err != nil {
1950
++		return
1951
++	}
1952
++	disk = &PersistentDisk{}
1953
++	err = json.NewDecoder(res.Body).Decode(disk)
1954
++	return
1955
++}
1956
++
1957
++// Creates a disk under the cluster.
1958
++func (api *Client) CreateDisk(spec *DiskCreateSpec) (task *Task, err error) {
1959
++	body, err := json.Marshal(spec)
1960
++	if err != nil {
1961
++		return
1962
++	}
1963
++	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/disks", api.cfg.endpoint, api.cfg.tenantName,
1964
++		api.cfg.clusterID)
1965
++	res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
1966
++	if err != nil {
1967
++		return
1968
++	}
1969
++	defer res.Body.Close()
1970
++	task, err = getTask(getError(res))
1971
++	return
1972
++}
1973
++
1974
++// Deletes a disk with the specified ID.
1975
++func (api *Client) DeleteDisk(diskID string) (task *Task, err error) {
1976
++	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/disks/%s", api.cfg.endpoint, api.cfg.tenantName,
1977
++		api.cfg.clusterID, diskID)
1978
++	res, err := api.restClient.Delete(uri, api.options.TokenOptions)
1979
++	if err != nil {
1980
++		return
1981
++	}
1982
++	defer res.Body.Close()
1983
++	task, err = getTask(getError(res))
1984
++	return
1985
++}
1986
++
1987
++// Attaches a disk to the specified VM.
1988
++func (api *Client) AttachDisk(vmID string, op *VMDiskOperation) (task *Task, err error) {
1989
++	body, err := json.Marshal(op)
1990
++	if err != nil {
1991
++		return
1992
++	}
1993
++	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/vms/%s/attach_disk", api.cfg.endpoint, api.cfg.tenantName,
1994
++		api.cfg.clusterID, vmID)
1995
++	res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
1996
++	if err != nil {
1997
++		return
1998
++	}
1999
++	defer res.Body.Close()
2000
++	task, err = getTask(getError(res))
2001
++	return
2002
++}
2003
++
2004
++// Detaches a disk from the specified VM.
2005
++func (api *Client) DetachDisk(vmID string, op *VMDiskOperation) (task *Task, err error) {
2006
++	body, err := json.Marshal(op)
2007
++	if err != nil {
2008
++		return
2009
++	}
2010
++	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/vms/%s/detach_disk", api.cfg.endpoint, api.cfg.tenantName,
2011
++		api.cfg.clusterID, vmID)
2012
++	res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
2013
++	if err != nil {
2014
++		return
2015
++	}
2016
++	defer res.Body.Close()
2017
++	task, err = getTask(getError(res))
2018
++	return
2019
++}
2020
++
2021
++// Gets a task by ID.
2022
++func (api *Client) GetTask(taskID string) (task *Task, err error) {
2023
++	uri := fmt.Sprintf("%s/v1/tenants/%s/tasks/%s?region=%s", api.cfg.endpoint, api.cfg.tenantName,
2024
++		taskID, api.cfg.region)
2025
++	res, err := api.restClient.Get(uri, api.options.TokenOptions)
2026
++	if err != nil {
2027
++		return
2028
++	}
2029
++	defer res.Body.Close()
2030
++	result, err := getTask(getError(res))
2031
++	return result, err
2032
++}
2033
++
2034
++// Waits for a task to complete by polling the tasks API until a task returns with the state COMPLETED or ERROR.
2035
++func (api *Client) WaitForTask(taskID string) (task *Task, err error) {
2036
++	start := time.Now()
2037
++	numErrors := 0
2038
++	maxErrors := api.options.TaskRetryCount
2039
++	backoffMultiplier := 1
2040
++
2041
++	for time.Since(start) < api.options.TaskPollTimeout {
2042
++		task, err = api.GetTask(taskID)
2043
++		if err != nil {
2044
++			switch err.(type) {
2045
++			// If an APIError comes back, something is wrong; return the error to the caller
2046
++			case APIError:
2047
++				return
2048
++			// For other errors, retry before giving up
2049
++			default:
2050
++				numErrors++
2051
++				if numErrors > maxErrors {
2052
++					return
2053
++				}
2054
++			}
2055
++		} else {
2056
++			// Reset the error count any time a successful call is made
2057
++			numErrors = 0
2058
++			if StringVal(task.State) == "COMPLETED" {
2059
++				return
2060
++			}
2061
++			if StringVal(task.State) == "ERROR" {
2062
++				err = TaskError{StringVal(task.ID), getFailedStep(task)}
2063
++				return
2064
++			}
2065
++		}
2066
++
2067
++		// Perform backoff based on how long it has been since we started polling. The logic is as follows:
2068
++		// For the first 10 seconds, poll every 500 milliseconds.
2069
++		// From there till the first 1 minute, poll every 1 second.
2070
++		// From there till the first 10 minutes, poll every 5 seconds.
2071
++		// From there till the timeout (30 minutes), poll every 10 seconds.
2072
++		elapsedTime := time.Since(start)
2073
++		if elapsedTime > 10*time.Second && elapsedTime <= 60*time.Second {
2074
++			backoffMultiplier = 2
2075
++		} else if elapsedTime > 60*time.Second && elapsedTime <= 600*time.Second {
2076
++			backoffMultiplier = 10
2077
++		} else if elapsedTime > 600*time.Second && elapsedTime <= api.options.TaskPollTimeout {
2078
++			backoffMultiplier = 20
2079
++		}
2080
++		time.Sleep(time.Duration(backoffMultiplier) * minimumTaskPollDelay)
2081
++	}
2082
++	err = TaskTimeoutError{taskID}
2083
++	return
2084
++}
2085
++
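The backoff logic in WaitForTask works out to the following poll cadence, in multiples of the 500 ms minimum delay:

	elapsed <= 10s         poll every 500 ms  (multiplier 1)
	10s < elapsed <= 1m    poll every 1 s     (multiplier 2)
	1m < elapsed <= 10m    poll every 5 s     (multiplier 10)
	10m < elapsed <= 30m   poll every 10 s    (multiplier 20)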
2086
++// CreateOrUpdateLoadBalancer creates a load balancer if one does not exist, or updates it otherwise
2087
++func (api *Client) CreateOrUpdateLoadBalancer(spec *LoadBalancerCreateSpec) (*Task, error) {
2088
++	body, err := json.Marshal(spec)
2089
++	if err != nil {
2090
++		return nil, err
2091
++	}
2092
++	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers", api.cfg.endpoint, api.cfg.tenantName,
2093
++		api.cfg.clusterID)
2094
++	res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
2095
++	if err != nil {
2096
++		return nil, err
2097
++	}
2098
++	defer res.Body.Close()
2099
++	return getTask(getError(res))
2100
++}
2101
++
2102
++// GetLoadBalancer returns a load balancer by name
2103
++func (api *Client) GetLoadBalancer(loadBalancerName *string) (*LoadBalancer, error) {
2104
++	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers/%s", api.cfg.endpoint, api.cfg.tenantName,
2105
++		api.cfg.clusterID, StringVal(loadBalancerName))
2106
++	res, err := api.restClient.Get(uri, api.options.TokenOptions)
2107
++	if err != nil {
2108
++		return nil, err
2109
++	}
2110
++	defer res.Body.Close()
2111
++	res, err = getError(res)
2112
++	if err != nil {
2113
++		return nil, err
2114
++	}
2115
++	loadBalancer := &LoadBalancer{}
2116
++	err = json.NewDecoder(res.Body).Decode(loadBalancer)
2117
++	return loadBalancer, err
2118
++}
2119
++
2120
++// DeleteLoadBalancer deletes a load balancer by name
2121
++func (api *Client) DeleteLoadBalancer(loadBalancerName *string) (*Task, error) {
2122
++	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers/%s", api.cfg.endpoint, api.cfg.tenantName,
2123
++		api.cfg.clusterID, StringVal(loadBalancerName))
2124
++	res, err := api.restClient.Delete(uri, api.options.TokenOptions)
2125
++	if err != nil {
2126
++		return nil, err
2127
++	}
2128
++	defer res.Body.Close()
++	return getTask(getError(res))
2129
++}
2130
++
2131
++// ApplyVMsToLoadBalancer updates the instances that are registered with the load balancer
2132
++func (api *Client) ApplyVMsToLoadBalancer(loadBalancerName *string, update *LoadBalancerVMUpdate) (*Task, error) {
2133
++	body, err := json.Marshal(update)
2134
++	if err != nil {
2135
++		return nil, err
2136
++	}
2137
++	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers/%s/update_vms", api.cfg.endpoint, api.cfg.tenantName,
2138
++		api.cfg.clusterID, StringVal(loadBalancerName))
2139
++	res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
2140
++	if err != nil {
2141
++		return nil, err
2142
++	}
2143
++	defer res.Body.Close()
2144
++	return getTask(getError(res))
2145
++}
2146
++
2147
++// GetZones gets all the zones in which the cluster's VMs reside.
2148
++func (api *Client) GetZones() (zones []string, err error) {
2149
++	uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/zones", api.cfg.endpoint, api.cfg.tenantName,
2150
++		api.cfg.clusterID)
2151
++	res, err := api.restClient.Get(uri, api.options.TokenOptions)
2152
++	if err != nil {
2153
++		return
2154
++	}
2155
++	defer res.Body.Close()
2156
++	res, err = getError(res)
2157
++	if err != nil {
2158
++		return
2159
++	}
2160
++	err = json.NewDecoder(res.Body).Decode(&zones)
2161
++	return
2162
++}
2163
++
2164
++// Reads a task object out of the HTTP response. Takes an error argument
2165
++// so that GetTask can easily wrap GetError. This function will do nothing
2166
++// if e is not nil.
2167
++// e.g. res, err := getTask(getError(someApi.Get()))
2168
++func getTask(res *http.Response, e error) (*Task, error) {
2169
++	if e != nil {
2170
++		return nil, e
2171
++	}
2172
++	var task Task
2173
++	err := json.NewDecoder(res.Body).Decode(&task)
2174
++	if err != nil {
2175
++		return nil, err
2176
++	}
2177
++	if StringVal(task.State) == "ERROR" {
2178
++		// Critical: return task as well, so that it can be examined
2179
++		// for error details.
2180
++		return &task, TaskError{StringVal(task.ID), getFailedStep(&task)}
2181
++	}
2182
++	return &task, nil
2183
++}
2184
++
2185
++// getFailedStep gets the failed step in the task to extract error details for a failed task.
2186
++func getFailedStep(task *Task) (step Step) {
2187
++	var errorStep Step
2188
++	for _, s := range task.Steps {
2189
++		if StringVal(s.State) == "ERROR" {
2190
++			errorStep = *s
2191
++			break
2192
++		}
2193
++	}
2194
++
2195
++	return errorStep
2196
++}
2197
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/oidcclient.go kubernetes/pkg/cloudprovider/providers/cascade/oidcclient.go
2198
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/oidcclient.go	1970-01-01 00:00:00.000000000 +0000
2199
+@@ -0,0 +1,297 @@
2200
++package cascade
2201
++
2202
++import (
2203
++	"crypto/tls"
2204
++	"crypto/x509"
2205
++	"encoding/json"
2206
++	"encoding/pem"
2207
++	"fmt"
2208
++	"io/ioutil"
2209
++	"log"
2210
++	"net/http"
2211
++	"net/url"
2212
++	"strings"
2213
++)
2214
++
2215
++const tokenScope string = "openid offline_access"
2216
++
2217
++// OIDCClient is client for OIDC
2218
++type OIDCClient struct {
2219
++	httpClient *http.Client
2220
++	logger     *log.Logger
2221
++
2222
++	Endpoint string
2223
++	Options  *OIDCClientOptions
2224
++}
2225
++
2226
++// OIDCClientOptions are the options for OIDCClient.
2227
++type OIDCClientOptions struct {
2228
++	// Whether or not to ignore any TLS errors when talking to Cascade,
2229
++	// false by default.
2230
++	IgnoreCertificate bool
2231
++
2232
++	// List of root CA's to use for server validation
2233
++	// nil by default.
2234
++	RootCAs *x509.CertPool
2235
++
2236
++	// The scope values to use when requesting tokens
2237
++	TokenScope string
2238
++}
2239
++
2240
++// NewOIDCClient creates an instance of OIDCClient
2241
++func NewOIDCClient(endpoint string, options *OIDCClientOptions, logger *log.Logger) (c *OIDCClient) {
2242
++	if logger == nil {
2243
++		logger = log.New(ioutil.Discard, "", log.LstdFlags)
2244
++	}
2245
++
2246
++	options = buildOptions(options)
2247
++	tr := &http.Transport{
2248
++		TLSClientConfig: &tls.Config{
2249
++			InsecureSkipVerify: options.IgnoreCertificate,
2250
++			RootCAs:            options.RootCAs},
2251
++	}
2252
++
2253
++	c = &OIDCClient{
2254
++		httpClient: &http.Client{Transport: tr},
2255
++		logger:     logger,
2256
++		Endpoint:   strings.TrimRight(endpoint, "/"),
2257
++		Options:    options,
2258
++	}
2259
++	return
2260
++}
2261
++
2262
++func buildOptions(options *OIDCClientOptions) (result *OIDCClientOptions) {
2263
++	result = &OIDCClientOptions{
2264
++		TokenScope: tokenScope,
2265
++	}
2266
++
2267
++	if options == nil {
2268
++		return
2269
++	}
2270
++
2271
++	result.IgnoreCertificate = options.IgnoreCertificate
2272
++
2273
++	if options.RootCAs != nil {
2274
++		result.RootCAs = options.RootCAs
2275
++	}
2276
++
2277
++	if options.TokenScope != "" {
2278
++		result.TokenScope = options.TokenScope
2279
++	}
2280
++
2281
++	return
2282
++}
2283
++
2284
++func (client *OIDCClient) buildURL(path string) (url string) {
2285
++	return fmt.Sprintf("%s%s", client.Endpoint, path)
2286
++}
2287
++
2288
++// Cert download helper
2289
++
2290
++const certDownloadPath string = "/afd/vecs/ssl"
2291
++
2292
++type lightWaveCert struct {
2293
++	Value string `json:"encoded"`
2294
++}
2295
++
2296
++// GetRootCerts gets the root certificates from the auth server.
2297
++func (client *OIDCClient) GetRootCerts() (certList []*x509.Certificate, err error) {
2298
++	// Use a dedicated transport for the certificate download; the original transport is restored on return.
2299
++	originalTr := client.httpClient.Transport
2300
++	defer client.setTransport(originalTr)
2301
++
2302
++	tr := &http.Transport{
2303
++		TLSClientConfig: &tls.Config{
2304
++			InsecureSkipVerify: false,
2305
++		},
2306
++	}
2307
++	client.setTransport(tr)
2308
++
2309
++	// get the certs
2310
++	resp, err := client.httpClient.Get(client.buildURL(certDownloadPath))
2311
++	if err != nil {
2312
++		return
2313
++	}
2314
++	defer resp.Body.Close()
2315
++	if resp.StatusCode != 200 {
2316
++		err = fmt.Errorf("Unexpected error retrieving auth server certs: %v %s", resp.StatusCode, resp.Status)
2317
++		return
2318
++	}
2319
++
2320
++	// parse the certs
2321
++	certsData := &[]lightWaveCert{}
2322
++	err = json.NewDecoder(resp.Body).Decode(certsData)
2323
++	if err != nil {
2324
++		return
2325
++	}
2326
++
2327
++	certList = make([]*x509.Certificate, len(*certsData))
2328
++	for idx, cert := range *certsData {
2329
++		block, _ := pem.Decode([]byte(cert.Value))
2330
++		if block == nil {
2331
++			err = fmt.Errorf("Unexpected response format: %v", certsData)
2332
++			return nil, err
2333
++		}
2334
++
2335
++		decodedCert, err := x509.ParseCertificate(block.Bytes)
2336
++		if err != nil {
2337
++			return nil, err
2338
++		}
2339
++
2340
++		certList[idx] = decodedCert
2341
++	}
2342
++
2343
++	return
2344
++}
2345
++
2346
++func (client *OIDCClient) setTransport(tr http.RoundTripper) {
2347
++	client.httpClient.Transport = tr
2348
++}
2349
++
2350
++// Metadata request helpers
2351
++const metadataPathFormat string = "/openidconnect/%s/.well-known/openid-configuration"
2352
++
2353
++// OIDCMetadataResponse is the response for Metadata request
2354
++type OIDCMetadataResponse struct {
2355
++	TokenEndpoint         string `json:"token_endpoint"`
2356
++	AuthorizationEndpoint string `json:"authorization_endpoint"`
2357
++	EndSessionEndpoint    string `json:"end_session_endpoint"`
2358
++}
2359
++
2360
++func (client *OIDCClient) getMetadata(domain string) (metadata *OIDCMetadataResponse, err error) {
2361
++	metadataPath := fmt.Sprintf(metadataPathFormat, domain)
2362
++	request, err := http.NewRequest("GET", client.buildURL(metadataPath), nil)
2363
++	if err != nil {
2364
++		return nil, err
2365
++	}
2366
++
2367
++	resp, err := client.httpClient.Do(request)
2368
++	if err != nil {
2369
++		return nil, err
2370
++	}
2371
++	defer resp.Body.Close()
2372
++
2373
++	err = client.checkResponse(resp)
2374
++	if err != nil {
2375
++		return nil, err
2376
++	}
2377
++
2378
++	metadata = &OIDCMetadataResponse{}
2379
++	err = json.NewDecoder(resp.Body).Decode(metadata)
2380
++	if err != nil {
2381
++		return nil, err
2382
++	}
2383
++
2384
++	return
2385
++}
2386
++
2387
++// Token request helpers
2388
++
2389
++const passwordGrantFormatString = "grant_type=password&username=%s&password=%s&scope=%s"
2390
++const refreshTokenGrantFormatString = "grant_type=refresh_token&refresh_token=%s"
2391
++const clientGrantFormatString = "grant_type=password&username=%s&password=%s&scope=%s&client_id=%s"
2392
++
2393
++// OIDCTokenResponse is the response for OIDC request
2394
++type OIDCTokenResponse struct {
2395
++	AccessToken  string `json:"access_token"`
2396
++	ExpiresIn    int    `json:"expires_in"`
2397
++	RefreshToken string `json:"refresh_token,omitempty"`
2398
++	IDToken      string `json:"id_token"`
2399
++	TokenType    string `json:"token_type"`
2400
++}
2401
++
2402
++// GetTokenByPasswordGrant gets OIDC tokens by password
2403
++func (client *OIDCClient) GetTokenByPasswordGrant(domain, username, password string) (tokens *OIDCTokenResponse, err error) {
2404
++	metadata, err := client.getMetadata(domain)
2405
++	if err != nil {
2406
++		return nil, err
2407
++	}
2408
++
2409
++	username = url.QueryEscape(username)
2410
++	password = url.QueryEscape(password)
2411
++	body := fmt.Sprintf(passwordGrantFormatString, username, password, client.Options.TokenScope)
2412
++	return client.getToken(metadata.TokenEndpoint, body)
2413
++}
2414
++
2415
++// GetClientTokenByPasswordGrant gets OIDC tokens by password
2416
++func (client *OIDCClient) GetClientTokenByPasswordGrant(domain, username, password, clientID string) (tokens *OIDCTokenResponse, err error) {
2417
++	metadata, err := client.getMetadata(domain)
2418
++	if err != nil {
2419
++		return nil, err
2420
++	}
2421
++
2422
++	username = url.QueryEscape(username)
2423
++	password = url.QueryEscape(password)
2424
++	clientID = url.QueryEscape(clientID)
2425
++	body := fmt.Sprintf(clientGrantFormatString, username, password, client.Options.TokenScope, clientID)
2426
++	return client.getToken(metadata.TokenEndpoint, body)
2427
++}
2428
++
2429
++// GetTokenByRefreshTokenGrant gets OIDC tokens by refresh token
2430
++func (client *OIDCClient) GetTokenByRefreshTokenGrant(domain, refreshToken string) (tokens *OIDCTokenResponse, err error) {
2431
++	metadata, err := client.getMetadata(domain)
2432
++	if err != nil {
2433
++		return nil, err
2434
++	}
2435
++
2436
++	body := fmt.Sprintf(refreshTokenGrantFormatString, refreshToken)
2437
++	return client.getToken(metadata.TokenEndpoint, body)
2438
++}
2439
++
2440
++func (client *OIDCClient) getToken(tokenEndpoint, body string) (tokens *OIDCTokenResponse, err error) {
2441
++	request, err := http.NewRequest("POST", tokenEndpoint, strings.NewReader(body))
2442
++	if err != nil {
2443
++		return nil, err
2444
++	}
2445
++	request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
2446
++
2447
++	resp, err := client.httpClient.Do(request)
2448
++	if err != nil {
2449
++		return nil, err
2450
++	}
2451
++	defer resp.Body.Close()
2452
++
2453
++	err = client.checkResponse(resp)
2454
++	if err != nil {
2455
++		return nil, err
2456
++	}
2457
++
2458
++	tokens = &OIDCTokenResponse{}
2459
++	err = json.NewDecoder(resp.Body).Decode(tokens)
2460
++	if err != nil {
2461
++		return nil, err
2462
++	}
2463
++
2464
++	return
2465
++}
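A minimal usage sketch of the password grant; the endpoint, domain, and credentials are placeholders:

	client := NewOIDCClient("https://lightwave.example.com", nil, nil)
	tokens, err := client.GetTokenByPasswordGrant("example.local", "user@example.local", "secret")
	if err == nil {
		// tokens.AccessToken authorizes API calls; tokens.RefreshToken renews it.
	}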
2466
++
2467
++// OIDCError is an OIDC error.
2468
++type OIDCError struct {
2469
++	Code    string `json:"error"`
2470
++	Message string `json:"error_description"`
2471
++}
2472
++
2473
++func (e OIDCError) Error() string {
2474
++	return fmt.Sprintf("%v: %v", e.Code, e.Message)
2475
++}
2476
++
2477
++func (client *OIDCClient) checkResponse(response *http.Response) (err error) {
2478
++	if response.StatusCode/100 == 2 {
2479
++		return
2480
++	}
2481
++
2482
++	respBody, readErr := ioutil.ReadAll(response.Body)
2483
++	if readErr != nil {
2484
++		return fmt.Errorf(
2485
++			"Status: %v, Body: %v [%v]", response.Status, string(respBody[:]), readErr)
2486
++	}
2487
++
2488
++	var oidcErr OIDCError
2489
++	err = json.Unmarshal(respBody, &oidcErr)
2490
++	if err != nil || oidcErr.Code == "" {
2491
++		return fmt.Errorf(
2492
++			"Status: %v, Body: %v [%v]", response.Status, string(respBody[:]), readErr)
2493
++	}
2494
++
2495
++	return oidcErr
2496
++}
2497
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/OWNERS kubernetes/pkg/cloudprovider/providers/cascade/OWNERS
2498
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/OWNERS	1970-01-01 00:00:00.000000000 +0000
2499
+@@ -0,0 +1,3 @@
2500
++maintainers:
2501
++- ashokc
2502
++- ysheng
2503
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/restclient.go kubernetes/pkg/cloudprovider/providers/cascade/restclient.go
2504
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/restclient.go	1970-01-01 00:00:00.000000000 +0000
2505
+@@ -0,0 +1,262 @@
2506
++package cascade
2507
++
2508
++import (
2509
++	"bytes"
2510
++	"encoding/json"
2511
++	"io"
2512
++	"io/ioutil"
2513
++	"net/http"
2514
++)
2515
++
2516
++type restClient struct {
2517
++	httpClient                *http.Client
2518
++	authClient                *AuthClient
2519
++	UpdateAccessTokenCallback TokenCallback
2520
++}
2521
++
2522
++type request struct {
2523
++	Method      string
2524
++	URL         string
2525
++	ContentType string
2526
++	Body        io.Reader
2527
++	Tokens      *TokenOptions
2528
++}
2529
++
2530
++type page struct {
2531
++	Items            []interface{} `json:"items"`
2532
++	NextPageLink     string        `json:"nextPageLink"`
2533
++	PreviousPageLink string        `json:"previousPageLink"`
2534
++}
2535
++
2536
++type documentList struct {
2537
++	Items []interface{}
2538
++}
2539
++
2540
++type bodyRewinder func() io.Reader
2541
++
2542
++const appJson string = "application/json"
2543
++const expiredAuthToken int32 = 1904
2544
++
2545
++func (client *restClient) AppendSlice(origSlice []interface{}, dataToAppend []interface{}) []interface{} {
2546
++	origLen := len(origSlice)
2547
++	newLen := origLen + len(dataToAppend)
2548
++
2549
++	if newLen > cap(origSlice) {
2550
++		newSlice := make([]interface{}, (newLen+1)*2)
2551
++		copy(newSlice, origSlice)
2552
++		origSlice = newSlice
2553
++	}
2554
++
2555
++	origSlice = origSlice[0:newLen]
2556
++	copy(origSlice[origLen:newLen], dataToAppend)
2557
++
2558
++	return origSlice
2559
++}
2560
++
2561
++func (client *restClient) Get(url string, tokens *TokenOptions) (res *http.Response, err error) {
2562
++	req := request{"GET", url, "", nil, tokens}
2563
++	res, err = client.SendRequest(&req, nil)
2564
++	return
2565
++}
2566
++
2567
++func (client *restClient) GetList(endpoint string, url string, tokens *TokenOptions) (result []byte, err error) {
2568
++	req := request{"GET", url, "", nil, tokens}
2569
++	res, err := client.SendRequest(&req, nil)
2570
++	if err != nil {
2571
++		return
2572
++	}
2573
++	res, err = getError(res)
2574
++	if err != nil {
2575
++		return
2576
++	}
2577
++
2578
++	decoder := json.NewDecoder(res.Body)
2579
++	decoder.UseNumber()
2580
++
2581
++	page := &page{}
2582
++	err = decoder.Decode(page)
2583
++	if err != nil {
2584
++		return
2585
++	}
2586
++
2587
++	documentList := &documentList{}
2588
++	documentList.Items = client.AppendSlice(documentList.Items, page.Items)
2589
++
2590
++	for page.NextPageLink != "" {
2591
++		req = request{"GET", endpoint + page.NextPageLink, "", nil, tokens}
2592
++		res, err = client.SendRequest(&req, nil)
2593
++		if err != nil {
2594
++			return
2595
++		}
2596
++		res, err = getError(res)
2597
++		if err != nil {
2598
++			return
2599
++		}
2600
++
2601
++		decoder = json.NewDecoder(res.Body)
2602
++		decoder.UseNumber()
2603
++
2604
++		page.NextPageLink = ""
2605
++		page.PreviousPageLink = ""
2606
++
2607
++		err = decoder.Decode(page)
2608
++		if err != nil {
2609
++			return
2610
++		}
2611
++
2612
++		documentList.Items = client.AppendSlice(documentList.Items, page.Items)
2613
++	}
2614
++
2615
++	result, err = json.Marshal(documentList)
2616
++
2617
++	return
2618
++}
2619
++
2620
++func (client *restClient) Post(url string, contentType string, body io.ReadSeeker, tokens *TokenOptions) (res *http.Response, err error) {
2621
++	if contentType == "" {
2622
++		contentType = appJson
2623
++	}
2624
++
2625
++	req := request{"POST", url, contentType, body, tokens}
2626
++	rewinder := func() io.Reader {
2627
++		body.Seek(0, 0)
2628
++		return body
2629
++	}
2630
++	res, err = client.SendRequest(&req, rewinder)
2631
++	return
2632
++}
2633
++
2634
++func (client *restClient) Patch(url string, contentType string, body io.ReadSeeker, tokens *TokenOptions) (res *http.Response, err error) {
2635
++	if contentType == "" {
2636
++		contentType = appJson
2637
++	}
2638
++
2639
++	req := request{"PATCH", url, contentType, body, tokens}
2640
++	rewinder := func() io.Reader {
2641
++		body.Seek(0, 0)
2642
++		return body
2643
++	}
2644
++	res, err = client.SendRequest(&req, rewinder)
2645
++	return
2646
++}
2647
++
2648
++func (client *restClient) Put(url string, contentType string, body io.ReadSeeker, tokens *TokenOptions) (res *http.Response, err error) {
2649
++	if contentType == "" {
2650
++		contentType = appJson
2651
++	}
2652
++
2653
++	req := request{"PUT", url, contentType, body, tokens}
2654
++	rewinder := func() io.Reader {
2655
++		body.Seek(0, 0)
2656
++		return body
2657
++	}
2658
++	res, err = client.SendRequest(&req, rewinder)
2659
++	return
2660
++}
2661
++
2662
++func (client *restClient) Delete(url string, tokens *TokenOptions) (res *http.Response, err error) {
2663
++	req := request{"DELETE", url, "", nil, tokens}
2664
++	res, err = client.SendRequest(&req, nil)
2665
++	return
2666
++}
2667
++
2668
++func (client *restClient) SendRequest(req *request, bodyRewinder bodyRewinder) (res *http.Response, err error) {
2669
++	res, err = client.sendRequestHelper(req)
2670
++	// In most cases, we'll return immediately
2671
++	// If the operation succeeded, but we got a 401 response and if we're using
2672
++	// authentication, then we'll look into the body to see if the token expired
2673
++	if err != nil {
2674
++		return res, err
2675
++	}
2676
++	if res.StatusCode != 401 {
2677
++		// It's not a 401, so the token didn't expire
2678
++		return res, err
2679
++	}
2680
++	if req.Tokens == nil || req.Tokens.AccessToken == "" {
2681
++		// We don't have a token, so we can't renew the token, no need to proceed
2682
++		return res, err
2683
++	}
2684
++
2685
++	// We're going to look in the body to see if it failed because the token expired
2686
++	// This means we need to read the body, but the functions that call us also
2687
++	// expect to read the body. So we read the body, then create a new reader
2688
++	// so they can read the body as normal.
2689
++	body, err := ioutil.ReadAll(res.Body)
2690
++	if err != nil {
2691
++		return res, err
2692
++	}
2693
++	res.Body = ioutil.NopCloser(bytes.NewReader(body))
2694
++
2695
++	// Now see if we had an expired token or not
2696
++	var apiError APIError
2697
++	err = json.Unmarshal(body, &apiError)
2698
++	if err != nil {
2699
++		return res, err
2700
++	}
2701
++	if apiError.ErrorCode != expiredAuthToken {
2702
++		return res, nil
2703
++	}
2704
++
2705
++	// We were told that the access token expired, so we acquire a new token using the refresh token.
2706
++	newTokens, err := client.authClient.GetTokensByRefreshToken(req.Tokens.RefreshToken)
2707
++	// If there is an error during token refresh, we assume that the refresh token also expired. So we login again using
2708
++	// the machine account.
2709
++	if err != nil {
2710
++		newTokens, err = client.authClient.GetTokensByMachineAccount()
2711
++		if err != nil {
2712
++			return res, err
2713
++		}
2714
++	}
2715
++	req.Tokens.AccessToken = newTokens.AccessToken
2716
++	if client.UpdateAccessTokenCallback != nil {
2717
++		client.UpdateAccessTokenCallback(newTokens.AccessToken)
2718
++	}
2719
++	if req.Body != nil && bodyRewinder != nil {
2720
++		req.Body = bodyRewinder()
2721
++	}
2722
++	res, err = client.sendRequestHelper(req)
2723
++	return res, err
2724
++}
2725
++
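In short, the expired-token path in SendRequest is:

	1. send the request; any response other than a 401 is returned as-is
	2. decode the 401 body; only an APIError with code 1904 (expired token) proceeds
	3. renew the access token via the refresh token, falling back to a machine-account login
	4. rewind the request body (if any) and resend exactly once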
2726
++func (client *restClient) sendRequestHelper(req *request) (res *http.Response, err error) {
2727
++	r, err := http.NewRequest(req.Method, req.URL, req.Body)
2728
++	if err != nil {
2729
++		return
2730
++	}
2731
++	if req.ContentType != "" {
2732
++		r.Header.Add("Content-Type", req.ContentType)
2733
++	}
2734
++	if req.Tokens != nil && req.Tokens.AccessToken != "" {
2735
++		r.Header.Add("Authorization", "Bearer "+req.Tokens.AccessToken)
2736
++	}
2737
++	res, err = client.httpClient.Do(r)
2738
++	if err != nil {
2739
++		return
2740
++	}
2741
++
2742
++	return
2743
++}
2744
++
2745
++// Reads an error out of the HTTP response, or does nothing if
2746
++// no error occurred.
2747
++func getError(res *http.Response) (*http.Response, error) {
2748
++	// Do nothing if the response is a successful 2xx
2749
++	if res.StatusCode/100 == 2 {
2750
++		return res, nil
2751
++	}
2752
++	var apiError APIError
2753
++	// ReadAll is usually a bad practice, but here we need to read the response all
2754
++	// at once because we may attempt to use the data twice. It's preferable to use
2755
++	// methods that take io.Reader, e.g. json.NewDecoder
2756
++	body, err := ioutil.ReadAll(res.Body)
2757
++	if err != nil {
2758
++		return nil, err
2759
++	}
2760
++	err = json.Unmarshal(body, &apiError)
2761
++	if err != nil {
2762
++		// If deserializing into APIError fails, return a generic HttpError instead
2763
++		return nil, HttpError{res.StatusCode, string(body[:])}
2764
++	}
2765
++	apiError.HttpStatusCode = res.StatusCode
2766
++	return nil, apiError
2767
++}
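Call sites typically chain this with the task decoder, as in client.go:

	task, err := getTask(getError(res))

getError passes 2xx responses through untouched and converts everything else into an APIError (or a generic HttpError when the body is not an API error), so the decoder only ever sees successful responses.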
2768
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/tests_owed kubernetes/pkg/cloudprovider/providers/cascade/tests_owed
2769
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/tests_owed	1970-01-01 00:00:00.000000000 +0000
2770
+@@ -0,0 +1,5 @@
2771
++
2772
++Yu Sheng
2773
++Change-Id: Ifc11818f65a3e018aeea6988d9e2c0719b592920
2774
++
2775
++
2776
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/cascade/utils.go kubernetes/pkg/cloudprovider/providers/cascade/utils.go
2777
+--- kubernetes-orig/pkg/cloudprovider/providers/cascade/utils.go	1970-01-01 00:00:00.000000000 +0000
2778
+@@ -0,0 +1,32 @@
2779
++package cascade
2780
++
2781
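++// StringPtr returns a pointer to the given string.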
++func StringPtr(s string) *string {
2782
++	return &s
2783
++}
2784
++
2785
++// StringVal returns the string from a string pointer; a nil pointer yields "".
2786
++func StringVal(p *string) (s string) {
2787
++	if p != nil {
2788
++		s = *p
2789
++	}
2790
++	return
2791
++}
2792
++
2793
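++// Int64Ptr returns a pointer to the given int64.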
++func Int64Ptr(s int64) *int64 {
2794
++	return &s
2795
++}
2796
++
2797
++// Int64Val returns the value from an int64 pointer; a nil pointer yields 0.
++func Int64Val(s *int64) int64 {
2798
++	if s == nil {
++		return 0
++	}
++	return *s
2799
++}
2800
++
2801
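++// Int32Ptr returns a pointer to the given int32.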
++func Int32Ptr(s int32) *int32 {
2802
++	return &s
2803
++}
2804
+\ No newline at end of file
2805
+diff -uNr --no-dereference kubernetes-orig/pkg/cloudprovider/providers/providers.go kubernetes/pkg/cloudprovider/providers/providers.go
+--- kubernetes-orig/pkg/cloudprovider/providers/providers.go	2018-04-26 12:17:57.000000000 +0000
+@@ -20,6 +20,7 @@
+ 	// Cloud providers
+ 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
+ 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
++	_ "k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
+ 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack"
+ 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
+ 	_ "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
+diff -uNr --no-dereference kubernetes-orig/pkg/printers/internalversion/describe.go kubernetes/pkg/printers/internalversion/describe.go
+--- kubernetes-orig/pkg/printers/internalversion/describe.go	2018-04-26 12:17:57.000000000 +0000
+@@ -754,6 +754,8 @@
+ 			printFlexVolumeSource(volume.VolumeSource.FlexVolume, w)
+ 		case volume.VolumeSource.Flocker != nil:
+ 			printFlockerVolumeSource(volume.VolumeSource.Flocker, w)
++		case volume.VolumeSource.CascadeDisk != nil:
++			printCascadeDiskVolumeSource(volume.VolumeSource.CascadeDisk, w)
+ 		default:
+ 			w.Write(LEVEL_1, "<unknown>\n")
+ 		}
+@@ -1114,6 +1116,13 @@
+ 		csi.Driver, csi.VolumeHandle, csi.ReadOnly)
+ }
+ 
++func printCascadeDiskVolumeSource(cascade *api.CascadeDiskVolumeSource, w PrefixWriter) {
++	w.Write(LEVEL_2, "Type:\tCascadeDisk (a Persistent Disk resource in Cascade)\n"+
++		"    DiskID:\t%v\n"+
++		"    FSType:\t%v\n",
++		cascade.DiskID, cascade.FSType)
++}
++
+ type PersistentVolumeDescriber struct {
+ 	clientset.Interface
+ }
+@@ -1250,6 +1259,8 @@
+ 			printFlockerVolumeSource(pv.Spec.Flocker, w)
+ 		case pv.Spec.CSI != nil:
+ 			printCSIPersistentVolumeSource(pv.Spec.CSI, w)
++		case pv.Spec.CascadeDisk != nil:
++			printCascadeDiskVolumeSource(pv.Spec.CascadeDisk, w)
+ 		default:
+ 			w.Write(LEVEL_1, "<unknown>\n")
+ 		}
+diff -uNr --no-dereference kubernetes-orig/pkg/security/podsecuritypolicy/util/util.go kubernetes/pkg/security/podsecuritypolicy/util/util.go
+--- kubernetes-orig/pkg/security/podsecuritypolicy/util/util.go	2018-04-26 12:17:57.000000000 +0000
+@@ -68,6 +68,7 @@
+ 		string(extensions.PortworxVolume),
+ 		string(extensions.ScaleIO),
+ 		string(extensions.CSI),
++		string(extensions.CascadeDisk),
+ 	)
+ 	return fstypes
+ }
+@@ -129,6 +130,8 @@
+ 		return extensions.PortworxVolume, nil
+ 	case v.ScaleIO != nil:
+ 		return extensions.ScaleIO, nil
++	case v.CascadeDisk != nil:
++		return extensions.CascadeDisk, nil
+ 	}
+ 
+ 	return "", fmt.Errorf("unknown volume type for volume: %#v", v)
+diff -uNr --no-dereference kubernetes-orig/pkg/volume/cascade_disk/attacher.go kubernetes/pkg/volume/cascade_disk/attacher.go
+--- kubernetes-orig/pkg/volume/cascade_disk/attacher.go	1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,268 @@
++package cascade_disk
++
++import (
++	"fmt"
++	"os"
++	"path"
++	"strings"
++	"time"
++
++	"github.com/golang/glog"
++	"k8s.io/api/core/v1"
++	"k8s.io/apimachinery/pkg/types"
++	"k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
++	"k8s.io/kubernetes/pkg/util/mount"
++	"k8s.io/kubernetes/pkg/volume"
++	volumeutil "k8s.io/kubernetes/pkg/volume/util"
++)
++
++type cascadeDiskAttacher struct {
2890
++	host         volume.VolumeHost
2891
++	cascadeDisks cascade.Disks
2892
++}
2893
++
2894
++var _ volume.Attacher = &cascadeDiskAttacher{}
2895
++var _ volume.AttachableVolumePlugin = &cascadeDiskPlugin{}
2896
++
2897
++func (plugin *cascadeDiskPlugin) NewAttacher() (volume.Attacher, error) {
2898
++	cascadeCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
2899
++	if err != nil {
2900
++		glog.Errorf("Cascade attacher: NewAttacher failed to get cloud provider")
2901
++		return nil, err
2902
++	}
2903
++
2904
++	return &cascadeDiskAttacher{
2905
++		host:         plugin.host,
2906
++		cascadeDisks: cascadeCloud,
2907
++	}, nil
2908
++}
2909
++
2910
++// Attach attaches the volume specified by the given spec to the given host. On success, returns the device path where
2911
++// the device was attached on the node.
2912
++func (attacher *cascadeDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
2913
++	hostName := string(nodeName)
2914
++	volumeSource, _, err := getVolumeSource(spec)
2915
++	if err != nil {
2916
++		glog.Errorf("Cascade attacher: Attach failed to get volume source")
2917
++		return "", err
2918
++	}
2919
++
2920
++	// cascadeDisks.AttachDisk checks if disk is already attached to the node. So we don't have to do that separately
2921
++	// here.
2922
++	glog.V(4).Infof("Cascade: Attach disk called for host %s", hostName)
2923
++	devicePath, err := attacher.cascadeDisks.AttachDisk(volumeSource.DiskID, nodeName)
2924
++	if err != nil {
2925
++		glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.DiskID, nodeName, err)
2926
++		return "", err
2927
++	}
2928
++
2929
++	// Cacsade uses device names of the format /dev/sdX, but newer Linux Kernels mount them under /dev/xvdX
2930
++	// (source: AWS console). So we have to rename the first occurrence of sd to xvd.
2931
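++	// For example, a device path of /dev/sdb reported by the controller is rewritten to /dev/xvdb.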
++	devicePath = strings.Replace(devicePath, "sd", "xvd", 1)
2932
++	return devicePath, nil
2933
++}
2934
++
2935
++// VolumesAreAttached verifies whether the volumes specified in the spec are attached to the specified node.
2936
++func (attacher *cascadeDiskAttacher) VolumesAreAttached(specs []*volume.Spec,
2937
++	nodeName types.NodeName) (map[*volume.Spec]bool, error) {
2938
++	volumesAttachedCheck := make(map[*volume.Spec]bool)
2939
++	volumeSpecMap := make(map[string]*volume.Spec)
2940
++	diskIDList := []string{}
2941
++	for _, spec := range specs {
2942
++		volumeSource, _, err := getVolumeSource(spec)
2943
++		if err != nil {
2944
++			glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
2945
++			continue
2946
++		}
2947
++
2948
++		diskIDList = append(diskIDList, volumeSource.DiskID)
2949
++		volumesAttachedCheck[spec] = true
2950
++		volumeSpecMap[volumeSource.DiskID] = spec
2951
++	}
2952
++	attachedResult, err := attacher.cascadeDisks.DisksAreAttached(diskIDList, nodeName)
2953
++	if err != nil {
2954
++		glog.Errorf(
2955
++			"Error checking if volumes (%v) are attached to current node (%q). err=%v",
2956
++			diskIDList, nodeName, err)
2957
++		return volumesAttachedCheck, err
2958
++	}
2959
++
2960
++	for diskID, attached := range attachedResult {
2961
++		if !attached {
2962
++			spec := volumeSpecMap[diskID]
2963
++			volumesAttachedCheck[spec] = false
2964
++			glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached",
2965
++				diskID, spec.Name())
2966
++		}
2967
++	}
2968
++	return volumesAttachedCheck, nil
2969
++}
2970
++
2971
++// WaitForAttach waits until the devicePath returned by the Attach call is available.
2972
++func (attacher *cascadeDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod,
2973
++	timeout time.Duration) (string, error) {
2974
++	volumeSource, _, err := getVolumeSource(spec)
2975
++	if err != nil {
2976
++		glog.Errorf("Cascade attacher: WaitForAttach failed to get volume source")
2977
++		return "", err
2978
++	}
2979
++
2980
++	if devicePath == "" {
2981
++		return "", fmt.Errorf("WaitForAttach failed for disk %s: devicePath is empty.", volumeSource.DiskID)
2982
++	}
2983
++
2984
++	ticker := time.NewTicker(checkSleepDuration)
2985
++	defer ticker.Stop()
2986
++
2987
++	timer := time.NewTimer(timeout)
2988
++	defer timer.Stop()
2989
++
2990
++	for {
2991
++		select {
2992
++		case <-ticker.C:
2993
++			glog.V(4).Infof("Checking disk %s is attached", volumeSource.DiskID)
2994
++			checkPath, err := verifyDevicePath(devicePath)
2995
++			if err != nil {
2996
++				// Log error, if any, and continue checking periodically. See issue #11321
2997
++				glog.Warningf("Cascade attacher: WaitForAttach with devicePath %s Checking PD %s Error verify "+
2998
++					"path", devicePath, volumeSource.DiskID)
2999
++			} else if checkPath != "" {
3000
++				// A device path has successfully been created for the disk
3001
++				glog.V(4).Infof("Successfully found attached disk %s.", volumeSource.DiskID)
3002
++				return devicePath, nil
3003
++			}
3004
++		case <-timer.C:
3005
++			return "", fmt.Errorf("Could not find attached disk %s. Timeout waiting for mount paths to be "+
3006
++				"created.", volumeSource.DiskID)
3007
++		}
3008
++	}
3009
++}
3010
++
3011
++// GetDeviceMountPath returns a path where the device should point which should be bind mounted for individual volumes.
3012
++func (attacher *cascadeDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
3013
++	volumeSource, _, err := getVolumeSource(spec)
3014
++	if err != nil {
3015
++		glog.Errorf("Cascade attacher: GetDeviceMountPath failed to get volume source")
3016
++		return "", err
3017
++	}
3018
++
3019
++	return makeGlobalPDPath(attacher.host, volumeSource.DiskID), nil
3020
++}
3021
++
3022
++// GetMountDeviceRefs finds all other references to the device referenced by deviceMountPath; returns a list of paths.
3023
++func (plugin *cascadeDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
3024
++	mounter := plugin.host.GetMounter(plugin.GetPluginName())
3025
++	return mount.GetMountRefs(mounter, deviceMountPath)
3026
++}
3027
++
3028
++// MountDevice mounts device to global mount point.
3029
++func (attacher *cascadeDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
3030
++	mounter := attacher.host.GetMounter(cascadeDiskPluginName)
3031
++	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
3032
++	if err != nil {
3033
++		if os.IsNotExist(err) {
3034
++			if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
3035
++				glog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err)
3036
++				return err
3037
++			}
3038
++			notMnt = true
3039
++		} else {
3040
++			return err
3041
++		}
3042
++	}
3043
++
3044
++	volumeSource, _, err := getVolumeSource(spec)
3045
++	if err != nil {
3046
++		glog.Errorf("Cascade attacher: MountDevice failed to get volume source. err: %s", err)
3047
++		return err
3048
++	}
3049
++
3050
++	options := []string{}
3051
++
3052
++	if notMnt {
3053
++		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(cascadeDiskPluginName, attacher.host)
3054
++		mountOptions := volumeutil.MountOptionFromSpec(spec)
3055
++		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
3056
++		if err != nil {
3057
++			os.Remove(deviceMountPath)
3058
++			return err
3059
++		}
3060
++		glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v",
3061
++			spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options)
3062
++	}
3063
++	return nil
3064
++}
3065
++
3066
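++// cascadeDiskDetacher detaches Cascade persistent disks from nodes and unmounts their device mount paths.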
++type cascadeDiskDetacher struct {
++	mounter      mount.Interface
++	cascadeDisks cascade.Disks
++}
++
++var _ volume.Detacher = &cascadeDiskDetacher{}
++
++// NewDetacher returns the detacher associated with the Cascade volume plugin.
++func (plugin *cascadeDiskPlugin) NewDetacher() (volume.Detacher, error) {
++	cascadeCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
++	if err != nil {
++		glog.Errorf("Cascade detacher: NewDetacher failed to get cloud provider. err: %s", err)
++		return nil, err
++	}
++
++	return &cascadeDiskDetacher{
++		mounter:      plugin.host.GetMounter(plugin.GetPluginName()),
++		cascadeDisks: cascadeCloud,
++	}, nil
++}
++
++// Detach detaches the given device from the given node.
++func (detacher *cascadeDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
++	hostName := string(nodeName)
++	diskID := path.Base(deviceMountPath)
++	attached, err := detacher.cascadeDisks.DiskIsAttached(diskID, nodeName)
++	if err != nil {
++		// Log error and continue with detach
++		glog.Errorf(
++			"Error checking if persistent disk (%q) is already attached to current node (%q). "+
++				"Will continue and try detach anyway. err=%v", diskID, hostName, err)
++	}
++
++	if err == nil && !attached {
++		// Volume is already detached from node.
++		glog.V(4).Infof("detach operation was successful. persistent disk %q is already detached "+
++			"from node %q.", diskID, hostName)
++		return nil
++	}
++
++	if err := detacher.cascadeDisks.DetachDisk(diskID, nodeName); err != nil {
++		glog.Errorf("Error detaching volume %q: %v", diskID, err)
++		return err
++	}
++	return nil
++}
++
++// WaitForDetach waits for the devicePath to become unavailable.
++func (detacher *cascadeDiskDetacher) WaitForDetach(devicePath string, timeout time.Duration) error {
++	ticker := time.NewTicker(checkSleepDuration)
++	defer ticker.Stop()
++	timer := time.NewTimer(timeout)
++	defer timer.Stop()
++
++	for {
++		select {
++		case <-ticker.C:
++			glog.V(4).Infof("Checking device %q is detached.", devicePath)
++			if pathExists, err := volumeutil.PathExists(devicePath); err != nil {
++				return fmt.Errorf("Error checking if device path exists: %v", err)
++			} else if !pathExists {
++				return nil
++			}
++		case <-timer.C:
++			return fmt.Errorf("Timeout reached; Device %v is still attached", devicePath)
++		}
++	}
++}
++
++// UnmountDevice unmounts the disk specified by the device mount path.
++func (detacher *cascadeDiskDetacher) UnmountDevice(deviceMountPath string) error {
++	return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
++}
+diff -uNr --no-dereference kubernetes-orig/pkg/volume/cascade_disk/BUILD kubernetes/pkg/volume/cascade_disk/BUILD
+--- kubernetes-orig/pkg/volume/cascade_disk/BUILD	1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,43 @@
++package(default_visibility = ["//visibility:public"])
++
++load(
++    "@io_bazel_rules_go//go:def.bzl",
++    "go_library",
++    "go_test",
++)
++
++go_library(
++    name = "go_default_library",
++    srcs = [
++        "attacher.go",
++        "cascade_disk.go",
++        "cascade_util.go",
++    ],
++    deps = [
++        "//pkg/cloudprovider:go_default_library",
++        "//pkg/cloudprovider/providers/cascade:go_default_library",
++        "//pkg/util/mount:go_default_library",
++        "//pkg/util/strings:go_default_library",
++        "//pkg/volume:go_default_library",
++        "//pkg/volume/util:go_default_library",
++        "//pkg/volume/util/volumehelper:go_default_library",
++        "//vendor/github.com/golang/glog:go_default_library",
++        "//vendor/k8s.io/api/core/v1:go_default_library",
++        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
++        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
++        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
++    ],
++)
++
++filegroup(
++    name = "package-srcs",
++    srcs = glob(["**"]),
++    tags = ["automanaged"],
++    visibility = ["//visibility:private"],
++)
++
++filegroup(
++    name = "all-srcs",
++    srcs = [":package-srcs"],
++    tags = ["automanaged"],
++)
+diff -uNr --no-dereference kubernetes-orig/pkg/volume/cascade_disk/cascade_disk.go kubernetes/pkg/volume/cascade_disk/cascade_disk.go
+--- kubernetes-orig/pkg/volume/cascade_disk/cascade_disk.go	1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,390 @@
++package cascade_disk
++
++import (
++	"fmt"
++	"os"
++	"path"
++
++	"github.com/golang/glog"
++	"k8s.io/api/core/v1"
++	"k8s.io/apimachinery/pkg/api/resource"
++	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
++	"k8s.io/apimachinery/pkg/types"
++	"k8s.io/kubernetes/pkg/util/mount"
++	utilstrings "k8s.io/kubernetes/pkg/util/strings"
++	"k8s.io/kubernetes/pkg/volume"
++	"k8s.io/kubernetes/pkg/volume/util"
++)
++
++// ProbeVolumePlugins is the primary entrypoint for volume plugins.
++func ProbeVolumePlugins() []volume.VolumePlugin {
++	return []volume.VolumePlugin{&cascadeDiskPlugin{}}
++}
++
++type cascadeDiskPlugin struct {
++	host volume.VolumeHost
++}
++
++var _ volume.VolumePlugin = &cascadeDiskPlugin{}
++var _ volume.PersistentVolumePlugin = &cascadeDiskPlugin{}
++var _ volume.DeletableVolumePlugin = &cascadeDiskPlugin{}
++var _ volume.ProvisionableVolumePlugin = &cascadeDiskPlugin{}
++
++const (
++	cascadeDiskPluginName = "kubernetes.io/cascade-disk"
++)
++
++// Init initializes the Cascade volume plugin.
++func (plugin *cascadeDiskPlugin) Init(host volume.VolumeHost) error {
++	plugin.host = host
++	return nil
++}
++
++// GetPluginName returns the name of the Cascade volume plugin.
++func (plugin *cascadeDiskPlugin) GetPluginName() string {
++	return cascadeDiskPluginName
++}
++
++// GetVolumeName returns the name of the volume, which is the disk ID in our case.
++func (plugin *cascadeDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
++	volumeSource, _, err := getVolumeSource(spec)
++	if err != nil {
++		glog.Errorf("Cascade volume plugin: GetVolumeName failed to get volume source")
++		return "", err
++	}
++
++	return volumeSource.DiskID, nil
++}
++
++// CanSupport specifies whether the Cascade volume plugin can support the given resource type.
++// The Cascade plugin supports only persistent volumes and volumes that specify a Cascade disk source.
++func (plugin *cascadeDiskPlugin) CanSupport(spec *volume.Spec) bool {
++	return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CascadeDisk != nil) ||
++		(spec.Volume != nil && spec.Volume.CascadeDisk != nil)
++}
++
++// RequiresRemount specifies whether remount is required for the disk.
++func (plugin *cascadeDiskPlugin) RequiresRemount() bool {
++	return false
++}
++
++// SupportsMountOption specifies whether the Cascade volume plugin supports mount options.
++func (plugin *cascadeDiskPlugin) SupportsMountOption() bool {
++	return true
++}
++
++// SupportsBulkVolumeVerification specifies whether bulk volume verification is supported.
++func (plugin *cascadeDiskPlugin) SupportsBulkVolumeVerification() bool {
++	return false
++}
++
++// NewMounter returns the mounter associated with the Cascade volume plugin.
++func (plugin *cascadeDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod,
++	_ volume.VolumeOptions) (volume.Mounter, error) {
++	return plugin.newMounterInternal(spec, pod.UID, &CascadeDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
++}
++
++// NewUnmounter returns the unmounter associated with the Cascade volume plugin.
++func (plugin *cascadeDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
++	return plugin.newUnmounterInternal(volName, podUID, &CascadeDiskUtil{},
++		plugin.host.GetMounter(plugin.GetPluginName()))
++}
++
++func (plugin *cascadeDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager,
++	mounter mount.Interface) (volume.Mounter, error) {
++	volumeSource, _, err := getVolumeSource(spec)
++	if err != nil {
++		glog.Errorf("Cascade volume plugin: newMounterInternal failed to get volume source")
++		return nil, err
++	}
++
++	diskID := volumeSource.DiskID
++	fsType := volumeSource.FSType
++
++	return &cascadeDiskMounter{
++		cascadeDisk: &cascadeDisk{
++			podUID:  podUID,
++			volName: spec.Name(),
++			diskID:  diskID,
++			manager: manager,
++			mounter: mounter,
++			plugin:  plugin,
++		},
++		fsType:      fsType,
++		diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
++}
++
++func (plugin *cascadeDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager,
++	mounter mount.Interface) (volume.Unmounter, error) {
++	return &cascadeDiskUnmounter{
++		&cascadeDisk{
++			podUID:  podUID,
++			volName: volName,
++			manager: manager,
++			mounter: mounter,
++			plugin:  plugin,
++		}}, nil
++}
++
++// ConstructVolumeSpec constructs a Cascade volume spec based on the name and mount path.
++func (plugin *cascadeDiskPlugin) ConstructVolumeSpec(volumeSpecName, mountPath string) (*volume.Spec, error) {
++	mounter := plugin.host.GetMounter(plugin.GetPluginName())
++	pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
++	diskID, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
++	if err != nil {
++		return nil, err
++	}
++
++	cascadeDisk := &v1.Volume{
++		Name: volumeSpecName,
++		VolumeSource: v1.VolumeSource{
++			CascadeDisk: &v1.CascadeDiskVolumeSource{
++				DiskID: diskID,
++			},
++		},
++	}
++	return volume.NewSpecFromVolume(cascadeDisk), nil
++}
++
++// diskManager is an abstract interface to disk operations.
++type diskManager interface {
++	// Creates a volume
++	CreateVolume(provisioner *cascadeDiskProvisioner) (diskID string, volumeSizeGB int, fstype string, err error)
++	// Deletes a volume
++	DeleteVolume(deleter *cascadeDiskDeleter) error
++}
++
++// cascadeDisk volumes are disk resources attached to the kubelet's host machine and exposed to the pod.
++type cascadeDisk struct {
++	volName string
++	podUID  types.UID
++	diskID  string
++	fsType  string
++	manager diskManager
++	mounter mount.Interface
++	plugin  *cascadeDiskPlugin
++	volume.MetricsNil
++}
++
++var _ volume.Mounter = &cascadeDiskMounter{}
++
++type cascadeDiskMounter struct {
++	*cascadeDisk
++	fsType      string
++	diskMounter *mount.SafeFormatAndMount
++}
++
++// GetAttributes returns the attributes associated with a Cascade disk.
++func (b *cascadeDiskMounter) GetAttributes() volume.Attributes {
++	return volume.Attributes{
++		SupportsSELinux: true,
++	}
++}
++
++// CanMount checks prior to mount operations to verify that the required components (binaries, etc.) to mount the
++// volume are available on the underlying node. If not, it returns an error.
++func (b *cascadeDiskMounter) CanMount() error {
++	return nil
++}
++
++// SetUp attaches the disk and bind mounts to the volume path.
++func (b *cascadeDiskMounter) SetUp(fsGroup *int64) error {
++	return b.SetUpAt(b.GetPath(), fsGroup)
++}
++
++// SetUpAt attaches the disk and bind mounts to the volume path.
++func (b *cascadeDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
++	glog.V(4).Infof("Cascade Persistent Disk setup %s to %s", b.diskID, dir)
++
++	// TODO: handle failed mounts here.
++	notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
++	if err != nil && !os.IsNotExist(err) {
++		glog.Errorf("cannot validate mount point: %s %v", dir, err)
++		return err
++	}
++	if !notmnt {
++		return nil
++	}
++
++	if err := os.MkdirAll(dir, 0750); err != nil {
++		glog.Errorf("mkdir failed on disk %s (%v)", dir, err)
++		return err
++	}
++
++	options := []string{"bind"}
++
++	// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
++	globalPDPath := makeGlobalPDPath(b.plugin.host, b.diskID)
++	glog.V(4).Infof("attempting to mount %s", dir)
++
++	err = b.mounter.Mount(globalPDPath, dir, "", options)
++	if err != nil {
++		notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
++		if mntErr != nil {
++			glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
++			return err
++		}
++		if !notmnt {
++			if mntErr = b.mounter.Unmount(dir); mntErr != nil {
++				glog.Errorf("Failed to unmount: %v", mntErr)
++				return err
++			}
++			notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
++			if mntErr != nil {
++				glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
++				return err
++			}
++			if !notmnt {
++				glog.Errorf("%s is still mounted, despite call to unmount().  Will try again next sync loop.",
++					b.GetPath())
++				return err
++			}
++		}
++		os.Remove(dir)
++		glog.Errorf("Mount of disk %s failed: %v", dir, err)
++		return err
++	}
++	volume.SetVolumeOwnership(b, fsGroup)
++
++	return nil
++}
++
++var _ volume.Unmounter = &cascadeDiskUnmounter{}
++
++type cascadeDiskUnmounter struct {
++	*cascadeDisk
++}
++
++// TearDown unmounts the bind mount, and detaches the disk only if the disk resource was the last reference to that
++// disk on the kubelet.
++func (c *cascadeDiskUnmounter) TearDown() error {
++	return c.TearDownAt(c.GetPath())
++}
++
++// TearDownAt unmounts the bind mount, and detaches the disk only if the disk resource was the last reference to that
++// disk on the kubelet.
++func (c *cascadeDiskUnmounter) TearDownAt(dir string) error {
++	return util.UnmountPath(dir, c.mounter)
++}
++
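++// makeGlobalPDPath returns the global mount path for the given disk under the plugin directory.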
++func makeGlobalPDPath(host volume.VolumeHost, diskID string) string {
++	return path.Join(host.GetPluginDir(cascadeDiskPluginName), mount.MountsInGlobalPDPath, diskID)
++}
++
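++// GetPath returns the pod volume directory to which this disk is bind mounted.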
++func (cd *cascadeDisk) GetPath() string {
++	name := cascadeDiskPluginName
++	return cd.plugin.host.GetPodVolumeDir(cd.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cd.volName)
++}
++
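++// GetAccessModes returns the access modes supported by Cascade disks; only ReadWriteOnce is supported.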
++func (plugin *cascadeDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
++	return []v1.PersistentVolumeAccessMode{
++		v1.ReadWriteOnce,
++	}
++}
++
++type cascadeDiskDeleter struct {
++	*cascadeDisk
++}
++
++var _ volume.Deleter = &cascadeDiskDeleter{}
++
++// NewDeleter returns the deleter associated with the Cascade volume plugin.
++func (plugin *cascadeDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
++	return plugin.newDeleterInternal(spec, &CascadeDiskUtil{})
++}
++
++func (plugin *cascadeDiskPlugin) newDeleterInternal(spec *volume.Spec, manager diskManager) (volume.Deleter, error) {
++	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CascadeDisk == nil {
++		return nil, fmt.Errorf("spec.PersistentVolumeSource.CascadeDisk is nil")
++	}
++	return &cascadeDiskDeleter{
++		&cascadeDisk{
++			volName: spec.Name(),
++			diskID:  spec.PersistentVolume.Spec.CascadeDisk.DiskID,
++			manager: manager,
++			plugin:  plugin,
++		}}, nil
++}
++
++func (r *cascadeDiskDeleter) Delete() error {
++	return r.manager.DeleteVolume(r)
++}
++
++type cascadeDiskProvisioner struct {
++	*cascadeDisk
++	options volume.VolumeOptions
++}
++
++var _ volume.Provisioner = &cascadeDiskProvisioner{}
++
++// NewProvisioner returns the provisioner associated with the Cascade volume plugin.
++func (plugin *cascadeDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
++	return plugin.newProvisionerInternal(options, &CascadeDiskUtil{})
++}
++
++func (plugin *cascadeDiskPlugin) newProvisionerInternal(options volume.VolumeOptions,
++	manager diskManager) (volume.Provisioner, error) {
++	return &cascadeDiskProvisioner{
++		cascadeDisk: &cascadeDisk{
++			manager: manager,
++			plugin:  plugin,
++		},
++		options: options,
++	}, nil
++}
++
++// Provision provisions the persistent volume by making a CreateDisk call to Cascade Controller.
++func (p *cascadeDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
++	if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
++		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported",
++			p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
++	}
++
++	diskID, sizeGB, fstype, err := p.manager.CreateVolume(p)
++	if err != nil {
++		return nil, err
++	}
++
++	if fstype == "" {
++		fstype = "ext4"
++	}
++
++	pv := &v1.PersistentVolume{
++		ObjectMeta: metav1.ObjectMeta{
++			Name:   p.options.PVName,
++			Labels: map[string]string{},
++			Annotations: map[string]string{
++				util.VolumeDynamicallyCreatedByKey: "cascade-volume-dynamic-provisioner",
++			},
++		},
++		Spec: v1.PersistentVolumeSpec{
++			PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
++			AccessModes:                   p.options.PVC.Spec.AccessModes,
++			Capacity: v1.ResourceList{
++				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
++			},
++			PersistentVolumeSource: v1.PersistentVolumeSource{
++				CascadeDisk: &v1.CascadeDiskVolumeSource{
++					DiskID: diskID,
++					FSType: fstype,
++				},
++			},
++			MountOptions: p.options.MountOptions,
++		},
++	}
++	if len(p.options.PVC.Spec.AccessModes) == 0 {
++		pv.Spec.AccessModes = p.plugin.GetAccessModes()
++	}
++
++	return pv, nil
++}
++
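++// getVolumeSource extracts the CascadeDiskVolumeSource and read-only flag from a volume or persistent volume spec.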
++func getVolumeSource(spec *volume.Spec) (*v1.CascadeDiskVolumeSource, bool, error) {
++	if spec.Volume != nil && spec.Volume.CascadeDisk != nil {
++		return spec.Volume.CascadeDisk, spec.ReadOnly, nil
++	} else if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CascadeDisk != nil {
++		return spec.PersistentVolume.Spec.CascadeDisk, spec.ReadOnly, nil
++	}
++
++	return nil, false, fmt.Errorf("Spec does not reference a Cascade disk type")
+diff -uNr --no-dereference kubernetes-orig/pkg/volume/cascade_disk/cascade_util.go kubernetes/pkg/volume/cascade_disk/cascade_util.go
+--- kubernetes-orig/pkg/volume/cascade_disk/cascade_util.go	1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,107 @@
++package cascade_disk
++
++import (
++	"fmt"
++	"strings"
++	"time"
++
++	"github.com/golang/glog"
++	"k8s.io/api/core/v1"
++	"k8s.io/kubernetes/pkg/cloudprovider"
++	"k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
++	"k8s.io/kubernetes/pkg/volume"
++	volumeutil "k8s.io/kubernetes/pkg/volume/util"
++)
++
++const (
++	checkSleepDuration = time.Second
++)
++
++type CascadeDiskUtil struct{}
++
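++// verifyDevicePath returns the path if it already exists on the node, or "" while the device has not yet appeared.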
++func verifyDevicePath(path string) (string, error) {
++	if pathExists, err := volumeutil.PathExists(path); err != nil {
++		return "", fmt.Errorf("Error checking if path exists: %v", err)
++	} else if pathExists {
++		return path, nil
++	}
++
++	glog.V(4).Infof("verifyDevicePath: path does not exist yet")
++	return "", nil
++}
++
++// CreateVolume creates a Cascade persistent disk.
++func (util *CascadeDiskUtil) CreateVolume(p *cascadeDiskProvisioner) (diskID string, capacityGB int, fstype string,
++	err error) {
++	cloud, err := getCloudProvider(p.plugin.host.GetCloudProvider())
++	if err != nil {
++		glog.Errorf("Cascade Util: CreateVolume failed to get cloud provider. Error [%v]", err)
++		return "", 0, "", err
++	}
++
++	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
++	volSizeBytes := capacity.Value()
++	// Cascade works with whole GiB; round the requested size up to the next GiB.
++	volSizeGB := int(volumeutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
++	name := volumeutil.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 255)
++	volumeOptions := &cascade.VolumeOptions{
++		CapacityGB: volSizeGB,
++		Tags:       *p.options.CloudTags,
++		Name:       name,
++	}
++
++	for parameter, value := range p.options.Parameters {
++		switch strings.ToLower(parameter) {
++		case "flavor":
++			volumeOptions.Flavor = value
++		case volume.VolumeParameterFSType:
++			fstype = value
++			glog.V(4).Infof("Cascade Util: Setting fstype to %s", fstype)
++		default:
++			glog.Errorf("Cascade Util: invalid option %s for volume plugin %s.", parameter,
++				p.plugin.GetPluginName())
++			return "", 0, "", fmt.Errorf("Cascade Util: invalid option %s for volume plugin %s.", parameter,
++				p.plugin.GetPluginName())
++		}
++	}
++
++	diskID, err = cloud.CreateDisk(volumeOptions)
++	if err != nil {
++		glog.Errorf("Cascade Util: failed to CreateDisk. Error [%v]", err)
++		return "", 0, "", err
++	}
++
++	glog.V(4).Infof("Successfully created Cascade persistent disk %s", name)
++	return diskID, volSizeGB, fstype, nil
++}
++
++// DeleteVolume deletes a Cascade volume.
++func (util *CascadeDiskUtil) DeleteVolume(disk *cascadeDiskDeleter) error {
++	cloud, err := getCloudProvider(disk.plugin.host.GetCloudProvider())
++	if err != nil {
++		glog.Errorf("Cascade Util: DeleteVolume failed to get cloud provider. Error [%v]", err)
++		return err
++	}
++
++	if err = cloud.DeleteDisk(disk.diskID); err != nil {
++		glog.Errorf("Cascade Util: failed to DeleteDisk for diskID %s. Error [%v]", disk.diskID, err)
++		return err
++	}
++
++	glog.V(4).Infof("Successfully deleted Cascade persistent disk %s", disk.diskID)
++	return nil
++}
++
++func getCloudProvider(cloud cloudprovider.Interface) (*cascade.CascadeCloud, error) {
++	if cloud == nil {
++		glog.Errorf("Cascade Util: Cloud provider not initialized properly")
++		return nil, fmt.Errorf("Cascade Util: Cloud provider not initialized properly")
++	}
++
++	cc, ok := cloud.(*cascade.CascadeCloud)
++	if !ok {
++		glog.Errorf("Invalid cloud provider: expected Cascade")
++		return nil, fmt.Errorf("Invalid cloud provider: expected Cascade")
++	}
++	return cc, nil
++}
+diff -uNr --no-dereference kubernetes-orig/pkg/volume/cascade_disk/OWNERS kubernetes/pkg/volume/cascade_disk/OWNERS
+--- kubernetes-orig/pkg/volume/cascade_disk/OWNERS	1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,2 @@
++maintainers:
++- ashokc
+diff -uNr --no-dereference kubernetes-orig/plugin/pkg/admission/persistentvolume/label/admission.go kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go
+--- kubernetes-orig/plugin/pkg/admission/persistentvolume/label/admission.go	2018-04-26 12:17:57.000000000 +0000
+@@ -27,6 +27,7 @@
+ 	api "k8s.io/kubernetes/pkg/apis/core"
+ 	"k8s.io/kubernetes/pkg/cloudprovider"
+ 	"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
++	"k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
+ 	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
+ 	kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
+ 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
+@@ -52,6 +53,7 @@
+ 	ebsVolumes       aws.Volumes
+ 	cloudConfig      []byte
+ 	gceCloudProvider *gce.GCECloud
++	cascadeDisks     cascade.Disks
+ }
+ 
+ var _ admission.MutationInterface = &persistentVolumeLabel{}
+@@ -104,6 +106,13 @@
+ 		}
+ 		volumeLabels = labels
+ 	}
++	if volume.Spec.CascadeDisk != nil {
++		labels, err := l.findCascadeDiskLabels(volume)
++		if err != nil {
++			return admission.NewForbidden(a, fmt.Errorf("error querying Cascade volume %s: %v", volume.Spec.CascadeDisk.DiskID, err))
++		}
++		volumeLabels = labels
++	}
+ 
+ 	if len(volumeLabels) != 0 {
+ 		if volume.Labels == nil {
+@@ -216,3 +225,48 @@
+ 	}
+ 	return l.gceCloudProvider, nil
+ }
++
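++// findCascadeDiskLabels queries the Cascade cloud provider for the labels of the given persistent disk.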
++func (l *persistentVolumeLabel) findCascadeDiskLabels(volume *api.PersistentVolume) (map[string]string, error) {
++	// Ignore any volumes that are being provisioned
++	if volume.Spec.CascadeDisk.DiskID == vol.ProvisionedVolumeName {
++		return nil, nil
++	}
++	cascadeDisks, err := l.getCascadeDisks()
++	if err != nil {
++		return nil, err
++	}
++	if cascadeDisks == nil {
++		return nil, fmt.Errorf("unable to build Cascade cloud provider for volumes")
++	}
++
++	labels, err := cascadeDisks.GetVolumeLabels(volume.Spec.CascadeDisk.DiskID)
++	if err != nil {
++		return nil, err
++	}
++
++	return labels, nil
++}
++
++// getCascadeDisks returns the Cascade Disks interface
++func (l *persistentVolumeLabel) getCascadeDisks() (cascade.Disks, error) {
++	l.mutex.Lock()
++	defer l.mutex.Unlock()
++
++	if l.cascadeDisks == nil {
++		var cloudConfigReader io.Reader
++		if len(l.cloudConfig) > 0 {
++			cloudConfigReader = bytes.NewReader(l.cloudConfig)
++		}
++		cloudProvider, err := cloudprovider.GetCloudProvider("cascade", cloudConfigReader)
++		if err != nil || cloudProvider == nil {
++			return nil, err
++		}
++		provider, ok := cloudProvider.(*cascade.CascadeCloud)
++		if !ok {
++			// GetCloudProvider has gone very wrong
++			return nil, fmt.Errorf("error retrieving Cascade cloud provider")
++		}
++		l.cascadeDisks = provider
++	}
++	return l.cascadeDisks, nil
++}
+diff -uNr --no-dereference kubernetes-orig/staging/src/k8s.io/api/core/v1/generated.pb.go kubernetes/staging/src/k8s.io/api/core/v1/generated.pb.go
3775
+--- kubernetes-orig/staging/src/k8s.io/api/core/v1/generated.pb.go	2018-04-26 12:17:57.000000000 +0000
3776
+@@ -35,6 +35,7 @@
3777
+ 		Binding
3778
+ 		CSIPersistentVolumeSource
3779
+ 		Capabilities
3780
++		CascadeDiskVolumeSource
3781
+ 		CephFSPersistentVolumeSource
3782
+ 		CephFSVolumeSource
3783
+ 		CinderVolumeSource
3784
+@@ -262,9 +263,11 @@
3785
+ func (*AvoidPods) ProtoMessage()               {}
3786
+ func (*AvoidPods) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} }
3787
+ 
3788
+-func (m *AzureDiskVolumeSource) Reset()                    { *m = AzureDiskVolumeSource{} }
3789
+-func (*AzureDiskVolumeSource) ProtoMessage()               {}
3790
+-func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} }
3791
++func (m *CascadeDiskVolumeSource) Reset()      { *m = CascadeDiskVolumeSource{} }
3792
++func (*CascadeDiskVolumeSource) ProtoMessage() {}
3793
++func (*CascadeDiskVolumeSource) Descriptor() ([]byte, []int) {
3794
++	return fileDescriptorGenerated, []int{4}
3795
++}
3796
+ 
3797
+ func (m *AzureFilePersistentVolumeSource) Reset()      { *m = AzureFilePersistentVolumeSource{} }
3798
+ func (*AzureFilePersistentVolumeSource) ProtoMessage() {}
3799
+@@ -1052,6 +1055,11 @@
3800
+ 	return fileDescriptorGenerated, []int{187}
3801
+ }
3802
+ 
3803
++func (m *AzureDiskVolumeSource) Reset()                    { *m = AzureDiskVolumeSource{} }
3804
++func (*AzureDiskVolumeSource) ProtoMessage()               {}
3805
++func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{188} }
3806
++
3807
++
3808
+ func init() {
3809
+ 	proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource")
3810
+ 	proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity")
3811
+@@ -1063,6 +1071,7 @@
3812
+ 	proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding")
3813
+ 	proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource")
3814
+ 	proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities")
3815
++	proto.RegisterType((*CascadeDiskVolumeSource)(nil), "k8s.io.api.core.v1.CascadeDiskVolumeSource")
3816
+ 	proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource")
3817
+ 	proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource")
3818
+ 	proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource")
3819
+@@ -1683,6 +1692,32 @@
3820
+ 	return i, nil
3821
+ }
3822
+ 
3823
++func (m *CascadeDiskVolumeSource) Marshal() (dAtA []byte, err error) {
3824
++	size := m.Size()
3825
++	dAtA = make([]byte, size)
3826
++	n, err := m.MarshalTo(dAtA)
3827
++	if err != nil {
3828
++		return nil, err
3829
++	}
3830
++	return dAtA[:n], nil
3831
++}
3832
++
3833
++func (m *CascadeDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) {
3834
++	var i int
3835
++	_ = i
3836
++	var l int
3837
++	_ = l
3838
++	dAtA[i] = 0xa
3839
++	i++
3840
++	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskID)))
3841
++	i += copy(dAtA[i:], m.DiskID)
3842
++	dAtA[i] = 0x12
3843
++	i++
3844
++	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
3845
++	i += copy(dAtA[i:], m.FSType)
3846
++	return i, nil
3847
++}
3848
++
3849
+ func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
3850
+ 	size := m.Size()
3851
+ 	dAtA = make([]byte, size)
3852
+@@ -6447,13 +6482,13 @@
3853
+ 		}
3854
+ 		i += n124
3855
+ 	}
3856
+-	if m.AzureDisk != nil {
3857
++	if m.CascadeDisk != nil {
3858
+ 		dAtA[i] = 0x82
3859
+ 		i++
3860
+ 		dAtA[i] = 0x1
3861
+ 		i++
3862
+-		i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
3863
+-		n125, err := m.AzureDisk.MarshalTo(dAtA[i:])
3864
++		i = encodeVarintGenerated(dAtA, i, uint64(m.CascadeDisk.Size()))
3865
++		n125, err := m.CascadeDisk.MarshalTo(dAtA[i:])
3866
+ 		if err != nil {
3867
+ 			return 0, err
3868
+ 		}
3869
+@@ -6531,6 +6566,18 @@
3870
+ 		}
3871
+ 		i += n131
3872
+ 	}
3873
++	if m.AzureDisk != nil {
3874
++		dAtA[i] = 0xba
3875
++		i++
3876
++		dAtA[i] = 0x1
3877
++		i++
3878
++		i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
3879
++		n132, err := m.AzureDisk.MarshalTo(dAtA[i:])
3880
++		if err != nil {
3881
++			return 0, err
3882
++		}
3883
++		i += n132
3884
++	}
3885
+ 	return i, nil
3886
+ }
3887
+ 
3888
+@@ -10544,13 +10591,13 @@
3889
+ 		}
3890
+ 		i += n229
3891
+ 	}
3892
+-	if m.AzureDisk != nil {
3893
++	if m.CascadeDisk != nil {
3894
+ 		dAtA[i] = 0xb2
3895
+ 		i++
3896
+ 		dAtA[i] = 0x1
3897
+ 		i++
3898
+-		i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
3899
+-		n230, err := m.AzureDisk.MarshalTo(dAtA[i:])
3900
++		i = encodeVarintGenerated(dAtA, i, uint64(m.CascadeDisk.Size()))
3901
++		n230, err := m.CascadeDisk.MarshalTo(dAtA[i:])
3902
+ 		if err != nil {
3903
+ 			return 0, err
3904
+ 		}
3905
+@@ -10616,6 +10663,18 @@
3906
+ 		}
3907
+ 		i += n235
3908
+ 	}
3909
++	if m.AzureDisk != nil {
3910
++		dAtA[i] = 0xe2
3911
++		i++
3912
++		dAtA[i] = 0x1
3913
++		i++
3914
++		i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size()))
3915
++		n236, err := m.AzureDisk.MarshalTo(dAtA[i:])
3916
++		if err != nil {
3917
++			return 0, err
3918
++		}
3919
++		i += n236
3920
++	}
3921
+ 	return i, nil
3922
+ }
3923
+ 
3924
+@@ -10873,6 +10932,16 @@
3925
+ 	return n
3926
+ }
3927
+ 
3928
++func (m *CascadeDiskVolumeSource) Size() (n int) {
3929
++	var l int
3930
++	_ = l
3931
++	l = len(m.DiskID)
3932
++	n += 1 + l + sovGenerated(uint64(l))
3933
++	l = len(m.FSType)
3934
++	n += 1 + l + sovGenerated(uint64(l))
3935
++	return n
3936
++}
3937
++
3938
+ func (m *CephFSPersistentVolumeSource) Size() (n int) {
3939
+ 	var l int
3940
+ 	_ = l
3941
+@@ -12616,8 +12685,8 @@
3942
+ 		l = m.Quobyte.Size()
3943
+ 		n += 1 + l + sovGenerated(uint64(l))
3944
+ 	}
3945
+-	if m.AzureDisk != nil {
3946
+-		l = m.AzureDisk.Size()
3947
++	if m.CascadeDisk != nil {
3948
++		l = m.CascadeDisk.Size()
3949
+ 		n += 2 + l + sovGenerated(uint64(l))
3950
+ 	}
3951
+ 	if m.PhotonPersistentDisk != nil {
3952
+@@ -12644,6 +12713,10 @@
3953
+ 		l = m.CSI.Size()
3954
+ 		n += 2 + l + sovGenerated(uint64(l))
3955
+ 	}
3956
++	if m.AzureDisk != nil {
3957
++		l = m.AzureDisk.Size()
3958
++		n += 2 + l + sovGenerated(uint64(l))
3959
++	}
3960
+ 	return n
3961
+ }
3962
+ 
3963
+@@ -14098,8 +14171,8 @@
3964
+ 		l = m.Quobyte.Size()
3965
+ 		n += 2 + l + sovGenerated(uint64(l))
3966
+ 	}
3967
+-	if m.AzureDisk != nil {
3968
+-		l = m.AzureDisk.Size()
3969
++	if m.CascadeDisk != nil {
3970
++		l = m.CascadeDisk.Size()
3971
+ 		n += 2 + l + sovGenerated(uint64(l))
3972
+ 	}
3973
+ 	if m.PhotonPersistentDisk != nil {
3974
+@@ -14122,6 +14195,10 @@
3975
+ 		l = m.StorageOS.Size()
3976
+ 		n += 2 + l + sovGenerated(uint64(l))
3977
+ 	}
3978
++	if m.AzureDisk != nil {
3979
++		l = m.AzureDisk.Size()
3980
++		n += 2 + l + sovGenerated(uint64(l))
3981
++	}
3982
+ 	return n
3983
+ }
3984
+ 
3985
+@@ -14296,6 +14373,17 @@
3986
+ 	}, "")
3987
+ 	return s
3988
+ }
3989
++func (this *CascadeDiskVolumeSource) String() string {
3990
++	if this == nil {
3991
++		return "nil"
3992
++	}
3993
++	s := strings.Join([]string{`&CascadeDiskVolumeSource{`,
3994
++		`DiskID:` + fmt.Sprintf("%v", this.DiskID) + `,`,
3995
++		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
3996
++		`}`,
3997
++	}, "")
3998
++	return s
3999
++}
4000
+ func (this *CephFSPersistentVolumeSource) String() string {
4001
+ 	if this == nil {
4002
+ 		return "nil"
4003
+@@ -15695,13 +15783,14 @@
4004
+ 		`AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`,
4005
+ 		`VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`,
4006
+ 		`Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`,
4007
+-		`AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
4008
++		`CascadeDisk:` + strings.Replace(fmt.Sprintf("%v", this.CascadeDisk), "CascadeDiskVolumeSource", "CascadeDiskVolumeSource", 1) + `,`,
4009
+ 		`PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`,
4010
+ 		`PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`,
4011
+ 		`ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`,
4012
+ 		`Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`,
4013
+ 		`StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`,
4014
+ 		`CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`,
4015
++		`AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
4016
+ 		`}`,
4017
+ 	}, "")
4018
+ 	return s
4019
+@@ -16843,12 +16932,13 @@
4020
+ 		`ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`,
4021
+ 		`VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`,
4022
+ 		`Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`,
4023
+-		`AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
4024
++		`CascadeDisk:` + strings.Replace(fmt.Sprintf("%v", this.CascadeDisk), "CascadeDiskVolumeSource", "CascadeDiskVolumeSource", 1) + `,`,
4025
+ 		`PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`,
4026
+ 		`PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`,
4027
+ 		`ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`,
4028
+ 		`Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`,
4029
+ 		`StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`,
4030
++		`AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
4031
+ 		`}`,
4032
+ 	}, "")
4033
+ 	return s
4034
+@@ -35335,7 +35425,7 @@
4035
+ 			iNdEx = postIndex
4036
+ 		case 16:
4037
+ 			if wireType != 2 {
4038
+-				return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
4039
++				return fmt.Errorf("proto: wrong wireType = %d for field CascadeDisk", wireType)
4040
+ 			}
4041
+ 			var msglen int
4042
+ 			for shift := uint(0); ; shift += 7 {
4043
+@@ -35359,10 +35449,10 @@
4044
+ 			if postIndex > l {
4045
+ 				return io.ErrUnexpectedEOF
4046
+ 			}
4047
+-			if m.AzureDisk == nil {
4048
+-				m.AzureDisk = &AzureDiskVolumeSource{}
4049
++			if m.CascadeDisk == nil {
4050
++				m.CascadeDisk = &CascadeDiskVolumeSource{}
4051
+ 			}
4052
+-			if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
4053
++			if err := m.CascadeDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
4054
+ 				return err
4055
+ 			}
4056
+ 			iNdEx = postIndex
4057
+@@ -35564,6 +35654,39 @@
4058
+ 				return err
4059
+ 			}
4060
+ 			iNdEx = postIndex
4061
++		case 23:
4062
++			if wireType != 2 {
4063
++				return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
4064
++			}
4065
++			var msglen int
4066
++			for shift := uint(0); ; shift += 7 {
4067
++				if shift >= 64 {
4068
++					return ErrIntOverflowGenerated
4069
++				}
4070
++				if iNdEx >= l {
4071
++					return io.ErrUnexpectedEOF
4072
++				}
4073
++				b := dAtA[iNdEx]
4074
++				iNdEx++
4075
++				msglen |= (int(b) & 0x7F) << shift
4076
++				if b < 0x80 {
4077
++					break
4078
++				}
4079
++			}
4080
++			if msglen < 0 {
4081
++				return ErrInvalidLengthGenerated
4082
++			}
4083
++			postIndex := iNdEx + msglen
4084
++			if postIndex > l {
4085
++				return io.ErrUnexpectedEOF
4086
++			}
4087
++			if m.AzureDisk == nil {
4088
++				m.AzureDisk = &AzureDiskVolumeSource{}
4089
++			}
4090
++			if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
4091
++				return err
4092
++			}
4093
++			iNdEx = postIndex
4094
+ 		default:
4095
+ 			iNdEx = preIndex
4096
+ 			skippy, err := skipGenerated(dAtA[iNdEx:])
4097
+@@ -36135,6 +36258,114 @@
4098
+ 	}
4099
+ 	return nil
4100
+ }
4101
++func (m *CascadeDiskVolumeSource) Unmarshal(dAtA []byte) error {
4102
++	l := len(dAtA)
4103
++	iNdEx := 0
4104
++	for iNdEx < l {
4105
++		preIndex := iNdEx
4106
++		var wire uint64
4107
++		for shift := uint(0); ; shift += 7 {
4108
++			if shift >= 64 {
4109
++				return ErrIntOverflowGenerated
4110
++			}
4111
++			if iNdEx >= l {
4112
++				return io.ErrUnexpectedEOF
4113
++			}
4114
++			b := dAtA[iNdEx]
4115
++			iNdEx++
4116
++			wire |= (uint64(b) & 0x7F) << shift
4117
++			if b < 0x80 {
4118
++				break
4119
++			}
4120
++		}
4121
++		fieldNum := int32(wire >> 3)
4122
++		wireType := int(wire & 0x7)
4123
++		if wireType == 4 {
4124
++			return fmt.Errorf("proto: CascadeDiskVolumeSource: wiretype end group for non-group")
4125
++		}
4126
++		if fieldNum <= 0 {
4127
++			return fmt.Errorf("proto: CascadeDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
4128
++		}
4129
++		switch fieldNum {
4130
++		case 1:
4131
++			if wireType != 2 {
4132
++				return fmt.Errorf("proto: wrong wireType = %d for field DiskID", wireType)
4133
++			}
4134
++			var stringLen uint64
4135
++			for shift := uint(0); ; shift += 7 {
4136
++				if shift >= 64 {
4137
++					return ErrIntOverflowGenerated
4138
++				}
4139
++				if iNdEx >= l {
4140
++					return io.ErrUnexpectedEOF
4141
++				}
4142
++				b := dAtA[iNdEx]
4143
++				iNdEx++
4144
++				stringLen |= (uint64(b) & 0x7F) << shift
4145
++				if b < 0x80 {
4146
++					break
4147
++				}
4148
++			}
4149
++			intStringLen := int(stringLen)
4150
++			if intStringLen < 0 {
4151
++				return ErrInvalidLengthGenerated
4152
++			}
4153
++			postIndex := iNdEx + intStringLen
4154
++			if postIndex > l {
4155
++				return io.ErrUnexpectedEOF
4156
++			}
4157
++			m.DiskID = string(dAtA[iNdEx:postIndex])
4158
++			iNdEx = postIndex
4159
++		case 2:
4160
++			if wireType != 2 {
4161
++				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
4162
++			}
4163
++			var stringLen uint64
4164
++			for shift := uint(0); ; shift += 7 {
4165
++				if shift >= 64 {
4166
++					return ErrIntOverflowGenerated
4167
++				}
4168
++				if iNdEx >= l {
4169
++					return io.ErrUnexpectedEOF
4170
++				}
4171
++				b := dAtA[iNdEx]
4172
++				iNdEx++
4173
++				stringLen |= (uint64(b) & 0x7F) << shift
4174
++				if b < 0x80 {
4175
++					break
4176
++				}
4177
++			}
4178
++			intStringLen := int(stringLen)
4179
++			if intStringLen < 0 {
4180
++				return ErrInvalidLengthGenerated
4181
++			}
4182
++			postIndex := iNdEx + intStringLen
4183
++			if postIndex > l {
4184
++				return io.ErrUnexpectedEOF
4185
++			}
4186
++			m.FSType = string(dAtA[iNdEx:postIndex])
4187
++			iNdEx = postIndex
4188
++		default:
4189
++			iNdEx = preIndex
4190
++			skippy, err := skipGenerated(dAtA[iNdEx:])
4191
++			if err != nil {
4192
++				return err
4193
++			}
4194
++			if skippy < 0 {
4195
++				return ErrInvalidLengthGenerated
4196
++			}
4197
++			if (iNdEx + skippy) > l {
4198
++				return io.ErrUnexpectedEOF
4199
++			}
4200
++			iNdEx += skippy
4201
++		}
4202
++	}
4203
++
4204
++	if iNdEx > l {
4205
++		return io.ErrUnexpectedEOF
4206
++	}
4207
++	return nil
4208
++}
4209
+ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error {
4210
+ 	l := len(dAtA)
4211
+ 	iNdEx := 0
4212
+@@ -49741,7 +49972,7 @@
+ 			iNdEx = postIndex
+ 		case 22:
+ 			if wireType != 2 {
+-				return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
++				return fmt.Errorf("proto: wrong wireType = %d for field CascadeDisk", wireType)
+ 			}
+ 			var msglen int
+ 			for shift := uint(0); ; shift += 7 {
+@@ -49765,10 +49996,10 @@
+ 			if postIndex > l {
+ 				return io.ErrUnexpectedEOF
+ 			}
+-			if m.AzureDisk == nil {
+-				m.AzureDisk = &AzureDiskVolumeSource{}
++			if m.CascadeDisk == nil {
++				m.CascadeDisk = &CascadeDiskVolumeSource{}
+ 			}
+-			if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
++			if err := m.CascadeDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ 				return err
+ 			}
+ 			iNdEx = postIndex
+@@ -49937,6 +50168,39 @@
+ 				return err
+ 			}
+ 			iNdEx = postIndex
++		case 28:
++			if wireType != 2 {
++				return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
++			}
++			var msglen int
++			for shift := uint(0); ; shift += 7 {
++				if shift >= 64 {
++					return ErrIntOverflowGenerated
++				}
++				if iNdEx >= l {
++					return io.ErrUnexpectedEOF
++				}
++				b := dAtA[iNdEx]
++				iNdEx++
++				msglen |= (int(b) & 0x7F) << shift
++				if b < 0x80 {
++					break
++				}
++			}
++			if msglen < 0 {
++				return ErrInvalidLengthGenerated
++			}
++			postIndex := iNdEx + msglen
++			if postIndex > l {
++				return io.ErrUnexpectedEOF
++			}
++			if m.AzureDisk == nil {
++				m.AzureDisk = &AzureDiskVolumeSource{}
++			}
++			if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
++				return err
++			}
++			iNdEx = postIndex
+ 		default:
+ 			iNdEx = preIndex
+ 			skippy, err := skipGenerated(dAtA[iNdEx:])
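Note what the two hunks above do: protobuf field 22 of VolumeSource, which upstream assigns to azureDisk, is repurposed for cascadeDisk, and azureDisk is re-added as new field 28. A protobuf tag on the wire encodes only the field number and wire type, so bytes written by an unpatched encoder with azureDisk at field 22 would be decoded as cascadeDisk by this patched reader. A small self-contained sketch of how those tag bytes are formed (illustrative code, not from the patch):

package main

import "fmt"

// fieldTag returns the varint-encoded protobuf tag for a field:
// tag = (fieldNumber << 3) | wireType.
func fieldTag(fieldNumber, wireType int) []byte {
	n := uint64(fieldNumber)<<3 | uint64(wireType)
	var out []byte
	for n >= 0x80 {
		out = append(out, byte(n)|0x80) // low 7 bits plus continuation bit
		n >>= 7
	}
	return append(out, byte(n))
}

func main() {
	// The same tag bytes mean azureDisk upstream and cascadeDisk here.
	fmt.Printf("field 22, wire type 2: % x\n", fieldTag(22, 2)) // b2 01
	fmt.Printf("field 28, wire type 2: % x\n", fieldTag(28, 2)) // e2 01
}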
+diff -uNr --no-dereference kubernetes-orig/staging/src/k8s.io/api/core/v1/types.go kubernetes/staging/src/k8s.io/api/core/v1/types.go
+--- kubernetes-orig/staging/src/k8s.io/api/core/v1/types.go	2018-04-26 12:17:57.000000000 +0000
+@@ -333,9 +333,9 @@
+ 	// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
+ 	// +optional
+ 	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
+-	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
++	// CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
+ 	// +optional
+-	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
++	CascadeDisk *CascadeDiskVolumeSource `json:"cascadeDisk,omitempty" protobuf:"bytes,22,opt,name=cascadeDisk"`
+ 	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
+ 	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
+ 	// Items for all in one resources secrets, configmaps, and downward API
+@@ -349,6 +349,9 @@
+ 	// StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ 	// +optional
+ 	StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
++	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
++	// +optional
++	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,28,opt,name=azureDisk"`
+ }
+ 
+ // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
+@@ -428,9 +431,9 @@
+ 	// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
+ 	// +optional
+ 	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
+-	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
++	// CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
+ 	// +optional
+-	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
++	CascadeDisk *CascadeDiskVolumeSource `json:"cascadeDisk,omitempty" protobuf:"bytes,16,opt,name=cascadeDisk"`
+ 	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
+ 	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
+ 	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
+@@ -449,6 +452,9 @@
+ 	// CSI represents storage that handled by an external CSI driver (Beta feature).
+ 	// +optional
+ 	CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
++	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
++	// +optional
++	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,23,opt,name=azureDisk"`
+ }
+ 
+ const (
+@@ -1617,6 +1623,16 @@
+ 	SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
+ }
+ 
++// Represents a Cascade persistent disk resource.
++type CascadeDiskVolumeSource struct {
++	// ID that identifies Cascade persistent disk
++	DiskID string `json:"diskID" protobuf:"bytes,1,opt,name=diskID"`
++	// Filesystem type to mount.
++	// Must be a filesystem type supported by the host operating system.
++	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
++	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
++}
++
+ // Adapts a ConfigMap into a volume.
+ //
+ // The contents of the target ConfigMap's Data field will be presented in a
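With the CascadeDiskVolumeSource type above in place, a client built against the patched types could populate the new cascadeDisk volume source as sketched below. To keep the example self-contained, it uses local mirrors of the patched structs rather than the real k8s.io/api/core/v1 package; field names and JSON tags match the types.go hunk, and the disk ID is a placeholder:

package main

import (
	"encoding/json"
	"fmt"
)

// CascadeDiskVolumeSource mirrors the struct added by the patch.
type CascadeDiskVolumeSource struct {
	DiskID string `json:"diskID"`
	FSType string `json:"fsType,omitempty"`
}

// volume mirrors just the relevant slice of v1.Volume for this example.
type volume struct {
	Name        string                   `json:"name"`
	CascadeDisk *CascadeDiskVolumeSource `json:"cascadeDisk,omitempty"`
}

func main() {
	v := volume{
		Name: "data",
		CascadeDisk: &CascadeDiskVolumeSource{
			DiskID: "example-disk-id", // placeholder; a real Cascade disk ID goes here
			FSType: "ext4",            // optional; inferred to be ext4 when unspecified
		},
	}
	out, _ := json.MarshalIndent(v, "", "  ")
	fmt.Println(string(out))
}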
... ...
@@ -1,13 +1,14 @@
 Summary:        Kubernetes cluster management
 Name:           kubernetes
 Version:        1.10.2
-Release:        1%{?dist}
+Release:        2%{?dist}
 License:        ASL 2.0
 URL:            https://github.com/kubernetes/kubernetes/archive/v%{version}.tar.gz
 Source0:        kubernetes-%{version}.tar.gz
 %define sha1    kubernetes-%{version}.tar.gz=e1cef85820ff16265788c96a6fd31056bfaf247c
 Source1:        https://github.com/kubernetes/contrib/archive/contrib-0.7.0.tar.gz
 %define sha1    contrib-0.7.0=47a744da3b396f07114e518226b6313ef4b2203c
+Patch0:         k8s-vke.patch
 Group:          Development/Tools
 Vendor:         VMware, Inc.
 Distribution:   Photon
... ...
@@ -47,6 +48,7 @@ cd ..
 tar xf %{SOURCE1} --no-same-owner
 sed -i -e 's|127.0.0.1:4001|127.0.0.1:2379|g' contrib-0.7.0/init/systemd/environ/apiserver
 cd %{name}-%{version}
+%patch0 -p1
 
 %build
 make
... ...
@@ -183,6 +185,8 @@ fi
 %{_bindir}/pause-amd64
 
 %changelog
+*   Thu May 17 2018 Sharath George <sharathg@vmware.com> 1.10.2-2
+-   Add vke patch.
 *   Thu May 03 2018 Xiaolin Li <xiaolinl@vmware.com> 1.10.2-1
 -   Add kubernetes 1.10.2.
 *   Tue May 01 2018 Dheeraj Shetty <dheerajs@vmware.com> 1.9.6-2