Change-Id: I5ba09abfa719a32d7f40d82a1eb094d6bf640515
Reviewed-on: http://photon-jenkins.eng.vmware.com:8082/4689
Reviewed-by: Ashok Chandrasekar <ashokc@vmware.com>
Tested-by: gerrit-photon <photon-checkins@vmware.com>
Reviewed-by: Divya Thaluru <dthaluru@vmware.com>
new file mode 100644
@@ -0,0 +1,4206 @@
+diff -uNr --no-dereference kubernetes-1.8.1/api/swagger-spec/apps_v1alpha1.json cascade-kubernetes/api/swagger-spec/apps_v1alpha1.json
+--- kubernetes-1.8.1/api/swagger-spec/apps_v1alpha1.json 2018-01-23 22:47:25.146819339 +0000
+@@ -1459,6 +1459,10 @@
+ "photonPersistentDisk": {
+ "$ref": "v1.PhotonPersistentDiskVolumeSource",
+ "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
++ },
++ "cascadeDisk": {
++ "$ref": "v1.CascadeDiskVolumeSource",
++ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+ }
+ }
+ },
+@@ -2105,6 +2109,23 @@
+ },
+ "fsType": {
+ "type": "string",
++ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++ }
++ }
++ },
++ "v1.CascadeDiskVolumeSource": {
++ "id": "v1.CascadeDiskVolumeSource",
++ "description": "Represents a Cascade persistent disk resource.",
++ "required": [
++ "diskID"
++ ],
++ "properties": {
++ "diskID": {
++ "type": "string",
++ "description": "ID that identifies Cascade persistent disk"
++ },
++ "fsType": {
++ "type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+diff -uNr --no-dereference kubernetes-1.8.1/api/swagger-spec/apps_v1beta1.json cascade-kubernetes/api/swagger-spec/apps_v1beta1.json
+--- kubernetes-1.8.1/api/swagger-spec/apps_v1beta1.json 2018-01-23 22:47:25.146819339 +0000
+@@ -4400,6 +4400,10 @@
+ "$ref": "v1.PhotonPersistentDiskVolumeSource",
+ "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
+ },
++ "cascadeDisk": {
++ "$ref": "v1.CascadeDiskVolumeSource",
++ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
++ },
+ "projected": {
+ "$ref": "v1.ProjectedVolumeSource",
+ "description": "Items for all in one resources secrets, configmaps, and downward API"
+@@ -5123,6 +5127,23 @@
+ },
+ "fsType": {
+ "type": "string",
++ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++ }
++ }
++ },
++ "v1.CascadeDiskVolumeSource": {
++ "id": "v1.CascadeDiskVolumeSource",
++ "description": "Represents a Cascade persistent disk resource.",
++ "required": [
++ "diskID"
++ ],
++ "properties": {
++ "diskID": {
++ "type": "string",
++ "description": "ID that identifies Cascade persistent disk"
++ },
++ "fsType": {
++ "type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+diff -uNr --no-dereference kubernetes-1.8.1/api/swagger-spec/apps_v1beta2.json cascade-kubernetes/api/swagger-spec/apps_v1beta2.json
+--- kubernetes-1.8.1/api/swagger-spec/apps_v1beta2.json 2018-01-23 22:47:25.146819339 +0000
+@@ -6730,6 +6730,10 @@
+ "$ref": "v1.PhotonPersistentDiskVolumeSource",
+ "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
+ },
++ "cascadeDisk": {
++ "$ref": "v1.CascadeDiskVolumeSource",
++ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
++ },
+ "projected": {
+ "$ref": "v1.ProjectedVolumeSource",
+ "description": "Items for all in one resources secrets, configmaps, and downward API"
+@@ -7453,6 +7457,23 @@
+ },
+ "fsType": {
+ "type": "string",
++ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++ }
++ }
++ },
++ "v1.CascadeDiskVolumeSource": {
++ "id": "v1.CascadeDiskVolumeSource",
++ "description": "Represents a Cascade persistent disk resource.",
++ "required": [
++ "diskID"
++ ],
++ "properties": {
++ "diskID": {
++ "type": "string",
++ "description": "ID that identifies Cascade persistent disk"
++ },
++ "fsType": {
++ "type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+diff -uNr --no-dereference kubernetes-1.8.1/api/swagger-spec/batch_v1beta1.json cascade-kubernetes/api/swagger-spec/batch_v1beta1.json
+--- kubernetes-1.8.1/api/swagger-spec/batch_v1beta1.json 2018-01-23 22:47:25.150819339 +0000
+@@ -1850,6 +1850,10 @@
+ "$ref": "v1.PhotonPersistentDiskVolumeSource",
+ "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
+ },
++ "cascadeDisk": {
++ "$ref": "v1.CascadeDiskVolumeSource",
++ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
++ },
+ "projected": {
+ "$ref": "v1.ProjectedVolumeSource",
+ "description": "Items for all in one resources secrets, configmaps, and downward API"
+@@ -2573,6 +2577,23 @@
+ },
+ "fsType": {
+ "type": "string",
++ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++ }
++ }
++ },
++ "v1.CascadeDiskVolumeSource": {
++ "id": "v1.CascadeDiskVolumeSource",
++ "description": "Represents a Cascade persistent disk resource.",
++ "required": [
++ "diskID"
++ ],
++ "properties": {
++ "diskID": {
++ "type": "string",
++ "description": "ID that identifies Cascade persistent disk"
++ },
++ "fsType": {
++ "type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+diff -uNr --no-dereference kubernetes-1.8.1/api/swagger-spec/batch_v1.json cascade-kubernetes/api/swagger-spec/batch_v1.json
+--- kubernetes-1.8.1/api/swagger-spec/batch_v1.json 2018-01-23 22:47:25.150819339 +0000
+@@ -1795,6 +1795,10 @@
+ "$ref": "v1.PhotonPersistentDiskVolumeSource",
+ "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
+ },
++ "cascadeDisk": {
++ "$ref": "v1.CascadeDiskVolumeSource",
++ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
++ },
+ "projected": {
+ "$ref": "v1.ProjectedVolumeSource",
+ "description": "Items for all in one resources secrets, configmaps, and downward API"
+@@ -2518,6 +2522,23 @@
+ },
+ "fsType": {
+ "type": "string",
++ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++ }
++ }
++ },
++ "v1.CascadeDiskVolumeSource": {
++ "id": "v1.CascadeDiskVolumeSource",
++ "description": "Represents a Cascade persistent disk resource.",
++ "required": [
++ "diskID"
++ ],
++ "properties": {
++ "diskID": {
++ "type": "string",
++ "description": "ID that identifies Cascade persistent disk"
++ },
++ "fsType": {
++ "type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+diff -uNr --no-dereference kubernetes-1.8.1/api/swagger-spec/batch_v2alpha1.json cascade-kubernetes/api/swagger-spec/batch_v2alpha1.json
+--- kubernetes-1.8.1/api/swagger-spec/batch_v2alpha1.json 2018-01-23 22:47:25.150819339 +0000
+@@ -1865,6 +1865,10 @@
+ "storageos": {
+ "$ref": "v1.StorageOSVolumeSource",
+ "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
++ },
++ "cascadeDisk": {
++ "$ref": "v1.CascadeDiskVolumeSource",
++ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+ }
+ }
+ },
+@@ -2769,6 +2773,23 @@
+ }
+ }
+ },
++ "v1.CascadeDiskVolumeSource": {
++ "id": "v1.CascadeDiskVolumeSource",
++ "description": "Represents a Cascade persistent disk resource.",
++ "required": [
++ "diskID"
++ ],
++ "properties": {
++ "diskID": {
++ "type": "string",
++ "description": "ID that identifies Cascade persistent disk"
++ },
++ "fsType": {
++ "type": "string",
++ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++ }
++ }
++ },
+ "v1.Container": {
+ "id": "v1.Container",
+ "description": "A single application container that you want to run within a pod.",
+diff -uNr --no-dereference kubernetes-1.8.1/api/swagger-spec/extensions_v1beta1.json cascade-kubernetes/api/swagger-spec/extensions_v1beta1.json
+--- kubernetes-1.8.1/api/swagger-spec/extensions_v1beta1.json 2018-01-23 22:47:25.150819339 +0000
+@@ -7363,6 +7363,10 @@
+ "storageos": {
+ "$ref": "v1.StorageOSVolumeSource",
+ "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
++ },
++ "cascadeDisk": {
++ "$ref": "v1.CascadeDiskVolumeSource",
++ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+ }
+ }
+ },
+@@ -8071,6 +8075,23 @@
+ },
+ "fsType": {
+ "type": "string",
++ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++ }
++ }
++ },
++ "v1.CascadeDiskVolumeSource": {
++ "id": "v1.CascadeDiskVolumeSource",
++ "description": "Represents a Cascade persistent disk resource.",
++ "required": [
++ "diskID"
++ ],
++ "properties": {
++ "diskID": {
++ "type": "string",
++ "description": "ID that identifies Cascade persistent disk"
++ },
++ "fsType": {
++ "type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+diff -uNr --no-dereference kubernetes-1.8.1/api/swagger-spec/settings.k8s.io_v1alpha1.json cascade-kubernetes/api/swagger-spec/settings.k8s.io_v1alpha1.json
+--- kubernetes-1.8.1/api/swagger-spec/settings.k8s.io_v1alpha1.json 2018-01-23 22:47:25.154819339 +0000
+@@ -1661,6 +1661,10 @@
+ "storageos": {
+ "$ref": "v1.StorageOSVolumeSource",
+ "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
++ },
++ "cascadeDisk": {
++ "$ref": "v1.CascadeDiskVolumeSource",
++ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+ }
+ }
+ },
+@@ -2331,6 +2335,23 @@
+ },
+ "fsType": {
+ "type": "string",
++ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++ }
++ }
++ },
++ "v1.CascadeDiskVolumeSource": {
++ "id": "v1.CascadeDiskVolumeSource",
++ "description": "Represents a Cascade persistent disk resource.",
++ "required": [
++ "diskID"
++ ],
++ "properties": {
++ "diskID": {
++ "type": "string",
++ "description": "ID that identifies Cascade persistent disk"
++ },
++ "fsType": {
++ "type": "string",
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
+ }
+ }
+diff -uNr --no-dereference kubernetes-1.8.1/api/swagger-spec/v1.json cascade-kubernetes/api/swagger-spec/v1.json
+--- kubernetes-1.8.1/api/swagger-spec/v1.json 2018-01-23 22:47:25.154819339 +0000
+@@ -20271,6 +20271,10 @@
+ "$ref": "v1.PhotonPersistentDiskVolumeSource",
+ "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
+ },
++ "cascadeDisk": {
++ "$ref": "v1.CascadeDiskVolumeSource",
++ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
++ },
+ "portworxVolume": {
+ "$ref": "v1.PortworxVolumeSource",
+ "description": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine"
+@@ -20834,6 +20838,23 @@
+ }
+ }
+ },
++ "v1.CascadeDiskVolumeSource": {
++ "id": "v1.CascadeDiskVolumeSource",
++ "description": "Represents a Cascade persistent disk resource.",
++ "required": [
++ "diskID"
++ ],
++ "properties": {
++ "diskID": {
++ "type": "string",
++ "description": "ID that identifies Cascade persistent disk"
++ },
++ "fsType": {
++ "type": "string",
++ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
++ }
++ }
++ },
+ "v1.PortworxVolumeSource": {
+ "id": "v1.PortworxVolumeSource",
+ "description": "PortworxVolumeSource represents a Portworx volume resource.",
+@@ -21265,6 +21286,10 @@
+ "storageos": {
+ "$ref": "v1.StorageOSVolumeSource",
+ "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
++ },
++ "cascadeDisk": {
++ "$ref": "v1.CascadeDiskVolumeSource",
++ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine"
+ }
+ }
+ },
+diff -uNr --no-dereference kubernetes-1.8.1/cmd/kube-controller-manager/app/BUILD cascade-kubernetes/cmd/kube-controller-manager/app/BUILD
+--- kubernetes-1.8.1/cmd/kube-controller-manager/app/BUILD 2018-01-23 22:47:25.198819341 +0000
+@@ -42,6 +42,7 @@
+ "//pkg/cloudprovider/providers:go_default_library",
+ "//pkg/cloudprovider/providers/aws:go_default_library",
+ "//pkg/cloudprovider/providers/azure:go_default_library",
++ "//pkg/cloudprovider/providers/cascade:go_default_library",
+ "//pkg/cloudprovider/providers/gce:go_default_library",
+ "//pkg/cloudprovider/providers/openstack:go_default_library",
+ "//pkg/cloudprovider/providers/photon:go_default_library",
+@@ -84,6 +85,7 @@
+ "//pkg/volume/aws_ebs:go_default_library",
+ "//pkg/volume/azure_dd:go_default_library",
+ "//pkg/volume/azure_file:go_default_library",
++ "//pkg/volume/cascade_disk:go_default_library",
+ "//pkg/volume/cinder:go_default_library",
+ "//pkg/volume/fc:go_default_library",
+ "//pkg/volume/flexvolume:go_default_library",
+diff -uNr --no-dereference kubernetes-1.8.1/cmd/kube-controller-manager/app/plugins.go cascade-kubernetes/cmd/kube-controller-manager/app/plugins.go
+--- kubernetes-1.8.1/cmd/kube-controller-manager/app/plugins.go 2018-01-23 22:47:25.198819341 +0000
+@@ -32,6 +32,7 @@
+ "k8s.io/kubernetes/pkg/cloudprovider"
+ "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
+ "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
++ "k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
+ "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
+ "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
+ "k8s.io/kubernetes/pkg/cloudprovider/providers/photon"
+@@ -58,6 +59,7 @@
+ "k8s.io/kubernetes/pkg/volume/storageos"
+ volumeutil "k8s.io/kubernetes/pkg/volume/util"
+ "k8s.io/kubernetes/pkg/volume/vsphere_volume"
++ "k8s.io/kubernetes/pkg/volume/cascade_disk"
+ )
+
+ // ProbeAttachableVolumePlugins collects all volume plugins for the attach/
+@@ -78,6 +80,7 @@
+ allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
++ allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
+ return allPlugins
+ }
+
+@@ -104,6 +107,7 @@
+ allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
++ allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
+ return allPlugins
+ }
+
+@@ -168,6 +172,8 @@
+ allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...)
+ case photon.ProviderName == cloud.ProviderName():
+ allPlugins = append(allPlugins, photon_pd.ProbeVolumePlugins()...)
++ case cascade.ProviderName == cloud.ProviderName():
++ allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
+ }
+ }
+
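
Note: the hunks above follow the controller manager's standard plugin-probing pattern: every volume plugin package exposes a ProbeVolumePlugins() function, and cloud-specific plugins are registered only when the matching provider is active. A minimal Go sketch of that dispatch, paraphrasing the patched function rather than reproducing it (the helper name probeCloudVolumePlugins is hypothetical):

    func probeCloudVolumePlugins(cloud cloudprovider.Interface) []volume.VolumePlugin {
            allPlugins := []volume.VolumePlugin{}
            if cloud != nil {
                    switch {
                    case photon.ProviderName == cloud.ProviderName():
                            allPlugins = append(allPlugins, photon_pd.ProbeVolumePlugins()...)
                    // The analogous case added by this patch for the Cascade provider.
                    case cascade.ProviderName == cloud.ProviderName():
                            allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
                    }
            }
            return allPlugins
    }
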
+diff -uNr --no-dereference kubernetes-1.8.1/cmd/kubelet/app/BUILD cascade-kubernetes/cmd/kubelet/app/BUILD
+--- kubernetes-1.8.1/cmd/kubelet/app/BUILD 2018-01-23 22:47:25.214819342 +0000
+@@ -73,6 +73,7 @@
+ "//pkg/volume/aws_ebs:go_default_library",
+ "//pkg/volume/azure_dd:go_default_library",
+ "//pkg/volume/azure_file:go_default_library",
++ "//pkg/volume/cascade_disk:go_default_library",
+ "//pkg/volume/cephfs:go_default_library",
+ "//pkg/volume/cinder:go_default_library",
+ "//pkg/volume/configmap:go_default_library",
+diff -uNr --no-dereference kubernetes-1.8.1/cmd/kubelet/app/plugins.go cascade-kubernetes/cmd/kubelet/app/plugins.go
+--- kubernetes-1.8.1/cmd/kubelet/app/plugins.go 2018-01-23 22:47:25.214819342 +0000
+@@ -32,6 +32,7 @@
+ "k8s.io/kubernetes/pkg/volume/aws_ebs"
+ "k8s.io/kubernetes/pkg/volume/azure_dd"
+ "k8s.io/kubernetes/pkg/volume/azure_file"
++ "k8s.io/kubernetes/pkg/volume/cascade_disk"
+ "k8s.io/kubernetes/pkg/volume/cephfs"
+ "k8s.io/kubernetes/pkg/volume/cinder"
+ "k8s.io/kubernetes/pkg/volume/configmap"
+@@ -96,6 +97,7 @@
+ allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
++ allPlugins = append(allPlugins, cascade_disk.ProbeVolumePlugins()...)
+ return allPlugins
+ }
+
+diff -uNr --no-dereference kubernetes-1.8.1/federation/apis/openapi-spec/swagger.json cascade-kubernetes/federation/apis/openapi-spec/swagger.json
+--- kubernetes-1.8.1/federation/apis/openapi-spec/swagger.json 2018-01-23 22:47:25.278819344 +0000
+@@ -10540,6 +10540,22 @@
+ }
+ }
+ },
++ "io.k8s.api.core.v1.CascadeDiskVolumeSource": {
++ "description": "Represents a Cascade persistent disk resource.",
++ "required": [
++ "diskID"
++ ],
++ "properties": {
++ "fsType": {
++ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
++ "type": "string"
++ },
++ "diskID": {
++ "description": "ID that identifies Cascade persistent disk",
++ "type": "string"
++ }
++ }
++ },
+ "io.k8s.api.core.v1.Capabilities": {
+ "description": "Adds and removes POSIX capabilities from running containers.",
+ "properties": {
+@@ -12865,6 +12881,10 @@
+ "vsphereVolume": {
+ "description": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
+ "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource"
++ },
++ "cascadeDisk": {
++ "description": "CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine",
++ "$ref": "#/definitions/io.k8s.api.core.v1.CascadeDiskVolumeSource"
+ }
+ }
+ },
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/api/types.go cascade-kubernetes/pkg/api/types.go
+--- kubernetes-1.8.1/pkg/api/types.go 2018-01-23 22:47:25.350819346 +0000
+@@ -316,6 +316,8 @@
+ // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
+ // +optional
+ StorageOS *StorageOSVolumeSource
++ // CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
++ CascadeDisk *CascadeDiskVolumeSource
+ }
+
+ // Similar to VolumeSource but meant for the administrator who creates PVs.
+@@ -391,6 +393,8 @@
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
+ // +optional
+ StorageOS *StorageOSPersistentVolumeSource
++ // CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine
++ CascadeDisk *CascadeDiskVolumeSource
+ }
+
+ type PersistentVolumeClaimVolumeSource struct {
+@@ -1333,6 +1337,16 @@
+ SecretRef *ObjectReference
+ }
+
++// Represents a Cascade persistent disk resource.
++type CascadeDiskVolumeSource struct {
++ // ID that identifies Cascade persistent disk
++ DiskID string
++ // Filesystem type to mount.
++ // Must be a filesystem type supported by the host operating system.
++ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
++ FSType string
++}
++
+ // Adapts a ConfigMap into a volume.
+ //
+ // The contents of the target ConfigMap's Data field will be presented in a
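
Note: to make the new field concrete, here is a minimal Go sketch (not part of the patch) of a Volume populated with the CascadeDiskVolumeSource defined above; the disk ID is a made-up placeholder, and FSType may be left empty to get the documented ext4 default:

    vol := api.Volume{
            Name: "cascade-data",
            VolumeSource: api.VolumeSource{
                    CascadeDisk: &api.CascadeDiskVolumeSource{
                            DiskID: "disk-0000", // placeholder Cascade disk ID
                            FSType: "ext4",      // optional; "ext4" is the implied default
                    },
            },
    }
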
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/api/validation/validation.go cascade-kubernetes/pkg/api/validation/validation.go
+--- kubernetes-1.8.1/pkg/api/validation/validation.go 2018-01-23 22:47:25.354819347 +0000
+@@ -612,6 +612,14 @@
+ allErrs = append(allErrs, validateScaleIOVolumeSource(source.ScaleIO, fldPath.Child("scaleIO"))...)
+ }
+ }
++ if source.CascadeDisk != nil {
++ if numVolumes > 0 {
++ allErrs = append(allErrs, field.Forbidden(fldPath.Child("cascadeDisk"), "may not specify more than 1 volume type"))
++ } else {
++ numVolumes++
++ allErrs = append(allErrs, validateCascadeDiskVolumeSource(source.CascadeDisk, fldPath.Child("cascadeDisk"))...)
++ }
++ }
+
+ if numVolumes == 0 {
+ allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
+@@ -1283,6 +1291,14 @@
+ return allErrs
+ }
+
++func validateCascadeDiskVolumeSource(cd *api.CascadeDiskVolumeSource, fldPath *field.Path) field.ErrorList {
++ allErrs := field.ErrorList{}
++ if len(cd.DiskID) == 0 {
++ allErrs = append(allErrs, field.Required(fldPath.Child("diskID"), ""))
++ }
++ return allErrs
++}
++
+ // ValidatePersistentVolumeName checks that a name is appropriate for a
+ // PersistentVolumeName object.
+ var ValidatePersistentVolumeName = NameIsDNSSubdomain
+@@ -1504,6 +1520,14 @@
+ allErrs = append(allErrs, validateStorageOSPersistentVolumeSource(pv.Spec.StorageOS, specPath.Child("storageos"))...)
+ }
+ }
++ if pv.Spec.CascadeDisk != nil {
++ if numVolumes > 0 {
++ allErrs = append(allErrs, field.Forbidden(specPath.Child("cascadeDisk"), "may not specify more than 1 volume type"))
++ } else {
++ numVolumes++
++ allErrs = append(allErrs, validateCascadeDiskVolumeSource(pv.Spec.CascadeDisk, specPath.Child("cascadeDisk"))...)
++ }
++ }
+
+ if numVolumes == 0 {
+ allErrs = append(allErrs, field.Required(specPath, "must specify a volume type"))
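
Note: the validator's contract is deliberately small: a cascadeDisk source is rejected only when diskID is empty, and the existing one-volume-type rule is extended to cover the new field. A sketch of a same-package unit test exercising that contract (test scaffolding and imports assumed):

    func TestValidateCascadeDiskVolumeSource(t *testing.T) {
            errs := validateCascadeDiskVolumeSource(
                    &api.CascadeDiskVolumeSource{DiskID: ""},
                    field.NewPath("spec", "cascadeDisk"))
            if len(errs) != 1 {
                    // An empty diskID must yield exactly one field.Required error.
                    t.Errorf("expected 1 error for empty diskID, got %d", len(errs))
            }
    }
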
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/apis/extensions/types.go cascade-kubernetes/pkg/apis/extensions/types.go
+--- kubernetes-1.8.1/pkg/apis/extensions/types.go 2018-01-23 22:47:25.370819347 +0000
+@@ -1002,6 +1002,7 @@
+ Projected FSType = "projected"
+ PortworxVolume FSType = "portworxVolume"
+ ScaleIO FSType = "scaleIO"
++ CascadeDisk FSType = "cascadeDisk"
+ All FSType = "*"
+ )
+
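
Note: the FSType constant added here is what lets a PodSecurityPolicy allow the new volume type by name. An illustrative Go sketch of the kind of check such policies rely on (a paraphrase for orientation, not the actual helper in the policy package):

    func fsTypeAllowed(requested extensions.FSType, policy *extensions.PodSecurityPolicy) bool {
            for _, v := range policy.Spec.Volumes {
                    if v == extensions.All || v == requested {
                            return true
                    }
            }
            return false
    }
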
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/BUILD cascade-kubernetes/pkg/cloudprovider/providers/BUILD
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/BUILD 2018-01-23 22:47:25.414819349 +0000
+@@ -11,6 +11,7 @@
+ deps = [
+ "//pkg/cloudprovider/providers/aws:go_default_library",
+ "//pkg/cloudprovider/providers/azure:go_default_library",
++ "//pkg/cloudprovider/providers/cascade:go_default_library",
+ "//pkg/cloudprovider/providers/cloudstack:go_default_library",
+ "//pkg/cloudprovider/providers/gce:go_default_library",
+ "//pkg/cloudprovider/providers/openstack:go_default_library",
+@@ -34,6 +35,7 @@
+ ":package-srcs",
+ "//pkg/cloudprovider/providers/aws:all-srcs",
+ "//pkg/cloudprovider/providers/azure:all-srcs",
++ "//pkg/cloudprovider/providers/cascade:all-srcs",
+ "//pkg/cloudprovider/providers/cloudstack:all-srcs",
+ "//pkg/cloudprovider/providers/fake:all-srcs",
+ "//pkg/cloudprovider/providers/gce:all-srcs",
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/apitypes.go cascade-kubernetes/pkg/cloudprovider/providers/cascade/apitypes.go
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/apitypes.go 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,224 @@
++package cascade
++
++import "fmt"
++
++const (
++ NotFoundError = 1408
++ DiskNotFoundError = 3011
++)
++
++// Represents APIError returned by the API in case of an error.
++type APIError struct {
++ Code *string `json:"code"`
++ Data map[string]string `json:"data"`
++ ErrorCode int32 `json:"errorCode,omitempty"`
++ Message *string `json:"message"`
++ HttpStatusCode int `json:"-"` // Not part of API contract
++}
++
++// Implement Go error interface for APIError.
++func (e APIError) Error() string {
++ return fmt.Sprintf(
++ "Cascade: { HTTP status: '%d', code: '%s', message: '%s', data: '%v', errorcode: '%d' }",
++ e.HttpStatusCode, StringVal(e.Code), StringVal(e.Message), e.Data, e.ErrorCode)
++}
++
++// Used to represent a generic HTTP error, i.e. an unexpected HTTP 500.
++type HttpError struct {
++ StatusCode int
++ Message string
++}
++
++// Implementation of error interface for HttpError.
++func (e HttpError) Error() string {
++ return fmt.Sprintf("Cascade: HTTP %d: %v", e.StatusCode, e.Message)
++}
++
++// Represents a task which gets returned for long running API calls.
++type Task struct {
++ EndTime int64 `json:"endTime,omitempty"`
++ Entity *Entity `json:"entity,omitempty"`
++ ID *string `json:"id"`
++ Operation string `json:"operation,omitempty"`
++ QueuedTime *int64 `json:"queuedTime"`
++ ResourceProperties interface{} `json:"resourceProperties,omitempty"`
++ SelfLink string `json:"selfLink,omitempty"`
++ StartedTime *int64 `json:"startedTime"`
++ State *string `json:"state"`
++ Steps []*Step `json:"steps"`
++}
++
++// Represents the entity associated with the task.
++type Entity struct {
++ ID *string `json:"id"`
++ Kind *string `json:"kind"`
++}
++
++// Represents a task that has entered into an error state. Task errors can be caught and type-checked against with the
++// usual Go idiom.
++type TaskError struct {
++ ID string `json:"id"`
++ Step Step `json:"step,omitempty"`
++}
++
++// Implement Go error interface for TaskError.
++func (e TaskError) Error() string {
++ return fmt.Sprintf("Cascade: Task '%s' is in error state: {@step==%s}", e.ID, GetStep(e.Step))
++}
++
++// An error representing a timeout while waiting for a task to complete.
++type TaskTimeoutError struct {
++ ID string
++}
++
++// Implement Go error interface for TaskTimeoutError.
++func (e TaskTimeoutError) Error() string {
++ return fmt.Sprintf("Cascade: Timed out waiting for task '%s'. "+
++ "Task may not be in error state, examine task for full details.", e.ID)
++}
++
++// Represents a step in a task.
++type Step struct {
++ EndTime int64 `json:"endTime,omitempty"`
++ Errors []*APIError `json:"errors"`
++ Operation string `json:"operation,omitempty"`
++ Options map[string]string `json:"options,omitempty"`
++ QueuedTime *int64 `json:"queuedTime"`
++ Sequence int32 `json:"sequence,omitempty"`
++ StartedTime *int64 `json:"startedTime"`
++ State *string `json:"state"`
++ Warnings []*APIError `json:"warnings"`
++}
++
++// GetStep returns a string representation of the given step.
++func GetStep(s Step) string {
++ return fmt.Sprintf("{\"operation\"=>\"%s\",\"state\"=>\"%s}", s.Operation, StringVal(s.State))
++}
++
++// Represents the VM response returned by the API.
++type VM struct {
++ AttachedDisks []*AttachedDisk `json:"attachedDisks"`
++ Cost []*QuotaLineItem `json:"cost"`
++ Flavor *string `json:"flavor"`
++ FloatingIP string `json:"floatingIp,omitempty"`
++ HighAvailableVMGroupID string `json:"highAvailableVMGroupID,omitempty"`
++ ID *string `json:"id"`
++ Kind string `json:"kind"`
++ Name *string `json:"name"`
++ SelfLink string `json:"selfLink,omitempty"`
++ SourceImageID string `json:"sourceImageId,omitempty"`
++ State *string `json:"state"`
++ Subnets []string `json:"subnets"`
++ Tags []string `json:"tags"`
++}
++
++// Represents the listVMs response returned by the API.
++type VMList struct {
++ Items []*VM `json:"items"`
++ NextPageLink string `json:"nextPageLink,omitempty"`
++ PreviousPageLink string `json:"previousPageLink,omitempty"`
++}
++
++// Represents multiple VMs returned by the API.
++type VMs struct {
++ Items []VM `json:"items"`
++}
++
++// Represents the disks attached to the VMs.
++type AttachedDisk struct {
++ BootDisk *bool `json:"bootDisk"`
++ CapacityGb *int32 `json:"capacityGb"`
++ Flavor *string `json:"flavor"`
++ ID *string `json:"id"`
++ Kind *string `json:"kind"`
++ Name *string `json:"name"`
++ State *string `json:"state"`
++}
++
++// Represents an attach disk operation request.
++type VMDiskOperation struct {
++ Arguments map[string]string `json:"arguments,omitempty"`
++ DiskID *string `json:"diskId"`
++}
++
++// Represents the quota line items for the VM.
++type QuotaLineItem struct {
++ Key *string `json:"key"`
++ Unit *string `json:"unit"`
++ Value *float64 `json:"value"`
++}
++
++// Represents a persistent disk.
++type PersistentDisk struct {
++ CapacityGB int32 `json:"capacityGb,omitempty"`
++ Cost []*QuotaLineItem `json:"cost"`
++ Datastore string `json:"datastore,omitempty"`
++ Flavor *string `json:"flavor"`
++ ID *string `json:"id"`
++ Kind string `json:"kind"`
++ Name *string `json:"name"`
++ SelfLink string `json:"selfLink,omitempty"`
++ State *string `json:"state"`
++ Tags []string `json:"tags"`
++ VM string `json:"vm"`
++ MountDevice string `json:"mountDevice,omitempty"`
++ Zone *string `json:"zone"`
++}
++
++// Represents the spec for creating a disk.
++type DiskCreateSpec struct {
++ Affinities []*LocalitySpec `json:"affinities"`
++ CapacityGB *int32 `json:"capacityGb"`
++ Flavor *string `json:"flavor"`
++ Kind *string `json:"kind"`
++ Name *string `json:"name"`
++ Tags []string `json:"tags"`
++ Zone *string `json:"zone"`
++}
++
++// Represents the spec for specifying affinity for a disk with another entity.
++type LocalitySpec struct {
++ ID *string `json:"id"`
++ Kind *string `json:"kind"`
++}
++
++// Represents the LoadBalancer response returned by the API.
++type LoadBalancer struct {
++ Endpoint *string `json:"endpoint"`
++}
++
++// Represents the spec for creating a LoadBalancer.
++type LoadBalancerCreateSpec struct {
++ HealthCheck *LoadBalancerHealthCheck `json:"healthCheck"`
++ Name *string `json:"name"`
++ PortMaps []*LoadBalancerPortMap `json:"portMaps"`
++ Type *string `json:"type"`
++}
++
++// Represents the health check spec for a load balancer.
++type LoadBalancerHealthCheck struct {
++ HealthyThreshold int64 `json:"healthyThreshold,omitempty"`
++ IntervalInSeconds int64 `json:"intervalInSeconds,omitempty"`
++ Path *string `json:"path,omitempty"`
++ Port *int64 `json:"port"`
++ Protocol *string `json:"protocol"`
++}
++
++// Represents a port mapping spec for a load balancer.
++type LoadBalancerPortMap struct {
++ AllowedCidrs []*string `json:"allowedCidrs"`
++ InstancePort *int64 `json:"instancePort"`
++ InstanceProtocol *string `json:"instanceProtocol"`
++ LoadBalancerPort *int64 `json:"loadBalancerPort"`
++ LoadBalancerProtocol *string `json:"loadBalancerProtocol"`
++}
++
++// Represents a VM to be registered with or deregistered from the load balancer.
++type LoadBalancerVM struct {
++ ID *string `json:"id"`
++}
++
++// Represents a list of VMs to be registered with or deregistered from the load balancer.
++type LoadBalancerVMUpdate struct {
++ VMIds []*LoadBalancerVM `json:"vmIds"`
++}
+\ No newline at end of file
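
Note: because APIError implements the error interface and carries the service's numeric ErrorCode, callers can recover provider-specific failures with a plain type assertion; DeleteDisk later in this patch uses exactly this idiom for DiskNotFoundError. A small Go sketch (someCascadeCall is a hypothetical helper):

    if err := someCascadeCall(); err != nil {
            if apiErr, ok := err.(APIError); ok && apiErr.ErrorCode == DiskNotFoundError {
                    // The disk is already gone; callers may treat the operation as idempotent.
            }
    }
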
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/auth.go cascade-kubernetes/pkg/cloudprovider/providers/cascade/auth.go
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/auth.go 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,129 @@
++package cascade
++
++import (
++ "fmt"
++ "strings"
++ "github.com/golang/glog"
++ "os/exec"
++)
++
++const (
++ tScope = "openid offline_access rs_admin_server at_groups rs_vmdir"
++)
++
++// AuthConfig contains configuration information for the authentication client.
++type AuthConfig struct {
++ tenantName string
++ authEndpoint string
++ machineAccountName string
++}
++
++// AuthClient defines functions related to authentication.
++type AuthClient struct {
++ cfg *AuthConfig
++}
++
++// NewAuthClient creates a new authentication client.
++func NewAuthClient(cascadeCfg *CascadeConfig) (*AuthClient, error) {
++ return &AuthClient{
++ cfg: &AuthConfig{
++ tenantName: cascadeCfg.Global.TenantName,
++ authEndpoint: cascadeCfg.Global.AuthEndpoint,
++ machineAccountName: fmt.Sprintf("%s@%s", cascadeCfg.Global.DNSName, cascadeCfg.Global.DomainName),
++ },
++ }, nil
++}
++
++func (c *AuthClient) GetTokensByMachineAccount() (*TokenOptions, error) {
++ // Execute a lwregshell command which gets the machine account password, trims it and un-escapes it.
++ cmd := "/opt/likewise/bin/lwregshell list_values '[\\Services\\vmdir]' | grep dcAccountPassword | " +
++ "awk '{print $NF}' | rev | cut -c2- | rev | cut -c2-"
++ output, err := exec.Command("bash", "-c", cmd).Output()
++ if err != nil {
++ glog.Errorf("Cascade Cloud Provider: Failed to get machine account credentials. Cannot create Client.")
++ return nil, fmt.Errorf("Failed to get machine account credentials, err: %v", err)
++ }
++ // Unescape the escaped machine account password. This could not be done as a part of the shell command itself
++ // because characters like ` are not escaped correctly by Lightwave causing failures while unescaping using shell
++ // commands like echo or printf. So this is done by the unescape function below which is written based on
++ // recommendation from the Lightwave team.
++ escapedPassword := strings.TrimSpace(string(output))
++ password := unescape(escapedPassword)
++
++ return c.GetTokensByCredentials(c.cfg.machineAccountName, password)
++}
++
++// GetTokensByCredentials gets tokens using username and password.
++func (c *AuthClient) GetTokensByCredentials(username, password string) (*TokenOptions, error) {
++ // Parse tenant part from username
++ parts := strings.Split(username, "@")
++ if len(parts) != 2 {
++ return nil, fmt.Errorf("Invalid full user name '%s': expected user@tenant", username)
++ }
++ tenant := parts[1]
++
++ oidcClient, err := buildOIDCClient(c.cfg.authEndpoint)
++ if err != nil {
++ return nil, err
++ }
++
++ tokenResponse, err := oidcClient.GetTokenByPasswordGrant(tenant, username, password)
++ if err != nil {
++ return nil, err
++ }
++
++ return toTokenOptions(tokenResponse), nil
++}
++
++// GetTokensByRefreshToken gets tokens using refresh token.
++func (c *AuthClient) GetTokensByRefreshToken(refreshtoken string) (*TokenOptions, error) {
++ oidcClient, err := buildOIDCClient(c.cfg.authEndpoint)
++ if err != nil {
++ return nil, err
++ }
++
++ tokenResponse, err := oidcClient.GetTokenByRefreshTokenGrant(c.cfg.tenantName, refreshtoken)
++ if err != nil {
++ return nil, err
++ }
++
++ return toTokenOptions(tokenResponse), nil
++}
++
++func buildOIDCClient(authEndpoint string) (*OIDCClient, error) {
++ options := &OIDCClientOptions{
++ IgnoreCertificate: true,
++ RootCAs: nil,
++ TokenScope: tScope,
++ }
++
++ return NewOIDCClient(authEndpoint, options, nil), nil
++}
++
++func toTokenOptions(response *OIDCTokenResponse) *TokenOptions {
++ return &TokenOptions{
++ AccessToken: response.AccessToken,
++ ExpiresIn: response.ExpiresIn,
++ RefreshToken: response.RefreshToken,
++ IDToken: response.IDToken,
++ TokenType: response.TokenType,
++ }
++}
++
++// unescape function unescapes an escaped string. It does so by removing a backslash character and keeping the character
++// following the backslash character.
++func unescape(input string) string {
++ var output string
++ escaped := false
++ for _, character := range input {
++ if character == '\\' && !escaped {
++ escaped = true
++ } else {
++ if escaped {
++ escaped = false
++ }
++ output = output + string(character)
++ }
++ }
++ return output
++}
+\ No newline at end of file
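
Note: the unescape helper applies the usual backslash convention: each unescaped backslash is dropped and the character after it is kept verbatim, including a second backslash. Illustrative Go calls:

    unescape(`pa\"ss\\word`) // yields: pa"ss\word
    unescape(`plain`)        // yields: plain
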
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/BUILD cascade-kubernetes/pkg/cloudprovider/providers/cascade/BUILD
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/BUILD 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,44 @@
++package(default_visibility = ["//visibility:public"])
++
++load(
++ "@io_bazel_rules_go//go:def.bzl",
++ "go_library",
++)
++
++go_library(
++ name = "go_default_library",
++ srcs = [
++ "apitypes.go",
++ "auth.go",
++ "cascade.go",
++ "cascade_disks.go",
++ "cascade_instances.go",
++ "cascade_loadbalancer.go",
++ "client.go",
++ "oidcclient.go",
++ "restclient.go",
++ "utils.go"
++ ],
++ deps = [
++ "//pkg/api/v1/helper:go_default_library",
++ "//pkg/cloudprovider:go_default_library",
++ "//pkg/controller:go_default_library",
++ "//vendor/github.com/golang/glog:go_default_library",
++ "//vendor/gopkg.in/gcfg.v1:go_default_library",
++ "//vendor/k8s.io/api/core/v1:go_default_library",
++ "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
++ ],
++)
++
++filegroup(
++ name = "package-srcs",
++ srcs = glob(["**"]),
++ tags = ["automanaged"],
++ visibility = ["//visibility:private"],
++)
++
++filegroup(
++ name = "all-srcs",
++ srcs = [":package-srcs"],
++ tags = ["automanaged"],
++)
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/cascade_disks.go cascade-kubernetes/pkg/cloudprovider/providers/cascade/cascade_disks.go |
|
| 990 |
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/cascade_disks.go 1970-01-01 00:00:00.000000000 +0000 |
|
| 991 |
+@@ -0,0 +1,197 @@ |
|
| 992 |
++package cascade |
|
| 993 |
++ |
|
| 994 |
++import ( |
|
| 995 |
++ "github.com/golang/glog" |
|
| 996 |
++ k8stypes "k8s.io/apimachinery/pkg/types" |
|
| 997 |
++ "k8s.io/kubernetes/pkg/volume" |
|
| 998 |
++ "k8s.io/apimachinery/pkg/util/sets" |
|
| 999 |
++ "k8s.io/kubernetes/pkg/kubelet/apis" |
|
| 1000 |
++) |
|
| 1001 |
++ |
|
| 1002 |
++// Attaches given virtual disk volume to the node running kubelet. |
|
| 1003 |
++func (cc *CascadeCloud) AttachDisk(diskID string, nodeName k8stypes.NodeName) (string, error) {
|
|
| 1004 |
++ operation := &VMDiskOperation{
|
|
| 1005 |
++ DiskID: StringPtr(diskID), |
|
| 1006 |
++ } |
|
| 1007 |
++ |
|
| 1008 |
++ vmID, err := cc.InstanceID(nodeName) |
|
| 1009 |
++ if err != nil {
|
|
| 1010 |
++ glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for AttachDisk. Error[%v]", err)
|
|
| 1011 |
++ return "", err |
|
| 1012 |
++ } |
|
| 1013 |
++ |
|
| 1014 |
++ task, err := cc.apiClient.AttachDisk(vmID, operation) |
|
| 1015 |
++ if err != nil {
|
|
| 1016 |
++ glog.Errorf("Cascade Cloud Provider: Failed to attach disk with ID %s. Error[%v]", diskID, err)
|
|
| 1017 |
++ return "", err |
|
| 1018 |
++ } |
|
| 1019 |
++ |
|
| 1020 |
++ _, err = cc.apiClient.WaitForTask(StringVal(task.ID)) |
|
| 1021 |
++ if err != nil {
|
|
| 1022 |
++ glog.Errorf("Cascade Cloud Provider: Failed to wait for task to attach disk with ID %s. Error[%v]",
|
|
| 1023 |
++ diskID, err) |
|
| 1024 |
++ return "", err |
|
| 1025 |
++ } |
|
| 1026 |
++ |
|
| 1027 |
++ disk, err := cc.apiClient.GetDisk(diskID) |
|
| 1028 |
++ if err != nil {
|
|
| 1029 |
++ glog.Errorf("Cascade Cloud Provider: Failed to Get disk with diskID %s. Error[%v]", diskID, err)
|
|
| 1030 |
++ return "", err |
|
| 1031 |
++ } |
|
| 1032 |
++ |
|
| 1033 |
++ return disk.MountDevice, nil |
|
| 1034 |
++} |
|
| 1035 |
++ |
|
| 1036 |
++// Detaches given virtual disk volume from the node running kubelet. |
|
| 1037 |
++func (cc *CascadeCloud) DetachDisk(diskID string, nodeName k8stypes.NodeName) error {
|
|
| 1038 |
++ operation := &VMDiskOperation{
|
|
| 1039 |
++ DiskID: StringPtr(diskID), |
|
| 1040 |
++ } |
|
| 1041 |
++ |
|
| 1042 |
++ vmID, err := cc.InstanceID(nodeName) |
|
| 1043 |
++ if err != nil {
|
|
| 1044 |
++ glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for DetachDisk. Error[%v]", err)
|
|
| 1045 |
++ return err |
|
| 1046 |
++ } |
|
| 1047 |
++ |
|
| 1048 |
++ task, err := cc.apiClient.DetachDisk(vmID, operation) |
|
| 1049 |
++ if err != nil {
|
|
| 1050 |
++ glog.Errorf("Cascade Cloud Provider: Failed to detach disk with pdID %s. Error[%v]", diskID, err)
|
|
| 1051 |
++ return err |
|
| 1052 |
++ } |
|
| 1053 |
++ |
|
| 1054 |
++ _, err = cc.apiClient.WaitForTask(StringVal(task.ID)) |
|
| 1055 |
++ if err != nil {
|
|
| 1056 |
++ glog.Errorf("Cascade Cloud Provider: Failed to wait for task to detach disk with pdID %s. Error[%v]",
|
|
| 1057 |
++ diskID, err) |
|
| 1058 |
++ return err |
|
| 1059 |
++ } |
|
| 1060 |
++ |
|
| 1061 |
++ return nil |
|
| 1062 |
++} |
|
| 1063 |
++ |
|
| 1064 |
++// DiskIsAttached returns if disk is attached to the VM using controllers supported by the plugin. |
|
| 1065 |
++func (cc *CascadeCloud) DiskIsAttached(diskID string, nodeName k8stypes.NodeName) (bool, error) {
|
|
| 1066 |
++    disk, err := cc.apiClient.GetDisk(diskID)
++    if err != nil {
++        glog.Errorf("Cascade Cloud Provider: Failed to get disk with diskID %s. Error[%v]", diskID, err)
++        return false, err
++    }
++
++    vmID, err := cc.InstanceID(nodeName)
++    if err != nil {
++        glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for DiskIsAttached. Error[%v]", err)
++        return false, err
++    }
++
++    if disk.VM == vmID {
++        return true, nil
++    }
++
++    return false, nil
++}
++
++// DisksAreAttached returns whether the given disks are attached to the VM, using controllers supported by the plugin.
++func (cc *CascadeCloud) DisksAreAttached(diskIDs []string, nodeName k8stypes.NodeName) (map[string]bool, error) {
++    attached := make(map[string]bool)
++    for _, diskID := range diskIDs {
++        attached[diskID] = false
++    }
++
++    vmID, err := cc.InstanceID(nodeName)
++    if err != nil {
++        glog.Errorf("Cascade Cloud Provider: cc.InstanceID failed for DisksAreAttached. Error[%v]", err)
++        return attached, err
++    }
++
++    for _, diskID := range diskIDs {
++        disk, err := cc.apiClient.GetDisk(diskID)
++        if err != nil {
++            glog.Warningf("Cascade Cloud Provider: failed to get VMs for persistent disk %s, err [%v]",
++                diskID, err)
++        } else {
++            if disk.VM == vmID {
++                attached[diskID] = true
++            }
++        }
++    }
++
++    return attached, nil
++}
++
++// CreateDisk creates a volume of the given size (in GB).
++func (cc *CascadeCloud) CreateDisk(volumeOptions *VolumeOptions) (diskID string, err error) {
++    // Get the zones for the cluster.
++    zones, err := cc.apiClient.GetZones()
++    if err != nil {
++        glog.Errorf("Cascade Cloud Provider: Failed to get zones for the cluster. Error[%v]", err)
++        return "", err
++    }
++
++    // Pick a zone to place the disk in.
++    zoneSet := sets.NewString()
++    for _, zone := range zones {
++        zoneSet.Insert(zone)
++    }
++    zone := volume.ChooseZoneForVolume(zoneSet, volumeOptions.Name)
++
++    diskSpec := DiskCreateSpec{}
++    diskSpec.Name = StringPtr(volumeOptions.Name)
++    diskSpec.Flavor = StringPtr(volumeOptions.Flavor)
++    diskSpec.CapacityGB = Int32Ptr(int32(volumeOptions.CapacityGB))
++    diskSpec.Kind = StringPtr(DiskSpecKind)
++    diskSpec.Zone = StringPtr(zone)
++
++    task, err := cc.apiClient.CreateDisk(&diskSpec)
++    if err != nil {
++        glog.Errorf("Cascade Cloud Provider: Failed to CreateDisk. Error[%v]", err)
++        return "", err
++    }
++
++    waitTask, err := cc.apiClient.WaitForTask(StringVal(task.ID))
++    if err != nil {
++        glog.Errorf("Cascade Cloud Provider: Failed to wait for task to CreateDisk. Error[%v]", err)
++        return "", err
++    }
++
++    return StringVal(waitTask.Entity.ID), nil
++}
++
++// DeleteDisk deletes a volume given its disk ID.
++func (cc *CascadeCloud) DeleteDisk(diskID string) error {
++    task, err := cc.apiClient.DeleteDisk(diskID)
++    if err != nil {
++        glog.Errorf("Cascade Cloud Provider: Failed to DeleteDisk. Error[%v]", err)
++        // If we get a DiskNotFound error, we assume that the disk is already deleted. So we don't return an error here.
++        switch err.(type) {
++        case APIError:
++            if err.(APIError).ErrorCode == DiskNotFoundError {
++                return nil
++            }
++        }
++        return err
++    }
++
++    _, err = cc.apiClient.WaitForTask(StringVal(task.ID))
++    if err != nil {
++        glog.Errorf("Cascade Cloud Provider: Failed to wait for task to DeleteDisk. Error[%v]", err)
++        return err
++    }
++
++    return nil
++}
++
++// GetVolumeLabels gets the zone and region labels for the volume.
++func (cc *CascadeCloud) GetVolumeLabels(diskID string) (map[string]string, error) {
++    disk, err := cc.apiClient.GetDisk(diskID)
++    if err != nil {
++        glog.Errorf("Cascade Cloud Provider: Failed to GetDisk for GetVolumeLabels. Error[%v]", err)
++        return nil, err
++    }
++
++    labels := make(map[string]string)
++    labels[apis.LabelZoneFailureDomain] = StringVal(disk.Zone)
++    labels[apis.LabelZoneRegion] = cc.cfg.Global.Region
++
++    return labels, nil
++}
+\ No newline at end of file
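+A hypothetical end-to-end use of the disk methods above, as a volume plugin
+might drive them; the option values and the error handling here are
+illustrative only, not taken from the Cascade volume plugin itself:
+
+    opts := &VolumeOptions{Name: "pv-example", CapacityGB: 10, Flavor: "standard-disk"}
+    diskID, err := cc.CreateDisk(opts) // provisions in a zone chosen by ChooseZoneForVolume
+    if err == nil {
+        labels, _ := cc.GetVolumeLabels(diskID) // zone/region labels for the new PV
+        _ = labels
+        err = cc.DeleteDisk(diskID) // deleting an already-deleted disk is not an error
+    }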
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/cascade.go cascade-kubernetes/pkg/cloudprovider/providers/cascade/cascade.go
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/cascade.go 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,216 @@
++// The use of the Cascade cloud provider requires the kubelet, kube-apiserver, and kube-controller-manager to be
++// started with the config flags: '--cloud-provider=cascade --cloud-config=[path_to_config_file]'.
++package cascade
++
++import (
++    "errors"
++    "fmt"
++    "io"
++    "os"
++    "github.com/golang/glog"
++    "gopkg.in/gcfg.v1"
++    k8stypes "k8s.io/apimachinery/pkg/types"
++    "k8s.io/kubernetes/pkg/cloudprovider"
++    "k8s.io/kubernetes/pkg/controller"
++    "strings"
++)
++
++const (
++    ProviderName = "cascade"
++    DiskSpecKind = "persistent-disk"
++    MasterPrefix = "master"
++)
++
++// CascadeCloud is an implementation of the cloud provider interface for the Cascade Controller.
++type CascadeCloud struct {
++    cfg *CascadeConfig
++    // Authentication client to get tokens for Cascade API calls
++    authClient *AuthClient
++    // API client to make Cascade API calls
++    apiClient *Client
++    // local $HOSTNAME
++    localHostname string
++    // hostname from K8S, could be overridden
++    localK8sHostname string
++}
++
++// CascadeConfig represents the Cascade cloud provider's configuration.
++type CascadeConfig struct {
++    Global struct {
++        // The Cascade Controller endpoint
++        CloudTarget string `gcfg:"target"`
++        // Cascade Controller tenant name
++        TenantName string `gcfg:"tenantName"`
++        // Cascade Controller cluster ID
++        ClusterID string `gcfg:"clusterID"`
++        // Authentication server endpoint for the Cascade Controller
++        AuthEndpoint string `gcfg:"authEndpoint"`
++        // Lightwave domain name for the node
++        DomainName string `gcfg:"domainName"`
++        // DNS name of the node
++        DNSName string `gcfg:"dnsName"`
++        // Region in which the cluster is
++        Region string `gcfg:"region"`
++        // Availability zone in which the cluster is
++        Zone string `gcfg:"zone"`
++    }
++}
++
++// Disks is the interface for managing Cascade Controller persistent disks.
++type Disks interface {
++    // AttachDisk attaches the given disk to the given node. The current node
++    // is used when nodeName is the empty string.
++    AttachDisk(diskID string, nodeName k8stypes.NodeName) (string, error)
++
++    // DetachDisk detaches the given disk from the given node. The current node
++    // is used when nodeName is the empty string.
++    DetachDisk(diskID string, nodeName k8stypes.NodeName) error
++
++    // DiskIsAttached checks if a disk is attached to the given node.
++    DiskIsAttached(diskID string, nodeName k8stypes.NodeName) (bool, error)
++
++    // DisksAreAttached is a batch function to check if a list of disks is attached
++    // to the node with the specified NodeName.
++    DisksAreAttached(diskIDs []string, nodeName k8stypes.NodeName) (map[string]bool, error)
++
++    // CreateDisk creates a new PD with the given properties.
++    CreateDisk(volumeOptions *VolumeOptions) (diskID string, err error)
++
++    // DeleteDisk deletes a PD.
++    DeleteDisk(diskID string) error
++
++    // GetVolumeLabels gets the labels to apply to a volume on creation.
++    GetVolumeLabels(diskID string) (map[string]string, error)
++}
++
++// VolumeOptions specifies the capacity, tags, name and flavor for a volume.
++type VolumeOptions struct {
++    CapacityGB int
++    Tags       map[string]string
++    Name       string
++    Flavor     string
++}
++
++func readConfig(config io.Reader) (*CascadeConfig, error) {
++    if config == nil {
++        err := fmt.Errorf("Cascade Cloud Provider: config file is missing. Please restart with " +
++            "--cloud-provider=cascade --cloud-config=[path_to_config_file]")
++        return nil, err
++    }
++
++    var cfg CascadeConfig
++    err := gcfg.ReadInto(&cfg, config)
++    return &cfg, err
++}
++
++func init() {
++    cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
++        cfg, err := readConfig(config)
++        if err != nil {
++            glog.Errorf("Cascade Cloud Provider: failed to read in cloud provider config file. Error[%v]", err)
++            return nil, err
++        }
++        return newCascadeCloud(cfg)
++    })
++}
++
++func newCascadeCloud(cfg *CascadeConfig) (*CascadeCloud, error) {
++    if len(cfg.Global.CloudTarget) == 0 {
++        return nil, fmt.Errorf("Cascade Controller endpoint was not specified")
++    }
++
++    // Get the local hostname.
++    hostname, err := os.Hostname()
++    if err != nil {
++        glog.Errorf("Cascade Cloud Provider: get hostname failed. Error[%v]", err)
++        return nil, err
++    }
++
++    cc := CascadeCloud{
++        cfg:              cfg,
++        localHostname:    hostname,
++        localK8sHostname: "",
++    }
++
++    // Instantiate the auth and API clients only on the master nodes. Kubelets running on the workers don't need them
++    // as the clients are used primarily for making API calls to Cascade.
++    if strings.HasPrefix(hostname, MasterPrefix) {
++        if cc.authClient, err = NewAuthClient(cfg); err != nil {
++            return nil, err
++        }
++
++        if cc.apiClient, err = NewClient(cfg, cc.authClient); err != nil {
++            return nil, err
++        }
++    }
++
++    return &cc, nil
++}
++
++// Initialize passes a Kubernetes clientBuilder interface to the cloud provider.
++func (cc *CascadeCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
++
++// Instances returns an implementation of Instances for the Cascade Controller.
++func (cc *CascadeCloud) Instances() (cloudprovider.Instances, bool) {
++    return cc, true
++}
++
++// List is an implementation of Instances.List.
++func (cc *CascadeCloud) List(filter string) ([]k8stypes.NodeName, error) {
++    return nil, errors.New("unimplemented")
++}
++
++func (cc *CascadeCloud) Clusters() (cloudprovider.Clusters, bool) {
++    return nil, true
++}
++
++// ProviderName returns the cloud provider ID.
++func (cc *CascadeCloud) ProviderName() string {
++    return ProviderName
++}
++
++// LoadBalancer returns an implementation of LoadBalancer for the Cascade Controller.
++func (cc *CascadeCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
++    return cc, true
++}
++
++// Zones returns an implementation of Zones for the Cascade Controller.
++func (cc *CascadeCloud) Zones() (cloudprovider.Zones, bool) {
++    return cc, true
++}
++
++func (cc *CascadeCloud) GetZone() (cloudprovider.Zone, error) {
++    return cloudprovider.Zone{
++        Region:        cc.cfg.Global.Region,
++        FailureDomain: cc.cfg.Global.Zone,
++    }, nil
++}
++
++// GetZoneByProviderID implements Zones.GetZoneByProviderID.
++// This is particularly useful in external cloud providers where the kubelet
++// does not initialize node data.
++func (cc *CascadeCloud) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) {
++    return cloudprovider.Zone{}, errors.New("unimplemented")
++}
++
++// GetZoneByNodeName implements Zones.GetZoneByNodeName.
++// This is particularly useful in external cloud providers where the kubelet
++// does not initialize node data.
++func (cc *CascadeCloud) GetZoneByNodeName(nodeName k8stypes.NodeName) (cloudprovider.Zone, error) {
++    return cloudprovider.Zone{}, errors.New("unimplemented")
++}
++
++// Routes returns false since the interface is not supported by the Cascade Controller.
++func (cc *CascadeCloud) Routes() (cloudprovider.Routes, bool) {
++    return nil, false
++}
++
++// ScrubDNS filters DNS settings for pods.
++func (cc *CascadeCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
++    return nameservers, searches
++}
++
++// HasClusterID returns true if the cluster has a clusterID.
++func (cc *CascadeCloud) HasClusterID() bool {
++    return true
++}
+\ No newline at end of file
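+The cloud-config file consumed by readConfig above is a gcfg/INI file whose
+keys come from the gcfg tags on CascadeConfig. A minimal sketch, with every
+value a placeholder rather than a real deployment:
+
+    [Global]
+    target = https://cascade-controller.example.com
+    tenantName = example-tenant
+    clusterID = example-cluster
+    authEndpoint = https://lightwave.example.com
+    domainName = example.com
+    dnsName = master-0.example.com
+    region = example-region
+    zone = example-zone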
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/cascade_instances.go cascade-kubernetes/pkg/cloudprovider/providers/cascade/cascade_instances.go
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/cascade_instances.go 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,90 @@
++package cascade
++
++import (
++    "k8s.io/api/core/v1"
++    k8stypes "k8s.io/apimachinery/pkg/types"
++    "errors"
++    "strings"
++)
++
++// NodeAddresses is an implementation of Instances.NodeAddresses. In the future, private IP address, external IP, etc.
++// will be added based on need.
++func (cc *CascadeCloud) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) {
++    addresses := []v1.NodeAddress{}
++    addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: cc.cfg.Global.DNSName})
++    return addresses, nil
++}
++
++// NodeAddressesByProviderID returns the node addresses of an instance with the specified unique providerID.
++// This method will not be called from the node that is requesting this ID, i.e. the metadata service
++// and other local methods cannot be used here.
++func (cc *CascadeCloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) {
++    // Get the name of the VM using the ID and generate the DNS name based on the VM name.
++    vm, err := cc.apiClient.GetVM(providerID)
++    if err != nil {
++        return nil, err
++    }
++    // Get the DNS name for the master VM and replace the VM name portion with the requested VM name.
++    dnsNameParts := strings.SplitN(cc.cfg.Global.DNSName, ".", 2)
++    if len(dnsNameParts) != 2 {
++        return nil, errors.New("Cascade cloud provider: Invalid DNS name specified in the configuration. " +
++            "Cannot get NodeAddressesByProviderID.")
++    }
++    dnsAddress := StringVal(vm.Name) + "." + dnsNameParts[1]
++    addresses := []v1.NodeAddress{}
++    addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: dnsAddress})
++    return addresses, nil
++}
++
++func (cc *CascadeCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
++    return errors.New("unimplemented")
++}
++
++// CurrentNodeName returns the node name based on the host name. For Cascade Kubernetes nodes, we use the host name
++// as the node name.
++func (cc *CascadeCloud) CurrentNodeName(hostname string) (k8stypes.NodeName, error) {
++    cc.localK8sHostname = hostname
++    return k8stypes.NodeName(hostname), nil
++}
++
++// ExternalID returns the cloud provider ID of the specified instance (deprecated).
++// Note: We do not call the Cascade Controller here to check if the instance is alive or not because that requires the
++// worker nodes to also log in to the Cascade Controller. That check is used by Kubernetes to proactively remove nodes
++// that the cloud provider believes are no longer available. Even otherwise, Kubernetes will remove those nodes
++// eventually. So we are not losing much by not doing that check.
++func (cc *CascadeCloud) ExternalID(nodeName k8stypes.NodeName) (string, error) {
++    return getInstanceIDFromNodeName(nodeName)
++}
++
++// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
++// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
++func (cc *CascadeCloud) InstanceExistsByProviderID(providerID string) (bool, error) {
++    return false, errors.New("unimplemented")
++}
++
++// InstanceID returns the cloud provider ID of the specified instance.
++func (cc *CascadeCloud) InstanceID(nodeName k8stypes.NodeName) (string, error) {
++    return getInstanceIDFromNodeName(nodeName)
++}
++
++// getInstanceIDFromNodeName gets the Cascade VM ID from the Kubernetes node name.
++func getInstanceIDFromNodeName(nodeName k8stypes.NodeName) (string, error) {
++    // nodeName is of the format master-instance-id or worker-instance-id. To compute the instance ID, we just need
++    // to take the portion after master- or worker-. That is what we do below.
++    nodeParts := strings.SplitN(string(nodeName), "-", 2)
++    if len(nodeParts) != 2 {
++        return "", errors.New("Cascade cloud provider: Invalid node name. Cannot fetch instance ID.")
++    }
++    return nodeParts[1], nil
++}
++
++// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID.
++// This method will not be called from the node that is requesting this ID, i.e. the metadata service
++// and other local methods cannot be used here.
++func (cc *CascadeCloud) InstanceTypeByProviderID(providerID string) (string, error) {
++    return "", errors.New("unimplemented")
++}
++
++func (cc *CascadeCloud) InstanceType(nodeName k8stypes.NodeName) (string, error) {
++    return "", nil
++}
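+For illustration, the node-name convention above maps hypothetical node names
+to Cascade VM IDs like this (the IDs are invented):
+
+    getInstanceIDFromNodeName("worker-4f6e9a2b") // -> "4f6e9a2b", nil
+    getInstanceIDFromNodeName("master-0c81d77e") // -> "0c81d77e", nil
+    getInstanceIDFromNodeName("gateway")         // -> "", error (no "-" separator)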
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go cascade-kubernetes/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/cascade_loadbalancer.go 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,283 @@
++package cascade
++
++import (
++    "fmt"
++    "github.com/golang/glog"
++    "k8s.io/api/core/v1"
++    "k8s.io/kubernetes/pkg/api/v1/service"
++    "k8s.io/kubernetes/pkg/cloudprovider"
++    "k8s.io/apimachinery/pkg/types"
++)
++
++const TCP_PROTOCOL = "TCP"
++
++const HTTP_PROTOCOL = "HTTP"
++
++// EnsureLoadBalancer creates or updates a Cascade load balancer.
++func (cc *CascadeCloud) EnsureLoadBalancer(clusterName string, k8sService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
++    logger := newLoadBalancerLogger(clusterName, k8sService, "EnsureLoadBalancer")
++
++    loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
++    logger.Infof("Load balancer name: %s", loadBalancerName)
++
++    // Sanity checks
++    if k8sService.Spec.SessionAffinity != v1.ServiceAffinityNone {
++        logger.Errorf("Unsupported load balancer session affinity: %+v", k8sService.Spec.SessionAffinity)
++        return nil, fmt.Errorf("Unsupported load balancer session affinity: %+v", k8sService.Spec.SessionAffinity)
++    }
++
++    if len(k8sService.Spec.Ports) == 0 {
++        logger.Errorf("No port mapping is specified")
++        return nil, fmt.Errorf("No port mapping is specified")
++    }
++
++    // Create the load balancer port maps.
++    portMaps := []*LoadBalancerPortMap{}
++    for _, port := range k8sService.Spec.Ports {
++        if port.Protocol != v1.ProtocolTCP {
++            logger.Warningf("Ignoring port that does not use the TCP protocol: %+v", port)
++            continue
++        }
++
++        if port.NodePort == 0 {
++            logger.Warningf("Ignoring port without a node port defined: %+v", port)
++            continue
++        }
++
++        // TODO: For now we only support SSL passthrough. All port mappings use the TCP protocol.
++        // Also note that we allow all external traffic to access the ports.
++        portMap := &LoadBalancerPortMap{
++            InstancePort:         Int64Ptr(int64(port.NodePort)),
++            InstanceProtocol:     StringPtr(TCP_PROTOCOL),
++            LoadBalancerPort:     Int64Ptr(int64(port.Port)),
++            LoadBalancerProtocol: StringPtr(TCP_PROTOCOL),
++        }
++        portMaps = append(portMaps, portMap)
++    }
++
++    // Create the load balancer health check.
++    healthCheck := &LoadBalancerHealthCheck{
++        HealthyThreshold:  5,
++        IntervalInSeconds: 10,
++    }
++    if healthCheckPath, healthCheckNodePort := service.GetServiceHealthCheckPathPort(k8sService); healthCheckPath != "" {
++        logger.Infof("HTTP health checks on: %s:%d", healthCheckPath, healthCheckNodePort)
++        healthCheck.Path = StringPtr(healthCheckPath)
++        healthCheck.Port = Int64Ptr(int64(healthCheckNodePort))
++        healthCheck.Protocol = StringPtr(HTTP_PROTOCOL)
++    } else {
++        logger.Infof("TCP health check on port: %d", Int64Val(portMaps[0].InstancePort))
++        healthCheck.Port = portMaps[0].InstancePort
++        healthCheck.Protocol = StringPtr(TCP_PROTOCOL)
++    }
++
++    // Create the load balancer.
++    createSpec := &LoadBalancerCreateSpec{
++        Name:        StringPtr(loadBalancerName),
++        Type:        StringPtr("PUBLIC"),
++        PortMaps:    portMaps,
++        HealthCheck: healthCheck,
++    }
++    logger.Infof("Load balancer create spec: %+v", *createSpec)
++
++    task, err := cc.apiClient.CreateOrUpdateLoadBalancer(createSpec)
++    if err != nil {
++        logger.Errorf("Failed to create or update load balancer. Error: [%v]", err)
++        return nil, err
++    }
++
++    _, err = cc.apiClient.WaitForTask(StringVal(task.ID))
++    if err != nil {
++        logger.Errorf("Failed to poll task status of creating or updating load balancer. Error: [%v]", err)
++        return nil, err
++    }
++
++    // Apply the VM update to the load balancer.
++    err = cc.updateLoadBalancerVMs(nodes, loadBalancerName, logger)
++    if err != nil {
++        // The private function already did the logging. No need to log again.
++        return nil, err
++    }
++
++    // Get the load balancer.
++    loadBalancer, err := cc.apiClient.GetLoadBalancer(StringPtr(loadBalancerName))
++    if err != nil {
++        glog.Errorf("Failed to get load balancer. Error: [%v]", err)
++        return nil, err
++    }
++
++    return toLoadBalancerStatus(loadBalancer), nil
++}
++
++// GetLoadBalancer returns the information about a Cascade load balancer.
++func (cc *CascadeCloud) GetLoadBalancer(clusterName string, k8sService *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
++    logger := newLoadBalancerLogger(clusterName, k8sService, "GetLoadBalancer")
++
++    loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
++    logger.Infof("Load balancer name: %s", loadBalancerName)
++
++    // Get the load balancer.
++    loadBalancer, err := cc.apiClient.GetLoadBalancer(StringPtr(loadBalancerName))
++    if err != nil {
++        logger.Errorf("Failed to get load balancer. Error: [%v]", err)
++        // Do not return an error here because we want the caller of this function to determine
++        // what to do with the not-found situation.
++        switch err.(type) {
++        case APIError:
++            if err.(APIError).ErrorCode == NotFoundError {
++                return nil, false, nil
++            }
++        }
++        return nil, false, err
++    }
++
++    return toLoadBalancerStatus(loadBalancer), true, nil
++}
++
++// UpdateLoadBalancer updates the node information of a Cascade load balancer.
++func (cc *CascadeCloud) UpdateLoadBalancer(clusterName string, k8sService *v1.Service, nodes []*v1.Node) error {
++    logger := newLoadBalancerLogger(clusterName, k8sService, "UpdateLoadBalancer")
++
++    loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
++    logger.Infof("Load balancer name: %s", loadBalancerName)
++
++    err := cc.updateLoadBalancerVMs(nodes, loadBalancerName, logger)
++    if err != nil {
++        // The private function already did the logging. No need to log again.
++        return err
++    }
++
++    return nil
++}
++
++// EnsureLoadBalancerDeleted deletes a Cascade load balancer.
++func (cc *CascadeCloud) EnsureLoadBalancerDeleted(clusterName string, k8sService *v1.Service) error {
++    logger := newLoadBalancerLogger(clusterName, k8sService, "EnsureLoadBalancerDeleted")
++
++    loadBalancerName := cloudprovider.GetLoadBalancerName(k8sService)
++    logger.Infof("Load balancer name: %s", loadBalancerName)
++
++    task, err := cc.apiClient.DeleteLoadBalancer(StringPtr(loadBalancerName))
++    if err != nil {
++        logger.Errorf("Failed to delete load balancer. Error: [%v]", err)
++        // If we get a NotFound error, we assume that the load balancer is already deleted. So we don't return an
++        // error here.
++        switch err.(type) {
++        case APIError:
++            if err.(APIError).ErrorCode == NotFoundError {
++                return nil
++            }
++        }
++        return err
++    }
++
++    _, err = cc.apiClient.WaitForTask(StringVal(task.ID))
++    if err != nil {
++        logger.Errorf("Failed to poll task status of deleting load balancer. Error: [%v]", err)
++        return err
++    }
++
++    return nil
++}
++
++func (cc *CascadeCloud) updateLoadBalancerVMs(
++    nodes []*v1.Node, loadBalancerName string, logger *loadBalancerLogger) error {
++
++    // Apply the VM update to the load balancer.
++    loadBalancerVMs := make([]*LoadBalancerVM, 0)
++
++    for _, node := range nodes {
++        // If the node does not have a name, we cannot derive its instance ID. Therefore we skip this node.
++        if len(node.Name) == 0 {
++            logger.Warningf("Node %s does not have a name. Skip updating this VM for load balancer", node.UID)
++            continue
++        }
++
++        // If we cannot get the instance ID, something is wrong on the Cascade Controller side.
++        // However, we should tolerate such a failure and continue the load balancer VM update
++        // by skipping this VM.
++        instanceID, err := cc.InstanceID(types.NodeName(node.Name))
++        if err != nil {
++            logger.Warningf("Unable to get instance ID for node %s, skip updating this VM for load balancer. Error [%v]", node.Name, err)
++            continue
++        }
++
++        loadBalancerVMs = append(loadBalancerVMs, &LoadBalancerVM{
++            ID: StringPtr(instanceID),
++        })
++    }
++
++    if len(loadBalancerVMs) == 0 {
++        logger.Infof("No nodes to be added to the load balancer. Skip updating load balancer VMs")
++        return nil
++    }
++
++    vmUpdate := &LoadBalancerVMUpdate{
++        VMIds: loadBalancerVMs,
++    }
++    logger.Infof("Load balancer VM update spec: %+v", vmUpdate.VMIds)
++
++    task, err := cc.apiClient.ApplyVMsToLoadBalancer(StringPtr(loadBalancerName), vmUpdate)
++    if err != nil {
++        logger.Errorf("Failed to update load balancer VMs. Error: [%v]", err)
++        return err
++    }
++
++    _, err = cc.apiClient.WaitForTask(StringVal(task.ID))
++    if err != nil {
++        logger.Errorf("Failed to poll task status of updating load balancer VMs. Error: [%v]", err)
++        return err
++    }
++
++    return nil
++}
++
++func toLoadBalancerStatus(lb *LoadBalancer) *v1.LoadBalancerStatus {
++    var endpoint string
++    if lb != nil && lb.Endpoint != nil {
++        endpoint = StringVal(lb.Endpoint)
++    }
++
++    return &v1.LoadBalancerStatus{
++        Ingress: []v1.LoadBalancerIngress{
++            {
++                Hostname: endpoint,
++            },
++        },
++    }
++}
++
++type loadBalancerLogger struct {
++    clusterName string
++    k8sService  *v1.Service
++    callingFunc string
++}
++
++func newLoadBalancerLogger(clusterName string, k8sService *v1.Service, callingFunc string) *loadBalancerLogger {
++    return &loadBalancerLogger{
++        clusterName: clusterName,
++        k8sService:  k8sService,
++        callingFunc: callingFunc,
++    }
++}
++
++func (l *loadBalancerLogger) getLogMsg(
++    msgTemplate string, args ...interface{}) string {
++
++    errorMsg := fmt.Sprintf("Cascade Cloud Provider::%s::Cluster [%s] Service [%s]: %s",
++        l.callingFunc, l.clusterName, l.k8sService.Name,
++        msgTemplate)
++    return fmt.Sprintf(errorMsg, args...)
++}
++
++func (l *loadBalancerLogger) Errorf(msgTemplate string, args ...interface{}) {
++    glog.Errorln(l.getLogMsg(msgTemplate, args...))
++}
++
++func (l *loadBalancerLogger) Warningf(msgTemplate string, args ...interface{}) {
++    glog.Warningln(l.getLogMsg(msgTemplate, args...))
++}
++
++func (l *loadBalancerLogger) Infof(msgTemplate string, args ...interface{}) {
++    glog.Infoln(l.getLogMsg(msgTemplate, args...))
++}
+\ No newline at end of file
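+As a worked example of the port mapping built in EnsureLoadBalancer above, a
+Service port with Port=443 and NodePort=30443 (hypothetical values) produces
+
+    &LoadBalancerPortMap{
+        InstancePort:         Int64Ptr(30443), // node port that receives the traffic
+        InstanceProtocol:     StringPtr(TCP_PROTOCOL),
+        LoadBalancerPort:     Int64Ptr(443),   // externally exposed port
+        LoadBalancerProtocol: StringPtr(TCP_PROTOCOL),
+    }
+
+and, with no HTTP health-check path on the Service, the health check falls
+back to TCP against port 30443.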
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/client.go cascade-kubernetes/pkg/cloudprovider/providers/cascade/client.go
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/client.go 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,383 @@
++package cascade
++
++import (
++    "crypto/tls"
++    "crypto/x509"
++    "encoding/json"
++    "fmt"
++    "net/http"
++    "strings"
++    "time"
++    "bytes"
++    "github.com/golang/glog"
++)
++
++// Client represents the stateless context needed to call the Cascade APIs.
++// Note that we are implementing the Cascade APIs manually instead of using the swagger-generated code
++// because swagger uses a different version of the openapi library than kubernetes. It is difficult to
++// resolve the version conflict to make it compile.
++type Client struct {
++    cfg        *ClientConfig
++    options    ClientOptions
++    restClient *restClient
++}
++
++type ClientConfig struct {
++    tenantName string
++    clusterID  string
++    region     string
++    endpoint   string
++}
++
++// TokenOptions represents the authentication tokens.
++type TokenOptions struct {
++    AccessToken  string `json:"access_token"`
++    ExpiresIn    int    `json:"expires_in"`
++    RefreshToken string `json:"refresh_token,omitempty"`
++    IDToken      string `json:"id_token"`
++    TokenType    string `json:"token_type"`
++}
++
++type TokenCallback func(string)
++
++// ClientOptions holds the options for Client.
++type ClientOptions struct {
++    // When using the Tasks.Wait APIs, defines the duration of how long
++    // we should continue to poll the server. Default is 30 minutes.
++    // TasksAPI.WaitTimeout() can be used to specify a timeout on
++    // individual calls.
++    TaskPollTimeout time.Duration
++
++    // Whether or not to ignore any TLS errors when talking to Cascade,
++    // false by default.
++    IgnoreCertificate bool
++
++    // List of root CAs to use for server validation,
++    // nil by default.
++    RootCAs *x509.CertPool
++
++    // For the tasks APIs, defines the delay between each polling attempt.
++    // Default is 100 milliseconds.
++    TaskPollDelay time.Duration
++
++    // For the tasks APIs, defines the number of retries to make in the event
++    // of an error. Default is 3.
++    TaskRetryCount int
++
++    // Tokens for user authentication. Default is empty.
++    TokenOptions *TokenOptions
++}
++
++// NewClient creates a new Cascade client which can be used to make API calls to Cascade.
++func NewClient(cfg *CascadeConfig, authClient *AuthClient) (c *Client, err error) {
++    tokenOptions, err := authClient.GetTokensByMachineAccount()
++    if err != nil {
++        glog.Errorf("Cascade Cloud Provider: Failed to create new client due to error: %+v", err)
++        return
++    }
++
++    options := &ClientOptions{
++        TaskPollTimeout:   30 * time.Minute,
++        TaskPollDelay:     100 * time.Millisecond,
++        TaskRetryCount:    3,
++        TokenOptions:      tokenOptions,
++        IgnoreCertificate: false,
++        RootCAs:           nil,
++    }
++
++    tr := &http.Transport{
++        TLSClientConfig: &tls.Config{
++            InsecureSkipVerify: options.IgnoreCertificate,
++            RootCAs:            options.RootCAs},
++    }
++
++    tokenCallback := func(newToken string) {
++        c.options.TokenOptions.AccessToken = newToken
++    }
++
++    restClient := &restClient{
++        authClient:                authClient,
++        httpClient:                &http.Client{Transport: tr},
++        UpdateAccessTokenCallback: tokenCallback,
++    }
++
++    clientConfig := &ClientConfig{
++        tenantName: cfg.Global.TenantName,
++        clusterID:  cfg.Global.ClusterID,
++        region:     cfg.Global.Region,
++        endpoint:   strings.TrimRight(cfg.Global.CloudTarget, "/"),
++    }
++
++    c = &Client{
++        cfg:        clientConfig,
++        restClient: restClient,
++        // Ensure a copy of options is made, rather than using a pointer
++        // which may change out from underneath if misused by the caller.
++        options: *options,
++    }
++
++    return
++}
++
++// GetVM gets the VM with the specified ID.
++func (api *Client) GetVM(vmID string) (vm *VM, err error) {
++    uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/vms/%s", api.cfg.endpoint, api.cfg.tenantName,
++        api.cfg.clusterID, vmID)
++    res, err := api.restClient.Get(uri, api.options.TokenOptions)
++    if err != nil {
++        return
++    }
++    defer res.Body.Close()
++    res, err = getError(res)
++    if err != nil {
++        return
++    }
++    vm = &VM{}
++    err = json.NewDecoder(res.Body).Decode(vm)
++    return
++}
++
++// GetDisk gets the disk with the specified ID.
++func (api *Client) GetDisk(diskID string) (disk *PersistentDisk, err error) {
++    uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/disks/%s", api.cfg.endpoint, api.cfg.tenantName,
++        api.cfg.clusterID, diskID)
++    res, err := api.restClient.Get(uri, api.options.TokenOptions)
++    if err != nil {
++        return
++    }
++    defer res.Body.Close()
++    res, err = getError(res)
++    if err != nil {
++        return
++    }
++    disk = &PersistentDisk{}
++    err = json.NewDecoder(res.Body).Decode(disk)
++    return
++}
++
++// CreateDisk creates a disk under the cluster.
++func (api *Client) CreateDisk(spec *DiskCreateSpec) (task *Task, err error) {
++    body, err := json.Marshal(spec)
++    if err != nil {
++        return
++    }
++    uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/disks", api.cfg.endpoint, api.cfg.tenantName,
++        api.cfg.clusterID)
++    res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
++    if err != nil {
++        return
++    }
++    defer res.Body.Close()
++    task, err = getTask(getError(res))
++    return
++}
++
++// DeleteDisk deletes the disk with the specified ID.
++func (api *Client) DeleteDisk(diskID string) (task *Task, err error) {
++    uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/disks/%s", api.cfg.endpoint, api.cfg.tenantName,
++        api.cfg.clusterID, diskID)
++    res, err := api.restClient.Delete(uri, api.options.TokenOptions)
++    if err != nil {
++        return
++    }
++    defer res.Body.Close()
++    task, err = getTask(getError(res))
++    return
++}
++
++// AttachDisk attaches a disk to the specified VM.
++func (api *Client) AttachDisk(vmID string, op *VMDiskOperation) (task *Task, err error) {
++    body, err := json.Marshal(op)
++    if err != nil {
++        return
++    }
++    uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/vms/%s/attach_disk", api.cfg.endpoint, api.cfg.tenantName,
++        api.cfg.clusterID, vmID)
++    res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
++    if err != nil {
++        return
++    }
++    defer res.Body.Close()
++    task, err = getTask(getError(res))
++    return
++}
++
++// DetachDisk detaches a disk from the specified VM.
++func (api *Client) DetachDisk(vmID string, op *VMDiskOperation) (task *Task, err error) {
++    body, err := json.Marshal(op)
++    if err != nil {
++        return
++    }
++    uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/vms/%s/detach_disk", api.cfg.endpoint, api.cfg.tenantName,
++        api.cfg.clusterID, vmID)
++    res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
++    if err != nil {
++        return
++    }
++    defer res.Body.Close()
++    task, err = getTask(getError(res))
++    return
++}
++
++// GetTask gets a task by ID.
++func (api *Client) GetTask(taskID string) (task *Task, err error) {
++    uri := fmt.Sprintf("%s/v1/tenants/%s/tasks/%s?region=%s", api.cfg.endpoint, api.cfg.tenantName,
++        taskID, api.cfg.region)
++    res, err := api.restClient.Get(uri, api.options.TokenOptions)
++    if err != nil {
++        return
++    }
++    defer res.Body.Close()
++    result, err := getTask(getError(res))
++    return result, err
++}
++
++// WaitForTask waits for a task to complete by polling the tasks API until the task returns with the state COMPLETED or ERROR.
++func (api *Client) WaitForTask(taskID string) (task *Task, err error) {
++    start := time.Now()
++    numErrors := 0
++    maxErrors := api.options.TaskRetryCount
++
++    for time.Since(start) < api.options.TaskPollTimeout {
++        task, err = api.GetTask(taskID)
++        if err != nil {
++            switch err.(type) {
++            // If an APIError comes back, something is wrong; return the error to the caller.
++            case APIError:
++                return
++            // For other errors, retry before giving up.
++            default:
++                numErrors++
++                if numErrors > maxErrors {
++                    return
++                }
++            }
++        } else {
++            // Reset the error count any time a successful call is made.
++            numErrors = 0
++            if StringVal(task.State) == "COMPLETED" {
++                return
++            }
++            if StringVal(task.State) == "ERROR" {
++                err = TaskError{StringVal(task.ID), getFailedStep(task)}
++                return
++            }
++        }
++        time.Sleep(api.options.TaskPollDelay)
++    }
++    err = TaskTimeoutError{taskID}
++    return
++}
++
++// CreateOrUpdateLoadBalancer creates a load balancer if one does not exist, or updates it otherwise.
++func (api *Client) CreateOrUpdateLoadBalancer(spec *LoadBalancerCreateSpec) (*Task, error) {
++    body, err := json.Marshal(spec)
++    if err != nil {
++        return nil, err
++    }
++    uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers", api.cfg.endpoint, api.cfg.tenantName,
++        api.cfg.clusterID)
++    res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
++    if err != nil {
++        return nil, err
++    }
++    defer res.Body.Close()
++    return getTask(getError(res))
++}
++
++// GetLoadBalancer returns a load balancer by name.
++func (api *Client) GetLoadBalancer(loadBalancerName *string) (*LoadBalancer, error) {
++    uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers/%s", api.cfg.endpoint, api.cfg.tenantName,
++        api.cfg.clusterID, StringVal(loadBalancerName))
++    res, err := api.restClient.Get(uri, api.options.TokenOptions)
++    if err != nil {
++        return nil, err
++    }
++    defer res.Body.Close()
++    res, err = getError(res)
++    if err != nil {
++        return nil, err
++    }
++    loadBalancer := &LoadBalancer{}
++    err = json.NewDecoder(res.Body).Decode(loadBalancer)
++    return loadBalancer, err
++}
++
++// DeleteLoadBalancer deletes a load balancer by name.
++func (api *Client) DeleteLoadBalancer(loadBalancerName *string) (*Task, error) {
++    uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers/%s", api.cfg.endpoint, api.cfg.tenantName,
++        api.cfg.clusterID, StringVal(loadBalancerName))
++    res, err := api.restClient.Delete(uri, api.options.TokenOptions)
++    if err != nil {
++        return nil, err
++    }
++    defer res.Body.Close()
++    return getTask(getError(res))
++}
++
++// ApplyVMsToLoadBalancer updates the instances that are registered with the load balancer.
++func (api *Client) ApplyVMsToLoadBalancer(loadBalancerName *string, update *LoadBalancerVMUpdate) (*Task, error) {
++    body, err := json.Marshal(update)
++    if err != nil {
++        return nil, err
++    }
++    uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/loadbalancers/%s/update_vms", api.cfg.endpoint, api.cfg.tenantName,
++        api.cfg.clusterID, StringVal(loadBalancerName))
++    res, err := api.restClient.Post(uri, "application/json", bytes.NewReader(body), api.options.TokenOptions)
++    if err != nil {
++        return nil, err
++    }
++    defer res.Body.Close()
++    return getTask(getError(res))
++}
++
++// GetZones gets all the zones in which the cluster has VMs.
++func (api *Client) GetZones() (zones []string, err error) {
++    uri := fmt.Sprintf("%s/v1/tenants/%s/clusters/%s/zones", api.cfg.endpoint, api.cfg.tenantName,
++        api.cfg.clusterID)
++    res, err := api.restClient.Get(uri, api.options.TokenOptions)
++    if err != nil {
++        return
++    }
++    defer res.Body.Close()
++    res, err = getError(res)
++    if err != nil {
++        return
++    }
++    err = json.NewDecoder(res.Body).Decode(&zones)
++    return
++}
++
++// getTask reads a task object out of the HTTP response. Takes an error argument
++// so that GetTask can easily wrap getError. This function will do nothing
++// if e is not nil.
++// e.g. res, err := getTask(getError(someApi.Get()))
++func getTask(res *http.Response, e error) (*Task, error) {
++    if e != nil {
++        return nil, e
++    }
++    var task Task
++    err := json.NewDecoder(res.Body).Decode(&task)
++    if err != nil {
++        return nil, err
++    }
++    if StringVal(task.State) == "ERROR" {
++        // Critical: return the task as well, so that it can be examined
++        // for error details.
++        return &task, TaskError{StringVal(task.ID), getFailedStep(&task)}
++    }
++    return &task, nil
++}
++
++// getFailedStep gets the failed step in the task, to extract error details for a failed task.
++func getFailedStep(task *Task) (step Step) {
++    var errorStep Step
++    for _, s := range task.Steps {
++        if StringVal(s.State) == "ERROR" {
++            errorStep = *s
++            break
++        }
++    }
++
++    return errorStep
++}
+\ No newline at end of file
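+For illustration, with the placeholder config target=https://cc.example.com,
+tenantName=t1 and clusterID=c1, GetVM("vm-123") above issues
+
+    GET https://cc.example.com/v1/tenants/t1/clusters/c1/vms/vm-123
+
+passing api.options.TokenOptions to the restClient for authentication.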
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/oidcclient.go cascade-kubernetes/pkg/cloudprovider/providers/cascade/oidcclient.go
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/oidcclient.go 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,297 @@
++package cascade
++
++import (
++    "crypto/tls"
++    "crypto/x509"
++    "encoding/json"
++    "encoding/pem"
++    "fmt"
++    "io/ioutil"
++    "log"
++    "net/http"
++    "net/url"
++    "strings"
++)
++
++const tokenScope string = "openid offline_access"
++
++// OIDCClient is the client for OIDC.
++type OIDCClient struct {
++    httpClient *http.Client
++    logger     *log.Logger
++
++    Endpoint string
++    Options  *OIDCClientOptions
++}
++
++// OIDCClientOptions are the OIDC client options.
++type OIDCClientOptions struct {
++    // Whether or not to ignore any TLS errors when talking to Cascade,
++    // false by default.
++    IgnoreCertificate bool
++
++    // List of root CAs to use for server validation,
++    // nil by default.
++    RootCAs *x509.CertPool
++
++    // The scope values to use when requesting tokens.
++    TokenScope string
++}
++
++// NewOIDCClient creates an instance of OIDCClient.
++func NewOIDCClient(endpoint string, options *OIDCClientOptions, logger *log.Logger) (c *OIDCClient) {
++    if logger == nil {
++        logger = log.New(ioutil.Discard, "", log.LstdFlags)
++    }
++
++    options = buildOptions(options)
++    tr := &http.Transport{
++        TLSClientConfig: &tls.Config{
++            InsecureSkipVerify: options.IgnoreCertificate,
++            RootCAs:            options.RootCAs},
++    }
++
++    c = &OIDCClient{
++        httpClient: &http.Client{Transport: tr},
++        logger:     logger,
++        Endpoint:   strings.TrimRight(endpoint, "/"),
++        Options:    options,
++    }
++    return
++}
++
++func buildOptions(options *OIDCClientOptions) (result *OIDCClientOptions) {
++    result = &OIDCClientOptions{
++        TokenScope: tokenScope,
++    }
++
++    if options == nil {
++        return
++    }
++
++    result.IgnoreCertificate = options.IgnoreCertificate
++
++    if options.RootCAs != nil {
++        result.RootCAs = options.RootCAs
++    }
++
++    if options.TokenScope != "" {
++        result.TokenScope = options.TokenScope
++    }
++
++    return
++}
++
++func (client *OIDCClient) buildURL(path string) (url string) {
++    return fmt.Sprintf("%s%s", client.Endpoint, path)
++}
++
++// Cert download helper
++
++const certDownloadPath string = "/afd/vecs/ssl"
++
++type lightWaveCert struct {
++    Value string `json:"encoded"`
++}
++
++// GetRootCerts gets the root certs.
++func (client *OIDCClient) GetRootCerts() (certList []*x509.Certificate, err error) {
++    // Turn TLS verification off for the cert download, restoring the original transport afterwards.
++    originalTr := client.httpClient.Transport
++    defer client.setTransport(originalTr)
++
++    tr := &http.Transport{
++        TLSClientConfig: &tls.Config{
++            InsecureSkipVerify: true,
++        },
++    }
++    client.setTransport(tr)
++
++    // Get the certs.
++    resp, err := client.httpClient.Get(client.buildURL(certDownloadPath))
++    if err != nil {
++        return
++    }
++    defer resp.Body.Close()
++    if resp.StatusCode != 200 {
++        err = fmt.Errorf("Unexpected error retrieving auth server certs: %v %s", resp.StatusCode, resp.Status)
++        return
++    }
++
++    // Parse the certs.
++    certsData := &[]lightWaveCert{}
++    err = json.NewDecoder(resp.Body).Decode(certsData)
++    if err != nil {
++        return
++    }
++
++    certList = make([]*x509.Certificate, len(*certsData))
++    for idx, cert := range *certsData {
++        block, _ := pem.Decode([]byte(cert.Value))
++        if block == nil {
++            err = fmt.Errorf("Unexpected response format: %v", certsData)
++            return nil, err
++        }
++
++        decodedCert, err := x509.ParseCertificate(block.Bytes)
++        if err != nil {
++            return nil, err
++        }
++
++        certList[idx] = decodedCert
++    }
++
++    return
++}
++
++func (client *OIDCClient) setTransport(tr http.RoundTripper) {
++    client.httpClient.Transport = tr
++}
++
++// Metadata request helpers
++
++const metadataPathFormat string = "/openidconnect/%s/.well-known/openid-configuration"
++
++// OIDCMetadataResponse is the response for the metadata request.
++type OIDCMetadataResponse struct {
++    TokenEndpoint         string `json:"token_endpoint"`
++    AuthorizationEndpoint string `json:"authorization_endpoint"`
++    EndSessionEndpoint    string `json:"end_session_endpoint"`
++}
++
++func (client *OIDCClient) getMetadata(domain string) (metadata *OIDCMetadataResponse, err error) {
++    metadataPath := fmt.Sprintf(metadataPathFormat, domain)
++    request, err := http.NewRequest("GET", client.buildURL(metadataPath), nil)
++    if err != nil {
++        return nil, err
++    }
++
++    resp, err := client.httpClient.Do(request)
++    if err != nil {
++        return nil, err
++    }
++    defer resp.Body.Close()
++
++    err = client.checkResponse(resp)
++    if err != nil {
++        return nil, err
++    }
++
++    metadata = &OIDCMetadataResponse{}
++    err = json.NewDecoder(resp.Body).Decode(metadata)
++    if err != nil {
++        return nil, err
++    }
++
++    return
++}
++
++// Token request helpers
++
++const passwordGrantFormatString = "grant_type=password&username=%s&password=%s&scope=%s"
++const refreshTokenGrantFormatString = "grant_type=refresh_token&refresh_token=%s"
++const clientGrantFormatString = "grant_type=password&username=%s&password=%s&scope=%s&client_id=%s"
++
++// OIDCTokenResponse is the response for an OIDC token request.
++type OIDCTokenResponse struct {
++    AccessToken  string `json:"access_token"`
++    ExpiresIn    int    `json:"expires_in"`
++    RefreshToken string `json:"refresh_token,omitempty"`
++    IDToken      string `json:"id_token"`
++    TokenType    string `json:"token_type"`
++}
++
++// GetTokenByPasswordGrant gets OIDC tokens by password.
++func (client *OIDCClient) GetTokenByPasswordGrant(domain, username, password string) (tokens *OIDCTokenResponse, err error) {
++    metadata, err := client.getMetadata(domain)
++    if err != nil {
++        return nil, err
++    }
++
++    username = url.QueryEscape(username)
++    password = url.QueryEscape(password)
++    body := fmt.Sprintf(passwordGrantFormatString, username, password, client.Options.TokenScope)
++    return client.getToken(metadata.TokenEndpoint, body)
++}
++
++// GetClientTokenByPasswordGrant gets OIDC tokens by password for a specific client ID.
++func (client *OIDCClient) GetClientTokenByPasswordGrant(domain, username, password, clientID string) (tokens *OIDCTokenResponse, err error) {
++    metadata, err := client.getMetadata(domain)
++    if err != nil {
++        return nil, err
++    }
++
++    username = url.QueryEscape(username)
++    password = url.QueryEscape(password)
++    clientID = url.QueryEscape(clientID)
++    body := fmt.Sprintf(clientGrantFormatString, username, password, client.Options.TokenScope, clientID)
++    return client.getToken(metadata.TokenEndpoint, body)
++}
++
++// GetTokenByRefreshTokenGrant gets OIDC tokens by refresh token.
++func (client *OIDCClient) GetTokenByRefreshTokenGrant(domain, refreshToken string) (tokens *OIDCTokenResponse, err error) {
++    metadata, err := client.getMetadata(domain)
++    if err != nil {
++        return nil, err
++    }
++
++    body := fmt.Sprintf(refreshTokenGrantFormatString, refreshToken)
++    return client.getToken(metadata.TokenEndpoint, body)
++}
++
++func (client *OIDCClient) getToken(tokenEndpoint, body string) (tokens *OIDCTokenResponse, err error) {
++    request, err := http.NewRequest("POST", tokenEndpoint, strings.NewReader(body))
++    if err != nil {
++        return nil, err
++    }
++    request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
++
++    resp, err := client.httpClient.Do(request)
++    if err != nil {
++        return nil, err
++    }
++    defer resp.Body.Close()
++
++    err = client.checkResponse(resp)
++    if err != nil {
++        return nil, err
++    }
++
++    tokens = &OIDCTokenResponse{}
++    err = json.NewDecoder(resp.Body).Decode(tokens)
++    if err != nil {
++        return nil, err
++    }
++
++    return
++}
++
++// OIDCError is an OIDC error.
++type OIDCError struct {
++    Code    string `json:"error"`
++    Message string `json:"error_description"`
++}
++
++func (e OIDCError) Error() string {
++    return fmt.Sprintf("%v: %v", e.Code, e.Message)
++}
++
++func (client *OIDCClient) checkResponse(response *http.Response) (err error) {
++    if response.StatusCode/100 == 2 {
|
|
| 2458 |
++ return |
|
| 2459 |
++ } |
|
| 2460 |
++ |
|
| 2461 |
++ respBody, readErr := ioutil.ReadAll(response.Body) |
|
| 2462 |
++ if readErr != nil {
|
|
| 2463 |
++ return fmt.Errorf( |
|
| 2464 |
++ "Status: %v, Body: %v [%v]", response.Status, string(respBody[:]), readErr) |
|
| 2465 |
++ } |
|
| 2466 |
++ |
|
| 2467 |
++ var oidcErr OIDCError |
|
| 2468 |
++ err = json.Unmarshal(respBody, &oidcErr) |
|
| 2469 |
++ if err != nil || oidcErr.Code == "" {
|
|
| 2470 |
++ return fmt.Errorf( |
|
| 2471 |
++ "Status: %v, Body: %v [%v]", response.Status, string(respBody[:]), readErr) |
|
| 2472 |
++ } |
|
| 2473 |
++ |
|
| 2474 |
++ return oidcErr |
|
| 2475 |
++} |
|
| 2476 |
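
For orientation, a minimal sketch of how this OIDC client is meant to be driven. The construction of `client` and the `log` import are assumptions for illustration; only the GetTokenBy*Grant methods above come from the patch.

    // Sketch only: obtain tokens with the password grant, then renew them
    // with the refresh token grant once the access token expires.
    tokens, err := client.GetTokenByPasswordGrant("example.local", "admin@example.local", "secret")
    if err != nil {
        log.Fatal(err)
    }
    renewed, err := client.GetTokenByRefreshTokenGrant("example.local", tokens.RefreshToken)
    if err != nil {
        log.Fatal(err)
    }
    _ = renewed.AccessToken
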
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/OWNERS cascade-kubernetes/pkg/cloudprovider/providers/cascade/OWNERS
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/OWNERS 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,3 @@
++maintainers:
++- ashokc
++- ysheng
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/restclient.go cascade-kubernetes/pkg/cloudprovider/providers/cascade/restclient.go
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/restclient.go 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,262 @@
++package cascade
++
++import (
++    "bytes"
++    "encoding/json"
++    "io"
++    "io/ioutil"
++    "net/http"
++)
++
++type restClient struct {
++    httpClient                *http.Client
++    authClient                *AuthClient
++    UpdateAccessTokenCallback TokenCallback
++}
++
++type request struct {
++    Method      string
++    URL         string
++    ContentType string
++    Body        io.Reader
++    Tokens      *TokenOptions
++}
++
++type page struct {
++    Items            []interface{} `json:"items"`
++    NextPageLink     string        `json:"nextPageLink"`
++    PreviousPageLink string        `json:"previousPageLink"`
++}
++
++type documentList struct {
++    Items []interface{}
++}
++
++type bodyRewinder func() io.Reader
++
++const appJson string = "application/json"
++const expiredAuthToken int32 = 1904
++
++func (client *restClient) AppendSlice(origSlice []interface{}, dataToAppend []interface{}) []interface{} {
++    origLen := len(origSlice)
++    newLen := origLen + len(dataToAppend)
++
++    if newLen > cap(origSlice) {
++        newSlice := make([]interface{}, (newLen+1)*2)
++        copy(newSlice, origSlice)
++        origSlice = newSlice
++    }
++
++    origSlice = origSlice[0:newLen]
++    copy(origSlice[origLen:newLen], dataToAppend)
++
++    return origSlice
++}
++
++func (client *restClient) Get(url string, tokens *TokenOptions) (res *http.Response, err error) {
++    req := request{"GET", url, "", nil, tokens}
++    res, err = client.SendRequest(&req, nil)
++    return
++}
++
++func (client *restClient) GetList(endpoint string, url string, tokens *TokenOptions) (result []byte, err error) {
++    req := request{"GET", url, "", nil, tokens}
++    res, err := client.SendRequest(&req, nil)
++    if err != nil {
++        return
++    }
++    res, err = getError(res)
++    if err != nil {
++        return
++    }
++
++    decoder := json.NewDecoder(res.Body)
++    decoder.UseNumber()
++
++    page := &page{}
++    err = decoder.Decode(page)
++    if err != nil {
++        return
++    }
++
++    documentList := &documentList{}
++    documentList.Items = client.AppendSlice(documentList.Items, page.Items)
++
++    for page.NextPageLink != "" {
++        req = request{"GET", endpoint + page.NextPageLink, "", nil, tokens}
++        res, err = client.SendRequest(&req, nil)
++        if err != nil {
++            return
++        }
++        res, err = getError(res)
++        if err != nil {
++            return
++        }
++
++        decoder = json.NewDecoder(res.Body)
++        decoder.UseNumber()
++
++        page.NextPageLink = ""
++        page.PreviousPageLink = ""
++
++        err = decoder.Decode(page)
++        if err != nil {
++            return
++        }
++
++        documentList.Items = client.AppendSlice(documentList.Items, page.Items)
++    }
++
++    result, err = json.Marshal(documentList)
++
++    return
++}
++
++func (client *restClient) Post(url string, contentType string, body io.ReadSeeker, tokens *TokenOptions) (res *http.Response, err error) {
++    if contentType == "" {
++        contentType = appJson
++    }
++
++    req := request{"POST", url, contentType, body, tokens}
++    rewinder := func() io.Reader {
++        body.Seek(0, 0)
++        return body
++    }
++    res, err = client.SendRequest(&req, rewinder)
++    return
++}
++
++func (client *restClient) Patch(url string, contentType string, body io.ReadSeeker, tokens *TokenOptions) (res *http.Response, err error) {
++    if contentType == "" {
++        contentType = appJson
++    }
++
++    req := request{"PATCH", url, contentType, body, tokens}
++    rewinder := func() io.Reader {
++        body.Seek(0, 0)
++        return body
++    }
++    res, err = client.SendRequest(&req, rewinder)
++    return
++}
++
++func (client *restClient) Put(url string, contentType string, body io.ReadSeeker, tokens *TokenOptions) (res *http.Response, err error) {
++    if contentType == "" {
++        contentType = appJson
++    }
++
++    req := request{"PUT", url, contentType, body, tokens}
++    rewinder := func() io.Reader {
++        body.Seek(0, 0)
++        return body
++    }
++    res, err = client.SendRequest(&req, rewinder)
++    return
++}
++
++func (client *restClient) Delete(url string, tokens *TokenOptions) (res *http.Response, err error) {
++    req := request{"DELETE", url, "", nil, tokens}
++    res, err = client.SendRequest(&req, nil)
++    return
++}
++
++func (client *restClient) SendRequest(req *request, bodyRewinder bodyRewinder) (res *http.Response, err error) {
++    res, err = client.sendRequestHelper(req)
++    // In most cases, we'll return immediately.
++    // If the operation succeeded, but we got a 401 response and we're using
++    // authentication, then we'll look into the body to see if the token expired.
++    if err != nil {
++        return res, err
++    }
++    if res.StatusCode != 401 {
++        // It's not a 401, so the token didn't expire
++        return res, err
++    }
++    if req.Tokens == nil || req.Tokens.AccessToken == "" {
++        // We don't have a token, so we can't renew the token, no need to proceed
++        return res, err
++    }
++
++    // We're going to look in the body to see if it failed because the token expired.
++    // This means we need to read the body, but the functions that call us also
++    // expect to read the body. So we read the body, then create a new reader
++    // so they can read the body as normal.
++    body, err := ioutil.ReadAll(res.Body)
++    if err != nil {
++        return res, err
++    }
++    res.Body = ioutil.NopCloser(bytes.NewReader(body))
++
++    // Now see if we had an expired token or not
++    var apiError APIError
++    err = json.Unmarshal(body, &apiError)
++    if err != nil {
++        return res, err
++    }
++    if apiError.ErrorCode != expiredAuthToken {
++        return res, nil
++    }
++
++    // We were told that the access token expired, so we acquire a new token using the refresh token.
++    newTokens, err := client.authClient.GetTokensByRefreshToken(req.Tokens.RefreshToken)
++    // If there is an error during token refresh, we assume that the refresh token also expired. So we login again using
++    // the machine account.
++    if err != nil {
++        newTokens, err = client.authClient.GetTokensByMachineAccount()
++        if err != nil {
++            return res, err
++        }
++    }
++    req.Tokens.AccessToken = newTokens.AccessToken
++    if client.UpdateAccessTokenCallback != nil {
++        client.UpdateAccessTokenCallback(newTokens.AccessToken)
++    }
++    if req.Body != nil && bodyRewinder != nil {
++        req.Body = bodyRewinder()
++    }
++    res, err = client.sendRequestHelper(req)
++    return res, err
++}
++
++func (client *restClient) sendRequestHelper(req *request) (res *http.Response, err error) {
++    r, err := http.NewRequest(req.Method, req.URL, req.Body)
++    if err != nil {
++        return
++    }
++    if req.ContentType != "" {
++        r.Header.Add("Content-Type", req.ContentType)
++    }
++    if req.Tokens != nil && req.Tokens.AccessToken != "" {
++        r.Header.Add("Authorization", "Bearer "+req.Tokens.AccessToken)
++    }
++    res, err = client.httpClient.Do(r)
++    if err != nil {
++        return
++    }
++
++    return
++}
++
++// Reads an error out of the HTTP response, or does nothing if
++// no error occurred.
++func getError(res *http.Response) (*http.Response, error) {
++    // Do nothing if the response is a successful 2xx
++    if res.StatusCode/100 == 2 {
++        return res, nil
++    }
++    var apiError APIError
++    // ReadAll is usually a bad practice, but here we need to read the response all
++    // at once because we may attempt to use the data twice. It's preferable to use
++    // methods that take io.Reader, e.g. json.NewDecoder
++    body, err := ioutil.ReadAll(res.Body)
++    if err != nil {
++        return nil, err
++    }
++    err = json.Unmarshal(body, &apiError)
++    if err != nil {
++        // If deserializing into ApiError fails, return a generic HttpError instead
++        return nil, HttpError{res.StatusCode, string(body[:])}
++    }
++    apiError.HttpStatusCode = res.StatusCode
++    return nil, apiError
++}
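
The transparent retry in SendRequest only works if the request body can be rewound, which is why the write helpers above take an io.ReadSeeker and build a bodyRewinder. A minimal caller sketch, assuming an already constructed restClient and valid token strings (the URL is a placeholder):

    // bytes.Reader implements io.ReadSeeker, so the request can be replayed
    // after a transparent token refresh on a 401 with error code 1904.
    payload := bytes.NewReader([]byte(`{"name": "my-disk"}`))
    tokens := &TokenOptions{AccessToken: access, RefreshToken: refresh}
    res, err := client.Post("https://cascade.example.com/v1/disks", appJson, payload, tokens)
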
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/utils.go cascade-kubernetes/pkg/cloudprovider/providers/cascade/utils.go
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/cascade/utils.go 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,25 @@
++package cascade
++
++func StringPtr(s string) *string {
++    return &s
++}
++
++// StringVal returns string from string pointer, nil returns ""
++func StringVal(p *string) (s string) {
++    if p != nil {
++        s = *p
++    }
++    return
++}
++
++func Int64Ptr(s int64) *int64 {
++    return &s
++}
++
++func Int64Val(s *int64) int64 {
++    return *s
++}
++
++func Int32Ptr(s int32) *int32 {
++    return &s
++}
+\ No newline at end of file
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/cloudprovider/providers/providers.go cascade-kubernetes/pkg/cloudprovider/providers/providers.go
+--- kubernetes-1.8.1/pkg/cloudprovider/providers/providers.go 2018-01-23 22:47:25.422819349 +0000
+@@ -20,6 +20,7 @@
+     // Cloud providers
+     _ "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
+     _ "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
++    _ "k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
+     _ "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack"
+     _ "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
+     _ "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/printers/internalversion/describe.go cascade-kubernetes/pkg/printers/internalversion/describe.go
+--- kubernetes-1.8.1/pkg/printers/internalversion/describe.go 2018-01-23 22:47:25.518819352 +0000
+@@ -764,6 +764,8 @@
+         printFlexVolumeSource(volume.VolumeSource.FlexVolume, w)
+     case volume.VolumeSource.Flocker != nil:
+         printFlockerVolumeSource(volume.VolumeSource.Flocker, w)
++    case volume.VolumeSource.CascadeDisk != nil:
++        printCascadeDiskVolumeSource(volume.VolumeSource.CascadeDisk, w)
+     default:
+         w.Write(LEVEL_1, "<unknown>\n")
+     }
+@@ -1047,6 +1049,13 @@
+         flocker.DatasetName, flocker.DatasetUUID)
+ }
+
++func printCascadeDiskVolumeSource(cascade *api.CascadeDiskVolumeSource, w PrefixWriter) {
++    w.Write(LEVEL_2, "Type:\tCascadeDisk (a Persistent Disk resource in Cascade)\n"+
++        "    DiskID:\t%v\n"+
++        "    FSType:\t%v\n",
++        cascade.DiskID, cascade.FSType)
++}
++
+ type PersistentVolumeDescriber struct {
+     clientset.Interface
+ }
+@@ -1130,6 +1139,8 @@
+         printFlexVolumeSource(pv.Spec.FlexVolume, w)
+     case pv.Spec.Flocker != nil:
+         printFlockerVolumeSource(pv.Spec.Flocker, w)
++    case pv.Spec.CascadeDisk != nil:
++        printCascadeDiskVolumeSource(pv.Spec.CascadeDisk, w)
+     default:
+         w.Write(LEVEL_1, "<unknown>\n")
+     }
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/security/podsecuritypolicy/util/util.go cascade-kubernetes/pkg/security/podsecuritypolicy/util/util.go
+--- kubernetes-1.8.1/pkg/security/podsecuritypolicy/util/util.go 2018-01-23 22:47:25.558819354 +0000
+@@ -67,6 +67,7 @@
+         string(extensions.Projected),
+         string(extensions.PortworxVolume),
+         string(extensions.ScaleIO),
++        string(extensions.CascadeDisk),
+     )
+     return fstypes
+ }
+@@ -128,6 +129,8 @@
+         return extensions.PortworxVolume, nil
+     case v.ScaleIO != nil:
+         return extensions.ScaleIO, nil
++    case v.CascadeDisk != nil:
++        return extensions.CascadeDisk, nil
+     }
+
+     return "", fmt.Errorf("unknown volume type for volume: %#v", v)
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/volume/cascade_disk/attacher.go cascade-kubernetes/pkg/volume/cascade_disk/attacher.go
+--- kubernetes-1.8.1/pkg/volume/cascade_disk/attacher.go 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,278 @@
++package cascade_disk
++
++import (
++    "fmt"
++    "os"
++    "path"
++    "time"
++
++    "github.com/golang/glog"
++    "k8s.io/api/core/v1"
++    "k8s.io/apimachinery/pkg/types"
++    "k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
++    "k8s.io/kubernetes/pkg/util/mount"
++    "k8s.io/kubernetes/pkg/volume"
++    volumeutil "k8s.io/kubernetes/pkg/volume/util"
++    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
++    "strings"
++)
++
++type cascadeDiskAttacher struct {
++    host         volume.VolumeHost
++    cascadeDisks cascade.Disks
++}
++
++var _ volume.Attacher = &cascadeDiskAttacher{}
++var _ volume.AttachableVolumePlugin = &cascadeDiskPlugin{}
++
++func (plugin *cascadeDiskPlugin) NewAttacher() (volume.Attacher, error) {
++    cascadeCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
++    if err != nil {
++        glog.Errorf("Cascade attacher: NewAttacher failed to get cloud provider")
++        return nil, err
++    }
++
++    return &cascadeDiskAttacher{
++        host:         plugin.host,
++        cascadeDisks: cascadeCloud,
++    }, nil
++}
++
++// Attach attaches the volume specified by the given spec to the given host. On success, returns the device path where
++// the device was attached on the node.
++func (attacher *cascadeDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
++    hostName := string(nodeName)
++    volumeSource, _, err := getVolumeSource(spec)
++    if err != nil {
++        glog.Errorf("Cascade attacher: Attach failed to get volume source")
++        return "", err
++    }
++
++    attached, err := attacher.cascadeDisks.DiskIsAttached(volumeSource.DiskID, nodeName)
++    if err != nil {
++        glog.Warningf("Cascade: couldn't check if disk is Attached for host %s, will try attach disk: %+v",
++            hostName, err)
++        attached = false
++    }
++
++    var devicePath string
++    if !attached {
++        glog.V(4).Infof("Cascade: Attach disk called for host %s", hostName)
++
++        devicePath, err = attacher.cascadeDisks.AttachDisk(volumeSource.DiskID, nodeName)
++        if err != nil {
++            glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.DiskID, nodeName, err)
++            return "", err
++        }
++    }
++
++    // Cascade uses device names of the format /dev/sdX, but newer Linux kernels mount them under /dev/xvdX
++    // (source: AWS console). So we have to rename the first occurrence of sd to xvd.
++    devicePath = strings.Replace(devicePath, "sd", "xvd", 1)
++    return devicePath, nil
++}
++
++// VolumesAreAttached verifies whether the volumes specified in the spec are attached to the specified node.
++func (attacher *cascadeDiskAttacher) VolumesAreAttached(specs []*volume.Spec,
++    nodeName types.NodeName) (map[*volume.Spec]bool, error) {
++    volumesAttachedCheck := make(map[*volume.Spec]bool)
++    volumeSpecMap := make(map[string]*volume.Spec)
++    diskIDList := []string{}
++    for _, spec := range specs {
++        volumeSource, _, err := getVolumeSource(spec)
++        if err != nil {
++            glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
++            continue
++        }
++
++        diskIDList = append(diskIDList, volumeSource.DiskID)
++        volumesAttachedCheck[spec] = true
++        volumeSpecMap[volumeSource.DiskID] = spec
++    }
++    attachedResult, err := attacher.cascadeDisks.DisksAreAttached(diskIDList, nodeName)
++    if err != nil {
++        glog.Errorf(
++            "Error checking if volumes (%v) are attached to current node (%q). err=%v",
++            diskIDList, nodeName, err)
++        return volumesAttachedCheck, err
++    }
++
++    for diskID, attached := range attachedResult {
++        if !attached {
++            spec := volumeSpecMap[diskID]
++            volumesAttachedCheck[spec] = false
++            glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached",
++                diskID, spec.Name())
++        }
++    }
++    return volumesAttachedCheck, nil
++}
++
++// WaitForAttach waits until the devicePath returned by the Attach call is available.
++func (attacher *cascadeDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod,
++    timeout time.Duration) (string, error) {
++    volumeSource, _, err := getVolumeSource(spec)
++    if err != nil {
++        glog.Errorf("Cascade attacher: WaitForAttach failed to get volume source")
++        return "", err
++    }
++
++    if devicePath == "" {
++        return "", fmt.Errorf("WaitForAttach failed for disk %s: devicePath is empty.", volumeSource.DiskID)
++    }
++
++    ticker := time.NewTicker(checkSleepDuration)
++    defer ticker.Stop()
++
++    timer := time.NewTimer(timeout)
++    defer timer.Stop()
++
++    for {
++        select {
++        case <-ticker.C:
++            glog.V(4).Infof("Checking disk %s is attached", volumeSource.DiskID)
++            checkPath, err := verifyDevicePath(devicePath)
++            if err != nil {
++                // Log error, if any, and continue checking periodically. See issue #11321
++ glog.Warningf("Cascade attacher: WaitForAttach with devicePath %s Checking PD %s Error verify " +
|
|
| 2979 |
++ "path", devicePath, volumeSource.DiskID) |
|
| 2980 |
++            } else if checkPath != "" {
++                // A device path has successfully been created for the disk
++                glog.V(4).Infof("Successfully found attached disk %s.", volumeSource.DiskID)
++                return devicePath, nil
++            }
++        case <-timer.C:
++            return "", fmt.Errorf("Could not find attached disk %s. Timeout waiting for mount paths to be "+
++                "created.", volumeSource.DiskID)
++        }
++    }
++}
++
++// GetDeviceMountPath returns a path where the device should point which should be bind mounted for individual volumes.
++func (attacher *cascadeDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
++    volumeSource, _, err := getVolumeSource(spec)
++    if err != nil {
++        glog.Errorf("Cascade attacher: GetDeviceMountPath failed to get volume source")
++        return "", err
++    }
++
++    return makeGlobalPDPath(attacher.host, volumeSource.DiskID), nil
++}
++
++// GetMountDeviceRefs finds all other references to the device referenced by deviceMountPath; returns a list of paths.
++func (plugin *cascadeDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
++    mounter := plugin.host.GetMounter(plugin.GetPluginName())
++    return mount.GetMountRefs(mounter, deviceMountPath)
++}
++
++// MountDevice mounts device to global mount point.
++func (attacher *cascadeDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
++    mounter := attacher.host.GetMounter(cascadeDiskPluginName)
++    notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
++    if err != nil {
++        if os.IsNotExist(err) {
++            if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
++                glog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err)
++                return err
++            }
++            notMnt = true
++        } else {
++            return err
++        }
++    }
++
++    volumeSource, _, err := getVolumeSource(spec)
++    if err != nil {
++        glog.Errorf("Cascade attacher: MountDevice failed to get volume source. err: %s", err)
++        return err
++    }
++
++    options := []string{}
++
++    if notMnt {
++        diskMounter := volumehelper.NewSafeFormatAndMountFromHost(cascadeDiskPluginName, attacher.host)
++        mountOptions := volume.MountOptionFromSpec(spec)
++        err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
++        if err != nil {
++            os.Remove(deviceMountPath)
++            return err
++        }
++        glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v",
++            spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options)
++    }
++    return nil
++}
++
++type cascadeDiskDetacher struct {
++    mounter      mount.Interface
++    cascadeDisks cascade.Disks
++}
++
++var _ volume.Detacher = &cascadeDiskDetacher{}
++
++// NewDetacher returns the detacher associated with the Cascade volume plugin.
++func (plugin *cascadeDiskPlugin) NewDetacher() (volume.Detacher, error) {
++    cascadeCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
++    if err != nil {
++        glog.Errorf("Cascade attacher: NewDetacher failed to get cloud provider. err: %s", err)
++        return nil, err
++    }
++
++    return &cascadeDiskDetacher{
++        mounter:      plugin.host.GetMounter(plugin.GetPluginName()),
++        cascadeDisks: cascadeCloud,
++    }, nil
++}
++
++// Detach detaches the given device from the given host.
++func (detacher *cascadeDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
++    hostName := string(nodeName)
++    diskID := path.Base(deviceMountPath)
++    attached, err := detacher.cascadeDisks.DiskIsAttached(diskID, nodeName)
++    if err != nil {
++        // Log error and continue with detach
++        glog.Errorf(
++            "Error checking if persistent disk (%q) is already attached to current node (%q). "+
++                "Will continue and try detach anyway. err=%v", diskID, hostName, err)
++    }
++
++    if err == nil && !attached {
++        // Volume is already detached from node.
++        glog.V(4).Infof("detach operation was successful. persistent disk %q is already detached "+
++            "from node %q.", diskID, hostName)
++        return nil
++    }
++
++    if err := detacher.cascadeDisks.DetachDisk(diskID, nodeName); err != nil {
++        glog.Errorf("Error detaching volume %q: %v", diskID, err)
++        return err
++    }
++    return nil
++}
++
++// WaitForDetach waits for the devicePath to become unavailable.
++func (detacher *cascadeDiskDetacher) WaitForDetach(devicePath string, timeout time.Duration) error {
++    ticker := time.NewTicker(checkSleepDuration)
++    defer ticker.Stop()
++    timer := time.NewTimer(timeout)
++    defer timer.Stop()
++
++    for {
++        select {
++        case <-ticker.C:
++            glog.V(4).Infof("Checking device %q is detached.", devicePath)
++            if pathExists, err := volumeutil.PathExists(devicePath); err != nil {
++                return fmt.Errorf("Error checking if device path exists: %v", err)
++            } else if !pathExists {
++                return nil
++            }
++        case <-timer.C:
++            return fmt.Errorf("Timeout reached; Device %v is still attached", devicePath)
++        }
++    }
++}
++
++// UnmountDevice unmounts the disk specified by the device mount path.
++func (detacher *cascadeDiskDetacher) UnmountDevice(deviceMountPath string) error {
++    return volumeutil.UnmountPath(deviceMountPath, detacher.mounter)
++}
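
Taken together, this file implements the standard Kubernetes attach/detach contract. The control flow the attach/detach controller and kubelet drive looks roughly like the following sketch (spec, nodeName, pod and the timeout come from the caller; this is illustrative, not part of the patch):

    devicePath, err := attacher.Attach(spec, nodeName) // idempotent: checks DiskIsAttached first
    if err == nil {
        // Poll until the device node shows up under /dev, or time out.
        devicePath, err = attacher.WaitForAttach(spec, devicePath, pod, 2*time.Minute)
    }
    if err == nil {
        // Format on first use, then mount at the global mount point.
        err = attacher.MountDevice(spec, devicePath, deviceMountPath)
    }
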
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/volume/cascade_disk/BUILD cascade-kubernetes/pkg/volume/cascade_disk/BUILD
+--- kubernetes-1.8.1/pkg/volume/cascade_disk/BUILD 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,43 @@
++package(default_visibility = ["//visibility:public"])
++
++load(
++    "@io_bazel_rules_go//go:def.bzl",
++    "go_library",
++    "go_test",
++)
++
++go_library(
++    name = "go_default_library",
++    srcs = [
++        "attacher.go",
++        "cascade_disk.go",
++        "cascade_util.go",
++    ],
++    deps = [
++        "//pkg/cloudprovider:go_default_library",
++        "//pkg/cloudprovider/providers/cascade:go_default_library",
++        "//pkg/util/mount:go_default_library",
++        "//pkg/util/strings:go_default_library",
++        "//pkg/volume:go_default_library",
++        "//pkg/volume/util:go_default_library",
++        "//pkg/volume/util/volumehelper:go_default_library",
++        "//vendor/github.com/golang/glog:go_default_library",
++        "//vendor/k8s.io/api/core/v1:go_default_library",
++        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
++        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
++        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
++    ],
++)
++
++filegroup(
++    name = "package-srcs",
++    srcs = glob(["**"]),
++    tags = ["automanaged"],
++    visibility = ["//visibility:private"],
++)
++
++filegroup(
++    name = "all-srcs",
++    srcs = [":package-srcs"],
++    tags = ["automanaged"],
++)
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/volume/cascade_disk/cascade_disk.go cascade-kubernetes/pkg/volume/cascade_disk/cascade_disk.go
+--- kubernetes-1.8.1/pkg/volume/cascade_disk/cascade_disk.go 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,391 @@
++package cascade_disk
++
++import (
++    "fmt"
++    "os"
++    "path"
++
++    "github.com/golang/glog"
++    "k8s.io/api/core/v1"
++    "k8s.io/apimachinery/pkg/api/resource"
++    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
++    "k8s.io/apimachinery/pkg/types"
++    "k8s.io/kubernetes/pkg/util/mount"
++    utilstrings "k8s.io/kubernetes/pkg/util/strings"
++    "k8s.io/kubernetes/pkg/volume"
++    "k8s.io/kubernetes/pkg/volume/util"
++    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
++)
++
++// This is the primary entrypoint for volume plugins.
++func ProbeVolumePlugins() []volume.VolumePlugin {
++    return []volume.VolumePlugin{&cascadeDiskPlugin{}}
++}
++
++type cascadeDiskPlugin struct {
++    host volume.VolumeHost
++}
++
++var _ volume.VolumePlugin = &cascadeDiskPlugin{}
++var _ volume.PersistentVolumePlugin = &cascadeDiskPlugin{}
++var _ volume.DeletableVolumePlugin = &cascadeDiskPlugin{}
++var _ volume.ProvisionableVolumePlugin = &cascadeDiskPlugin{}
++
++const (
++    cascadeDiskPluginName = "kubernetes.io/cascade-disk"
++)
++
++// Init initializes the Cascade volume plugin.
++func (plugin *cascadeDiskPlugin) Init(host volume.VolumeHost) error {
++    plugin.host = host
++    return nil
++}
++
++// GetPluginName returns the name of the Cascade volume plugin.
++func (plugin *cascadeDiskPlugin) GetPluginName() string {
++    return cascadeDiskPluginName
++}
++
++// GetVolumeName returns the name of the volume which is the diskID in our case.
++func (plugin *cascadeDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
++    volumeSource, _, err := getVolumeSource(spec)
++    if err != nil {
++        glog.Errorf("Cascade volume plugin: GetVolumeName failed to get volume source")
++        return "", err
++    }
++
++    return volumeSource.DiskID, nil
++}
++
|
| 3229 |
++// Cascade plugin only supports the persistent volume and volume resource which has the Cascade disk annotation. |
|
| 3230 |
++func (plugin *cascadeDiskPlugin) CanSupport(spec *volume.Spec) bool {
|
|
| 3231 |
++ return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CascadeDisk != nil) || |
|
| 3232 |
++ (spec.Volume != nil && spec.Volume.CascadeDisk != nil) |
|
| 3233 |
++} |
|
| 3234 |
++ |
|
| 3235 |
++// RequiresRemount specifies whether remount is required for the disk. |
|
| 3236 |
++func (plugin *cascadeDiskPlugin) RequiresRemount() bool {
|
|
| 3237 |
++ return false |
|
| 3238 |
++} |
|
| 3239 |
++ |
|
| 3240 |
++// SupportsMountOption specifies whether the Cascade volume plugin supports the mount operation. |
|
| 3241 |
++func (plugin *cascadeDiskPlugin) SupportsMountOption() bool {
|
|
| 3242 |
++ return true |
|
| 3243 |
++} |
|
| 3244 |
++ |
|
| 3245 |
++// SupportsBulkVolumeVerification specifies whether bulk volume verification is supported. |
|
| 3246 |
++func (plugin *cascadeDiskPlugin) SupportsBulkVolumeVerification() bool {
|
|
| 3247 |
++ return false |
|
| 3248 |
++} |
|
| 3249 |
++ |
|
| 3250 |
++// NewMounter returns the mounter associated with the Cascade volume plugin. |
|
| 3251 |
++func (plugin *cascadeDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, |
|
| 3252 |
++ _ volume.VolumeOptions) (volume.Mounter, error) {
|
|
| 3253 |
++ return plugin.newMounterInternal(spec, pod.UID, &CascadeDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
|
|
| 3254 |
++} |
|
| 3255 |
++ |
|
| 3256 |
++// NewUnmounter returns the unmounter associated with the Cascade volume plugin. |
|
| 3257 |
++func (plugin *cascadeDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
|
|
| 3258 |
++ return plugin.newUnmounterInternal(volName, podUID, &CascadeDiskUtil{},
|
|
| 3259 |
++ plugin.host.GetMounter(plugin.GetPluginName())) |
|
| 3260 |
++} |
|
| 3261 |
++ |
|
| 3262 |
++func (plugin *cascadeDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, |
|
| 3263 |
++ mounter mount.Interface) (volume.Mounter, error) {
|
|
| 3264 |
++ volumeSource, _, err := getVolumeSource(spec) |
|
| 3265 |
++ if err != nil {
|
|
| 3266 |
++ glog.Errorf("Cascade volume plugin: newMounterInternal failed to get volume source")
|
|
| 3267 |
++ return nil, err |
|
| 3268 |
++ } |
|
| 3269 |
++ |
|
| 3270 |
++ diskID := volumeSource.DiskID |
|
| 3271 |
++ fsType := volumeSource.FSType |
|
| 3272 |
++ |
|
| 3273 |
++ return &cascadeDiskMounter{
|
|
| 3274 |
++ cascadeDisk: &cascadeDisk{
|
|
| 3275 |
++ podUID: podUID, |
|
| 3276 |
++ volName: spec.Name(), |
|
| 3277 |
++ diskID: diskID, |
|
| 3278 |
++ manager: manager, |
|
| 3279 |
++ mounter: mounter, |
|
| 3280 |
++ plugin: plugin, |
|
| 3281 |
++ }, |
|
| 3282 |
++ fsType: fsType, |
|
| 3283 |
++ diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil |
|
| 3284 |
++} |
|
| 3285 |
++ |
|
| 3286 |
++func (plugin *cascadeDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, |
|
| 3287 |
++ mounter mount.Interface) (volume.Unmounter, error) {
|
|
| 3288 |
++ return &cascadeDiskUnmounter{
|
|
| 3289 |
++ &cascadeDisk{
|
|
| 3290 |
++ podUID: podUID, |
|
| 3291 |
++ volName: volName, |
|
| 3292 |
++ manager: manager, |
|
| 3293 |
++ mounter: mounter, |
|
| 3294 |
++ plugin: plugin, |
|
| 3295 |
++ }}, nil |
|
| 3296 |
++} |
|
| 3297 |
++ |
|
| 3298 |
++// ConstructVolumeSpec constructs a Cascade volume spec based on the name and mount path. |
|
| 3299 |
++func (plugin *cascadeDiskPlugin) ConstructVolumeSpec(volumeSpecName, mountPath string) (*volume.Spec, error) {
|
|
| 3300 |
++ mounter := plugin.host.GetMounter(plugin.GetPluginName()) |
|
| 3301 |
++ pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName()) |
|
| 3302 |
++ diskID, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir) |
|
| 3303 |
++ if err != nil {
|
|
| 3304 |
++ return nil, err |
|
| 3305 |
++ } |
|
| 3306 |
++ |
|
| 3307 |
++ cascadeDisk := &v1.Volume{
|
|
| 3308 |
++ Name: volumeSpecName, |
|
| 3309 |
++ VolumeSource: v1.VolumeSource{
|
|
| 3310 |
++ CascadeDisk: &v1.CascadeDiskVolumeSource{
|
|
| 3311 |
++ DiskID: diskID, |
|
| 3312 |
++ }, |
|
| 3313 |
++ }, |
|
| 3314 |
++ } |
|
| 3315 |
++ return volume.NewSpecFromVolume(cascadeDisk), nil |
|
| 3316 |
++} |
|
| 3317 |
++ |
|
| 3318 |
++// Abstract interface to disk operations. |
|
| 3319 |
++type diskManager interface {
|
|
| 3320 |
++ // Creates a volume |
|
| 3321 |
++ CreateVolume(provisioner *cascadeDiskProvisioner) (diskID string, volumeSizeGB int, fstype string, err error) |
|
| 3322 |
++ // Deletes a volume |
|
| 3323 |
++ DeleteVolume(deleter *cascadeDiskDeleter) error |
|
| 3324 |
++} |
|
| 3325 |
++ |
|
| 3326 |
++// cascadeDisk volumes are disk resources attached to the kubelet's host machine and exposed to the pod. |
|
| 3327 |
++type cascadeDisk struct {
|
|
| 3328 |
++ volName string |
|
| 3329 |
++ podUID types.UID |
|
| 3330 |
++ diskID string |
|
| 3331 |
++ fsType string |
|
| 3332 |
++ manager diskManager |
|
| 3333 |
++ mounter mount.Interface |
|
| 3334 |
++ plugin *cascadeDiskPlugin |
|
| 3335 |
++ volume.MetricsNil |
|
| 3336 |
++} |
|
| 3337 |
++ |
|
| 3338 |
++var _ volume.Mounter = &cascadeDiskMounter{}
|
|
| 3339 |
++ |
|
| 3340 |
++type cascadeDiskMounter struct {
|
|
| 3341 |
++ *cascadeDisk |
|
| 3342 |
++ fsType string |
|
| 3343 |
++ diskMounter *mount.SafeFormatAndMount |
|
| 3344 |
++} |
|
| 3345 |
++ |
|
| 3346 |
++// GetAttributes returns the attributes associated with a Cascade disk. |
|
| 3347 |
++func (b *cascadeDiskMounter) GetAttributes() volume.Attributes {
|
|
| 3348 |
++ return volume.Attributes{
|
|
| 3349 |
++ SupportsSELinux: true, |
|
| 3350 |
++ } |
|
| 3351 |
++} |
|
| 3352 |
++ |
|
| 3353 |
++// CanMount checks prior to mount operations to verify that the required components (binaries, etc.) to mount the |
|
| 3354 |
++// volume are available on the underlying node. If not, it returns an error. |
|
| 3355 |
++func (b *cascadeDiskMounter) CanMount() error {
|
|
| 3356 |
++ return nil |
|
| 3357 |
++} |
|
| 3358 |
++ |
|
| 3359 |
++// SetUp attaches the disk and bind mounts to the volume path. |
|
| 3360 |
++func (b *cascadeDiskMounter) SetUp(fsGroup *int64) error {
|
|
| 3361 |
++ return b.SetUpAt(b.GetPath(), fsGroup) |
|
| 3362 |
++} |
|
| 3363 |
++ |
|
| 3364 |
++// SetUpAt attaches the disk and bind mounts to the volume path. |
|
| 3365 |
++func (b *cascadeDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
|
|
| 3366 |
++ glog.V(4).Infof("Cascade Persistent Disk setup %s to %s", b.diskID, dir)
|
|
| 3367 |
++ |
|
| 3368 |
++ // TODO: handle failed mounts here. |
|
| 3369 |
++ notmnt, err := b.mounter.IsLikelyNotMountPoint(dir) |
|
| 3370 |
++ if err != nil && !os.IsNotExist(err) {
|
|
| 3371 |
++ glog.Errorf("cannot validate mount point: %s %v", dir, err)
|
|
| 3372 |
++ return err |
|
| 3373 |
++ } |
|
| 3374 |
++ if !notmnt {
|
|
| 3375 |
++ return nil |
|
| 3376 |
++ } |
|
| 3377 |
++ |
|
| 3378 |
++ if err := os.MkdirAll(dir, 0750); err != nil {
|
|
| 3379 |
++ glog.Errorf("mkdir failed on disk %s (%v)", dir, err)
|
|
| 3380 |
++ return err |
|
| 3381 |
++ } |
|
| 3382 |
++ |
|
| 3383 |
++ options := []string{"bind"}
|
|
| 3384 |
++ |
|
| 3385 |
++ // Perform a bind mount to the full path to allow duplicate mounts of the same PD. |
|
| 3386 |
++ globalPDPath := makeGlobalPDPath(b.plugin.host, b.diskID) |
|
| 3387 |
++ glog.V(4).Infof("attempting to mount %s", dir)
|
|
| 3388 |
++ |
|
| 3389 |
++ err = b.mounter.Mount(globalPDPath, dir, "", options) |
|
| 3390 |
++ if err != nil {
|
|
| 3391 |
++ notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) |
|
| 3392 |
++ if mntErr != nil {
|
|
| 3393 |
++ glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
|
|
| 3394 |
++ return err |
|
| 3395 |
++ } |
|
| 3396 |
++ if !notmnt {
|
|
| 3397 |
++ if mntErr = b.mounter.Unmount(dir); mntErr != nil {
|
|
| 3398 |
++ glog.Errorf("Failed to unmount: %v", mntErr)
|
|
| 3399 |
++ return err |
|
| 3400 |
++ } |
|
| 3401 |
++ notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) |
|
| 3402 |
++ if mntErr != nil {
|
|
| 3403 |
++ glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
|
|
| 3404 |
++ return err |
|
| 3405 |
++ } |
|
| 3406 |
++ if !notmnt {
|
|
| 3407 |
++ glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.",
|
|
| 3408 |
++ b.GetPath()) |
|
| 3409 |
++ return err |
|
| 3410 |
++ } |
|
| 3411 |
++ } |
|
| 3412 |
++ os.Remove(dir) |
|
| 3413 |
++ glog.Errorf("Mount of disk %s failed: %v", dir, err)
|
|
| 3414 |
++ return err |
|
| 3415 |
++ } |
|
| 3416 |
++ volume.SetVolumeOwnership(b, fsGroup) |
|
| 3417 |
++ |
|
| 3418 |
++ return nil |
|
| 3419 |
++} |
|
| 3420 |
++ |
|
| 3421 |
++var _ volume.Unmounter = &cascadeDiskUnmounter{}
|
|
| 3422 |
++ |
|
| 3423 |
++type cascadeDiskUnmounter struct {
|
|
| 3424 |
++ *cascadeDisk |
|
| 3425 |
++} |
|
| 3426 |
++ |
|
| 3427 |
++// TearDown unmounts the bind mount, and detaches the disk only if the disk resource was the last reference to that |
|
| 3428 |
++// disk on the kubelet. |
|
| 3429 |
++func (c *cascadeDiskUnmounter) TearDown() error {
|
|
| 3430 |
++ return c.TearDownAt(c.GetPath()) |
|
| 3431 |
++} |
|
| 3432 |
++ |
|
| 3433 |
++// TearDownAt unmounts the bind mount, and detaches the disk only if the disk resource was the last reference to that |
|
| 3434 |
++// disk on the kubelet. |
|
| 3435 |
++func (c *cascadeDiskUnmounter) TearDownAt(dir string) error {
|
|
| 3436 |
++ return util.UnmountPath(dir, c.mounter) |
|
| 3437 |
++} |
|
| 3438 |
++ |
|
| 3439 |
++func makeGlobalPDPath(host volume.VolumeHost, diskID string) string {
|
|
| 3440 |
++ return path.Join(host.GetPluginDir(cascadeDiskPluginName), mount.MountsInGlobalPDPath, diskID) |
|
| 3441 |
++} |
|
| 3442 |
++ |
|
| 3443 |
++func (cd *cascadeDisk) GetPath() string {
|
|
| 3444 |
++ name := cascadeDiskPluginName |
|
| 3445 |
++ return cd.plugin.host.GetPodVolumeDir(cd.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cd.volName) |
|
| 3446 |
++} |
|
| 3447 |
++ |
|
| 3448 |
++func (plugin *cascadeDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
|
|
| 3449 |
++ return []v1.PersistentVolumeAccessMode{
|
|
| 3450 |
++ v1.ReadWriteOnce, |
|
| 3451 |
++ } |
|
| 3452 |
++} |
|
| 3453 |
++ |
|
| 3454 |
++type cascadeDiskDeleter struct {
|
|
| 3455 |
++ *cascadeDisk |
|
| 3456 |
++} |
|
| 3457 |
++ |
|
| 3458 |
++var _ volume.Deleter = &cascadeDiskDeleter{}
|
|
| 3459 |
++ |
|
| 3460 |
++// NewDeleter returns the deleter associated with the Cascade volume plugin. |
|
| 3461 |
++func (plugin *cascadeDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
|
|
| 3462 |
++ return plugin.newDeleterInternal(spec, &CascadeDiskUtil{})
|
|
| 3463 |
++} |
|
| 3464 |
++ |
|
| 3465 |
++func (plugin *cascadeDiskPlugin) newDeleterInternal(spec *volume.Spec, manager diskManager) (volume.Deleter, error) {
|
|
| 3466 |
++ if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CascadeDisk == nil {
|
|
| 3467 |
++ return nil, fmt.Errorf("spec.PersistentVolumeSource.CascadeDisk is nil")
|
|
| 3468 |
++ } |
|
| 3469 |
++ return &cascadeDiskDeleter{
|
|
| 3470 |
++ &cascadeDisk{
|
|
| 3471 |
++ volName: spec.Name(), |
|
| 3472 |
++ diskID: spec.PersistentVolume.Spec.CascadeDisk.DiskID, |
|
| 3473 |
++ manager: manager, |
|
| 3474 |
++ plugin: plugin, |
|
| 3475 |
++ }}, nil |
|
| 3476 |
++} |
|
| 3477 |
++ |
|
| 3478 |
++func (r *cascadeDiskDeleter) Delete() error {
|
|
| 3479 |
++ return r.manager.DeleteVolume(r) |
|
| 3480 |
++} |
|
| 3481 |
++ |
|
| 3482 |
++type cascadeDiskProvisioner struct {
|
|
| 3483 |
++ *cascadeDisk |
|
| 3484 |
++ options volume.VolumeOptions |
|
| 3485 |
++} |
|
| 3486 |
++ |
|
| 3487 |
++var _ volume.Provisioner = &cascadeDiskProvisioner{}
|
|
| 3488 |
++ |
|
| 3489 |
++// NewProvisioner returns the provisioner associated with the Cascade volume plugin. |
|
| 3490 |
++func (plugin *cascadeDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
|
|
| 3491 |
++ return plugin.newProvisionerInternal(options, &CascadeDiskUtil{})
|
|
| 3492 |
++} |
|
| 3493 |
++ |
|
| 3494 |
++func (plugin *cascadeDiskPlugin) newProvisionerInternal(options volume.VolumeOptions, |
|
| 3495 |
++ manager diskManager) (volume.Provisioner, error) {
|
|
| 3496 |
++ return &cascadeDiskProvisioner{
|
|
| 3497 |
++ cascadeDisk: &cascadeDisk{
|
|
| 3498 |
++ manager: manager, |
|
| 3499 |
++ plugin: plugin, |
|
| 3500 |
++ }, |
|
| 3501 |
++ options: options, |
|
| 3502 |
++ }, nil |
|
| 3503 |
++} |
|
| 3504 |
++ |
|
| 3505 |
++// Provision provisions the persistent volume by making a CreateDisk call to Cascade Controller. |
|
| 3506 |
++func (p *cascadeDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
|
|
| 3507 |
++ if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
|
|
| 3508 |
++ return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported",
|
|
| 3509 |
++ p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes()) |
|
| 3510 |
++ } |
|
| 3511 |
++ |
|
| 3512 |
++ diskID, sizeGB, fstype, err := p.manager.CreateVolume(p) |
|
| 3513 |
++ if err != nil {
|
|
| 3514 |
++ return nil, err |
|
| 3515 |
++ } |
|
| 3516 |
++ |
|
| 3517 |
++ if fstype == "" {
|
|
| 3518 |
++ fstype = "ext4" |
|
| 3519 |
++ } |
|
| 3520 |
++ |
|
| 3521 |
++ pv := &v1.PersistentVolume{
|
|
| 3522 |
++ ObjectMeta: metav1.ObjectMeta{
|
|
| 3523 |
++ Name: p.options.PVName, |
|
| 3524 |
++ Labels: map[string]string{},
|
|
| 3525 |
++ Annotations: map[string]string{
|
|
| 3526 |
++ volumehelper.VolumeDynamicallyCreatedByKey: "cascade-volume-dynamic-provisioner", |
|
| 3527 |
++ }, |
|
| 3528 |
++ }, |
|
| 3529 |
++ Spec: v1.PersistentVolumeSpec{
|
|
| 3530 |
++ PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy, |
|
| 3531 |
++ AccessModes: p.options.PVC.Spec.AccessModes, |
|
| 3532 |
++ Capacity: v1.ResourceList{
|
|
| 3533 |
++ v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
|
|
| 3534 |
++ }, |
|
| 3535 |
++ PersistentVolumeSource: v1.PersistentVolumeSource{
|
|
| 3536 |
++ CascadeDisk: &v1.CascadeDiskVolumeSource{
|
|
| 3537 |
++ DiskID: diskID, |
|
| 3538 |
++ FSType: fstype, |
|
| 3539 |
++ }, |
|
| 3540 |
++ }, |
|
| 3541 |
++ MountOptions: p.options.MountOptions, |
|
| 3542 |
++ }, |
|
| 3543 |
++ } |
|
| 3544 |
++ if len(p.options.PVC.Spec.AccessModes) == 0 {
|
|
| 3545 |
++ pv.Spec.AccessModes = p.plugin.GetAccessModes() |
|
| 3546 |
++ } |
|
| 3547 |
++ |
|
| 3548 |
++ return pv, nil |
|
| 3549 |
++} |
|
| 3550 |
++ |
|
| 3551 |
++func getVolumeSource(spec *volume.Spec) (*v1.CascadeDiskVolumeSource, bool, error) {
|
|
| 3552 |
++ if spec.Volume != nil && spec.Volume.CascadeDisk != nil {
|
|
| 3553 |
++ return spec.Volume.CascadeDisk, spec.ReadOnly, nil |
|
| 3554 |
++ } else if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CascadeDisk != nil {
|
|
| 3555 |
++ return spec.PersistentVolume.Spec.CascadeDisk, spec.ReadOnly, nil |
|
| 3556 |
++ } |
|
| 3557 |
++ |
|
| 3558 |
++ return nil, false, fmt.Errorf("Spec does not reference a Cascade disk type")
|
|
| 3559 |
++} |
|
| 3560 |
+\ No newline at end of file |
|
| 3561 |
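
For reference, a pod-level volume referencing a Cascade disk produced by the provisioner above would be built like the following sketch; the disk ID is a placeholder, and only the CascadeDiskVolumeSource fields come from the patch:

    vol := v1.Volume{
        Name: "data",
        VolumeSource: v1.VolumeSource{
            CascadeDisk: &v1.CascadeDiskVolumeSource{
                DiskID: "disk-1234", // placeholder ID
                FSType: "ext4",
            },
        },
    }
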
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/volume/cascade_disk/cascade_util.go cascade-kubernetes/pkg/volume/cascade_disk/cascade_util.go
+--- kubernetes-1.8.1/pkg/volume/cascade_disk/cascade_util.go 1970-01-01 00:00:00.000000000 +0000
+@@ -0,0 +1,107 @@
++package cascade_disk
++
++import (
++    "fmt"
++    "strings"
++    "time"
++
++    "github.com/golang/glog"
++    "k8s.io/api/core/v1"
++    "k8s.io/kubernetes/pkg/cloudprovider"
++    "k8s.io/kubernetes/pkg/cloudprovider/providers/cascade"
++    "k8s.io/kubernetes/pkg/volume"
++    volumeutil "k8s.io/kubernetes/pkg/volume/util"
++)
++
++const (
++    checkSleepDuration = time.Second
++)
++
++type CascadeDiskUtil struct{}
++
++func verifyDevicePath(path string) (string, error) {
++    if pathExists, err := volumeutil.PathExists(path); err != nil {
++        return "", fmt.Errorf("Error checking if path exists: %v", err)
++    } else if pathExists {
++        return path, nil
++    }
++
++    glog.V(4).Infof("verifyDevicePath: path does not exist yet")
++    return "", nil
++}
++
++// CreateVolume creates a Cascade persistent disk.
++func (util *CascadeDiskUtil) CreateVolume(p *cascadeDiskProvisioner) (diskID string, capacityGB int, fstype string,
++    err error) {
++    cloud, err := getCloudProvider(p.plugin.host.GetCloudProvider())
++    if err != nil {
++        glog.Errorf("Cascade Util: CreateVolume failed to get cloud provider. Error [%v]", err)
++        return "", 0, "", err
++    }
++
++    capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
++    volSizeBytes := capacity.Value()
++    // Cascade works with GB, convert to GB with rounding up
++    volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
++    name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 255)
++    volumeOptions := &cascade.VolumeOptions{
++        CapacityGB: volSizeGB,
++        Tags:       *p.options.CloudTags,
++        Name:       name,
++    }
++
++    for parameter, value := range p.options.Parameters {
++        switch strings.ToLower(parameter) {
++        case "flavor":
++            volumeOptions.Flavor = value
++        case volume.VolumeParameterFSType:
++            fstype = value
++            glog.V(4).Infof("Cascade Util: Setting fstype to %s", fstype)
++        default:
++            glog.Errorf("Cascade Util: invalid option %s for volume plugin %s.", parameter,
++                p.plugin.GetPluginName())
++            return "", 0, "", fmt.Errorf("Cascade Util: invalid option %s for volume plugin %s.", parameter,
++                p.plugin.GetPluginName())
++        }
++    }
++
++    diskID, err = cloud.CreateDisk(volumeOptions)
++    if err != nil {
++        glog.Errorf("Cascade Util: failed to CreateDisk. Error [%v]", err)
++        return "", 0, "", err
++    }
++
++ glog.V(4).Infof("Successfully created Cascade persistent disk %s", name)
|
|
| 3638 |
++ return diskID, volSizeGB, fstype, nil |
|
| 3639 |
++} |
|
| 3640 |
++ |
|
| 3641 |
++// DeleteVolume deletes a Cascade volume. |
|
| 3642 |
++func (util *CascadeDiskUtil) DeleteVolume(disk *cascadeDiskDeleter) error {
|
|
| 3643 |
++ cloud, err := getCloudProvider(disk.plugin.host.GetCloudProvider()) |
|
| 3644 |
++ if err != nil {
|
|
| 3645 |
++ glog.Errorf("Cascade Util: DeleteVolume failed to get cloud provider. Error [%v]", err)
|
|
| 3646 |
++ return err |
|
| 3647 |
++ } |
|
| 3648 |
++ |
|
| 3649 |
++ if err = cloud.DeleteDisk(disk.diskID); err != nil {
|
|
| 3650 |
++ glog.Errorf("Cascade Util: failed to DeleteDisk for diskID %s. Error [%v]", disk.diskID, err)
|
|
| 3651 |
++ return err |
|
| 3652 |
++ } |
|
| 3653 |
++ |
|
| 3654 |
++ glog.V(4).Infof("Successfully deleted Cascade persistent disk %s", disk.diskID)
|
|
| 3655 |
++ return nil |
|
| 3656 |
++} |
|
| 3657 |
++ |
|
| 3658 |
++func getCloudProvider(cloud cloudprovider.Interface) (*cascade.CascadeCloud, error) {
|
|
| 3659 |
++ if cloud == nil {
|
|
| 3660 |
++ glog.Errorf("Cascade Util: Cloud provider not initialized properly")
|
|
| 3661 |
++ return nil, fmt.Errorf("Cascade Util: Cloud provider not initialized properly")
|
|
| 3662 |
++ } |
|
| 3663 |
++ |
|
| 3664 |
++ cc, ok := cloud.(*cascade.CascadeCloud) |
|
| 3665 |
++ if !ok {
|
|
| 3666 |
++ glog.Errorf("Invalid cloud provider: expected Cascade")
|
|
| 3667 |
++ return nil, fmt.Errorf("Invalid cloud provider: expected Cascade")
|
|
| 3668 |
++ } |
|
| 3669 |
++ return cc, nil |
|
| 3670 |
++} |
|
| 3671 |
+diff -uNr --no-dereference kubernetes-1.8.1/pkg/volume/cascade_disk/OWNERS cascade-kubernetes/pkg/volume/cascade_disk/OWNERS |
|
| 3672 |
+--- kubernetes-1.8.1/pkg/volume/cascade_disk/OWNERS 1970-01-01 00:00:00.000000000 +0000 |
|
| 3673 |
+@@ -0,0 +1,2 @@ |
|
| 3674 |
++maintainers: |
|
| 3675 |
++- ashokc |
|
| 3676 |
+diff -uNr --no-dereference kubernetes-1.8.1/plugin/pkg/admission/persistentvolume/label/admission.go cascade-kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go |
|
| 3677 |
+--- kubernetes-1.8.1/plugin/pkg/admission/persistentvolume/label/admission.go 2018-01-23 22:47:25.566819354 +0000 |
|
| 3678 |
+@@ -31,6 +31,7 @@ |
|
| 3679 |
+ kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" |
|
| 3680 |
+ kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" |
|
| 3681 |
+ vol "k8s.io/kubernetes/pkg/volume" |
|
| 3682 |
++ "k8s.io/kubernetes/pkg/cloudprovider/providers/cascade" |
|
| 3683 |
+ ) |
|
| 3684 |
+ |
|
| 3685 |
+ // Register registers a plugin |
|
| 3686 |
+@@ -50,6 +51,7 @@ |
|
| 3687 |
+ ebsVolumes aws.Volumes |
|
| 3688 |
+ cloudConfig []byte |
|
| 3689 |
+ gceCloudProvider *gce.GCECloud |
|
| 3690 |
++ cascadeDisks cascade.Disks |
|
| 3691 |
+ } |
|
| 3692 |
+ |
|
| 3693 |
+ var _ kubeapiserveradmission.WantsCloudConfig = &persistentVolumeLabel{}
|
|
| 3694 |
+@@ -101,6 +103,13 @@ |
|
| 3695 |
+ } |
|
| 3696 |
+ volumeLabels = labels |
|
| 3697 |
+ } |
|
| 3698 |
++ if volume.Spec.CascadeDisk != nil {
|
|
| 3699 |
++ labels, err := l.findCascadeDiskLabels(volume) |
|
| 3700 |
++ if err != nil {
|
|
| 3701 |
++ return admission.NewForbidden(a, fmt.Errorf("error querying Cascade volume %s: %v", volume.Spec.CascadeDisk.DiskID, err))
|
|
| 3702 |
++ } |
|
| 3703 |
++ volumeLabels = labels |
|
| 3704 |
++ } |
|
| 3705 |
+ |
|
| 3706 |
+ if len(volumeLabels) != 0 {
|
|
| 3707 |
+ if volume.Labels == nil {
|
|
| 3708 |
+@@ -213,3 +222,48 @@ |
|
| 3709 |
+ } |
|
| 3710 |
+ return l.gceCloudProvider, nil |
|
| 3711 |
+ } |
|
| 3712 |
++ |
|
| 3713 |
++func (l *persistentVolumeLabel) findCascadeDiskLabels(volume *api.PersistentVolume) (map[string]string, error) {
|
|
| 3714 |
++ // Ignore any volumes that are being provisioned |
|
| 3715 |
++ if volume.Spec.CascadeDisk.DiskID == vol.ProvisionedVolumeName {
|
|
| 3716 |
++ return nil, nil |
|
| 3717 |
++ } |
|
| 3718 |
++ cascadeDisks, err := l.getCascadeDisks() |
|
| 3719 |
++ if err != nil {
|
|
| 3720 |
++ return nil, err |
|
| 3721 |
++ } |
|
| 3722 |
++ if cascadeDisks == nil {
|
|
| 3723 |
++ return nil, fmt.Errorf("unable to build Cascade cloud provider for volumes")
|
|
| 3724 |
++ } |
|
| 3725 |
++ |
|
| 3726 |
++ labels, err := cascadeDisks.GetVolumeLabels(volume.Spec.CascadeDisk.DiskID) |
|
| 3727 |
++ if err != nil {
|
|
| 3728 |
++ return nil, err |
|
| 3729 |
++ } |
|
| 3730 |
++ |
|
| 3731 |
++ return labels, nil |
|
| 3732 |
++} |
|
| 3733 |
++ |
|
| 3734 |
++// getCascadeDisks returns the Cascade Disks interface |
|
| 3735 |
++func (l *persistentVolumeLabel) getCascadeDisks() (cascade.Disks, error) {
|
|
| 3736 |
++ l.mutex.Lock() |
|
| 3737 |
++ defer l.mutex.Unlock() |
|
| 3738 |
++ |
|
| 3739 |
++ if l.cascadeDisks == nil {
|
|
| 3740 |
++ var cloudConfigReader io.Reader |
|
| 3741 |
++ if len(l.cloudConfig) > 0 {
|
|
| 3742 |
++ cloudConfigReader = bytes.NewReader(l.cloudConfig) |
|
| 3743 |
++ } |
|
| 3744 |
++ cloudProvider, err := cloudprovider.GetCloudProvider("cascade", cloudConfigReader)
|
|
| 3745 |
++ if err != nil || cloudProvider == nil {
|
|
| 3746 |
++ return nil, err |
|
| 3747 |
++ } |
|
| 3748 |
++ provider, ok := cloudProvider.(*cascade.CascadeCloud) |
|
| 3749 |
++ if !ok {
|
|
| 3750 |
++ // GetCloudProvider has gone very wrong |
|
| 3751 |
++ return nil, fmt.Errorf("error retrieving Cascade cloud provider")
|
|
| 3752 |
++ } |
|
| 3753 |
++ l.cascadeDisks = provider |
|
| 3754 |
++ } |
|
| 3755 |
++ return l.cascadeDisks, nil |
|
| 3756 |
++} |
|
| 3757 |
+\ No newline at end of file |
|
| 3758 |
+diff -uNr --no-dereference kubernetes-1.8.1/staging/src/k8s.io/api/core/v1/generated.pb.go cascade-kubernetes/staging/src/k8s.io/api/core/v1/generated.pb.go |
|
| 3759 |
+--- kubernetes-1.8.1/staging/src/k8s.io/api/core/v1/generated.pb.go 2018-01-23 22:47:25.594819355 +0000 |
|
| 3760 |
+@@ -34,6 +34,7 @@ |
|
| 3761 |
+ AzureFileVolumeSource |
|
| 3762 |
+ Binding |
|
| 3763 |
+ Capabilities |
|
| 3764 |
++ CascadeDiskVolumeSource |
|
| 3765 |
+ CephFSPersistentVolumeSource |
|
| 3766 |
+ CephFSVolumeSource |
|
| 3767 |
+ CinderVolumeSource |
|
| 3768 |
+@@ -992,6 +993,12 @@ |
|
| 3769 |
+ return fileDescriptorGenerated, []int{177}
|
|
| 3770 |
+ } |
|
| 3771 |
+ |
|
| 3772 |
++func (m *CascadeDiskVolumeSource) Reset() { *m = CascadeDiskVolumeSource{} }
|
|
| 3773 |
++func (*CascadeDiskVolumeSource) ProtoMessage() {}
|
|
| 3774 |
++func (*CascadeDiskVolumeSource) Descriptor() ([]byte, []int) {
|
|
| 3775 |
++ return fileDescriptorGenerated, []int{178}
|
|
| 3776 |
++} |
|
| 3777 |
++ |
|
| 3778 |
+ func init() {
|
|
| 3779 |
+ proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") |
|
| 3780 |
+ proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") |
|
| 3781 |
+@@ -1002,6 +1009,7 @@ |
|
| 3782 |
+ proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") |
|
| 3783 |
+ proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") |
|
| 3784 |
+ proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") |
|
| 3785 |
++ proto.RegisterType((*CascadeDiskVolumeSource)(nil), "k8s.io.api.core.v1.CascadeDiskVolumeSource") |
|
| 3786 |
+ proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") |
|
| 3787 |
+ proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") |
|
| 3788 |
+ proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") |
|
| 3789 |
+@@ -1523,6 +1531,32 @@ |
|
| 3790 |
+ return i, nil |
|
| 3791 |
+ } |
|
| 3792 |
+ |
|
| 3793 |
++func (m *CascadeDiskVolumeSource) Marshal() (dAtA []byte, err error) {
|
|
| 3794 |
++ size := m.Size() |
|
| 3795 |
++ dAtA = make([]byte, size) |
|
| 3796 |
++ n, err := m.MarshalTo(dAtA) |
|
| 3797 |
++ if err != nil {
|
|
| 3798 |
++ return nil, err |
|
| 3799 |
++ } |
|
| 3800 |
++ return dAtA[:n], nil |
|
| 3801 |
++} |
|
| 3802 |
++ |
|
| 3803 |
++func (m *CascadeDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) {
|
|
| 3804 |
++ var i int |
|
| 3805 |
++ _ = i |
|
| 3806 |
++ var l int |
|
| 3807 |
++ _ = l |
|
| 3808 |
++ dAtA[i] = 0xa |
|
| 3809 |
++ i++ |
|
| 3810 |
++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskID))) |
|
| 3811 |
++ i += copy(dAtA[i:], m.DiskID) |
|
| 3812 |
++ dAtA[i] = 0x12 |
|
| 3813 |
++ i++ |
|
| 3814 |
++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) |
|
| 3815 |
++ i += copy(dAtA[i:], m.FSType) |
|
| 3816 |
++ return i, nil |
|
| 3817 |
++} |
|
| 3818 |
++ |
|
| 3819 |
+ func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
|
|
| 3820 |
+ size := m.Size() |
|
| 3821 |
+ dAtA = make([]byte, size) |
|
| 3822 |
+@@ -6080,6 +6114,18 @@ |
|
| 3823 |
+ } |
|
| 3824 |
+ i += n121 |
|
| 3825 |
+ } |
|
| 3826 |
++ if m.CascadeDisk != nil {
|
|
| 3827 |
++ dAtA[i] = 0xb2 |
|
| 3828 |
++ i++ |
|
| 3829 |
++ dAtA[i] = 0x1 |
|
| 3830 |
++ i++ |
|
| 3831 |
++ i = encodeVarintGenerated(dAtA, i, uint64(m.CascadeDisk.Size())) |
|
| 3832 |
++ n122, err := m.CascadeDisk.MarshalTo(dAtA[i:]) |
|
| 3833 |
++ if err != nil {
|
|
| 3834 |
++ return 0, err |
|
| 3835 |
++ } |
|
| 3836 |
++ i += n122 |
|
| 3837 |
++ } |
|
| 3838 |
+ return i, nil |
|
| 3839 |
+ } |
|
| 3840 |
+ |
|
| 3841 |
+@@ -9824,6 +9870,18 @@ |
|
| 3842 |
+ } |
|
| 3843 |
+ i += n220 |
|
| 3844 |
+ } |
|
| 3845 |
++ if m.CascadeDisk != nil {
|
|
| 3846 |
++ dAtA[i] = 0xe2 |
|
| 3847 |
++ i++ |
|
| 3848 |
++ dAtA[i] = 0x1 |
|
| 3849 |
++ i++ |
|
| 3850 |
++ i = encodeVarintGenerated(dAtA, i, uint64(m.CascadeDisk.Size())) |
|
| 3851 |
++ n221, err := m.CascadeDisk.MarshalTo(dAtA[i:]) |
|
| 3852 |
++ if err != nil {
|
|
| 3853 |
++ return 0, err |
|
| 3854 |
++ } |
|
| 3855 |
++ i += n221 |
|
| 3856 |
++ } |
|
| 3857 |
+ return i, nil |
|
| 3858 |
+ } |
|
| 3859 |
+ |
|
| 3860 |
+@@ -10048,6 +10106,16 @@ |
|
| 3861 |
+ return n |
|
| 3862 |
+ } |
|
| 3863 |
+ |
|
| 3864 |
++func (m *CascadeDiskVolumeSource) Size() (n int) {
|
|
| 3865 |
++ var l int |
|
| 3866 |
++ _ = l |
|
| 3867 |
++ l = len(m.DiskID) |
|
| 3868 |
++ n += 1 + l + sovGenerated(uint64(l)) |
|
| 3869 |
++ l = len(m.FSType) |
|
| 3870 |
++ n += 1 + l + sovGenerated(uint64(l)) |
|
| 3871 |
++ return n |
|
| 3872 |
++} |
|
| 3873 |
++ |
|
| 3874 |
+ func (m *CephFSPersistentVolumeSource) Size() (n int) {
|
|
| 3875 |
+ var l int |
|
| 3876 |
+ _ = l |
|
| 3877 |
+@@ -11711,6 +11779,10 @@ |
|
| 3878 |
+ l = m.StorageOS.Size() |
|
| 3879 |
+ n += 2 + l + sovGenerated(uint64(l)) |
|
| 3880 |
+ } |
|
| 3881 |
++ if m.CascadeDisk != nil {
|
|
| 3882 |
++ l = m.CascadeDisk.Size() |
|
| 3883 |
++ n += 2 + l + sovGenerated(uint64(l)) |
|
| 3884 |
++ } |
|
| 3885 |
+ return n |
|
| 3886 |
+ } |
|
| 3887 |
+ |
|
| 3888 |
+@@ -13055,6 +13127,10 @@ |
|
| 3889 |
+ l = m.StorageOS.Size() |
|
| 3890 |
+ n += 2 + l + sovGenerated(uint64(l)) |
|
| 3891 |
+ } |
|
| 3892 |
++ if m.CascadeDisk != nil {
|
|
| 3893 |
++ l = m.CascadeDisk.Size() |
|
| 3894 |
++ n += 2 + l + sovGenerated(uint64(l)) |
|
| 3895 |
++ } |
|
| 3896 |
+ return n |
|
| 3897 |
+ } |
|
| 3898 |
+ |
|
| 3899 |
+@@ -13202,6 +13278,17 @@ |
|
| 3900 |
+ }, "") |
|
| 3901 |
+ return s |
|
| 3902 |
+ } |
|
| 3903 |
++func (this *CascadeDiskVolumeSource) String() string {
|
|
| 3904 |
++ if this == nil {
|
|
| 3905 |
++ return "nil" |
|
| 3906 |
++ } |
|
| 3907 |
++ s := strings.Join([]string{`&CascadeDiskVolumeSource{`,
|
|
| 3908 |
++ `DiskID:` + fmt.Sprintf("%v", this.DiskID) + `,`,
|
|
| 3909 |
++ `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
|
|
| 3910 |
++ `}`, |
|
| 3911 |
++ }, "") |
|
| 3912 |
++ return s |
|
| 3913 |
++} |
|
| 3914 |
+ func (this *CephFSPersistentVolumeSource) String() string {
|
|
| 3915 |
+ if this == nil {
|
|
| 3916 |
+ return "nil" |
|
| 3917 |
+@@ -14532,6 +14619,7 @@ |
|
| 3918 |
+ `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`,
|
|
| 3919 |
+ `Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`,
|
|
| 3920 |
+ `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`,
|
|
| 3921 |
++ `CascadeDisk:` + strings.Replace(fmt.Sprintf("%v", this.CascadeDisk), "CascadeDiskVolumeSource", "CascadeDiskVolumeSource", 1) + `,`,
|
|
| 3922 |
+ `}`, |
|
| 3923 |
+ }, "") |
|
| 3924 |
+ return s |
|
| 3925 |
+@@ -15592,6 +15680,7 @@ |
|
| 3926 |
+ `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`,
|
|
| 3927 |
+ `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`,
|
|
| 3928 |
+ `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`,
|
|
| 3929 |
++ `CascadeDisk:` + strings.Replace(fmt.Sprintf("%v", this.CascadeDisk), "CascadeDiskVolumeSource", "CascadeDiskVolumeSource", 1) + `,`,
|
|
| 3930 |
+ `}`, |
|
| 3931 |
+ }, "") |
|
| 3932 |
+ return s |
|
| 3933 |
+@@ -32799,6 +32888,39 @@ |
|
| 3934 |
+ return err |
|
| 3935 |
+ } |
|
| 3936 |
+ iNdEx = postIndex |
|
| 3937 |
++ case 22: |
|
| 3938 |
++ if wireType != 2 {
|
|
| 3939 |
++ return fmt.Errorf("proto: wrong wireType = %d for field CascadeDisk", wireType)
|
|
| 3940 |
++ } |
|
| 3941 |
++ var msglen int |
|
| 3942 |
++ for shift := uint(0); ; shift += 7 {
|
|
| 3943 |
++ if shift >= 64 {
|
|
| 3944 |
++ return ErrIntOverflowGenerated |
|
| 3945 |
++ } |
|
| 3946 |
++ if iNdEx >= l {
|
|
| 3947 |
++ return io.ErrUnexpectedEOF |
|
| 3948 |
++ } |
|
| 3949 |
++ b := dAtA[iNdEx] |
|
| 3950 |
++ iNdEx++ |
|
| 3951 |
++ msglen |= (int(b) & 0x7F) << shift |
|
| 3952 |
++ if b < 0x80 {
|
|
| 3953 |
++ break |
|
| 3954 |
++ } |
|
| 3955 |
++ } |
|
| 3956 |
++ if msglen < 0 {
|
|
| 3957 |
++ return ErrInvalidLengthGenerated |
|
| 3958 |
++ } |
|
| 3959 |
++ postIndex := iNdEx + msglen |
|
| 3960 |
++ if postIndex > l {
|
|
| 3961 |
++ return io.ErrUnexpectedEOF |
|
| 3962 |
++ } |
|
| 3963 |
++ if m.CascadeDisk == nil {
|
|
| 3964 |
++ m.CascadeDisk = &CascadeDiskVolumeSource{}
|
|
| 3965 |
++ } |
|
| 3966 |
++ if err := m.CascadeDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
|
| 3967 |
++ return err |
|
| 3968 |
++ } |
|
| 3969 |
++ iNdEx = postIndex |
|
| 3970 |
+ default: |
|
| 3971 |
+ iNdEx = preIndex |
|
| 3972 |
+ skippy, err := skipGenerated(dAtA[iNdEx:]) |
|
| 3973 |
+@@ -33307,6 +33429,114 @@ |
|
| 3974 |
+ } |
|
| 3975 |
+ return nil |
|
| 3976 |
+ } |
|
| 3977 |
++func (m *CascadeDiskVolumeSource) Unmarshal(dAtA []byte) error {
|
|
| 3978 |
++ l := len(dAtA) |
|
| 3979 |
++ iNdEx := 0 |
|
| 3980 |
++ for iNdEx < l {
|
|
| 3981 |
++ preIndex := iNdEx |
|
| 3982 |
++ var wire uint64 |
|
| 3983 |
++ for shift := uint(0); ; shift += 7 {
|
|
| 3984 |
++ if shift >= 64 {
|
|
| 3985 |
++ return ErrIntOverflowGenerated |
|
| 3986 |
++ } |
|
| 3987 |
++ if iNdEx >= l {
|
|
| 3988 |
++ return io.ErrUnexpectedEOF |
|
| 3989 |
++ } |
|
| 3990 |
++ b := dAtA[iNdEx] |
|
| 3991 |
++ iNdEx++ |
|
| 3992 |
++ wire |= (uint64(b) & 0x7F) << shift |
|
| 3993 |
++ if b < 0x80 {
|
|
| 3994 |
++ break |
|
| 3995 |
++ } |
|
| 3996 |
++ } |
|
| 3997 |
++ fieldNum := int32(wire >> 3) |
|
| 3998 |
++ wireType := int(wire & 0x7) |
|
| 3999 |
++ if wireType == 4 {
|
|
| 4000 |
++ return fmt.Errorf("proto: CascadeDiskVolumeSource: wiretype end group for non-group")
|
|
| 4001 |
++ } |
|
| 4002 |
++ if fieldNum <= 0 {
|
|
| 4003 |
++ return fmt.Errorf("proto: CascadeDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
|
|
| 4004 |
++ } |
|
| 4005 |
++ switch fieldNum {
|
|
| 4006 |
++ case 1: |
|
| 4007 |
++ if wireType != 2 {
|
|
| 4008 |
++ return fmt.Errorf("proto: wrong wireType = %d for field DiskID", wireType)
|
|
| 4009 |
++ } |
|
| 4010 |
++ var stringLen uint64 |
|
| 4011 |
++ for shift := uint(0); ; shift += 7 {
|
|
| 4012 |
++ if shift >= 64 {
|
|
| 4013 |
++ return ErrIntOverflowGenerated |
|
| 4014 |
++ } |
|
| 4015 |
++ if iNdEx >= l {
|
|
| 4016 |
++ return io.ErrUnexpectedEOF |
|
| 4017 |
++ } |
|
| 4018 |
++ b := dAtA[iNdEx] |
|
| 4019 |
++ iNdEx++ |
|
| 4020 |
++ stringLen |= (uint64(b) & 0x7F) << shift |
|
| 4021 |
++ if b < 0x80 {
|
|
| 4022 |
++ break |
|
| 4023 |
++ } |
|
| 4024 |
++ } |
|
| 4025 |
++ intStringLen := int(stringLen) |
|
| 4026 |
++ if intStringLen < 0 {
|
|
| 4027 |
++ return ErrInvalidLengthGenerated |
|
| 4028 |
++ } |
|
| 4029 |
++ postIndex := iNdEx + intStringLen |
|
| 4030 |
++ if postIndex > l {
|
|
| 4031 |
++ return io.ErrUnexpectedEOF |
|
| 4032 |
++ } |
|
| 4033 |
++ m.DiskID = string(dAtA[iNdEx:postIndex]) |
|
| 4034 |
++ iNdEx = postIndex |
|
| 4035 |
++ case 2: |
|
| 4036 |
++ if wireType != 2 {
|
|
| 4037 |
++ return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
|
|
| 4038 |
++ } |
|
| 4039 |
++ var stringLen uint64 |
|
| 4040 |
++ for shift := uint(0); ; shift += 7 {
|
|
| 4041 |
++ if shift >= 64 {
|
|
| 4042 |
++ return ErrIntOverflowGenerated |
|
| 4043 |
++ } |
|
| 4044 |
++ if iNdEx >= l {
|
|
| 4045 |
++ return io.ErrUnexpectedEOF |
|
| 4046 |
++ } |
|
| 4047 |
++ b := dAtA[iNdEx] |
|
| 4048 |
++ iNdEx++ |
|
| 4049 |
++ stringLen |= (uint64(b) & 0x7F) << shift |
|
| 4050 |
++ if b < 0x80 {
|
|
| 4051 |
++ break |
|
| 4052 |
++ } |
|
| 4053 |
++ } |
|
| 4054 |
++ intStringLen := int(stringLen) |
|
| 4055 |
++ if intStringLen < 0 {
|
|
| 4056 |
++ return ErrInvalidLengthGenerated |
|
| 4057 |
++ } |
|
| 4058 |
++ postIndex := iNdEx + intStringLen |
|
| 4059 |
++ if postIndex > l {
|
|
| 4060 |
++ return io.ErrUnexpectedEOF |
|
| 4061 |
++ } |
|
| 4062 |
++ m.FSType = string(dAtA[iNdEx:postIndex]) |
|
| 4063 |
++ iNdEx = postIndex |
|
| 4064 |
++ default: |
|
| 4065 |
++ iNdEx = preIndex |
|
| 4066 |
++ skippy, err := skipGenerated(dAtA[iNdEx:]) |
|
| 4067 |
++ if err != nil {
|
|
| 4068 |
++ return err |
|
| 4069 |
++ } |
|
| 4070 |
++ if skippy < 0 {
|
|
| 4071 |
++ return ErrInvalidLengthGenerated |
|
| 4072 |
++ } |
|
| 4073 |
++ if (iNdEx + skippy) > l {
|
|
| 4074 |
++ return io.ErrUnexpectedEOF |
|
| 4075 |
++ } |
|
| 4076 |
++ iNdEx += skippy |
|
| 4077 |
++ } |
|
| 4078 |
++ } |
|
| 4079 |
++ |
|
| 4080 |
++ if iNdEx > l {
|
|
| 4081 |
++ return io.ErrUnexpectedEOF |
|
| 4082 |
++ } |
|
| 4083 |
++ return nil |
|
| 4084 |
++} |
|
| 4085 |
+ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error {
|
|
| 4086 |
+ l := len(dAtA) |
|
| 4087 |
+ iNdEx := 0 |
|
| 4088 |
+@@ -45941,6 +46171,39 @@ |
|
| 4089 |
+ return err |
|
| 4090 |
+ } |
|
| 4091 |
+ iNdEx = postIndex |
|
| 4092 |
++ case 28: |
|
| 4093 |
++ if wireType != 2 {
|
|
| 4094 |
++ return fmt.Errorf("proto: wrong wireType = %d for field CascadeDisk", wireType)
|
|
| 4095 |
++ } |
|
| 4096 |
++ var msglen int |
|
| 4097 |
++ for shift := uint(0); ; shift += 7 {
|
|
| 4098 |
++ if shift >= 64 {
|
|
| 4099 |
++ return ErrIntOverflowGenerated |
|
| 4100 |
++ } |
|
| 4101 |
++ if iNdEx >= l {
|
|
| 4102 |
++ return io.ErrUnexpectedEOF |
|
| 4103 |
++ } |
|
| 4104 |
++ b := dAtA[iNdEx] |
|
| 4105 |
++ iNdEx++ |
|
| 4106 |
++ msglen |= (int(b) & 0x7F) << shift |
|
| 4107 |
++ if b < 0x80 {
|
|
| 4108 |
++ break |
|
| 4109 |
++ } |
|
| 4110 |
++ } |
|
| 4111 |
++ if msglen < 0 {
|
|
| 4112 |
++ return ErrInvalidLengthGenerated |
|
| 4113 |
++ } |
|
| 4114 |
++ postIndex := iNdEx + msglen |
|
| 4115 |
++ if postIndex > l {
|
|
| 4116 |
++ return io.ErrUnexpectedEOF |
|
| 4117 |
++ } |
|
| 4118 |
++ if m.CascadeDisk == nil {
|
|
| 4119 |
++ m.CascadeDisk = &CascadeDiskVolumeSource{}
|
|
| 4120 |
++ } |
|
| 4121 |
++ if err := m.CascadeDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
|
| 4122 |
++ return err |
|
| 4123 |
++ } |
|
| 4124 |
++ iNdEx = postIndex |
|
| 4125 |
+ default: |
|
| 4126 |
+ iNdEx = preIndex |
|
| 4127 |
+ skippy, err := skipGenerated(dAtA[iNdEx:]) |
|
| 4128 |
+diff -uNr --no-dereference kubernetes-1.8.1/staging/src/k8s.io/api/core/v1/types.go cascade-kubernetes/staging/src/k8s.io/api/core/v1/types.go |
|
| 4129 |
+--- kubernetes-1.8.1/staging/src/k8s.io/api/core/v1/types.go 2018-01-23 22:47:25.594819355 +0000 |
|
| 4130 |
+@@ -350,6 +350,8 @@ |
|
| 4131 |
+ // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. |
|
| 4132 |
+ // +optional |
|
| 4133 |
+ StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` |
|
| 4134 |
++ // CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine |
|
| 4135 |
++ CascadeDisk *CascadeDiskVolumeSource `json:"cascadeDisk,omitempty" protobuf:"bytes,28,opt,name=cascadeDisk"` |
|
| 4136 |
+ } |
|
| 4137 |
+ |
|
| 4138 |
+ // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. |
|
| 4139 |
+@@ -448,6 +450,8 @@ |
|
| 4140 |
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md |
|
| 4141 |
+ // +optional |
|
| 4142 |
+ StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` |
|
| 4143 |
++ // CascadeDisk represents a Cascade persistent disk attached and mounted on kubelets host machine |
|
| 4144 |
++ CascadeDisk *CascadeDiskVolumeSource `json:"cascadeDisk,omitempty" protobuf:"bytes,22,opt,name=cascadeDisk"` |
|
| 4145 |
+ } |
|
| 4146 |
+ |
|
| 4147 |
+ const ( |
|
| 4148 |
+@@ -1431,6 +1435,16 @@ |
|
| 4149 |
+ SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` |
|
| 4150 |
+ } |
|
| 4151 |
+ |
|
| 4152 |
++// Represents a Cascade persistent disk resource. |
|
| 4153 |
++type CascadeDiskVolumeSource struct {
|
|
| 4154 |
++ // ID that identifies Cascade persistent disk |
|
| 4155 |
++ DiskID string `json:"diskID" protobuf:"bytes,1,opt,name=diskID"` |
|
| 4156 |
++ // Filesystem type to mount. |
|
| 4157 |
++ // Must be a filesystem type supported by the host operating system. |
|
| 4158 |
++ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. |
|
| 4159 |
++ FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` |
|
| 4160 |
++} |
|
| 4161 |
++ |
|
| 4162 |
+ // Adapts a ConfigMap into a volume. |
|
| 4163 |
+ // |
|
| 4164 |
+ // The contents of the target ConfigMap's Data field will be presented in a |
| ... | ... |
@@ -1,13 +1,14 @@ |
| 1 | 1 |
Summary: Kubernetes cluster management |
| 2 | 2 |
Name: kubernetes |
| 3 | 3 |
Version: 1.8.1 |
| 4 |
-Release: 3%{?dist}
|
|
| 4 |
+Release: 4%{?dist}
|
|
| 5 | 5 |
License: ASL 2.0 |
| 6 | 6 |
URL: https://github.com/kubernetes/kubernetes/archive/v%{version}.tar.gz
|
| 7 | 7 |
Source0: kubernetes-v%{version}.tar.gz
|
| 8 | 8 |
%define sha1 kubernetes-v%{version}.tar.gz=74bad7a52f3fc0b3c70e5ccc986a46a9fdfe358b
|
| 9 | 9 |
Source1: https://github.com/kubernetes/contrib/archive/contrib-0.7.0.tar.gz |
| 10 | 10 |
%define sha1 contrib-0.7.0=47a744da3b396f07114e518226b6313ef4b2203c |
| 11 |
+Patch0: k8s-cascade.patch |
|
| 11 | 12 |
Group: Development/Tools |
| 12 | 13 |
Vendor: VMware, Inc. |
| 13 | 14 |
Distribution: Photon |
| ... | ... |
@@ -47,6 +48,7 @@ cd .. |
| 47 | 47 |
tar xf %{SOURCE1} --no-same-owner
|
| 48 | 48 |
sed -i -e 's|127.0.0.1:4001|127.0.0.1:2379|g' contrib-0.7.0/init/systemd/environ/apiserver |
| 49 | 49 |
cd %{name}-%{version}
|
| 50 |
+%patch0 -p1 |
|
| 50 | 51 |
|
| 51 | 52 |
%build |
| 52 | 53 |
make |
| ... | ... |
@@ -184,6 +186,8 @@ fi |
| 184 | 184 |
%{_bindir}/pause-amd64
|
| 185 | 185 |
|
| 186 | 186 |
%changelog |
| 187 |
+* Tue Jan 23 2018 Ashok Chandrasekar <ashokc@vmware.com> 1.8.1-4 |
|
| 188 |
+- Add Cascade cloud provider. |
|
| 187 | 189 |
* Fri Nov 15 2017 Vinay Kulkarni <kulkarniv@vmware.com> 1.8.1-3 |
| 188 | 190 |
- Specify --kubeconfig to pass in config file. |
| 189 | 191 |
* Fri Nov 10 2017 Vinay Kulkarni <kulkarniv@vmware.com> 1.8.1-2 |
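
For illustration, a minimal, self-contained sketch of how the CascadeDiskVolumeSource added by this patch is consumed through the Kubernetes API. It only compiles against the patched 1.8.1 tree (the CascadeDisk fields exist only with this patch applied); the object name and disk ID below are hypothetical placeholders, not values from the patch.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A PersistentVolume backed by the new CascadeDisk source, mirroring the
	// object the cascade_disk provisioner builds after CreateVolume succeeds.
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "cascade-pv-example"}, // hypothetical name
		Spec: v1.PersistentVolumeSpec{
			Capacity: v1.ResourceList{
				v1.ResourceStorage: resource.MustParse("10Gi"),
			},
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				CascadeDisk: &v1.CascadeDiskVolumeSource{
					DiskID: "disk-0000", // hypothetical Cascade disk ID
					FSType: "ext4",      // "ext4" is also the implied default
				},
			},
		},
	}
	fmt.Printf("PV %s is backed by Cascade disk %s\n", pv.Name, pv.Spec.CascadeDisk.DiskID)
}

The same shape is what API clients supply via the cascadeDisk field registered in the swagger specs earlier in this patch.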