Browse code

package-builder: explicitly use bash as executable for running commands

subprocess uses /bin/sh, and on systems like Ubuntu /bin/sh points to
/bin/dash. The dash shell doesn't support constructs such as brace
expansion, e.g.:

mkdir -p /tmp/{a,b,c}

So use /bin/bash as the executable when running commands in a shell.

Change-Id: Id4a8f093c2e06426c8d8c5215a3a6a987b6576cf
Signed-off-by: Shreenidhi Shedi <sshedi@vmware.com>
Reviewed-on: http://photon-jenkins.eng.vmware.com:8082/19438
Tested-by: gerrit-photon <photon-checkins@vmware.com>
Reviewed-by: Alexey Makhalov <amakhalov@vmware.com>

Shreenidhi Shedi authored on 2023/02/10 19:41:00
Showing 6 changed files
... ...
@@ -603,7 +603,8 @@ class CleanUp:
603 603
         )
604 604
         command = command.format(ph_path, basecommit)
605 605
 
606
-        with Popen(command, stdout=PIPE, stderr=None, shell=True) as process:
606
+        with Popen(command, stdout=PIPE, stderr=None,
607
+                   shell=True, executable="/bin/bash") as process:
607 608
             spec_fns = process.communicate()[0].decode("utf-8")
608 609
             if process.returncode:
609 610
                 raise Exception("Error in clean_stage_for_incremental_build")
... ...
@@ -975,7 +976,8 @@ class CheckTools:
975 975
             if commit_id:
976 976
                 command = "git diff --name-only %s" % commit_id
977 977
 
978
-        with Popen(command, stdout=PIPE, stderr=None, shell=True) as process:
978
+        with Popen(command, stdout=PIPE, stderr=None,
979
+                   shell=True, executable="/bin/bash") as process:
979 980
             files = process.communicate()[0].decode("utf-8").splitlines()
980 981
             if process.returncode:
981 982
                 raise Exception("Something went wrong in check_spec_files")
... ...
@@ -1010,7 +1012,8 @@ class CheckTools:
1010 1010
             local_hash = photon_installer.__version__.split("+")[1]
1011 1011
 
1012 1012
             remote_hash = "git ls-remote %s HEAD | cut -f1" % url
1013
-            with Popen(remote_hash, stdout=PIPE, stderr=None, shell=True) as p:
1013
+            with Popen(remote_hash, stdout=PIPE, stderr=None,
1014
+                       shell=True, executable="/bin/bash") as p:
1014 1015
                 remote_hash = p.communicate()[0].decode("utf-8")
1015 1016
                 if p.returncode:
1016 1017
                     raise Exception("Something went wrong in check_photon_installer")
... ...
@@ -1372,7 +1375,8 @@ def set_default_value_of_config():
1372 1372
         configdict.setdefault(key, {}).setdefault(cfg, None)
1373 1373
 
1374 1374
     key = "photon-build-param"
1375
-    ret = subprocess.check_output(["git rev-parse --short HEAD"], shell=True)
1375
+    ret = subprocess.check_output(["git rev-parse --short HEAD"],
1376
+                                  shell=True, executable="/bin/bash")
1376 1377
     ret = ret.decode("ASCII").rstrip()
1377 1378
     configdict[key]["input-photon-build-number"] = ret
1378 1379
 
... ...
@@ -54,7 +54,10 @@ class Utils(object):
54 54
         # use debug parameter for now, implement logging for image builder later
55 55
         if debug:
56 56
             print(cmd)
57
-        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
57
+        p = subprocess.Popen(cmd,
58
+                             shell=True, executable="/bin/bash",
59
+                             stdout=subprocess.PIPE,
60
+                             stderr=subprocess.PIPE)
58 61
         output, err = p.communicate()
59 62
         rc = p.returncode
60 63
         if not ignore_errors:
... ...
@@ -26,16 +26,23 @@ class CommandUtils:
26 26
     @staticmethod
27 27
     def runCommandInShell(cmd, logfile=None, logfn=None):
28 28
         if logfn:
29
-            process = subprocess.Popen("%s" %cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
29
+            process = subprocess.Popen(cmd,
30
+                                       shell=True, executable="/bin/bash",
31
+                                       stdout=subprocess.PIPE,
32
+                                       stderr=subprocess.STDOUT)
33
+
30 34
             logfn(process.communicate()[0].decode())
31 35
         else:
32 36
             if logfile is None:
33 37
                 logfile = os.devnull
34 38
             with open(logfile, "w") as f:
35
-                process = subprocess.Popen("%s" %cmd, shell=True, stdout=f, stderr=f)
39
+                process = subprocess.Popen(cmd,
40
+                                           shell=True, executable="/bin/bash",
41
+                                           stdout=f, stderr=f)
36 42
         return process.wait()
37 43
 
38 44
     @staticmethod
39 45
     def runShellCmd(cmd):
40
-        if subprocess.Popen([cmd], shell=True).wait():
46
+        if subprocess.Popen([cmd],
47
+                            shell=True, executable="/bin/bash").wait():
41 48
             raise Exception(f"ERROR: {cmd} failed")
... ...
@@ -8,12 +8,14 @@ import subprocess
8 8
 import uuid
9 9
 import sys
10 10
 import signal
11
+
11 12
 from argparse import ArgumentParser
12 13
 from Logger import Logger
13 14
 from constants import constants
14 15
 from kubernetes import client, config, watch
15 16
 from kubernetes import stream
16 17
 
18
+
17 19
 class DistributedBuilder:
18 20
 
19 21
     def __init__(self, distributedBuildConfig, logName=None, logPath=None):
... ...
@@ -35,101 +37,102 @@ class DistributedBuilder:
35 35
     def getBuildGuid(self):
36 36
          guid = str(uuid.uuid4()).split("-")[1]
37 37
          guid = guid.lower()
38
-         self.logger.info("guid: %s"%guid)
38
+         self.logger.info(f"guid: {guid}")
39 39
          return guid
40 40
 
41 41
     def createPersistentVolume(self):
42
-        with open(os.path.join(os.path.dirname(__file__), "yaml/persistentVolume.yaml"), 'r') as f:
42
+        with open(os.path.join(os.path.dirname(__file__), "yaml/persistentVolume.yaml"), "r") as f:
43 43
             for pvFile in yaml.safe_load_all(f):
44
-                pvFile['metadata']['name'] += "-" + self.buildGuid
45
-                pvFile['metadata']['labels']['storage-tier'] += "-" + self.buildGuid
46
-                pvFile['spec']['nfs']['server'] = self.distributedBuildConfig["nfs-server-ip"]
47
-                if 'nfspod' in pvFile['metadata']['name']:
48
-                    pvFile['spec']['nfs']['path'] = self.distributedBuildConfig["nfs-server-path"]
44
+                pvFile["metadata"]["name"] += f"-{self.buildGuid}"
45
+                pvFile["metadata"]["labels"]["storage-tier"] += f"-{self.buildGuid}"
46
+                pvFile["spec"]["nfs"]["server"] = self.distributedBuildConfig["nfs-server-ip"]
47
+                if "nfspod" in pvFile["metadata"]["name"]:
48
+                    pvFile["spec"]["nfs"]["path"] = self.distributedBuildConfig["nfs-server-path"]
49 49
                 else:
50
-                    pvFile['spec']['nfs']['path'] = self.distributedBuildConfig["nfs-server-path"] + "/build-" + self.buildGuid + pvFile['spec']['nfs']['path']
50
+                    pvFile["spec"]["nfs"]["path"] = self.distributedBuildConfig["nfs-server-path"] + f"/build-{self.buildGuid}" + pvFile["spec"]["nfs"]["path"]
51 51
 
52 52
                 try:
53 53
                     resp = self.coreV1ApiInstance.create_persistent_volume(body=pvFile)
54
-                    self.logger.info("Created pv %s"%resp.metadata.name)
54
+                    self.logger.info(f"Created pv {resp.metadata.name}")
55 55
                 except client.rest.ApiException as e:
56
-                    self.logger.error("Exception when calling CoreV1Api->create_persistent_volume: %s\n" % e.reason)
56
+                    self.logger.error(f"Exception when calling CoreV1Api->create_persistent_volume: {e.reason}\n")
57 57
                     self.clean()
58 58
                     sys.exit(1)
59 59
 
60 60
     def createPersistentVolumeClaim(self):
61
-        with open(os.path.join(os.path.dirname(__file__), "yaml/persistentVolumeClaim.yaml"), 'r') as f:
61
+        with open(os.path.join(os.path.dirname(__file__), "yaml/persistentVolumeClaim.yaml"), "r") as f:
62 62
             for pvcFile in yaml.safe_load_all(f):
63
-                pvcFile['metadata']['name'] += "-" + self.buildGuid
64
-                pvcFile['spec']['selector']['matchLabels']['storage-tier'] += "-" + self.buildGuid
63
+                pvcFile["metadata"]["name"] += f"-{self.buildGuid}"
64
+                pvcFile["spec"]["selector"]["matchLabels"]["storage-tier"] += f"-{self.buildGuid}"
65 65
                 try:
66
-                    resp = self.coreV1ApiInstance.create_namespaced_persistent_volume_claim(namespace='default', body=pvcFile)
67
-                    self.logger.info("created pvc %s"%resp.metadata.name)
66
+                    resp = self.coreV1ApiInstance.create_namespaced_persistent_volume_claim(namespace="default", body=pvcFile)
67
+                    self.logger.info(f"Created pvc {resp.metadata.name}")
68 68
                 except client.rest.ApiException as e:
69
-                    self.logger.error("Exception when calling CoreV1Api->create_namespaced_persistent_volume_claim: %s\n" % e.reason)
69
+                    self.logger.error(f"Exception when calling CoreV1Api->create_namespaced_persistent_volume_claim: {e.reason}\n")
70 70
                     self.clean()
71 71
                     sys.exit(1)
72 72
 
73 73
     def createNfsPod(self):
74 74
         with open(os.path.join(os.path.dirname(__file__), "yaml/nfspod.yaml")) as f:
75 75
             nfspodFile = yaml.safe_load(f)
76
-            nfspodFile['metadata']['name'] += "-" + self.buildGuid
77
-            nfspodFile['spec']['containers'][0]['workingDir'] += "/build-" + self.buildGuid
78
-            nfspodFile['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] += "-" + self.buildGuid
76
+            nfspodFile["metadata"]["name"] += f"-{self.buildGuid}"
77
+            nfspodFile["spec"]["containers"][0]["workingDir"] += f"/build-{self.buildGuid}"
78
+            nfspodFile["spec"]["volumes"][0]["persistentVolumeClaim"]["claimName"] += f"-{self.buildGuid}"
79 79
             try:
80
-                resp = self.coreV1ApiInstance.create_namespaced_pod(namespace='default', body=nfspodFile)
81
-                self.logger.info("created nfspod %s"%resp.metadata.name)
80
+                resp = self.coreV1ApiInstance.create_namespaced_pod(namespace="default", body=nfspodFile)
81
+                self.logger.info(f"Created nfspod {resp.metadata.name}")
82 82
             except client.rest.ApiException as e:
83
-                self.logger.error("Exception when calling CoreV1Api->create_namespaced_pod: %s\n" % e.reason)
83
+                self.logger.error(f"Exception when calling CoreV1Api->create_namespaced_pod: {e.reason}\n")
84 84
                 self.clean()
85 85
                 sys.exit(1)
86 86
 
87 87
     def createMasterService(self):
88 88
         with open(os.path.join(os.path.dirname(__file__), "yaml/masterService.yaml")) as f:
89 89
             masterServiceFile = yaml.safe_load(f)
90
-            masterServiceFile['metadata']['name'] += "-" + self.buildGuid
91
-            masterServiceFile['spec']['selector']['app'] += "-" + self.buildGuid
90
+            masterServiceFile["metadata"]["name"] += f"-{self.buildGuid}"
91
+            masterServiceFile["spec"]["selector"]["app"] += f"-{self.buildGuid}"
92 92
             try:
93
-                resp = self.coreV1ApiInstance.create_namespaced_service(namespace='default', body=masterServiceFile)
94
-                self.logger.info("created pvc %s"%resp.metadata.name)
93
+                resp = self.coreV1ApiInstance.create_namespaced_service(namespace="default", body=masterServiceFile)
94
+                self.logger.info(f"Created pvc {resp.metadata.name}")
95 95
             except client.rest.ApiException as e:
96
-                self.logger.error("Exception when calling CoreV1Api->create_namespaced_service: %s\n" % e.reason)
96
+                self.logger.error(f"Exception when calling CoreV1Api->create_namespaced_service: {e.reason}\n")
97 97
                 self.clean()
98 98
                 sys.exit(1)
99 99
 
100 100
     def createMasterJob(self):
101 101
         with open(os.path.join(os.path.dirname(__file__), "yaml/master.yaml")) as f:
102 102
             masterFile = yaml.safe_load(f)
103
-            masterFile['metadata']['name'] += "-" + self.buildGuid
104
-            masterFile['spec']['template']['metadata']['labels']['app'] += "-" + self.buildGuid
105
-            masterFile['spec']['template']['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] += "-" + self.buildGuid
106
-            str = masterFile['spec']['template']['spec']['containers'][0]['args'][1]
107
-            masterFile['spec']['template']['spec']['containers'][0]['args'][1] = str + " && " + self.distributedBuildConfig["command"]
103
+            masterFile["metadata"]["name"] += f"-{self.buildGuid}"
104
+            masterFile["spec"]["template"]["metadata"]["labels"]["app"] += f"-{self.buildGuid}"
105
+            masterFile["spec"]["template"]["spec"]["volumes"][0]["persistentVolumeClaim"]["claimName"] += f"-{self.buildGuid}"
106
+            tmp_str = masterFile["spec"]["template"]["spec"]["containers"][0]["args"][1]
107
+            masterFile["spec"]["template"]["spec"]["containers"][0]["args"][1] = f"{tmp_str} && " + self.distributedBuildConfig["command"]
108 108
             try:
109 109
                 resp = self.batchV1ApiInstance.create_namespaced_job(namespace="default", body=masterFile)
110
-                self.logger.info("Created Job %s"%resp.metadata.name)
110
+                self.logger.info(f"Created Job {resp.metadata.name}")
111 111
             except client.rest.ApiException as e:
112
-                self.logger.error("Exception when calling BatchV1Api->create_namespaced_job: %s\n" % e.reason)
112
+                self.logger.error(f"Exception when calling BatchV1Api->create_namespaced_job: {e.reason}\n")
113 113
                 self.clean()
114 114
                 sys.exit(1)
115 115
 
116 116
     def createDeployment(self):
117 117
         with open(os.path.join(os.path.dirname(__file__), "yaml/worker.yaml")) as f:
118 118
             workerFile = yaml.safe_load(f)
119
-            workerFile['metadata']['name'] += "-" + self.buildGuid
120
-            workerFile['spec']['template']['spec']['containers'][0]['env'][0]['value'] = self.buildGuid.upper()
121
-            workerFile['spec']['template']['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] += "-" + self.buildGuid
122
-            workerFile['spec']['template']['spec']['volumes'][1]['persistentVolumeClaim']['claimName'] += "-" + self.buildGuid
123
-            workerFile['spec']['template']['spec']['volumes'][2]['persistentVolumeClaim']['claimName'] += "-" + self.buildGuid
124
-            workerFile['spec']['template']['spec']['volumes'][3]['persistentVolumeClaim']['claimName'] += "-" + self.buildGuid
125
-            workerFile['spec']['template']['spec']['volumes'][4]['persistentVolumeClaim']['claimName'] += "-" + self.buildGuid
126
-            workerFile['spec']['template']['spec']['volumes'][5]['persistentVolumeClaim']['claimName'] += "-" + self.buildGuid
127
-            workerFile['spec']['replicas'] = self.distributedBuildConfig["pods"]
119
+            workerFile["metadata"]["name"] += "-" + self.buildGuid
120
+            workerFile["spec"]["template"]["spec"]["containers"][0]["env"][0]["value"] = self.buildGuid.upper()
121
+            guid = f"-{self.buildGuid}"
122
+            workerFile["spec"]["template"]["spec"]["volumes"][0]["persistentVolumeClaim"]["claimName"] += guid
123
+            workerFile["spec"]["template"]["spec"]["volumes"][1]["persistentVolumeClaim"]["claimName"] += guid
124
+            workerFile["spec"]["template"]["spec"]["volumes"][2]["persistentVolumeClaim"]["claimName"] += guid
125
+            workerFile["spec"]["template"]["spec"]["volumes"][3]["persistentVolumeClaim"]["claimName"] += guid
126
+            workerFile["spec"]["template"]["spec"]["volumes"][4]["persistentVolumeClaim"]["claimName"] += guid
127
+            workerFile["spec"]["template"]["spec"]["volumes"][5]["persistentVolumeClaim"]["claimName"] += guid
128
+            workerFile["spec"]["replicas"] = self.distributedBuildConfig["pods"]
128 129
             try:
129 130
                 resp = self.AppsV1ApiInstance.create_namespaced_deployment(body=workerFile, namespace="default")
130
-                self.logger.info("Created deployment %s"%resp.metadata.name)
131
+                self.logger.info(f"Created deployment {resp.metadata.name}")
131 132
             except client.rest.ApiException as e:
132
-                self.logger.error("Exception when calling AppsV1Api->create_namespaced_deployment: %s\n" % e.reason)
133
+                self.logger.error(f"Exception when calling AppsV1Api->create_namespaced_deployment: {e.reason}\n")
133 134
                 self.clean()
134 135
                 sys.exit(1)
135 136
 
... ...
@@ -138,77 +141,80 @@ class DistributedBuilder:
138 138
         for name in pvNames:
139 139
             try:
140 140
                 resp = self.coreV1ApiInstance.delete_persistent_volume(name + "-" + self.buildGuid)
141
-                self.logger.info("Deleted pv %s"%name)
141
+                self.logger.info(f"Deleted pv {name}")
142 142
             except client.rest.ApiException as e:
143
-                self.logger.error("Exception when calling CoreV1Api->delete_persistent_volume: %s\n" % e.reason)
143
+                self.logger.error(f"Exception when calling CoreV1Api->delete_persistent_volume: {e.reason}")
144 144
 
145 145
     def deletePersistentVolumeClaim(self):
146 146
         pvcNames = ["builder", "logs", "specs", "rpms", "publishrpms", "publishxrpms", "photon", "nfspod"]
147 147
         for name in pvcNames:
148 148
             try:
149
-                resp = self.coreV1ApiInstance.delete_namespaced_persistent_volume_claim(name + "-" + self.buildGuid, namespace="default")
150
-                self.logger.info("Deleted pvc %s"%name)
149
+                resp = self.coreV1ApiInstance.delete_namespaced_persistent_volume_claim(f"{name}-{self.buildGuid}", namespace="default")
150
+                self.logger.info(f"Deleted pvc {name}")
151 151
             except client.rest.ApiException as e:
152
-                self.logger.error("Exception when calling CoreV1Api->delete_namespaced_persistent_volume_claim: %s\n" % e.reason)
152
+                self.logger.error(f"Exception when calling CoreV1Api->delete_namespaced_persistent_volume_claim: {e.reason}\n")
153 153
 
154 154
     def deleteMasterJob(self):
155 155
        try:
156
-           job = "master" + "-" + self.buildGuid
156
+           job = f"master-{self.buildGuid}"
157 157
            resp = self.batchV1ApiInstance.delete_namespaced_job(name=job, namespace="default", propagation_policy="Foreground", grace_period_seconds=10)
158 158
            self.logger.info("deleted job master")
159 159
        except client.rest.ApiException as e:
160
-           self.logger.error("Exception when calling BatchV1Api->delete_namespaced_job: %s\n" % e.reason)
160
+           self.logger.error(f"Exception when calling BatchV1Api->delete_namespaced_job: {e.reason}")
161 161
 
162 162
     def deleteBuild(self):
163 163
         self.logger.info("Removing Build folder ...")
164
-        pod = "nfspod" + "-" + self.buildGuid
165
-        cmd = ['/bin/sh', '-c', 'rm -rf ' + '/root/build-' + self.buildGuid]
164
+        pod = f"nfspod-{self.buildGuid}"
165
+        cmd = ["/bin/bash", "-c", f"rm -rf /root/build-{self.buildGuid}"]
166 166
         try:
167
-            resp = stream.stream(self.coreV1ApiInstance.connect_get_namespaced_pod_exec, pod, 'default', command=cmd, \
167
+            resp = stream.stream(self.coreV1ApiInstance.connect_get_namespaced_pod_exec, pod, "default", command=cmd, \
168 168
                    stderr=True, stdin=False, stdout=True, tty=False, _preload_content=False)
169 169
             resp.run_forever(timeout=10)
170 170
             self.logger.info("Deleted Build folder Successfully...")
171 171
         except client.rest.ApiException as e:
172
-            self.logger.error("Exception when calling CoreV1Api->connect_namespaced_pod_exec: %s\n" % e.reason)
172
+            self.logger.error(f"Exception when calling CoreV1Api->connect_namespaced_pod_exec: {e.reason}")
173 173
 
174 174
     def deleteNfsPod(self):
175 175
         try:
176
-            pod = "nfspod" + "-" + self.buildGuid
176
+            pod = f"nfspod-{self.buildGuid}"
177 177
             resp = self.coreV1ApiInstance.delete_namespaced_pod(name=pod, namespace="default")
178 178
             self.logger.info("deleted nfs pod")
179 179
         except client.rest.ApiException as e:
180
-            self.logger.error("Exception when calling CoreV1Api->delete_namespaced_pod: %s\n" % e.reason)
180
+            self.logger.error(f"Exception when calling CoreV1Api->delete_namespaced_pod: {e.reason}")
181 181
 
182 182
     def deleteMasterService(self):
183 183
        try:
184
-           service = "master-service" + "-" + self.buildGuid
184
+           service = f"master-service-{self.buildGuid}"
185 185
            resp = self.coreV1ApiInstance.delete_namespaced_service(name=service, namespace="default")
186 186
            self.logger.info("deleted master service")
187 187
        except client.rest.ApiException as e:
188
-           self.logger.error("Exception when calling BatchV1Api->delete_namespaced_service %s\n" % e.reason)
188
+           self.logger.error(f"Exception when calling BatchV1Api->delete_namespaced_service {e.reason}\n")
189 189
 
190 190
     def deleteDeployment(self):
191 191
         try:
192
-            deploy = "worker" + "-" + self.buildGuid
192
+            deploy = f"worker-{self.buildGuid}"
193 193
             resp = self.AppsV1ApiInstance.delete_namespaced_deployment(name = deploy, namespace="default", grace_period_seconds=15)
194 194
             self.logger.info("deleted worker deployment ")
195 195
         except client.rest.ApiException as e:
196
-            self.logger.error("Exception when calling AppsV1Api->delete_namespaced_deployment: %s\n" % e.reason)
196
+            self.logger.error(f"Exception when calling AppsV1Api->delete_namespaced_deployment: {e.reason}\n")
197 197
 
198 198
     def copyToNfs(self):
199
-        podName = "nfspod" + "-" + self.buildGuid
199
+        podName = f"nfspod-{self.buildGuid}"
200 200
         while True:
201
-            resp = self.coreV1ApiInstance.read_namespaced_pod(name=podName, namespace='default')
201
+            resp = self.coreV1ApiInstance.read_namespaced_pod(name=podName, namespace="default")
202 202
             status = resp.status.phase
203
-            if status == 'Running':
203
+            if status == "Running":
204 204
                 break
205 205
 
206
-        cmd = "kubectl cp " + str( os.path.join(os.path.dirname(__file__)).replace('support/package-builder', '')) \
207
-               + " " + podName + ":/root/" + "build-" + self.buildGuid + "/photon"
208
-        self.logger.info("%s"%cmd)
209
-        process = subprocess.Popen("%s" %cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
210
-        retval = process.wait()
211
-        if retval == 0:
206
+        cmd = "kubectl cp "
207
+        cmd += str(os.path.join(os.path.dirname(__file__)).replace("support/package-builder", ""))
208
+        cmd += f" {podName}:/root/build-{self.buildGuid}/photon"
209
+        self.logger.info(cmd)
210
+        process = subprocess.Popen(cmd,
211
+                                   shell=True, executable="/bin/bash",
212
+                                   stdout=subprocess.PIPE,
213
+                                   stderr=subprocess.STDOUT)
214
+        if process.wait():
212 215
             self.logger.info("kubectl cp successfull.")
213 216
         else:
214 217
             self.logger.error("kubectl cp failed.")
... ...
@@ -216,18 +222,21 @@ class DistributedBuilder:
216 216
             sys.exit(1)
217 217
 
218 218
     def copyFromNfs(self):
219
-        podName = "nfspod" + "-" + self.buildGuid
219
+        podName = f"nfspod-{self.buildGuid}"
220 220
         while True:
221
-            resp = self.coreV1ApiInstance.read_namespaced_pod(name=podName, namespace='default')
221
+            resp = self.coreV1ApiInstance.read_namespaced_pod(name=podName, namespace="default")
222 222
             status = resp.status.phase
223
-            if status == 'Running':
223
+            if status == "Running":
224 224
                 break
225 225
 
226
-        cmd = "kubectl cp" + " " + podName + ":/root/" + "build-" + self.buildGuid + "/photon/stage" + " " + str( os.path.join(os.path.dirname(__file__)).replace('support/package-builder', '')) + "stage"
227
-        self.logger.info("%s"%cmd)
228
-        process = subprocess.Popen("%s" %cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
229
-        retval = process.wait()
230
-        if retval == 0:
226
+        cmd = f"kubectl cp {podName}:/root/build-{self.buildGuid}/photon/stage "
227
+        cmd += str(os.path.join(os.path.dirname(__file__)).replace("support/package-builder", "")) + "stage"
228
+        self.logger.info(cmd)
229
+        process = subprocess.Popen(cmd,
230
+                                   shell=True, executable="/bin/bash",
231
+                                   stdout=subprocess.PIPE,
232
+                                   stderr=subprocess.STDOUT)
233
+        if process.wait():
231 234
             self.logger.info("kubectl cp successfull.")
232 235
         else:
233 236
             self.logger.error("kubectl cp failed.")
... ...
@@ -236,9 +245,9 @@ class DistributedBuilder:
236 236
 
237 237
     def monitorJob(self):
238 238
         w = watch.Watch()
239
-        for job in w.stream(self.batchV1ApiInstance.list_namespaced_job, namespace='default', timeout_seconds=21600):
240
-            if "master" in job['object'].metadata.name:
241
-                name = job['object']
239
+        for job in w.stream(self.batchV1ApiInstance.list_namespaced_job, namespace="default", timeout_seconds=21600):
240
+            if "master" in job["object"].metadata.name:
241
+                name = job["object"]
242 242
                 self.logger.info("Checking job status ...")
243 243
                 self.logger.debug(name.status)
244 244
                 if name.status.succeeded or name.status.failed:
... ...
@@ -247,19 +256,19 @@ class DistributedBuilder:
247 247
                     break
248 248
 
249 249
     def getLogs(self):
250
-        label = "app=master" + "-" + self.buildGuid
251
-        resp = self.coreV1ApiInstance.list_namespaced_pod(label_selector = label, namespace='default')
250
+        label = f"app=master-{self.buildGuid}"
251
+        resp = self.coreV1ApiInstance.list_namespaced_pod(label_selector = label, namespace="default")
252 252
         podName = resp.items[0].metadata.name
253
-        status = ''
253
+        status = ""
254 254
         while True:
255
-            resp = self.coreV1ApiInstance.read_namespaced_pod(name=podName, namespace='default')
255
+            resp = self.coreV1ApiInstance.read_namespaced_pod(name=podName, namespace="default")
256 256
             status = resp.status.phase
257
-            if status == 'Running' or status == 'Succeeded':
257
+            if status == "Running" or status == "Succeeded":
258 258
                 break
259 259
 
260 260
         w = watch.Watch()
261 261
         try:
262
-            for line in w.stream(self.coreV1ApiInstance.read_namespaced_pod_log, name = podName, namespace='default'):
262
+            for line in w.stream(self.coreV1ApiInstance.read_namespaced_pod_log, name = podName, namespace="default"):
263 263
                 self.logger.info(line)
264 264
         except Exception as e:
265 265
             self.logger.error(e)
... ...
@@ -294,6 +303,7 @@ class DistributedBuilder:
294 294
         self.createMasterJob()
295 295
         self.createDeployment()
296 296
 
297
+
297 298
 def main(distributedBuildConfig):
298 299
     distributedBuilder = DistributedBuilder(distributedBuildConfig)
299 300
     signal.signal(signal.SIGINT, distributedBuilder.signal_handler)
... ...
@@ -303,6 +313,7 @@ def main(distributedBuildConfig):
303 303
     distributedBuilder.copyFromNfs()
304 304
     distributedBuilder.clean()
305 305
 
306
+
306 307
 if __name__ == "__main__":
307 308
 
308 309
     parser = ArgumentParser()
... ...
@@ -313,6 +324,8 @@ if __name__ == "__main__":
313 313
     options = parser.parse_args()
314 314
     constants.setLogPath(options.logPath)
315 315
     constants.setLogLevel(options.logLevel)
316
-    with open(os.path.join(os.path.dirname(__file__), options.distributedBuildFile), 'r') as configFile:
316
+
317
+    with open(os.path.join(os.path.dirname(__file__), options.distributedBuildFile), "r") as configFile:
317 318
         distributedBuildConfig = json.load(configFile)
319
+
318 320
     main(distributedBuildConfig)
... ...
@@ -132,14 +132,14 @@ class Chroot(Sandbox):
132 132
         for mountpoint in listmountpoints:
133 133
             cmd = f"umount {mountpoint}"
134 134
             process = subprocess.Popen(f"{cmd} && sync && sync && sync",
135
-                                       shell=True,
135
+                                       shell=True, executable="/bin/bash",
136 136
                                        stdout=subprocess.PIPE,
137 137
                                        stderr=subprocess.PIPE)
138 138
             if process.wait():
139 139
                 # Try unmount with lazy umount
140 140
                 cmd = f"umount -l {mountpoint}"
141 141
                 process = subprocess.Popen(f"{cmd} && sync && sync && sync",
142
-                                           shell=True,
142
+                                           shell=True, executable="/bin/bash",
143 143
                                            stdout=subprocess.PIPE,
144 144
                                            stderr=subprocess.PIPE)
145 145
                 if process.wait():
... ...
@@ -149,7 +149,8 @@ class Chroot(Sandbox):
149 149
         if not chrootPath.endswith("/"):
150 150
             chrootPath += "/"
151 151
         cmd = f"mount | grep {chrootPath} | cut -d' ' -s -f3"
152
-        process = subprocess.Popen(f"{cmd}", shell=True,
152
+        process = subprocess.Popen(f"{cmd}",
153
+                                   shell=True, executable="/bin/bash",
153 154
                                    stdout=subprocess.PIPE,
154 155
                                    stderr=subprocess.PIPE)
155 156
         if process.wait():
... ...
@@ -1,4 +1,5 @@
1
-#!/usr/bin/python3
1
+#!/usr/bin/env python3
2
+
2 3
 import subprocess
3 4
 import sys
4 5
 
... ...
@@ -21,12 +22,12 @@ def cleanUpChroot(chrootPath):
21 21
     return True
22 22
 
23 23
 def removeAllFilesFromChroot(chrootPath):
24
-    cmd = "rm -rf " + chrootPath
25
-    process = subprocess.Popen("%s" %cmd, shell=True,
24
+    cmd = f"rm -rf {chrootPath}"
25
+    process = subprocess.Popen(cmd,
26
+                               shell=True, executable="/bin/bash",
26 27
                                stdout=subprocess.PIPE,
27 28
                                stderr=subprocess.PIPE)
28
-    retval = process.wait()
29
-    if retval != 0:
29
+    if process.wait():
30 30
         print("Unable to remove files from chroot " + chrootPath)
31 31
         return False
32 32
     return True
... ...
@@ -36,11 +37,12 @@ def unmountmountpoints(listmountpoints):
36 36
         return True
37 37
     result = True
38 38
     for mountpoint in listmountpoints:
39
-        cmd = "umount " + mountpoint
40
-        process = subprocess.Popen("%s" %cmd, shell=True, stdout=subprocess.PIPE,
39
+        cmd = f"umount {mountpoint}"
40
+        process = subprocess.Popen(cmd,
41
+                                   shell=True, executable="/bin/bash",
42
+                                   stdout=subprocess.PIPE,
41 43
                                    stderr=subprocess.PIPE)
42
-        retval = process.wait()
43
-        if retval != 0:
44
+        if process.wait():
44 45
             result = False
45 46
             print("Unable to unmount " + mountpoint)
46 47
             break
... ...
@@ -51,13 +53,13 @@ def unmountmountpoints(listmountpoints):
51 51
 
52 52
 def findmountpoints(chrootPath):
53 53
     if not chrootPath.endswith("/"):
54
-        chrootPath = chrootPath + "/"
55
-    cmd = "mount | grep " + chrootPath + " | cut -d' ' -s -f3"
56
-    process = subprocess.Popen("%s" %cmd, shell=True,
54
+        chrootPath = f"{chrootPath}/"
55
+    cmd = f"mount | grep {chrootPath} | cut -d' ' -s -f3"
56
+    process = subprocess.Popen(cmd,
57
+                               shell=True, executable="/bin/bash",
57 58
                                stdout=subprocess.PIPE,
58 59
                                stderr=subprocess.PIPE)
59
-    retval = process.wait()
60
-    if retval != 0:
60
+    if process.wait():
61 61
         print("Unable to find mountpoints in chroot")
62 62
         return False, None
63 63
     mountpoints = process.communicate()[0].decode()
... ...
@@ -68,6 +70,7 @@ def findmountpoints(chrootPath):
68 68
     listmountpoints = mountpoints.split(" ")
69 69
     return True, listmountpoints
70 70
 
71
+
71 72
 def sortmountpoints(listmountpoints):
72 73
     if listmountpoints is None:
73 74
         return True
... ...
@@ -75,6 +78,7 @@ def sortmountpoints(listmountpoints):
75 75
     sorted(sortedmountpoints)
76 76
     sortedmountpoints.reverse()
77 77
 
78
+
78 79
 def main():
79 80
     if len(sys.argv) < 2:
80 81
         print("Usage: ./clean-up-chroot.py <chrootpath>")
... ...
@@ -83,5 +87,6 @@ def main():
83 83
         sys.exit(1)
84 84
     sys.exit(0)
85 85
 
86
+
86 87
 if __name__ == "__main__":
87 88
     main()