Browse code

Add exit return codes for all primary commands

Commands now return some form of return code, which then shows up as
the s3cmd process exit code. If an S3Error is raised, we return
EX_SOFTWARE and rely on the point where it was raised having emitted
enough error() messaging to make the failure understandable.

Matt Domsch authored on 2014/05/09 05:29:51
Showing 1 changed file
... ...
@@ -59,10 +59,15 @@ def cmd_du(args):
59 59
         uri = S3Uri(args[0])
60 60
         if uri.type == "s3" and uri.has_bucket():
61 61
             subcmd_bucket_usage(s3, uri)
62
-            return
62
+            return EX_OK
63 63
     subcmd_bucket_usage_all(s3)
64
+    return EX_OK
64 65
 
65 66
 def subcmd_bucket_usage_all(s3):
67
+    """
68
+    Returns: sum of bucket sizes as integer
69
+    Raises: S3Error
70
+    """
66 71
     response = s3.list_all_buckets()
67 72
 
68 73
     buckets_size = 0
... ...
@@ -74,8 +79,14 @@ def subcmd_bucket_usage_all(s3):
74 74
     total_size_str = str(total_size) + size_coeff
75 75
     output(u"".rjust(8, "-"))
76 76
     output(u"%s Total" % (total_size_str.ljust(8)))
77
+    return size
77 78
 
78 79
 def subcmd_bucket_usage(s3, uri):
80
+    """
81
+    Returns: bucket size as integer
82
+    Raises: S3Error
83
+    """
84
+
79 85
     bucket = uri.bucket()
80 86
     object = uri.object()
81 87
 
... ...
@@ -91,9 +102,7 @@ def subcmd_bucket_usage(s3, uri):
91 91
         except S3Error, e:
92 92
             if S3.codes.has_key(e.info["Code"]):
93 93
                 error(S3.codes[e.info["Code"]] % bucket)
94
-                return
95
-            else:
96
-                raise
94
+            raise
97 95
 
98 96
         # objects in the current scope:
99 97
         for obj in response["list"]:
... ...
@@ -114,8 +123,9 @@ def cmd_ls(args):
114 114
         uri = S3Uri(args[0])
115 115
         if uri.type == "s3" and uri.has_bucket():
116 116
             subcmd_bucket_list(s3, uri)
117
-            return
117
+            return EX_OK
118 118
     subcmd_buckets_list_all(s3)
119
+    return EX_OK
119 120
 
120 121
 def cmd_buckets_list_all_all(args):
121 122
     s3 = S3(Config())
... ...
@@ -125,7 +135,7 @@ def cmd_buckets_list_all_all(args):
125 125
     for bucket in response["list"]:
126 126
         subcmd_bucket_list(s3, S3Uri("s3://" + bucket["Name"]))
127 127
         output(u"")
128
-
128
+    return EX_OK
129 129
 
130 130
 def subcmd_buckets_list_all(s3):
131 131
     response = s3.list_all_buckets()
... ...
@@ -147,9 +157,7 @@ def subcmd_bucket_list(s3, uri):
147 147
     except S3Error, e:
148 148
         if S3.codes.has_key(e.info["Code"]):
149 149
             error(S3.codes[e.info["Code"]] % bucket)
150
-            return
151
-        else:
152
-            raise
150
+        raise
153 151
 
154 152
     if cfg.list_md5:
155 153
         format_string = u"%(timestamp)16s %(size)9s%(coeff)1s  %(md5)32s  %(uri)s"
... ...
@@ -196,9 +204,8 @@ def cmd_bucket_create(args):
196 196
         except S3Error, e:
197 197
             if S3.codes.has_key(e.info["Code"]):
198 198
                 error(S3.codes[e.info["Code"]] % uri.bucket())
199
-                return
200
-            else:
201
-                raise
199
+            raise
200
+    return EX_OK
202 201
 
203 202
 def cmd_website_info(args):
204 203
     s3 = S3(Config())
... ...
@@ -218,9 +225,8 @@ def cmd_website_info(args):
218 218
         except S3Error, e:
219 219
             if S3.codes.has_key(e.info["Code"]):
220 220
                 error(S3.codes[e.info["Code"]] % uri.bucket())
221
-                return
222
-            else:
223
-                raise
221
+            raise
222
+    return EX_OK
224 223
 
225 224
 def cmd_website_create(args):
226 225
     s3 = S3(Config())
... ...
@@ -234,9 +240,8 @@ def cmd_website_create(args):
234 234
         except S3Error, e:
235 235
             if S3.codes.has_key(e.info["Code"]):
236 236
                 error(S3.codes[e.info["Code"]] % uri.bucket())
237
-                return
238
-            else:
239
-                raise
237
+            raise
238
+    return EX_OK
240 239
 
241 240
 def cmd_website_delete(args):
242 241
     s3 = S3(Config())
... ...
@@ -250,9 +255,8 @@ def cmd_website_delete(args):
250 250
         except S3Error, e:
251 251
             if S3.codes.has_key(e.info["Code"]):
252 252
                 error(S3.codes[e.info["Code"]] % uri.bucket())
253
-                return
254
-            else:
255
-                raise
253
+            raise
254
+    return EX_OK
256 255
 
257 256
 def cmd_expiration_set(args):
258 257
     s3 = S3(Config())
... ...
@@ -269,9 +273,8 @@ def cmd_expiration_set(args):
269 269
         except S3Error, e:
270 270
             if S3.codes.has_key(e.info["Code"]):
271 271
                 error(S3.codes[e.info["Code"]] % uri.bucket())
272
-                return
273
-            else:
274
-                raise
272
+            raise
273
+    return EX_OK
275 274
 
276 275
 def cmd_bucket_delete(args):
277 276
     def _bucket_delete_one(uri):
... ...
@@ -279,25 +282,32 @@ def cmd_bucket_delete(args):
279 279
             response = s3.bucket_delete(uri.bucket())
280 280
             output(u"Bucket '%s' removed" % uri.uri())
281 281
         except S3Error, e:
282
+            if e.info['Code'] == 'NoSuchBucket':
283
+                if cfg.force:
284
+                    return EX_OK
285
+                else:
286
+                    return EX_USAGE
282 287
             if e.info['Code'] == 'BucketNotEmpty' and (cfg.force or cfg.recursive):
283 288
                 warning(u"Bucket is not empty. Removing all the objects from it first. This may take some time...")
284
-                success = subcmd_batch_del(uri_str = uri.uri())
285
-                if success:
289
+                rc = subcmd_batch_del(uri_str = uri.uri())
290
+                if rc == EX_OK:
286 291
                     return _bucket_delete_one(uri)
287 292
                 else:
288 293
                     output(u"Bucket was not removed")
289 294
             elif S3.codes.has_key(e.info["Code"]):
290 295
                 error(S3.codes[e.info["Code"]] % uri.bucket())
291
-                return
292
-            else:
293
-                raise
296
+            raise
297
+        return EX_OK
294 298
 
295 299
     s3 = S3(Config())
296 300
     for arg in args:
297 301
         uri = S3Uri(arg)
298 302
         if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
299 303
             raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
300
-        _bucket_delete_one(uri)
304
+        rc = _bucket_delete_one(uri)
305
+        if rc != EX_OK:
306
+            return rc
307
+    return EX_OK
301 308
 
302 309
 def cmd_object_put(args):
303 310
     cfg = Config()
... ...
@@ -345,7 +355,7 @@ def cmd_object_put(args):
345 345
             output(u"upload: %s -> %s" % (nicekey, local_list[key]['remote_uri']))
346 346
 
347 347
         warning(u"Exiting now because of --dry-run")
348
-        return
348
+        return EX_OK
349 349
 
350 350
     seq = 0
351 351
     for key in local_list:
... ...
@@ -383,6 +393,7 @@ def cmd_object_put(args):
383 383
         if Config().encrypt and full_name != full_name_orig:
384 384
             debug(u"Removing temporary encrypted file: %s" % unicodise(full_name))
385 385
             os.remove(full_name)
386
+    return EX_OK
386 387
 
387 388
 def cmd_object_get(args):
388 389
     cfg = Config()
... ...
@@ -462,7 +473,7 @@ def cmd_object_get(args):
462 462
             output(u"download: %s -> %s" % (remote_list[key]['object_uri_str'], remote_list[key]['local_filename']))
463 463
 
464 464
         warning(u"Exiting now because of --dry-run")
465
-        return
465
+        return EX_OK
466 466
 
467 467
     seq = 0
468 468
     for key in remote_list:
... ...
@@ -537,6 +548,7 @@ def cmd_object_get(args):
537 537
         if Config().delete_after_fetch:
538 538
             s3.object_delete(uri)
539 539
             output(u"File %s removed after fetch" % (uri))
540
+    return EX_OK
540 541
 
541 542
 def cmd_object_del(args):
542 543
     recursive = Config().recursive
... ...
@@ -551,11 +563,19 @@ def cmd_object_del(args):
551 551
                 raise ParameterError("File name required, not only the bucket name. Alternatively use --recursive")
552 552
 
553 553
         if not recursive:
554
-            subcmd_object_del_uri(uri_str)
554
+            rc = subcmd_object_del_uri(uri_str)
555 555
         else:
556
-            subcmd_batch_del(uri_str = uri_str)
556
+            rc = subcmd_batch_del(uri_str = uri_str)
557
+        if not rc:
558
+            return rc
559
+    return EX_OK
557 560
 
558 561
 def subcmd_batch_del(uri_str = None, bucket = None, remote_list = None):
562
+    """
563
+    Returns: EX_OK
564
+    Raises: ValueError
565
+    """
566
+
559 567
     def _batch_del(remote_list):
560 568
         s3 = S3(cfg)
561 569
         to_delete = remote_list[:1000]
... ...
@@ -580,20 +600,24 @@ def subcmd_batch_del(uri_str = None, bucket = None, remote_list = None):
580 580
 
581 581
     if len(remote_list) == 0:
582 582
         warning(u"Remote list is empty.")
583
-        return False
583
+        return EX_OK
584 584
 
585 585
     if cfg.max_delete > 0 and len(remote_list) > cfg.max_delete:
586 586
         warning(u"delete: maximum requested number of deletes would be exceeded, none performed.")
587
-        return False
587
+        return EX_OK
588 588
 
589 589
     _batch_del(remote_list)
590 590
 
591 591
     if cfg.dry_run:
592 592
         warning(u"Exiting now because of --dry-run")
593
-        return False
594
-    return True
593
+        return EX_OK
594
+    return EX_OK
595 595
 
596 596
 def subcmd_object_del_uri(uri_str, recursive = None):
597
+    """
598
+    Returns: True if XXX, False if XXX
599
+    Raises: ValueError
600
+    """
597 601
     s3 = S3(cfg)
598 602
 
599 603
     if recursive is None:
... ...
@@ -606,7 +630,7 @@ def subcmd_object_del_uri(uri_str, recursive = None):
606 606
     info(u"Summary: %d remote files to delete" % remote_count)
607 607
     if cfg.max_delete > 0 and remote_count > cfg.max_delete:
608 608
         warning(u"delete: maximum requested number of deletes would be exceeded, none performed.")
609
-        return
609
+        return False
610 610
 
611 611
     if cfg.dry_run:
612 612
         for key in exclude_list:
... ...
@@ -615,12 +639,13 @@ def subcmd_object_del_uri(uri_str, recursive = None):
615 615
             output(u"delete: %s" % remote_list[key]['object_uri_str'])
616 616
 
617 617
         warning(u"Exiting now because of --dry-run")
618
-        return
618
+        return True
619 619
 
620 620
     for key in remote_list:
621 621
         item = remote_list[key]
622 622
         response = s3.object_delete(S3Uri(item['object_uri_str']))
623 623
         output(u"File %s deleted" % item['object_uri_str'])
624
+    return True
624 625
 
625 626
 def cmd_object_restore(args):
626 627
     s3 = S3(cfg)
... ...
@@ -641,7 +666,7 @@ def cmd_object_restore(args):
641 641
             output(u"restore: %s" % remote_list[key]['object_uri_str'])
642 642
 
643 643
         warning(u"Exiting now because of --dry-run")
644
-        return
644
+        return EX_OK
645 645
 
646 646
     for key in remote_list:
647 647
         item = remote_list[key]
... ...
@@ -652,6 +677,7 @@ def cmd_object_restore(args):
652 652
             output(u"File %s restoration started" % item['object_uri_str'])
653 653
         else:
654 654
             debug(u"Skipping directory since only files may be restored")
655
+    return EX_OK
655 656
 
656 657
 
657 658
 def subcmd_cp_mv(args, process_fce, action_str, message):
... ...
@@ -693,7 +719,7 @@ def subcmd_cp_mv(args, process_fce, action_str, message):
693 693
             output(u"%s: %s -> %s" % (action_str, remote_list[key]['object_uri_str'], remote_list[key]['dest_name']))
694 694
 
695 695
         warning(u"Exiting now because of --dry-run")
696
-        return
696
+        return EX_OK
697 697
 
698 698
     seq = 0
699 699
     for key in remote_list:
... ...
@@ -715,18 +741,19 @@ def subcmd_cp_mv(args, process_fce, action_str, message):
715 715
                 warning(u"Key not found %s" % item['object_uri_str'])
716 716
             else:
717 717
                 raise
718
+    return EX_OK
718 719
 
719 720
 def cmd_cp(args):
720 721
     s3 = S3(Config())
721
-    subcmd_cp_mv(args, s3.object_copy, "copy", u"File %(src)s copied to %(dst)s")
722
+    return subcmd_cp_mv(args, s3.object_copy, "copy", u"File %(src)s copied to %(dst)s")
722 723
 
723 724
 def cmd_modify(args):
724 725
     s3 = S3(Config())
725
-    subcmd_cp_mv(args, s3.object_copy, "modify", u"File %(src)s modified")
726
+    return subcmd_cp_mv(args, s3.object_copy, "modify", u"File %(src)s modified")
726 727
 
727 728
 def cmd_mv(args):
728 729
     s3 = S3(Config())
729
-    subcmd_cp_mv(args, s3.object_move, "move", u"File %(src)s moved to %(dst)s")
730
+    return subcmd_cp_mv(args, s3.object_move, "move", u"File %(src)s moved to %(dst)s")
730 731
 
731 732
 def cmd_info(args):
732 733
     s3 = S3(Config())
... ...
@@ -790,9 +817,8 @@ def cmd_info(args):
790 790
         except S3Error, e:
791 791
             if S3.codes.has_key(e.info["Code"]):
792 792
                 error(S3.codes[e.info["Code"]] % uri.bucket())
793
-                return
794
-            else:
795
-                raise
793
+            raise
794
+    return EX_OK
796 795
 
797 796
 def filedicts_to_keys(*args):
798 797
     keys = set()
... ...
@@ -841,7 +867,7 @@ def cmd_sync_remote2remote(args):
841 841
         for key in src_list:
842 842
             output(u"Sync: %s -> %s" % (src_list[key]['object_uri_str'], src_list[key]['target_uri']))
843 843
         warning(u"Exiting now because of --dry-run")
844
-        return
844
+        return EX_OK
845 845
 
846 846
     # if there are copy pairs, we can't do delete_before, on the chance
847 847
     # we need one of the to-be-deleted files as a copy source.
... ...
@@ -899,6 +925,7 @@ def cmd_sync_remote2remote(args):
899 899
     # Delete items in destination that are not in source
900 900
     if cfg.delete_removed and cfg.delete_after:
901 901
         subcmd_batch_del(remote_list = dst_list)
902
+    return EX_OK
902 903
 
903 904
 def cmd_sync_remote2local(args):
904 905
     def _do_deletes(local_list):
... ...
@@ -963,7 +990,7 @@ def cmd_sync_remote2local(args):
963 963
             output(u"download: %s -> %s" % (update_list[key]['object_uri_str'], update_list[key]['local_filename']))
964 964
 
965 965
         warning(u"Exiting now because of --dry-run")
966
-        return
966
+        return EX_OK
967 967
 
968 968
     # if there are copy pairs, we can't do delete_before, on the chance
969 969
     # we need one of the to-be-deleted files as a copy source.
... ...
@@ -1130,6 +1157,7 @@ def cmd_sync_remote2local(args):
1130 1130
 
1131 1131
     if cfg.delete_removed and cfg.delete_after:
1132 1132
         _do_deletes(local_list)
1133
+    return EX_OK
1133 1134
 
1134 1135
 def local_copy(copy_pairs, destination_base):
1135 1136
     # Do NOT hardlink local files by default, that'd be silly
... ...
@@ -1212,20 +1240,24 @@ def _build_attr_header(local_list, src):
1212 1212
 
1213 1213
 def cmd_sync_local2remote(args):
1214 1214
     def _single_process(local_list):
1215
+        any_child_failed = False
1215 1216
         for dest in destinations:
1216 1217
             ## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash)
1217 1218
             destination_base_uri = S3Uri(dest)
1218 1219
             if destination_base_uri.type != 's3':
1219 1220
                 raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri)
1220 1221
             destination_base = str(destination_base_uri)
1221
-            _child(destination_base, local_list)
1222
-            return destination_base_uri
1222
+            rc = _child(destination_base, local_list)
1223
+            if rc:
1224
+                any_child_failed = True
1225
+        return any_child_failed
1223 1226
 
1224 1227
     def _parent():
1225 1228
         # Now that we've done all the disk I/O to look at the local file system and
1226 1229
         # calculate the md5 for each file, fork for each destination to upload to them separately
1227 1230
         # and in parallel
1228 1231
         child_pids = []
1232
+        any_child_failed = False
1229 1233
 
1230 1234
         for dest in destinations:
1231 1235
             ## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash)
... ...
@@ -1243,8 +1275,10 @@ def cmd_sync_local2remote(args):
1243 1243
         while len(child_pids):
1244 1244
             (pid, status) = os.wait()
1245 1245
             child_pids.remove(pid)
1246
+            if status:
1247
+                any_child_failed = True
1246 1248
 
1247
-        return
1249
+        return any_child_failed
1248 1250
 
1249 1251
     def _child(destination_base, local_list):
1250 1252
         def _set_remote_uri(local_list, destination_base, single_file_local):
... ...
@@ -1331,7 +1365,7 @@ def cmd_sync_local2remote(args):
1331 1331
                     output(u"delete: %s" % remote_list[key]['object_uri_str'])
1332 1332
 
1333 1333
             warning(u"Exiting now because of --dry-run")
1334
-            return
1334
+            return EX_OK
1335 1335
 
1336 1336
         # if there are copy pairs, we can't do delete_before, on the chance
1337 1337
         # we need one of the to-be-deleted files as a copy source.
... ...
@@ -1372,7 +1406,7 @@ def cmd_sync_local2remote(args):
1372 1372
         else:
1373 1373
             info(outstr)
1374 1374
 
1375
-        return
1375
+        return EX_OK
1376 1376
 
1377 1377
     def _invalidate_on_cf(destination_base_uri):
1378 1378
         cf = CloudFront(cfg)
... ...
@@ -1407,16 +1441,23 @@ def cmd_sync_local2remote(args):
1407 1407
         destinations = destinations + cfg.additional_destinations
1408 1408
 
1409 1409
     if 'fork' not in os.__all__ or len(destinations) < 2:
1410
-        destination_base_uri = _single_process(local_list)
1410
+        any_child_failed = _single_process(local_list)
1411
+        destination_base_uri = S3Uri(destinations[-1])
1411 1412
         if cfg.invalidate_on_cf:
1412 1413
             if len(uploaded_objects_list) == 0:
1413 1414
                 info("Nothing to invalidate in CloudFront")
1414 1415
             else:
1415 1416
                 _invalidate_on_cf(destination_base_uri)
1416 1417
     else:
1417
-        _parent()
1418
+        any_child_failed = _parent()
1418 1419
         if cfg.invalidate_on_cf:
1419 1420
             error(u"You cannot use both --cf-invalidate and --add-destination.")
1421
+            return(EX_USAGE)
1422
+
1423
+    if any_child_failed:
1424
+        return EX_SOFTWARE
1425
+    else:
1426
+        return EX_OK
1420 1427
 
1421 1428
 def cmd_sync(args):
1422 1429
     if (len(args) < 2):
... ...
@@ -1463,7 +1504,7 @@ def cmd_setacl(args):
1463 1463
             output(u"setacl: %s" % remote_list[key]['object_uri_str'])
1464 1464
 
1465 1465
         warning(u"Exiting now because of --dry-run")
1466
-        return
1466
+        return EX_OK
1467 1467
 
1468 1468
     seq = 0
1469 1469
     for key in remote_list:
... ...
@@ -1471,6 +1512,7 @@ def cmd_setacl(args):
1471 1471
         seq_label = "[%d of %d]" % (seq, remote_count)
1472 1472
         uri = S3Uri(remote_list[key]['object_uri_str'])
1473 1473
         update_acl(s3, uri, seq_label)
1474
+    return EX_OK
1474 1475
 
1475 1476
 def cmd_setpolicy(args):
1476 1477
     s3 = S3(cfg)
... ...
@@ -1478,7 +1520,7 @@ def cmd_setpolicy(args):
1478 1478
     policy_file = args[0]
1479 1479
     policy = open(policy_file, 'r').read()
1480 1480
 
1481
-    if cfg.dry_run: return
1481
+    if cfg.dry_run: return EX_OK
1482 1482
 
1483 1483
     response = s3.set_policy(uri, policy)
1484 1484
 
... ...
@@ -1486,18 +1528,19 @@ def cmd_setpolicy(args):
1486 1486
     debug(u"response - %s" % response['status'])
1487 1487
     if response['status'] == 204:
1488 1488
         output(u"%s: Policy updated" % uri)
1489
+    return EX_OK
1489 1490
 
1490 1491
 def cmd_delpolicy(args):
1491 1492
     s3 = S3(cfg)
1492 1493
     uri = S3Uri(args[0])
1493
-    if cfg.dry_run: return
1494
+    if cfg.dry_run: return EX_OK
1494 1495
 
1495 1496
     response = s3.delete_policy(uri)
1496 1497
 
1497 1498
     #if retsponse['status'] == 200:
1498 1499
     debug(u"response - %s" % response['status'])
1499 1500
     output(u"%s: Policy deleted" % uri)
1500
-
1501
+    return EX_OK
1501 1502
 
1502 1503
 def cmd_multipart(args):
1503 1504
     s3 = S3(cfg)
... ...
@@ -1517,6 +1560,7 @@ def cmd_multipart(args):
1517 1517
             output("%s\t%s\t%s" % (mpupload['Initiated'], "s3://" + uri.bucket() + "/" + mpupload['Key'], mpupload['UploadId']))
1518 1518
         except KeyError:
1519 1519
             pass
1520
+    return EX_OK
1520 1521
 
1521 1522
 def cmd_abort_multipart(args):
1522 1523
     '''{"cmd":"abortmp",   "label":"abort a multipart upload", "param":"s3://BUCKET Id", "func":cmd_abort_multipart, "argc":2},'''
... ...
@@ -1526,6 +1570,7 @@ def cmd_abort_multipart(args):
1526 1526
     response = s3.abort_multipart(uri, id)
1527 1527
     debug(u"response - %s" % response['status'])
1528 1528
     output(u"%s" % uri)
1529
+    return EX_OK
1529 1530
 
1530 1531
 def cmd_list_multipart(args):
1531 1532
     '''{"cmd":"abortmp",   "label":"list a multipart upload", "param":"s3://BUCKET Id", "func":cmd_list_multipart, "argc":2},'''
... ...
@@ -1542,6 +1587,7 @@ def cmd_list_multipart(args):
1542 1542
             output("%s\t%s\t%s\t%s" % (mpupload['LastModified'], mpupload['PartNumber'], mpupload['ETag'], mpupload['Size']))
1543 1543
         except:
1544 1544
             pass
1545
+    return EX_OK
1545 1546
 
1546 1547
 def cmd_accesslog(args):
1547 1548
     s3 = S3(cfg)
... ...
@@ -1563,12 +1609,14 @@ def cmd_accesslog(args):
1563 1563
     if accesslog.isLoggingEnabled():
1564 1564
         output(u"     Target prefix: %s" % accesslog.targetPrefix().uri())
1565 1565
         #output(u"   Public Access:   %s" % accesslog.isAclPublic())
1566
+    return EX_OK
1566 1567
 
1567 1568
 def cmd_sign(args):
1568 1569
     string_to_sign = args.pop()
1569 1570
     debug("string-to-sign: %r" % string_to_sign)
1570 1571
     signature = Utils.sign_string(string_to_sign)
1571 1572
     output("Signature: %s" % signature)
1573
+    return EX_OK
1572 1574
 
1573 1575
 def cmd_signurl(args):
1574 1576
     expiry = args.pop()
... ...
@@ -1578,6 +1626,7 @@ def cmd_signurl(args):
1578 1578
     debug("url to sign: %r" % url_to_sign)
1579 1579
     signed_url = Utils.sign_url(url_to_sign, expiry)
1580 1580
     output(signed_url)
1581
+    return EX_OK
1581 1582
 
1582 1583
 def cmd_fixbucket(args):
1583 1584
     def _unescape(text):
... ...
@@ -1645,6 +1694,7 @@ def cmd_fixbucket(args):
1645 1645
         warning("Fixed %d files' names. Their ACL were reset to Private." % count)
1646 1646
         warning("Use 's3cmd setacl --acl-public s3://...' to make")
1647 1647
         warning("them publicly readable if required.")
1648
+    return EX_OK
1648 1649
 
1649 1650
 def resolve_list(lst, args):
1650 1651
     retval = []
... ...
@@ -2354,7 +2404,8 @@ def main():
2354 2354
         sys.exit(EX_USAGE)
2355 2355
 
2356 2356
     try:
2357
-        cmd_func(args)
2357
+        rc = cmd_func(args)
2358
+        return rc
2358 2359
     except S3Error, e:
2359 2360
         error(u"S3 error: %s" % e)
2360 2361
         sys.exit(EX_SOFTWARE)
... ...
@@ -2433,8 +2484,8 @@ if __name__ == '__main__':
2433 2433
         from S3.FileLists import *
2434 2434
         from S3.MultiPart import MultiPartUpload
2435 2435
 
2436
-        main()
2437
-        sys.exit(EX_OK)
2436
+        rc = main()
2437
+        sys.exit(rc)
2438 2438
 
2439 2439
     except ImportError, e:
2440 2440
         report_exception(e)
... ...
@@ -2444,6 +2495,9 @@ if __name__ == '__main__':
2444 2444
         error(u"Parameter problem: %s" % e)
2445 2445
         sys.exit(EX_USAGE)
2446 2446
 
2447
+    except S3Error, e:
2448
+        sys.exit(EX_SOFTWARE)
2449
+
2447 2450
     except SystemExit, e:
2448 2451
         sys.exit(e.code)
2449 2452