Conflicts:
s3cmd
1 | 1 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,16 @@ |
0 |
+# patterned on /usr/include/sysexits.h |
|
1 |
+ |
|
2 |
+EX_OK = 0 |
|
3 |
+EX_GENERAL = 1 |
|
4 |
+EX_SOMEFAILED = 2 # some parts of the command succeeded, while others failed |
|
5 |
+EX_USAGE = 64 # The command was used incorrectly (e.g. bad command line syntax) |
|
6 |
+EX_SOFTWARE = 70 # internal software error (e.g. S3 error of unknown specificity) |
|
7 |
+EX_OSERR = 71 # system error (e.g. out of memory) |
|
8 |
+EX_OSFILE = 72 # OS error (e.g. invalid Python version) |
|
9 |
+EX_IOERR = 74 # An error occurred while doing I/O on some file. |
|
10 |
+EX_TEMPFAIL = 75 # temporary failure (S3DownloadError or similar, retry later) |
|
11 |
+EX_NOPERM = 77 # Insufficient permissions to perform the operation on S3 |
|
12 |
+EX_CONFIG = 78 # Configuration file error |
|
13 |
+_EX_SIGNAL = 128 |
|
14 |
+_EX_SIGINT = 2 |
|
15 |
+EX_BREAK = _EX_SIGNAL + _EX_SIGINT # Control-C (KeyboardInterrupt raised) |
... | ... |
@@ -18,6 +18,7 @@ import errno |
18 | 18 |
import urllib |
19 | 19 |
from calendar import timegm |
20 | 20 |
from logging import debug, info, warning, error |
21 |
+from ExitCodes import EX_OSFILE |
|
21 | 22 |
try: |
22 | 23 |
import dateutil.parser |
23 | 24 |
except ImportError: |
... | ... |
@@ -33,7 +34,7 @@ $ pip install python-dateutil |
33 | 33 |
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! |
34 | 34 |
""") |
35 | 35 |
sys.stderr.flush() |
36 |
- sys.exit(1) |
|
36 |
+ sys.exit(EX_OSFILE) |
|
37 | 37 |
|
38 | 38 |
import Config |
39 | 39 |
import Exceptions |
... | ... |
@@ -15,6 +15,7 @@ import locale |
15 | 15 |
import getpass |
16 | 16 |
import S3.Exceptions |
17 | 17 |
import S3.Config |
18 |
+from S3.ExitCodes import * |
|
18 | 19 |
|
19 | 20 |
count_pass = 0 |
20 | 21 |
count_fail = 0 |
... | ... |
@@ -290,7 +291,7 @@ test_s3cmd("Create multiple buckets", ['mb', pbucket(2), pbucket(3)], |
290 | 290 |
|
291 | 291 |
## ====== Invalid bucket name |
292 | 292 |
test_s3cmd("Invalid bucket name", ["mb", "--bucket-location=EU", pbucket('EU')], |
293 |
- retcode = 1, |
|
293 |
+ retcode = EX_USAGE, |
|
294 | 294 |
must_find = "ERROR: Parameter problem: Bucket name '%s' contains disallowed character" % bucket('EU'), |
295 | 295 |
must_not_find_re = "Bucket.*created") |
296 | 296 |
|
... | ... |
@@ -421,7 +422,7 @@ test_s3cmd("Rename within S3", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xy |
421 | 421 |
|
422 | 422 |
## ====== Rename (NoSuchKey) |
423 | 423 |
test_s3cmd("Rename (NoSuchKey)", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)], |
424 |
- retcode = 1, |
|
424 |
+ retcode = EX_SOFTWARE, |
|
425 | 425 |
must_find_re = [ 'ERROR:.*NoSuchKey' ], |
426 | 426 |
must_not_find = [ 'File %s/xyz/etc/logo.png moved to %s/xyz/etc2/Logo.PNG' % (pbucket(1), pbucket(1)) ]) |
427 | 427 |
|
... | ... |
@@ -443,7 +444,7 @@ test_rmdir("Remove dst dir for get", "testsuite-out") |
443 | 443 |
|
444 | 444 |
## ====== Get multiple files |
445 | 445 |
test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'], |
446 |
- retcode = 1, |
|
446 |
+ retcode = EX_USAGE, |
|
447 | 447 |
must_find = [ 'Destination must be a directory or stdout when downloading multiple sources.' ]) |
448 | 448 |
|
449 | 449 |
## ====== put/get non-ASCII filenames |
... | ... |
@@ -22,7 +22,7 @@ import sys |
22 | 22 |
|
23 | 23 |
if float("%d.%d" %(sys.version_info[0], sys.version_info[1])) < 2.4: |
24 | 24 |
sys.stderr.write(u"ERROR: Python 2.4 or higher required, sorry.\n") |
25 |
- sys.exit(1) |
|
25 |
+ sys.exit(EX_OSFILE) |
|
26 | 26 |
|
27 | 27 |
import logging |
28 | 28 |
import time |
... | ... |
@@ -59,10 +59,15 @@ def cmd_du(args): |
59 | 59 |
uri = S3Uri(args[0]) |
60 | 60 |
if uri.type == "s3" and uri.has_bucket(): |
61 | 61 |
subcmd_bucket_usage(s3, uri) |
62 |
- return |
|
62 |
+ return EX_OK |
|
63 | 63 |
subcmd_bucket_usage_all(s3) |
64 |
+ return EX_OK |
|
64 | 65 |
|
65 | 66 |
def subcmd_bucket_usage_all(s3): |
67 |
+ """ |
|
68 |
+ Returns: sum of bucket sizes as integer |
|
69 |
+ Raises: S3Error |
|
70 |
+ """ |
|
66 | 71 |
response = s3.list_all_buckets() |
67 | 72 |
|
68 | 73 |
buckets_size = 0 |
... | ... |
@@ -74,8 +79,14 @@ def subcmd_bucket_usage_all(s3): |
74 | 74 |
total_size_str = str(total_size) + size_coeff |
75 | 75 |
output(u"".rjust(8, "-")) |
76 | 76 |
output(u"%s Total" % (total_size_str.ljust(8))) |
77 |
+ return size |
|
77 | 78 |
|
78 | 79 |
def subcmd_bucket_usage(s3, uri): |
80 |
+ """ |
|
81 |
+ Returns: bucket size as integer |
|
82 |
+ Raises: S3Error |
|
83 |
+ """ |
|
84 |
+ |
|
79 | 85 |
bucket = uri.bucket() |
80 | 86 |
object = uri.object() |
81 | 87 |
|
... | ... |
@@ -91,9 +102,7 @@ def subcmd_bucket_usage(s3, uri): |
91 | 91 |
except S3Error, e: |
92 | 92 |
if S3.codes.has_key(e.info["Code"]): |
93 | 93 |
error(S3.codes[e.info["Code"]] % bucket) |
94 |
- return |
|
95 |
- else: |
|
96 |
- raise |
|
94 |
+ raise |
|
97 | 95 |
|
98 | 96 |
# objects in the current scope: |
99 | 97 |
for obj in response["list"]: |
... | ... |
@@ -114,8 +123,9 @@ def cmd_ls(args): |
114 | 114 |
uri = S3Uri(args[0]) |
115 | 115 |
if uri.type == "s3" and uri.has_bucket(): |
116 | 116 |
subcmd_bucket_list(s3, uri) |
117 |
- return |
|
117 |
+ return EX_OK |
|
118 | 118 |
subcmd_buckets_list_all(s3) |
119 |
+ return EX_OK |
|
119 | 120 |
|
120 | 121 |
def cmd_buckets_list_all_all(args): |
121 | 122 |
s3 = S3(Config()) |
... | ... |
@@ -125,7 +135,7 @@ def cmd_buckets_list_all_all(args): |
125 | 125 |
for bucket in response["list"]: |
126 | 126 |
subcmd_bucket_list(s3, S3Uri("s3://" + bucket["Name"])) |
127 | 127 |
output(u"") |
128 |
- |
|
128 |
+ return EX_OK |
|
129 | 129 |
|
130 | 130 |
def subcmd_buckets_list_all(s3): |
131 | 131 |
response = s3.list_all_buckets() |
... | ... |
@@ -147,9 +157,7 @@ def subcmd_bucket_list(s3, uri): |
147 | 147 |
except S3Error, e: |
148 | 148 |
if S3.codes.has_key(e.info["Code"]): |
149 | 149 |
error(S3.codes[e.info["Code"]] % bucket) |
150 |
- return |
|
151 |
- else: |
|
152 |
- raise |
|
150 |
+ raise |
|
153 | 151 |
|
154 | 152 |
if cfg.list_md5: |
155 | 153 |
format_string = u"%(timestamp)16s %(size)9s%(coeff)1s %(md5)32s %(uri)s" |
... | ... |
@@ -196,9 +204,8 @@ def cmd_bucket_create(args): |
196 | 196 |
except S3Error, e: |
197 | 197 |
if S3.codes.has_key(e.info["Code"]): |
198 | 198 |
error(S3.codes[e.info["Code"]] % uri.bucket()) |
199 |
- return |
|
200 |
- else: |
|
201 |
- raise |
|
199 |
+ raise |
|
200 |
+ return EX_OK |
|
202 | 201 |
|
203 | 202 |
def cmd_website_info(args): |
204 | 203 |
s3 = S3(Config()) |
... | ... |
@@ -218,9 +225,8 @@ def cmd_website_info(args): |
218 | 218 |
except S3Error, e: |
219 | 219 |
if S3.codes.has_key(e.info["Code"]): |
220 | 220 |
error(S3.codes[e.info["Code"]] % uri.bucket()) |
221 |
- return |
|
222 |
- else: |
|
223 |
- raise |
|
221 |
+ raise |
|
222 |
+ return EX_OK |
|
224 | 223 |
|
225 | 224 |
def cmd_website_create(args): |
226 | 225 |
s3 = S3(Config()) |
... | ... |
@@ -234,9 +240,8 @@ def cmd_website_create(args): |
234 | 234 |
except S3Error, e: |
235 | 235 |
if S3.codes.has_key(e.info["Code"]): |
236 | 236 |
error(S3.codes[e.info["Code"]] % uri.bucket()) |
237 |
- return |
|
238 |
- else: |
|
239 |
- raise |
|
237 |
+ raise |
|
238 |
+ return EX_OK |
|
240 | 239 |
|
241 | 240 |
def cmd_website_delete(args): |
242 | 241 |
s3 = S3(Config()) |
... | ... |
@@ -250,9 +255,8 @@ def cmd_website_delete(args): |
250 | 250 |
except S3Error, e: |
251 | 251 |
if S3.codes.has_key(e.info["Code"]): |
252 | 252 |
error(S3.codes[e.info["Code"]] % uri.bucket()) |
253 |
- return |
|
254 |
- else: |
|
255 |
- raise |
|
253 |
+ raise |
|
254 |
+ return EX_OK |
|
256 | 255 |
|
257 | 256 |
def cmd_expiration_set(args): |
258 | 257 |
s3 = S3(Config()) |
... | ... |
@@ -269,9 +273,8 @@ def cmd_expiration_set(args): |
269 | 269 |
except S3Error, e: |
270 | 270 |
if S3.codes.has_key(e.info["Code"]): |
271 | 271 |
error(S3.codes[e.info["Code"]] % uri.bucket()) |
272 |
- return |
|
273 |
- else: |
|
274 |
- raise |
|
272 |
+ raise |
|
273 |
+ return EX_OK |
|
275 | 274 |
|
276 | 275 |
def cmd_bucket_delete(args): |
277 | 276 |
def _bucket_delete_one(uri): |
... | ... |
@@ -279,25 +282,32 @@ def cmd_bucket_delete(args): |
279 | 279 |
response = s3.bucket_delete(uri.bucket()) |
280 | 280 |
output(u"Bucket '%s' removed" % uri.uri()) |
281 | 281 |
except S3Error, e: |
282 |
+ if e.info['Code'] == 'NoSuchBucket': |
|
283 |
+ if cfg.force: |
|
284 |
+ return EX_OK |
|
285 |
+ else: |
|
286 |
+ return EX_USAGE |
|
282 | 287 |
if e.info['Code'] == 'BucketNotEmpty' and (cfg.force or cfg.recursive): |
283 | 288 |
warning(u"Bucket is not empty. Removing all the objects from it first. This may take some time...") |
284 |
- success = subcmd_batch_del(uri_str = uri.uri()) |
|
285 |
- if success: |
|
289 |
+ rc = subcmd_batch_del(uri_str = uri.uri()) |
|
290 |
+ if rc == EX_OK: |
|
286 | 291 |
return _bucket_delete_one(uri) |
287 | 292 |
else: |
288 | 293 |
output(u"Bucket was not removed") |
289 | 294 |
elif S3.codes.has_key(e.info["Code"]): |
290 | 295 |
error(S3.codes[e.info["Code"]] % uri.bucket()) |
291 |
- return |
|
292 |
- else: |
|
293 |
- raise |
|
296 |
+ raise |
|
297 |
+ return EX_OK |
|
294 | 298 |
|
295 | 299 |
s3 = S3(Config()) |
296 | 300 |
for arg in args: |
297 | 301 |
uri = S3Uri(arg) |
298 | 302 |
if not uri.type == "s3" or not uri.has_bucket() or uri.has_object(): |
299 | 303 |
raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg) |
300 |
- _bucket_delete_one(uri) |
|
304 |
+ rc = _bucket_delete_one(uri) |
|
305 |
+ if rc != EX_OK: |
|
306 |
+ return rc |
|
307 |
+ return EX_OK |
|
301 | 308 |
|
302 | 309 |
def cmd_object_put(args): |
303 | 310 |
cfg = Config() |
... | ... |
@@ -345,7 +355,7 @@ def cmd_object_put(args): |
345 | 345 |
output(u"upload: %s -> %s" % (nicekey, local_list[key]['remote_uri'])) |
346 | 346 |
|
347 | 347 |
warning(u"Exiting now because of --dry-run") |
348 |
- return |
|
348 |
+ return EX_OK |
|
349 | 349 |
|
350 | 350 |
seq = 0 |
351 | 351 |
for key in local_list: |
... | ... |
@@ -358,7 +368,7 @@ def cmd_object_put(args): |
358 | 358 |
full_name = full_name_orig |
359 | 359 |
seq_label = "[%d of %d]" % (seq, local_count) |
360 | 360 |
if Config().encrypt: |
361 |
- exitcode, full_name, extra_headers["x-amz-meta-s3tools-gpgenc"] = gpg_encrypt(full_name_orig) |
|
361 |
+ gpg_exitcode, full_name, extra_headers["x-amz-meta-s3tools-gpgenc"] = gpg_encrypt(full_name_orig) |
|
362 | 362 |
if cfg.preserve_attrs or local_list[key]['size'] > (cfg.multipart_chunk_size_mb * 1024 * 1024): |
363 | 363 |
attr_header = _build_attr_header(local_list, key) |
364 | 364 |
debug(u"attr_header: %s" % attr_header) |
... | ... |
@@ -383,6 +393,7 @@ def cmd_object_put(args): |
383 | 383 |
if Config().encrypt and full_name != full_name_orig: |
384 | 384 |
debug(u"Removing temporary encrypted file: %s" % unicodise(full_name)) |
385 | 385 |
os.remove(full_name) |
386 |
+ return EX_OK |
|
386 | 387 |
|
387 | 388 |
def cmd_object_get(args): |
388 | 389 |
cfg = Config() |
... | ... |
@@ -462,7 +473,7 @@ def cmd_object_get(args): |
462 | 462 |
output(u"download: %s -> %s" % (remote_list[key]['object_uri_str'], remote_list[key]['local_filename'])) |
463 | 463 |
|
464 | 464 |
warning(u"Exiting now because of --dry-run") |
465 |
- return |
|
465 |
+ return EX_OK |
|
466 | 466 |
|
467 | 467 |
seq = 0 |
468 | 468 |
for key in remote_list: |
... | ... |
@@ -537,6 +548,7 @@ def cmd_object_get(args): |
537 | 537 |
if Config().delete_after_fetch: |
538 | 538 |
s3.object_delete(uri) |
539 | 539 |
output(u"File %s removed after fetch" % (uri)) |
540 |
+ return EX_OK |
|
540 | 541 |
|
541 | 542 |
def cmd_object_del(args): |
542 | 543 |
recursive = Config().recursive |
... | ... |
@@ -551,11 +563,19 @@ def cmd_object_del(args): |
551 | 551 |
raise ParameterError("File name required, not only the bucket name. Alternatively use --recursive") |
552 | 552 |
|
553 | 553 |
if not recursive: |
554 |
- subcmd_object_del_uri(uri_str) |
|
554 |
+ rc = subcmd_object_del_uri(uri_str) |
|
555 | 555 |
else: |
556 |
- subcmd_batch_del(uri_str = uri_str) |
|
556 |
+ rc = subcmd_batch_del(uri_str = uri_str) |
|
557 |
+ if not rc: |
|
558 |
+ return rc |
|
559 |
+ return EX_OK |
|
557 | 560 |
|
558 | 561 |
def subcmd_batch_del(uri_str = None, bucket = None, remote_list = None): |
562 |
+ """ |
|
563 |
+ Returns: EX_OK |
|
564 |
+ Raises: ValueError |
|
565 |
+ """ |
|
566 |
+ |
|
559 | 567 |
def _batch_del(remote_list): |
560 | 568 |
s3 = S3(cfg) |
561 | 569 |
to_delete = remote_list[:1000] |
... | ... |
@@ -580,21 +600,25 @@ def subcmd_batch_del(uri_str = None, bucket = None, remote_list = None): |
580 | 580 |
remote_list, exclude_list = fetch_remote_list(uri_str, require_attribs = False) |
581 | 581 |
|
582 | 582 |
if len(remote_list) == 0: |
583 |
- debug(u"Remote list is empty.") |
|
584 |
- return False |
|
583 |
+ warning(u"Remote list is empty.") |
|
584 |
+ return EX_OK |
|
585 | 585 |
|
586 | 586 |
if cfg.max_delete > 0 and len(remote_list) > cfg.max_delete: |
587 | 587 |
warning(u"delete: maximum requested number of deletes would be exceeded, none performed.") |
588 |
- return False |
|
588 |
+ return EX_OK |
|
589 | 589 |
|
590 | 590 |
_batch_del(remote_list) |
591 | 591 |
|
592 | 592 |
if cfg.dry_run: |
593 | 593 |
warning(u"Exiting now because of --dry-run") |
594 |
- return False |
|
595 |
- return True |
|
594 |
+ return EX_OK |
|
595 |
+ return EX_OK |
|
596 | 596 |
|
597 | 597 |
def subcmd_object_del_uri(uri_str, recursive = None): |
598 |
+ """ |
|
599 |
+ Returns: True if the delete(s) were performed (or skipped by --dry-run), False if the --max-delete limit would be exceeded |
|
600 |
+ Raises: ValueError |
|
601 |
+ """ |
|
598 | 602 |
s3 = S3(cfg) |
599 | 603 |
|
600 | 604 |
if recursive is None: |
... | ... |
@@ -607,7 +631,7 @@ def subcmd_object_del_uri(uri_str, recursive = None): |
607 | 607 |
info(u"Summary: %d remote files to delete" % remote_count) |
608 | 608 |
if cfg.max_delete > 0 and remote_count > cfg.max_delete: |
609 | 609 |
warning(u"delete: maximum requested number of deletes would be exceeded, none performed.") |
610 |
- return |
|
610 |
+ return False |
|
611 | 611 |
|
612 | 612 |
if cfg.dry_run: |
613 | 613 |
for key in exclude_list: |
... | ... |
@@ -616,12 +640,13 @@ def subcmd_object_del_uri(uri_str, recursive = None): |
616 | 616 |
output(u"delete: %s" % remote_list[key]['object_uri_str']) |
617 | 617 |
|
618 | 618 |
warning(u"Exiting now because of --dry-run") |
619 |
- return |
|
619 |
+ return True |
|
620 | 620 |
|
621 | 621 |
for key in remote_list: |
622 | 622 |
item = remote_list[key] |
623 | 623 |
response = s3.object_delete(S3Uri(item['object_uri_str'])) |
624 | 624 |
output(u"File %s deleted" % item['object_uri_str']) |
625 |
+ return True |
|
625 | 626 |
|
626 | 627 |
def cmd_object_restore(args): |
627 | 628 |
s3 = S3(cfg) |
... | ... |
@@ -642,7 +667,7 @@ def cmd_object_restore(args): |
642 | 642 |
output(u"restore: %s" % remote_list[key]['object_uri_str']) |
643 | 643 |
|
644 | 644 |
warning(u"Exiting now because of --dry-run") |
645 |
- return |
|
645 |
+ return EX_OK |
|
646 | 646 |
|
647 | 647 |
for key in remote_list: |
648 | 648 |
item = remote_list[key] |
... | ... |
@@ -653,6 +678,7 @@ def cmd_object_restore(args): |
653 | 653 |
output(u"File %s restoration started" % item['object_uri_str']) |
654 | 654 |
else: |
655 | 655 |
debug(u"Skipping directory since only files may be restored") |
656 |
+ return EX_OK |
|
656 | 657 |
|
657 | 658 |
|
658 | 659 |
def subcmd_cp_mv(args, process_fce, action_str, message): |
... | ... |
@@ -694,7 +720,7 @@ def subcmd_cp_mv(args, process_fce, action_str, message): |
694 | 694 |
output(u"%s: %s -> %s" % (action_str, remote_list[key]['object_uri_str'], remote_list[key]['dest_name'])) |
695 | 695 |
|
696 | 696 |
warning(u"Exiting now because of --dry-run") |
697 |
- return |
|
697 |
+ return EX_OK |
|
698 | 698 |
|
699 | 699 |
seq = 0 |
700 | 700 |
for key in remote_list: |
... | ... |
@@ -716,18 +742,19 @@ def subcmd_cp_mv(args, process_fce, action_str, message): |
716 | 716 |
warning(u"Key not found %s" % item['object_uri_str']) |
717 | 717 |
else: |
718 | 718 |
raise |
719 |
+ return EX_OK |
|
719 | 720 |
|
720 | 721 |
def cmd_cp(args): |
721 | 722 |
s3 = S3(Config()) |
722 |
- subcmd_cp_mv(args, s3.object_copy, "copy", u"File %(src)s copied to %(dst)s") |
|
723 |
+ return subcmd_cp_mv(args, s3.object_copy, "copy", u"File %(src)s copied to %(dst)s") |
|
723 | 724 |
|
724 | 725 |
def cmd_modify(args): |
725 | 726 |
s3 = S3(Config()) |
726 |
- subcmd_cp_mv(args, s3.object_copy, "modify", u"File %(src)s modified") |
|
727 |
+ return subcmd_cp_mv(args, s3.object_copy, "modify", u"File %(src)s modified") |
|
727 | 728 |
|
728 | 729 |
def cmd_mv(args): |
729 | 730 |
s3 = S3(Config()) |
730 |
- subcmd_cp_mv(args, s3.object_move, "move", u"File %(src)s moved to %(dst)s") |
|
731 |
+ return subcmd_cp_mv(args, s3.object_move, "move", u"File %(src)s moved to %(dst)s") |
|
731 | 732 |
|
732 | 733 |
def cmd_info(args): |
733 | 734 |
s3 = S3(Config()) |
... | ... |
@@ -791,9 +818,8 @@ def cmd_info(args): |
791 | 791 |
except S3Error, e: |
792 | 792 |
if S3.codes.has_key(e.info["Code"]): |
793 | 793 |
error(S3.codes[e.info["Code"]] % uri.bucket()) |
794 |
- return |
|
795 |
- else: |
|
796 |
- raise |
|
794 |
+ raise |
|
795 |
+ return EX_OK |
|
797 | 796 |
|
798 | 797 |
def filedicts_to_keys(*args): |
799 | 798 |
keys = set() |
... | ... |
@@ -842,7 +868,7 @@ def cmd_sync_remote2remote(args): |
842 | 842 |
for key in src_list: |
843 | 843 |
output(u"Sync: %s -> %s" % (src_list[key]['object_uri_str'], src_list[key]['target_uri'])) |
844 | 844 |
warning(u"Exiting now because of --dry-run") |
845 |
- return |
|
845 |
+ return EX_OK |
|
846 | 846 |
|
847 | 847 |
# if there are copy pairs, we can't do delete_before, on the chance |
848 | 848 |
# we need one of the to-be-deleted files as a copy source. |
... | ... |
@@ -900,6 +926,7 @@ def cmd_sync_remote2remote(args): |
900 | 900 |
# Delete items in destination that are not in source |
901 | 901 |
if cfg.delete_removed and cfg.delete_after: |
902 | 902 |
subcmd_batch_del(remote_list = dst_list) |
903 |
+ return EX_OK |
|
903 | 904 |
|
904 | 905 |
def cmd_sync_remote2local(args): |
905 | 906 |
def _do_deletes(local_list): |
... | ... |
@@ -964,7 +991,7 @@ def cmd_sync_remote2local(args): |
964 | 964 |
output(u"download: %s -> %s" % (update_list[key]['object_uri_str'], update_list[key]['local_filename'])) |
965 | 965 |
|
966 | 966 |
warning(u"Exiting now because of --dry-run") |
967 |
- return |
|
967 |
+ return EX_OK |
|
968 | 968 |
|
969 | 969 |
# if there are copy pairs, we can't do delete_before, on the chance |
970 | 970 |
# we need one of the to-be-deleted files as a copy source. |
... | ... |
@@ -1131,6 +1158,7 @@ def cmd_sync_remote2local(args): |
1131 | 1131 |
|
1132 | 1132 |
if cfg.delete_removed and cfg.delete_after: |
1133 | 1133 |
_do_deletes(local_list) |
1134 |
+ return EX_OK |
|
1134 | 1135 |
|
1135 | 1136 |
def local_copy(copy_pairs, destination_base): |
1136 | 1137 |
# Do NOT hardlink local files by default, that'd be silly |
... | ... |
@@ -1213,20 +1241,24 @@ def _build_attr_header(local_list, src): |
1213 | 1213 |
|
1214 | 1214 |
def cmd_sync_local2remote(args): |
1215 | 1215 |
def _single_process(local_list): |
1216 |
+ any_child_failed = False |
|
1216 | 1217 |
for dest in destinations: |
1217 | 1218 |
## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash) |
1218 | 1219 |
destination_base_uri = S3Uri(dest) |
1219 | 1220 |
if destination_base_uri.type != 's3': |
1220 | 1221 |
raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri) |
1221 | 1222 |
destination_base = str(destination_base_uri) |
1222 |
- _child(destination_base, local_list) |
|
1223 |
- return destination_base_uri |
|
1223 |
+ rc = _child(destination_base, local_list) |
|
1224 |
+ if rc: |
|
1225 |
+ any_child_failed = True |
|
1226 |
+ return any_child_failed |
|
1224 | 1227 |
|
1225 | 1228 |
def _parent(): |
1226 | 1229 |
# Now that we've done all the disk I/O to look at the local file system and |
1227 | 1230 |
# calculate the md5 for each file, fork for each destination to upload to them separately |
1228 | 1231 |
# and in parallel |
1229 | 1232 |
child_pids = [] |
1233 |
+ any_child_failed = False |
|
1230 | 1234 |
|
1231 | 1235 |
for dest in destinations: |
1232 | 1236 |
## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash) |
... | ... |
@@ -1244,8 +1276,10 @@ def cmd_sync_local2remote(args): |
1244 | 1244 |
while len(child_pids): |
1245 | 1245 |
(pid, status) = os.wait() |
1246 | 1246 |
child_pids.remove(pid) |
1247 |
+ if status: |
|
1248 |
+ any_child_failed = True |
|
1247 | 1249 |
|
1248 |
- return |
|
1250 |
+ return any_child_failed |
|
1249 | 1251 |
|
1250 | 1252 |
def _child(destination_base, local_list): |
1251 | 1253 |
def _set_remote_uri(local_list, destination_base, single_file_local): |
... | ... |
@@ -1333,7 +1367,7 @@ def cmd_sync_local2remote(args): |
1333 | 1333 |
output(u"delete: %s" % remote_list[key]['object_uri_str']) |
1334 | 1334 |
|
1335 | 1335 |
warning(u"Exiting now because of --dry-run") |
1336 |
- return |
|
1336 |
+ return EX_OK |
|
1337 | 1337 |
|
1338 | 1338 |
# if there are copy pairs, we can't do delete_before, on the chance |
1339 | 1339 |
# we need one of the to-be-deleted files as a copy source. |
... | ... |
@@ -1374,7 +1408,7 @@ def cmd_sync_local2remote(args): |
1374 | 1374 |
else: |
1375 | 1375 |
info(outstr) |
1376 | 1376 |
|
1377 |
- return |
|
1377 |
+ return EX_OK |
|
1378 | 1378 |
|
1379 | 1379 |
def _invalidate_on_cf(destination_base_uri): |
1380 | 1380 |
cf = CloudFront(cfg) |
... | ... |
@@ -1400,7 +1434,7 @@ def cmd_sync_local2remote(args): |
1400 | 1400 |
error(u"S3cmd 'sync' doesn't yet support GPG encryption, sorry.") |
1401 | 1401 |
error(u"Either use unconditional 's3cmd put --recursive'") |
1402 | 1402 |
error(u"or disable encryption with --no-encrypt parameter.") |
1403 |
- sys.exit(1) |
|
1403 |
+ sys.exit(EX_USAGE) |
|
1404 | 1404 |
|
1405 | 1405 |
local_list, single_file_local, src_exclude_list = fetch_local_list(args[:-1], is_src = True, recursive = True) |
1406 | 1406 |
|
... | ... |
@@ -1409,16 +1443,23 @@ def cmd_sync_local2remote(args): |
1409 | 1409 |
destinations = destinations + cfg.additional_destinations |
1410 | 1410 |
|
1411 | 1411 |
if 'fork' not in os.__all__ or len(destinations) < 2: |
1412 |
- destination_base_uri = _single_process(local_list) |
|
1412 |
+ any_child_failed = _single_process(local_list) |
|
1413 |
+ destination_base_uri = S3Uri(destinations[-1]) |
|
1413 | 1414 |
if cfg.invalidate_on_cf: |
1414 | 1415 |
if len(uploaded_objects_list) == 0: |
1415 | 1416 |
info("Nothing to invalidate in CloudFront") |
1416 | 1417 |
else: |
1417 | 1418 |
_invalidate_on_cf(destination_base_uri) |
1418 | 1419 |
else: |
1419 |
- _parent() |
|
1420 |
+ any_child_failed = _parent() |
|
1420 | 1421 |
if cfg.invalidate_on_cf: |
1421 | 1422 |
error(u"You cannot use both --cf-invalidate and --add-destination.") |
1423 |
+ return(EX_USAGE) |
|
1424 |
+ |
|
1425 |
+ if any_child_failed: |
|
1426 |
+ return EX_SOFTWARE |
|
1427 |
+ else: |
|
1428 |
+ return EX_OK |
|
1422 | 1429 |
|
1423 | 1430 |
def cmd_sync(args): |
1424 | 1431 |
if (len(args) < 2): |
... | ... |
@@ -1465,7 +1506,7 @@ def cmd_setacl(args): |
1465 | 1465 |
output(u"setacl: %s" % remote_list[key]['object_uri_str']) |
1466 | 1466 |
|
1467 | 1467 |
warning(u"Exiting now because of --dry-run") |
1468 |
- return |
|
1468 |
+ return EX_OK |
|
1469 | 1469 |
|
1470 | 1470 |
seq = 0 |
1471 | 1471 |
for key in remote_list: |
... | ... |
@@ -1473,6 +1514,7 @@ def cmd_setacl(args): |
1473 | 1473 |
seq_label = "[%d of %d]" % (seq, remote_count) |
1474 | 1474 |
uri = S3Uri(remote_list[key]['object_uri_str']) |
1475 | 1475 |
update_acl(s3, uri, seq_label) |
1476 |
+ return EX_OK |
|
1476 | 1477 |
|
1477 | 1478 |
def cmd_setpolicy(args): |
1478 | 1479 |
s3 = S3(cfg) |
... | ... |
@@ -1480,7 +1522,7 @@ def cmd_setpolicy(args): |
1480 | 1480 |
policy_file = args[0] |
1481 | 1481 |
policy = open(policy_file, 'r').read() |
1482 | 1482 |
|
1483 |
- if cfg.dry_run: return |
|
1483 |
+ if cfg.dry_run: return EX_OK |
|
1484 | 1484 |
|
1485 | 1485 |
response = s3.set_policy(uri, policy) |
1486 | 1486 |
|
... | ... |
@@ -1488,18 +1530,19 @@ def cmd_setpolicy(args): |
1488 | 1488 |
debug(u"response - %s" % response['status']) |
1489 | 1489 |
if response['status'] == 204: |
1490 | 1490 |
output(u"%s: Policy updated" % uri) |
1491 |
+ return EX_OK |
|
1491 | 1492 |
|
1492 | 1493 |
def cmd_delpolicy(args): |
1493 | 1494 |
s3 = S3(cfg) |
1494 | 1495 |
uri = S3Uri(args[0]) |
1495 |
- if cfg.dry_run: return |
|
1496 |
+ if cfg.dry_run: return EX_OK |
|
1496 | 1497 |
|
1497 | 1498 |
response = s3.delete_policy(uri) |
1498 | 1499 |
|
1499 | 1500 |
#if retsponse['status'] == 200: |
1500 | 1501 |
debug(u"response - %s" % response['status']) |
1501 | 1502 |
output(u"%s: Policy deleted" % uri) |
1502 |
- |
|
1503 |
+ return EX_OK |
|
1503 | 1504 |
|
1504 | 1505 |
def cmd_multipart(args): |
1505 | 1506 |
s3 = S3(cfg) |
... | ... |
@@ -1519,6 +1562,7 @@ def cmd_multipart(args): |
1519 | 1519 |
output("%s\t%s\t%s" % (mpupload['Initiated'], "s3://" + uri.bucket() + "/" + mpupload['Key'], mpupload['UploadId'])) |
1520 | 1520 |
except KeyError: |
1521 | 1521 |
pass |
1522 |
+ return EX_OK |
|
1522 | 1523 |
|
1523 | 1524 |
def cmd_abort_multipart(args): |
1524 | 1525 |
'''{"cmd":"abortmp", "label":"abort a multipart upload", "param":"s3://BUCKET Id", "func":cmd_abort_multipart, "argc":2},''' |
... | ... |
@@ -1528,6 +1572,7 @@ def cmd_abort_multipart(args): |
1528 | 1528 |
response = s3.abort_multipart(uri, id) |
1529 | 1529 |
debug(u"response - %s" % response['status']) |
1530 | 1530 |
output(u"%s" % uri) |
1531 |
+ return EX_OK |
|
1531 | 1532 |
|
1532 | 1533 |
def cmd_list_multipart(args): |
1533 | 1534 |
'''{"cmd":"abortmp", "label":"list a multipart upload", "param":"s3://BUCKET Id", "func":cmd_list_multipart, "argc":2},''' |
... | ... |
@@ -1544,6 +1589,7 @@ def cmd_list_multipart(args): |
1544 | 1544 |
output("%s\t%s\t%s\t%s" % (mpupload['LastModified'], mpupload['PartNumber'], mpupload['ETag'], mpupload['Size'])) |
1545 | 1545 |
except: |
1546 | 1546 |
pass |
1547 |
+ return EX_OK |
|
1547 | 1548 |
|
1548 | 1549 |
def cmd_accesslog(args): |
1549 | 1550 |
s3 = S3(cfg) |
... | ... |
@@ -1565,12 +1611,14 @@ def cmd_accesslog(args): |
1565 | 1565 |
if accesslog.isLoggingEnabled(): |
1566 | 1566 |
output(u" Target prefix: %s" % accesslog.targetPrefix().uri()) |
1567 | 1567 |
#output(u" Public Access: %s" % accesslog.isAclPublic()) |
1568 |
+ return EX_OK |
|
1568 | 1569 |
|
1569 | 1570 |
def cmd_sign(args): |
1570 | 1571 |
string_to_sign = args.pop() |
1571 | 1572 |
debug("string-to-sign: %r" % string_to_sign) |
1572 | 1573 |
signature = Utils.sign_string(string_to_sign) |
1573 | 1574 |
output("Signature: %s" % signature) |
1575 |
+ return EX_OK |
|
1574 | 1576 |
|
1575 | 1577 |
def cmd_signurl(args): |
1576 | 1578 |
expiry = args.pop() |
... | ... |
@@ -1580,6 +1628,7 @@ def cmd_signurl(args): |
1580 | 1580 |
debug("url to sign: %r" % url_to_sign) |
1581 | 1581 |
signed_url = Utils.sign_url(url_to_sign, expiry) |
1582 | 1582 |
output(signed_url) |
1583 |
+ return EX_OK |
|
1583 | 1584 |
|
1584 | 1585 |
def cmd_fixbucket(args): |
1585 | 1586 |
def _unescape(text): |
... | ... |
@@ -1647,6 +1696,7 @@ def cmd_fixbucket(args): |
1647 | 1647 |
warning("Fixed %d files' names. Their ACL were reset to Private." % count) |
1648 | 1648 |
warning("Use 's3cmd setacl --acl-public s3://...' to make") |
1649 | 1649 |
warning("them publicly readable if required.") |
1650 |
+ return EX_OK |
|
1650 | 1651 |
|
1651 | 1652 |
def resolve_list(lst, args): |
1652 | 1653 |
retval = [] |
... | ... |
@@ -1839,14 +1889,14 @@ def run_configure(config_file, args): |
1839 | 1839 |
|
1840 | 1840 |
except IOError, e: |
1841 | 1841 |
error(u"Writing config file failed: %s: %s" % (config_file, e.strerror)) |
1842 |
- sys.exit(1) |
|
1842 |
+ sys.exit(EX_IOERR) |
|
1843 | 1843 |
|
1844 | 1844 |
def process_patterns_from_file(fname, patterns_list): |
1845 | 1845 |
try: |
1846 | 1846 |
fn = open(fname, "rt") |
1847 | 1847 |
except IOError, e: |
1848 | 1848 |
error(e) |
1849 |
- sys.exit(1) |
|
1849 |
+ sys.exit(EX_IOERR) |
|
1850 | 1850 |
for pattern in fn: |
1851 | 1851 |
pattern = pattern.strip() |
1852 | 1852 |
if re.match("^#", pattern) or re.match("^\s*$", pattern): |
... | ... |
@@ -2162,7 +2212,7 @@ def main(): |
2162 | 2162 |
|
2163 | 2163 |
if options.show_version: |
2164 | 2164 |
output(u"s3cmd version %s" % PkgInfo.version) |
2165 |
- sys.exit(0) |
|
2165 |
+ sys.exit(EX_OK) |
|
2166 | 2166 |
|
2167 | 2167 |
if options.quiet: |
2168 | 2168 |
try: |
... | ... |
@@ -2175,7 +2225,7 @@ def main(): |
2175 | 2175 |
## Now finally parse the config file |
2176 | 2176 |
if not options.config: |
2177 | 2177 |
error(u"Can't find a config file. Please use --config option.") |
2178 |
- sys.exit(1) |
|
2178 |
+ sys.exit(EX_CONFIG) |
|
2179 | 2179 |
|
2180 | 2180 |
try: |
2181 | 2181 |
cfg = Config(options.config, options.access_key, options.secret_key) |
... | ... |
@@ -2186,7 +2236,7 @@ def main(): |
2186 | 2186 |
error(u"%s: %s" % (options.config, e.strerror)) |
2187 | 2187 |
error(u"Configuration file not available.") |
2188 | 2188 |
error(u"Consider using --configure parameter to create one.") |
2189 |
- sys.exit(1) |
|
2189 |
+ sys.exit(EX_CONFIG) |
|
2190 | 2190 |
|
2191 | 2191 |
# allow commandline verbosity config to override config file |
2192 | 2192 |
if options.verbosity is not None: |
... | ... |
@@ -2322,20 +2372,20 @@ def main(): |
2322 | 2322 |
if cfg.encrypt and cfg.gpg_passphrase == "": |
2323 | 2323 |
error(u"Encryption requested but no passphrase set in config file.") |
2324 | 2324 |
error(u"Please re-run 's3cmd --configure' and supply it.") |
2325 |
- sys.exit(1) |
|
2325 |
+ sys.exit(EX_CONFIG) |
|
2326 | 2326 |
|
2327 | 2327 |
if options.dump_config: |
2328 | 2328 |
cfg.dump_config(sys.stdout) |
2329 |
- sys.exit(0) |
|
2329 |
+ sys.exit(EX_OK) |
|
2330 | 2330 |
|
2331 | 2331 |
if options.run_configure: |
2332 | 2332 |
# 'args' may contain the test-bucket URI |
2333 | 2333 |
run_configure(options.config, args) |
2334 |
- sys.exit(0) |
|
2334 |
+ sys.exit(EX_OK) |
|
2335 | 2335 |
|
2336 | 2336 |
if len(args) < 1: |
2337 | 2337 |
optparser.print_help() |
2338 |
- sys.exit(1) |
|
2338 |
+ sys.exit(EX_USAGE) |
|
2339 | 2339 |
|
2340 | 2340 |
## Unicodise all remaining arguments: |
2341 | 2341 |
args = [unicodise(arg) for arg in args] |
... | ... |
@@ -2349,17 +2399,20 @@ def main(): |
2349 | 2349 |
cmd_func = commands[command]["func"] |
2350 | 2350 |
except KeyError, e: |
2351 | 2351 |
error(u"Invalid command: %s" % e) |
2352 |
- sys.exit(1) |
|
2352 |
+ sys.exit(EX_USAGE) |
|
2353 | 2353 |
|
2354 | 2354 |
if len(args) < commands[command]["argc"]: |
2355 | 2355 |
error(u"Not enough parameters for command '%s'" % command) |
2356 |
- sys.exit(1) |
|
2356 |
+ sys.exit(EX_USAGE) |
|
2357 | 2357 |
|
2358 | 2358 |
try: |
2359 |
- cmd_func(args) |
|
2359 |
+ rc = cmd_func(args) |
|
2360 |
+ if rc is None: # if we missed any cmd_*() returns |
|
2361 |
+ rc = EX_GENERAL |
|
2362 |
+ return rc |
|
2360 | 2363 |
except S3Error, e: |
2361 | 2364 |
error(u"S3 error: %s" % e) |
2362 |
- sys.exit(1) |
|
2365 |
+ sys.exit(EX_SOFTWARE) |
|
2363 | 2366 |
|
2364 | 2367 |
def report_exception(e, msg=''): |
2365 | 2368 |
sys.stderr.write(u""" |
... | ... |
@@ -2419,6 +2472,7 @@ if __name__ == '__main__': |
2419 | 2419 |
## Our modules |
2420 | 2420 |
## Keep them in try/except block to |
2421 | 2421 |
## detect any syntax errors in there |
2422 |
+ from S3.ExitCodes import * |
|
2422 | 2423 |
from S3.Exceptions import * |
2423 | 2424 |
from S3 import PkgInfo |
2424 | 2425 |
from S3.S3 import S3 |
... | ... |
@@ -2434,27 +2488,39 @@ if __name__ == '__main__': |
2434 | 2434 |
from S3.FileLists import * |
2435 | 2435 |
from S3.MultiPart import MultiPartUpload |
2436 | 2436 |
|
2437 |
- main() |
|
2438 |
- sys.exit(0) |
|
2437 |
+ rc = main() |
|
2438 |
+ sys.exit(rc) |
|
2439 | 2439 |
|
2440 | 2440 |
except ImportError, e: |
2441 | 2441 |
report_exception(e) |
2442 |
- sys.exit(1) |
|
2442 |
+ sys.exit(EX_GENERAL) |
|
2443 | 2443 |
|
2444 |
- except ParameterError, e: |
|
2444 |
+ except (ParameterError, InvalidFileError), e: |
|
2445 | 2445 |
error(u"Parameter problem: %s" % e) |
2446 |
- sys.exit(1) |
|
2446 |
+ sys.exit(EX_USAGE) |
|
2447 |
+ |
|
2448 |
+ except (S3DownloadError, S3UploadError, S3RequestError), e: |
|
2449 |
+ error(u"S3 Temporary Error: %s. Please try again later." % e) |
|
2450 |
+ sys.exit(EX_TEMPFAIL) |
|
2451 |
+ |
|
2452 |
+ except (S3Error, S3Exception, S3ResponseError, CloudFrontError), e: |
|
2453 |
+ report_exception(e) |
|
2454 |
+ sys.exit(EX_SOFTWARE) |
|
2447 | 2455 |
|
2448 | 2456 |
except SystemExit, e: |
2449 | 2457 |
sys.exit(e.code) |
2450 | 2458 |
|
2451 | 2459 |
except KeyboardInterrupt: |
2452 | 2460 |
sys.stderr.write("See ya!\n") |
2453 |
- sys.exit(1) |
|
2461 |
+ sys.exit(EX_BREAK) |
|
2462 |
+ |
|
2463 |
+ except IOError, e: |
|
2464 |
+ error(e) |
|
2465 |
+ sys.exit(EX_IOERR) |
|
2454 | 2466 |
|
2455 |
- except (OSError, IOError), e: |
|
2467 |
+ except OSError, e: |
|
2456 | 2468 |
error(e) |
2457 |
- sys.exit(1) |
|
2469 |
+ sys.exit(EX_OSERR) |
|
2458 | 2470 |
|
2459 | 2471 |
except MemoryError: |
2460 | 2472 |
msg = """ |
... | ... |
@@ -2465,7 +2531,7 @@ The solutions to this are: |
2465 | 2465 |
2) use a 64-bit python on a 64-bit OS with >8GB RAM |
2466 | 2466 |
""" |
2467 | 2467 |
sys.stderr.write(msg) |
2468 |
- sys.exit(1) |
|
2468 |
+ sys.exit(EX_OSERR) |
|
2469 | 2469 |
|
2470 | 2470 |
except UnicodeEncodeError, e: |
2471 | 2471 |
lang = os.getenv("LANG") |
... | ... |
@@ -2476,10 +2542,10 @@ Please set LANG=en_US.UTF-8 or similar in your environment before |
2476 | 2476 |
invoking s3cmd. |
2477 | 2477 |
""" % lang |
2478 | 2478 |
report_exception(e, msg) |
2479 |
- sys.exit(1) |
|
2479 |
+ sys.exit(EX_GENERAL) |
|
2480 | 2480 |
|
2481 | 2481 |
except Exception, e: |
2482 | 2482 |
report_exception(e) |
2483 |
- sys.exit(1) |
|
2483 |
+ sys.exit(EX_GENERAL) |
|
2484 | 2484 |
|
2485 | 2485 |
# vim:et:ts=4:sts=4:ai |