#!/usr/bin/env python2
# -*- coding: utf-8 -*-

## --------------------------------------------------------------------
## s3cmd - S3 client
##
## Authors   : Michal Ludvig and contributors
## Copyright : TGRMN Software - http://www.tgrmn.com - and contributors
## Website   : http://s3tools.org
## License   : GPL Version 2
## --------------------------------------------------------------------
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
## --------------------------------------------------------------------

from __future__ import absolute_import, print_function, division

import sys

if sys.version_info < (2, 6):
    sys.stderr.write(u"ERROR: Python 2.6 or higher required, sorry.\n")
    sys.exit(72)  # EX_OSFILE; the EX_* exit-code constants are not imported yet

import logging
import time
import os
import re
import errno
import glob
import traceback
import codecs
import locale
import subprocess
try:
    import htmlentitydefs
except ImportError:
    # python 3 support
    import html.entities as htmlentitydefs

try:
    unicode
except NameError:
    # python 3 support
    # In python 3, unicode -> str, and str -> bytes
    unicode = str

import socket
import shutil
import tempfile

from copy import copy
from optparse import OptionParser, Option, OptionValueError, IndentedHelpFormatter
from logging import debug, info, warning, error
from distutils.spawn import find_executable

from ssl import SSLError
import io

def output(message):
    sys.stdout.write(message + "\n")
    sys.stdout.flush()

def check_args_type(args, type, verbose_type):
    """NOTE: This function appears to be unused."""
    for arg in args:
        if S3Uri(arg).type != type:
            raise ParameterError("Expecting %s instead of '%s'" % (verbose_type, arg))

def cmd_du(args):
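    """Implement the 'du' command: print the disk usage of a bucket, or of all buckets."""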
    s3 = S3(Config())
    if len(args) > 0:
        uri = S3Uri(args[0])
        if uri.type == "s3" and uri.has_bucket():
            subcmd_bucket_usage(s3, uri)
            return EX_OK
    subcmd_bucket_usage_all(s3)
    return EX_OK

def subcmd_bucket_usage_all(s3):
    """
    Returns: sum of bucket sizes as integer
    Raises: S3Error
    """
    response = s3.list_all_buckets()

    buckets_size = 0
    for bucket in response["list"]:
        size = subcmd_bucket_usage(s3, S3Uri("s3://" + bucket["Name"]))
        if size is not None:
            buckets_size += size
    total_size, size_coeff = formatSize(buckets_size, Config().human_readable_sizes)
    total_size_str = str(total_size) + size_coeff
    output(u"".rjust(8, "-"))
    output(u"%s Total" % (total_size_str.ljust(8)))
    return buckets_size

def subcmd_bucket_usage(s3, uri):
    """
    Returns: bucket size as integer
    Raises: S3Error
    """
    bucket_size = 0
    object_count = 0
    extra_info = u''

    bucket = uri.bucket()
    prefix = uri.object()
    try:
        for _, _, objects in s3.bucket_list_streaming(bucket, prefix=prefix, recursive=True):
            for obj in objects:
                bucket_size += int(obj["Size"])
                object_count += 1

    except S3Error as e:
        if e.info["Code"] in S3.codes:
            error(S3.codes[e.info["Code"]] % bucket)
        raise

    except KeyboardInterrupt as e:
        extra_info = u' [interrupted]'

    total_size, size_coeff = formatSize(bucket_size, Config().human_readable_sizes)
    total_size_str = str(total_size) + size_coeff
    output(u"%s %s objects %s%s" % (total_size_str.ljust(8), object_count, uri, extra_info))
    return bucket_size

def cmd_ls(args):
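    """Implement the 'ls' command: list a bucket's content, or all buckets."""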
    s3 = S3(Config())
    if len(args) > 0:
        uri = S3Uri(args[0])
        if uri.type == "s3" and uri.has_bucket():
            subcmd_bucket_list(s3, uri, cfg.limit)
            return EX_OK

    # If not an s3 URI, or no bucket was provided, list all the buckets
    subcmd_all_buckets_list(s3)
    return EX_OK

def subcmd_all_buckets_list(s3):
    response = s3.list_all_buckets()

    for bucket in sorted(response["list"], key=lambda b: b["Name"]):
        output(u"%s  s3://%s" % (formatDateTime(bucket["CreationDate"]),
                                 bucket["Name"]))

def cmd_all_buckets_list_all_content(args):
    s3 = S3(Config())

    response = s3.list_all_buckets()

    for bucket in response["list"]:
        subcmd_bucket_list(s3, S3Uri("s3://" + bucket["Name"]), cfg.limit)
        output(u"")
    return EX_OK

def subcmd_bucket_list(s3, uri, limit):
    bucket = uri.bucket()
    prefix = uri.object()

    debug(u"Bucket 's3://%s':" % bucket)
    if prefix.endswith('*'):
        prefix = prefix[:-1]
    try:
        response = s3.bucket_list(bucket, prefix = prefix, limit = limit)
    except S3Error as e:
        if e.info["Code"] in S3.codes:
            error(S3.codes[e.info["Code"]] % bucket)
        raise

    if cfg.long_listing:
        format_string = u"%(timestamp)16s %(size)9s%(coeff)1s  %(md5)32s  %(storageclass)s  %(uri)s"
    elif cfg.list_md5:
        format_string = u"%(timestamp)16s %(size)9s%(coeff)1s  %(md5)32s  %(uri)s"
    else:
        format_string = u"%(timestamp)16s %(size)9s%(coeff)1s  %(uri)s"

    for prefix in response['common_prefixes']:
        output(format_string % {
            "timestamp": "",
            "size": "DIR",
            "coeff": "",
            "md5": "",
            "storageclass": "",
            "uri": uri.compose_uri(bucket, prefix["Prefix"])})

    for object in response["list"]:
        md5 = object.get('ETag', '').strip('"\'')
        storageclass = object.get('StorageClass','')

        if cfg.list_md5:
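            # A multipart upload's ETag has the form "<md5>-<part count>" and
            # is not the MD5 of the object's data, so fetch the stored MD5.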
            if '-' in md5: # need to get md5 from the object
                object_uri = uri.compose_uri(bucket, object["Key"])
                info_response = s3.object_info(S3Uri(object_uri))
                try:
                    md5 = info_response['s3cmd-attrs']['md5']
                except KeyError:
                    pass

        size, size_coeff = formatSize(object["Size"], Config().human_readable_sizes)
        output(format_string % {
            "timestamp": formatDateTime(object["LastModified"]),
            "size" : str(size),
            "coeff": size_coeff,
            "md5" : md5,
            "storageclass" : storageclass,
            "uri": uri.compose_uri(bucket, object["Key"]),
            })

    if response["truncated"]:
        warning(u"The list is truncated because the configured limit was reached.")

def cmd_bucket_create(args):
    s3 = S3(Config())
    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
            raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
        try:
            response = s3.bucket_create(uri.bucket(), cfg.bucket_location)
            output(u"Bucket '%s' created" % uri.uri())
        except S3Error as e:
            if e.info["Code"] in S3.codes:
                error(S3.codes[e.info["Code"]] % uri.bucket())
            raise
    return EX_OK

def cmd_website_info(args):
    s3 = S3(Config())
    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
            raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
        try:
            response = s3.website_info(uri, cfg.bucket_location)
            if response:
                output(u"Bucket %s: Website configuration" % uri.uri())
                output(u"Website endpoint: %s" % response['website_endpoint'])
                output(u"Index document:   %s" % response['index_document'])
                output(u"Error document:   %s" % response['error_document'])
            else:
                output(u"Bucket %s: Unable to retrieve website configuration." % (uri.uri()))
        except S3Error as e:
            if e.info["Code"] in S3.codes:
                error(S3.codes[e.info["Code"]] % uri.bucket())
            raise
    return EX_OK

def cmd_website_create(args):
    s3 = S3(Config())
    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
            raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
        try:
            response = s3.website_create(uri, cfg.bucket_location)
            output(u"Bucket '%s': website configuration created." % (uri.uri()))
        except S3Error as e:
            if e.info["Code"] in S3.codes:
                error(S3.codes[e.info["Code"]] % uri.bucket())
            raise
    return EX_OK

def cmd_website_delete(args):
    s3 = S3(Config())
    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
            raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
        try:
            response = s3.website_delete(uri, cfg.bucket_location)
            output(u"Bucket '%s': website configuration deleted." % (uri.uri()))
        except S3Error as e:
            if e.info["Code"] in S3.codes:
                error(S3.codes[e.info["Code"]] % uri.bucket())
            raise
    return EX_OK

def cmd_expiration_set(args):
    s3 = S3(Config())
    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
            raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
        try:
            response = s3.expiration_set(uri, cfg.bucket_location)
            if response["status"] == 200:
                output(u"Bucket '%s': expiration configuration is set." % (uri.uri()))
            elif response["status"] == 204:
                output(u"Bucket '%s': expiration configuration is deleted." % (uri.uri()))
        except S3Error as e:
            if e.info["Code"] in S3.codes:
                error(S3.codes[e.info["Code"]] % uri.bucket())
            raise
    return EX_OK

def cmd_bucket_delete(args):
    def _bucket_delete_one(uri, retry=True):
        try:
            response = s3.bucket_delete(uri.bucket())
            output(u"Bucket '%s' removed" % uri.uri())
        except S3Error as e:
            if e.info['Code'] == 'NoSuchBucket':
                if cfg.force:
                    return EX_OK
                else:
                    raise
            if e.info['Code'] == 'BucketNotEmpty' and retry and (cfg.force or cfg.recursive):
                warning(u"Bucket is not empty. Removing all the objects from it first. This may take some time...")
                rc = subcmd_batch_del(uri_str = uri.uri())
                if rc == EX_OK:
                    return _bucket_delete_one(uri, False)
                else:
                    output(u"Bucket was not removed")
            elif e.info["Code"] in S3.codes:
                error(S3.codes[e.info["Code"]] % uri.bucket())
            raise
        return EX_OK

    s3 = S3(Config())
    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
            raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
        rc = _bucket_delete_one(uri)
        if rc != EX_OK:
            return rc
    return EX_OK

def cmd_object_put(args):
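    """Implement the 'put' command: upload one or more local files (or stdin) to S3."""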
    cfg = Config()
    s3 = S3(cfg)

    if len(args) == 0:
        raise ParameterError("Nothing to upload. Expecting a local file or directory and a S3 URI destination.")

    ## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash)
    destination_base_uri = S3Uri(args.pop())
    if destination_base_uri.type != 's3':
        raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri)
    destination_base = destination_base_uri.uri()

    if len(args) == 0:
        raise ParameterError("Nothing to upload. Expecting a local file or directory.")

    local_list, single_file_local, exclude_list, total_size_local = fetch_local_list(args, is_src = True)

    local_count = len(local_list)

    info(u"Summary: %d local files to upload" % local_count)

    if local_count == 0:
        raise ParameterError("Nothing to upload.")

    if local_count > 0:
        if not single_file_local and '-' in local_list.keys():
            raise ParameterError("Cannot specify multiple local files if uploading from '-' (ie stdin)")
        elif single_file_local and list(local_list.keys())[0] == "-" and destination_base.endswith("/"):
            raise ParameterError("Destination S3 URI must not end with '/' when uploading from stdin.")
        elif not destination_base.endswith("/"):
            if not single_file_local:
                raise ParameterError("Destination S3 URI must end with '/' (ie must refer to a directory on the remote side).")
            local_list[list(local_list.keys())[0]]['remote_uri'] = destination_base
        else:
            for key in local_list:
                local_list[key]['remote_uri'] = destination_base + key

    if cfg.dry_run:
        for key in exclude_list:
            output(u"exclude: %s" % key)
        for key in local_list:
            if key != "-":
                nicekey = local_list[key]['full_name']
            else:
                nicekey = "<stdin>"
            output(u"upload: '%s' -> '%s'" % (nicekey, local_list[key]['remote_uri']))

        warning(u"Exiting now because of --dry-run")
        return EX_OK

    seq = 0
    ret = EX_OK
    for key in local_list:
        seq += 1

        uri_final = S3Uri(local_list[key]['remote_uri'])

        extra_headers = copy(cfg.extra_headers)
        full_name_orig = local_list[key]['full_name']
        full_name = full_name_orig
        seq_label = "[%d of %d]" % (seq, local_count)
        if Config().encrypt:
            gpg_exitcode, full_name, extra_headers["x-amz-meta-s3tools-gpgenc"] = gpg_encrypt(full_name_orig)
        attr_header = _build_attr_header(local_list, key)
        debug(u"attr_header: %s" % attr_header)
        extra_headers.update(attr_header)
        try:
            response = s3.object_put(full_name, uri_final, extra_headers, extra_label = seq_label)
        except S3UploadError as exc:
            error(u"Upload of '%s' failed too many times (Last reason: %s)" % (full_name_orig, exc))
            if cfg.stop_on_error:
                ret = EX_DATAERR
                error(u"Exiting now because of --stop-on-error")
                break
            ret = EX_PARTIAL
            continue
        except InvalidFileError as exc:
            error(u"Upload of '%s' is not possible (Reason: %s)" % (full_name_orig, exc))
            ret = EX_PARTIAL
            if cfg.stop_on_error:
                ret = EX_OSFILE
                error(u"Exiting now because of --stop-on-error")
                break
            continue
        if response is not None:
            speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
            if not Config().progress_meter:
                if full_name_orig != "-":
                    nicekey = full_name_orig
                else:
                    nicekey = "<stdin>"
                output(u"upload: '%s' -> '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
                       (nicekey, uri_final, response["size"], response["elapsed"],
                        speed_fmt[0], speed_fmt[1], seq_label))
        if Config().acl_public:
            output(u"Public URL of the object is: %s" %
                   (uri_final.public_url()))
        if Config().encrypt and full_name != full_name_orig:
            debug(u"Removing temporary encrypted file: %s" % full_name)
            os.remove(deunicodise(full_name))
    return ret

def cmd_object_get(args):
    cfg = Config()
    s3 = S3(cfg)

    ## Check arguments:
    ## if not --recursive:
    ##   - first N arguments must be S3Uri
    ##   - if the last one is S3 make current dir the destination_base
    ##   - if the last one is a directory:
    ##       - take all 'basenames' of the remote objects and
    ##         make the destination name be 'destination_base'+'basename'
    ##   - if the last one is a file or not existing:
    ##       - if the number of sources (N, above) == 1 treat it
    ##         as a filename and save the object there.
    ##       - if there's more sources -> Error
    ## if --recursive:
    ##   - first N arguments must be S3Uri
    ##       - for each Uri get a list of remote objects with that Uri as a prefix
    ##       - apply exclude/include rules
    ##       - each list item will have MD5sum, Timestamp and pointer to S3Uri
    ##         used as a prefix.
    ##   - the last arg may be '-' (stdout)
    ##   - the last arg may be a local directory - destination_base
    ##   - if the last one is S3 make current dir the destination_base
    ##   - if the last one doesn't exist check remote list:
    ##       - if there is only one item and its_prefix==its_name
    ##         download that item to the name given in last arg.
    ##       - if there are more remote items use the last arg as a destination_base
    ##         and try to create the directory (incl. all parents).
    ##
    ## In both cases we end up with a list mapping remote object names (keys) to local file names.
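    ##
    ## Illustrative examples (hypothetical bucket and keys, not from a real run):
    ##   get s3://bkt/file.txt                      -> ./file.txt
    ##   get s3://bkt/file.txt renamed.txt          -> ./renamed.txt
    ##   get s3://bkt/a.txt s3://bkt/b.txt dir/     -> dir/a.txt, dir/b.txt
    ##   get --recursive s3://bkt/prefix/ target/   -> target/..., directories created as needed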
 
    ## Each item will be a dict with the following attributes
    # {'remote_uri', 'local_filename'}
    download_list = []

    if len(args) == 0:
        raise ParameterError("Nothing to download. Expecting S3 URI.")

    if S3Uri(args[-1]).type == 'file':
        destination_base = args.pop()
    else:
        destination_base = "."

    if len(args) == 0:
        raise ParameterError("Nothing to download. Expecting S3 URI.")

    remote_list, exclude_list, remote_total_size = fetch_remote_list(args, require_attribs = False)

    remote_count = len(remote_list)

    info(u"Summary: %d remote files to download" % remote_count)

    if remote_count > 0:
        if destination_base == "-":
            ## stdout is ok for multiple remote files!
            for key in remote_list:
                remote_list[key]['local_filename'] = "-"
        elif not os.path.isdir(deunicodise(destination_base)):
            ## We were either given a file name (existing or not)
            if remote_count > 1:
                raise ParameterError("Destination must be a directory or stdout when downloading multiple sources.")
            remote_list[list(remote_list.keys())[0]]['local_filename'] = destination_base
        elif os.path.isdir(deunicodise(destination_base)):
            if destination_base[-1] != os.path.sep:
                destination_base += os.path.sep
            for key in remote_list:
                local_filename = destination_base + key
                if os.path.sep != "/":
                    local_filename = os.path.sep.join(local_filename.split("/"))
                remote_list[key]['local_filename'] = local_filename
        else:
            raise InternalError("WTF? Is it a dir or not? -- %s" % destination_base)

    if cfg.dry_run:
        for key in exclude_list:
            output(u"exclude: %s" % key)
        for key in remote_list:
            output(u"download: '%s' -> '%s'" % (remote_list[key]['object_uri_str'], remote_list[key]['local_filename']))

        warning(u"Exiting now because of --dry-run")
        return EX_OK

    seq = 0
    ret = EX_OK
    for key in remote_list:
        seq += 1
        item = remote_list[key]
        uri = S3Uri(item['object_uri_str'])
        ## Encode / Decode destination with "replace" to make sure it's compatible with current encoding
        destination = unicodise_safe(item['local_filename'])
        seq_label = "[%d of %d]" % (seq, remote_count)

        start_position = 0

        if destination == "-":
            ## stdout
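            # Write to the process's real stdout in binary mode; closefd=False
            # keeps the underlying file descriptor open when the wrapper is closed.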
            dst_stream = io.open(sys.__stdout__.fileno(), mode='wb', closefd=False)
            dst_stream.stream_name = u'<stdout>'
            file_exists = True
        else:
            ## File
            try:
                file_exists = os.path.exists(deunicodise(destination))
                try:
                    dst_stream = io.open(deunicodise(destination), mode='ab')
                    dst_stream.stream_name = destination
                except IOError as e:
                    if e.errno == errno.ENOENT:
                        basename = destination[:destination.rindex(os.path.sep)]
                        info(u"Creating directory: %s" % basename)
                        os.makedirs(deunicodise(basename))
                        dst_stream = io.open(deunicodise(destination), mode='ab')
                        dst_stream.stream_name = destination
                    else:
                        raise
                if file_exists:
                    if Config().get_continue:
                        start_position = dst_stream.tell()
                    elif Config().force:
                        start_position = 0
                        dst_stream.seek(0)
                        dst_stream.truncate()
                    elif Config().skip_existing:
                        info(u"Skipping over existing file: %s" % (destination))
                        continue
                    else:
                        dst_stream.close()
                        raise ParameterError(u"File %s already exists. Use either of --force / --continue / --skip-existing or give it a new name." % destination)
            except IOError as e:
                error(u"Skipping %s: %s" % (destination, e.strerror))
                continue
        try:
            try:
                response = s3.object_get(uri, dst_stream, destination, start_position = start_position, extra_label = seq_label)
            finally:
                dst_stream.close()
        except S3DownloadError as e:
            error(u"%s: Skipping that file.  This is usually a transient error, please try again later." % e)
            if not file_exists: # Delete, only if file didn't exist before!
                debug(u"object_get failed for '%s', deleting..." % (destination,))
                os.unlink(deunicodise(destination))
                ret = EX_PARTIAL
                if cfg.stop_on_error:
                    ret = EX_DATAERR
                    break
            continue
        except S3Error as e:
            if not file_exists: # Delete, only if file didn't exist before!
                debug(u"object_get failed for '%s', deleting..." % (destination,))
                os.unlink(deunicodise(destination))
            raise

        if "x-amz-meta-s3tools-gpgenc" in response["headers"]:
            gpg_decrypt(destination, response["headers"]["x-amz-meta-s3tools-gpgenc"])
            response["size"] = os.stat(deunicodise(destination))[6]
        if "last-modified" in response["headers"] and destination != "-":
            last_modified = time.mktime(time.strptime(response["headers"]["last-modified"], "%a, %d %b %Y %H:%M:%S GMT"))
            os.utime(deunicodise(destination), (last_modified, last_modified))
            debug("set mtime to %s" % last_modified)
        if not Config().progress_meter and destination != "-":
            speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
            output(u"download: '%s' -> '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s)" %
                (uri, destination, response["size"], response["elapsed"], speed_fmt[0], speed_fmt[1]))
        if Config().delete_after_fetch:
            s3.object_delete(uri)
            output(u"File '%s' removed after fetch" % (uri))
    return ret

def cmd_object_del(args):
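    """Implement the 'del'/'rm' command: delete remote objects, or whole prefixes with --recursive."""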
    recursive = Config().recursive
    for uri_str in args:
        uri = S3Uri(uri_str)
        if uri.type != "s3":
            raise ParameterError("Expecting S3 URI instead of '%s'" % uri_str)
        if not uri.has_object():
            if recursive and not Config().force:
                raise ParameterError("Please use --force to delete ALL contents of %s" % uri_str)
            elif not recursive:
                raise ParameterError("File name required, not only the bucket name. Alternatively use --recursive")

        if not recursive:
            # subcmd_object_del_uri returns True/False; map that onto an exit code
            rc = EX_OK if subcmd_object_del_uri(uri_str) else EX_USAGE
        elif Config().exclude or Config().include or cfg.max_delete > 0:
            # subcmd_batch_del_iterative does not support file exclusion and can't
            # accurately know how many total files will be deleted, so revert to batch delete.
            rc = subcmd_batch_del(uri_str = uri_str)
        else:
            rc = subcmd_batch_del_iterative(uri_str = uri_str)
        if rc != EX_OK:
            return rc
    return EX_OK
 
def subcmd_batch_del_iterative(uri_str = None, bucket = None):
    """Streaming version of batch deletion (does not materialize the whole list in memory before deleting).

    Differences from subcmd_batch_del:
      - Does not honor --exclude directives or cfg.max_delete (use subcmd_batch_del in those cases)
    """
    if bucket and uri_str:
        raise ValueError("Pass only one of uri_str or bucket")
    if bucket: # bucket specified
        uri_str = "s3://%s" % bucket
    s3 = S3(cfg)
    uri = S3Uri(uri_str)
    bucket = uri.bucket()

    deleted_bytes = deleted_count = 0

    for _, _, to_delete in s3.bucket_list_streaming(bucket, prefix=uri.object(), recursive=True):
        if not to_delete:
            continue
        if not cfg.dry_run:
            response = s3.object_batch_delete_uri_strs([uri.compose_uri(bucket, item['Key']) for item in to_delete])
        deleted_bytes += sum(int(item["Size"]) for item in to_delete)
        deleted_count += len(to_delete)
        output('\n'.join(u"delete: '%s'" % uri.compose_uri(bucket, p['Key']) for p in to_delete))

    if deleted_count:
        # display summary data of deleted files
        if cfg.stats:
            stats_info = StatsInfo()
            stats_info.files_deleted = deleted_count
            stats_info.size_deleted = deleted_bytes
            output(stats_info.format_output())
        else:
            total_size, size_coeff = formatSize(deleted_bytes, Config().human_readable_sizes)
            total_size_str = str(total_size) + size_coeff
            info(u"Deleted %s objects (%s) from %s" % (deleted_count, total_size_str, uri))
    else:
        warning(u"Remote list is empty.")

    return EX_OK
 
def subcmd_batch_del(uri_str = None, bucket = None, remote_list = None):
    """
    Returns: EX_OK, or False if an empty remote_list was passed
    Raises: ValueError
    """

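    # Delete in chunks of 1000 keys: the maximum number of keys S3 accepts
    # in a single multi-object delete (DeleteObjects) request.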
    def _batch_del(remote_list):
        s3 = S3(cfg)
        to_delete = remote_list[:1000]
        remote_list = remote_list[1000:]
        while len(to_delete):
            debug(u"Batch delete %d, remaining %d" % (len(to_delete), len(remote_list)))
            if not cfg.dry_run:
                response = s3.object_batch_delete(to_delete)
            output('\n'.join((u"delete: '%s'" % to_delete[p]['object_uri_str']) for p in to_delete))
            to_delete = remote_list[:1000]
            remote_list = remote_list[1000:]

    if remote_list is not None and len(remote_list) == 0:
        return False

    if len([item for item in [uri_str, bucket, remote_list] if item]) != 1:
        raise ValueError("One and only one of 'uri_str', 'bucket', 'remote_list' can be specified.")

    if bucket: # bucket specified
        uri_str = "s3://%s" % bucket
    if remote_list is None: # uri_str specified
        remote_list, exclude_list, remote_total_size = fetch_remote_list(uri_str, require_attribs = False)

    if len(remote_list) == 0:
        warning(u"Remote list is empty.")
        return EX_OK

    if cfg.max_delete > 0 and len(remote_list) > cfg.max_delete:
        warning(u"delete: maximum requested number of deletes would be exceeded, none performed.")
        return EX_OK

    _batch_del(remote_list)

    if cfg.dry_run:
        warning(u"Exiting now because of --dry-run")
    return EX_OK
 
def subcmd_object_del_uri(uri_str, recursive = None):
    """
    Returns: True if the delete was performed (or listed in dry-run mode),
             False if it was refused (e.g. --max-delete would be exceeded)
    Raises: ValueError
    """
    s3 = S3(cfg)

    if recursive is None:
        recursive = cfg.recursive

    remote_list, exclude_list, remote_total_size = fetch_remote_list(uri_str, require_attribs = False, recursive = recursive)

    remote_count = len(remote_list)

    info(u"Summary: %d remote files to delete" % remote_count)
    if cfg.max_delete > 0 and remote_count > cfg.max_delete:
        warning(u"delete: maximum requested number of deletes would be exceeded, none performed.")
        return False

    if cfg.dry_run:
        for key in exclude_list:
            output(u"exclude: %s" % key)
        for key in remote_list:
            output(u"delete: %s" % remote_list[key]['object_uri_str'])

        warning(u"Exiting now because of --dry-run")
        return True

    for key in remote_list:
        item = remote_list[key]
        response = s3.object_delete(S3Uri(item['object_uri_str']))
        output(u"delete: '%s'" % item['object_uri_str'])
    return True
 
def cmd_object_restore(args):
    s3 = S3(cfg)

    if cfg.restore_days < 1:
        raise ParameterError("You must restore a file for 1 or more days")

    # accept case-insensitive argument but fix it to match S3 API
    if cfg.restore_priority.title() not in ['Standard', 'Expedited', 'Bulk']:
        raise ParameterError("Valid restoration priorities: bulk, standard, expedited")
    else:
        cfg.restore_priority = cfg.restore_priority.title()

    remote_list, exclude_list, remote_total_size = fetch_remote_list(args, require_attribs = False, recursive = cfg.recursive)

    remote_count = len(remote_list)

    info(u"Summary: Restoring %d remote files for %d days at %s priority" % (remote_count, cfg.restore_days, cfg.restore_priority))

    if cfg.dry_run:
        for key in exclude_list:
            output(u"exclude: %s" % key)
        for key in remote_list:
            output(u"restore: '%s'" % remote_list[key]['object_uri_str'])

        warning(u"Exiting now because of --dry-run")
        return EX_OK

    for key in remote_list:
        item = remote_list[key]

        uri = S3Uri(item['object_uri_str'])
        if not item['object_uri_str'].endswith("/"):
            try:
                response = s3.object_restore(S3Uri(item['object_uri_str']))
                output(u"restore: '%s'" % item['object_uri_str'])
            except S3Error as e:
                if e.code == "RestoreAlreadyInProgress":
                    warning("%s: %s" % (e.message, item['object_uri_str']))
                else:
                    raise
        else:
            debug(u"Skipping directory since only files may be restored")
    return EX_OK
 
def subcmd_cp_mv(args, process_fce, action_str, message):
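    """Shared implementation of the 'cp', 'mv' and 'modify' commands.

    process_fce is the S3 method applied to each (src, dst) pair. For
    'modify' the source is also the destination, so a single URI suffices.
    """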
    if action_str != 'modify' and len(args) < 2:
        raise ParameterError("Expecting two or more S3 URIs for " + action_str)
    if action_str == 'modify' and len(args) < 1:
        raise ParameterError("Expecting one or more S3 URIs for " + action_str)
    if action_str != 'modify':
        dst_base_uri = S3Uri(args.pop())
    else:
        dst_base_uri = S3Uri(args[-1])

    scoreboard = ExitScoreboard()
    if dst_base_uri.type != "s3":
        raise ParameterError("Destination must be S3 URI. To download a file use 'get' or 'sync'.")
    destination_base = dst_base_uri.uri()

    remote_list, exclude_list, remote_total_size = fetch_remote_list(args, require_attribs = False)

    remote_count = len(remote_list)

    info(u"Summary: %d remote files to %s" % (remote_count, action_str))

    if not destination_base.endswith('/'):
        # Trying to mv dir1/ to dir2 will not pass a test in S3.FileLists,
        # so we don't need to test for it here.
        if len(remote_list) > 1 or cfg.recursive:
            raise ParameterError("Destination must be a directory and end with '/' when acting on a folder content or on multiple sources.")

    if cfg.recursive:
        for key in remote_list:
            remote_list[key]['dest_name'] = destination_base + key
    else:
        for key in remote_list:
            if destination_base.endswith("/"):
                remote_list[key]['dest_name'] = destination_base + key
            else:
                remote_list[key]['dest_name'] = destination_base

    if cfg.dry_run:
        for key in exclude_list:
            output(u"exclude: %s" % key)
        for key in remote_list:
            output(u"%s: '%s' -> '%s'" % (action_str, remote_list[key]['object_uri_str'], remote_list[key]['dest_name']))

        warning(u"Exiting now because of --dry-run")
        return EX_OK

    seq = 0
    for key in remote_list:
        seq += 1
        seq_label = "[%d of %d]" % (seq, remote_count)

        item = remote_list[key]
        src_uri = S3Uri(item['object_uri_str'])
        dst_uri = S3Uri(item['dest_name'])

        extra_headers = copy(cfg.extra_headers)
        try:
            response = process_fce(src_uri, dst_uri, extra_headers)
            output(message % { "src" : src_uri, "dst" : dst_uri })
            if Config().acl_public:
                info(u"Public URL is: %s" % dst_uri.public_url())
            scoreboard.success()
        except S3Error as e:
            if e.code == "NoSuchKey":
                scoreboard.notfound()
                warning(u"Key not found %s" % item['object_uri_str'])
            else:
                scoreboard.failed()
            if cfg.stop_on_error: break
    return scoreboard.rc()
 
def cmd_cp(args):
    s3 = S3(Config())
    return subcmd_cp_mv(args, s3.object_copy, "copy", u"remote copy: '%(src)s' -> '%(dst)s'")

def cmd_modify(args):
    s3 = S3(Config())
    return subcmd_cp_mv(args, s3.object_modify, "modify", u"modify: '%(src)s'")

def cmd_mv(args):
    s3 = S3(Config())
    return subcmd_cp_mv(args, s3.object_move, "move", u"move: '%(src)s' -> '%(dst)s'")
 
def cmd_info(args):
    s3 = S3(Config())

    while len(args):
        uri_arg = args.pop(0)
        uri = S3Uri(uri_arg)
        if uri.type != "s3" or not uri.has_bucket():
            raise ParameterError("Expecting S3 URI instead of '%s'" % uri_arg)

        try:
            if uri.has_object():
                info = s3.object_info(uri)
                output(u"%s (object):" % uri.uri())
                output(u"   File size: %s" % info['headers']['content-length'])
                output(u"   Last mod:  %s" % info['headers']['last-modified'])
                output(u"   MIME type: %s" % info['headers'].get('content-type', 'none'))
                output(u"   Storage:   %s" % info['headers'].get('x-amz-storage-class', 'STANDARD'))
                md5 = info['headers'].get('etag', '').strip('"\'')
                try:
                    md5 = info['s3cmd-attrs']['md5']
                except KeyError:
                    pass
                output(u"   MD5 sum:   %s" % md5)
                if 'x-amz-server-side-encryption' in info['headers']:
                    output(u"   SSE:       %s" % info['headers']['x-amz-server-side-encryption'])
                else:
                    output(u"   SSE:       none")

            else:
                info = s3.bucket_info(uri)
                output(u"%s (bucket):" % uri.uri())
                output(u"   Location:  %s" % info['bucket-location'])
                output(u"   Payer:     %s" % info['requester-pays'])
                expiration = s3.expiration_info(uri, cfg.bucket_location)
                if expiration:
                    expiration_desc = "Expiration Rule: "
                    if expiration['prefix'] == "":
                        expiration_desc += "all objects in this bucket "
                    else:
                        expiration_desc += "objects with key prefix '" + expiration['prefix'] + "' "
                    expiration_desc += "will expire in '"
                    if expiration['days']:
                        expiration_desc += expiration['days'] + "' day(s) after creation"
                    elif expiration['date']:
                        expiration_desc += expiration['date'] + "' "
                    output(u"   %s" % expiration_desc)
                else:
                    output(u"   Expiration Rule: none")

            try:
                policy = s3.get_policy(uri)
                output(u"   policy:    %s" % policy)
            except S3Error as exc:
                # Ignore the exception and don't fail the info
                # if the server doesn't support retrieving policies
                if exc.status not in [404, 501]:
                    raise exc
                output(u"   policy:    none")

            try:
                cors = s3.get_cors(uri)
                output(u"   cors:      %s" % cors)
            except S3Error as exc:
                # Ignore the exception and don't fail the info
                # if the server doesn't support retrieving the CORS configuration
                if exc.status not in [404, 501]:
                    raise exc
                output(u"   cors:      none")

            try:
                acl = s3.get_acl(uri)
                acl_grant_list = acl.getGrantList()
                for grant in acl_grant_list:
                    output(u"   ACL:       %s: %s" % (grant['grantee'], grant['permission']))
                if acl.isAnonRead():
                    output(u"   URL:       %s" % uri.public_url())
            except S3Error as exc:
                # Ignore the exception and don't fail the info
                # if the server doesn't support setting ACLs
                if exc.status not in [404, 501]:
                    raise exc

            if uri.has_object():
                # Temporary hack for performance + python3 compatibility
                try:
                    # Check python 2 first
                    info_headers_iter = info['headers'].iteritems()
                except AttributeError:
                    info_headers_iter = info['headers'].items()
                for header, value in info_headers_iter:
                    if header.startswith('x-amz-meta-'):
                        output(u"   %s: %s" % (header, value))

        except S3Error as e:
            if e.info["Code"] in S3.codes:
                error(S3.codes[e.info["Code"]] % uri.bucket())
            raise
    return EX_OK
 
def filedicts_to_keys(*args):
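    """Merge the keys of any number of file dicts and return them as one sorted list."""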
    keys = set()
    for a in args:
        keys.update(a.keys())
    return sorted(keys)

def cmd_sync_remote2remote(args):
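    """Implement 'sync' between two S3 locations: remote-copy new/changed keys, optionally delete removed ones."""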
    s3 = S3(Config())

    # Normalise s3://uri (e.g. assert trailing slash)
    destination_base = S3Uri(args[-1]).uri()

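    # Work out the destination prefix that corresponds to each source argument:
    # a source ending in '/' syncs the folder's content into destination_base,
    # otherwise the folder itself is recreated under destination_base.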
    destbase_with_source_list = set()
    for source_arg in args[:-1]:
        if source_arg.endswith('/'):
            destbase_with_source_list.add(destination_base)
        else:
            destbase_with_source_list.add(os.path.join(destination_base,
                                                       os.path.basename(source_arg)))

    stats_info = StatsInfo()

    src_list, src_exclude_list, remote_total_size = fetch_remote_list(args[:-1], recursive = True, require_attribs = True)
    dst_list, dst_exclude_list, _ = fetch_remote_list(destbase_with_source_list, recursive = True, require_attribs = True)

    src_count = len(src_list)
    orig_src_count = src_count
    dst_count = len(dst_list)
    deleted_count = 0

    info(u"Found %d source files, %d destination files" % (src_count, dst_count))

    src_list, dst_list, update_list, copy_pairs = compare_filelists(src_list, dst_list, src_remote = True, dst_remote = True)

    src_count = len(src_list)
    update_count = len(update_list)
    dst_count = len(dst_list)

    output(u"Summary: %d source files to copy, %d files at destination to delete" % (src_count + update_count, dst_count))

    ### Populate 'target_uri' only if we've got something to sync from src to dst
    for key in src_list:
        src_list[key]['target_uri'] = destination_base + key
    for key in update_list:
        update_list[key]['target_uri'] = destination_base + key

    if cfg.dry_run:
        keys = filedicts_to_keys(src_exclude_list, dst_exclude_list)
        for key in keys:
            output(u"exclude: %s" % key)
        if cfg.delete_removed:
            for key in dst_list:
                output(u"delete: '%s'" % dst_list[key]['object_uri_str'])
        for key in src_list:
            output(u"remote copy: '%s' -> '%s'" % (src_list[key]['object_uri_str'], src_list[key]['target_uri']))
        for key in update_list:
            output(u"remote copy: '%s' -> '%s'" % (update_list[key]['object_uri_str'], update_list[key]['target_uri']))
        warning(u"Exiting now because of --dry-run")
        return EX_OK
 
    # if there are copy pairs, we can't do delete_before, on the chance
    # we need one of the to-be-deleted files as a copy source.
    if len(copy_pairs) > 0:
        cfg.delete_after = True

    if cfg.delete_removed and orig_src_count == 0 and len(dst_list) and not cfg.force:
        warning(u"delete: cowardly refusing to delete because no source files were found.  Use --force to override.")
        cfg.delete_removed = False

    # Delete items in destination that are not in source
    if cfg.delete_removed and not cfg.delete_after:
        subcmd_batch_del(remote_list = dst_list)
        deleted_count = len(dst_list)

    def _upload(src_list, seq, src_count):
        file_list = sorted(src_list.keys())
        ret = EX_OK
        total_nb_files = 0
        total_size = 0
        for file in file_list:
            seq += 1
            item = src_list[file]
            src_uri = S3Uri(item['object_uri_str'])
            dst_uri = S3Uri(item['target_uri'])
            seq_label = "[%d of %d]" % (seq, src_count)
            extra_headers = copy(cfg.extra_headers)
            try:
                response = s3.object_copy(src_uri, dst_uri, extra_headers)
                output("remote copy: '%(src)s' -> '%(dst)s'" % { "src" : src_uri, "dst" : dst_uri })
                total_nb_files += 1
                total_size += item.get(u'size', 0)
            except S3Error as e:
                ret = EX_PARTIAL
                error("File '%(src)s' could not be copied: %(e)s" % { "src" : src_uri, "e" : e })
                if cfg.stop_on_error:
                    raise
        return ret, seq, total_nb_files, total_size

    # Perform the synchronization of files
    timestamp_start = time.time()
    seq = 0
    ret, seq, nb_files, size = _upload(src_list, seq, src_count + update_count)
    total_files_copied = nb_files
    total_size_copied = size

    status, seq, nb_files, size = _upload(update_list, seq, src_count + update_count)
    if ret == EX_OK:
        ret = status
    total_files_copied += nb_files
    total_size_copied += size

    n_copied, bytes_saved, failed_copy_files = remote_copy(s3, copy_pairs, destination_base)
    total_files_copied += n_copied
    total_size_copied += bytes_saved

    # process files not copied
    debug("Process files that were not remote copied")
    failed_copy_count = len(failed_copy_files)
    for key in failed_copy_files:
        failed_copy_files[key]['target_uri'] = destination_base + key
    status, seq, nb_files, size = _upload(failed_copy_files, seq, src_count + update_count + failed_copy_count)
    if ret == EX_OK:
        ret = status
    total_files_copied += nb_files
    total_size_copied += size

    # Delete items in destination that are not in source
    if cfg.delete_removed and cfg.delete_after:
        subcmd_batch_del(remote_list = dst_list)
        deleted_count = len(dst_list)

    stats_info.files = orig_src_count
    stats_info.size = remote_total_size
    stats_info.files_copied = total_files_copied
    stats_info.size_copied = total_size_copied
    stats_info.files_deleted = deleted_count

    total_elapsed = max(1.0, time.time() - timestamp_start)
    outstr = "Done. Copied %d files in %0.1f seconds, %0.2f files/s." % (total_files_copied, total_elapsed, seq / total_elapsed)
    if cfg.stats:
        outstr += stats_info.format_output()
        output(outstr)
    elif seq > 0:
        output(outstr)
    else:
        info(outstr)

    return ret
 
def cmd_sync_remote2local(args):
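    """Implement 'sync' from S3 to the local filesystem: download new/changed files, optionally delete removed ones."""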
    def _do_deletes(local_list):
        total_size = 0
        if cfg.max_delete > 0 and len(local_list) > cfg.max_delete:
            warning(u"delete: maximum requested number of deletes would be exceeded, none performed.")
            return 0, total_size
        for key in local_list:
            os.unlink(deunicodise(local_list[key]['full_name']))
            output(u"delete: '%s'" % local_list[key]['full_name'])
            total_size += local_list[key].get(u'size', 0)
        return len(local_list), total_size
 
    s3 = S3(Config())

    destination_base = args[-1]
    source_args = args[:-1]
    fetch_source_args = args[:-1]

    if not destination_base.endswith(os.path.sep):
        if fetch_source_args[0].endswith(u'/') or len(fetch_source_args) > 1:
            raise ParameterError("Destination must be a directory and end with '/' when downloading multiple sources.")

    stats_info = StatsInfo()

    remote_list, src_exclude_list, remote_total_size = fetch_remote_list(fetch_source_args, recursive = True, require_attribs = True)

    # - If a source path is like "/myPath/my_src_folder", the user wants to
    # download this single folder and optionally delete only things that have
    # been removed inside it. In that case we only have to look inside
    # destination_base/my_src_folder, not at the root of destination_base.
    # - If it is like "/myPath/my_src_folder/", the user wants to sync the
    # content of that folder.
    destbase_with_source_list = set()
    for source_arg in fetch_source_args:
        if source_arg.endswith('/'):
            if destination_base.endswith(os.path.sep):
                destbase_with_source_list.add(destination_base)
            else:
                destbase_with_source_list.add(destination_base + os.path.sep)
        else:
            destbase_with_source_list.add(os.path.join(destination_base,
                                                       os.path.basename(source_arg)))
    local_list, single_file_local, dst_exclude_list, local_total_size = fetch_local_list(destbase_with_source_list, is_src = False, recursive = True)
 
    local_count = len(local_list)
    remote_count = len(remote_list)
    orig_remote_count = remote_count

    info(u"Found %d remote files, %d local files" % (remote_count, local_count))

    remote_list, local_list, update_list, copy_pairs = compare_filelists(remote_list, local_list, src_remote = True, dst_remote = False)

    local_count = len(local_list)
    remote_count = len(remote_list)
    update_count = len(update_list)
    copy_pairs_count = len(copy_pairs)

    info(u"Summary: %d remote files to download, %d local files to delete, %d local files to hardlink" % (remote_count + update_count, local_count, copy_pairs_count))

    def _set_local_filename(remote_list, destination_base, source_args):
        if len(remote_list) == 0:
            return

        if destination_base.endswith(os.path.sep):
            if not os.path.exists(deunicodise(destination_base)):
                if not cfg.dry_run:
                    os.makedirs(deunicodise(destination_base))
            if not os.path.isdir(deunicodise(destination_base)):
                raise ParameterError("Destination is not an existing directory")
        elif len(remote_list) == 1 and \
             source_args[0] == remote_list[list(remote_list.keys())[0]].get(u'object_uri_str', ''):
            if os.path.isdir(deunicodise(destination_base)):
                raise ParameterError("Destination already exists and is a directory")
            remote_list[list(remote_list.keys())[0]]['local_filename'] = destination_base
            return

        if destination_base[-1] != os.path.sep:
            destination_base += os.path.sep
        for key in remote_list:
            local_filename = destination_base + key
            if os.path.sep != "/":
                local_filename = os.path.sep.join(local_filename.split("/"))
            remote_list[key]['local_filename'] = local_filename

    _set_local_filename(remote_list, destination_base, source_args)
    _set_local_filename(update_list, destination_base, source_args)

    if cfg.dry_run:
        keys = filedicts_to_keys(src_exclude_list, dst_exclude_list)
        for key in keys:
            output(u"exclude: %s" % key)
        if cfg.delete_removed:
            for key in local_list:
                output(u"delete: '%s'" % local_list[key]['full_name'])
        for key in remote_list:
            output(u"download: '%s' -> '%s'" % (remote_list[key]['object_uri_str'], remote_list[key]['local_filename']))
        for key in update_list:
            output(u"download: '%s' -> '%s'" % (update_list[key]['object_uri_str'], update_list[key]['local_filename']))

        warning(u"Exiting now because of --dry-run")
        return EX_OK
 
    # if there are copy pairs, we can't do delete_before, on the chance
    # we need one of the to-be-deleted files as a copy source.
    if len(copy_pairs) > 0:
        cfg.delete_after = True

    if cfg.delete_removed and orig_remote_count == 0 and len(local_list) and not cfg.force:
        warning(u"delete: cowardly refusing to delete because no source files were found.  Use --force to override.")
        cfg.delete_removed = False

    if cfg.delete_removed and not cfg.delete_after:
        deleted_count, deleted_size = _do_deletes(local_list)
    else:
        deleted_count, deleted_size = (0, 0)

    def _download(remote_list, seq, total, total_size, dir_cache):
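        # os.umask() can only be read by setting it, so set it to 0 and
        # immediately restore the original value.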
         original_umask = os.umask(0);
         os.umask(original_umask);
c3deb6a8
         file_list = remote_list.keys()
         file_list.sort()
47fdbcc2
         ret = EX_OK
c3deb6a8
         for file in file_list:
             seq += 1
             item = remote_list[file]
             uri = S3Uri(item['object_uri_str'])
             dst_file = item['local_filename']
7904f332
             is_empty_directory = dst_file.endswith('/')
c3deb6a8
             seq_label = "[%d of %d]" % (seq, total)
69bb35df
 
             dst_dir = unicodise(os.path.dirname(deunicodise(dst_file)))
be9ec739
             if dst_dir not in dir_cache:
69bb35df
                 dir_cache[dst_dir] = Utils.mkdir_with_parents(dst_dir)
             if dir_cache[dst_dir] is False:
                 if cfg.stop_on_error:
                     error(u"Exiting now because of --stop-on-error")
                     raise OSError("Download of '%s' failed (Reason: %s destination directory is not writable)" % (file, dst_dir))
                 error(u"Download of '%s' failed (Reason: %s destination directory is not writable)" % (file, dst_dir))
                 ret = EX_PARTIAL
                 continue
 
d439efb4
             try:
7d496da2
                 chkptfname_b = ''
69bb35df
                 if not is_empty_directory: # ignore empty directory at S3:
                     debug(u"dst_file=%s" % dst_file)
                     # create temporary files (of type .s3cmd.XXXX.tmp) in the same directory
                     # for downloading and then rename once downloaded
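                     # e.g. a download to u'dl/a.jpg' first goes to something like
                     # u'dl/.s3cmd.Xm3vQ2.tmp' (the middle part is chosen by mkstemp)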
7d496da2
                     # unicode provided to mkstemp argument
                     chkptfd, chkptfname_b = tempfile.mkstemp(u".tmp", u".s3cmd.",
                                                            os.path.dirname(dst_file))
                     with io.open(chkptfd, mode='wb') as dst_stream:
                         dst_stream.stream_name = unicodise(chkptfname_b)
                         debug(u"created chkptfname=%s" % dst_stream.stream_name)
                         response = s3.object_get(uri, dst_stream, dst_file, extra_label = seq_label)
 
69bb35df
                     # download completed, rename the file to destination
2c70a29d
                     if os.name == "nt":
                         # On Windows, rename can't overwrite an existing file,
                         # so remove the destination first
                         try:
                             os.unlink(deunicodise(dst_file))
                         except OSError:
                             pass
7d496da2
                     os.rename(chkptfname_b, deunicodise(dst_file))
                     debug(u"renamed chkptfname=%s to dst_file=%s" % (dst_stream.stream_name, dst_file))
08732e61
             except OSError as exc:
2c70a29d
                 allow_partial = True
 
                 if exc.errno == errno.EISDIR:
                     error(u"Download of '%s' failed (Reason: %s is a directory)" % (file, dst_file))
                 elif os.name != "nt" and exc.errno == errno.ETXTBSY:
                     error(u"Download of '%s' failed (Reason: %s is currently open for execute, cannot be overwritten)" % (file, dst_file))
                 elif exc.errno == errno.EPERM or exc.errno == errno.EACCES:
                     error(u"Download of '%s' failed (Reason: %s permission denied)" % (file, dst_file))
                 elif exc.errno == errno.EBUSY:
                     error(u"Download of '%s' failed (Reason: %s is busy)" % (file, dst_file))
                 elif exc.errno == errno.EFBIG:
                     error(u"Download of '%s' failed (Reason: %s is too big)" % (file, dst_file))
                 elif exc.errno == errno.ENAMETOOLONG:
                     error(u"Download of '%s' failed (Reason: File Name is too long)" % file)
 
                 elif (exc.errno == errno.ENOSPC or (os.name != "nt" and exc.errno == errno.EDQUOT)):
                     error(u"Download of '%s' failed (Reason: No space left)" % file)
                     allow_partial = False
                 else:
                     error(u"Download of '%s' failed (Reason: Unknown OSError %d)" % (file, exc.errno))
                     allow_partial = False
 
                 try:
                     # Try to remove the temp file if it exists
7d496da2
                     if chkptfname_b:
                         os.unlink(chkptfname_b)
2c70a29d
                 except:
                     pass
 
                 if allow_partial and not cfg.stop_on_error:
47fdbcc2
                     ret = EX_PARTIAL
0229551b
                     continue
7904f332
 
2c70a29d
                 ret = EX_OSFILE
62e25860
                 if allow_partial:
                     error(u"Exiting now because of --stop-on-error")
                 else:
                     error(u"Exiting now because of fatal error")
69bb35df
                 raise
08732e61
             except S3DownloadError as exc:
69bb35df
                 error(u"Download of '%s' failed too many times (Last Reason: %s). "
                       "This is usually a transient error, please try again "
                       "later." % (file, exc))
bac265c4
                 try:
7d496da2
                     os.unlink(chkptfname_b)
08732e61
                 except Exception as sub_exc:
7d496da2
                     warning(u"Error deleting temporary file %s (Reason: %s)" %
                             (dst_stream.stream_name, sub_exc))
69bb35df
                 if cfg.stop_on_error:
                     ret = EX_DATAERR
                     error(u"Exiting now because of --stop-on-error")
                     raise
                 ret = EX_PARTIAL
                 continue
08732e61
             except S3Error as exc:
69bb35df
                 warning(u"Remote file '%s'. S3Error: %s" % (exc.resource, exc))
bac265c4
                 try:
7d496da2
                     os.unlink(chkptfname_b)
08732e61
                 except Exception as sub_exc:
7d496da2
                     warning(u"Error deleting temporary file %s (Reason: %s)" %
                             (dst_stream.stream_name, sub_exc))
69bb35df
                 if cfg.stop_on_error:
7904f332
                     raise
69bb35df
                 ret = EX_PARTIAL
                 continue
7904f332
 
69bb35df
             # set permissions on the destination file
             if not is_empty_directory: # a normal file
08732e61
                 mode = 0o777 - original_umask
69bb35df
             else: # an empty directory, make it readable/executable
08732e61
                 mode = 0o775
69bb35df
             debug(u"mode=%s" % oct(mode))
             os.chmod(deunicodise(dst_file), mode)
 
             # because we don't upload empty directories,
             # we can continue the loop here, we won't be setting stat info.
             # if we do start to upload empty directories, we'll have to reconsider this.
             if is_empty_directory:
                 continue
7904f332
 
69bb35df
             try:
be9ec739
                 if 's3cmd-attrs' in response and cfg.preserve_attrs:
69bb35df
                     attrs = response['s3cmd-attrs']
be9ec739
                     if 'mode' in attrs:
69bb35df
                         os.chmod(deunicodise(dst_file), int(attrs['mode']))
be9ec739
                     if 'mtime' in attrs or 'atime' in attrs:
                         mtime = int(attrs['mtime']) if 'mtime' in attrs else int(time.time())
                         atime = int(attrs['atime']) if 'atime' in attrs else int(time.time())
69bb35df
                         os.utime(deunicodise(dst_file), (atime, mtime))
be9ec739
                     if 'uid' in attrs and 'gid' in attrs:
69bb35df
                         uid = int(attrs['uid'])
                         gid = int(attrs['gid'])
                         os.lchown(deunicodise(dst_file),uid,gid)
be9ec739
                 elif 'last-modified' in response['headers']:
69bb35df
                     last_modified = time.mktime(time.strptime(response["headers"]["last-modified"], "%a, %d %b %Y %H:%M:%S GMT"))
                     os.utime(deunicodise(dst_file), (last_modified, last_modified))
                     debug("set mtime to %s" % last_modified)
08732e61
             except OSError as e:
69bb35df
                 ret = EX_PARTIAL
                 if e.errno == errno.EEXIST:
                     warning(u"%s exists - not overwriting" % dst_file)
                     continue
                 if e.errno in (errno.EPERM, errno.EACCES):
                     warning(u"%s not writable: %s" % (dst_file, e.strerror))
519ddbda
                     if cfg.stop_on_error:
69bb35df
                         raise e
d439efb4
                     continue
69bb35df
                 raise e
             except KeyboardInterrupt:
                 warning(u"Exiting after keyboard interrupt")
                 return
08732e61
             except Exception as e:
47fdbcc2
                 ret = EX_PARTIAL
69bb35df
                 error(u"%s: %s" % (file, e))
69628e71
                 if cfg.stop_on_error:
08732e61
                     raise OSError(e)
d439efb4
                 continue
7d496da2
             finally:
                 try:
                     os.remove(chkptfname_b)
                 except:
                     pass
 
c3deb6a8
             speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
             if not Config().progress_meter:
a4387cd2
                 output(u"download: '%s' -> '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
ca9880fa
                     (uri, dst_file, response["size"], response["elapsed"], speed_fmt[0], speed_fmt[1],
c3deb6a8
                     seq_label))
             total_size += response["size"]
8928efea
             if Config().delete_after_fetch:
                 s3.object_delete(uri)
                 output(u"File '%s' removed after syncing" % (uri))
47fdbcc2
         return ret, seq, total_size
d439efb4
 
8c69a65d
     size_transferred = 0
c3deb6a8
     total_elapsed = 0.0
     timestamp_start = time.time()
     dir_cache = {}
     seq = 0
8c69a65d
     ret, seq, size_transferred = _download(remote_list, seq, remote_count + update_count, size_transferred, dir_cache)
     status, seq, size_transferred = _download(update_list, seq, remote_count + update_count, size_transferred, dir_cache)
47fdbcc2
     if ret == EX_OK:
         ret = status
d439efb4
 
8c69a65d
     n_copies, size_copies, failed_copy_list = local_copy(copy_pairs, destination_base)
4c02d97a
     _set_local_filename(failed_copy_list, destination_base, source_args)
8c69a65d
     status, seq, size_transferred = _download(failed_copy_list, seq, len(failed_copy_list) + remote_count + update_count, size_transferred, dir_cache)
47fdbcc2
     if ret == EX_OK:
         ret = status
fccd98cc
 
8c69a65d
     if cfg.delete_removed and cfg.delete_after:
         deleted_count, deleted_size = _do_deletes(local_list)
 
0f44d611
     total_elapsed = max(1.0, time.time() - timestamp_start)
6eae2c60
     speed_fmt = formatSize(size_transferred / total_elapsed, human_readable = True, floating_point = True)
8c69a65d
 
     stats_info.files = orig_remote_count
     stats_info.size = remote_total_size
     stats_info.files_transferred = len(failed_copy_list) + remote_count + update_count
     stats_info.size_transferred = size_transferred
     stats_info.files_copied = n_copies
     stats_info.size_copied = size_copies
     stats_info.files_deleted = deleted_count
     stats_info.size_deleted = deleted_size
d439efb4
 
     # Only print out the result if any work has been done or
     # if the user asked for verbose output
8c69a65d
     outstr = "Done. Downloaded %d bytes in %0.1f seconds, %0.2f %sB/s." % (size_transferred, total_elapsed, speed_fmt[0], speed_fmt[1])
     if cfg.stats:
         outstr += stats_info.format_output()
         output(outstr)
     elif size_transferred > 0:
d439efb4
         output(outstr)
     else:
         info(outstr)
01fe3a25
 
47fdbcc2
     return ret
7ee75496
 
fccd98cc
 def local_copy(copy_pairs, destination_base):
     # Do NOT hardlink local files by default, that'd be silly
     # For instance all empty files would become hardlinked together!
8c69a65d
     saved_bytes = 0
7800900e
     failed_copy_list = FileDict()
77675162
     for (src_obj, dst1, relative_file) in copy_pairs:
e40adda8
         src_file = os.path.join(destination_base, dst1)
         dst_file = os.path.join(destination_base, relative_file)
8c13a108
         dst_dir = unicodise(os.path.dirname(deunicodise(dst_file)))
ddb5ef90
         try:
8c13a108
             if not os.path.isdir(deunicodise(dst_dir)):
fccd98cc
                 debug("MKDIR %s" % dst_dir)
8c13a108
                 os.makedirs(deunicodise(dst_dir))
fccd98cc
             debug(u"Copying %s to %s" % (src_file, dst_file))
8c13a108
             shutil.copy2(deunicodise(src_file), deunicodise(dst_file))
8c69a65d
             saved_bytes += src_obj.get(u'size', 0)
08732e61
         except (IOError, OSError) as e:
4c02d97a
             warning(u'Unable to copy or hardlink files %s -> %s (Reason: %s)' % (src_file, dst_file, e))
fccd98cc
             failed_copy_list[relative_file] = src_obj
8c69a65d
     return len(copy_pairs), saved_bytes, failed_copy_list
ddb5ef90
 
517ee933
 def remote_copy(s3, copy_pairs, destination_base):
1703df70
     saved_bytes = 0
74012f3e
     failed_copy_list = FileDict()
77675162
     for (src_obj, dst1, dst2) in copy_pairs:
1703df70
         debug(u"Remote Copying from %s to %s" % (dst1, dst2))
d4e5a52a
         dst1_uri = S3Uri(destination_base + dst1)
         dst2_uri = S3Uri(destination_base + dst2)
1703df70
         extra_headers = copy(cfg.extra_headers)
         try:
d4e5a52a
             s3.object_copy(dst1_uri, dst2_uri, extra_headers)
8c69a65d
             saved_bytes += src_obj.get(u'size', 0)
a4387cd2
             output(u"remote copy: '%s' -> '%s'" % (dst1, dst2))
1703df70
         except Exception:
a4387cd2
             warning(u"Unable to remote copy files '%s' -> '%s'" % (dst1_uri, dst2_uri))
0e6685ac
             failed_copy_list[dst2] = src_obj
74012f3e
     return (len(copy_pairs), saved_bytes, failed_copy_list)
1703df70
 
5fc2bbcc
 def _build_attr_header(local_list, src):
     attrs = {}
6a5fe7d7
     if cfg.preserve_attrs:
         for attr in cfg.preserve_attrs_list:
             val = None  # ensure no stale value leaks into the next attribute
             if attr == 'uname':
                 try:
4a6991fc
                     val = Utils.urlencode_string(Utils.getpwuid_username(local_list[src]['uid']), unicode_output=True)
6a5fe7d7
                 except (KeyError, TypeError):
                     attr = "uid"
                     val = local_list[src].get('uid')
                     if val:
                         warning(u"%s: Owner username not known. Storing UID=%d instead." % (src, val))
             elif attr == 'gname':
                 try:
4a6991fc
                     val = Utils.urlencode_string(Utils.getgrgid_grpname(local_list[src].get('gid')), unicode_output=True)
6a5fe7d7
                 except (KeyError, TypeError):
                     attr = "gid"
                     val = local_list[src].get('gid')
                     if val:
                         warning(u"%s: Owner groupname not known. Storing GID=%d instead." % (src, val))
             elif attr != "md5":
                 try:
                     val = getattr(local_list[src]['sr'], 'st_' + attr)
                 except:
                     val = None
             if val is not None:
                 attrs[attr] = val
1703df70
 
6a5fe7d7
     if 'md5' in cfg.preserve_attrs_list:
         try:
             val = local_list.get_md5(src)
             if val is not None:
                 attrs['md5'] = val
         except IOError:
             pass
 
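     # The collected attributes are serialized as '/'-joined 'name:value' pairs
     # in a single header, e.g. (illustrative values only):
     #   {'x-amz-meta-s3cmd-attrs': 'gid:1000/mode:33188/mtime:1500000000/uid:1000'}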
     if attrs:
         result = ""
267a312e
         for k in sorted(attrs.keys()):
6a5fe7d7
             result += "%s:%s/" % (k, attrs[k])
         return {'x-amz-meta-s3cmd-attrs' : result[:-1]}
     else:
         return {}
1703df70
 
 
5fc2bbcc
 def cmd_sync_local2remote(args):
8c69a65d
     def _single_process(source_args):
fb6441ca
         for dest in destinations:
             ## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash)
             destination_base_uri = S3Uri(dest)
             if destination_base_uri.type != 's3':
                 raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri)
ca9880fa
             destination_base = destination_base_uri.uri()
8c69a65d
         return _child(destination_base, source_args)
fb6441ca
 
8c69a65d
     def _parent(source_args):
fb6441ca
         # Now that we've done all the disk I/O to look at the local file system and
         # calculate the md5 for each file, fork for each destination to upload to them separately
         # and in parallel
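         # e.g. (illustrative): 'sync dir/ s3://bkt1/ --add-destination s3://bkt2/'
         # forks one child per destination; the parent reaps them with os.wait()
         # and keeps the first non-zero exit status.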
         child_pids = []
47fdbcc2
         ret = EX_OK
fb6441ca
 
         for dest in destinations:
             ## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash)
             destination_base_uri = S3Uri(dest)
             if destination_base_uri.type != 's3':
                 raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri)
ca9880fa
             destination_base = destination_base_uri.uri()
fb6441ca
             child_pid = os.fork()
             if child_pid == 0:
8c69a65d
                 os._exit(_child(destination_base, source_args))
fb6441ca
             else:
                 child_pids.append(child_pid)
d439efb4
 
07c9e2de
         while len(child_pids):
             (pid, status) = os.wait()
             child_pids.remove(pid)
47fdbcc2
             if ret == EX_OK:
7b008e9c
                 ret = os.WEXITSTATUS(status)
d439efb4
 
47fdbcc2
         return ret
d439efb4
 
8c69a65d
     def _child(destination_base, source_args):
fb6441ca
         def _set_remote_uri(local_list, destination_base, single_file_local):
             if len(local_list) > 0:
                 ## Populate 'remote_uri' only if we've got something to upload
                 if not destination_base.endswith("/"):
                     if not single_file_local:
                         raise ParameterError("Destination S3 URI must end with '/' (ie must refer to a directory on the remote side).")
ca9880fa
                     local_list[local_list.keys()[0]]['remote_uri'] = destination_base
fb6441ca
                 else:
                     for key in local_list:
ca9880fa
                         local_list[key]['remote_uri'] = destination_base + key
fb6441ca
 
         def _upload(local_list, seq, total, total_size):
             file_list = sorted(local_list.keys())
47fdbcc2
             ret = EX_OK
fb6441ca
             for file in file_list:
                 seq += 1
                 item = local_list[file]
                 src = item['full_name']
                 uri = S3Uri(item['remote_uri'])
                 seq_label = "[%d of %d]" % (seq, total)
                 extra_headers = copy(cfg.extra_headers)
                 try:
6a5fe7d7
                     attr_header = _build_attr_header(local_list, file)
                     debug(u"attr_header: %s" % attr_header)
                     extra_headers.update(attr_header)
fb6441ca
                     response = s3.object_put(src, uri, extra_headers, extra_label = seq_label)
08732e61
                 except S3UploadError as exc:
69bb35df
                     error(u"Upload of '%s' failed too many times (Last reason: %s)" % (item['full_name'], exc))
69628e71
                     if cfg.stop_on_error:
69bb35df
                         ret = EX_DATAERR
                         error(u"Exiting now because of --stop-on-error")
69628e71
                         raise
69bb35df
                     ret = EX_PARTIAL
fb6441ca
                     continue
08732e61
                 except InvalidFileError as exc:
69bb35df
                     error(u"Upload of '%s' is not possible (Reason: %s)" % (item['full_name'], exc))
47fdbcc2
                     ret = EX_PARTIAL
69628e71
                     if cfg.stop_on_error:
69bb35df
                         ret = EX_OSFILE
                         error(u"Exiting now because of --stop-on-error")
69628e71
                         raise
fb6441ca
                     continue
                 speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
                 if not cfg.progress_meter:
a4387cd2
                     output(u"upload: '%s' -> '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
ca9880fa
                         (item['full_name'], uri, response["size"], response["elapsed"],
fb6441ca
                         speed_fmt[0], speed_fmt[1], seq_label))
                 total_size += response["size"]
                 uploaded_objects_list.append(uri.object())
47fdbcc2
             return ret, seq, total_size
d439efb4
 
002b4189
 
8c69a65d
         stats_info = StatsInfo()
 
         local_list, single_file_local, src_exclude_list, local_total_size = fetch_local_list(args[:-1], is_src = True, recursive = True)
 
002b4189
         # - The source path is either like "/myPath/my_src_folder", where the
         # user wants to upload this single folder and optionally only delete
         # things that have been removed inside this folder. For this case,
         # we only have to look inside destination_base/my_src_folder and not at
         # the root of destination_base.
         # - Or like "/myPath/my_src_folder/", where the user wants to sync
         # the content of this folder.
89d5a0ff
         # Special case: "." for the current folder.
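         # e.g. (illustrative): 'sync /myPath/my_src_folder s3://bkt/dst/' only
         # inspects s3://bkt/dst/my_src_folder remotely, while
         # 'sync /myPath/my_src_folder/ s3://bkt/dst/' inspects s3://bkt/dst/ itself.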
002b4189
         destbase_with_source_list = set()
         for source_arg in source_args:
c5392bcb
             if not source_arg.endswith('/') and os.path.basename(source_arg) != '.' \
                and not single_file_local:
002b4189
                 destbase_with_source_list.add(os.path.join(destination_base,
                                                     os.path.basename(source_arg)))
             else:
                 destbase_with_source_list.add(destination_base)
 
8c69a65d
         remote_list, dst_exclude_list, remote_total_size = fetch_remote_list(destbase_with_source_list, recursive = True, require_attribs = True)
d439efb4
 
fb6441ca
         local_count = len(local_list)
5cb49227
         orig_local_count = local_count
fb6441ca
         remote_count = len(remote_list)
d439efb4
 
fb6441ca
         info(u"Found %d local files, %d remote files" % (local_count, remote_count))
d439efb4
 
fb6441ca
         if single_file_local and len(local_list) == 1 and len(remote_list) == 1:
             ## Make remote_key same as local_key for comparison if we're dealing with only one file
             remote_list_entry = remote_list[remote_list.keys()[0]]
             # Flush remote_list, by the way
7800900e
             remote_list = FileDict()
5631c00c
             remote_list[local_list.keys()[0]] = remote_list_entry
d439efb4
 
a3387558
         local_list, remote_list, update_list, copy_pairs = compare_filelists(local_list, remote_list, src_remote = False, dst_remote = True)
d439efb4
 
fb6441ca
         local_count = len(local_list)
         update_count = len(update_list)
         copy_count = len(copy_pairs)
         remote_count = len(remote_list)
fd0edd02
         upload_count = local_count + update_count
d439efb4
 
fd0edd02
         info(u"Summary: %d local files to upload, %d files to remote copy, %d remote files to delete" % (upload_count, copy_count, remote_count))
c3deb6a8
 
fb6441ca
         _set_remote_uri(local_list, destination_base, single_file_local)
         _set_remote_uri(update_list, destination_base, single_file_local)
d439efb4
 
fb6441ca
         if cfg.dry_run:
1e621ce7
             keys = filedicts_to_keys(src_exclude_list, dst_exclude_list)
             for key in keys:
ca9880fa
                 output(u"exclude: %s" % key)
d439efb4
             for key in local_list:
a4387cd2
                 output(u"upload: '%s' -> '%s'" % (local_list[key]['full_name'], local_list[key]['remote_uri']))
fb6441ca
             for key in update_list:
a4387cd2
                 output(u"upload: '%s' -> '%s'" % (update_list[key]['full_name'], update_list[key]['remote_uri']))
77675162
             for (src_obj, dst1, dst2) in copy_pairs:
a4387cd2
                 output(u"remote copy: '%s' -> '%s'" % (dst1, dst2))
fb6441ca
             if cfg.delete_removed:
                 for key in remote_list:
a4387cd2
                     output(u"delete: '%s'" % remote_list[key]['object_uri_str'])
fb6441ca
 
53750b94
             warning(u"Exiting now because of --dry-run")
8f35b18a
             return EX_OK
d439efb4
 
fb6441ca
         # if there are copy pairs, we can't do delete_before, on the chance
         # we need one of the to-be-deleted files as a copy source.
         if len(copy_pairs) > 0:
             cfg.delete_after = True
 
5cb49227
         if cfg.delete_removed and orig_local_count == 0 and len(remote_list) and not cfg.force:
248a3fa4
             warning(u"delete: cowardly refusing to delete because no source files were found.  Use --force to override.")
             cfg.delete_removed = False
 
0da3046b
         if cfg.delete_removed and not cfg.delete_after and remote_list:
e0dfb66a
             subcmd_batch_del(remote_list = remote_list)
fb6441ca
 
8c69a65d
         size_transferred = 0
fb6441ca
         total_elapsed = 0.0
         timestamp_start = time.time()
8c69a65d
         ret, n, size_transferred = _upload(local_list, 0, upload_count, size_transferred)
         status, n, size_transferred = _upload(update_list, n, upload_count, size_transferred)
47fdbcc2
         if ret == EX_OK:
             ret = status
74012f3e
         n_copies, saved_bytes, failed_copy_files = remote_copy(s3, copy_pairs, destination_base)
 
         # Upload files that could not be remote copied
a4d09655
         debug("Process files that were not remote copied")
74012f3e
         failed_copy_count = len(failed_copy_files)
         _set_remote_uri(failed_copy_files, destination_base, single_file_local)
8c69a65d
         status, n, size_transferred = _upload(failed_copy_files, n, upload_count + failed_copy_count, size_transferred)
47fdbcc2
         if ret == EX_OK:
             ret = status
74012f3e
 
0da3046b
         if cfg.delete_removed and cfg.delete_after and remote_list:
e0dfb66a
             subcmd_batch_del(remote_list = remote_list)
0f44d611
         total_elapsed = max(1.0, time.time() - timestamp_start)
6eae2c60
         total_speed = total_elapsed and size_transferred / total_elapsed or 0.0
fb6441ca
         speed_fmt = formatSize(total_speed, human_readable = True, floating_point = True)
 
8c69a65d
 
         stats_info.files = orig_local_count
         stats_info.size = local_total_size
         stats_info.files_transferred = upload_count + failed_copy_count
         stats_info.size_transferred = size_transferred
         stats_info.files_copied = n_copies
         stats_info.size_copied = saved_bytes
         stats_info.files_deleted = remote_count
 
 
fb6441ca
         # Only print out the result if any work has been done or
         # if the user asked for verbose output
8c69a65d
         outstr = "Done. Uploaded %d bytes in %0.1f seconds, %0.2f %sB/s." % (size_transferred, total_elapsed, speed_fmt[0], speed_fmt[1])
         if cfg.stats:
             outstr += stats_info.format_output()
             output(outstr)
         elif size_transferred + saved_bytes > 0:
fb6441ca
             output(outstr)
         else:
             info(outstr)
d439efb4
 
47fdbcc2
         return ret
d439efb4
 
481aa3da
     def _invalidate_on_cf(destination_base_uri):
         cf = CloudFront(cfg)
         default_index_file = None
         if cfg.invalidate_default_index_on_cf or cfg.invalidate_default_index_root_on_cf:
             info_response = s3.website_info(destination_base_uri, cfg.bucket_location)
             if info_response:
                 default_index_file = info_response['index_document']
                 if len(default_index_file) < 1:
                     default_index_file = None
 
becdfa66
         results = cf.InvalidateObjects(destination_base_uri, uploaded_objects_list, default_index_file, cfg.invalidate_default_index_on_cf, cfg.invalidate_default_index_root_on_cf)
         for result in results:
             if result['status'] == 201:
                 output(u"Created invalidation request for %d paths" % len(uploaded_objects_list))
                 output(u"Check progress with: s3cmd cfinvalinfo cf://%s/%s" % (result['dist_id'], result['request_id']))
481aa3da
 
fb6441ca
     # main execution
     s3 = S3(cfg)
d439efb4
     uploaded_objects_list = []
 
fb6441ca
     if cfg.encrypt:
         error(u"S3cmd 'sync' doesn't yet support GPG encryption, sorry.")
         error(u"Either use unconditional 's3cmd put --recursive'")
         error(u"or disable encryption with --no-encrypt parameter.")
8214d4f0
         sys.exit(EX_USAGE)
d439efb4
 
8759b22e
     for arg in args[:-1]:
         if not os.path.exists(deunicodise(arg)):
             raise ParameterError("Invalid source: '%s' is not an existing file or directory" % arg)
 
fb6441ca
     destinations = [args[-1]]
     if cfg.additional_destinations:
         destinations = destinations + cfg.additional_destinations
 
     if not hasattr(os, 'fork') or len(destinations) < 2:
8c69a65d
         ret = _single_process(args[:-1])
8f35b18a
         destination_base_uri = S3Uri(destinations[-1])
0fd679c9
         if cfg.invalidate_on_cf:
             if len(uploaded_objects_list) == 0:
                 info("Nothing to invalidate in CloudFront")
             else:
                 _invalidate_on_cf(destination_base_uri)
d439efb4
     else:
8c69a65d
         ret = _parent(args[:-1])
0fd679c9
         if cfg.invalidate_on_cf:
             error(u"You cannot use both --cf-invalidate and --add-destination.")
8f35b18a
             return EX_USAGE
 
47fdbcc2
     return ret
0c7bf275
 
01fe3a25
 def cmd_sync(args):
d439efb4
     if len(args) < 2:
         raise ParameterError("Too few parameters! Expected: %s" % commands['sync']['param'])
a3387558
     if cfg.delay_updates:
         warning(u"`delay-updates` is obsolete.")
01fe3a25
 
45346b76
     for arg in args:
         if arg == u'-':
             raise ParameterError("Stdin or stdout ('-') can't be used for a source or a destination with the sync command.")
 
d439efb4
     if S3Uri(args[0]).type == "file" and S3Uri(args[-1]).type == "s3":
         return cmd_sync_local2remote(args)
     if S3Uri(args[0]).type == "s3" and S3Uri(args[-1]).type == "file":
         return cmd_sync_remote2local(args)
     if S3Uri(args[0]).type == "s3" and S3Uri(args[-1]).type == "s3":
         return cmd_sync_remote2remote(args)
     raise ParameterError("Invalid source/destination: '%s'" % "' '".join(args))
585c735a
 
 def cmd_setacl(args):
d439efb4
     s3 = S3(cfg)
 
     set_to_acl = cfg.acl_public and "Public" or "Private"
 
     if not cfg.recursive:
         old_args = args
         args = []
         for arg in old_args:
             uri = S3Uri(arg)
             if not uri.has_object():
                 if cfg.acl_public is not None:
                     info("Setting bucket-level ACL for %s to %s" % (uri.uri(), set_to_acl))
                 else:
                     info("Setting bucket-level ACL for %s" % (uri.uri()))
                 if not cfg.dry_run:
992b7daf
                     update_acl(s3, uri)
d439efb4
             else:
                 args.append(arg)
 
8c69a65d
     remote_list, exclude_list, _ = fetch_remote_list(args)
d439efb4
 
     remote_count = len(remote_list)
 
     info(u"Summary: %d remote files to update" % remote_count)
 
     if cfg.dry_run:
         for key in exclude_list:
ca9880fa
             output(u"exclude: %s" % key)
d439efb4
         for key in remote_list:
a4387cd2
             output(u"setacl: '%s'" % remote_list[key]['object_uri_str'])
d439efb4
 
53750b94
         warning(u"Exiting now because of --dry-run")
8f35b18a
         return EX_OK
d439efb4
 
     seq = 0
     for key in remote_list:
         seq += 1
         seq_label = "[%d of %d]" % (seq, remote_count)
         uri = S3Uri(remote_list[key]['object_uri_str'])
992b7daf
         update_acl(s3, uri, seq_label)
8f35b18a
     return EX_OK
585c735a
 
dd427743
 def cmd_setpolicy(args):
     s3 = S3(cfg)
e1c07294
     uri = S3Uri(args[1])
     policy_file = args[0]
8c13a108
     with open(deunicodise(policy_file), 'r') as fp:
         policy = fp.read()
e1c07294
 
8f35b18a
     if cfg.dry_run: return EX_OK
e1c07294
 
     response = s3.set_policy(uri, policy)
 
     debug(u"response - %s" % response['status'])
dd427743
     if response['status'] == 204:
e1c07294
         output(u"%s: Policy updated" % uri)
8f35b18a
     return EX_OK
e1c07294
 
 def cmd_delpolicy(args):
     s3 = S3(cfg)
     uri = S3Uri(args[0])
8f35b18a
     if cfg.dry_run: return EX_OK
e1c07294
 
     response = s3.delete_policy(uri)
 
     debug(u"response - %s" % response['status'])
     output(u"%s: Policy deleted" % uri)
8f35b18a
     return EX_OK
585c735a
 
dceb754f
 def cmd_setcors(args):
     s3 = S3(cfg)
     uri = S3Uri(args[1])
     cors_file = args[0]
     with open(deunicodise(cors_file), 'r') as fp:
         cors = fp.read()
 
     if cfg.dry_run: return EX_OK
 
     response = s3.set_cors(uri, cors)
 
     debug(u"response - %s" % response['status'])
     if response['status'] == 204:
         output(u"%s: CORS updated" % uri)
     return EX_OK
 
 def cmd_delcors(args):
     s3 = S3(cfg)
     uri = S3Uri(args[0])
     if cfg.dry_run: return EX_OK
 
     response = s3.delete_cors(uri)
 
     debug(u"response - %s" % response['status'])
     output(u"%s: CORS deleted" % uri)
     return EX_OK
 
3d00b475
 def cmd_set_payer(args):
     s3 = S3(cfg)
     uri = S3Uri(args[0])
 
     if cfg.dry_run: return EX_OK
 
     response = s3.set_payer(uri)
     if response['status'] == 200:
         output(u"%s: Payer updated" % uri)
         return EX_OK
     else:
         output(u"%s: Payer NOT updated" % uri)
         return EX_CONFLICT
 
80ad41ec
 def cmd_setlifecycle(args):
     s3 = S3(cfg)
     uri = S3Uri(args[1])
     lifecycle_policy_file = args[0]
8c13a108
     with open(deunicodise(lifecycle_policy_file), 'r') as fp:
         lifecycle_policy = fp.read()
80ad41ec
 
     if cfg.dry_run: return EX_OK
 
     response = s3.set_lifecycle_policy(uri, lifecycle_policy)
 
     debug(u"response - %s" % response['status'])
f5415b4f
     if response['status'] == 200:
80ad41ec
         output(u"%s: Lifecycle Policy updated" % uri)
     return EX_OK
 
ac1743bd
 def cmd_getlifecycle(args):
     s3 = S3(cfg)
     uri = S3Uri(args[0])
 
     response = s3.get_lifecycle_policy(uri)
 
     output(u"%s" % getPrettyFromXml(response['data']))
     return EX_OK
 
80ad41ec
 def cmd_dellifecycle(args):
     s3 = S3(cfg)
     uri = S3Uri(args[0])
     if cfg.dry_run: return EX_OK
 
     response = s3.delete_lifecycle_policy(uri)
 
     debug(u"response - %s" % response['status'])
     output(u"%s: Lifecycle Policy deleted" % uri)
     return EX_OK
 
c2354f7a
 def cmd_multipart(args):
     s3 = S3(cfg)
     uri = S3Uri(args[0])
dc071cc1
 
c2354f7a
 
     response = s3.get_multipart(uri)
     debug(u"response - %s" % response['status'])
     output(u"%s" % uri)
     tree = getTreeFromXml(response['data'])
     debug(parseNodes(tree))
dc071cc1
     output(u"Initiated\tPath\tId")
c2354f7a
     for mpupload in parseNodes(tree):
         try:
dc071cc1
             output("%s\t%s\t%s" % (mpupload['Initiated'], "s3://" + uri.bucket() + "/" + mpupload['Key'], mpupload['UploadId']))
         except KeyError:
c2354f7a
             pass
8f35b18a
     return EX_OK
c2354f7a
 
 def cmd_abort_multipart(args):
     '''{"cmd":"abortmp",   "label":"abort a multipart upload", "param":"s3://BUCKET/OBJECT Id", "func":cmd_abort_multipart, "argc":2},'''
     s3 = S3(cfg)
     uri = S3Uri(args[0])
     id = args[1]
     response = s3.abort_multipart(uri, id)
     debug(u"response - %s" % response['status'])
     output(u"%s" % uri)
8f35b18a
     return EX_OK
c2354f7a
 
e65e3f06
 def cmd_list_multipart(args):
     '''{"cmd":"listmp",    "label":"list parts of a multipart upload", "param":"s3://BUCKET/OBJECT Id", "func":cmd_list_multipart, "argc":2},'''
     s3 = S3(cfg)
     uri = S3Uri(args[0])
     id = args[1]
4d7039f4
 
e65e3f06
     response = s3.list_multipart(uri, id)
     debug(u"response - %s" % response['status'])
     tree = getTreeFromXml(response['data'])
     output(u"LastModified\t\t\tPartNumber\tETag\tSize")
     for mpupload in parseNodes(tree):
         try:
             output("%s\t%s\t%s\t%s" % (mpupload['LastModified'], mpupload['PartNumber'], mpupload['ETag'], mpupload['Size']))
         except KeyError:
             pass
8f35b18a
     return EX_OK
e65e3f06
 
cb0bbaef
 def cmd_accesslog(args):
d439efb4
     s3 = S3(cfg)
     bucket_uri = S3Uri(args.pop())
     if bucket_uri.object():
         raise ParameterError("Only bucket name is required for [accesslog] command")
     if cfg.log_target_prefix == False:
         accesslog, response = s3.set_accesslog(bucket_uri, enable = False)
     elif cfg.log_target_prefix:
         log_target_prefix_uri = S3Uri(cfg.log_target_prefix)
         if log_target_prefix_uri.type != "s3":
             raise ParameterError("--log-target-prefix must be a S3 URI")
         accesslog, response = s3.set_accesslog(bucket_uri, enable = True, log_target_prefix_uri = log_target_prefix_uri, acl_public = cfg.acl_public)
     else:   # cfg.log_target_prefix == None
         accesslog = s3.get_accesslog(bucket_uri)
 
     output(u"Access logging for: %s" % bucket_uri.uri())
     output(u"   Logging Enabled: %s" % accesslog.isLoggingEnabled())
     if accesslog.isLoggingEnabled():
         output(u"     Target prefix: %s" % accesslog.targetPrefix().uri())
         #output(u"   Public Access:   %s" % accesslog.isAclPublic())
8f35b18a
     return EX_OK
d439efb4
 
0b8ea559
 def cmd_sign(args):
d439efb4
     string_to_sign = args.pop()
     debug("string-to-sign: %r" % string_to_sign)
267a312e
     signature = Crypto.sign_string_v2(encode_to_s3(string_to_sign))
     output("Signature: %s" % decode_from_s3(signature))
8f35b18a
     return EX_OK
0b8ea559
 
ff6e561b
 def cmd_signurl(args):
136fb2d6
     expiry = args.pop()
     url_to_sign = S3Uri(args.pop())
     if url_to_sign.type != 's3':
         raise ParameterError("Must be S3Uri. Got: %s" % url_to_sign)
     debug("url to sign: %r" % url_to_sign)
b6228e9f
     signed_url = Crypto.sign_url_v2(url_to_sign, expiry)
136fb2d6
     output(signed_url)
8f35b18a
     return EX_OK
ff6e561b
 
3c07424d
 def cmd_fixbucket(args):
d439efb4
     def _unescape(text):
         ##
         # Removes HTML or XML character references and entities from a text string.
         #
         # @param text The HTML (or XML) source text.
         # @return The plain text, as a Unicode string, if necessary.
         #
         # From: http://effbot.org/zone/re-sub.htm#unescape-html
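         # e.g. _unescape(u"&#x41;&amp;") should return u"A&" - both numeric
         # character references and named entities are resolved.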
         def _unescape_fixup(m):
             text = m.group(0)
be9ec739
             if not 'apos' in htmlentitydefs.name2codepoint:
d439efb4
                 htmlentitydefs.name2codepoint['apos'] = ord("'")
             if text[:2] == "&#":
                 # character reference
                 try:
                     if text[:3] == "&#x":
                         return unichr(int(text[3:-1], 16))
                     else:
                         return unichr(int(text[2:-1]))
                 except ValueError:
                     pass
             else:
                 # named entity
                 try:
                     text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
                 except KeyError:
                     pass
             return text # leave as is
         return re.sub(r"&#?\w+;", _unescape_fixup, text)
 
     cfg.urlencoding_mode = "fixbucket"
     s3 = S3(cfg)
 
     count = 0
     for arg in args:
         culprit = S3Uri(arg)
         if culprit.type != "s3":
             raise ParameterError("Expecting S3Uri instead of: %s" % arg)
         response = s3.bucket_list_noparse(culprit.bucket(), culprit.object(), recursive = True)
         r_xent = re.compile(r"&#x[\da-fA-F]+;")
ca9880fa
         keys = re.findall("<Key>(.*?)</Key>", response['data'], re.MULTILINE | re.UNICODE)
d439efb4
         debug("Keys: %r" % keys)
         for key in keys:
             if r_xent.search(key):
                 info("Fixing: %s" % key)
                 debug("Step 1: Transforming %s" % key)
                 key_bin = _unescape(key)
                 debug("Step 2:       ... to %s" % key_bin)
                 key_new = replace_nonprintables(key_bin)
                 debug("Step 3:  ... then to %s" % key_new)
                 src = S3Uri("s3://%s/%s" % (culprit.bucket(), key_bin))
                 dst = S3Uri("s3://%s/%s" % (culprit.bucket(), key_new))
08732e61
                 if cfg.dry_run:
                     output("[--dry-run] File %r would be renamed to %s" % (key_bin, key_new))
                     continue
6e904f3a
                 try:
                     resp_move = s3.object_move(src, dst)
                     if resp_move['status'] == 200:
a4387cd2
                         output("File %r renamed to %s" % (key_bin, key_new))
6e904f3a
                         count += 1
                     else:
                         error("Something went wrong for: %r" % key)
                         error("Please report the problem to s3tools-bugs@lists.sourceforge.net")
                 except S3Error:
d439efb4
                     error("Something went wrong for: %r" % key)
                     error("Please report the problem to s3tools-bugs@lists.sourceforge.net")
6e904f3a
 
d439efb4
     if count > 0:
         warning("Fixed %d files' names. Their ACLs were reset to Private." % count)
         warning("Use 's3cmd setacl --acl-public s3://...' to make")
         warning("them publicly readable if required.")
8f35b18a
     return EX_OK
3c07424d
 
8ec1807f
 def resolve_list(lst, args):
d439efb4
     retval = []
     for item in lst:
         retval.append(item % args)
     return retval
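 # For illustration (assumed values): resolve_list(["%(gpg_command)s", "--verbose"],
 # {"gpg_command": "/usr/bin/gpg"}) returns ["/usr/bin/gpg", "--verbose"].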
8ec1807f
 
 def gpg_command(command, passphrase = ""):
d439efb4
     debug("GPG command: " + " ".join(command))
8c13a108
     command = [deunicodise(cmd_entry) for cmd_entry in command]
0129be5c
     p = subprocess.Popen(command, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT,
                          close_fds = True)
8c13a108
     p_stdout, p_stderr = p.communicate(deunicodise(passphrase) + "\n")
d439efb4
     debug("GPG output:")
     for line in p_stdout.split("\n"):
         debug("GPG: " + line)
     p_exitcode = p.wait()
     return p_exitcode
8ec1807f
 
 def gpg_encrypt(filename):
d439efb4
     tmp_filename = Utils.mktmpfile()
     args = {
         "gpg_command" : cfg.gpg_command,
         "passphrase_fd" : "0",
         "input_file" : filename,
         "output_file" : tmp_filename,
     }
ca9880fa
     info(u"Encrypting file %s to %s..." % (filename, tmp_filename))
d439efb4
     command = resolve_list(cfg.gpg_encrypt.split(" "), args)
     code = gpg_command(command, cfg.gpg_passphrase)
     return (code, tmp_filename, "gpg")
8ec1807f
 
49731b40
 def gpg_decrypt(filename, gpgenc_header = "", in_place = True):
d439efb4
     tmp_filename = Utils.mktmpfile(filename)
     args = {
         "gpg_command" : cfg.gpg_command,
         "passphrase_fd" : "0",
         "input_file" : filename,
         "output_file" : tmp_filename,
     }
ca9880fa
     info(u"Decrypting file %s to %s..." % (filename, tmp_filename))
d439efb4
     command = resolve_list(cfg.gpg_decrypt.split(" "), args)
     code = gpg_command(command, cfg.gpg_passphrase)
     if code == 0 and in_place:
ca9880fa
         debug(u"Renaming %s to %s" % (tmp_filename, filename))
8c13a108
         os.unlink(deunicodise(filename))
         os.rename(deunicodise(tmp_filename), deunicodise(filename))
d439efb4
         tmp_filename = filename
     return (code, tmp_filename)
8ec1807f
 
a2340ee7
 def run_configure(config_file, args):
d439efb4
     cfg = Config()
     options = [
200211ab
         ("access_key", "Access Key", "Access key and Secret key are your identifiers for Amazon S3. Leave them empty to use environment variables."),
d439efb4
         ("secret_key", "Secret Key"),
15a94e00
         ("bucket_location", "Default Region"),
7c1943e5
         ("host_base", "S3 Endpoint", "Use \"s3.amazonaws.com\" for the S3 Endpoint when targeting Amazon S3; change it only for S3-compatible services."),
         ("host_bucket", "DNS-style bucket+hostname:port template for accessing a bucket", "Use \"%(bucket)s.s3.amazonaws.com\" when targeting Amazon S3. The \"%(bucket)s\" and \"%(location)s\" vars can be used\nif the target S3 system supports DNS-based buckets."),
d439efb4
         ("gpg_passphrase", "Encryption password", "Encryption password is used to protect your files from reading\nby unauthorized persons while in transfer to S3"),
         ("gpg_command", "Path to GPG program"),
4c825680
         ("use_https", "Use HTTPS protocol", "When using secure HTTPS protocol all communication with Amazon S3\nservers is protected from 3rd party eavesdropping. This method is\nslower than plain HTTP, and can only be proxied with Python 2.7 or newer"),
9423f537
         ("proxy_host", "HTTP Proxy server name", "On some networks all internet access must go through a HTTP proxy.\nTry setting it here if you can't connect to S3 directly"),
d439efb4
         ("proxy_port", "HTTP Proxy server port"),
         ]
     ## Option-specific defaults
     if getattr(cfg, "gpg_command") == "":
         setattr(cfg, "gpg_command", find_executable("gpg"))
 
     if getattr(cfg, "proxy_host") == "" and os.getenv("http_proxy"):
         re_match = re.match(r"(http://)?([^:]+):(\d+)", os.getenv("http_proxy"))
         if re_match:
             setattr(cfg, "proxy_host", re_match.groups()[1])
             setattr(cfg, "proxy_port", re_match.groups()[2])
 
     try:
         while True:
             output(u"\nEnter new values or accept defaults in brackets with Enter.")
             output(u"Refer to user manual for detailed description of all options.")
             for option in options:
                 prompt = option[1]
                 ## Option-specific handling
4c825680
                 if option[0] == 'proxy_host' and getattr(cfg, 'use_https') == True and sys.hexversion < 0x02070000:
d439efb4
                     setattr(cfg, option[0], "")
                     continue
                 if option[0] == 'proxy_port' and getattr(cfg, 'proxy_host') == "":
                     setattr(cfg, option[0], 0)
                     continue
 
                 try:
                     val = getattr(cfg, option[0])
                     if type(val) is bool:
                         val = val and "Yes" or "No"
                     if val not in (None, ""):
                         prompt += " [%s]" % val
                 except AttributeError:
                     pass
 
                 if len(option) >= 3:
                     output(u"\n%s" % option[2])
 
                 val = raw_input(prompt + ": ")
                 if val != "":
                     if type(getattr(cfg, option[0])) is bool:
                         # Turn 'Yes' into True, everything else into False
                         val = val.lower().startswith('y')
                     setattr(cfg, option[0], val)
             output(u"\nNew settings:")
             for option in options:
                 output(u"  %s: %s" % (option[1], getattr(cfg, option[0])))
             val = raw_input("\nTest access with supplied credentials? [Y/n] ")
             if val.lower().startswith("y") or val == "":
                 try:
a2340ee7
                     # Default, we try to list 'all' buckets which requires
                     # ListAllMyBuckets permission
                     if len(args) == 0:
                         output(u"Please wait, attempting to list all buckets...")
                         S3(Config()).bucket_list("", "")
                     else:
                         # If user specified a bucket name directly, we check it and only it.
                         # Thus, access check can succeed even if user only has access to
                         # to a single bucket and not ListAllMyBuckets permission.
                         output(u"Please wait, attempting to list bucket: " + args[0])
                         uri = S3Uri(args[0])
                         if uri.type == "s3" and uri.has_bucket():
                             S3(Config()).bucket_list(uri.bucket(), "")
                         else:
                             raise Exception(u"Invalid bucket uri: " + args[0])
 
d439efb4
                     output(u"Success. Your access key and secret key worked fine :-)")
 
                     output(u"\nNow verifying that encryption works...")
                     if not getattr(cfg, "gpg_command") or not getattr(cfg, "gpg_passphrase"):
                         output(u"Not configured. Never mind.")
                     else:
8c13a108
                         if not os.path.isfile(deunicodise(getattr(cfg, "gpg_command"))):
d439efb4
                             raise Exception("GPG program not found")
                         filename = Utils.mktmpfile()
8c13a108
                         f = open(deunicodise(filename), "w")
d439efb4
                         f.write(sys.copyright)
                         f.close()
                         ret_enc = gpg_encrypt(filename)
                         ret_dec = gpg_decrypt(ret_enc[1], ret_enc[2], False)
                         hash = [
                             Utils.hash_file_md5(filename),
                             Utils.hash_file_md5(ret_enc[1]),
                             Utils.hash_file_md5(ret_dec[1]),
                         ]
8c13a108
                         os.unlink(deunicodise(filename))
                         os.unlink(deunicodise(ret_enc[1]))
                         os.unlink(deunicodise(ret_dec[1]))
d439efb4
                         if hash[0] == hash[2] and hash[0] != hash[1]:
                             output("Success. Encryption and decryption worked fine :-)")
                         else:
                             raise Exception("Encryption verification error.")
 
08732e61
                 except S3Error as e:
d439efb4
                     error(u"Test failed: %s" % (e))
ac785d12
                     if e.code == "AccessDenied":
a1a48f4c
                         error(u"Are you sure your keys have s3:ListAllMyBuckets permissions?")
d439efb4
                     val = raw_input("\nRetry configuration? [Y/n] ")
                     if val.lower().startswith("y") or val == "":
                         continue
08732e61
                 except Exception as e:
ac785d12
                     error(u"Test failed: %s" % (e))
                     val = raw_input("\nRetry configuration? [Y/n] ")
                     if val.lower().startswith("y") or val == "":
                         continue
d439efb4
 
 
             val = raw_input("\nSave settings? [y/N] ")
             if val.lower().startswith("y"):
                 break
             val = raw_input("Retry configuration? [Y/n] ")
             if val.lower().startswith("n"):
                 raise EOFError()
 
         ## Overwrite existing config file, make it user-readable only
08732e61
         old_mask = os.umask(0o077)
d439efb4
         try:
8c13a108
             os.remove(deunicodise(config_file))
08732e61
         except OSError as e:
d439efb4
             if e.errno != errno.ENOENT:
                 raise
         f = open(deunicodise(config_file), "w")
         os.umask(old_mask)
         cfg.dump_config(f)
         f.close()
         output(u"Configuration saved to '%s'" % config_file)
 
     except (EOFError, KeyboardInterrupt):
         output(u"\nConfiguration aborted. Changes were NOT saved.")
         return
 
08732e61
     except IOError as e:
d439efb4
         error(u"Writing config file failed: %s: %s" % (config_file, e.strerror))
8214d4f0
         sys.exit(EX_IOERR)
5a736f08
 
7484d6c8
 def process_patterns_from_file(fname, patterns_list):
d439efb4
     try:
8c13a108
         fn = open(deunicodise(fname), "rt")
08732e61
     except IOError as e:
d439efb4
         error(e)
8214d4f0
         sys.exit(EX_IOERR)
d439efb4
     for pattern in fn:
5151324e
         pattern = unicodise(pattern).strip()
d439efb4
         if re.match("^#", pattern) or re.match(r"^\s*$", pattern):
             continue
         debug(u"%s: adding rule: %s" % (fname, pattern))
         patterns_list.append(pattern)
 
     return patterns_list
7484d6c8
 
 def process_patterns(patterns_list, patterns_from, is_glob, option_txt = ""):
d439efb4
     """
     process_patterns(patterns, patterns_from, is_glob, option_txt = "")
     Process --exclude / --include GLOB and REGEXP patterns.
     'option_txt' is 'exclude' / 'include' / 'rexclude' / 'rinclude'
     Returns: patterns_compiled, patterns_text
     """
 
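     # e.g. with is_glob=True, a pattern like u'*.jpg' is first translated by
     # glob.fnmatch.translate into a regexp roughly like u'.*\.jpg\Z(?s)',
     # then compiled below.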
     patterns_compiled = []
     patterns_textual = {}
 
     if patterns_list is None:
         patterns_list = []
 
     if patterns_from:
         ## Append patterns from glob_from
         for fname in patterns_from:
             debug(u"processing --%s-from %s" % (option_txt, fname))
             patterns_list = process_patterns_from_file(fname, patterns_list)
 
     for pattern in patterns_list:
         debug(u"processing %s rule: %s" % (option_txt, pattern))
         if is_glob:
             pattern = glob.fnmatch.translate(pattern)
         r = re.compile(pattern)
         patterns_compiled.append(r)
         patterns_textual[r] = pattern
 
     return patterns_compiled, patterns_textual
2d7d5543
 
b3488bab
 def get_commands_list():
d439efb4
     return [
     {"cmd":"mb", "label":"Make bucket", "param":"s3://BUCKET", "func":cmd_bucket_create, "argc":1},
     {"cmd":"rb", "label":"Remove bucket", "param":"s3://BUCKET", "func":cmd_bucket_delete, "argc":1},
     {"cmd":"ls", "label":"List objects or buckets", "param":"[s3://BUCKET[/PREFIX]]", "func":cmd_ls, "argc":0},
2c4459ca
     {"cmd":"la", "label":"List all objects in all buckets", "param":"", "func":cmd_all_buckets_list_all_content, "argc":0},
d439efb4
     {"cmd":"put", "label":"Put file into bucket", "param":"FILE [FILE...] s3://BUCKET[/PREFIX]", "func":cmd_object_put, "argc":2},
     {"cmd":"get", "label":"Get file from bucket", "param":"s3://BUCKET/OBJECT LOCAL_FILE", "func":cmd_object_get, "argc":1},
     {"cmd":"del", "label":"Delete file from bucket", "param":"s3://BUCKET/OBJECT", "func":cmd_object_del, "argc":1},
7268831a
     {"cmd":"rm", "label":"Delete file from bucket (alias for del)", "param":"s3://BUCKET/OBJECT", "func":cmd_object_del, "argc":1},
d439efb4
     #{"cmd":"mkdir", "label":"Make a virtual S3 directory", "param":"s3://BUCKET/path/to/dir", "func":cmd_mkdir, "argc":1},
40deabb4
     {"cmd":"restore", "label":"Restore file from Glacier storage", "param":"s3://BUCKET/OBJECT", "func":cmd_object_restore, "argc":1},
23ace818
     {"cmd":"sync", "label":"Synchronize a directory tree to S3 (checks files freshness using size and md5 checksum, unless overridden by options, see below)", "param":"LOCAL_DIR s3://BUCKET[/PREFIX] or s3://BUCKET[/PREFIX] LOCAL_DIR", "func":cmd_sync, "argc":2},
d439efb4
     {"cmd":"du", "label":"Disk usage by buckets", "param":"[s3://BUCKET[/PREFIX]]", "func":cmd_du, "argc":0},
     {"cmd":"info", "label":"Get various information about Buckets or Files", "param":"s3://BUCKET[/OBJECT]", "func":cmd_info, "argc":1},
     {"cmd":"cp", "label":"Copy object", "param":"s3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]", "func":cmd_cp, "argc":2},
5b2539b0
     {"cmd":"modify", "label":"Modify object metadata", "param":"s3://BUCKET1/OBJECT", "func":cmd_modify, "argc":1},
d439efb4
     {"cmd":"mv", "label":"Move object", "param":"s3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]", "func":cmd_mv, "argc":2},
     {"cmd":"setacl", "label":"Modify Access control list for Bucket or Files", "param":"s3://BUCKET[/OBJECT]", "func":cmd_setacl, "argc":1},
e1c07294
 
     {"cmd":"setpolicy", "label":"Modify Bucket Policy", "param":"FILE s3://BUCKET", "func":cmd_setpolicy, "argc":2},
     {"cmd":"delpolicy", "label":"Delete Bucket Policy", "param":"s3://BUCKET", "func":cmd_delpolicy, "argc":1},
dceb754f
     {"cmd":"setcors", "label":"Modify Bucket CORS", "param":"FILE s3://BUCKET", "func":cmd_setcors, "argc":2},
     {"cmd":"delcors", "label":"Delete Bucket CORS", "param":"s3://BUCKET", "func":cmd_delcors, "argc":1},
 
3d00b475
     {"cmd":"payer",     "label":"Modify Bucket Requester Pays policy", "param":"s3://BUCKET", "func":cmd_set_payer, "argc":1},
a97f3baa
     {"cmd":"multipart", "label":"Show multipart uploads", "param":"s3://BUCKET [Id]", "func":cmd_multipart, "argc":1},
     {"cmd":"abortmp",   "label":"Abort a multipart upload", "param":"s3://BUCKET/OBJECT Id", "func":cmd_abort_multipart, "argc":2},
9cb307f6
 
a97f3baa
     {"cmd":"listmp",    "label":"List parts of a multipart upload", "param":"s3://BUCKET/OBJECT Id", "func":cmd_list_multipart, "argc":2},
c2354f7a
 
d439efb4
     {"cmd":"accesslog", "label":"Enable/disable bucket access logging", "param":"s3://BUCKET", "func":cmd_accesslog, "argc":1},
     {"cmd":"sign", "label":"Sign arbitrary string using the secret key", "param":"STRING-TO-SIGN", "func":cmd_sign, "argc":1},
496c8d8e
     {"cmd":"signurl", "label":"Sign an S3 URL to provide limited public access with expiry", "param":"s3://BUCKET/OBJECT <expiry_epoch|+expiry_offset>", "func":cmd_signurl, "argc":2},
d439efb4
     {"cmd":"fixbucket", "label":"Fix invalid file names in a bucket", "param":"s3://BUCKET[/PREFIX]", "func":cmd_fixbucket, "argc":1},
 
     ## Website commands
     {"cmd":"ws-create", "label":"Create Website from bucket", "param":"s3://BUCKET", "func":cmd_website_create, "argc":1},
     {"cmd":"ws-delete", "label":"Delete Website", "param":"s3://BUCKET", "func":cmd_website_delete, "argc":1},
     {"cmd":"ws-info", "label":"Info about Website", "param":"s3://BUCKET", "func":cmd_website_info, "argc":1},
 
2f39a8d3
     ## Lifecycle commands
     {"cmd":"expire", "label":"Set or delete expiration rule for the bucket", "param":"s3://BUCKET", "func":cmd_expiration_set, "argc":1},
f5415b4f
     {"cmd":"setlifecycle", "label":"Upload a lifecycle policy for the bucket", "param":"FILE s3://BUCKET", "func":cmd_setlifecycle, "argc":2},
ac1743bd
     {"cmd":"getlifecycle", "label":"Get a lifecycle policy for the bucket",    "param":"s3://BUCKET", "func":cmd_getlifecycle, "argc":1},
80ad41ec
     {"cmd":"dellifecycle", "label":"Remove a lifecycle policy for the bucket", "param":"s3://BUCKET", "func":cmd_dellifecycle, "argc":1},
2f39a8d3
 
d439efb4
     ## CloudFront commands
     {"cmd":"cflist", "label":"List CloudFront distribution points", "param":"", "func":CfCmd.info, "argc":0},
     {"cmd":"cfinfo", "label":"Display CloudFront distribution point parameters", "param":"[cf://DIST_ID]", "func":CfCmd.info, "argc":0},
     {"cmd":"cfcreate", "label":"Create CloudFront distribution point", "param":"s3://BUCKET", "func":CfCmd.create, "argc":1},
     {"cmd":"cfdelete", "label":"Delete CloudFront distribution point", "param":"cf://DIST_ID", "func":CfCmd.delete, "argc":1},
     {"cmd":"cfmodify", "label":"Change CloudFront distribution point parameters", "param":"cf://DIST_ID", "func":CfCmd.modify, "argc":1},
     #{"cmd":"cfinval", "label":"Invalidate CloudFront objects", "param":"s3://BUCKET/OBJECT [s3://BUCKET/OBJECT ...]", "func":CfCmd.invalidate, "argc":1},
     {"cmd":"cfinvalinfo", "label":"Display CloudFront invalidation request(s) status", "param":"cf://DIST_ID[/INVAL_ID]", "func":CfCmd.invalinfo, "argc":1},
     ]
3cc025ae
 
ccb78539
 def format_commands(progname, commands_list):
d439efb4
     help_text = "Commands:\n"
     for cmd in commands_list:
         help_text += "  %s\n      %s %s %s\n" % (cmd["label"], progname, cmd["cmd"], cmd["param"])
     return help_text
f4555c39
 
992b7daf
 
 def update_acl(s3, uri, seq_label=""):
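     # cfg.acl_public is tri-state: True grants anonymous READ, False
     # revokes it, and None leaves the public/private state untouched
     # so that only explicit --acl-grant/--acl-revoke rules apply.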
     something_changed = False
     acl = s3.get_acl(uri)
     debug(u"acl: %s - %r" % (uri, acl.grantees))
     if cfg.acl_public == True:
         if acl.isAnonRead():
             info(u"%s: already Public, skipping %s" % (uri, seq_label))
         else:
             acl.grantAnonRead()
             something_changed = True
     elif cfg.acl_public == False:  # we explicitly check for False, because it could be None
         if not acl.isAnonRead():
             info(u"%s: already Private, skipping %s" % (uri, seq_label))
         else:
             acl.revokeAnonRead()
             something_changed = True
 
     # update acl with arguments
     # grant first and revoke later, because revoke has priority
     if cfg.acl_grants:
         something_changed = True
         for grant in cfg.acl_grants:
             acl.grant(**grant)
 
     if cfg.acl_revokes:
         something_changed = True
         for revoke in cfg.acl_revokes:
             acl.revoke(**revoke)
 
     if not something_changed:
         return
 
     response = s3.set_acl(uri, acl)
     if response['status'] == 200:
         if cfg.acl_public in (True, False):
279c608f
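             # py2-era conditional idiom: (X and A or B) == (A if X else B)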
             set_to_acl = cfg.acl_public and "Public" or "Private"
992b7daf
             output(u"%s: ACL set to %s  %s" % (uri, set_to_acl, seq_label))
         else:
             output(u"%s: ACL updated" % uri)
 
9b7618ae
 class OptionMimeType(Option):
d439efb4
     def check_mimetype(option, opt, value):
615eed45
         if re.compile("^[a-z0-9]+/[a-z0-9+\.-]+(;.*)?$", re.IGNORECASE).match(value):
d439efb4
             return value
         raise OptionValueError("option %s: invalid MIME-Type format: %r" % (opt, value))
9b7618ae
 
4f11bf57
 class OptionS3ACL(Option):
d439efb4
     def check_s3acl(option, opt, value):
         permissions = ('read', 'write', 'read_acp', 'write_acp', 'full_control', 'all')
         try:
             permission, grantee = re.compile("^(\w+):(.+)$", re.IGNORECASE).match(value).groups()
         except AttributeError:
             # re.match() returned None: 'value' is not in PERMISSION:GRANTEE form
             raise OptionValueError("option %s: invalid S3 ACL format: %r" % (opt, value))
         if permission in permissions:
             return { 'name' : grantee, 'permission' : permission.upper() }
         raise OptionValueError("option %s: invalid S3 ACL permission: %s (valid values: %s)" %
             (opt, permission, ", ".join(permissions)))
4f11bf57
 
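 # Register the custom option value types with optparse.  Entries added to
 # TYPE_CHECKER map a type name to its validator, letting optparse check
 # and convert "mimetype" and "s3acl" values while parsing the command line.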
 class OptionAll(OptionMimeType, OptionS3ACL):
d439efb4
     TYPE_CHECKER = copy(Option.TYPE_CHECKER)
     TYPE_CHECKER["mimetype"] = OptionMimeType.check_mimetype
     TYPE_CHECKER["s3acl"] = OptionS3ACL.check_s3acl
     TYPES = Option.TYPES + ("mimetype", "s3acl")
9b7618ae
 
f4555c39
 class MyHelpFormatter(IndentedHelpFormatter):
d439efb4
     def format_epilog(self, epilog):
         if epilog:
             return "\n" + epilog + "\n"
         else:
             return ""
f4555c39
 
4a52baa8
 def main():
d439efb4
     global cfg
84168b2b
     cfg = Config()
d439efb4
     commands_list = get_commands_list()
     commands = {}
 
     ## Populate "commands" from "commands_list"
     for cmd in commands_list:
be9ec739
         if 'cmd' in cmd:
             commands[cmd['cmd']] = cmd
d439efb4
 
     optparser = OptionParser(option_class=OptionAll, formatter=MyHelpFormatter())
     #optparser.disable_interspersed_args()
 
     config_file = None
2c9cbcf2
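     ## Config file lookup order: $S3CMD_CONFIG, then s3cmd.ini in the
     ## Windows user profile, then ~/.s3cfg everywhere else.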
     if os.getenv("S3CMD_CONFIG"):
         config_file = os.getenv("S3CMD_CONFIG")
d439efb4
     elif os.name == "nt" and os.getenv("USERPROFILE"):
58431bc5
         appdata = os.getenv("APPDATA") or os.path.join(os.getenv("USERPROFILE"), 'Application Data')
         if hasattr(appdata, 'decode'):
             # python 2 on Windows returns byte strings; decode to unicode
             appdata = appdata.decode('mbcs')
         config_file = os.path.join(appdata, "s3cmd.ini")
c65c7df7
     else:
         from os.path import expanduser
         config_file = os.path.join(expanduser("~"), ".s3cfg")
d439efb4
 
f5dbc768
     autodetected_encoding = locale.getpreferredencoding() or "UTF-8"
d439efb4
 
     optparser.set_defaults(config = config_file)
 
19ae946e
     optparser.add_option(      "--configure", dest="run_configure", action="store_true", help="Invoke interactive (re)configuration tool. Optionally use as '--configure s3://some-bucket' to test access to a specific bucket instead of attempting to list them all.")
8c75d521
     optparser.add_option("-c", "--config", dest="config", metavar="FILE", help="Config file name. Defaults to $HOME/.s3cfg")
d439efb4
     optparser.add_option(      "--dump-config", dest="dump_config", action="store_true", help="Dump current configuration after parsing config files and command line options and exit.")
5b432a5b
     optparser.add_option(      "--access_key", dest="access_key", help="AWS Access Key")
     optparser.add_option(      "--secret_key", dest="secret_key", help="AWS Secret Key")
37d52a0a
     optparser.add_option(      "--access_token", dest="access_token", help="AWS Access Token")
d439efb4
 
     optparser.add_option("-n", "--dry-run", dest="dry_run", action="store_true", help="Only show what should be uploaded or downloaded but don't actually do it. May still perform S3 requests to get bucket listings and other information though (only for file transfer commands)")
 
b5dc9b76
     optparser.add_option("-s", "--ssl", dest="use_https", action="store_true", help="Use HTTPS connection when communicating with S3. (default)")
     optparser.add_option(      "--no-ssl", dest="use_https", action="store_false", help="Don't use HTTPS.")
d439efb4
     optparser.add_option("-e", "--encrypt", dest="encrypt", action="store_true", help="Encrypt files before uploading to S3.")
     optparser.add_option(      "--no-encrypt", dest="encrypt", action="store_false", help="Don't encrypt files.")
     optparser.add_option("-f", "--force", dest="force", action="store_true", help="Force overwrite and other dangerous operations.")
     optparser.add_option(      "--continue", dest="get_continue", action="store_true", help="Continue getting a partially downloaded file (only for [get] command).")
dc071cc1
     optparser.add_option(      "--continue-put", dest="put_continue", action="store_true", help="Continue uploading partially uploaded files or multipart upload parts.  Restarts/parts files that don't have matching size and md5.  Skips files/parts that do.  Note: md5sum checks are not always sufficient to check (part) file equality.  Enable this at your own risk.")
     optparser.add_option(      "--upload-id", dest="upload_id", help="UploadId for Multipart Upload, in case you want continue an existing upload (equivalent to --continue-put) and there are multiple partial uploads.  Use s3cmd multipart [URI] to see what UploadIds are associated with the given URI.")
d439efb4
     optparser.add_option(      "--skip-existing", dest="skip_existing", action="store_true", help="Skip over files that exist at the destination (only for [get] and [sync] commands).")
     optparser.add_option("-r", "--recursive", dest="recursive", action="store_true", help="Recursive upload, download or removal.")
     optparser.add_option(      "--check-md5", dest="check_md5", action="store_true", help="Check MD5 sums when comparing files for [sync]. (default)")
     optparser.add_option(      "--no-check-md5", dest="check_md5", action="store_false", help="Do not check MD5 sums when comparing files for [sync]. Only size will be compared. May significantly speed up transfer but may also miss some changed files.")
     optparser.add_option("-P", "--acl-public", dest="acl_public", action="store_true", help="Store objects with ACL allowing read for anyone.")
     optparser.add_option(      "--acl-private", dest="acl_public", action="store_false", help="Store objects with default ACL allowing access for you only.")
     optparser.add_option(      "--acl-grant", dest="acl_grants", type="s3acl", action="append", metavar="PERMISSION:EMAIL or USER_CANONICAL_ID", help="Grant stated permission to a given amazon user. Permission is one of: read, write, read_acp, write_acp, full_control, all")
     optparser.add_option(      "--acl-revoke", dest="acl_revokes", type="s3acl", action="append", metavar="PERMISSION:USER_CANONICAL_ID", help="Revoke stated permission for a given amazon user. Permission is one of: read, write, read_acp, wr     ite_acp, full_control, all")
 
27d294f3
     optparser.add_option("-D", "--restore-days", dest="restore_days", action="store", help="Number of days to keep restored file available (only for 'restore' command).", metavar="NUM")
     optparser.add_option(      "--restore-priority", dest="restore_priority", action="store", choices=['standard', 'expedited', 'bulk'], help="Priority for restoring files from S3 Glacier (only for 'restore' command). Choices available: bulk, standard, expedited")
40deabb4
 
15739e51
     optparser.add_option(      "--delete-removed", dest="delete_removed", action="store_true", help="Delete destination objects with no corresponding source file [sync]")
     optparser.add_option(      "--no-delete-removed", dest="delete_removed", action="store_false", help="Don't delete destination objects.")
d5dc2c0f
     optparser.add_option(      "--delete-after", dest="delete_after", action="store_true", help="Perform deletes after new uploads [sync]")
a3387558
     optparser.add_option(      "--delay-updates", dest="delay_updates", action="store_true", help="*OBSOLETE* Put all updated files into place at end [sync]")  # OBSOLETE
f230f799
     optparser.add_option(      "--max-delete", dest="max_delete", action="store", help="Do not delete more than NUM files. [del] and [sync]", metavar="NUM")
76b5ea90
     optparser.add_option(      "--limit", dest="limit", action="store", help="Limit number of objects returned in the response body (only for [ls] and [la] commands)", metavar="NUM")
07c9e2de
     optparser.add_option(      "--add-destination", dest="additional_destinations", action="append", help="Additional destination for parallel uploads, in addition to last arg.  May be repeated.")
552df705
     optparser.add_option(      "--delete-after-fetch", dest="delete_after_fetch", action="store_true", help="Delete remote objects after fetching to local file (only for [get] and [sync] commands).")
d439efb4
     optparser.add_option("-p", "--preserve", dest="preserve_attrs", action="store_true", help="Preserve filesystem attributes (mode, ownership, timestamps). Default for [sync] command.")
     optparser.add_option(      "--no-preserve", dest="preserve_attrs", action="store_false", help="Don't store FS attributes")
     optparser.add_option(      "--exclude", dest="exclude", action="append", metavar="GLOB", help="Filenames and paths matching GLOB will be excluded from sync")
     optparser.add_option(      "--exclude-from", dest="exclude_from", action="append", metavar="FILE", help="Read --exclude GLOBs from FILE")
     optparser.add_option(      "--rexclude", dest="rexclude", action="append", metavar="REGEXP", help="Filenames and paths matching REGEXP (regular expression) will be excluded from sync")
     optparser.add_option(      "--rexclude-from", dest="rexclude_from", action="append", metavar="FILE", help="Read --rexclude REGEXPs from FILE")
     optparser.add_option(      "--include", dest="include", action="append", metavar="GLOB", help="Filenames and paths matching GLOB will be included even if previously excluded by one of --(r)exclude(-from) patterns")
     optparser.add_option(      "--include-from", dest="include_from", action="append", metavar="FILE", help="Read --include GLOBs from FILE")
     optparser.add_option(      "--rinclude", dest="rinclude", action="append", metavar="REGEXP", help="Same as --include but uses REGEXP (regular expression) instead of GLOB")
     optparser.add_option(      "--rinclude-from", dest="rinclude_from", action="append", metavar="FILE", help="Read --rinclude REGEXPs from FILE")
 
b76c5b38
     optparser.add_option(      "--files-from", dest="files_from", action="append", metavar="FILE", help="Read list of source-file names from FILE. Use - to read from stdin.")
4a6ffc5d
     optparser.add_option(      "--region", "--bucket-location", metavar="REGION", dest="bucket_location", help="Region to create bucket in. As of now the regions are: us-east-1, us-west-1, us-west-2, eu-west-1, eu-central-1, ap-northeast-1, ap-southeast-1, ap-southeast-2, sa-east-1")
84168b2b
     optparser.add_option(      "--host", metavar="HOSTNAME", dest="host_base", help="HOSTNAME:PORT for S3 endpoint (default: %s, alternatives such as s3-eu-west-1.amazonaws.com). You should also set --host-bucket." % (cfg.host_base))
     optparser.add_option(      "--host-bucket", dest="host_bucket", help="DNS-style bucket+hostname:port template for accessing a bucket (default: %s)" % (cfg.host_bucket))
d439efb4
     optparser.add_option(      "--reduced-redundancy", "--rr", dest="reduced_redundancy", action="store_true", help="Store object with 'Reduced redundancy'. Lower per-GB price. [put, cp, mv]")
2398c792
     optparser.add_option(      "--no-reduced-redundancy", "--no-rr", dest="reduced_redundancy", action="store_false", help="Store object without 'Reduced redundancy'. Higher per-GB price. [put, cp, mv]")
efd41714
     optparser.add_option(      "--storage-class", dest="storage_class", action="store", metavar="CLASS", help="Store object with specified CLASS (STANDARD, STANDARD_IA, or REDUCED_REDUNDANCY). Lower per-GB price. [put, cp, mv]")
d439efb4
     optparser.add_option(      "--access-logging-target-prefix", dest="log_target_prefix", help="Target prefix for access logs (S3 URI) (for [cfmodify] and [accesslog] commands)")
     optparser.add_option(      "--no-access-logging", dest="log_target_prefix", action="store_false", help="Disable access logging (for [cfmodify] and [accesslog] commands)")
 
fdfb1a0c
     optparser.add_option(      "--default-mime-type", dest="default_mime_type", type="mimetype", action="store", help="Default MIME-type for stored objects. Application default is binary/octet-stream.")
69bee45f
     optparser.add_option("-M", "--guess-mime-type", dest="guess_mime_type", action="store_true", help="Guess MIME-type of files by their extension or mime magic. Fall back to default MIME-Type as specified by --default-mime-type option")
35612e61
     optparser.add_option(      "--no-guess-mime-type", dest="guess_mime_type", action="store_false", help="Don't guess MIME-type and use the default type instead.")
b4207d9c
     optparser.add_option(      "--no-mime-magic", dest="use_mime_magic", action="store_false", help="Don't use mime magic when guessing MIME-type.")
35612e61
     optparser.add_option("-m", "--mime-type", dest="mime_type", type="mimetype", metavar="MIME/TYPE", help="Force MIME-type. Override both --default-mime-type and --guess-mime-type.")
d439efb4
 
a97e290c
     optparser.add_option(      "--add-header", dest="add_header", action="append", metavar="NAME:VALUE", help="Add a given HTTP header to the upload request. Can be used multiple times. For instance set 'Expires' or 'Cache-Control' headers (or both) using this option.")
bb636cd1
     optparser.add_option(      "--remove-header", dest="remove_headers", action="append", metavar="NAME", help="Remove a given HTTP header.  Can be used multiple times.  For instance, remove 'Expires' or 'Cache-Control' headers (or both) using this option. [modify]")
d439efb4
 
bb636cd1
     optparser.add_option(      "--server-side-encryption", dest="server_side_encryption", action="store_true", help="Specifies that server-side encryption will be used when putting objects. [put, sync, cp, modify]")
d140f2b1
     optparser.add_option(      "--server-side-encryption-kms-id", dest="kms_key", action="store", help="Specifies the key id used for server-side encryption with AWS KMS-Managed Keys (SSE-KMS) when putting objects. [put, sync, cp, modify]")
754f575d
 
f5dbc768
     optparser.add_option(      "--encoding", dest="encoding", metavar="ENCODING", help="Override autodetected terminal and filesystem encoding (character set). Autodetected: %s" % autodetected_encoding)
833f07bb
     optparser.add_option(      "--add-encoding-exts", dest="add_encoding_exts", metavar="EXTENSIONs", help="Add encoding to these comma delimited extensions i.e. (css,js,html) when uploading to S3 )")
d439efb4
     optparser.add_option(      "--verbatim", dest="urlencoding_mode", action="store_const", const="verbatim", help="Use the S3 name as given on the command line. No pre-processing, encoding, etc. Use with caution!")
 
589be07b
     optparser.add_option(      "--disable-multipart", dest="enable_multipart", action="store_false", help="Disable multipart upload on files bigger than --multipart-chunk-size-mb")
a97e290c
     optparser.add_option(      "--multipart-chunk-size-mb", dest="multipart_chunk_size_mb", type="int", action="store", metavar="SIZE", help="Size of each chunk of a multipart upload. Files bigger than SIZE are automatically uploaded as multithreaded-multipart, smaller files are uploaded using the traditional method. SIZE is in Mega-Bytes, default chunk size is 15MB, minimum allowed chunk size is 5MB, maximum is 5GB.")
880e0dec
 
d439efb4
     optparser.add_option(      "--list-md5", dest="list_md5", action="store_true", help="Include MD5 sums in bucket listings (only for 'ls' command).")
     optparser.add_option("-H", "--human-readable-sizes", dest="human_readable_sizes", action="store_true", help="Print sizes in human readable form (eg 1kB instead of 1234).")
 
867ed9ba
     optparser.add_option(      "--ws-index", dest="website_index", action="store", help="Name of index-document (only for [ws-create] command)")
     optparser.add_option(      "--ws-error", dest="website_error", action="store", help="Name of error-document (only for [ws-create] command)")
d439efb4
 
a97e290c
     optparser.add_option(      "--expiry-date", dest="expiry_date", action="store", help="Indicates when the expiration rule takes effect. (only for [expire] command)")
     optparser.add_option(      "--expiry-days", dest="expiry_days", action="store", help="Indicates the number of days after object creation the expiration rule takes effect. (only for [expire] command)")
2f39a8d3
     optparser.add_option(      "--expiry-prefix", dest="expiry_prefix", action="store", help="Identifying one or more objects with the prefix to which the expiration rule applies. (only for [expire] command)")
 
d439efb4
     optparser.add_option(      "--progress", dest="progress_meter", action="store_true", help="Display progress meter (default on TTY).")
     optparser.add_option(      "--no-progress", dest="progress_meter", action="store_false", help="Don't display progress meter (default on non-TTY).")
be48baf2
     optparser.add_option(      "--stats", dest="stats", action="store_true", help="Give some file-transfer stats.")
d439efb4
     optparser.add_option(      "--enable", dest="enable", action="store_true", help="Enable given CloudFront distribution (only for [cfmodify] command)")
27d294f3
     optparser.add_option(      "--disable", dest="enable", action="store_false", help="Disable given CloudFront distribution (only for [cfmodify] command)")
d439efb4
     optparser.add_option(      "--cf-invalidate", dest="invalidate_on_cf", action="store_true", help="Invalidate the uploaded filed in CloudFront. Also see [cfinval] command.")
c0a81434
     # joseprio: adding options to invalidate the default index and the default
     # index root
dc00a200
     optparser.add_option(      "--cf-invalidate-default-index", dest="invalidate_default_index_on_cf", action="store_true", help="When using Custom Origin and S3 static website, invalidate the default index file.")
     optparser.add_option(      "--cf-no-invalidate-default-index-root", dest="invalidate_default_index_root_on_cf", action="store_false", help="When using Custom Origin and S3 static website, don't invalidate the path to the default index file.")
d439efb4
     optparser.add_option(      "--cf-add-cname", dest="cf_cnames_add", action="append", metavar="CNAME", help="Add given CNAME to a CloudFront distribution (only for [cfcreate] and [cfmodify] commands)")
     optparser.add_option(      "--cf-remove-cname", dest="cf_cnames_remove", action="append", metavar="CNAME", help="Remove given CNAME from a CloudFront distribution (only for [cfmodify] command)")
     optparser.add_option(      "--cf-comment", dest="cf_comment", action="store", metavar="COMMENT", help="Set COMMENT for a given CloudFront distribution (only for [cfcreate] and [cfmodify] commands)")
     optparser.add_option(      "--cf-default-root-object", dest="cf_default_root_object", action="store", metavar="DEFAULT_ROOT_OBJECT", help="Set the default root object to return when no object is specified in the URL. Use a relative path, i.e. default/index.html instead of /default/index.html or s3://bucket/default/index.html (only for [cfcreate] and [cfmodify] commands)")
     optparser.add_option("-v", "--verbose", dest="verbosity", action="store_const", const=logging.INFO, help="Enable verbose output.")
     optparser.add_option("-d", "--debug", dest="verbosity", action="store_const", const=logging.DEBUG, help="Enable debug output.")
     optparser.add_option(      "--version", dest="show_version", action="store_true", help="Show s3cmd version (%s) and exit." % (PkgInfo.version))
     optparser.add_option("-F", "--follow-symlinks", dest="follow_symlinks", action="store_true", default=False, help="Follow symbolic links as if they are regular files")
488c9565
     optparser.add_option(      "--cache-file", dest="cache_file", action="store", default="",  metavar="FILE", help="Cache FILE containing local source MD5 values")
fd5b2b48
     optparser.add_option("-q", "--quiet", dest="quiet", action="store_true", default=False, help="Silence output on stdout")
d5d8424c
     optparser.add_option(      "--ca-certs", dest="ca_certs_file", action="store", default=None, help="Path to SSL CA certificate FILE (instead of system default)")
     optparser.add_option(      "--check-certificate", dest="check_ssl_certificate", action="store_true", help="Check SSL certificate validity")
6da15859
     optparser.add_option(      "--no-check-certificate", dest="check_ssl_certificate", action="store_false", help="Do not check SSL certificate validity")
2e8254ae
     optparser.add_option(      "--check-hostname", dest="check_ssl_hostname", action="store_true", help="Check SSL certificate hostname validity")
     optparser.add_option(      "--no-check-hostname", dest="check_ssl_hostname", action="store_false", help="Do not check SSL certificate hostname validity")
d5d8424c
     optparser.add_option(      "--signature-v2", dest="signature_v2", action="store_true", help="Use AWS Signature version 2 instead of newer signature methods. Helpful for S3-like systems that don't have AWS Signature v4 yet.")
     optparser.add_option(      "--limit-rate", dest="limitrate", action="store", type="string", help="Limit the upload or download speed to amount bytes per second.  Amount may be expressed in bytes, kilobytes with the k suffix, or megabytes with the m suffix")
     optparser.add_option(      "--requester-pays", dest="requester_pays", action="store_true", help="Set the REQUESTER PAYS flag for operations")
     optparser.add_option("-l", "--long-listing", dest="long_listing", action="store_true", help="Produce long listing [ls]")
aa320b12
     optparser.add_option(      "--stop-on-error", dest="stop_on_error", action="store_true", help="stop if error in transfer")
f2fc0daa
     optparser.add_option(      "--content-disposition", dest="content_disposition", action="store", help="Provide a Content-Disposition for signed URLs, e.g., \"inline; filename=myvideo.mp4\"")
     optparser.add_option(      "--content-type", dest="content_type", action="store", help="Provide a Content-Type for signed URLs, e.g., \"video/mp4\"")
d439efb4
 
     optparser.set_usage(optparser.usage + " COMMAND [parameters]")
     optparser.set_description('S3cmd is a tool for managing objects in '+
         'Amazon S3 storage. It allows for making and removing '+
         '"buckets" and uploading, downloading and removing '+
         '"objects" from these buckets.')
     optparser.epilog = format_commands(optparser.get_prog_name(), commands_list)
a97e290c
     optparser.epilog += ("\nFor more information, updates and news, visit the s3cmd website:\n%s\n" % PkgInfo.url)
d439efb4
 
     (options, args) = optparser.parse_args()
 
     ## Some mucking with logging levels to enable
     ## debugging/verbose output for config file parser on request
8c568ece
     logging.basicConfig(level=options.verbosity or Config().verbosity,
d439efb4
                         format='%(levelname)s: %(message)s',
                         stream = sys.stderr)
 
     if options.show_version:
         output(u"s3cmd version %s" % PkgInfo.version)
8214d4f0
         sys.exit(EX_OK)
c5668564
     debug(u"s3cmd version %s" % PkgInfo.version)
d439efb4
 
2510ec16
     # TODO: PY3 warning, to remove when it will be stable
     if sys.version_info >= (3,0):
         warning("!!!!!!! Support for python3 is currently in a 'Work In Progress' state.\nPlease don't use s3cmd with python3 on production tasks or with sensitive data as unexpected behaviors could occur !!!!!!!")
 
fd5b2b48
     if options.quiet:
990b3bd1
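         # Discard normal output by pointing stdout at /dev/null; if it
         # cannot be opened (e.g. on platforms without /dev/null),
         # --quiet is simply disabled below.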
         try:
             f = open("/dev/null", "w")
             sys.stdout = f
         except IOError:
12acd8a4
             warning(u"Unable to open /dev/null: --quiet disabled.")
fd5b2b48
 
d439efb4
     ## Now finally parse the config file
     if not options.config:
         error(u"Can't find a config file. Please use --config option.")
8214d4f0
         sys.exit(EX_CONFIG)
d439efb4
 
     try:
37d52a0a
         cfg = Config(options.config, options.access_key, options.secret_key, options.access_token)
08732e61
     except IOError as e:
d439efb4
         if options.run_configure:
             cfg = Config()
         else:
             error(u"%s: %s"  % (options.config, e.strerror))
             error(u"Configuration file not available.")
             error(u"Consider using --configure parameter to create one.")
8214d4f0
             sys.exit(EX_CONFIG)
d439efb4
 
8c568ece
     # allow commandline verbosity config to override config file
     if options.verbosity is not None:
d439efb4
         cfg.verbosity = options.verbosity
     logging.root.setLevel(cfg.verbosity)
     ## Unsupported features on Win32 platform
     if os.name == "nt":
         if cfg.preserve_attrs:
             error(u"Option --preserve is not yet supported on MS Windows platform. Assuming --no-preserve.")
             cfg.preserve_attrs = False
         if cfg.progress_meter:
             error(u"Option --progress is not yet supported on MS Windows platform. Assuming --no-progress.")
             cfg.progress_meter = False
 
     ## Pre-process --add-header's and put them to Config.extra_headers SortedDict()
     if options.add_header:
         for hdr in options.add_header:
             try:
                 key, val = hdr.split(":", 1)
             except ValueError:
                 raise ParameterError("Invalid header format: %s" % hdr)
             key_inval = re.sub("[a-zA-Z0-9-.]", "", key)
             if key_inval:
                 key_inval = key_inval.replace(" ", "<space>")
                 key_inval = key_inval.replace("\t", "<tab>")
                 raise ParameterError("Invalid character(s) in header name '%s': \"%s\"" % (key, key_inval))
5d702a56
             debug(u"Updating Config.Config extra_headers[%s] -> %s" % (key.strip().lower(), val.strip()))
e3bb25e4
             cfg.extra_headers[key.strip().lower()] = val.strip()
d439efb4
 
bb636cd1
     # Process --remove-header
     if options.remove_headers:
         cfg.remove_headers = options.remove_headers
 
d439efb4
     ## --acl-grant/--acl-revoke arguments are pre-parsed by OptionS3ACL()
     if options.acl_grants:
         for grant in options.acl_grants:
             cfg.acl_grants.append(grant)
 
     if options.acl_revokes:
         for grant in options.acl_revokes:
             cfg.acl_revokes.append(grant)
 
     ## Process --(no-)check-md5
     if options.check_md5 == False:
         try:
             cfg.sync_checks.remove("md5")
c7b91705
             cfg.preserve_attrs_list.remove("md5")
6351bcde
         except Exception:
d439efb4
             pass
c7b91705
     if options.check_md5 == True:
         if cfg.sync_checks.count("md5") == 0:
             cfg.sync_checks.append("md5")
         if cfg.preserve_attrs_list.count("md5") == 0:
             cfg.preserve_attrs_list.append("md5")
d439efb4
 
     ## Update Config with other parameters
     for option in cfg.option_list():
         try:
             if getattr(options, option) != None:
                 debug(u"Updating Config.Config %s -> %s" % (option, getattr(options, option)))
                 cfg.update_option(option, getattr(options, option))
         except AttributeError:
             ## Some Config() options are not settable from command line
             pass
 
     ## Special handling for tri-state options (True, False, None)
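     ## (the generic option loop above skips values that are None)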
     cfg.update_option("enable", options.enable)
bc38c2a8
     if options.acl_public is not None:
         cfg.update_option("acl_public", options.acl_public)
d439efb4
 
9dda31d0
     ## Check multipart chunk constraints
80310166
     if cfg.multipart_chunk_size_mb < MultiPartUpload.MIN_CHUNK_SIZE_MB:
         raise ParameterError("Chunk size %d MB is too small, must be >= %d MB. Please adjust --multipart-chunk-size-mb" % (cfg.multipart_chunk_size_mb, MultiPartUpload.MIN_CHUNK_SIZE_MB))
     if cfg.multipart_chunk_size_mb > MultiPartUpload.MAX_CHUNK_SIZE_MB:
         raise ParameterError("Chunk size %d MB is too large, must be <= %d MB. Please adjust --multipart-chunk-size-mb" % (cfg.multipart_chunk_size_mb, MultiPartUpload.MAX_CHUNK_SIZE_MB))
9dda31d0
 
dc071cc1
     ## If an UploadId was provided, set put_continue True
     if options.upload_id is not None:
         cfg.upload_id = options.upload_id
         cfg.put_continue = True
 
f1138fd9
     if cfg.upload_id and not cfg.multipart_chunk_size_mb:
dc071cc1
         raise ParameterError("Must have --multipart-chunk-size-mb if using --put-continue or --upload-id")
 
d439efb4
     ## CloudFront's cf_enable and Config's enable share the same --enable switch
     options.cf_enable = options.enable
 
     ## CloudFront's cf_logging and Config's log_target_prefix share the same --log-target-prefix switch
     options.cf_logging = options.log_target_prefix
 
     ## Update CloudFront options if some were set
     for option in CfCmd.options.option_list():
         try:
             if getattr(options, option) != None:
                 debug(u"Updating CloudFront.Cmd %s -> %s" % (option, getattr(options, option)))
                 CfCmd.options.update_option(option, getattr(options, option))
         except AttributeError:
             ## Some CloudFront.Cmd.Options() options are not settable from command line
             pass
 
07c9e2de
     if options.additional_destinations:
         cfg.additional_destinations = options.additional_destinations
3ce5e989
     if options.files_from:
         cfg.files_from = options.files_from
07c9e2de
 
d439efb4
     ## Set output and filesystem encoding for printing out filenames.
4f64f4f9
     try:
         # Support for python3
         # Codecs are not strictly needed when the output already uses
         # the system encoding, but use them anyway to be safe.  For
         # that we need the underlying binary buffer of stdout/stderr.
83b7b61f
         sys.stdout = codecs.getwriter(cfg.encoding)(sys.stdout.buffer, "replace")
         sys.stderr = codecs.getwriter(cfg.encoding)(sys.stderr.buffer, "replace")
4f64f4f9
     except AttributeError:
         sys.stdout = codecs.getwriter(cfg.encoding)(sys.stdout, "replace")
         sys.stderr = codecs.getwriter(cfg.encoding)(sys.stderr, "replace")
d439efb4
 
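     ## Glob flavours are translated to regexps inside process_patterns();
     ## --include rules re-admit files that --exclude rules filtered out.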
     ## Process --exclude and --exclude-from
     patterns_list, patterns_textual = process_patterns(options.exclude, options.exclude_from, is_glob = True, option_txt = "exclude")
     cfg.exclude.extend(patterns_list)
     cfg.debug_exclude.update(patterns_textual)
 
     ## Process --rexclude and --rexclude-from
     patterns_list, patterns_textual = process_patterns(options.rexclude, options.rexclude_from, is_glob = False, option_txt = "rexclude")
     cfg.exclude.extend(patterns_list)
     cfg.debug_exclude.update(patterns_textual)
 
     ## Process --include and --include-from
     patterns_list, patterns_textual = process_patterns(options.include, options.include_from, is_glob = True, option_txt = "include")
     cfg.include.extend(patterns_list)
     cfg.debug_include.update(patterns_textual)
 
     ## Process --rinclude and --rinclude-from
     patterns_list, patterns_textual = process_patterns(options.rinclude, options.rinclude_from, is_glob = False, option_txt = "rinclude")
     cfg.include.extend(patterns_list)
     cfg.debug_include.update(patterns_textual)
 
     ## Set socket read()/write() timeout
     socket.setdefaulttimeout(cfg.socket_timeout)
 
     if cfg.encrypt and cfg.gpg_passphrase == "":
         error(u"Encryption requested but no passphrase set in config file.")
         error(u"Please re-run 's3cmd --configure' and supply it.")
8214d4f0
         sys.exit(EX_CONFIG)
d439efb4
 
     if options.dump_config:
         cfg.dump_config(sys.stdout)
8214d4f0
         sys.exit(EX_OK)
d439efb4
 
     if options.run_configure:
3234ccf8
         # 'args' may contain the test-bucket URI
a2340ee7
         run_configure(options.config, args)
8214d4f0
         sys.exit(EX_OK)
d439efb4
 
69628e71
     ## set config if stop_on_error is set
     if options.stop_on_error:
         cfg.stop_on_error = options.stop_on_error
 
f2fc0daa
     if options.content_disposition:
         cfg.content_disposition = options.content_disposition
 
     if options.content_type:
         cfg.content_type = options.content_type
 
d439efb4
     if len(args) < 1:
178719e3
         optparser.print_help()
8214d4f0
         sys.exit(EX_USAGE)
d439efb4
 
     ## Unicodise all remaining arguments:
     args = [unicodise(arg) for arg in args]
 
     command = args.pop(0)
     try:
         debug(u"Command: %s" % commands[command]["cmd"])
         ## We must do this lookup in a separate step to
         ## avoid catching KeyError exceptions raised
         ## from inner functions.
         cmd_func = commands[command]["func"]
08732e61
     except KeyError as e:
66e96d07
         error(u"Invalid command: %s", command)
8214d4f0
         sys.exit(EX_USAGE)
d439efb4
 
     if len(args) < commands[command]["argc"]:
742061fd
         error(u"Not enough parameters for command '%s'" % command)
8214d4f0
         sys.exit(EX_USAGE)
d439efb4
 
657159cc
     rc = cmd_func(args)
     if rc is None: # if we missed any cmd_*() returns
         rc = EX_GENERAL
     return rc
3cc025ae
 
b282366d
 def report_exception(e, msg=u''):
         alert_header = u"""
4a52baa8
 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     An unexpected error has occurred.
8d314b01
   Please try reproducing the error using
   the latest s3cmd code from the git master
   branch found at:
     https://github.com/s3tools/s3cmd
2567fbba
   and have a look at the known issues list:
     https://github.com/s3tools/s3cmd/wiki/Common-known-issues-and-their-solutions
8d314b01
   If the error persists, please report the
b282366d
   %s (removing any private
8d314b01
   info as necessary) to:
b282366d
    s3tools-bugs@lists.sourceforge.net%s
4a52baa8
 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
b282366d
 """
         sys.stderr.write(alert_header % (u"following lines", u"\n\n" + msg))
4f64f4f9
         tb = traceback.format_exc()
23c05948
         try:
04d1e315
             s = u' '.join([unicodise(a) for a in sys.argv])
23c05948
         except NameError:
             s = u' '.join([(a) for a in sys.argv])
ea5451d8
         sys.stderr.write(u"Invoked as: %s\n" % s)
8d314b01
 
d439efb4
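         # str(e.__class__) looks like "<class 'module.Name'>"; slice
         # out the bare class name between the last '.' and the "'>".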
         e_class = str(e.__class__)
         e_class = e_class[e_class.rfind(".")+1 : -2]
         sys.stderr.write(u"Problem: %s: %s\n" % (e_class, e))
         try:
ea5451d8
             sys.stderr.write(u"S3cmd:   %s\n" % PkgInfo.version)
d439efb4
         except NameError:
ea5451d8
             sys.stderr.write(u"S3cmd:   unknown version. Module import problem?\n")
         sys.stderr.write(u"python:   %s\n" % sys.version)
         sys.stderr.write(u"environment LANG=%s\n" % os.getenv("LANG"))
         sys.stderr.write(u"\n")
be7c4de6
         if type(tb) == unicode:
             sys.stderr.write(tb)
         else:
             sys.stderr.write(unicode(tb, errors="replace"))
d439efb4
 
         if type(e) == ImportError:
             sys.stderr.write("\n")
             sys.stderr.write("Your sys.path contains these entries:\n")
             for path in sys.path:
                 sys.stderr.write(u"\t%s\n" % path)
             sys.stderr.write("Now the question is where have the s3cmd modules been installed?\n")
 
b282366d
         sys.stderr.write(alert_header % (u"above lines", u""))
1c88eb58
 
 if __name__ == '__main__':
d439efb4
     try:
         ## Our modules
         ## Keep them in try/except block to
         ## detect any syntax errors in there
8214d4f0
         from S3.ExitCodes import *
d439efb4
         from S3.Exceptions import *
         from S3 import PkgInfo
         from S3.S3 import S3
         from S3.Config import Config
         from S3.SortedDict import SortedDict
7800900e
         from S3.FileDict import FileDict
d439efb4
         from S3.S3Uri import S3Uri
         from S3 import Utils
b6228e9f
         from S3 import Crypto
d439efb4
         from S3.Utils import *
8c69a65d
         from S3.Progress import Progress, StatsInfo
d439efb4
         from S3.CloudFront import Cmd as CfCmd
         from S3.CloudFront import CloudFront
         from S3.FileLists import *
9dda31d0
         from S3.MultiPart import MultiPartUpload
23c05948
     except Exception as e:
a302ca96
         report_exception(e, "Error loading some components of s3cmd (Import Error)")
23c05948
         # 1 == EX_GENERAL; hardcoded because the ExitCodes import itself may have failed
         sys.exit(1)
d439efb4
 
23c05948
     try:
8f35b18a
         rc = main()
         sys.exit(rc)
d439efb4
 
08732e61
     except ImportError as e:
d439efb4
         report_exception(e)
23c05948
         sys.exit(EX_GENERAL)
d439efb4
 
08732e61
     except (ParameterError, InvalidFileError) as e:
d439efb4
         error(u"Parameter problem: %s" % e)
8214d4f0
         sys.exit(EX_USAGE)
d439efb4
 
08732e61
     except (S3DownloadError, S3UploadError, S3RequestError) as e:
01de57ac
         error(u"S3 Temporary Error: %s.  Please try again later." % e)
         sys.exit(EX_TEMPFAIL)
 
08732e61
     except S3Error as e:
657159cc
         error(u"S3 error: %s" % e)
         sys.exit(e.get_error_code())
 
08732e61
     except (S3Exception, S3ResponseError, CloudFrontError) as e:
01de57ac
         report_exception(e)
8f35b18a
         sys.exit(EX_SOFTWARE)
d439efb4
 
08732e61
     except SystemExit as e:
d439efb4
         sys.exit(e.code)
 
     except KeyboardInterrupt:
         sys.stderr.write("See ya!\n")
8214d4f0
         sys.exit(EX_BREAK)
d439efb4
 
08732e61
     except SSLError as e:
26186ec4
         # SSLError is a subtype of IOError
         error("SSL certificate verification failure: %s" % e)
         sys.exit(EX_ACCESSDENIED)
 
08732e61
     except socket.gaierror as e:
427a5c3c
         # gaierror is a subtype of IOError
         # typically encountered error is:
         # gaierror: [Errno -2] Name or service not known
         error(e)
         error("Connection Error: Error resolving a server hostname.\n"
               "Please check the servers address specified in 'host_base', 'host_bucket', 'cloudfront_host', 'website_endpoint'")
         sys.exit(EX_IOERR)
 
08732e61
     except IOError as e:
2d7e6269
         if e.errno == errno.EPIPE:
             # Fail silently on SIGPIPE. This likely means we wrote to a closed
             # pipe and user does not care for any more output.
             sys.exit(EX_IOERR)
 
bdc67614
         report_exception(e)
8214d4f0
         sys.exit(EX_IOERR)
d439efb4
 
08732e61
     except OSError as e:
dd6098e1
         error(e)
8214d4f0
         sys.exit(EX_OSERR)
889c5a7b
 
6decc41b
     except MemoryError:
         msg = """
 MemoryError!  You have exceeded the amount of memory available for this process.
 This usually occurs when syncing >750,000 files on a 32-bit python instance.
 The solutions to this are:
 1) sync several smaller subtrees; or
 2) use a 64-bit python on a 64-bit OS with >8GB RAM
         """
         sys.stderr.write(msg)
8214d4f0
         sys.exit(EX_OSERR)
6decc41b
 
08732e61
     except UnicodeEncodeError as e:
e44d9b78
         lang = os.getenv("LANG")
         msg = """
 You have encountered a UnicodeEncodeError.  Your environment
 variable LANG=%s may not specify a Unicode encoding (e.g. UTF-8).
 Please set LANG=en_US.UTF-8 or similar in your environment before
 invoking s3cmd.
         """ % lang
         report_exception(e, msg)
8214d4f0
         sys.exit(EX_GENERAL)
e44d9b78
 
08732e61
     except Exception as e:
d439efb4
         report_exception(e)
8214d4f0
         sys.exit(EX_GENERAL)
d439efb4
 
 # vim:et:ts=4:sts=4:ai