Replace leading <Tab> with 4x<Space>
Remove trailing whitespace
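Both changes are purely mechanical. A normalization pass like this can be scripted in a few lines of Python; the sketch below is illustrative only (not the tool actually used here) and expands each leading <Tab> to 4x<Space>, then strips trailing whitespace:

    import re, sys

    def normalize(path):
        '''Expand leading tabs to 4 spaces; strip trailing whitespace.'''
        fixed = []
        for line in open(path).read().splitlines():
            indent = re.match(r'[ \t]*', line).group(0)   # leading whitespace only
            body = line[len(indent):]
            fixed.append((indent.replace('\t', '    ') + body).rstrip())
        open(path, 'w').write('\n'.join(fixed) + '\n')

    for path in sys.argv[1:]:
        normalize(path)

Since only whitespace moves, the hunks below pair each removed line with an added line whose visible content is identical.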
@@ -440,7 +440,7 @@ class CloudFront(object):
                 else:
                     new_paths.append(path)
             paths = new_paths
-
+
         # uri could be either cf:// or s3:// uri
         cfuri = self.get_dist_name_for_bucket(uri)
         if len(paths) > 999:
@@ -575,7 +575,7 @@ class CloudFront(object):
                 elif d.info.has_key("CustomOrigin"):
                     # Aral: This used to skip over distributions with CustomOrigin, however, we mustn't
                     # do this since S3 buckets that are set up as websites use custom origins.
-                    # Thankfully, the custom origin URLs they use start with the URL of the
+                    # Thankfully, the custom origin URLs they use start with the URL of the
                     # S3 bucket. Here, we make use this naming convention to support this use case.
                     distListIndex = getBucketFromHostname(d.info['CustomOrigin']['DNSName'])[0];
                     distListIndex = distListIndex[:len(uri.bucket())]
@@ -20,43 +20,40 @@ import copy
 __all__ = ["fetch_local_list", "fetch_remote_list", "compare_filelists", "filter_exclude_include", "parse_attrs_header"]

 def _fswalk_follow_symlinks(path):
-	'''
-	Walk filesystem, following symbolic links (but without recursion), on python2.4 and later
-
-	If a symlink directory loop is detected, emit a warning and skip.
-	E.g.
-	dir1/
-	dir2/
-	sym-dir -> ../dir2
-	'''
-	assert os.path.isdir(path) # only designed for directory argument
-	walkdirs = set([path])
-	for dirpath, dirnames, filenames in os.walk(path):
-		handle_exclude_include_walk(dirpath, dirnames, [])
-		real_dirpath = os.path.realpath(dirpath)
-		for dirname in dirnames:
-			current = os.path.join(dirpath, dirname)
-			real_current = os.path.realpath(current)
-			if os.path.islink(current):
-				if (real_dirpath == real_current or
-				    real_dirpath.startswith(real_current + os.path.sep)):
-					warning("Skipping recursively symlinked directory %s" % dirname)
-				else:
-					walkdirs.add(current)
-	for walkdir in walkdirs:
-		for dirpath, dirnames, filenames in os.walk(walkdir):
-			handle_exclude_include_walk(dirpath, dirnames, [])
-			yield (dirpath, dirnames, filenames)
+    '''
+    Walk filesystem, following symbolic links (but without recursion), on python2.4 and later
+
+    If a symlink directory loop is detected, emit a warning and skip.
+    E.g.: dir1/dir2/sym-dir -> ../dir2
+    '''
+    assert os.path.isdir(path) # only designed for directory argument
+    walkdirs = set([path])
+    for dirpath, dirnames, filenames in os.walk(path):
+        handle_exclude_include_walk(dirpath, dirnames, [])
+        real_dirpath = os.path.realpath(dirpath)
+        for dirname in dirnames:
+            current = os.path.join(dirpath, dirname)
+            real_current = os.path.realpath(current)
+            if os.path.islink(current):
+                if (real_dirpath == real_current or
+                    real_dirpath.startswith(real_current + os.path.sep)):
+                    warning("Skipping recursively symlinked directory %s" % dirname)
+                else:
+                    walkdirs.add(current)
+    for walkdir in walkdirs:
+        for dirpath, dirnames, filenames in os.walk(walkdir):
+            handle_exclude_include_walk(dirpath, dirnames, [])
+            yield (dirpath, dirnames, filenames)

 def _fswalk_no_symlinks(path):
-	'''
-	Directory tree generator
+    '''
+    Directory tree generator

-	path (str) is the root of the directory tree to walk
-	'''
-	for dirpath, dirnames, filenames in os.walk(path):
-		handle_exclude_include_walk(dirpath, dirnames, filenames)
-		yield (dirpath, dirnames, filenames)
+    path (str) is the root of the directory tree to walk
+    '''
+    for dirpath, dirnames, filenames in os.walk(path):
+        handle_exclude_include_walk(dirpath, dirnames, filenames)
+        yield (dirpath, dirnames, filenames)

 def filter_exclude_include(src_list):
     info(u"Applying --exclude/--include")
@@ -110,7 +107,7 @@ def handle_exclude_include_walk(root, dirs, files):
                 break
         if excluded:
             ## Still excluded - ok, action it
-			dirs.remove(x)
+            dirs.remove(x)
             continue

     # exclude file matches in the current directory
@@ -138,7 +135,7 @@ def handle_exclude_include_walk(root, dirs, files):
 def fetch_local_list(args, recursive = None):
     def _get_filelist_local(loc_list, local_uri, cache):
         info(u"Compiling list of local files...")
-
+
         if deunicodise(local_uri.basename()) == "-":
             loc_list = SortedDict(ignore_case = False)
             loc_list["-"] = {
@@ -184,31 +181,31 @@ def fetch_local_list(args, recursive = None):
                     'full_name' : full_name,
                     'size' : sr.st_size,
                     'mtime' : sr.st_mtime,
-					'dev' : sr.st_dev,
-					'inode' : sr.st_ino,
-					'uid' : sr.st_uid,
-					'gid' : sr.st_gid,
-					'sr': sr # save it all, may need it in preserve_attrs_list
+                    'dev' : sr.st_dev,
+                    'inode' : sr.st_ino,
+                    'uid' : sr.st_uid,
+                    'gid' : sr.st_gid,
+                    'sr': sr # save it all, may need it in preserve_attrs_list
                     ## TODO: Possibly more to save here...
                 }
-				if 'md5' in cfg.sync_checks:
+                if 'md5' in cfg.sync_checks:
                     md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size)
-					if md5 is None:
-						try:
+                    if md5 is None:
+                        try:
                             md5 = loc_list.get_md5(relative_file) # this does the file I/O
-						except IOError:
+                        except IOError:
                             continue
-						cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size, md5)
-					loc_list.record_hardlink(relative_file, sr.st_dev, sr.st_ino, md5)
+                        cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size, md5)
+                    loc_list.record_hardlink(relative_file, sr.st_dev, sr.st_ino, md5)
         return loc_list, single_file

     def _maintain_cache(cache, local_list):
         if cfg.cache_file:
             cache.mark_all_for_purge()
-			for i in local_list.keys():
+            for i in local_list.keys():
                 cache.unmark_for_purge(local_list[i]['dev'], local_list[i]['inode'], local_list[i]['mtime'], local_list[i]['size'])
             cache.purge()
-			cache.save(cfg.cache_file)
+            cache.save(cfg.cache_file)

     cfg = Config()

@@ -216,9 +213,9 @@ def fetch_local_list(args, recursive = None):
     if cfg.cache_file:
         try:
             cache.load(cfg.cache_file)
-		except IOError:
-			info(u"No cache file found, creating it.")
-
+        except IOError:
+            info(u"No cache file found, creating it.")
+
     local_uris = []
     local_list = SortedDict(ignore_case = False)
     single_file = False
@@ -299,11 +296,11 @@ def fetch_remote_list(args, require_attribs = False, recursive = None):
                 'object_key' : object['Key'],
                 'object_uri_str' : object_uri_str,
                 'base_uri' : remote_uri,
-				'dev' : None,
-				'inode' : None,
+                'dev' : None,
+                'inode' : None,
             }
-			md5 = object['ETag'][1:-1]
-			rem_list.record_md5(key, md5)
+            md5 = object['ETag'][1:-1]
+            rem_list.record_md5(key, md5)
             if break_now:
                 break
         return rem_list
@@ -329,7 +326,7 @@ def fetch_remote_list(args, require_attribs = False, recursive = None):
             objectlist = _get_filelist_remote(uri)
             for key in objectlist:
                 remote_list[key] = objectlist[key]
-				remote_list.record_md5(key, objectlist.get_md5(key))
+                remote_list.record_md5(key, objectlist.get_md5(key))
     else:
         for uri in remote_uris:
             uri_str = str(uri)
@@ -367,12 +364,12 @@ def fetch_remote_list(args, require_attribs = False, recursive = None):
                     'md5': response['headers']['etag'].strip('"\''),
                     'timestamp' : dateRFC822toUnix(response['headers']['date'])
                 })
-				# get md5 from header if it's present. We would have set that during upload
-				if response['headers'].has_key('x-amz-meta-s3cmd-attrs'):
+                # get md5 from header if it's present. We would have set that during upload
+                if response['headers'].has_key('x-amz-meta-s3cmd-attrs'):
                     attrs = parse_attrs_header(response['headers']['x-amz-meta-s3cmd-attrs'])
-					if attrs.has_key('md5'):
+                    if attrs.has_key('md5'):
                         remote_item.update({'md5': attrs['md5']})
-
+
             remote_list[key] = remote_item
     return remote_list

@@ -380,7 +377,7 @@ def parse_attrs_header(attrs_header):
     attrs = {}
     for attr in attrs_header.split("/"):
         key, val = attr.split(":")
-		attrs[key] = val
+        attrs[key] = val
     return attrs


@@ -391,14 +388,14 @@ def compare_filelists(src_list, dst_list, src_remote, dst_remote, delay_updates
     def _compare(src_list, dst_lst, src_remote, dst_remote, file):
         """Return True if src_list[file] matches dst_list[file], else False"""
         attribs_match = True
-		if not (src_list.has_key(file) and dst_list.has_key(file)):
+        if not (src_list.has_key(file) and dst_list.has_key(file)):
             info(u"%s: does not exist in one side or the other: src_list=%s, dst_list=%s" % (file, src_list.has_key(file), dst_list.has_key(file)))
             return False

         ## check size first
-		if 'size' in cfg.sync_checks and dst_list[file]['size'] != src_list[file]['size']:
+        if 'size' in cfg.sync_checks and dst_list[file]['size'] != src_list[file]['size']:
             debug(u"xfer: %s (size mismatch: src=%s dst=%s)" % (file, src_list[file]['size'], dst_list[file]['size']))
-			attribs_match = False
+            attribs_match = False

         ## check md5
         compare_md5 = 'md5' in cfg.sync_checks
@@ -411,11 +408,11 @@ def compare_filelists(src_list, dst_list, src_remote, dst_remote, delay_updates
             try:
                 src_md5 = src_list.get_md5(file)
                 dst_md5 = dst_list.get_md5(file)
-			except (IOError,OSError), e:
+            except (IOError,OSError), e:
                 # md5 sum verification failed - ignore that file altogether
                 debug(u"IGNR: %s (disappeared)" % (file))
                 warning(u"%s: file disappeared, ignoring." % (file))
-				raise
+                raise

             if src_md5 != dst_md5:
                 ## checksums are different.
@@ -443,65 +440,65 @@ def compare_filelists(src_list, dst_list, src_remote, dst_remote, delay_updates

         if dst_list.has_key(relative_file):
             ## Was --skip-existing requested?
-			if cfg.skip_existing:
-				debug(u"IGNR: %s (used --skip-existing)" % (relative_file))
-				del(src_list[relative_file])
-				del(dst_list[relative_file])
-				continue
+            if cfg.skip_existing:
+                debug(u"IGNR: %s (used --skip-existing)" % (relative_file))
+                del(src_list[relative_file])
+                del(dst_list[relative_file])
+                continue

-			try:
+            try:
                 same_file = _compare(src_list, dst_list, src_remote, dst_remote, relative_file)
-			except (IOError,OSError), e:
+            except (IOError,OSError), e:
                 debug(u"IGNR: %s (disappeared)" % (relative_file))
                 warning(u"%s: file disappeared, ignoring." % (relative_file))
-				del(src_list[relative_file])
-				del(dst_list[relative_file])
+                del(src_list[relative_file])
+                del(dst_list[relative_file])
                 continue

-			if same_file:
-				debug(u"IGNR: %s (transfer not needed)" % relative_file)
-				del(src_list[relative_file])
-				del(dst_list[relative_file])
+            if same_file:
+                debug(u"IGNR: %s (transfer not needed)" % relative_file)
+                del(src_list[relative_file])
+                del(dst_list[relative_file])

-			else:
+            else:
                 # look for matching file in src
                 try:
                     md5 = src_list.get_md5(relative_file)
-				except IOError:
+                except IOError:
                     md5 = None
-				if md5 is not None and dst_list.by_md5.has_key(md5):
+                if md5 is not None and dst_list.by_md5.has_key(md5):
                     # Found one, we want to copy
                     dst1 = list(dst_list.by_md5[md5])[0]
                     debug(u"DST COPY src: %s -> %s" % (dst1, relative_file))
-					copy_pairs.append((src_list[relative_file], dst1, relative_file))
-					del(src_list[relative_file])
-					del(dst_list[relative_file])
+                    copy_pairs.append((src_list[relative_file], dst1, relative_file))
+                    del(src_list[relative_file])
+                    del(dst_list[relative_file])
                 else:
-					# record that we will get this file transferred to us (before all the copies), so if we come across it later again,
-					# we can copy from _this_ copy (e.g. we only upload it once, and copy thereafter).
-					dst_list.record_md5(relative_file, md5)
-					update_list[relative_file] = src_list[relative_file]
-					del src_list[relative_file]
-					del dst_list[relative_file]
-
-		else:
+                    # record that we will get this file transferred to us (before all the copies), so if we come across it later again,
+                    # we can copy from _this_ copy (e.g. we only upload it once, and copy thereafter).
+                    dst_list.record_md5(relative_file, md5)
+                    update_list[relative_file] = src_list[relative_file]
+                    del src_list[relative_file]
+                    del dst_list[relative_file]
+
+        else:
             # dst doesn't have this file
             # look for matching file elsewhere in dst
             try:
                 md5 = src_list.get_md5(relative_file)
             except IOError:
                 md5 = None
-			dst1 = dst_list.find_md5_one(md5)
-			if dst1 is not None:
+            dst1 = dst_list.find_md5_one(md5)
+            if dst1 is not None:
                 # Found one, we want to copy
                 debug(u"DST COPY dst: %s -> %s" % (dst1, relative_file))
-				copy_pairs.append((src_list[relative_file], dst1, relative_file))
-				del(src_list[relative_file])
-			else:
+                copy_pairs.append((src_list[relative_file], dst1, relative_file))
+                del(src_list[relative_file])
+            else:
                 # we don't have this file, and we don't have a copy of this file elsewhere. Get it.
-				# record that we will get this file transferred to us (before all the copies), so if we come across it later again,
-				# we can copy from _this_ copy (e.g. we only upload it once, and copy thereafter).
-				dst_list.record_md5(relative_file, md5)
+                # record that we will get this file transferred to us (before all the copies), so if we come across it later again,
+                # we can copy from _this_ copy (e.g. we only upload it once, and copy thereafter).
+                dst_list.record_md5(relative_file, md5)

     for f in dst_list.keys():
         if src_list.has_key(f) or update_list.has_key(f):
@@ -3,7 +3,7 @@ import cPickle as pickle
 class HashCache(object):
     def __init__(self):
         self.inodes = dict()
-
+
     def add(self, dev, inode, mtime, size, md5):
         if dev not in self.inodes:
             self.inodes[dev] = dict()
@@ -38,7 +38,7 @@ class HashCache(object):
                     if 'purge' in self.inodes[d][i][m]:
                         del self.inodes[d][i]
                         break
-
+
     def save(self, f):
         d = dict(inodes=self.inodes, version=1)
         f = open(f, 'w')
@@ -42,7 +42,7 @@ class MultiPartUpload(object):
         if not self.upload_id:
             raise RuntimeError("Attempting to use a multipart upload that has not been initiated.")

-		self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024
+        self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024

         if self.file.name != "<stdin>":
             size_left = file_size = os.stat(self.file.name)[ST_SIZE]
@@ -52,7 +52,7 @@ class MultiPartUpload(object):
         debug("MultiPart: Uploading from %s" % (self.file.name))

         seq = 1
-		if self.file.name != "<stdin>":
+        if self.file.name != "<stdin>":
             while size_left > 0:
                 offset = self.chunk_size * (seq - 1)
                 current_chunk_size = min(file_size - offset, self.chunk_size)
@@ -53,7 +53,7 @@ try:
             return magic_.file(file)
         def mime_magic_buffer(buffer):
             return magic_.buffer(buffer)
-
+
     except AttributeError:
         ## Older python-magic versions
         magic_ = magic.open(magic.MAGIC_MIME)
@@ -62,7 +62,7 @@ try:
             return magic_.file(file)
         def mime_magic_buffer(buffer):
             return magic_.buffer(buffer)
-
+
     def mime_magic(file):
         type = mime_magic_file(file)
         if type != "application/x-gzip; charset=binary":
@@ -388,7 +388,7 @@ class S3(object):
             return True
         else:
             return False
-
+
     def object_put(self, filename, uri, extra_headers = None, extra_label = ""):
         # TODO TODO
         # Make it consistent with stream-oriented object_get()
@@ -419,11 +419,11 @@ class S3(object):
             content_type = self.config.default_mime_type
         if not content_encoding:
             content_encoding = self.config.encoding.upper()
-
+
         ## add charset to content type
         if self.add_encoding(filename, content_type) and content_encoding is not None:
             content_type = content_type + "; charset=" + content_encoding
-
+
         headers["content-type"] = content_type
         if content_encoding is not None:
             headers["content-encoding"] = content_encoding
@@ -912,7 +912,7 @@ class S3(object):
         while (current_position < size_total):
             this_chunk = size_left > self.config.recv_chunk and self.config.recv_chunk or size_left
             data = http_response.read(this_chunk)
-			if len(data) == 0:
+            if len(data) == 0:
                 raise S3Error("EOF from S3!")

             stream.write(data)
@@ -59,7 +59,7 @@ class SortedDict(dict):
             return list(self.by_md5.get(md5, set()))[0]
         except:
             return None
-
+
     def get_md5(self, relative_file):
         """returns md5 if it can, or raises IOError if file is unreadable"""
         md5 = None
@@ -70,7 +70,7 @@ class SortedDict(dict):
             md5 = Utils.hash_file_md5(self[relative_file]['full_name'])
         self.record_md5(relative_file, md5)
         self[relative_file]['md5'] = md5
-		return md5
+        return md5

     def record_hardlink(self, relative_file, dev, inode, md5):
         if dev not in self.hardlinks:
@@ -753,7 +753,7 @@ def cmd_sync_remote2local(args):

     _set_local_filename(remote_list, destination_base)
     _set_local_filename(update_list, destination_base)
-
+
     if cfg.dry_run:
         for key in exclude_list:
             output(u"exclude: %s" % unicodise(key))
@@ -794,11 +794,11 @@ def cmd_sync_remote2local(args):
                 continue
             try:
                 debug(u"dst_file=%s" % unicodise(dst_file))
-				# create temporary files (of type .s3cmd.XXXX.tmp) in the same directory
+                # create temporary files (of type .s3cmd.XXXX.tmp) in the same directory
                 # for downloading and then rename once downloaded
                 chkptfd, chkptfname = tempfile.mkstemp(".tmp",".s3cmd.",os.path.dirname(dst_file))
                 debug(u"created chkptfname=%s" % unicodise(chkptfname))
-				dst_stream = os.fdopen(chkptfd, "wb")
+                dst_stream = os.fdopen(chkptfd, "wb")
                 response = s3.object_get(uri, dst_stream, extra_label = seq_label)
                 dst_stream.close()
                 # download completed, rename the file to destination
@@ -1249,13 +1249,13 @@ def cmd_sign(args):
     output("Signature: %s" % signature)

 def cmd_signurl(args):
-	expiry = args.pop()
-	url_to_sign = S3Uri(args.pop())
-	if url_to_sign.type != 's3':
-		raise ParameterError("Must be S3Uri. Got: %s" % url_to_sign)
-	debug("url to sign: %r" % url_to_sign)
-	signed_url = Utils.sign_url(url_to_sign, expiry)
-	output(signed_url)
+    expiry = args.pop()
+    url_to_sign = S3Uri(args.pop())
+    if url_to_sign.type != 's3':
+        raise ParameterError("Must be S3Uri. Got: %s" % url_to_sign)
+    debug("url to sign: %r" % url_to_sign)
+    signed_url = Utils.sign_url(url_to_sign, expiry)
+    output(signed_url)

 def cmd_fixbucket(args):
     def _unescape(text):
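Because the diff is intended to be whitespace-only, one way to sanity-check it is to byte-compile the tree after applying the patch under Python 2's -tt switch, which turns any remaining inconsistent tab/space indentation into a hard error. A minimal sketch (script name and usage are illustrative, not part of this PR):

    # check_ws.py -- run as: python -tt check_ws.py <source-dir>
    # compileall byte-compiles every module it finds; under -tt the
    # tokenizer rejects files that still mix tabs and spaces.
    import compileall, sys

    sys.exit(0 if compileall.compile_dir(sys.argv[1], quiet=1, force=1) else 1)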