
ATTENTION -- Mega WhiteSpace conversion !!!

Historically, s3cmd and its modules used <tab> for indentation.
This is not the recommended Python coding style, and many tools
treat it as an error.

This mega patch converts all <tab>s to <4-space>s and also removes
trailing whitespace along the way.

To get meaningful diffs across this commit use: git diff -w
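
As a reference for anyone repeating this on another tree: a conversion
like this can be scripted. Below is a minimal, hypothetical Python
sketch (not the exact tool used for this commit) that expands tabs to
4-space stops and strips trailing whitespace from every .py file under
the current directory:

    # Hypothetical helper, not the script used for this commit.
    # Naive: it also rewrites tabs inside string literals.
    import os

    def retab(path):
        with open(path) as f:
            lines = f.readlines()
        with open(path, "w") as f:
            for line in lines:
                # expand tabs at 4-column stops, drop trailing whitespace
                f.write(line.expandtabs(4).rstrip() + "\n")

    for root, dirs, files in os.walk("."):
        for name in files:
            if name.endswith(".py"):
                retab(os.path.join(root, name))

An empty output from git diff -w afterwards indicates that nothing but
whitespace was changed.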

Michal Ludvig authored on 2011/06/08 05:18:12
Showing 18 changed files
... ...
@@ -6,217 +6,219 @@
 from Utils import getTreeFromXml
 
 try:
-	import xml.etree.ElementTree as ET
+    import xml.etree.ElementTree as ET
 except ImportError:
-	import elementtree.ElementTree as ET
+    import elementtree.ElementTree as ET
 
 class Grantee(object):
-	ALL_USERS_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
-	LOG_DELIVERY_URI = "http://acs.amazonaws.com/groups/s3/LogDelivery"
-
-	def __init__(self):
-		self.xsi_type = None
-		self.tag = None
-		self.name = None
-		self.display_name = None
-		self.permission = None
-
-	def __repr__(self):
-		return 'Grantee("%(tag)s", "%(name)s", "%(permission)s")' % { 
-			"tag" : self.tag, 
-			"name" : self.name, 
-			"permission" : self.permission 
-		}
-
-	def isAllUsers(self):
-		return self.tag == "URI" and self.name == Grantee.ALL_USERS_URI
-	
-	def isAnonRead(self):
-		return self.isAllUsers() and (self.permission == "READ" or self.permission == "FULL_CONTROL")
-	
-	def getElement(self):
-		el = ET.Element("Grant")
-		grantee = ET.SubElement(el, "Grantee", { 
-			'xmlns:xsi' : 'http://www.w3.org/2001/XMLSchema-instance',
-			'xsi:type' : self.xsi_type
-		})
-		name = ET.SubElement(grantee, self.tag)
-		name.text = self.name
-		permission = ET.SubElement(el, "Permission")
-		permission.text = self.permission
-		return el
+    ALL_USERS_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
+    LOG_DELIVERY_URI = "http://acs.amazonaws.com/groups/s3/LogDelivery"
+
+    def __init__(self):
+        self.xsi_type = None
+        self.tag = None
+        self.name = None
+        self.display_name = None
+        self.permission = None
+
+    def __repr__(self):
+        return 'Grantee("%(tag)s", "%(name)s", "%(permission)s")' % {
+            "tag" : self.tag,
+            "name" : self.name,
+            "permission" : self.permission
+        }
+
+    def isAllUsers(self):
+        return self.tag == "URI" and self.name == Grantee.ALL_USERS_URI
+
+    def isAnonRead(self):
+        return self.isAllUsers() and (self.permission == "READ" or self.permission == "FULL_CONTROL")
+
+    def getElement(self):
+        el = ET.Element("Grant")
+        grantee = ET.SubElement(el, "Grantee", {
+            'xmlns:xsi' : 'http://www.w3.org/2001/XMLSchema-instance',
+            'xsi:type' : self.xsi_type
+        })
+        name = ET.SubElement(grantee, self.tag)
+        name.text = self.name
+        permission = ET.SubElement(el, "Permission")
+        permission.text = self.permission
+        return el
 
 class GranteeAnonRead(Grantee):
-	def __init__(self):
-		Grantee.__init__(self)
-		self.xsi_type = "Group"
-		self.tag = "URI"
-		self.name = Grantee.ALL_USERS_URI
-		self.permission = "READ"
+    def __init__(self):
+        Grantee.__init__(self)
+        self.xsi_type = "Group"
+        self.tag = "URI"
+        self.name = Grantee.ALL_USERS_URI
+        self.permission = "READ"
 
 class GranteeLogDelivery(Grantee):
-	def __init__(self, permission):
-		"""
-		permission must be either READ_ACP or WRITE
-		"""
-		Grantee.__init__(self)
-		self.xsi_type = "Group"
-		self.tag = "URI"
-		self.name = Grantee.LOG_DELIVERY_URI
-		self.permission = permission
+    def __init__(self, permission):
+        """
+        permission must be either READ_ACP or WRITE
+        """
+        Grantee.__init__(self)
+        self.xsi_type = "Group"
+        self.tag = "URI"
+        self.name = Grantee.LOG_DELIVERY_URI
+        self.permission = permission
 
 class ACL(object):
-	EMPTY_ACL = "<AccessControlPolicy><Owner><ID></ID></Owner><AccessControlList></AccessControlList></AccessControlPolicy>"
-
-	def __init__(self, xml = None):
-		if not xml:
-			xml = ACL.EMPTY_ACL
-
-		self.grantees = []
-		self.owner_id = ""
-		self.owner_nick = ""
-
-		tree = getTreeFromXml(xml)
-		self.parseOwner(tree)
-		self.parseGrants(tree)
-
-	def parseOwner(self, tree):
-		self.owner_id = tree.findtext(".//Owner//ID")
-		self.owner_nick = tree.findtext(".//Owner//DisplayName")
-
-	def parseGrants(self, tree):
-		for grant in tree.findall(".//Grant"):
-			grantee = Grantee()
-			g = grant.find(".//Grantee")
-			grantee.xsi_type = g.attrib['{http://www.w3.org/2001/XMLSchema-instance}type']
-			grantee.permission = grant.find('Permission').text
-			for el in g:
-				if el.tag == "DisplayName":
-					grantee.display_name = el.text
-				else:
-					grantee.tag = el.tag
-					grantee.name = el.text
-			self.grantees.append(grantee)
-
-	def getGrantList(self):
-		acl = []
-		for grantee in self.grantees:
-			if grantee.display_name:
-				user = grantee.display_name
-			elif grantee.isAllUsers():
-				user = "*anon*"
-			else:
-				user = grantee.name
-			acl.append({'grantee': user, 'permission': grantee.permission})
-		return acl
-
-	def getOwner(self):
-		return { 'id' : self.owner_id, 'nick' : self.owner_nick }
-
-	def isAnonRead(self):
-		for grantee in self.grantees:
-			if grantee.isAnonRead():
-				return True
-		return False
-	
-	def grantAnonRead(self):
-		if not self.isAnonRead():
-			self.appendGrantee(GranteeAnonRead())
-	
-	def revokeAnonRead(self):
-		self.grantees = [g for g in self.grantees if not g.isAnonRead()]
-
-	def appendGrantee(self, grantee):
-		self.grantees.append(grantee)
-
-	def hasGrant(self, name, permission):
-		name = name.lower()
-		permission = permission.upper()
-
-		for grantee in self.grantees:
-			if grantee.name.lower() == name:
-				if grantee.permission == "FULL_CONTROL":
-					return True
-				elif grantee.permission.upper() == permission:
-					return True
-
-		return False;
-
-	def grant(self, name, permission):
-		if self.hasGrant(name, permission):
-			return
-
-		name = name.lower()
-		permission = permission.upper()
-
-		if "ALL" == permission:
-			permission = "FULL_CONTROL"
-
-		if "FULL_CONTROL" == permission:
-			self.revoke(name, "ALL")
-
-		grantee = Grantee()
-		grantee.name = name
-		grantee.permission = permission
-
-		if  name.find('@') <= -1: # ultra lame attempt to differenciate emails id from canonical ids
-			grantee.xsi_type = "CanonicalUser"
-			grantee.tag = "ID"
-		else:
-			grantee.xsi_type = "AmazonCustomerByEmail"
-			grantee.tag = "EmailAddress"
-				
-		self.appendGrantee(grantee)
-
-
-	def revoke(self, name, permission):
-		name = name.lower()
-		permission = permission.upper()
-
-		if "ALL" == permission:
-			self.grantees = [g for g in self.grantees if not g.name.lower() == name]
-		else:
-			self.grantees = [g for g in self.grantees if not (g.name.lower() == name and g.permission.upper() ==  permission)]
-
-
-	def __str__(self):
-		tree = getTreeFromXml(ACL.EMPTY_ACL)
-		tree.attrib['xmlns'] = "http://s3.amazonaws.com/doc/2006-03-01/"
-		owner = tree.find(".//Owner//ID")
-		owner.text = self.owner_id
-		acl = tree.find(".//AccessControlList")
-		for grantee in self.grantees:
-			acl.append(grantee.getElement())
-		return ET.tostring(tree)
+    EMPTY_ACL = "<AccessControlPolicy><Owner><ID></ID></Owner><AccessControlList></AccessControlList></AccessControlPolicy>"
+
+    def __init__(self, xml = None):
+        if not xml:
+            xml = ACL.EMPTY_ACL
+
+        self.grantees = []
+        self.owner_id = ""
+        self.owner_nick = ""
+
+        tree = getTreeFromXml(xml)
+        self.parseOwner(tree)
+        self.parseGrants(tree)
+
+    def parseOwner(self, tree):
+        self.owner_id = tree.findtext(".//Owner//ID")
+        self.owner_nick = tree.findtext(".//Owner//DisplayName")
+
+    def parseGrants(self, tree):
+        for grant in tree.findall(".//Grant"):
+            grantee = Grantee()
+            g = grant.find(".//Grantee")
+            grantee.xsi_type = g.attrib['{http://www.w3.org/2001/XMLSchema-instance}type']
+            grantee.permission = grant.find('Permission').text
+            for el in g:
+                if el.tag == "DisplayName":
+                    grantee.display_name = el.text
+                else:
+                    grantee.tag = el.tag
+                    grantee.name = el.text
+            self.grantees.append(grantee)
+
+    def getGrantList(self):
+        acl = []
+        for grantee in self.grantees:
+            if grantee.display_name:
+                user = grantee.display_name
+            elif grantee.isAllUsers():
+                user = "*anon*"
+            else:
+                user = grantee.name
+            acl.append({'grantee': user, 'permission': grantee.permission})
+        return acl
+
+    def getOwner(self):
+        return { 'id' : self.owner_id, 'nick' : self.owner_nick }
+
+    def isAnonRead(self):
+        for grantee in self.grantees:
+            if grantee.isAnonRead():
+                return True
+        return False
+
+    def grantAnonRead(self):
+        if not self.isAnonRead():
+            self.appendGrantee(GranteeAnonRead())
+
+    def revokeAnonRead(self):
+        self.grantees = [g for g in self.grantees if not g.isAnonRead()]
+
+    def appendGrantee(self, grantee):
+        self.grantees.append(grantee)
+
+    def hasGrant(self, name, permission):
+        name = name.lower()
+        permission = permission.upper()
+
+        for grantee in self.grantees:
+            if grantee.name.lower() == name:
+                if grantee.permission == "FULL_CONTROL":
+                    return True
+                elif grantee.permission.upper() == permission:
+                    return True
+
+        return False;
+
+    def grant(self, name, permission):
+        if self.hasGrant(name, permission):
+            return
+
+        name = name.lower()
+        permission = permission.upper()
+
+        if "ALL" == permission:
+            permission = "FULL_CONTROL"
+
+        if "FULL_CONTROL" == permission:
+            self.revoke(name, "ALL")
+
+        grantee = Grantee()
+        grantee.name = name
+        grantee.permission = permission
+
+        if  name.find('@') <= -1: # ultra lame attempt to differenciate emails id from canonical ids
+            grantee.xsi_type = "CanonicalUser"
+            grantee.tag = "ID"
+        else:
+            grantee.xsi_type = "AmazonCustomerByEmail"
+            grantee.tag = "EmailAddress"
+
+        self.appendGrantee(grantee)
+
+
+    def revoke(self, name, permission):
+        name = name.lower()
+        permission = permission.upper()
+
+        if "ALL" == permission:
+            self.grantees = [g for g in self.grantees if not g.name.lower() == name]
+        else:
+            self.grantees = [g for g in self.grantees if not (g.name.lower() == name and g.permission.upper() ==  permission)]
+
+
+    def __str__(self):
+        tree = getTreeFromXml(ACL.EMPTY_ACL)
+        tree.attrib['xmlns'] = "http://s3.amazonaws.com/doc/2006-03-01/"
+        owner = tree.find(".//Owner//ID")
+        owner.text = self.owner_id
+        acl = tree.find(".//AccessControlList")
+        for grantee in self.grantees:
+            acl.append(grantee.getElement())
+        return ET.tostring(tree)
 
 if __name__ == "__main__":
-	xml = """<?xml version="1.0" encoding="UTF-8"?>
+    xml = """<?xml version="1.0" encoding="UTF-8"?>
 <AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
 <Owner>
-	<ID>12345678901234567890</ID>
-	<DisplayName>owner-nickname</DisplayName>
+    <ID>12345678901234567890</ID>
+    <DisplayName>owner-nickname</DisplayName>
 </Owner>
 <AccessControlList>
-	<Grant>
-		<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser">
-			<ID>12345678901234567890</ID>
-			<DisplayName>owner-nickname</DisplayName>
-		</Grantee>
-		<Permission>FULL_CONTROL</Permission>
-	</Grant>
-	<Grant>
-		<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group">
-			<URI>http://acs.amazonaws.com/groups/global/AllUsers</URI>
-		</Grantee>
-		<Permission>READ</Permission>
-	</Grant>
+    <Grant>
+        <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser">
+            <ID>12345678901234567890</ID>
+            <DisplayName>owner-nickname</DisplayName>
+        </Grantee>
+        <Permission>FULL_CONTROL</Permission>
+    </Grant>
+    <Grant>
+        <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group">
+            <URI>http://acs.amazonaws.com/groups/global/AllUsers</URI>
+        </Grantee>
+        <Permission>READ</Permission>
+    </Grant>
 </AccessControlList>
 </AccessControlPolicy>
-	"""
-	acl = ACL(xml)
-	print "Grants:", acl.getGrantList()
-	acl.revokeAnonRead()
-	print "Grants:", acl.getGrantList()
-	acl.grantAnonRead()
-	print "Grants:", acl.getGrantList()
-	print acl
+    """
+    acl = ACL(xml)
+    print "Grants:", acl.getGrantList()
+    acl.revokeAnonRead()
+    print "Grants:", acl.getGrantList()
+    acl.grantAnonRead()
+    print "Grants:", acl.getGrantList()
+    print acl
+
+# vim:et:ts=4:sts=4:ai
... ...
@@ -9,82 +9,84 @@ from Utils import getTreeFromXml
 from ACL import GranteeAnonRead
 
 try:
-	import xml.etree.ElementTree as ET
+    import xml.etree.ElementTree as ET
 except ImportError:
-	import elementtree.ElementTree as ET
+    import elementtree.ElementTree as ET
 
 __all__ = []
 class AccessLog(object):
-	LOG_DISABLED = "<BucketLoggingStatus></BucketLoggingStatus>"
-	LOG_TEMPLATE = "<LoggingEnabled><TargetBucket></TargetBucket><TargetPrefix></TargetPrefix></LoggingEnabled>"
+    LOG_DISABLED = "<BucketLoggingStatus></BucketLoggingStatus>"
+    LOG_TEMPLATE = "<LoggingEnabled><TargetBucket></TargetBucket><TargetPrefix></TargetPrefix></LoggingEnabled>"
 
-	def __init__(self, xml = None):
-		if not xml:
-			xml = self.LOG_DISABLED
-		self.tree = getTreeFromXml(xml)
-		self.tree.attrib['xmlns'] = "http://doc.s3.amazonaws.com/2006-03-01"
-	
-	def isLoggingEnabled(self):
-		return bool(self.tree.find(".//LoggingEnabled"))
+    def __init__(self, xml = None):
+        if not xml:
+            xml = self.LOG_DISABLED
+        self.tree = getTreeFromXml(xml)
+        self.tree.attrib['xmlns'] = "http://doc.s3.amazonaws.com/2006-03-01"
 
-	def disableLogging(self):
-		el = self.tree.find(".//LoggingEnabled")
-		if el:
-			self.tree.remove(el)
-	
-	def enableLogging(self, target_prefix_uri):
-		el = self.tree.find(".//LoggingEnabled")
-		if not el:
-			el = getTreeFromXml(self.LOG_TEMPLATE)
-			self.tree.append(el)
-		el.find(".//TargetBucket").text = target_prefix_uri.bucket()
-		el.find(".//TargetPrefix").text = target_prefix_uri.object()
+    def isLoggingEnabled(self):
+        return bool(self.tree.find(".//LoggingEnabled"))
 
-	def targetPrefix(self):
-		if self.isLoggingEnabled():
-			el = self.tree.find(".//LoggingEnabled")
-			target_prefix = "s3://%s/%s" % (
-				self.tree.find(".//LoggingEnabled//TargetBucket").text, 
-				self.tree.find(".//LoggingEnabled//TargetPrefix").text)
-			return S3Uri.S3Uri(target_prefix)
-		else:
-			return ""
+    def disableLogging(self):
+        el = self.tree.find(".//LoggingEnabled")
+        if el:
+            self.tree.remove(el)
 
-	def setAclPublic(self, acl_public):
-		le = self.tree.find(".//LoggingEnabled")
-		if not le:
-			raise ParameterError("Logging not enabled, can't set default ACL for logs")
-		tg = le.find(".//TargetGrants")
-		if not acl_public:
-			if not tg:
-				## All good, it's not been there
-				return
-			else:
-				le.remove(tg)
-		else: # acl_public == True
-			anon_read = GranteeAnonRead().getElement()
-			if not tg:
-				tg = ET.SubElement(le, "TargetGrants")
-			## What if TargetGrants already exists? We should check if 
-			## AnonRead is there before appending a new one. Later...
-			tg.append(anon_read)
+    def enableLogging(self, target_prefix_uri):
+        el = self.tree.find(".//LoggingEnabled")
+        if not el:
+            el = getTreeFromXml(self.LOG_TEMPLATE)
+            self.tree.append(el)
+        el.find(".//TargetBucket").text = target_prefix_uri.bucket()
+        el.find(".//TargetPrefix").text = target_prefix_uri.object()
 
-	def isAclPublic(self):
-		raise NotImplementedError()
+    def targetPrefix(self):
+        if self.isLoggingEnabled():
+            el = self.tree.find(".//LoggingEnabled")
+            target_prefix = "s3://%s/%s" % (
+                self.tree.find(".//LoggingEnabled//TargetBucket").text,
+                self.tree.find(".//LoggingEnabled//TargetPrefix").text)
+            return S3Uri.S3Uri(target_prefix)
+        else:
+            return ""
 
-	def __str__(self):
-		return ET.tostring(self.tree)
+    def setAclPublic(self, acl_public):
+        le = self.tree.find(".//LoggingEnabled")
+        if not le:
+            raise ParameterError("Logging not enabled, can't set default ACL for logs")
+        tg = le.find(".//TargetGrants")
+        if not acl_public:
+            if not tg:
+                ## All good, it's not been there
+                return
+            else:
+                le.remove(tg)
+        else: # acl_public == True
+            anon_read = GranteeAnonRead().getElement()
+            if not tg:
+                tg = ET.SubElement(le, "TargetGrants")
+            ## What if TargetGrants already exists? We should check if
+            ## AnonRead is there before appending a new one. Later...
+            tg.append(anon_read)
+
+    def isAclPublic(self):
+        raise NotImplementedError()
+
+    def __str__(self):
+        return ET.tostring(self.tree)
 __all__.append("AccessLog")
 
 if __name__ == "__main__":
-	from S3Uri import S3Uri
-	log = AccessLog()
-	print log
-	log.enableLogging(S3Uri("s3://targetbucket/prefix/log-"))
-	print log
-	log.setAclPublic(True)
-	print log
-	log.setAclPublic(False)
-	print log
-	log.disableLogging()
-	print log
+    from S3Uri import S3Uri
+    log = AccessLog()
+    print log
+    log.enableLogging(S3Uri("s3://targetbucket/prefix/log-"))
+    print log
+    log.setAclPublic(True)
+    print log
+    log.setAclPublic(False)
+    print log
+    log.disableLogging()
+    print log
+
+# vim:et:ts=4:sts=4:ai
... ...
@@ -4,37 +4,39 @@
 ## License: GPL Version 2
 
 class BidirMap(object):
-	def __init__(self, **map):
-		self.k2v = {}
-		self.v2k = {}
-		for key in map:
-			self.__setitem__(key, map[key])
-
-	def __setitem__(self, key, value):
-		if self.v2k.has_key(value):
-			if self.v2k[value] != key:
-				raise KeyError("Value '"+str(value)+"' already in use with key '"+str(self.v2k[value])+"'")
-		try:
-			del(self.v2k[self.k2v[key]])
-		except KeyError:
-			pass
-		self.k2v[key] = value
-		self.v2k[value] = key
-
-	def __getitem__(self, key):
-		return self.k2v[key]
-
-	def __str__(self):
-		return self.v2k.__str__()
-
-	def getkey(self, value):
-		return self.v2k[value]
-	
-	def getvalue(self, key):
-		return self.k2v[key]
-
-	def keys(self):
-		return [key for key in self.k2v]
-
-	def values(self):
-		return [value for value in self.v2k]
+    def __init__(self, **map):
+        self.k2v = {}
+        self.v2k = {}
+        for key in map:
+            self.__setitem__(key, map[key])
+
+    def __setitem__(self, key, value):
+        if self.v2k.has_key(value):
+            if self.v2k[value] != key:
+                raise KeyError("Value '"+str(value)+"' already in use with key '"+str(self.v2k[value])+"'")
+        try:
+            del(self.v2k[self.k2v[key]])
+        except KeyError:
+            pass
+        self.k2v[key] = value
+        self.v2k[value] = key
+
+    def __getitem__(self, key):
+        return self.k2v[key]
+
+    def __str__(self):
+        return self.v2k.__str__()
+
+    def getkey(self, value):
+        return self.v2k[value]
+
+    def getvalue(self, key):
+        return self.k2v[key]
+
+    def keys(self):
+        return [key for key in self.k2v]
+
+    def values(self):
+        return [value for value in self.v2k]
+
+# vim:et:ts=4:sts=4:ai
... ...
@@ -11,9 +11,9 @@ from datetime import datetime
 from logging import debug, info, warning, error
 
 try:
-	import xml.etree.ElementTree as ET
+    import xml.etree.ElementTree as ET
 except ImportError:
-	import elementtree.ElementTree as ET
+    import elementtree.ElementTree as ET
 
 from Config import Config
 from Exceptions import *
... ...
@@ -25,714 +25,716 @@ cloudfront_api_version = "2010-11-01"
 cloudfront_resource = "/%(api_ver)s/distribution" % { 'api_ver' : cloudfront_api_version }
 
 def output(message):
-	sys.stdout.write(message + "\n")
+    sys.stdout.write(message + "\n")
 
 def pretty_output(label, message):
-	#label = ("%s " % label).ljust(20, ".")
-	label = ("%s:" % label).ljust(15)
-	output("%s %s" % (label, message))
+    #label = ("%s " % label).ljust(20, ".")
+    label = ("%s:" % label).ljust(15)
+    output("%s %s" % (label, message))
 
 class DistributionSummary(object):
-	## Example:
-	##
-	## <DistributionSummary>
-	##	<Id>1234567890ABC</Id>
-	##	<Status>Deployed</Status>
-	##	<LastModifiedTime>2009-01-16T11:49:02.189Z</LastModifiedTime>
-	##	<DomainName>blahblahblah.cloudfront.net</DomainName>
-	##	<S3Origin>
-	##     <DNSName>example.bucket.s3.amazonaws.com</DNSName>
-	##  </S3Origin>
-	##  <CNAME>cdn.example.com</CNAME>
-	##  <CNAME>img.example.com</CNAME>
-	##  <Comment>What Ever</Comment>
-	##	<Enabled>true</Enabled>
-	## </DistributionSummary>
-
-	def __init__(self, tree):
-		if tree.tag != "DistributionSummary":
-			raise ValueError("Expected <DistributionSummary /> xml, got: <%s />" % tree.tag)
-		self.parse(tree)
-
-	def parse(self, tree):
-		self.info = getDictFromTree(tree)
-		self.info['Enabled'] = (self.info['Enabled'].lower() == "true")
-		if self.info.has_key("CNAME") and type(self.info['CNAME']) != list:
-			self.info['CNAME'] = [self.info['CNAME']]
-
-	def uri(self):
-		return S3Uri("cf://%s" % self.info['Id'])
+    ## Example:
+    ##
+    ## <DistributionSummary>
+    ##  <Id>1234567890ABC</Id>
+    ##  <Status>Deployed</Status>
+    ##  <LastModifiedTime>2009-01-16T11:49:02.189Z</LastModifiedTime>
+    ##  <DomainName>blahblahblah.cloudfront.net</DomainName>
+    ##  <S3Origin>
+    ##     <DNSName>example.bucket.s3.amazonaws.com</DNSName>
+    ##  </S3Origin>
+    ##  <CNAME>cdn.example.com</CNAME>
+    ##  <CNAME>img.example.com</CNAME>
+    ##  <Comment>What Ever</Comment>
+    ##  <Enabled>true</Enabled>
+    ## </DistributionSummary>
+
+    def __init__(self, tree):
+        if tree.tag != "DistributionSummary":
+            raise ValueError("Expected <DistributionSummary /> xml, got: <%s />" % tree.tag)
+        self.parse(tree)
+
+    def parse(self, tree):
+        self.info = getDictFromTree(tree)
+        self.info['Enabled'] = (self.info['Enabled'].lower() == "true")
+        if self.info.has_key("CNAME") and type(self.info['CNAME']) != list:
+            self.info['CNAME'] = [self.info['CNAME']]
+
+    def uri(self):
+        return S3Uri("cf://%s" % self.info['Id'])
 
 class DistributionList(object):
-	## Example:
-	## 
-	## <DistributionList xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">
-	##	<Marker />
-	##	<MaxItems>100</MaxItems>
-	##	<IsTruncated>false</IsTruncated>
-	##	<DistributionSummary>
-	##	... handled by DistributionSummary() class ...
-	##	</DistributionSummary>
-	## </DistributionList>
-
-	def __init__(self, xml):
-		tree = getTreeFromXml(xml)
-		if tree.tag != "DistributionList":
-			raise ValueError("Expected <DistributionList /> xml, got: <%s />" % tree.tag)
-		self.parse(tree)
-
-	def parse(self, tree):
-		self.info = getDictFromTree(tree)
-		## Normalise some items
-		self.info['IsTruncated'] = (self.info['IsTruncated'].lower() == "true")
-
-		self.dist_summs = []
-		for dist_summ in tree.findall(".//DistributionSummary"):
-			self.dist_summs.append(DistributionSummary(dist_summ))
+    ## Example:
+    ##
+    ## <DistributionList xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">
+    ##  <Marker />
+    ##  <MaxItems>100</MaxItems>
+    ##  <IsTruncated>false</IsTruncated>
+    ##  <DistributionSummary>
+    ##  ... handled by DistributionSummary() class ...
+    ##  </DistributionSummary>
+    ## </DistributionList>
+
+    def __init__(self, xml):
+        tree = getTreeFromXml(xml)
+        if tree.tag != "DistributionList":
+            raise ValueError("Expected <DistributionList /> xml, got: <%s />" % tree.tag)
+        self.parse(tree)
+
+    def parse(self, tree):
+        self.info = getDictFromTree(tree)
+        ## Normalise some items
+        self.info['IsTruncated'] = (self.info['IsTruncated'].lower() == "true")
+
+        self.dist_summs = []
+        for dist_summ in tree.findall(".//DistributionSummary"):
+            self.dist_summs.append(DistributionSummary(dist_summ))
 
 class Distribution(object):
-	## Example:
-	##
-	## <Distribution xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">
-	##	<Id>1234567890ABC</Id>
-	##	<Status>InProgress</Status>
-	##	<LastModifiedTime>2009-01-16T13:07:11.319Z</LastModifiedTime>
-	##	<DomainName>blahblahblah.cloudfront.net</DomainName>
-	##	<DistributionConfig>
-	##	... handled by DistributionConfig() class ...
-	##	</DistributionConfig>
-	## </Distribution>
-
-	def __init__(self, xml):
-		tree = getTreeFromXml(xml)
-		if tree.tag != "Distribution":
-			raise ValueError("Expected <Distribution /> xml, got: <%s />" % tree.tag)
-		self.parse(tree)
-
-	def parse(self, tree):
-		self.info = getDictFromTree(tree)
-		## Normalise some items
-		self.info['LastModifiedTime'] = dateS3toPython(self.info['LastModifiedTime'])
-
-		self.info['DistributionConfig'] = DistributionConfig(tree = tree.find(".//DistributionConfig"))
-	
-	def uri(self):
-		return S3Uri("cf://%s" % self.info['Id'])
+    ## Example:
+    ##
+    ## <Distribution xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">
+    ##  <Id>1234567890ABC</Id>
+    ##  <Status>InProgress</Status>
+    ##  <LastModifiedTime>2009-01-16T13:07:11.319Z</LastModifiedTime>
+    ##  <DomainName>blahblahblah.cloudfront.net</DomainName>
+    ##  <DistributionConfig>
+    ##  ... handled by DistributionConfig() class ...
+    ##  </DistributionConfig>
+    ## </Distribution>
+
+    def __init__(self, xml):
+        tree = getTreeFromXml(xml)
+        if tree.tag != "Distribution":
+            raise ValueError("Expected <Distribution /> xml, got: <%s />" % tree.tag)
+        self.parse(tree)
+
+    def parse(self, tree):
+        self.info = getDictFromTree(tree)
+        ## Normalise some items
+        self.info['LastModifiedTime'] = dateS3toPython(self.info['LastModifiedTime'])
+
+        self.info['DistributionConfig'] = DistributionConfig(tree = tree.find(".//DistributionConfig"))
+
+    def uri(self):
+        return S3Uri("cf://%s" % self.info['Id'])
 
 class DistributionConfig(object):
-	## Example:
-	##
-	## <DistributionConfig>
-	##	<Origin>somebucket.s3.amazonaws.com</Origin>
-	##	<CallerReference>s3://somebucket/</CallerReference>
-	##	<Comment>http://somebucket.s3.amazonaws.com/</Comment>
-	##	<Enabled>true</Enabled>
-	##  <Logging>
-	##    <Bucket>bu.ck.et</Bucket>
-	##    <Prefix>/cf-somebucket/</Prefix>
-	##  </Logging>
-	## </DistributionConfig>
-
-	EMPTY_CONFIG = "<DistributionConfig><Origin/><CallerReference/><Enabled>true</Enabled></DistributionConfig>"
-	xmlns = "http://cloudfront.amazonaws.com/doc/%(api_ver)s/" % { 'api_ver' : cloudfront_api_version }
-	def __init__(self, xml = None, tree = None):
-		if xml is None:
-			xml = DistributionConfig.EMPTY_CONFIG
-
-		if tree is None:
-			tree = getTreeFromXml(xml)
-
-		if tree.tag != "DistributionConfig":
-			raise ValueError("Expected <DistributionConfig /> xml, got: <%s />" % tree.tag)
-		self.parse(tree)
-
-	def parse(self, tree):
-		self.info = getDictFromTree(tree)
-		self.info['Enabled'] = (self.info['Enabled'].lower() == "true")
-		if not self.info.has_key("CNAME"):
-			self.info['CNAME'] = []
-		if type(self.info['CNAME']) != list:
-			self.info['CNAME'] = [self.info['CNAME']]
-		self.info['CNAME'] = [cname.lower() for cname in self.info['CNAME']]
-		if not self.info.has_key("Comment"):
-			self.info['Comment'] = ""
-		if not self.info.has_key("DefaultRootObject"):
-			self.info['DefaultRootObject'] = ""
-		## Figure out logging - complex node not parsed by getDictFromTree()
-		logging_nodes = tree.findall(".//Logging")
-		if logging_nodes:
-			logging_dict = getDictFromTree(logging_nodes[0])
-			logging_dict['Bucket'], success = getBucketFromHostname(logging_dict['Bucket'])
-			if not success:
-				warning("Logging to unparsable bucket name: %s" % logging_dict['Bucket'])
-			self.info['Logging'] = S3UriS3("s3://%(Bucket)s/%(Prefix)s" % logging_dict)
-		else:
-			self.info['Logging'] = None
-
-	def __str__(self):
-		tree = ET.Element("DistributionConfig")
-		tree.attrib['xmlns'] = DistributionConfig.xmlns
-
-		## Retain the order of the following calls!
-		appendXmlTextNode("Origin", self.info['Origin'], tree)
-		appendXmlTextNode("CallerReference", self.info['CallerReference'], tree)
-		for cname in self.info['CNAME']:
-			appendXmlTextNode("CNAME", cname.lower(), tree)
-		if self.info['Comment']:
-			appendXmlTextNode("Comment", self.info['Comment'], tree)
-		appendXmlTextNode("Enabled", str(self.info['Enabled']).lower(), tree)
-		# don't create a empty DefaultRootObject element as it would result in a MalformedXML error
-		if str(self.info['DefaultRootObject']):
-			appendXmlTextNode("DefaultRootObject", str(self.info['DefaultRootObject']), tree)
-		if self.info['Logging']:
-			logging_el = ET.Element("Logging")
-			appendXmlTextNode("Bucket", getHostnameFromBucket(self.info['Logging'].bucket()), logging_el)
-			appendXmlTextNode("Prefix", self.info['Logging'].object(), logging_el)
-			tree.append(logging_el)
-		return ET.tostring(tree)
+    ## Example:
+    ##
+    ## <DistributionConfig>
+    ##  <Origin>somebucket.s3.amazonaws.com</Origin>
+    ##  <CallerReference>s3://somebucket/</CallerReference>
+    ##  <Comment>http://somebucket.s3.amazonaws.com/</Comment>
+    ##  <Enabled>true</Enabled>
+    ##  <Logging>
+    ##    <Bucket>bu.ck.et</Bucket>
+    ##    <Prefix>/cf-somebucket/</Prefix>
+    ##  </Logging>
+    ## </DistributionConfig>
+
+    EMPTY_CONFIG = "<DistributionConfig><Origin/><CallerReference/><Enabled>true</Enabled></DistributionConfig>"
+    xmlns = "http://cloudfront.amazonaws.com/doc/%(api_ver)s/" % { 'api_ver' : cloudfront_api_version }
+    def __init__(self, xml = None, tree = None):
+        if xml is None:
+            xml = DistributionConfig.EMPTY_CONFIG
+
+        if tree is None:
+            tree = getTreeFromXml(xml)
+
+        if tree.tag != "DistributionConfig":
+            raise ValueError("Expected <DistributionConfig /> xml, got: <%s />" % tree.tag)
+        self.parse(tree)
+
+    def parse(self, tree):
+        self.info = getDictFromTree(tree)
+        self.info['Enabled'] = (self.info['Enabled'].lower() == "true")
+        if not self.info.has_key("CNAME"):
+            self.info['CNAME'] = []
+        if type(self.info['CNAME']) != list:
+            self.info['CNAME'] = [self.info['CNAME']]
+        self.info['CNAME'] = [cname.lower() for cname in self.info['CNAME']]
+        if not self.info.has_key("Comment"):
+            self.info['Comment'] = ""
+        if not self.info.has_key("DefaultRootObject"):
+            self.info['DefaultRootObject'] = ""
+        ## Figure out logging - complex node not parsed by getDictFromTree()
+        logging_nodes = tree.findall(".//Logging")
+        if logging_nodes:
+            logging_dict = getDictFromTree(logging_nodes[0])
+            logging_dict['Bucket'], success = getBucketFromHostname(logging_dict['Bucket'])
+            if not success:
+                warning("Logging to unparsable bucket name: %s" % logging_dict['Bucket'])
+            self.info['Logging'] = S3UriS3("s3://%(Bucket)s/%(Prefix)s" % logging_dict)
+        else:
+            self.info['Logging'] = None
+
+    def __str__(self):
+        tree = ET.Element("DistributionConfig")
+        tree.attrib['xmlns'] = DistributionConfig.xmlns
+
+        ## Retain the order of the following calls!
+        appendXmlTextNode("Origin", self.info['Origin'], tree)
+        appendXmlTextNode("CallerReference", self.info['CallerReference'], tree)
+        for cname in self.info['CNAME']:
+            appendXmlTextNode("CNAME", cname.lower(), tree)
+        if self.info['Comment']:
+            appendXmlTextNode("Comment", self.info['Comment'], tree)
+        appendXmlTextNode("Enabled", str(self.info['Enabled']).lower(), tree)
+        # don't create a empty DefaultRootObject element as it would result in a MalformedXML error
+        if str(self.info['DefaultRootObject']):
+            appendXmlTextNode("DefaultRootObject", str(self.info['DefaultRootObject']), tree)
+        if self.info['Logging']:
+            logging_el = ET.Element("Logging")
+            appendXmlTextNode("Bucket", getHostnameFromBucket(self.info['Logging'].bucket()), logging_el)
+            appendXmlTextNode("Prefix", self.info['Logging'].object(), logging_el)
+            tree.append(logging_el)
+        return ET.tostring(tree)
 
 class Invalidation(object):
-	## Example:
-	##
-	## <Invalidation xmlns="http://cloudfront.amazonaws.com/doc/2010-11-01/">
-	##   <Id>id</Id>
-	##   <Status>status</Status>
-	##   <CreateTime>date</CreateTime>
-	##   <InvalidationBatch>
-	##       <Path>/image1.jpg</Path>
-	##       <Path>/image2.jpg</Path>
-	##       <Path>/videos/movie.flv</Path>
-	##       <CallerReference>my-batch</CallerReference>
-	##   </InvalidationBatch>
-	## </Invalidation>
-
-	def __init__(self, xml):
-		tree = getTreeFromXml(xml)
-		if tree.tag != "Invalidation":
-			raise ValueError("Expected <Invalidation /> xml, got: <%s />" % tree.tag)
-		self.parse(tree)
-
-	def parse(self, tree):
-		self.info = getDictFromTree(tree)
-
-	def __str__(self):
-		return str(self.info)
+    ## Example:
+    ##
+    ## <Invalidation xmlns="http://cloudfront.amazonaws.com/doc/2010-11-01/">
+    ##   <Id>id</Id>
+    ##   <Status>status</Status>
+    ##   <CreateTime>date</CreateTime>
+    ##   <InvalidationBatch>
+    ##       <Path>/image1.jpg</Path>
+    ##       <Path>/image2.jpg</Path>
+    ##       <Path>/videos/movie.flv</Path>
+    ##       <CallerReference>my-batch</CallerReference>
+    ##   </InvalidationBatch>
+    ## </Invalidation>
+
+    def __init__(self, xml):
+        tree = getTreeFromXml(xml)
+        if tree.tag != "Invalidation":
+            raise ValueError("Expected <Invalidation /> xml, got: <%s />" % tree.tag)
+        self.parse(tree)
+
+    def parse(self, tree):
+        self.info = getDictFromTree(tree)
+
+    def __str__(self):
+        return str(self.info)
 
 class InvalidationList(object):
-	## Example:
-	##
-	## <InvalidationList>
-	##   <Marker/>
-	##   <NextMarker>Invalidation ID</NextMarker>
-	##   <MaxItems>2</MaxItems>
-	##   <IsTruncated>true</IsTruncated>
-	##   <InvalidationSummary>
-	##     <Id>[Second Invalidation ID]</Id>
-	##     <Status>Completed</Status>
-	##   </InvalidationSummary>
-	##   <InvalidationSummary>
-	##     <Id>[First Invalidation ID]</Id>
-	##     <Status>Completed</Status>
-	##   </InvalidationSummary>
-	## </InvalidationList>
-
-	def __init__(self, xml):
-		tree = getTreeFromXml(xml)
-		if tree.tag != "InvalidationList":
-			raise ValueError("Expected <InvalidationList /> xml, got: <%s />" % tree.tag)
-		self.parse(tree)
-
-	def parse(self, tree):
-		self.info = getDictFromTree(tree)
-
-	def __str__(self):
-		return str(self.info)
+    ## Example:
+    ##
+    ## <InvalidationList>
+    ##   <Marker/>
+    ##   <NextMarker>Invalidation ID</NextMarker>
+    ##   <MaxItems>2</MaxItems>
+    ##   <IsTruncated>true</IsTruncated>
+    ##   <InvalidationSummary>
+    ##     <Id>[Second Invalidation ID]</Id>
+    ##     <Status>Completed</Status>
+    ##   </InvalidationSummary>
+    ##   <InvalidationSummary>
+    ##     <Id>[First Invalidation ID]</Id>
+    ##     <Status>Completed</Status>
+    ##   </InvalidationSummary>
+    ## </InvalidationList>
+
+    def __init__(self, xml):
+        tree = getTreeFromXml(xml)
+        if tree.tag != "InvalidationList":
+            raise ValueError("Expected <InvalidationList /> xml, got: <%s />" % tree.tag)
+        self.parse(tree)
+
+    def parse(self, tree):
+        self.info = getDictFromTree(tree)
+
+    def __str__(self):
+        return str(self.info)
 
 class InvalidationBatch(object):
-	## Example:
-	##
-	## <InvalidationBatch>
-	##   <Path>/image1.jpg</Path>
-	##   <Path>/image2.jpg</Path>
-	##   <Path>/videos/movie.flv</Path>
-	##   <Path>/sound%20track.mp3</Path>
-	##   <CallerReference>my-batch</CallerReference>
-	## </InvalidationBatch>
-
-	def __init__(self, reference = None, distribution = None, paths = []):
-		if reference:
-			self.reference = reference
-		else:
-			if not distribution:
-				distribution="0"
-			self.reference = "%s.%s.%s" % (distribution,
-				datetime.strftime(datetime.now(),"%Y%m%d%H%M%S"),
-				random.randint(1000,9999))
-		self.paths = []
-		self.add_objects(paths)
-
-	def add_objects(self, paths):
-		self.paths.extend(paths)
-
-	def get_reference(self):
-		return self.reference
-
-	def __str__(self):
-		tree = ET.Element("InvalidationBatch")
-
-		for path in self.paths:
-			if path[0] != "/":
-				path = "/" + path
-			appendXmlTextNode("Path", path, tree)
-		appendXmlTextNode("CallerReference", self.reference, tree)
-		return ET.tostring(tree)
+    ## Example:
+    ##
+    ## <InvalidationBatch>
+    ##   <Path>/image1.jpg</Path>
+    ##   <Path>/image2.jpg</Path>
+    ##   <Path>/videos/movie.flv</Path>
+    ##   <Path>/sound%20track.mp3</Path>
+    ##   <CallerReference>my-batch</CallerReference>
+    ## </InvalidationBatch>
+
+    def __init__(self, reference = None, distribution = None, paths = []):
+        if reference:
+            self.reference = reference
+        else:
+            if not distribution:
+                distribution="0"
+            self.reference = "%s.%s.%s" % (distribution,
+                datetime.strftime(datetime.now(),"%Y%m%d%H%M%S"),
+                random.randint(1000,9999))
+        self.paths = []
+        self.add_objects(paths)
+
+    def add_objects(self, paths):
+        self.paths.extend(paths)
+
+    def get_reference(self):
+        return self.reference
+
+    def __str__(self):
+        tree = ET.Element("InvalidationBatch")
+
+        for path in self.paths:
+            if path[0] != "/":
+                path = "/" + path
+            appendXmlTextNode("Path", path, tree)
+        appendXmlTextNode("CallerReference", self.reference, tree)
+        return ET.tostring(tree)
 
 class CloudFront(object):
-	operations = {
-		"CreateDist" : { 'method' : "POST", 'resource' : "" },
-		"DeleteDist" : { 'method' : "DELETE", 'resource' : "/%(dist_id)s" },
-		"GetList" : { 'method' : "GET", 'resource' : "" },
-		"GetDistInfo" : { 'method' : "GET", 'resource' : "/%(dist_id)s" },
-		"GetDistConfig" : { 'method' : "GET", 'resource' : "/%(dist_id)s/config" },
-		"SetDistConfig" : { 'method' : "PUT", 'resource' : "/%(dist_id)s/config" },
-		"Invalidate" : { 'method' : "POST", 'resource' : "/%(dist_id)s/invalidation" },
-		"GetInvalList" : { 'method' : "GET", 'resource' : "/%(dist_id)s/invalidation" },
-		"GetInvalInfo" : { 'method' : "GET", 'resource' : "/%(dist_id)s/invalidation/%(request_id)s" },
-	}
-
-	## Maximum attempts of re-issuing failed requests
-	_max_retries = 5
-	dist_list = None
-
-	def __init__(self, config):
-		self.config = config
-
-	## --------------------------------------------------
-	## Methods implementing CloudFront API
-	## --------------------------------------------------
-
-	def GetList(self):
-		response = self.send_request("GetList")
-		response['dist_list'] = DistributionList(response['data'])
-		if response['dist_list'].info['IsTruncated']:
-			raise NotImplementedError("List is truncated. Ask s3cmd author to add support.")
-		## TODO: handle Truncated 
-		return response
-	
-	def CreateDistribution(self, uri, cnames_add = [], comment = None, logging = None, default_root_object = None):
-		dist_config = DistributionConfig()
-		dist_config.info['Enabled'] = True
-		dist_config.info['Origin'] = uri.host_name()
-		dist_config.info['CallerReference'] = str(uri)
-		dist_config.info['DefaultRootObject'] = default_root_object
-		if comment == None:
-			dist_config.info['Comment'] = uri.public_url()
-		else:
-			dist_config.info['Comment'] = comment
-		for cname in cnames_add:
-			if dist_config.info['CNAME'].count(cname) == 0:
-				dist_config.info['CNAME'].append(cname)
-		if logging:
-			dist_config.info['Logging'] = S3UriS3(logging)
-		request_body = str(dist_config)
-		debug("CreateDistribution(): request_body: %s" % request_body)
-		response = self.send_request("CreateDist", body = request_body)
-		response['distribution'] = Distribution(response['data'])
-		return response
-	
-	def ModifyDistribution(self, cfuri, cnames_add = [], cnames_remove = [],
-	                       comment = None, enabled = None, logging = None,
+    operations = {
+        "CreateDist" : { 'method' : "POST", 'resource' : "" },
+        "DeleteDist" : { 'method' : "DELETE", 'resource' : "/%(dist_id)s" },
+        "GetList" : { 'method' : "GET", 'resource' : "" },
+        "GetDistInfo" : { 'method' : "GET", 'resource' : "/%(dist_id)s" },
+        "GetDistConfig" : { 'method' : "GET", 'resource' : "/%(dist_id)s/config" },
+        "SetDistConfig" : { 'method' : "PUT", 'resource' : "/%(dist_id)s/config" },
+        "Invalidate" : { 'method' : "POST", 'resource' : "/%(dist_id)s/invalidation" },
+        "GetInvalList" : { 'method' : "GET", 'resource' : "/%(dist_id)s/invalidation" },
+        "GetInvalInfo" : { 'method' : "GET", 'resource' : "/%(dist_id)s/invalidation/%(request_id)s" },
+    }
+
+    ## Maximum attempts of re-issuing failed requests
+    _max_retries = 5
+    dist_list = None
+
+    def __init__(self, config):
+        self.config = config
+
+    ## --------------------------------------------------
+    ## Methods implementing CloudFront API
+    ## --------------------------------------------------
+
+    def GetList(self):
+        response = self.send_request("GetList")
+        response['dist_list'] = DistributionList(response['data'])
+        if response['dist_list'].info['IsTruncated']:
+            raise NotImplementedError("List is truncated. Ask s3cmd author to add support.")
+        ## TODO: handle Truncated
+        return response
+
+    def CreateDistribution(self, uri, cnames_add = [], comment = None, logging = None, default_root_object = None):
+        dist_config = DistributionConfig()
+        dist_config.info['Enabled'] = True
+        dist_config.info['Origin'] = uri.host_name()
+        dist_config.info['CallerReference'] = str(uri)
+        dist_config.info['DefaultRootObject'] = default_root_object
+        if comment == None:
+            dist_config.info['Comment'] = uri.public_url()
+        else:
+            dist_config.info['Comment'] = comment
+        for cname in cnames_add:
+            if dist_config.info['CNAME'].count(cname) == 0:
+                dist_config.info['CNAME'].append(cname)
+        if logging:
+            dist_config.info['Logging'] = S3UriS3(logging)
+        request_body = str(dist_config)
+        debug("CreateDistribution(): request_body: %s" % request_body)
+        response = self.send_request("CreateDist", body = request_body)
+        response['distribution'] = Distribution(response['data'])
+        return response
+
+    def ModifyDistribution(self, cfuri, cnames_add = [], cnames_remove = [],
+                           comment = None, enabled = None, logging = None,
                            default_root_object = None):
-		if cfuri.type != "cf":
-			raise ValueError("Expected CFUri instead of: %s" % cfuri)
-		# Get current dist status (enabled/disabled) and Etag
-		info("Checking current status of %s" % cfuri)
-		response = self.GetDistConfig(cfuri)
-		dc = response['dist_config']
-		if enabled != None:
-			dc.info['Enabled'] = enabled
-		if comment != None:
-			dc.info['Comment'] = comment
-		if default_root_object != None:
-			dc.info['DefaultRootObject'] = default_root_object
-		for cname in cnames_add:
-			if dc.info['CNAME'].count(cname) == 0:
-				dc.info['CNAME'].append(cname)
-		for cname in cnames_remove:
-			while dc.info['CNAME'].count(cname) > 0:
-				dc.info['CNAME'].remove(cname)
-		if logging != None:
-			if logging == False:
-				dc.info['Logging'] = False
-			else:
-				dc.info['Logging'] = S3UriS3(logging)
-		response = self.SetDistConfig(cfuri, dc, response['headers']['etag'])
-		return response
-		
-	def DeleteDistribution(self, cfuri):
-		if cfuri.type != "cf":
-			raise ValueError("Expected CFUri instead of: %s" % cfuri)
-		# Get current dist status (enabled/disabled) and Etag
-		info("Checking current status of %s" % cfuri)
-		response = self.GetDistConfig(cfuri)
-		if response['dist_config'].info['Enabled']:
-			info("Distribution is ENABLED. Disabling first.")
-			response['dist_config'].info['Enabled'] = False
-			response = self.SetDistConfig(cfuri, response['dist_config'], 
-			                              response['headers']['etag'])
-			warning("Waiting for Distribution to become disabled.")
-			warning("This may take several minutes, please wait.")
-			while True:
-				response = self.GetDistInfo(cfuri)
-				d = response['distribution']
-				if d.info['Status'] == "Deployed" and d.info['Enabled'] == False:
-					info("Distribution is now disabled")
-					break
-				warning("Still waiting...")
-				time.sleep(10)
-		headers = {}
-		headers['if-match'] = response['headers']['etag']
-		response = self.send_request("DeleteDist", dist_id = cfuri.dist_id(),
-		                             headers = headers)
-		return response
-	
-	def GetDistInfo(self, cfuri):
-		if cfuri.type != "cf":
-			raise ValueError("Expected CFUri instead of: %s" % cfuri)
-		response = self.send_request("GetDistInfo", dist_id = cfuri.dist_id())
-		response['distribution'] = Distribution(response['data'])
-		return response
-
-	def GetDistConfig(self, cfuri):
-		if cfuri.type != "cf":
-			raise ValueError("Expected CFUri instead of: %s" % cfuri)
-		response = self.send_request("GetDistConfig", dist_id = cfuri.dist_id())
-		response['dist_config'] = DistributionConfig(response['data'])
-		return response
-	
-	def SetDistConfig(self, cfuri, dist_config, etag = None):
-		if etag == None:
-			debug("SetDistConfig(): Etag not set. Fetching it first.")
-			etag = self.GetDistConfig(cfuri)['headers']['etag']
-		debug("SetDistConfig(): Etag = %s" % etag)
-		request_body = str(dist_config)
-		debug("SetDistConfig(): request_body: %s" % request_body)
-		headers = {}
-		headers['if-match'] = etag
-		response = self.send_request("SetDistConfig", dist_id = cfuri.dist_id(),
-		                             body = request_body, headers = headers)
-		return response
-
-	def InvalidateObjects(self, uri, paths):
-		# uri could be either cf:// or s3:// uri
-		cfuri = self.get_dist_name_for_bucket(uri)
-		if len(paths) > 999:
-			try:
-				tmp_filename = Utils.mktmpfile()
-				f = open(tmp_filename, "w")
-				f.write("\n".join(paths)+"\n")
-				f.close()
-				warning("Request to invalidate %d paths (max 999 supported)" % len(paths))
-				warning("All the paths are now saved in: %s" % tmp_filename)
-			except:
-				pass
-			raise ParameterError("Too many paths to invalidate")
-		invalbatch = InvalidationBatch(distribution = cfuri.dist_id(), paths = paths)
-		debug("InvalidateObjects(): request_body: %s" % invalbatch)
-		response = self.send_request("Invalidate", dist_id = cfuri.dist_id(),
-		                             body = str(invalbatch))
-		response['dist_id'] = cfuri.dist_id()
-		if response['status'] == 201:
-			inval_info = Invalidation(response['data']).info
-			response['request_id'] = inval_info['Id']
-		debug("InvalidateObjects(): response: %s" % response)
-		return response
-
-	def GetInvalList(self, cfuri):
-		if cfuri.type != "cf":
-			raise ValueError("Expected CFUri instead of: %s" % cfuri)
-		response = self.send_request("GetInvalList", dist_id = cfuri.dist_id())
-		response['inval_list'] = InvalidationList(response['data'])
-		return response
-
-	def GetInvalInfo(self, cfuri):
-		if cfuri.type != "cf":
-			raise ValueError("Expected CFUri instead of: %s" % cfuri)
-		if cfuri.request_id() is None:
-			raise ValueError("Expected CFUri with Request ID")
-		response = self.send_request("GetInvalInfo", dist_id = cfuri.dist_id(), request_id = cfuri.request_id())
-		response['inval_status'] = Invalidation(response['data'])
-		return response
-
-	## --------------------------------------------------
-	## Low-level methods for handling CloudFront requests
-	## --------------------------------------------------
-
-	def send_request(self, op_name, dist_id = None, request_id = None, body = None, headers = {}, retries = _max_retries):
-		operation = self.operations[op_name]
-		if body:
-			headers['content-type'] = 'text/plain'
-		request = self.create_request(operation, dist_id, request_id, headers)
-		conn = self.get_connection()
-		debug("send_request(): %s %s" % (request['method'], request['resource']))
-		conn.request(request['method'], request['resource'], body, request['headers'])
-		http_response = conn.getresponse()
-		response = {}
-		response["status"] = http_response.status
-		response["reason"] = http_response.reason
-		response["headers"] = dict(http_response.getheaders())
-		response["data"] =  http_response.read()
-		conn.close()
-
-		debug("CloudFront: response: %r" % response)
-
-		if response["status"] >= 500:
-			e = CloudFrontError(response)
-			if retries:
-				warning(u"Retrying failed request: %s" % op_name)
-				warning(unicode(e))
-				warning("Waiting %d sec..." % self._fail_wait(retries))
-				time.sleep(self._fail_wait(retries))
-				return self.send_request(op_name, dist_id, body, retries - 1)
-			else:
-				raise e
-
-		if response["status"] < 200 or response["status"] > 299:
-			raise CloudFrontError(response)
-
-		return response
-
-	def create_request(self, operation, dist_id = None, request_id = None, headers = None):
-		resource = cloudfront_resource + (
-		           operation['resource'] % { 'dist_id' : dist_id, 'request_id' : request_id })
-
-		if not headers:
-			headers = {}
-
-		if headers.has_key("date"):
-			if not headers.has_key("x-amz-date"):
-				headers["x-amz-date"] = headers["date"]
-			del(headers["date"])
-		
-		if not headers.has_key("x-amz-date"):
-			headers["x-amz-date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
-
-		signature = self.sign_request(headers)
-		headers["Authorization"] = "AWS "+self.config.access_key+":"+signature
-
-		request = {}
-		request['resource'] = resource
-		request['headers'] = headers
-		request['method'] = operation['method']
-
-		return request
-
-	def sign_request(self, headers):
-		string_to_sign = headers['x-amz-date']
-		signature = sign_string(string_to_sign)
-		debug(u"CloudFront.sign_request('%s') = %s" % (string_to_sign, signature))
-		return signature
-
-	def get_connection(self):
-		if self.config.proxy_host != "":
-			raise ParameterError("CloudFront commands don't work from behind a HTTP proxy")
-		return httplib.HTTPSConnection(self.config.cloudfront_host)
-
-	def _fail_wait(self, retries):
-		# Wait a few seconds. The more it fails the more we wait.
-		return (self._max_retries - retries + 1) * 3
-
-	def get_dist_name_for_bucket(self, uri):
-		if (uri.type == "cf"):
-			return uri
-		if (uri.type != "s3"):
-			raise ParameterError("CloudFront or S3 URI required instead of: %s" % arg)
-
-		debug("_get_dist_name_for_bucket(%r)" % uri)
-		if CloudFront.dist_list is None:
-			response = self.GetList()
-			CloudFront.dist_list = {}
-			for d in response['dist_list'].dist_summs:
-				CloudFront.dist_list[getBucketFromHostname(d.info['S3Origin']['DNSName'])[0]] = d.uri()
-			debug("dist_list: %s" % CloudFront.dist_list)
-		try:
-			return CloudFront.dist_list[uri.bucket()]
-		except Exception, e:
-			debug(e)
-			raise ParameterError("Unable to translate S3 URI to CloudFront distribution name: %s" % arg)
+        if cfuri.type != "cf":
+            raise ValueError("Expected CFUri instead of: %s" % cfuri)
+        # Get current dist status (enabled/disabled) and Etag
+        info("Checking current status of %s" % cfuri)
+        response = self.GetDistConfig(cfuri)
+        dc = response['dist_config']
+        if enabled != None:
+            dc.info['Enabled'] = enabled
+        if comment != None:
+            dc.info['Comment'] = comment
+        if default_root_object != None:
+            dc.info['DefaultRootObject'] = default_root_object
+        for cname in cnames_add:
+            if dc.info['CNAME'].count(cname) == 0:
+                dc.info['CNAME'].append(cname)
+        for cname in cnames_remove:
+            while dc.info['CNAME'].count(cname) > 0:
+                dc.info['CNAME'].remove(cname)
+        if logging != None:
+            if logging == False:
+                dc.info['Logging'] = False
+            else:
+                dc.info['Logging'] = S3UriS3(logging)
+        response = self.SetDistConfig(cfuri, dc, response['headers']['etag'])
+        return response
+
+    def DeleteDistribution(self, cfuri):
+        if cfuri.type != "cf":
+            raise ValueError("Expected CFUri instead of: %s" % cfuri)
+        # Get current dist status (enabled/disabled) and Etag
+        info("Checking current status of %s" % cfuri)
+        response = self.GetDistConfig(cfuri)
+        if response['dist_config'].info['Enabled']:
+            info("Distribution is ENABLED. Disabling first.")
+            response['dist_config'].info['Enabled'] = False
+            response = self.SetDistConfig(cfuri, response['dist_config'],
+                                          response['headers']['etag'])
+            warning("Waiting for Distribution to become disabled.")
+            warning("This may take several minutes, please wait.")
+            while True:
+                response = self.GetDistInfo(cfuri)
+                d = response['distribution']
+                if d.info['Status'] == "Deployed" and d.info['Enabled'] == False:
+                    info("Distribution is now disabled")
+                    break
+                warning("Still waiting...")
+                time.sleep(10)
393
+        headers = {}
394
+        headers['if-match'] = response['headers']['etag']
395
+        response = self.send_request("DeleteDist", dist_id = cfuri.dist_id(),
396
+                                     headers = headers)
397
+        return response
398
+
399
+    def GetDistInfo(self, cfuri):
400
+        if cfuri.type != "cf":
401
+            raise ValueError("Expected CFUri instead of: %s" % cfuri)
402
+        response = self.send_request("GetDistInfo", dist_id = cfuri.dist_id())
403
+        response['distribution'] = Distribution(response['data'])
404
+        return response
405
+
406
+    def GetDistConfig(self, cfuri):
407
+        if cfuri.type != "cf":
408
+            raise ValueError("Expected CFUri instead of: %s" % cfuri)
409
+        response = self.send_request("GetDistConfig", dist_id = cfuri.dist_id())
410
+        response['dist_config'] = DistributionConfig(response['data'])
411
+        return response
412
+
413
+    def SetDistConfig(self, cfuri, dist_config, etag = None):
414
+        if etag == None:
415
+            debug("SetDistConfig(): Etag not set. Fetching it first.")
416
+            etag = self.GetDistConfig(cfuri)['headers']['etag']
417
+        debug("SetDistConfig(): Etag = %s" % etag)
418
+        request_body = str(dist_config)
419
+        debug("SetDistConfig(): request_body: %s" % request_body)
420
+        headers = {}
421
+        headers['if-match'] = etag
422
+        response = self.send_request("SetDistConfig", dist_id = cfuri.dist_id(),
423
+                                     body = request_body, headers = headers)
424
+        return response
425
+
426
+    def InvalidateObjects(self, uri, paths):
427
+        # uri could be either cf:// or s3:// uri
428
+        cfuri = self.get_dist_name_for_bucket(uri)
429
+        if len(paths) > 999:
430
+            try:
431
+                tmp_filename = Utils.mktmpfile()
432
+                f = open(tmp_filename, "w")
433
+                f.write("\n".join(paths)+"\n")
434
+                f.close()
435
+                warning("Request to invalidate %d paths (max 999 supported)" % len(paths))
436
+                warning("All the paths are now saved in: %s" % tmp_filename)
437
+            except:
438
+                pass
439
+            raise ParameterError("Too many paths to invalidate")
440
+        invalbatch = InvalidationBatch(distribution = cfuri.dist_id(), paths = paths)
441
+        debug("InvalidateObjects(): request_body: %s" % invalbatch)
442
+        response = self.send_request("Invalidate", dist_id = cfuri.dist_id(),
443
+                                     body = str(invalbatch))
444
+        response['dist_id'] = cfuri.dist_id()
445
+        if response['status'] == 201:
446
+            inval_info = Invalidation(response['data']).info
447
+            response['request_id'] = inval_info['Id']
448
+        debug("InvalidateObjects(): response: %s" % response)
449
+        return response
450
+
451
+    def GetInvalList(self, cfuri):
452
+        if cfuri.type != "cf":
453
+            raise ValueError("Expected CFUri instead of: %s" % cfuri)
454
+        response = self.send_request("GetInvalList", dist_id = cfuri.dist_id())
455
+        response['inval_list'] = InvalidationList(response['data'])
456
+        return response
457
+
458
+    def GetInvalInfo(self, cfuri):
459
+        if cfuri.type != "cf":
460
+            raise ValueError("Expected CFUri instead of: %s" % cfuri)
461
+        if cfuri.request_id() is None:
462
+            raise ValueError("Expected CFUri with Request ID")
463
+        response = self.send_request("GetInvalInfo", dist_id = cfuri.dist_id(), request_id = cfuri.request_id())
464
+        response['inval_status'] = Invalidation(response['data'])
465
+        return response
466
+
467
+    ## --------------------------------------------------
468
+    ## Low-level methods for handling CloudFront requests
469
+    ## --------------------------------------------------
470
+
471
+    def send_request(self, op_name, dist_id = None, request_id = None, body = None, headers = {}, retries = _max_retries):
472
+        operation = self.operations[op_name]
473
+        if body:
474
+            headers['content-type'] = 'text/plain'
475
+        request = self.create_request(operation, dist_id, request_id, headers)
476
+        conn = self.get_connection()
477
+        debug("send_request(): %s %s" % (request['method'], request['resource']))
478
+        conn.request(request['method'], request['resource'], body, request['headers'])
479
+        http_response = conn.getresponse()
480
+        response = {}
481
+        response["status"] = http_response.status
482
+        response["reason"] = http_response.reason
483
+        response["headers"] = dict(http_response.getheaders())
484
+        response["data"] =  http_response.read()
485
+        conn.close()
486
+
487
+        debug("CloudFront: response: %r" % response)
488
+
489
+        if response["status"] >= 500:
490
+            e = CloudFrontError(response)
491
+            if retries:
492
+                warning(u"Retrying failed request: %s" % op_name)
493
+                warning(unicode(e))
494
+                warning("Waiting %d sec..." % self._fail_wait(retries))
495
+                time.sleep(self._fail_wait(retries))
496
+                return self.send_request(op_name, dist_id, request_id, body, headers, retries - 1)
497
+            else:
498
+                raise e
499
+
500
+        if response["status"] < 200 or response["status"] > 299:
501
+            raise CloudFrontError(response)
502
+
503
+        return response
504
+
505
+    def create_request(self, operation, dist_id = None, request_id = None, headers = None):
506
+        resource = cloudfront_resource + (
507
+                   operation['resource'] % { 'dist_id' : dist_id, 'request_id' : request_id })
508
+
509
+        if not headers:
510
+            headers = {}
511
+
512
+        if headers.has_key("date"):
513
+            if not headers.has_key("x-amz-date"):
514
+                headers["x-amz-date"] = headers["date"]
515
+            del(headers["date"])
516
+
517
+        if not headers.has_key("x-amz-date"):
518
+            headers["x-amz-date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
519
+
520
+        signature = self.sign_request(headers)
521
+        headers["Authorization"] = "AWS "+self.config.access_key+":"+signature
522
+
523
+        request = {}
524
+        request['resource'] = resource
525
+        request['headers'] = headers
526
+        request['method'] = operation['method']
527
+
528
+        return request
529
+
530
+    def sign_request(self, headers):
531
+        string_to_sign = headers['x-amz-date']
532
+        signature = sign_string(string_to_sign)
533
+        debug(u"CloudFront.sign_request('%s') = %s" % (string_to_sign, signature))
534
+        return signature
535
+
536
+    def get_connection(self):
537
+        if self.config.proxy_host != "":
538
+            raise ParameterError("CloudFront commands don't work from behind a HTTP proxy")
539
+        return httplib.HTTPSConnection(self.config.cloudfront_host)
540
+
541
+    def _fail_wait(self, retries):
542
+        # Wait a few seconds. The more it fails the more we wait.
543
+        return (self._max_retries - retries + 1) * 3
544
+
545
+    def get_dist_name_for_bucket(self, uri):
546
+        if (uri.type == "cf"):
547
+            return uri
548
+        if (uri.type != "s3"):
549
+            raise ParameterError("CloudFront or S3 URI required instead of: %s" % arg)
550
+
551
+        debug("_get_dist_name_for_bucket(%r)" % uri)
552
+        if CloudFront.dist_list is None:
553
+            response = self.GetList()
554
+            CloudFront.dist_list = {}
555
+            for d in response['dist_list'].dist_summs:
556
+                CloudFront.dist_list[getBucketFromHostname(d.info['S3Origin']['DNSName'])[0]] = d.uri()
557
+            debug("dist_list: %s" % CloudFront.dist_list)
558
+        try:
559
+            return CloudFront.dist_list[uri.bucket()]
560
+        except Exception, e:
561
+            debug(e)
562
+            raise ParameterError("Unable to translate S3 URI to CloudFront distribution name: %s" % arg)
563 563
 
564 564
 class Cmd(object):
565
-	"""
566
-	Class that implements CloudFront commands
567
-	"""
568
-	
569
-	class Options(object):
570
-		cf_cnames_add = []
571
-		cf_cnames_remove = []
572
-		cf_comment = None
573
-		cf_enable = None
574
-		cf_logging = None
575
-		cf_default_root_object = None
576
-
577
-		def option_list(self):
578
-			return [opt for opt in dir(self) if opt.startswith("cf_")]
579
-
580
-		def update_option(self, option, value):
581
-			setattr(Cmd.options, option, value)
582
-
583
-	options = Options()
584
-
585
-	@staticmethod
586
-	def _parse_args(args):
587
-		cf = CloudFront(Config())
588
-		cfuris = []
589
-		for arg in args:
590
-			uri = cf.get_dist_name_for_bucket(S3Uri(arg))
591
-			cfuris.append(uri)
592
-		return cfuris
593
-
594
-	@staticmethod
595
-	def info(args):
596
-		cf = CloudFront(Config())
597
-		if not args:
598
-			response = cf.GetList()
599
-			for d in response['dist_list'].dist_summs:
600
-				if d.info.has_key("S3Origin"):
601
-					origin = S3UriS3.httpurl_to_s3uri(d.info['S3Origin']['DNSName'])
602
-				elif d.info.has_key("CustomOrigin"):
603
-					origin = "http://%s/" % d.info['CustomOrigin']['DNSName']
604
-				else:
605
-					origin = "<unknown>"
606
-				pretty_output("Origin", origin)
607
-				pretty_output("DistId", d.uri())
608
-				pretty_output("DomainName", d.info['DomainName'])
609
-				if d.info.has_key("CNAME"):
610
-					pretty_output("CNAMEs", ", ".join(d.info['CNAME']))
611
-				pretty_output("Status", d.info['Status'])
612
-				pretty_output("Enabled", d.info['Enabled'])
613
-				output("")
614
-		else:
615
-			cfuris = Cmd._parse_args(args)
616
-			for cfuri in cfuris:
617
-				response = cf.GetDistInfo(cfuri)
618
-				d = response['distribution']
619
-				dc = d.info['DistributionConfig']
620
-				if dc.info.has_key("S3Origin"):
621
-					origin = S3UriS3.httpurl_to_s3uri(dc.info['S3Origin']['DNSName'])
622
-				elif dc.info.has_key("CustomOrigin"):
623
-					origin = "http://%s/" % dc.info['CustomOrigin']['DNSName']
624
-				else:
625
-					origin = "<unknown>"
626
-				pretty_output("Origin", origin)
627
-				pretty_output("DistId", d.uri())
628
-				pretty_output("DomainName", d.info['DomainName'])
629
-				if dc.info.has_key("CNAME"):
630
-					pretty_output("CNAMEs", ", ".join(dc.info['CNAME']))
631
-				pretty_output("Status", d.info['Status'])
632
-				pretty_output("Comment", dc.info['Comment'])
633
-				pretty_output("Enabled", dc.info['Enabled'])
634
-				pretty_output("DfltRootObject", dc.info['DefaultRootObject'])
635
-				pretty_output("Logging", dc.info['Logging'] or "Disabled")
636
-				pretty_output("Etag", response['headers']['etag'])
637
-
638
-	@staticmethod
639
-	def create(args):
640
-		cf = CloudFront(Config())
641
-		buckets = []
642
-		for arg in args:
643
-			uri = S3Uri(arg)
644
-			if uri.type != "s3":
645
-				raise ParameterError("Bucket can only be created from a s3:// URI instead of: %s" % arg)
646
-			if uri.object():
647
-				raise ParameterError("Use s3:// URI with a bucket name only instead of: %s" % arg)
648
-			if not uri.is_dns_compatible():
649
-				raise ParameterError("CloudFront can only handle lowercase-named buckets.")
650
-			buckets.append(uri)
651
-		if not buckets:
652
-			raise ParameterError("No valid bucket names found")
653
-		for uri in buckets:
654
-			info("Creating distribution from: %s" % uri)
655
-			response = cf.CreateDistribution(uri, cnames_add = Cmd.options.cf_cnames_add, 
656
-			                                 comment = Cmd.options.cf_comment,
657
-			                                 logging = Cmd.options.cf_logging,
565
+    """
566
+    Class that implements CloudFront commands
567
+    """
568
+
569
+    class Options(object):
570
+        cf_cnames_add = []
571
+        cf_cnames_remove = []
572
+        cf_comment = None
573
+        cf_enable = None
574
+        cf_logging = None
575
+        cf_default_root_object = None
576
+
577
+        def option_list(self):
578
+            return [opt for opt in dir(self) if opt.startswith("cf_")]
579
+
580
+        def update_option(self, option, value):
581
+            setattr(Cmd.options, option, value)
582
+
583
+    options = Options()
584
+
585
+    @staticmethod
586
+    def _parse_args(args):
587
+        cf = CloudFront(Config())
588
+        cfuris = []
589
+        for arg in args:
590
+            uri = cf.get_dist_name_for_bucket(S3Uri(arg))
591
+            cfuris.append(uri)
592
+        return cfuris
593
+
594
+    @staticmethod
595
+    def info(args):
596
+        cf = CloudFront(Config())
597
+        if not args:
598
+            response = cf.GetList()
599
+            for d in response['dist_list'].dist_summs:
600
+                if d.info.has_key("S3Origin"):
601
+                    origin = S3UriS3.httpurl_to_s3uri(d.info['S3Origin']['DNSName'])
602
+                elif d.info.has_key("CustomOrigin"):
603
+                    origin = "http://%s/" % d.info['CustomOrigin']['DNSName']
604
+                else:
605
+                    origin = "<unknown>"
606
+                pretty_output("Origin", origin)
607
+                pretty_output("DistId", d.uri())
608
+                pretty_output("DomainName", d.info['DomainName'])
609
+                if d.info.has_key("CNAME"):
610
+                    pretty_output("CNAMEs", ", ".join(d.info['CNAME']))
611
+                pretty_output("Status", d.info['Status'])
612
+                pretty_output("Enabled", d.info['Enabled'])
613
+                output("")
614
+        else:
615
+            cfuris = Cmd._parse_args(args)
616
+            for cfuri in cfuris:
617
+                response = cf.GetDistInfo(cfuri)
618
+                d = response['distribution']
619
+                dc = d.info['DistributionConfig']
620
+                if dc.info.has_key("S3Origin"):
621
+                    origin = S3UriS3.httpurl_to_s3uri(dc.info['S3Origin']['DNSName'])
622
+                elif dc.info.has_key("CustomOrigin"):
623
+                    origin = "http://%s/" % dc.info['CustomOrigin']['DNSName']
624
+                else:
625
+                    origin = "<unknown>"
626
+                pretty_output("Origin", origin)
627
+                pretty_output("DistId", d.uri())
628
+                pretty_output("DomainName", d.info['DomainName'])
629
+                if dc.info.has_key("CNAME"):
630
+                    pretty_output("CNAMEs", ", ".join(dc.info['CNAME']))
631
+                pretty_output("Status", d.info['Status'])
632
+                pretty_output("Comment", dc.info['Comment'])
633
+                pretty_output("Enabled", dc.info['Enabled'])
634
+                pretty_output("DfltRootObject", dc.info['DefaultRootObject'])
635
+                pretty_output("Logging", dc.info['Logging'] or "Disabled")
636
+                pretty_output("Etag", response['headers']['etag'])
637
+
638
+    @staticmethod
639
+    def create(args):
640
+        cf = CloudFront(Config())
641
+        buckets = []
642
+        for arg in args:
643
+            uri = S3Uri(arg)
644
+            if uri.type != "s3":
645
+                raise ParameterError("Bucket can only be created from a s3:// URI instead of: %s" % arg)
646
+            if uri.object():
647
+                raise ParameterError("Use s3:// URI with a bucket name only instead of: %s" % arg)
648
+            if not uri.is_dns_compatible():
649
+                raise ParameterError("CloudFront can only handle lowercase-named buckets.")
650
+            buckets.append(uri)
651
+        if not buckets:
652
+            raise ParameterError("No valid bucket names found")
653
+        for uri in buckets:
654
+            info("Creating distribution from: %s" % uri)
655
+            response = cf.CreateDistribution(uri, cnames_add = Cmd.options.cf_cnames_add,
656
+                                             comment = Cmd.options.cf_comment,
657
+                                             logging = Cmd.options.cf_logging,
658 658
                                              default_root_object = Cmd.options.cf_default_root_object)
659
-			d = response['distribution']
660
-			dc = d.info['DistributionConfig']
661
-			output("Distribution created:")
662
-			pretty_output("Origin", S3UriS3.httpurl_to_s3uri(dc.info['Origin']))
663
-			pretty_output("DistId", d.uri())
664
-			pretty_output("DomainName", d.info['DomainName'])
665
-			pretty_output("CNAMEs", ", ".join(dc.info['CNAME']))
666
-			pretty_output("Comment", dc.info['Comment'])
667
-			pretty_output("Status", d.info['Status'])
668
-			pretty_output("Enabled", dc.info['Enabled'])
669
-			pretty_output("DefaultRootObject", dc.info['DefaultRootObject'])
670
-			pretty_output("Etag", response['headers']['etag'])
671
-
672
-	@staticmethod
673
-	def delete(args):
674
-		cf = CloudFront(Config())
675
-		cfuris = Cmd._parse_args(args)
676
-		for cfuri in cfuris:
677
-			response = cf.DeleteDistribution(cfuri)
678
-			if response['status'] >= 400:
679
-				error("Distribution %s could not be deleted: %s" % (cfuri, response['reason']))
680
-			output("Distribution %s deleted" % cfuri)
681
-
682
-	@staticmethod
683
-	def modify(args):
684
-		cf = CloudFront(Config())
685
-		if len(args) > 1:
686
-			raise ParameterError("Too many parameters. Modify one Distribution at a time.")
687
-		try:
688
-			cfuri = Cmd._parse_args(args)[0]
689
-		except IndexError, e:
690
-			raise ParameterError("No valid Distribution URI found.")
691
-		response = cf.ModifyDistribution(cfuri,
692
-		                                 cnames_add = Cmd.options.cf_cnames_add,
693
-		                                 cnames_remove = Cmd.options.cf_cnames_remove,
694
-		                                 comment = Cmd.options.cf_comment,
695
-		                                 enabled = Cmd.options.cf_enable,
696
-		                                 logging = Cmd.options.cf_logging,
659
+            d = response['distribution']
660
+            dc = d.info['DistributionConfig']
661
+            output("Distribution created:")
662
+            pretty_output("Origin", S3UriS3.httpurl_to_s3uri(dc.info['Origin']))
663
+            pretty_output("DistId", d.uri())
664
+            pretty_output("DomainName", d.info['DomainName'])
665
+            pretty_output("CNAMEs", ", ".join(dc.info['CNAME']))
666
+            pretty_output("Comment", dc.info['Comment'])
667
+            pretty_output("Status", d.info['Status'])
668
+            pretty_output("Enabled", dc.info['Enabled'])
669
+            pretty_output("DefaultRootObject", dc.info['DefaultRootObject'])
670
+            pretty_output("Etag", response['headers']['etag'])
671
+
672
+    @staticmethod
673
+    def delete(args):
674
+        cf = CloudFront(Config())
675
+        cfuris = Cmd._parse_args(args)
676
+        for cfuri in cfuris:
677
+            response = cf.DeleteDistribution(cfuri)
678
+            if response['status'] >= 400:
679
+                error("Distribution %s could not be deleted: %s" % (cfuri, response['reason']))
680
+            output("Distribution %s deleted" % cfuri)
681
+
682
+    @staticmethod
683
+    def modify(args):
684
+        cf = CloudFront(Config())
685
+        if len(args) > 1:
686
+            raise ParameterError("Too many parameters. Modify one Distribution at a time.")
687
+        try:
688
+            cfuri = Cmd._parse_args(args)[0]
689
+        except IndexError, e:
690
+            raise ParameterError("No valid Distribution URI found.")
691
+        response = cf.ModifyDistribution(cfuri,
692
+                                         cnames_add = Cmd.options.cf_cnames_add,
693
+                                         cnames_remove = Cmd.options.cf_cnames_remove,
694
+                                         comment = Cmd.options.cf_comment,
695
+                                         enabled = Cmd.options.cf_enable,
696
+                                         logging = Cmd.options.cf_logging,
697 697
                                          default_root_object = Cmd.options.cf_default_root_object)
698
-		if response['status'] >= 400:
699
-			error("Distribution %s could not be modified: %s" % (cfuri, response['reason']))
700
-		output("Distribution modified: %s" % cfuri)
701
-		response = cf.GetDistInfo(cfuri)
702
-		d = response['distribution']
703
-		dc = d.info['DistributionConfig']
704
-		pretty_output("Origin", S3UriS3.httpurl_to_s3uri(dc.info['Origin']))
705
-		pretty_output("DistId", d.uri())
706
-		pretty_output("DomainName", d.info['DomainName'])
707
-		pretty_output("Status", d.info['Status'])
708
-		pretty_output("CNAMEs", ", ".join(dc.info['CNAME']))
709
-		pretty_output("Comment", dc.info['Comment'])
710
-		pretty_output("Enabled", dc.info['Enabled'])
711
-		pretty_output("DefaultRootObject", dc.info['DefaultRootObject'])
712
-		pretty_output("Etag", response['headers']['etag'])
713
-
714
-	@staticmethod
715
-	def invalinfo(args):
716
-		cf = CloudFront(Config())
717
-		cfuris = Cmd._parse_args(args)
718
-		requests = []
719
-		for cfuri in cfuris:
720
-			if cfuri.request_id():
721
-				requests.append(str(cfuri))
722
-			else:
723
-				inval_list = cf.GetInvalList(cfuri)
724
-				try:
725
-					for i in inval_list['inval_list'].info['InvalidationSummary']:
726
-						requests.append("/".join(["cf:/", cfuri.dist_id(), i["Id"]]))
727
-				except:
728
-					continue
729
-		for req in requests:
730
-			cfuri = S3Uri(req)
731
-			inval_info = cf.GetInvalInfo(cfuri)
732
-			st = inval_info['inval_status'].info
733
-			pretty_output("URI", str(cfuri))
734
-			pretty_output("Status", st['Status'])
735
-			pretty_output("Created", st['CreateTime'])
736
-			pretty_output("Nr of paths", len(st['InvalidationBatch']['Path']))
737
-			pretty_output("Reference", st['InvalidationBatch']['CallerReference'])
738
-			output("")
698
+        if response['status'] >= 400:
699
+            error("Distribution %s could not be modified: %s" % (cfuri, response['reason']))
700
+        output("Distribution modified: %s" % cfuri)
701
+        response = cf.GetDistInfo(cfuri)
702
+        d = response['distribution']
703
+        dc = d.info['DistributionConfig']
704
+        pretty_output("Origin", S3UriS3.httpurl_to_s3uri(dc.info['Origin']))
705
+        pretty_output("DistId", d.uri())
706
+        pretty_output("DomainName", d.info['DomainName'])
707
+        pretty_output("Status", d.info['Status'])
708
+        pretty_output("CNAMEs", ", ".join(dc.info['CNAME']))
709
+        pretty_output("Comment", dc.info['Comment'])
710
+        pretty_output("Enabled", dc.info['Enabled'])
711
+        pretty_output("DefaultRootObject", dc.info['DefaultRootObject'])
712
+        pretty_output("Etag", response['headers']['etag'])
713
+
714
+    @staticmethod
715
+    def invalinfo(args):
716
+        cf = CloudFront(Config())
717
+        cfuris = Cmd._parse_args(args)
718
+        requests = []
719
+        for cfuri in cfuris:
720
+            if cfuri.request_id():
721
+                requests.append(str(cfuri))
722
+            else:
723
+                inval_list = cf.GetInvalList(cfuri)
724
+                try:
725
+                    for i in inval_list['inval_list'].info['InvalidationSummary']:
726
+                        requests.append("/".join(["cf:/", cfuri.dist_id(), i["Id"]]))
727
+                except:
728
+                    continue
729
+        for req in requests:
730
+            cfuri = S3Uri(req)
731
+            inval_info = cf.GetInvalInfo(cfuri)
732
+            st = inval_info['inval_status'].info
733
+            pretty_output("URI", str(cfuri))
734
+            pretty_output("Status", st['Status'])
735
+            pretty_output("Created", st['CreateTime'])
736
+            pretty_output("Nr of paths", len(st['InvalidationBatch']['Path']))
737
+            pretty_output("Reference", st['InvalidationBatch']['CallerReference'])
738
+            output("")
739
+
740
+# vim:et:ts=4:sts=4:ai
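
The retry path in send_request() above backs off linearly through _fail_wait():
each additional failure adds three seconds to the next wait. A minimal standalone
sketch of that schedule (the value of _max_retries is an assumption here; the real
constant is defined on the CloudFront class outside this hunk):

    _MAX_RETRIES = 5  # assumed value for illustration

    def fail_wait(retries):
        # Mirrors CloudFront._fail_wait(): the more attempts have already
        # failed, the longer the next pause (3, 6, 9, ... seconds).
        return (_MAX_RETRIES - retries + 1) * 3

    for retries_left in range(_MAX_RETRIES, 0, -1):
        print("retries left %d -> wait %d sec" % (retries_left, fail_wait(retries_left)))

With five retries the worst case sleeps 3+6+9+12+15 = 45 seconds in total before
the CloudFrontError is finally raised.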
... ...
@@ -10,190 +10,190 @@ import Progress
10 10
 from SortedDict import SortedDict
11 11
 
12 12
 class Config(object):
13
-	_instance = None
14
-	_parsed_files = []
15
-	_doc = {}
16
-	access_key = ""
17
-	secret_key = ""
18
-	host_base = "s3.amazonaws.com"
19
-	host_bucket = "%(bucket)s.s3.amazonaws.com"
20
-	simpledb_host = "sdb.amazonaws.com"
21
-	cloudfront_host = "cloudfront.amazonaws.com"
22
-	verbosity = logging.WARNING
23
-	progress_meter = True
24
-	progress_class = Progress.ProgressCR
25
-	send_chunk = 4096
26
-	recv_chunk = 4096
27
-	list_md5 = False
28
-	human_readable_sizes = False
29
-	extra_headers = SortedDict(ignore_case = True)
30
-	force = False
31
-	enable = None
32
-	get_continue = False
33
-	skip_existing = False
34
-	recursive = False
35
-	acl_public = None
36
-	acl_grants = []
37
-	acl_revokes = []
38
-	proxy_host = ""
39
-	proxy_port = 3128
40
-	encrypt = False
41
-	dry_run = False
42
-	preserve_attrs = True
43
-	preserve_attrs_list = [ 
44
-		'uname',	# Verbose owner Name (e.g. 'root')
45
-		'uid',		# Numeric user ID (e.g. 0)
46
-		'gname',	# Group name (e.g. 'users')
47
-		'gid',		# Numeric group ID (e.g. 100)
48
-		'atime',	# Last access timestamp
49
-		'mtime',	# Modification timestamp
50
-		'ctime',	# Creation timestamp
51
-		'mode',		# File mode (e.g. rwxr-xr-x = 755)
52
-		#'acl',		# Full ACL (not yet supported)
53
-	]
54
-	delete_removed = False
55
-	_doc['delete_removed'] = "[sync] Remove remote S3 objects when local file has been deleted"
56
-	gpg_passphrase = ""
57
-	gpg_command = ""
58
-	gpg_encrypt = "%(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s"
59
-	gpg_decrypt = "%(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s"
60
-	use_https = False
61
-	bucket_location = "US"
62
-	default_mime_type = "binary/octet-stream"
63
-	guess_mime_type = True
64
-	# List of checks to be performed for 'sync'
65
-	sync_checks = ['size', 'md5']	# 'weak-timestamp'
66
-	# List of compiled REGEXPs
67
-	exclude = []
68
-	include = []
69
-	# Dict mapping compiled REGEXPs back to their textual form
70
-	debug_exclude = {}
71
-	debug_include = {}
72
-	encoding = "utf-8"
73
-	urlencoding_mode = "normal"
74
-	log_target_prefix = ""
75
-	reduced_redundancy = False
76
-	follow_symlinks = False
77
-	socket_timeout = 300
78
-	invalidate_on_cf = False
79
-	website_index = "index.html"
80
-	website_error = ""
81
-	website_endpoint = "http://%(bucket)s.s3-website-%(location)s.amazonaws.com/"
82
-
83
-	## Creating a singleton
84
-	def __new__(self, configfile = None):
85
-		if self._instance is None:
86
-			self._instance = object.__new__(self)
87
-		return self._instance
88
-
89
-	def __init__(self, configfile = None):
90
-		if configfile:
91
-			self.read_config_file(configfile)
92
-
93
-	def option_list(self):
94
-		retval = []
95
-		for option in dir(self):
96
-			## Skip attributes that start with underscore or are not string, int or bool
97
-			option_type = type(getattr(Config, option))
98
-			if option.startswith("_") or \
99
-			   not (option_type in (
100
-			   		type("string"),	# str
101
-			        	type(42),	# int
102
-					type(True))):	# bool
103
-				continue
104
-			retval.append(option)
105
-		return retval
106
-
107
-	def read_config_file(self, configfile):
108
-		cp = ConfigParser(configfile)
109
-		for option in self.option_list():
110
-			self.update_option(option, cp.get(option))
111
-		self._parsed_files.append(configfile)
112
-
113
-	def dump_config(self, stream):
114
-		ConfigDumper(stream).dump("default", self)
115
-
116
-	def update_option(self, option, value):
117
-		if value is None:
118
-			return
119
-		#### Special treatment of some options
120
-		## verbosity must be known to "logging" module
121
-		if option == "verbosity":
122
-			try:
123
-				setattr(Config, "verbosity", logging._levelNames[value])
124
-			except KeyError:
125
-				error("Config: verbosity level '%s' is not valid" % value)
126
-		## allow yes/no, true/false, on/off and 1/0 for boolean options
127
-		elif type(getattr(Config, option)) is type(True):	# bool
128
-			if str(value).lower() in ("true", "yes", "on", "1"):
129
-				setattr(Config, option, True)
130
-			elif str(value).lower() in ("false", "no", "off", "0"):
131
-				setattr(Config, option, False)
132
-			else:
133
-				error("Config: value of option '%s' must be Yes or No, not '%s'" % (option, value))
134
-		elif type(getattr(Config, option)) is type(42):		# int
135
-			try:
136
-				setattr(Config, option, int(value))
137
-			except ValueError, e:
138
-				error("Config: value of option '%s' must be an integer, not '%s'" % (option, value))
139
-		else:							# string
140
-			setattr(Config, option, value)
13
+    _instance = None
14
+    _parsed_files = []
15
+    _doc = {}
16
+    access_key = ""
17
+    secret_key = ""
18
+    host_base = "s3.amazonaws.com"
19
+    host_bucket = "%(bucket)s.s3.amazonaws.com"
20
+    simpledb_host = "sdb.amazonaws.com"
21
+    cloudfront_host = "cloudfront.amazonaws.com"
22
+    verbosity = logging.WARNING
23
+    progress_meter = True
24
+    progress_class = Progress.ProgressCR
25
+    send_chunk = 4096
26
+    recv_chunk = 4096
27
+    list_md5 = False
28
+    human_readable_sizes = False
29
+    extra_headers = SortedDict(ignore_case = True)
30
+    force = False
31
+    enable = None
32
+    get_continue = False
33
+    skip_existing = False
34
+    recursive = False
35
+    acl_public = None
36
+    acl_grants = []
37
+    acl_revokes = []
38
+    proxy_host = ""
39
+    proxy_port = 3128
40
+    encrypt = False
41
+    dry_run = False
42
+    preserve_attrs = True
43
+    preserve_attrs_list = [
44
+        'uname',    # Verbose owner Name (e.g. 'root')
45
+        'uid',      # Numeric user ID (e.g. 0)
46
+        'gname',    # Group name (e.g. 'users')
47
+        'gid',      # Numeric group ID (e.g. 100)
48
+        'atime',    # Last access timestamp
49
+        'mtime',    # Modification timestamp
50
+        'ctime',    # Creation timestamp
51
+        'mode',     # File mode (e.g. rwxr-xr-x = 755)
52
+        #'acl',     # Full ACL (not yet supported)
53
+    ]
54
+    delete_removed = False
55
+    _doc['delete_removed'] = "[sync] Remove remote S3 objects when local file has been deleted"
56
+    gpg_passphrase = ""
57
+    gpg_command = ""
58
+    gpg_encrypt = "%(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s"
59
+    gpg_decrypt = "%(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s"
60
+    use_https = False
61
+    bucket_location = "US"
62
+    default_mime_type = "binary/octet-stream"
63
+    guess_mime_type = True
64
+    # List of checks to be performed for 'sync'
65
+    sync_checks = ['size', 'md5']   # 'weak-timestamp'
66
+    # List of compiled REGEXPs
67
+    exclude = []
68
+    include = []
69
+    # Dict mapping compiled REGEXPs back to their textual form
70
+    debug_exclude = {}
71
+    debug_include = {}
72
+    encoding = "utf-8"
73
+    urlencoding_mode = "normal"
74
+    log_target_prefix = ""
75
+    reduced_redundancy = False
76
+    follow_symlinks = False
77
+    socket_timeout = 300
78
+    invalidate_on_cf = False
79
+    website_index = "index.html"
80
+    website_error = ""
81
+    website_endpoint = "http://%(bucket)s.s3-website-%(location)s.amazonaws.com/"
82
+
83
+    ## Creating a singleton
84
+    def __new__(self, configfile = None):
85
+        if self._instance is None:
86
+            self._instance = object.__new__(self)
87
+        return self._instance
88
+
89
+    def __init__(self, configfile = None):
90
+        if configfile:
91
+            self.read_config_file(configfile)
92
+
93
+    def option_list(self):
94
+        retval = []
95
+        for option in dir(self):
96
+            ## Skip attributes that start with underscore or are not string, int or bool
97
+            option_type = type(getattr(Config, option))
98
+            if option.startswith("_") or \
99
+               not (option_type in (
100
+                    type("string"), # str
101
+                    type(42),       # int
102
+                    type(True))):   # bool
103
+                continue
104
+            retval.append(option)
105
+        return retval
106
+
107
+    def read_config_file(self, configfile):
108
+        cp = ConfigParser(configfile)
109
+        for option in self.option_list():
110
+            self.update_option(option, cp.get(option))
111
+        self._parsed_files.append(configfile)
112
+
113
+    def dump_config(self, stream):
114
+        ConfigDumper(stream).dump("default", self)
115
+
116
+    def update_option(self, option, value):
117
+        if value is None:
118
+            return
119
+        #### Special treatment of some options
120
+        ## verbosity must be known to "logging" module
121
+        if option == "verbosity":
122
+            try:
123
+                setattr(Config, "verbosity", logging._levelNames[value])
124
+            except KeyError:
125
+                error("Config: verbosity level '%s' is not valid" % value)
126
+        ## allow yes/no, true/false, on/off and 1/0 for boolean options
127
+        elif type(getattr(Config, option)) is type(True):   # bool
128
+            if str(value).lower() in ("true", "yes", "on", "1"):
129
+                setattr(Config, option, True)
130
+            elif str(value).lower() in ("false", "no", "off", "0"):
131
+                setattr(Config, option, False)
132
+            else:
133
+                error("Config: value of option '%s' must be Yes or No, not '%s'" % (option, value))
134
+        elif type(getattr(Config, option)) is type(42):     # int
135
+            try:
136
+                setattr(Config, option, int(value))
137
+            except ValueError, e:
138
+                error("Config: value of option '%s' must be an integer, not '%s'" % (option, value))
139
+        else:                           # string
140
+            setattr(Config, option, value)
141 141
 
142 142
 class ConfigParser(object):
143
-	def __init__(self, file, sections = []):
144
-		self.cfg = {}
145
-		self.parse_file(file, sections)
146
-	
147
-	def parse_file(self, file, sections = []):
148
-		debug("ConfigParser: Reading file '%s'" % file)
149
-		if type(sections) != type([]):
150
-			sections = [sections]
151
-		in_our_section = True
152
-		f = open(file, "r")
153
-		r_comment = re.compile("^\s*#.*")
154
-		r_empty = re.compile("^\s*$")
155
-		r_section = re.compile("^\[([^\]]+)\]")
156
-		r_data = re.compile("^\s*(?P<key>\w+)\s*=\s*(?P<value>.*)")
157
-		r_quotes = re.compile("^\"(.*)\"\s*$")
158
-		for line in f:
159
-			if r_comment.match(line) or r_empty.match(line):
160
-				continue
161
-			is_section = r_section.match(line)
162
-			if is_section:
163
-				section = is_section.groups()[0]
164
-				in_our_section = (section in sections) or (len(sections) == 0)
165
-				continue
166
-			is_data = r_data.match(line)
167
-			if is_data and in_our_section:
168
-				data = is_data.groupdict()
169
-				if r_quotes.match(data["value"]):
170
-					data["value"] = data["value"][1:-1]
171
-				self.__setitem__(data["key"], data["value"])
172
-				if data["key"] in ("access_key", "secret_key", "gpg_passphrase"):
173
-					print_value = (data["value"][:2]+"...%d_chars..."+data["value"][-1:]) % (len(data["value"]) - 3)
174
-				else:
175
-					print_value = data["value"]
176
-				debug("ConfigParser: %s->%s" % (data["key"], print_value))
177
-				continue
178
-			warning("Ignoring invalid line in '%s': %s" % (file, line))
179
-
180
-	def __getitem__(self, name):
181
-		return self.cfg[name]
182
-	
183
-	def __setitem__(self, name, value):
184
-		self.cfg[name] = value
185
-	
186
-	def get(self, name, default = None):
187
-		if self.cfg.has_key(name):
188
-			return self.cfg[name]
189
-		return default
143
+    def __init__(self, file, sections = []):
144
+        self.cfg = {}
145
+        self.parse_file(file, sections)
146
+
147
+    def parse_file(self, file, sections = []):
148
+        debug("ConfigParser: Reading file '%s'" % file)
149
+        if type(sections) != type([]):
150
+            sections = [sections]
151
+        in_our_section = True
152
+        f = open(file, "r")
153
+        r_comment = re.compile("^\s*#.*")
154
+        r_empty = re.compile("^\s*$")
155
+        r_section = re.compile("^\[([^\]]+)\]")
156
+        r_data = re.compile("^\s*(?P<key>\w+)\s*=\s*(?P<value>.*)")
157
+        r_quotes = re.compile("^\"(.*)\"\s*$")
158
+        for line in f:
159
+            if r_comment.match(line) or r_empty.match(line):
160
+                continue
161
+            is_section = r_section.match(line)
162
+            if is_section:
163
+                section = is_section.groups()[0]
164
+                in_our_section = (section in sections) or (len(sections) == 0)
165
+                continue
166
+            is_data = r_data.match(line)
167
+            if is_data and in_our_section:
168
+                data = is_data.groupdict()
169
+                if r_quotes.match(data["value"]):
170
+                    data["value"] = data["value"][1:-1]
171
+                self.__setitem__(data["key"], data["value"])
172
+                if data["key"] in ("access_key", "secret_key", "gpg_passphrase"):
173
+                    print_value = (data["value"][:2]+"...%d_chars..."+data["value"][-1:]) % (len(data["value"]) - 3)
174
+                else:
175
+                    print_value = data["value"]
176
+                debug("ConfigParser: %s->%s" % (data["key"], print_value))
177
+                continue
178
+            warning("Ignoring invalid line in '%s': %s" % (file, line))
179
+
180
+    def __getitem__(self, name):
181
+        return self.cfg[name]
182
+
183
+    def __setitem__(self, name, value):
184
+        self.cfg[name] = value
185
+
186
+    def get(self, name, default = None):
187
+        if self.cfg.has_key(name):
188
+            return self.cfg[name]
189
+        return default
190 190
 
191 191
 class ConfigDumper(object):
192
-	def __init__(self, stream):
193
-		self.stream = stream
192
+    def __init__(self, stream):
193
+        self.stream = stream
194 194
 
195
-	def dump(self, section, config):
196
-		self.stream.write("[%s]\n" % section)
197
-		for option in config.option_list():
198
-			self.stream.write("%s = %s\n" % (option, getattr(config, option)))
195
+    def dump(self, section, config):
196
+        self.stream.write("[%s]\n" % section)
197
+        for option in config.option_list():
198
+            self.stream.write("%s = %s\n" % (option, getattr(config, option)))
199 199
 
... ...
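
Config above is a __new__-based singleton: every Config() call hands back the same
shared instance, and update_option() writes class attributes so a change made through
one reference is visible through all of them. A minimal sketch of the same pattern:

    class Singleton(object):
        _instance = None

        def __new__(cls, *args, **kwargs):
            # First call creates the instance; later calls return it unchanged.
            if cls._instance is None:
                cls._instance = object.__new__(cls)
            return cls._instance

    a = Singleton()
    b = Singleton()
    assert a is b  # one shared object, just like Config()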
@@ -7,80 +7,82 @@ from Utils import getTreeFromXml, unicodise, deunicodise
7 7
 from logging import debug, info, warning, error
8 8
 
9 9
 try:
10
-	import xml.etree.ElementTree as ET
10
+    import xml.etree.ElementTree as ET
11 11
 except ImportError:
12
-	import elementtree.ElementTree as ET
12
+    import elementtree.ElementTree as ET
13 13
 
14 14
 class S3Exception(Exception):
15
-	def __init__(self, message = ""):
16
-		self.message = unicodise(message)
15
+    def __init__(self, message = ""):
16
+        self.message = unicodise(message)
17 17
 
18
-	def __str__(self):
19
-		## Call unicode(self) instead of self.message because
20
-		## __unicode__() method could be overriden in subclasses!
21
-		return deunicodise(unicode(self))
18
+    def __str__(self):
19
+        ## Call unicode(self) instead of self.message because
20
+        ## __unicode__() method could be overridden in subclasses!
21
+        return deunicodise(unicode(self))
22 22
 
23
-	def __unicode__(self):
24
-		return self.message
23
+    def __unicode__(self):
24
+        return self.message
25 25
 
26
-	## (Base)Exception.message has been deprecated in Python 2.6
27
-	def _get_message(self):
28
-		return self._message
29
-	def _set_message(self, message):
30
-		self._message = message
31
-	message = property(_get_message, _set_message)
26
+    ## (Base)Exception.message has been deprecated in Python 2.6
27
+    def _get_message(self):
28
+        return self._message
29
+    def _set_message(self, message):
30
+        self._message = message
31
+    message = property(_get_message, _set_message)
32 32
 
33 33
 
34 34
 class S3Error (S3Exception):
35
-	def __init__(self, response):
36
-		self.status = response["status"]
37
-		self.reason = response["reason"]
38
-		self.info = {
39
-			"Code" : "",
40
-			"Message" : "",
41
-			"Resource" : ""
42
-		}
43
-		debug("S3Error: %s (%s)" % (self.status, self.reason))
44
-		if response.has_key("headers"):
45
-			for header in response["headers"]:
46
-				debug("HttpHeader: %s: %s" % (header, response["headers"][header]))
47
-		if response.has_key("data"):
48
-			tree = getTreeFromXml(response["data"])
49
-			error_node = tree
50
-			if not error_node.tag == "Error":
51
-				error_node = tree.find(".//Error")
52
-			for child in error_node.getchildren():
53
-				if child.text != "":
54
-					debug("ErrorXML: " + child.tag + ": " + repr(child.text))
55
-					self.info[child.tag] = child.text
56
-		self.code = self.info["Code"]
57
-		self.message = self.info["Message"]
58
-		self.resource = self.info["Resource"]
59
-
60
-	def __unicode__(self):
61
-		retval = u"%d " % (self.status)
62
-		retval += (u"(%s)" % (self.info.has_key("Code") and self.info["Code"] or self.reason))
63
-		if self.info.has_key("Message"):
64
-			retval += (u": %s" % self.info["Message"])
65
-		return retval
35
+    def __init__(self, response):
36
+        self.status = response["status"]
37
+        self.reason = response["reason"]
38
+        self.info = {
39
+            "Code" : "",
40
+            "Message" : "",
41
+            "Resource" : ""
42
+        }
43
+        debug("S3Error: %s (%s)" % (self.status, self.reason))
44
+        if response.has_key("headers"):
45
+            for header in response["headers"]:
46
+                debug("HttpHeader: %s: %s" % (header, response["headers"][header]))
47
+        if response.has_key("data"):
48
+            tree = getTreeFromXml(response["data"])
49
+            error_node = tree
50
+            if not error_node.tag == "Error":
51
+                error_node = tree.find(".//Error")
52
+            for child in error_node.getchildren():
53
+                if child.text != "":
54
+                    debug("ErrorXML: " + child.tag + ": " + repr(child.text))
55
+                    self.info[child.tag] = child.text
56
+        self.code = self.info["Code"]
57
+        self.message = self.info["Message"]
58
+        self.resource = self.info["Resource"]
59
+
60
+    def __unicode__(self):
61
+        retval = u"%d " % (self.status)
62
+        retval += (u"(%s)" % (self.info.has_key("Code") and self.info["Code"] or self.reason))
63
+        if self.info.has_key("Message"):
64
+            retval += (u": %s" % self.info["Message"])
65
+        return retval
66 66
 
67 67
 class CloudFrontError(S3Error):
68
-	pass
69
-		
68
+    pass
69
+
70 70
 class S3UploadError(S3Exception):
71
-	pass
71
+    pass
72 72
 
73 73
 class S3DownloadError(S3Exception):
74
-	pass
74
+    pass
75 75
 
76 76
 class S3RequestError(S3Exception):
77
-	pass
77
+    pass
78 78
 
79 79
 class S3ResponseError(S3Exception):
80
-	pass
80
+    pass
81 81
 
82 82
 class InvalidFileError(S3Exception):
83
-	pass
83
+    pass
84 84
 
85 85
 class ParameterError(S3Exception):
86
-	pass
86
+    pass
87
+
88
+# vim:et:ts=4:sts=4:ai
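
For reference, the following standalone sketch shows what S3Error.__init__ above
pulls out of an S3 error response; the response dict is hypothetical example data,
not captured output:

    import xml.etree.ElementTree as ET

    response = {  # made-up values for illustration
        "status": 403,
        "reason": "Forbidden",
        "data": "<Error><Code>AccessDenied</Code>"
                "<Message>Access Denied</Message>"
                "<Resource>/some-bucket</Resource></Error>",
    }

    info = {"Code": "", "Message": "", "Resource": ""}
    for child in ET.fromstring(response["data"]):
        if child.text:
            info[child.tag] = child.text

    # Same shape S3Error.__unicode__() renders: "403 (AccessDenied): Access Denied"
    print("%d (%s): %s" % (response["status"], info["Code"], info["Message"]))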
... ...
@@ -52,288 +52,288 @@ def _fswalk(path, follow_symlinks):
52 52
         return os.walk(path)
53 53
 
54 54
 def filter_exclude_include(src_list):
55
-	info(u"Applying --exclude/--include")
56
-	cfg = Config()
57
-	exclude_list = SortedDict(ignore_case = False)
58
-	for file in src_list.keys():
59
-		debug(u"CHECK: %s" % file)
60
-		excluded = False
61
-		for r in cfg.exclude:
62
-			if r.search(file):
63
-				excluded = True
64
-				debug(u"EXCL-MATCH: '%s'" % (cfg.debug_exclude[r]))
65
-				break
66
-		if excluded:
67
-			## No need to check for --include if not excluded
68
-			for r in cfg.include:
69
-				if r.search(file):
70
-					excluded = False
71
-					debug(u"INCL-MATCH: '%s'" % (cfg.debug_include[r]))
72
-					break
73
-		if excluded:
74
-			## Still excluded - ok, action it
75
-			debug(u"EXCLUDE: %s" % file)
76
-			exclude_list[file] = src_list[file]
77
-			del(src_list[file])
78
-			continue
79
-		else:
80
-			debug(u"PASS: %s" % (file))
81
-	return src_list, exclude_list
55
+    info(u"Applying --exclude/--include")
56
+    cfg = Config()
57
+    exclude_list = SortedDict(ignore_case = False)
58
+    for file in src_list.keys():
59
+        debug(u"CHECK: %s" % file)
60
+        excluded = False
61
+        for r in cfg.exclude:
62
+            if r.search(file):
63
+                excluded = True
64
+                debug(u"EXCL-MATCH: '%s'" % (cfg.debug_exclude[r]))
65
+                break
66
+        if excluded:
67
+            ## No need to check for --include if not excluded
68
+            for r in cfg.include:
69
+                if r.search(file):
70
+                    excluded = False
71
+                    debug(u"INCL-MATCH: '%s'" % (cfg.debug_include[r]))
72
+                    break
73
+        if excluded:
74
+            ## Still excluded - ok, action it
75
+            debug(u"EXCLUDE: %s" % file)
76
+            exclude_list[file] = src_list[file]
77
+            del(src_list[file])
78
+            continue
79
+        else:
80
+            debug(u"PASS: %s" % (file))
81
+    return src_list, exclude_list
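
filter_exclude_include() above gives --include the last word: a file is dropped only
when some exclude pattern matches it and no include pattern re-admits it. A small
standalone illustration with made-up patterns:

    import re

    excludes = [re.compile(r"\.tmp$")]      # e.g. --exclude '*.tmp'
    includes = [re.compile(r"keep\.tmp$")]  # e.g. --include 'keep.tmp'

    def is_excluded(path):
        excluded = any(r.search(path) for r in excludes)
        if excluded and any(r.search(path) for r in includes):
            excluded = False                # --include overrides --exclude
        return excluded

    assert is_excluded("scratch.tmp")
    assert not is_excluded("keep.tmp")
    assert not is_excluded("notes.txt")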
82 82
 
83 83
 def fetch_local_list(args, recursive = None):
84
-	def _get_filelist_local(local_uri):
85
-		info(u"Compiling list of local files...")
86
-		if local_uri.isdir():
87
-			local_base = deunicodise(local_uri.basename())
88
-			local_path = deunicodise(local_uri.path())
89
-			filelist = _fswalk(local_path, cfg.follow_symlinks)
90
-			single_file = False
91
-		else:
92
-			local_base = ""
93
-			local_path = deunicodise(local_uri.dirname())
94
-			filelist = [( local_path, [], [deunicodise(local_uri.basename())] )]
95
-			single_file = True
96
-		loc_list = SortedDict(ignore_case = False)
97
-		for root, dirs, files in filelist:
98
-			rel_root = root.replace(local_path, local_base, 1)
99
-			for f in files:
100
-				full_name = os.path.join(root, f)
101
-				if not os.path.isfile(full_name):
102
-					continue
103
-				if os.path.islink(full_name):
84
+    def _get_filelist_local(local_uri):
85
+        info(u"Compiling list of local files...")
86
+        if local_uri.isdir():
87
+            local_base = deunicodise(local_uri.basename())
88
+            local_path = deunicodise(local_uri.path())
89
+            filelist = _fswalk(local_path, cfg.follow_symlinks)
90
+            single_file = False
91
+        else:
92
+            local_base = ""
93
+            local_path = deunicodise(local_uri.dirname())
94
+            filelist = [( local_path, [], [deunicodise(local_uri.basename())] )]
95
+            single_file = True
96
+        loc_list = SortedDict(ignore_case = False)
97
+        for root, dirs, files in filelist:
98
+            rel_root = root.replace(local_path, local_base, 1)
99
+            for f in files:
100
+                full_name = os.path.join(root, f)
101
+                if not os.path.isfile(full_name):
102
+                    continue
103
+                if os.path.islink(full_name):
104 104
                                     if not cfg.follow_symlinks:
105 105
                                             continue
106
-				relative_file = unicodise(os.path.join(rel_root, f))
107
-				if os.path.sep != "/":
108
-					# Convert non-unix dir separators to '/'
109
-					relative_file = "/".join(relative_file.split(os.path.sep))
110
-				if cfg.urlencoding_mode == "normal":
111
-					relative_file = replace_nonprintables(relative_file)
112
-				if relative_file.startswith('./'):
113
-					relative_file = relative_file[2:]
114
-				sr = os.stat_result(os.lstat(full_name))
115
-				loc_list[relative_file] = {
116
-					'full_name_unicode' : unicodise(full_name),
117
-					'full_name' : full_name,
118
-					'size' : sr.st_size, 
119
-					'mtime' : sr.st_mtime,
120
-					## TODO: Possibly more to save here...
121
-				}
122
-		return loc_list, single_file
123
-
124
-	cfg = Config()
125
-	local_uris = []
126
-	local_list = SortedDict(ignore_case = False)
127
-	single_file = False
128
-
129
-	if type(args) not in (list, tuple):
130
-		args = [args]
131
-
132
-	if recursive == None:
133
-		recursive = cfg.recursive
134
-
135
-	for arg in args:
136
-		uri = S3Uri(arg)
137
-		if not uri.type == 'file':
138
-			raise ParameterError("Expecting filename or directory instead of: %s" % arg)
139
-		if uri.isdir() and not recursive:
140
-			raise ParameterError("Use --recursive to upload a directory: %s" % arg)
141
-		local_uris.append(uri)
142
-
143
-	for uri in local_uris:
144
-		list_for_uri, single_file = _get_filelist_local(uri)
145
-		local_list.update(list_for_uri)
146
-
147
-	## Single file is True if and only if the user 
148
-	## specified one local URI and that URI represents
149
-	## a FILE. Ie it is False if the URI was of a DIR
150
-	## and that dir contained only one FILE. That's not
151
-	## a case of single_file==True.
152
-	if len(local_list) > 1:
153
-		single_file = False
154
-
155
-	return local_list, single_file
106
+                relative_file = unicodise(os.path.join(rel_root, f))
107
+                if os.path.sep != "/":
108
+                    # Convert non-unix dir separators to '/'
109
+                    relative_file = "/".join(relative_file.split(os.path.sep))
110
+                if cfg.urlencoding_mode == "normal":
111
+                    relative_file = replace_nonprintables(relative_file)
112
+                if relative_file.startswith('./'):
113
+                    relative_file = relative_file[2:]
114
+                sr = os.stat_result(os.lstat(full_name))
115
+                loc_list[relative_file] = {
116
+                    'full_name_unicode' : unicodise(full_name),
117
+                    'full_name' : full_name,
118
+                    'size' : sr.st_size,
119
+                    'mtime' : sr.st_mtime,
120
+                    ## TODO: Possibly more to save here...
121
+                }
122
+        return loc_list, single_file
123
+
124
+    cfg = Config()
125
+    local_uris = []
126
+    local_list = SortedDict(ignore_case = False)
127
+    single_file = False
128
+
129
+    if type(args) not in (list, tuple):
130
+        args = [args]
131
+
132
+    if recursive == None:
133
+        recursive = cfg.recursive
134
+
135
+    for arg in args:
136
+        uri = S3Uri(arg)
137
+        if not uri.type == 'file':
138
+            raise ParameterError("Expecting filename or directory instead of: %s" % arg)
139
+        if uri.isdir() and not recursive:
140
+            raise ParameterError("Use --recursive to upload a directory: %s" % arg)
141
+        local_uris.append(uri)
142
+
143
+    for uri in local_uris:
144
+        list_for_uri, single_file = _get_filelist_local(uri)
145
+        local_list.update(list_for_uri)
146
+
147
+    ## Single file is True if and only if the user
148
+    ## specified one local URI and that URI represents
149
+    ## a FILE. I.e. it is False if the URI was of a DIR
150
+    ## and that dir contained only one FILE. That's not
151
+    ## a case of single_file==True.
152
+    if len(local_list) > 1:
153
+        single_file = False
154
+
155
+    return local_list, single_file
156 156
 
157 157
 def fetch_remote_list(args, require_attribs = False, recursive = None):
158
-	def _get_filelist_remote(remote_uri, recursive = True):
159
-		## If remote_uri ends with '/' then all remote files will have 
160
-		## the remote_uri prefix removed in the relative path.
161
-		## If, on the other hand, the remote_uri ends with something else
162
-		## (probably alphanumeric symbol) we'll use the last path part 
163
-		## in the relative path.
164
-		##
165
-		## Complicated, eh? See an example:
166
-		## _get_filelist_remote("s3://bckt/abc/def") may yield:
167
-		## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
168
-		## _get_filelist_remote("s3://bckt/abc/def/") will yield:
169
-		## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
170
-		## Furthermore a prefix-magic can restrict the return list:
171
-		## _get_filelist_remote("s3://bckt/abc/def/x") yields:
172
-		## { 'xyz/blah.txt' : {} }
173
-
174
-		info(u"Retrieving list of remote files for %s ..." % remote_uri)
175
-
176
-		s3 = S3(Config())
177
-		response = s3.bucket_list(remote_uri.bucket(), prefix = remote_uri.object(), recursive = recursive)
178
-
179
-		rem_base_original = rem_base = remote_uri.object()
180
-		remote_uri_original = remote_uri
181
-		if rem_base != '' and rem_base[-1] != '/':
182
-			rem_base = rem_base[:rem_base.rfind('/')+1]
183
-			remote_uri = S3Uri("s3://%s/%s" % (remote_uri.bucket(), rem_base))
184
-		rem_base_len = len(rem_base)
185
-		rem_list = SortedDict(ignore_case = False)
186
-		break_now = False
187
-		for object in response['list']:
188
-			if object['Key'] == rem_base_original and object['Key'][-1] != os.path.sep:
189
-				## We asked for one file and we got that file :-)
190
-				key = os.path.basename(object['Key'])
191
-				object_uri_str = remote_uri_original.uri()
192
-				break_now = True
193
-				rem_list = {}	## Remove whatever has already been put to rem_list
194
-			else:
195
-				key = object['Key'][rem_base_len:]		## Beware - this may be '' if object['Key']==rem_base !!
196
-				object_uri_str = remote_uri.uri() + key
197
-			rem_list[key] = { 
198
-				'size' : int(object['Size']),
199
-				'timestamp' : dateS3toUnix(object['LastModified']), ## Sadly it's upload time, not our lastmod time :-(
200
-				'md5' : object['ETag'][1:-1],
201
-				'object_key' : object['Key'],
202
-				'object_uri_str' : object_uri_str,
203
-				'base_uri' : remote_uri,
204
-			}
205
-			if break_now:
206
-				break
207
-		return rem_list
208
-
209
-	cfg = Config()
210
-	remote_uris = []
211
-	remote_list = SortedDict(ignore_case = False)
212
-
213
-	if type(args) not in (list, tuple):
214
-		args = [args]
215
-
216
-	if recursive == None:
217
-		recursive = cfg.recursive
218
-
219
-	for arg in args:
220
-		uri = S3Uri(arg)
221
-		if not uri.type == 's3':
222
-			raise ParameterError("Expecting S3 URI instead of '%s'" % arg)
223
-		remote_uris.append(uri)
224
-
225
-	if recursive:
226
-		for uri in remote_uris:
227
-			objectlist = _get_filelist_remote(uri)
228
-			for key in objectlist:
229
-				remote_list[key] = objectlist[key]
230
-	else:
231
-		for uri in remote_uris:
232
-			uri_str = str(uri)
233
-			## Wildcards used in remote URI?
234
-			## If yes we'll need a bucket listing...
235
-			if uri_str.find('*') > -1 or uri_str.find('?') > -1:
236
-				first_wildcard = uri_str.find('*')
237
-				first_questionmark = uri_str.find('?')
238
-				if first_questionmark > -1 and first_questionmark < first_wildcard:
239
-					first_wildcard = first_questionmark
240
-				prefix = uri_str[:first_wildcard]
241
-				rest = uri_str[first_wildcard+1:]
242
-				## Only request recursive listing if the 'rest' of the URI,
243
-				## i.e. the part after first wildcard, contains '/'
244
-				need_recursion = rest.find('/') > -1
245
-				objectlist = _get_filelist_remote(S3Uri(prefix), recursive = need_recursion)
246
-				for key in objectlist:
247
-					## Check whether the 'key' matches the requested wildcards
248
-					if glob.fnmatch.fnmatch(objectlist[key]['object_uri_str'], uri_str):
249
-						remote_list[key] = objectlist[key]
250
-			else:
251
-				## No wildcards - simply append the given URI to the list
252
-				key = os.path.basename(uri.object())
253
-				if not key:
254
-					raise ParameterError(u"Expecting S3 URI with a filename or --recursive: %s" % uri.uri())
255
-				remote_item = {
256
-					'base_uri': uri,
257
-					'object_uri_str': unicode(uri),
258
-					'object_key': uri.object()
259
-				}
260
-				if require_attribs:
261
-					response = S3(cfg).object_info(uri)
262
-					remote_item.update({
263
-					'size': int(response['headers']['content-length']),
264
-					'md5': response['headers']['etag'].strip('"\''),
265
-					'timestamp' : dateRFC822toUnix(response['headers']['date'])
266
-					})
267
-				remote_list[key] = remote_item
268
-	return remote_list
158
+    def _get_filelist_remote(remote_uri, recursive = True):
159
+        ## If remote_uri ends with '/' then all remote files will have
160
+        ## the remote_uri prefix removed in the relative path.
161
+        ## If, on the other hand, the remote_uri ends with something else
162
+        ## (probably alphanumeric symbol) we'll use the last path part
163
+        ## in the relative path.
164
+        ##
165
+        ## Complicated, eh? See an example:
166
+        ## _get_filelist_remote("s3://bckt/abc/def") may yield:
167
+        ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
168
+        ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
169
+        ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
170
+        ## Furthermore a prefix-magic can restrict the return list:
171
+        ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
172
+        ## { 'xyz/blah.txt' : {} }
173
+
174
+        info(u"Retrieving list of remote files for %s ..." % remote_uri)
175
+
176
+        s3 = S3(Config())
177
+        response = s3.bucket_list(remote_uri.bucket(), prefix = remote_uri.object(), recursive = recursive)
178
+
179
+        rem_base_original = rem_base = remote_uri.object()
180
+        remote_uri_original = remote_uri
181
+        if rem_base != '' and rem_base[-1] != '/':
182
+            rem_base = rem_base[:rem_base.rfind('/')+1]
183
+            remote_uri = S3Uri("s3://%s/%s" % (remote_uri.bucket(), rem_base))
184
+        rem_base_len = len(rem_base)
185
+        rem_list = SortedDict(ignore_case = False)
186
+        break_now = False
187
+        for object in response['list']:
188
+            if object['Key'] == rem_base_original and object['Key'][-1] != os.path.sep:
189
+                ## We asked for one file and we got that file :-)
190
+                key = os.path.basename(object['Key'])
191
+                object_uri_str = remote_uri_original.uri()
192
+                break_now = True
193
+                rem_list = {}   ## Remove whatever has already been put to rem_list
194
+            else:
195
+                key = object['Key'][rem_base_len:]      ## Beware - this may be '' if object['Key']==rem_base !!
196
+                object_uri_str = remote_uri.uri() + key
197
+            rem_list[key] = {
198
+                'size' : int(object['Size']),
199
+                'timestamp' : dateS3toUnix(object['LastModified']), ## Sadly it's upload time, not our lastmod time :-(
200
+                'md5' : object['ETag'][1:-1],
201
+                'object_key' : object['Key'],
202
+                'object_uri_str' : object_uri_str,
203
+                'base_uri' : remote_uri,
204
+            }
205
+            if break_now:
206
+                break
207
+        return rem_list
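
The rem_base computation above decides how much of each key gets stripped when building relative paths. The same rule in isolation (split_remote_base is a hypothetical name, for illustration only):

    def split_remote_base(obj_path):
        # Trailing '/'   -> strip the whole prefix from keys.
        # No trailing '/' -> keep the last path part in keys.
        if obj_path != '' and obj_path[-1] != '/':
            return obj_path[:obj_path.rfind('/') + 1]
        return obj_path

    assert split_remote_base("abc/def")   == "abc/"      # keys like 'def/file1.jpg'
    assert split_remote_base("abc/def/")  == "abc/def/"  # keys like 'file1.jpg'
    assert split_remote_base("abc/def/x") == "abc/def/"  # the prefix-magic case
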
208
+
209
+    cfg = Config()
210
+    remote_uris = []
211
+    remote_list = SortedDict(ignore_case = False)
212
+
213
+    if type(args) not in (list, tuple):
214
+        args = [args]
215
+
216
+    if recursive == None:
217
+        recursive = cfg.recursive
218
+
219
+    for arg in args:
220
+        uri = S3Uri(arg)
221
+        if not uri.type == 's3':
222
+            raise ParameterError("Expecting S3 URI instead of '%s'" % arg)
223
+        remote_uris.append(uri)
224
+
225
+    if recursive:
226
+        for uri in remote_uris:
227
+            objectlist = _get_filelist_remote(uri)
228
+            for key in objectlist:
229
+                remote_list[key] = objectlist[key]
230
+    else:
231
+        for uri in remote_uris:
232
+            uri_str = str(uri)
233
+            ## Wildcards used in remote URI?
234
+            ## If yes we'll need a bucket listing...
235
+            if uri_str.find('*') > -1 or uri_str.find('?') > -1:
236
+                first_wildcard = uri_str.find('*')
237
+                first_questionmark = uri_str.find('?')
238
+                if first_questionmark > -1 and first_questionmark < first_wildcard:
239
+                    first_wildcard = first_questionmark
240
+                prefix = uri_str[:first_wildcard]
241
+                rest = uri_str[first_wildcard+1:]
242
+                ## Only request recursive listing if the 'rest' of the URI,
243
+                ## i.e. the part after first wildcard, contains '/'
244
+                need_recursion = rest.find('/') > -1
245
+                objectlist = _get_filelist_remote(S3Uri(prefix), recursive = need_recursion)
246
+                for key in objectlist:
247
+                    ## Check whether the 'key' matches the requested wildcards
248
+                    if glob.fnmatch.fnmatch(objectlist[key]['object_uri_str'], uri_str):
249
+                        remote_list[key] = objectlist[key]
250
+            else:
251
+                ## No wildcards - simply append the given URI to the list
252
+                key = os.path.basename(uri.object())
253
+                if not key:
254
+                    raise ParameterError(u"Expecting S3 URI with a filename or --recursive: %s" % uri.uri())
255
+                remote_item = {
256
+                    'base_uri': uri,
257
+                    'object_uri_str': unicode(uri),
258
+                    'object_key': uri.object()
259
+                }
260
+                if require_attribs:
261
+                    response = S3(cfg).object_info(uri)
262
+                    remote_item.update({
263
+                    'size': int(response['headers']['content-length']),
264
+                    'md5': response['headers']['etag'].strip('"\''),
265
+                    'timestamp' : dateRFC822toUnix(response['headers']['date'])
266
+                    })
267
+                remote_list[key] = remote_item
268
+    return remote_list
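
The non-recursive wildcard branch above lists everything under the prefix before the first wildcard and then filters with fnmatch against the full object URI (the source reaches it as glob.fnmatch.fnmatch, which is the ordinary fnmatch module). The filtering step, with made-up keys:

    import fnmatch

    uris = [
        "s3://bckt/photos/2011/cat.jpg",
        "s3://bckt/photos/2011/dog.png",
        "s3://bckt/photos/readme.txt",
    ]
    pattern = "s3://bckt/photos/*.jpg"
    print([u for u in uris if fnmatch.fnmatch(u, pattern)])
    # ['s3://bckt/photos/2011/cat.jpg']  -- fnmatch's '*' crosses '/' too,
    # which is why need_recursion checks for '/' after the first wildcard
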
269 269
 
270 270
 def compare_filelists(src_list, dst_list, src_remote, dst_remote):
271
-	def __direction_str(is_remote):
272
-		return is_remote and "remote" or "local"
273
-
274
-	# We don't support local->local sync, use 'rsync' or something like that instead ;-)
275
-	assert(not(src_remote == False and dst_remote == False))
276
-
277
-	info(u"Verifying attributes...")
278
-	cfg = Config()
279
-	exists_list = SortedDict(ignore_case = False)
280
-
281
-	debug("Comparing filelists (direction: %s -> %s)" % (__direction_str(src_remote), __direction_str(dst_remote)))
282
-	debug("src_list.keys: %s" % src_list.keys())
283
-	debug("dst_list.keys: %s" % dst_list.keys())
284
-
285
-	for file in src_list.keys():
286
-		debug(u"CHECK: %s" % file)
287
-		if dst_list.has_key(file):
288
-			## Was --skip-existing requested?
289
-			if cfg.skip_existing:
290
-				debug(u"IGNR: %s (used --skip-existing)" % (file))
291
-				exists_list[file] = src_list[file]
292
-				del(src_list[file])
293
-				## Remove from destination-list, all that is left there will be deleted
294
-				del(dst_list[file])
295
-				continue
296
-
297
-			attribs_match = True
298
-			## Check size first
299
-			if 'size' in cfg.sync_checks and dst_list[file]['size'] != src_list[file]['size']:
300
-				debug(u"XFER: %s (size mismatch: src=%s dst=%s)" % (file, src_list[file]['size'], dst_list[file]['size']))
301
-				attribs_match = False
302
-			
303
-			if attribs_match and 'md5' in cfg.sync_checks:
304
-				## ... same size, check MD5
305
-				try:
306
-					if src_remote == False and dst_remote == True:
307
-						src_md5 = hash_file_md5(src_list[file]['full_name'])
308
-						dst_md5 = dst_list[file]['md5']
309
-					elif src_remote == True and dst_remote == False:
310
-						src_md5 = src_list[file]['md5']
311
-						dst_md5 = hash_file_md5(dst_list[file]['full_name'])
312
-					elif src_remote == True and dst_remote == True:
313
-						src_md5 = src_list[file]['md5']
314
-						dst_md5 = dst_list[file]['md5']
315
-				except (IOError,OSError), e:
316
-					# MD5 sum verification failed - ignore that file altogether
317
-					debug(u"IGNR: %s (disappeared)" % (file))
318
-					warning(u"%s: file disappeared, ignoring." % (file))
319
-					del(src_list[file])
320
-					del(dst_list[file])
321
-					continue
322
-
323
-				if src_md5 != dst_md5:
324
-					## Checksums are different.
325
-					attribs_match = False
326
-					debug(u"XFER: %s (md5 mismatch: src=%s dst=%s)" % (file, src_md5, dst_md5))
327
-
328
-			if attribs_match:
329
-				## Remove from source-list, all that is left there will be transferred
330
-				debug(u"IGNR: %s (transfer not needed)" % file)
331
-				exists_list[file] = src_list[file]
332
-				del(src_list[file])
333
-
334
-			## Remove from destination-list, all that is left there will be deleted
335
-			del(dst_list[file])
336
-
337
-	return src_list, dst_list, exists_list
338
-
339
-
271
+    def __direction_str(is_remote):
272
+        return is_remote and "remote" or "local"
273
+
274
+    # We don't support local->local sync, use 'rsync' or something like that instead ;-)
275
+    assert(not(src_remote == False and dst_remote == False))
276
+
277
+    info(u"Verifying attributes...")
278
+    cfg = Config()
279
+    exists_list = SortedDict(ignore_case = False)
280
+
281
+    debug("Comparing filelists (direction: %s -> %s)" % (__direction_str(src_remote), __direction_str(dst_remote)))
282
+    debug("src_list.keys: %s" % src_list.keys())
283
+    debug("dst_list.keys: %s" % dst_list.keys())
284
+
285
+    for file in src_list.keys():
286
+        debug(u"CHECK: %s" % file)
287
+        if dst_list.has_key(file):
288
+            ## Was --skip-existing requested?
289
+            if cfg.skip_existing:
290
+                debug(u"IGNR: %s (used --skip-existing)" % (file))
291
+                exists_list[file] = src_list[file]
292
+                del(src_list[file])
293
+                ## Remove from destination-list, all that is left there will be deleted
294
+                del(dst_list[file])
295
+                continue
296
+
297
+            attribs_match = True
298
+            ## Check size first
299
+            if 'size' in cfg.sync_checks and dst_list[file]['size'] != src_list[file]['size']:
300
+                debug(u"XFER: %s (size mismatch: src=%s dst=%s)" % (file, src_list[file]['size'], dst_list[file]['size']))
301
+                attribs_match = False
302
+
303
+            if attribs_match and 'md5' in cfg.sync_checks:
304
+                ## ... same size, check MD5
305
+                try:
306
+                    if src_remote == False and dst_remote == True:
307
+                        src_md5 = hash_file_md5(src_list[file]['full_name'])
308
+                        dst_md5 = dst_list[file]['md5']
309
+                    elif src_remote == True and dst_remote == False:
310
+                        src_md5 = src_list[file]['md5']
311
+                        dst_md5 = hash_file_md5(dst_list[file]['full_name'])
312
+                    elif src_remote == True and dst_remote == True:
313
+                        src_md5 = src_list[file]['md5']
314
+                        dst_md5 = dst_list[file]['md5']
315
+                except (IOError,OSError), e:
316
+                    # MD5 sum verification failed - ignore that file altogether
317
+                    debug(u"IGNR: %s (disappeared)" % (file))
318
+                    warning(u"%s: file disappeared, ignoring." % (file))
319
+                    del(src_list[file])
320
+                    del(dst_list[file])
321
+                    continue
322
+
323
+                if src_md5 != dst_md5:
324
+                    ## Checksums are different.
325
+                    attribs_match = False
326
+                    debug(u"XFER: %s (md5 mismatch: src=%s dst=%s)" % (file, src_md5, dst_md5))
327
+
328
+            if attribs_match:
329
+                ## Remove from source-list, all that is left there will be transferred
330
+                debug(u"IGNR: %s (transfer not needed)" % file)
331
+                exists_list[file] = src_list[file]
332
+                del(src_list[file])
333
+
334
+            ## Remove from destination-list, all that is left there will be deleted
335
+            del(dst_list[file])
336
+
337
+    return src_list, dst_list, exists_list
338
+
339
+# vim:et:ts=4:sts=4:ai
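
In short, compare_filelists is destructive on purpose: after it returns, src_list holds only what must be transferred, dst_list only what may be deleted, and exists_list the pairs that matched. A toy run of the same bookkeeping with plain dicts and a size-only check:

    src = {"a.txt": {"size": 10}, "b.txt": {"size": 20}, "c.txt": {"size": 30}}
    dst = {"a.txt": {"size": 10}, "b.txt": {"size": 99}}

    exists = {}
    for name in list(src.keys()):
        if name in dst:
            if src[name]["size"] == dst[name]["size"]:
                exists[name] = src.pop(name)    # attributes match: no transfer
            del dst[name]                       # never delete a file the source has
    print(sorted(src.keys()))     # ['b.txt', 'c.txt'] -> transfer these
    print(sorted(dst.keys()))     # []                 -> delete these (none here)
    print(sorted(exists.keys()))  # ['a.txt']
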
... ...
@@ -4,10 +4,11 @@ url = "http://s3tools.org"
4 4
 license = "GPL version 2"
5 5
 short_description = "Command line tool for managing Amazon S3 and CloudFront services"
6 6
 long_description = """
7
-S3cmd lets you copy files from/to Amazon S3 
7
+S3cmd lets you copy files from/to Amazon S3
8 8
 (Simple Storage Service) using a simple to use
9 9
 command line client. Supports rsync-like backup,
10 10
 GPG encryption, and more. Also supports management
11 11
 of Amazon's CloudFront content delivery network.
12 12
 """
13 13
 
14
+# vim:et:ts=4:sts=4:ai
... ...
@@ -8,147 +8,149 @@ import datetime
8 8
 import Utils
9 9
 
10 10
 class Progress(object):
11
-	_stdout = sys.stdout
12
-
13
-	def __init__(self, labels, total_size):
14
-		self._stdout = sys.stdout
15
-		self.new_file(labels, total_size)
16
-	
17
-	def new_file(self, labels, total_size):
18
-		self.labels = labels
19
-		self.total_size = total_size
20
-		# Set initial_position to something in the
21
-		# case we're not counting from 0. For instance
22
-		# when appending to a partially downloaded file.
23
-		# Setting initial_position will let the speed
24
-		# be computed right.
25
-		self.initial_position = 0
26
-		self.current_position = self.initial_position
27
-		self.time_start = datetime.datetime.now()
28
-		self.time_last = self.time_start
29
-		self.time_current = self.time_start
30
-
31
-		self.display(new_file = True)
32
-	
33
-	def update(self, current_position = -1, delta_position = -1):
34
-		self.time_last = self.time_current
35
-		self.time_current = datetime.datetime.now()
36
-		if current_position > -1:
37
-			self.current_position = current_position
38
-		elif delta_position > -1:
39
-			self.current_position += delta_position
40
-		#else:
41
-		#	no update, just call display()
42
-		self.display()
43
-
44
-	def done(self, message):
45
-		self.display(done_message = message)
46
-
47
-	def output_labels(self):
48
-		self._stdout.write(u"%(source)s -> %(destination)s  %(extra)s\n" % self.labels)
49
-		self._stdout.flush()
50
-
51
-	def display(self, new_file = False, done_message = None):
52
-		"""
53
-		display(new_file = False[/True], done = False[/True])
54
-
55
-		Override this method to provide a nicer output.
56
-		"""
57
-		if new_file:
58
-			self.output_labels()
59
-			self.last_milestone = 0
60
-			return
61
-
62
-		if self.current_position == self.total_size:
63
-			print_size = Utils.formatSize(self.current_position, True)
64
-			if print_size[1] != "": print_size[1] += "B"
65
-			timedelta = self.time_current - self.time_start
66
-			sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds)/1000000.0
67
-			print_speed = Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
68
-			self._stdout.write("100%%  %s%s in %.2fs (%.2f %sB/s)\n" % 
69
-				(print_size[0], print_size[1], sec_elapsed, print_speed[0], print_speed[1]))
70
-			self._stdout.flush()
71
-			return
72
-
73
-		rel_position = self.current_position * 100 / self.total_size
74
-		if rel_position >= self.last_milestone:
75
-			self.last_milestone = (int(rel_position) / 5) * 5
76
-			self._stdout.write("%d%% " % self.last_milestone)
77
-			self._stdout.flush()
78
-			return
11
+    _stdout = sys.stdout
12
+
13
+    def __init__(self, labels, total_size):
14
+        self._stdout = sys.stdout
15
+        self.new_file(labels, total_size)
16
+
17
+    def new_file(self, labels, total_size):
18
+        self.labels = labels
19
+        self.total_size = total_size
20
+        # Set initial_position to something in the
21
+        # case we're not counting from 0. For instance
22
+        # when appending to a partially downloaded file.
23
+        # Setting initial_position will let the speed
24
+        # be computed right.
25
+        self.initial_position = 0
26
+        self.current_position = self.initial_position
27
+        self.time_start = datetime.datetime.now()
28
+        self.time_last = self.time_start
29
+        self.time_current = self.time_start
30
+
31
+        self.display(new_file = True)
32
+
33
+    def update(self, current_position = -1, delta_position = -1):
34
+        self.time_last = self.time_current
35
+        self.time_current = datetime.datetime.now()
36
+        if current_position > -1:
37
+            self.current_position = current_position
38
+        elif delta_position > -1:
39
+            self.current_position += delta_position
40
+        #else:
41
+        #   no update, just call display()
42
+        self.display()
43
+
44
+    def done(self, message):
45
+        self.display(done_message = message)
46
+
47
+    def output_labels(self):
48
+        self._stdout.write(u"%(source)s -> %(destination)s  %(extra)s\n" % self.labels)
49
+        self._stdout.flush()
50
+
51
+    def display(self, new_file = False, done_message = None):
52
+        """
53
+        display(new_file = False[/True], done = False[/True])
54
+
55
+        Override this method to provide a nicer output.
56
+        """
57
+        if new_file:
58
+            self.output_labels()
59
+            self.last_milestone = 0
60
+            return
61
+
62
+        if self.current_position == self.total_size:
63
+            print_size = Utils.formatSize(self.current_position, True)
64
+            if print_size[1] != "": print_size[1] += "B"
65
+            timedelta = self.time_current - self.time_start
66
+            sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds)/1000000.0
67
+            print_speed = Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
68
+            self._stdout.write("100%%  %s%s in %.2fs (%.2f %sB/s)\n" %
69
+                (print_size[0], print_size[1], sec_elapsed, print_speed[0], print_speed[1]))
70
+            self._stdout.flush()
71
+            return
72
+
73
+        rel_position = self.current_position * 100 / self.total_size
74
+        if rel_position >= self.last_milestone:
75
+            self.last_milestone = (int(rel_position) / 5) * 5
76
+            self._stdout.write("%d%% " % self.last_milestone)
77
+            self._stdout.flush()
78
+            return
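
The milestone arithmetic above quantizes progress to 5% steps, so the plain Progress class writes at most twenty-one updates per file. With explicit floor division, which is what Python 2's '/' performs on the ints above:

    for rel_position in (0, 3, 5, 12, 99, 100):
        print("%3d%% -> prints %d%%" % (rel_position, (int(rel_position) // 5) * 5))
    # 0 -> 0, 3 -> 0, 5 -> 5, 12 -> 10, 99 -> 95, 100 -> 100
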
79 79
 
80 80
 class ProgressANSI(Progress):
81 81
     ## http://en.wikipedia.org/wiki/ANSI_escape_code
82
-	SCI = '\x1b['
83
-	ANSI_hide_cursor = SCI + "?25l"
84
-	ANSI_show_cursor = SCI + "?25h"
85
-	ANSI_save_cursor_pos = SCI + "s"
86
-	ANSI_restore_cursor_pos = SCI + "u"
87
-	ANSI_move_cursor_to_column = SCI + "%uG"
88
-	ANSI_erase_to_eol = SCI + "0K"
89
-	ANSI_erase_current_line = SCI + "2K"
90
-
91
-	def display(self, new_file = False, done_message = None):
92
-		"""
93
-		display(new_file = False[/True], done_message = None)
94
-		"""
95
-		if new_file:
96
-			self.output_labels()
97
-			self._stdout.write(self.ANSI_save_cursor_pos)
98
-			self._stdout.flush()
99
-			return
100
-
101
-		timedelta = self.time_current - self.time_start
102
-		sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds)/1000000.0
103
-		if (sec_elapsed > 0):
104
-			print_speed = Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
105
-		else:
106
-			print_speed = (0, "")
107
-		self._stdout.write(self.ANSI_restore_cursor_pos)
108
-		self._stdout.write(self.ANSI_erase_to_eol)
109
-		self._stdout.write("%(current)s of %(total)s   %(percent)3d%% in %(elapsed)ds  %(speed).2f %(speed_coeff)sB/s" % {
110
-			"current" : str(self.current_position).rjust(len(str(self.total_size))),
111
-			"total" : self.total_size,
112
-			"percent" : self.total_size and (self.current_position * 100 / self.total_size) or 0,
113
-			"elapsed" : sec_elapsed,
114
-			"speed" : print_speed[0],
115
-			"speed_coeff" : print_speed[1]
116
-		})
117
-
118
-		if done_message:
119
-			self._stdout.write("  %s\n" % done_message)
120
-
121
-		self._stdout.flush()
82
+    SCI = '\x1b['
83
+    ANSI_hide_cursor = SCI + "?25l"
84
+    ANSI_show_cursor = SCI + "?25h"
85
+    ANSI_save_cursor_pos = SCI + "s"
86
+    ANSI_restore_cursor_pos = SCI + "u"
87
+    ANSI_move_cursor_to_column = SCI + "%uG"
88
+    ANSI_erase_to_eol = SCI + "0K"
89
+    ANSI_erase_current_line = SCI + "2K"
90
+
91
+    def display(self, new_file = False, done_message = None):
92
+        """
93
+        display(new_file = False[/True], done_message = None)
94
+        """
95
+        if new_file:
96
+            self.output_labels()
97
+            self._stdout.write(self.ANSI_save_cursor_pos)
98
+            self._stdout.flush()
99
+            return
100
+
101
+        timedelta = self.time_current - self.time_start
102
+        sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds)/1000000.0
103
+        if (sec_elapsed > 0):
104
+            print_speed = Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
105
+        else:
106
+            print_speed = (0, "")
107
+        self._stdout.write(self.ANSI_restore_cursor_pos)
108
+        self._stdout.write(self.ANSI_erase_to_eol)
109
+        self._stdout.write("%(current)s of %(total)s   %(percent)3d%% in %(elapsed)ds  %(speed).2f %(speed_coeff)sB/s" % {
110
+            "current" : str(self.current_position).rjust(len(str(self.total_size))),
111
+            "total" : self.total_size,
112
+            "percent" : self.total_size and (self.current_position * 100 / self.total_size) or 0,
113
+            "elapsed" : sec_elapsed,
114
+            "speed" : print_speed[0],
115
+            "speed_coeff" : print_speed[1]
116
+        })
117
+
118
+        if done_message:
119
+            self._stdout.write("  %s\n" % done_message)
120
+
121
+        self._stdout.flush()
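
ProgressANSI redraws in place: it saves the cursor position once when the labels are printed, then restores it and erases to end of line on every update. The same escape sequences in a self-contained demo (ANSI-capable terminals only):

    import sys, time

    SCI = '\x1b['
    sys.stdout.write("uploading: ")
    sys.stdout.write(SCI + "s")          # save cursor position
    for pct in range(0, 101, 25):
        sys.stdout.write(SCI + "u")      # restore saved position
        sys.stdout.write(SCI + "0K")     # erase to end of line
        sys.stdout.write("%d%%" % pct)
        sys.stdout.flush()
        time.sleep(0.2)
    sys.stdout.write("\n")
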
122 122
 
123 123
 class ProgressCR(Progress):
124 124
     ## Uses CR char (Carriage Return) just like other progress bars do.
125
-	CR_char = chr(13)
126
-
127
-	def display(self, new_file = False, done_message = None):
128
-		"""
129
-		display(new_file = False[/True], done_message = None)
130
-		"""
131
-		if new_file:
132
-			self.output_labels()
133
-			return
134
-
135
-		timedelta = self.time_current - self.time_start
136
-		sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds)/1000000.0
137
-		if (sec_elapsed > 0):
138
-			print_speed = Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
139
-		else:
140
-			print_speed = (0, "")
141
-		self._stdout.write(self.CR_char)
142
-		output = " %(current)s of %(total)s   %(percent)3d%% in %(elapsed)4ds  %(speed)7.2f %(speed_coeff)sB/s" % {
143
-			"current" : str(self.current_position).rjust(len(str(self.total_size))),
144
-			"total" : self.total_size,
145
-			"percent" : self.total_size and (self.current_position * 100 / self.total_size) or 0,
146
-			"elapsed" : sec_elapsed,
147
-			"speed" : print_speed[0],
148
-			"speed_coeff" : print_speed[1]
149
-		}
150
-		self._stdout.write(output)
151
-		if done_message:
152
-			self._stdout.write("  %s\n" % done_message)
153
-
154
-		self._stdout.flush()
125
+    CR_char = chr(13)
126
+
127
+    def display(self, new_file = False, done_message = None):
128
+        """
129
+        display(new_file = False[/True], done_message = None)
130
+        """
131
+        if new_file:
132
+            self.output_labels()
133
+            return
134
+
135
+        timedelta = self.time_current - self.time_start
136
+        sec_elapsed = timedelta.days * 86400 + timedelta.seconds + float(timedelta.microseconds)/1000000.0
137
+        if (sec_elapsed > 0):
138
+            print_speed = Utils.formatSize((self.current_position - self.initial_position) / sec_elapsed, True, True)
139
+        else:
140
+            print_speed = (0, "")
141
+        self._stdout.write(self.CR_char)
142
+        output = " %(current)s of %(total)s   %(percent)3d%% in %(elapsed)4ds  %(speed)7.2f %(speed_coeff)sB/s" % {
143
+            "current" : str(self.current_position).rjust(len(str(self.total_size))),
144
+            "total" : self.total_size,
145
+            "percent" : self.total_size and (self.current_position * 100 / self.total_size) or 0,
146
+            "elapsed" : sec_elapsed,
147
+            "speed" : print_speed[0],
148
+            "speed_coeff" : print_speed[1]
149
+        }
150
+        self._stdout.write(output)
151
+        if done_message:
152
+            self._stdout.write("  %s\n" % done_message)
153
+
154
+        self._stdout.flush()
155
+
156
+# vim:et:ts=4:sts=4:ai
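
Both subclasses are driven the same way: construct with the label dict and a total size, feed update() as bytes move, and finish with done(). A usage sketch, assuming this Progress module is importable and using the label keys that output_labels() expects:

    from Progress import ProgressCR

    labels = {"source": "file.bin", "destination": "s3://bckt/file.bin", "extra": ""}
    p = ProgressCR(labels, 1024)
    for _ in range(4):
        p.update(delta_position = 256)   # 256 bytes moved per chunk
    p.done("done")
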
... ...
@@ -14,9 +14,9 @@ from logging import debug, info, warning, error
14 14
 from stat import ST_SIZE
15 15
 
16 16
 try:
17
-	from hashlib import md5
17
+    from hashlib import md5
18 18
 except ImportError:
19
-	from md5 import md5
19
+    from md5 import md5
20 20
 
21 21
 from Utils import *
22 22
 from SortedDict import SortedDict
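
The import fallback above targets Pythons older than 2.5, where md5 lived in its own module; both spellings yield the same hash-object interface, which is what the ETag comparisons elsewhere rely on. For instance:

    from hashlib import md5   # pre-2.5: from md5 import md5

    h = md5()
    h.update(b"hello world")
    print(h.hexdigest())      # 5eb63bbbe01eeed093cb22bb8f5acdc3
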
... ...
@@ -29,796 +29,798 @@ from S3Uri import S3Uri
29 29
 
30 30
 __all__ = []
31 31
 class S3Request(object):
32
-	def __init__(self, s3, method_string, resource, headers, params = {}):
33
-		self.s3 = s3
34
-		self.headers = SortedDict(headers or {}, ignore_case = True)
35
-		self.resource = resource
36
-		self.method_string = method_string
37
-		self.params = params
38
-
39
-		self.update_timestamp()
40
-		self.sign()
41
-
42
-	def update_timestamp(self):
43
-		if self.headers.has_key("date"):
44
-			del(self.headers["date"])
45
-		self.headers["x-amz-date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
46
-
47
-	def format_param_str(self):
48
-		"""
49
-		Format URL parameters from self.params and returns
50
-		?parm1=val1&parm2=val2 or an empty string if there 
51
-		are no parameters.  Output of this function should 
52
-		be appended directly to self.resource['uri']
53
-		"""
54
-		param_str = ""
55
-		for param in self.params:
56
-			if self.params[param] not in (None, ""):
57
-				param_str += "&%s=%s" % (param, self.params[param])
58
-			else:
59
-				param_str += "&%s" % param
60
-		return param_str and "?" + param_str[1:]
61
-
62
-	def sign(self):
63
-		h  = self.method_string + "\n"
64
-		h += self.headers.get("content-md5", "")+"\n"
65
-		h += self.headers.get("content-type", "")+"\n"
66
-		h += self.headers.get("date", "")+"\n"
67
-		for header in self.headers.keys():
68
-			if header.startswith("x-amz-"):
69
-				h += header+":"+str(self.headers[header])+"\n"
70
-		if self.resource['bucket']:
71
-			h += "/" + self.resource['bucket']
72
-		h += self.resource['uri']
73
-		debug("SignHeaders: " + repr(h))
74
-		signature = sign_string(h)
75
-
76
-		self.headers["Authorization"] = "AWS "+self.s3.config.access_key+":"+signature
77
-
78
-	def get_triplet(self):
79
-		self.update_timestamp()
80
-		self.sign()
81
-		resource = dict(self.resource)	## take a copy
82
-		resource['uri'] += self.format_param_str()
83
-		return (self.method_string, resource, self.headers)
32
+    def __init__(self, s3, method_string, resource, headers, params = {}):
33
+        self.s3 = s3
34
+        self.headers = SortedDict(headers or {}, ignore_case = True)
35
+        self.resource = resource
36
+        self.method_string = method_string
37
+        self.params = params
38
+
39
+        self.update_timestamp()
40
+        self.sign()
41
+
42
+    def update_timestamp(self):
43
+        if self.headers.has_key("date"):
44
+            del(self.headers["date"])
45
+        self.headers["x-amz-date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
46
+
47
+    def format_param_str(self):
48
+        """
49
+        Format URL parameters from self.params and returns
50
+        ?parm1=val1&parm2=val2 or an empty string if there
51
+        are no parameters.  Output of this function should
52
+        be appended directly to self.resource['uri']
53
+        """
54
+        param_str = ""
55
+        for param in self.params:
56
+            if self.params[param] not in (None, ""):
57
+                param_str += "&%s=%s" % (param, self.params[param])
58
+            else:
59
+                param_str += "&%s" % param
60
+        return param_str and "?" + param_str[1:]
61
+
62
+    def sign(self):
63
+        h  = self.method_string + "\n"
64
+        h += self.headers.get("content-md5", "")+"\n"
65
+        h += self.headers.get("content-type", "")+"\n"
66
+        h += self.headers.get("date", "")+"\n"
67
+        for header in self.headers.keys():
68
+            if header.startswith("x-amz-"):
69
+                h += header+":"+str(self.headers[header])+"\n"
70
+        if self.resource['bucket']:
71
+            h += "/" + self.resource['bucket']
72
+        h += self.resource['uri']
73
+        debug("SignHeaders: " + repr(h))
74
+        signature = sign_string(h)
75
+
76
+        self.headers["Authorization"] = "AWS "+self.s3.config.access_key+":"+signature
77
+
78
+    def get_triplet(self):
79
+        self.update_timestamp()
80
+        self.sign()
81
+        resource = dict(self.resource)  ## take a copy
82
+        resource['uri'] += self.format_param_str()
83
+        return (self.method_string, resource, self.headers)
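
format_param_str keeps value-less parameters bare ('?acl') and returns an empty string when there is nothing to append. The same rule standalone (sorted() added here only to make the output deterministic; the original iterates in dict order):

    def format_param_str(params):
        param_str = ""
        for param in sorted(params):
            if params[param] not in (None, ""):
                param_str += "&%s=%s" % (param, params[param])
            else:
                param_str += "&%s" % param
        return param_str and "?" + param_str[1:]

    print(format_param_str({"prefix": "abc/", "delimiter": "/"}))  # ?delimiter=/&prefix=abc/
    print(format_param_str({"acl": None}))                         # ?acl
    print(format_param_str({}) == "")                              # True
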
84 84
 
85 85
 class S3(object):
86
-	http_methods = BidirMap(
87
-		GET = 0x01,
88
-		PUT = 0x02,
89
-		HEAD = 0x04,
90
-		DELETE = 0x08,
91
-		MASK = 0x0F,
92
-		)
93
-	
94
-	targets = BidirMap(
95
-		SERVICE = 0x0100,
96
-		BUCKET = 0x0200,
97
-		OBJECT = 0x0400,
98
-		MASK = 0x0700,
99
-		)
100
-
101
-	operations = BidirMap(
102
-		UNDFINED = 0x0000,
103
-		LIST_ALL_BUCKETS = targets["SERVICE"] | http_methods["GET"],
104
-		BUCKET_CREATE = targets["BUCKET"] | http_methods["PUT"],
105
-		BUCKET_LIST = targets["BUCKET"] | http_methods["GET"],
106
-		BUCKET_DELETE = targets["BUCKET"] | http_methods["DELETE"],
107
-		OBJECT_PUT = targets["OBJECT"] | http_methods["PUT"],
108
-		OBJECT_GET = targets["OBJECT"] | http_methods["GET"],
109
-		OBJECT_HEAD = targets["OBJECT"] | http_methods["HEAD"],
110
-		OBJECT_DELETE = targets["OBJECT"] | http_methods["DELETE"],
111
-	)
112
-
113
-	codes = {
114
-		"NoSuchBucket" : "Bucket '%s' does not exist",
115
-		"AccessDenied" : "Access to bucket '%s' was denied",
116
-		"BucketAlreadyExists" : "Bucket '%s' already exists",
117
-		}
118
-
119
-	## S3 sometimes sends HTTP-307 response 
120
-	redir_map = {}
121
-
122
-	## Maximum attempts of re-issuing failed requests
123
-	_max_retries = 5
124
-
125
-	def __init__(self, config):
126
-		self.config = config
127
-
128
-	def get_connection(self, bucket):
129
-		if self.config.proxy_host != "":
130
-			return httplib.HTTPConnection(self.config.proxy_host, self.config.proxy_port)
131
-		else:
132
-			if self.config.use_https:
133
-				return httplib.HTTPSConnection(self.get_hostname(bucket))
134
-			else:
135
-				return httplib.HTTPConnection(self.get_hostname(bucket))
136
-
137
-	def get_hostname(self, bucket):
138
-		if bucket and check_bucket_name_dns_conformity(bucket):
139
-			if self.redir_map.has_key(bucket):
140
-				host = self.redir_map[bucket]
141
-			else:
142
-				host = getHostnameFromBucket(bucket)
143
-		else:
144
-			host = self.config.host_base
145
-		debug('get_hostname(%s): %s' % (bucket, host))
146
-		return host
147
-
148
-	def set_hostname(self, bucket, redir_hostname):
149
-		self.redir_map[bucket] = redir_hostname
150
-
151
-	def format_uri(self, resource):
152
-		if resource['bucket'] and not check_bucket_name_dns_conformity(resource['bucket']):
153
-			uri = "/%s%s" % (resource['bucket'], resource['uri'])
154
-		else:
155
-			uri = resource['uri']
156
-		if self.config.proxy_host != "":
157
-			uri = "http://%s%s" % (self.get_hostname(resource['bucket']), uri)
158
-		debug('format_uri(): ' + uri)
159
-		return uri
160
-
161
-	## Commands / Actions
162
-	def list_all_buckets(self):
163
-		request = self.create_request("LIST_ALL_BUCKETS")
164
-		response = self.send_request(request)
165
-		response["list"] = getListFromXml(response["data"], "Bucket")
166
-		return response
167
-	
168
-	def bucket_list(self, bucket, prefix = None, recursive = None):
169
-		def _list_truncated(data):
170
-			## <IsTruncated> can either be "true" or "false" or be missing completely
171
-			is_truncated = getTextFromXml(data, ".//IsTruncated") or "false"
172
-			return is_truncated.lower() != "false"
173
-
174
-		def _get_contents(data):
175
-			return getListFromXml(data, "Contents")
176
-
177
-		def _get_common_prefixes(data):
178
-			return getListFromXml(data, "CommonPrefixes")
179
-
180
-		uri_params = {}
181
-		truncated = True
182
-		list = []
183
-		prefixes = []
184
-
185
-		while truncated:
186
-			response = self.bucket_list_noparse(bucket, prefix, recursive, uri_params)
187
-			current_list = _get_contents(response["data"])
188
-			current_prefixes = _get_common_prefixes(response["data"])
189
-			truncated = _list_truncated(response["data"])
190
-			if truncated:
191
-				if current_list:
192
-					uri_params['marker'] = self.urlencode_string(current_list[-1]["Key"])
193
-				else:
194
-					uri_params['marker'] = self.urlencode_string(current_prefixes[-1]["Prefix"])
195
-				debug("Listing continues after '%s'" % uri_params['marker'])
196
-
197
-			list += current_list
198
-			prefixes += current_prefixes
199
-
200
-		response['list'] = list
201
-		response['common_prefixes'] = prefixes
202
-		return response
203
-
204
-	def bucket_list_noparse(self, bucket, prefix = None, recursive = None, uri_params = {}):
205
-		if prefix:
206
-			uri_params['prefix'] = self.urlencode_string(prefix)
207
-		if not self.config.recursive and not recursive:
208
-			uri_params['delimiter'] = "/"
209
-		request = self.create_request("BUCKET_LIST", bucket = bucket, **uri_params)
210
-		response = self.send_request(request)
211
-		#debug(response)
212
-		return response
213
-
214
-	def bucket_create(self, bucket, bucket_location = None):
215
-		headers = SortedDict(ignore_case = True)
216
-		body = ""
217
-		if bucket_location and bucket_location.strip().upper() != "US":
218
-			bucket_location = bucket_location.strip()
219
-			if bucket_location.upper() == "EU":
220
-				bucket_location = bucket_location.upper()
221
-			else:
222
-				bucket_location = bucket_location.lower()
223
-			body  = "<CreateBucketConfiguration><LocationConstraint>"
224
-			body += bucket_location
225
-			body += "</LocationConstraint></CreateBucketConfiguration>"
226
-			debug("bucket_location: " + body)
227
-			check_bucket_name(bucket, dns_strict = True)
228
-		else:
229
-			check_bucket_name(bucket, dns_strict = False)
230
-		if self.config.acl_public:
231
-			headers["x-amz-acl"] = "public-read"
232
-		request = self.create_request("BUCKET_CREATE", bucket = bucket, headers = headers)
233
-		response = self.send_request(request, body)
234
-		return response
235
-
236
-	def bucket_delete(self, bucket):
237
-		request = self.create_request("BUCKET_DELETE", bucket = bucket)
238
-		response = self.send_request(request)
239
-		return response
240
-
241
-	def get_bucket_location(self, uri):
242
-		request = self.create_request("BUCKET_LIST", bucket = uri.bucket(), extra = "?location")
243
-		response = self.send_request(request)
244
-		location = getTextFromXml(response['data'], "LocationConstraint")
245
-		if not location or location in [ "", "US" ]:
246
-			location = "us-east-1"
247
-		elif location == "EU":
248
-			location = "eu-west-1"
249
-		return location
250
-
251
-	def bucket_info(self, uri):
252
-		# For now reports only "Location". One day perhaps more.
253
-		response = {}
254
-		response['bucket-location'] = self.get_bucket_location(uri)
255
-		return response
256
-
257
-	def website_info(self, uri, bucket_location = None):
258
-		headers = SortedDict(ignore_case = True)
259
-		bucket = uri.bucket()
260
-		body = ""
261
-
262
-		request = self.create_request("BUCKET_LIST", bucket = bucket, extra="?website")
263
-		try:
264
-			response = self.send_request(request, body)
265
-			response['index_document'] = getTextFromXml(response['data'], ".//IndexDocument//Suffix")
266
-			response['error_document'] = getTextFromXml(response['data'], ".//ErrorDocument//Key")
267
-			response['website_endpoint'] = self.config.website_endpoint % {
268
-				"bucket" : uri.bucket(),
269
-				"location" : self.get_bucket_location(uri)}
270
-			return response
271
-		except S3Error, e:
272
-			if e.status == 404:
273
-				debug("Could not get /?website - website probably not configured for this bucket")
274
-				return None
275
-			raise
276
-
277
-	def website_create(self, uri, bucket_location = None):
278
-		headers = SortedDict(ignore_case = True)
279
-		bucket = uri.bucket()
280
-		body = '<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
281
-		body += '  <IndexDocument>'
282
-		body += ('    <Suffix>%s</Suffix>' % self.config.website_index)
283
-		body += '  </IndexDocument>'
284
-		if self.config.website_error:
285
-			body += '  <ErrorDocument>'
286
-			body += ('    <Key>%s</Key>' % self.config.website_error)
287
-			body += '  </ErrorDocument>'
288
-		body += '</WebsiteConfiguration>'
289
-
290
-		request = self.create_request("BUCKET_CREATE", bucket = bucket, extra="?website")
291
-		debug("About to send request '%s' with body '%s'" % (request, body))
292
-		response = self.send_request(request, body)
293
-		debug("Received response '%s'" % (response))
294
-
295
-		return response
296
-
297
-	def website_delete(self, uri, bucket_location = None):
298
-		headers = SortedDict(ignore_case = True)
299
-		bucket = uri.bucket()
300
-		body = ""
301
-
302
-		request = self.create_request("BUCKET_DELETE", bucket = bucket, extra="?website")
303
-		debug("About to send request '%s' with body '%s'" % (request, body))
304
-		response = self.send_request(request, body)
305
-		debug("Received response '%s'" % (response))
306
-
307
-		if response['status'] != 204:
308
-			raise S3ResponseError("Expected status 204: %s" % response)
309
-
310
-		return response
311
-
312
-	def object_put(self, filename, uri, extra_headers = None, extra_label = ""):
313
-		# TODO TODO
314
-		# Make it consistent with stream-oriented object_get()
315
-		if uri.type != "s3":
316
-			raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
317
-
318
-		if not os.path.isfile(filename):
319
-			raise InvalidFileError(u"%s is not a regular file" % unicodise(filename))
320
-		try:
321
-			file = open(filename, "rb")
322
-			size = os.stat(filename)[ST_SIZE]
323
-		except (IOError, OSError), e:
324
-			raise InvalidFileError(u"%s: %s" % (unicodise(filename), e.strerror))
325
-		headers = SortedDict(ignore_case = True)
326
-		if extra_headers:
327
-			headers.update(extra_headers)
328
-		headers["content-length"] = size
329
-		content_type = None
330
-		if self.config.guess_mime_type:
331
-			content_type = mimetypes.guess_type(filename)[0]
332
-		if not content_type:
333
-			content_type = self.config.default_mime_type
334
-		debug("Content-Type set to '%s'" % content_type)
335
-		headers["content-type"] = content_type
336
-		if self.config.acl_public:
337
-			headers["x-amz-acl"] = "public-read"
338
-		if self.config.reduced_redundancy:
339
-			headers["x-amz-storage-class"] = "REDUCED_REDUNDANCY"
340
-		request = self.create_request("OBJECT_PUT", uri = uri, headers = headers)
341
-		labels = { 'source' : unicodise(filename), 'destination' : unicodise(uri.uri()), 'extra' : extra_label }
342
-		response = self.send_file(request, file, labels)
343
-		return response
344
-
345
-	def object_get(self, uri, stream, start_position = 0, extra_label = ""):
346
-		if uri.type != "s3":
347
-			raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
348
-		request = self.create_request("OBJECT_GET", uri = uri)
349
-		labels = { 'source' : unicodise(uri.uri()), 'destination' : unicodise(stream.name), 'extra' : extra_label }
350
-		response = self.recv_file(request, stream, labels, start_position)
351
-		return response
352
-
353
-	def object_delete(self, uri):
354
-		if uri.type != "s3":
355
-			raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
356
-		request = self.create_request("OBJECT_DELETE", uri = uri)
357
-		response = self.send_request(request)
358
-		return response
359
-
360
-	def object_copy(self, src_uri, dst_uri, extra_headers = None):
361
-		if src_uri.type != "s3":
362
-			raise ValueError("Expected URI type 's3', got '%s'" % src_uri.type)
363
-		if dst_uri.type != "s3":
364
-			raise ValueError("Expected URI type 's3', got '%s'" % dst_uri.type)
365
-		headers = SortedDict(ignore_case = True)
366
-		headers['x-amz-copy-source'] = "/%s/%s" % (src_uri.bucket(), self.urlencode_string(src_uri.object()))
367
-		## TODO: For now COPY, later maybe add a switch?
368
-		headers['x-amz-metadata-directive'] = "COPY"
369
-		if self.config.acl_public:
370
-			headers["x-amz-acl"] = "public-read"
371
-		if self.config.reduced_redundancy:
372
-			headers["x-amz-storage-class"] = "REDUCED_REDUNDANCY"
373
-		# if extra_headers:
374
-		# 	headers.update(extra_headers)
375
-		request = self.create_request("OBJECT_PUT", uri = dst_uri, headers = headers)
376
-		response = self.send_request(request)
377
-		return response
378
-
379
-	def object_move(self, src_uri, dst_uri, extra_headers = None):
380
-		response_copy = self.object_copy(src_uri, dst_uri, extra_headers)
381
-		debug("Object %s copied to %s" % (src_uri, dst_uri))
382
-		if getRootTagName(response_copy["data"]) == "CopyObjectResult":
383
-			response_delete = self.object_delete(src_uri)
384
-			debug("Object %s deleted" % src_uri)
385
-		return response_copy
386
-
387
-	def object_info(self, uri):
388
-		request = self.create_request("OBJECT_HEAD", uri = uri)
389
-		response = self.send_request(request)
390
-		return response
391
-
392
-	def get_acl(self, uri):
393
-		if uri.has_object():
394
-			request = self.create_request("OBJECT_GET", uri = uri, extra = "?acl")
395
-		else:
396
-			request = self.create_request("BUCKET_LIST", bucket = uri.bucket(), extra = "?acl")
397
-
398
-		response = self.send_request(request)
399
-		acl = ACL(response['data'])
400
-		return acl
401
-
402
-	def set_acl(self, uri, acl):
403
-		if uri.has_object():
404
-			request = self.create_request("OBJECT_PUT", uri = uri, extra = "?acl")
405
-		else:
406
-			request = self.create_request("BUCKET_CREATE", bucket = uri.bucket(), extra = "?acl")
407
-
408
-		body = str(acl)
409
-		debug(u"set_acl(%s): acl-xml: %s" % (uri, body))
410
-		response = self.send_request(request, body)
411
-		return response
412
-
413
-	def get_accesslog(self, uri):
414
-		request = self.create_request("BUCKET_LIST", bucket = uri.bucket(), extra = "?logging")
415
-		response = self.send_request(request)
416
-		accesslog = AccessLog(response['data'])
417
-		return accesslog
418
-
419
-	def set_accesslog_acl(self, uri):
420
-		acl = self.get_acl(uri)
421
-		debug("Current ACL(%s): %s" % (uri.uri(), str(acl)))
422
-		acl.appendGrantee(GranteeLogDelivery("READ_ACP"))
423
-		acl.appendGrantee(GranteeLogDelivery("WRITE"))
424
-		debug("Updated ACL(%s): %s" % (uri.uri(), str(acl)))
425
-		self.set_acl(uri, acl)
426
-
427
-	def set_accesslog(self, uri, enable, log_target_prefix_uri = None, acl_public = False):
428
-		request = self.create_request("BUCKET_CREATE", bucket = uri.bucket(), extra = "?logging")
429
-		accesslog = AccessLog()
430
-		if enable:
431
-			accesslog.enableLogging(log_target_prefix_uri)
432
-			accesslog.setAclPublic(acl_public)
433
-		else:
434
-			accesslog.disableLogging()
435
-		body = str(accesslog)
436
-		debug(u"set_accesslog(%s): accesslog-xml: %s" % (uri, body))
437
-		try:
438
-			response = self.send_request(request, body)
439
-		except S3Error, e:
440
-			if e.info['Code'] == "InvalidTargetBucketForLogging":
441
-				info("Setting up log-delivery ACL for target bucket.")
442
-				self.set_accesslog_acl(S3Uri("s3://%s" % log_target_prefix_uri.bucket()))
443
-				response = self.send_request(request, body)
444
-			else:
445
-				raise
446
-		return accesslog, response
447
-
448
-	## Low level methods
449
-	def urlencode_string(self, string, urlencoding_mode = None):
450
-		if type(string) == unicode:
451
-			string = string.encode("utf-8")
452
-
453
-		if urlencoding_mode is None:
454
-			urlencoding_mode = self.config.urlencoding_mode
455
-
456
-		if urlencoding_mode == "verbatim":
457
-			## Don't do any pre-processing
458
-			return string
459
-
460
-		encoded = ""
461
-		## List of characters that must be escaped for S3
462
-		## Haven't found this in any official docs
463
-		## but my tests show it's more or less correct.
464
-		## If you start getting InvalidSignature errors
465
-		## from S3 check the error headers returned
466
-		## from S3 to see whether the list hasn't
467
-		## changed.
468
-		for c in string:	# I'm not sure how to know in what encoding 
469
-					# 'object' is. Apparently "type(object)==str"
470
-					# but the contents is a string of unicode
471
-					# bytes, e.g. '\xc4\x8d\xc5\xafr\xc3\xa1k'
472
-					# Don't know what it will do on non-utf8 
473
-					# systems.
474
-					#           [hope that sounds reassuring ;-)]
475
-			o = ord(c)
476
-			if (o < 0x20 or o == 0x7f):
477
-				if urlencoding_mode == "fixbucket":
478
-					encoded += "%%%02X" % o
479
-				else:
480
-					error(u"Non-printable character 0x%02x in: %s" % (o, string))
481
-					error(u"Please report it to s3tools-bugs@lists.sourceforge.net")
482
-					encoded += replace_nonprintables(c)
483
-			elif (o == 0x20 or	# Space and below
484
-			    o == 0x22 or	# "
485
-			    o == 0x23 or	# #
486
-			    o == 0x25 or	# % (escape character)
487
-			    o == 0x26 or	# &
488
-			    o == 0x2B or	# + (or it would become <space>)
489
-			    o == 0x3C or	# <
490
-			    o == 0x3E or	# >
491
-			    o == 0x3F or	# ?
492
-			    o == 0x60 or	# `
493
-			    o >= 123):   	# { and above, including >= 128 for UTF-8
494
-				encoded += "%%%02X" % o
495
-			else:
496
-				encoded += c
497
-		debug("String '%s' encoded to '%s'" % (string, encoded))
498
-		return encoded
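
For a feel of what that escape list does in the default mode, here is a reduced sketch covering printable ASCII input only (the real method also handles control characters, unicode input, and the fixbucket mode):

    def s3_escape(s):
        escaped = ""
        for c in s:
            o = ord(c)
            # Space, the listed punctuation, and everything from '{' (123) upwards.
            if o == 0x20 or o in (0x22, 0x23, 0x25, 0x26, 0x2B, 0x3C, 0x3E, 0x3F, 0x60) or o >= 123:
                escaped += "%%%02X" % o
            else:
                escaped += c
        return escaped

    print(s3_escape("a+b c.txt"))   # a%2Bb%20c.txt
    print(s3_escape("100%.log"))    # 100%25.log
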
499
-
500
-	def create_request(self, operation, uri = None, bucket = None, object = None, headers = None, extra = None, **params):
501
-		resource = { 'bucket' : None, 'uri' : "/" }
502
-
503
-		if uri and (bucket or object):
504
-			raise ValueError("Both 'uri' and either 'bucket' or 'object' parameters supplied")
505
-		## If URI is given use that instead of bucket/object parameters
506
-		if uri:
507
-			bucket = uri.bucket()
508
-			object = uri.has_object() and uri.object() or None
509
-
510
-		if bucket:
511
-			resource['bucket'] = str(bucket)
512
-			if object:
513
-				resource['uri'] = "/" + self.urlencode_string(object)
514
-		if extra:
515
-			resource['uri'] += extra
516
-
517
-		method_string = S3.http_methods.getkey(S3.operations[operation] & S3.http_methods["MASK"])
518
-
519
-		request = S3Request(self, method_string, resource, headers, params)
520
-
521
-		debug("CreateRequest: resource[uri]=" + resource['uri'])
522
-		return request
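
This is where the bit layout of the operations table pays off: masking with http_methods["MASK"] recovers the HTTP verb, and targets["MASK"] would recover the target. The decomposition with the same constants:

    # Low byte encodes the HTTP method, second byte the target (see the BidirMaps above).
    PUT, METHOD_MASK = 0x02, 0x0F
    OBJECT, TARGET_MASK = 0x0400, 0x0700

    OBJECT_PUT = OBJECT | PUT              # == 0x0402
    print(hex(OBJECT_PUT & METHOD_MASK))   # 0x2   -> "PUT"
    print(hex(OBJECT_PUT & TARGET_MASK))   # 0x400 -> "OBJECT"
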
523
-	
524
-	def _fail_wait(self, retries):
525
-		# Wait a few seconds. The more it fails the more we wait.
526
-		return (self._max_retries - retries + 1) * 3
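
Since retries counts down from _max_retries (5), the formula above yields a linear back-off:

    _max_retries = 5
    print([(_max_retries - retries + 1) * 3 for retries in range(_max_retries, 0, -1)])
    # [3, 6, 9, 12, 15]  -- seconds waited after the 1st..5th failure
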
527
-		
528
-	def send_request(self, request, body = None, retries = _max_retries):
529
-		method_string, resource, headers = request.get_triplet()
530
-		debug("Processing request, please wait...")
531
-		if not headers.has_key('content-length'):
532
-			headers['content-length'] = body and len(body) or 0
533
-		try:
534
-			# "Stringify" all headers
535
-			for header in headers.keys():
536
-				headers[header] = str(headers[header])
537
-			conn = self.get_connection(resource['bucket'])
538
-			conn.request(method_string, self.format_uri(resource), body, headers)
539
-			response = {}
540
-			http_response = conn.getresponse()
541
-			response["status"] = http_response.status
542
-			response["reason"] = http_response.reason
543
-			response["headers"] = convertTupleListToDict(http_response.getheaders())
544
-			response["data"] =  http_response.read()
545
-			debug("Response: " + str(response))
546
-			conn.close()
547
-		except Exception, e:
548
-			if retries:
549
-				warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
550
-				warning("Waiting %d sec..." % self._fail_wait(retries))
551
-				time.sleep(self._fail_wait(retries))
552
-				return self.send_request(request, body, retries - 1)
553
-			else:
554
-				raise S3RequestError("Request failed for: %s" % resource['uri'])
555
-
556
-		if response["status"] == 307:
557
-			## RedirectPermanent
558
-			redir_bucket = getTextFromXml(response['data'], ".//Bucket")
559
-			redir_hostname = getTextFromXml(response['data'], ".//Endpoint")
560
-			self.set_hostname(redir_bucket, redir_hostname)
561
-			warning("Redirected to: %s" % (redir_hostname))
562
-			return self.send_request(request, body)
563
-
564
-		if response["status"] >= 500:
565
-			e = S3Error(response)
566
-			if retries:
567
-				warning(u"Retrying failed request: %s" % resource['uri'])
568
-				warning(unicode(e))
569
-				warning("Waiting %d sec..." % self._fail_wait(retries))
570
-				time.sleep(self._fail_wait(retries))
571
-				return self.send_request(request, body, retries - 1)
572
-			else:
573
-				raise e
574
-
575
-		if response["status"] < 200 or response["status"] > 299:
576
-			raise S3Error(response)
577
-
578
-		return response
579
-
580
-	def send_file(self, request, file, labels, throttle = 0, retries = _max_retries):
581
-		method_string, resource, headers = request.get_triplet()
582
-		size_left = size_total = headers.get("content-length")
583
-		if self.config.progress_meter:
584
-			progress = self.config.progress_class(labels, size_total)
585
-		else:
586
-			info("Sending file '%s', please wait..." % file.name)
587
-		timestamp_start = time.time()
588
-		try:
589
-			conn = self.get_connection(resource['bucket'])
590
-			conn.connect()
591
-			conn.putrequest(method_string, self.format_uri(resource))
592
-			for header in headers.keys():
593
-				conn.putheader(header, str(headers[header]))
594
-			conn.endheaders()
595
-		except Exception, e:
596
-			if self.config.progress_meter:
597
-				progress.done("failed")
598
-			if retries:
599
-				warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
600
-				warning("Waiting %d sec..." % self._fail_wait(retries))
601
-				time.sleep(self._fail_wait(retries))
602
-				# Connection error -> same throttle value
603
-				return self.send_file(request, file, labels, throttle, retries - 1)
604
-			else:
605
-				raise S3UploadError("Upload failed for: %s" % resource['uri'])
606
-		file.seek(0)
607
-		md5_hash = md5()
608
-		try:
609
-			while (size_left > 0):
610
-				#debug("SendFile: Reading up to %d bytes from '%s'" % (self.config.send_chunk, file.name))
611
-				data = file.read(self.config.send_chunk)
612
-				md5_hash.update(data)
613
-				conn.send(data)
614
-				if self.config.progress_meter:
615
-					progress.update(delta_position = len(data))
616
-				size_left -= len(data)
617
-				if throttle:
618
-					time.sleep(throttle)
619
-			md5_computed = md5_hash.hexdigest()
620
-			response = {}
621
-			http_response = conn.getresponse()
622
-			response["status"] = http_response.status
623
-			response["reason"] = http_response.reason
624
-			response["headers"] = convertTupleListToDict(http_response.getheaders())
625
-			response["data"] = http_response.read()
626
-			response["size"] = size_total
627
-			conn.close()
628
-			debug(u"Response: %s" % response)
629
-		except Exception, e:
630
-			if self.config.progress_meter:
631
-				progress.done("failed")
632
-			if retries:
633
-				if retries < self._max_retries:
634
-					throttle = throttle and throttle * 5 or 0.01
635
-				warning("Upload failed: %s (%s)" % (resource['uri'], e))
636
-				warning("Retrying on lower speed (throttle=%0.2f)" % throttle)
637
-				warning("Waiting %d sec..." % self._fail_wait(retries))
638
-				time.sleep(self._fail_wait(retries))
639
-				# Connection error -> same throttle value
640
-				return self.send_file(request, file, labels, throttle, retries - 1)
641
-			else:
642
-				debug("Giving up on '%s' %s" % (file.name, e))
643
-				raise S3UploadError("Upload failed for: %s" % resource['uri'])
644
-
645
-		timestamp_end = time.time()
646
-		response["elapsed"] = timestamp_end - timestamp_start
647
-		response["speed"] = response["elapsed"] and float(response["size"]) / response["elapsed"] or float(-1)
648
-
649
-		if self.config.progress_meter:
650
-			## The above conn.close() takes some time -> update() progress meter
651
-			## to correct the average speed. Otherwise people will complain that 
652
-			## 'progress' and response["speed"] are inconsistent ;-)
653
-			progress.update()
654
-			progress.done("done")
655
-
656
-		if response["status"] == 307:
657
-			## RedirectPermanent
658
-			redir_bucket = getTextFromXml(response['data'], ".//Bucket")
659
-			redir_hostname = getTextFromXml(response['data'], ".//Endpoint")
660
-			self.set_hostname(redir_bucket, redir_hostname)
661
-			warning("Redirected to: %s" % (redir_hostname))
662
-			return self.send_file(request, file, labels)
663
-
664
-		# S3 from time to time doesn't send ETag back in a response :-(
665
-		# Force re-upload here.
666
-		if not response['headers'].has_key('etag'):
667
-			response['headers']['etag'] = '' 
668
-
669
-		if response["status"] < 200 or response["status"] > 299:
670
-			try_retry = False
671
-			if response["status"] >= 500:
672
-				## AWS internal error - retry
673
-				try_retry = True
674
-			elif response["status"] >= 400:
675
-				err = S3Error(response)
676
-				## Retriable client error?
677
-				if err.code in [ 'BadDigest', 'OperationAborted', 'TokenRefreshRequired', 'RequestTimeout' ]:
678
-					try_retry = True
679
-
680
-			if try_retry:
681
-				if retries:
682
-					warning("Upload failed: %s (%s)" % (resource['uri'], S3Error(response)))
683
-					warning("Waiting %d sec..." % self._fail_wait(retries))
684
-					time.sleep(self._fail_wait(retries))
685
-					return self.send_file(request, file, labels, throttle, retries - 1)
686
-				else:
687
-					warning("Too many failures. Giving up on '%s'" % (file.name))
688
-					raise S3UploadError
689
-
690
-			## Non-recoverable error
691
-			raise S3Error(response)
692
-
693
-		debug("MD5 sums: computed=%s, received=%s" % (md5_computed, response["headers"]["etag"]))
694
-		if response["headers"]["etag"].strip('"\'') != md5_hash.hexdigest():
695
-			warning("MD5 Sums don't match!")
696
-			if retries:
697
-				warning("Retrying upload of %s" % (file.name))
698
-				return self.send_file(request, file, labels, throttle, retries - 1)
699
-			else:
700
-				warning("Too many failures. Giving up on '%s'" % (file.name))
701
-				raise S3UploadError
702
-
703
-		return response
704
-
705
-	def recv_file(self, request, stream, labels, start_position = 0, retries = _max_retries):
706
-		method_string, resource, headers = request.get_triplet()
707
-		if self.config.progress_meter:
708
-			progress = self.config.progress_class(labels, 0)
709
-		else:
710
-			info("Receiving file '%s', please wait..." % stream.name)
711
-		timestamp_start = time.time()
712
-		try:
713
-			conn = self.get_connection(resource['bucket'])
714
-			conn.connect()
715
-			conn.putrequest(method_string, self.format_uri(resource))
716
-			for header in headers.keys():
717
-				conn.putheader(header, str(headers[header]))
718
-			if start_position > 0:
719
-				debug("Requesting Range: %d .. end" % start_position)
720
-				conn.putheader("Range", "bytes=%d-" % start_position)
721
-			conn.endheaders()
722
-			response = {}
723
-			http_response = conn.getresponse()
724
-			response["status"] = http_response.status
725
-			response["reason"] = http_response.reason
726
-			response["headers"] = convertTupleListToDict(http_response.getheaders())
727
-			debug("Response: %s" % response)
728
-		except Exception, e:
729
-			if self.config.progress_meter:
730
-				progress.done("failed")
731
-			if retries:
732
-				warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
733
-				warning("Waiting %d sec..." % self._fail_wait(retries))
734
-				time.sleep(self._fail_wait(retries))
735
-				# Connection error -> retry from start_position
736
-				return self.recv_file(request, stream, labels, start_position, retries - 1)
737
-			else:
738
-				raise S3DownloadError("Download failed for: %s" % resource['uri'])
739
-
740
-		if response["status"] == 307:
741
-			## TemporaryRedirect (HTTP 307)
742
-			response['data'] = http_response.read()
743
-			redir_bucket = getTextFromXml(response['data'], ".//Bucket")
744
-			redir_hostname = getTextFromXml(response['data'], ".//Endpoint")
745
-			self.set_hostname(redir_bucket, redir_hostname)
746
-			warning("Redirected to: %s" % (redir_hostname))
747
-			return self.recv_file(request, stream, labels)
748
-
749
-		if response["status"] < 200 or response["status"] > 299:
750
-			raise S3Error(response)
751
-
752
-		if start_position == 0:
753
-			# Only compute MD5 on the fly if we're downloading from the beginning.
754
-			# Otherwise we'd get nonsense.
755
-			md5_hash = md5()
756
-		size_left = int(response["headers"]["content-length"])
757
-		size_total = start_position + size_left
758
-		current_position = start_position
759
-
760
-		if self.config.progress_meter:
761
-			progress.total_size = size_total
762
-			progress.initial_position = current_position
763
-			progress.current_position = current_position
764
-
765
-		try:
766
-			while (current_position < size_total):
767
-				this_chunk = size_left > self.config.recv_chunk and self.config.recv_chunk or size_left
768
-				data = http_response.read(this_chunk)
769
-				stream.write(data)
770
-				if start_position == 0:
771
-					md5_hash.update(data)
772
-				current_position += len(data)
773
-				## Call progress meter from here...
774
-				if self.config.progress_meter:
775
-					progress.update(delta_position = len(data))
776
-			conn.close()
777
-		except Exception, e:
778
-			if self.config.progress_meter:
779
-				progress.done("failed")
780
-			if retries:
781
-				warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
782
-				warning("Waiting %d sec..." % self._fail_wait(retries))
783
-				time.sleep(self._fail_wait(retries))
784
-				# Connection error -> resume from current_position
785
-				return self.recv_file(request, stream, labels, current_position, retries - 1)
786
-			else:
787
-				raise S3DownloadError("Download failed for: %s" % resource['uri'])
788
-
789
-		stream.flush()
790
-		timestamp_end = time.time()
791
-
792
-		if self.config.progress_meter:
793
-			## The above stream.flush() may take some time -> update() progress meter
794
-			## to correct the average speed. Otherwise people will complain that 
795
-			## 'progress' and response["speed"] are inconsistent ;-)
796
-			progress.update()
797
-			progress.done("done")
798
-
799
-		if start_position == 0:
800
-			# Only compute MD5 on the fly if we were downloading from the beginning
801
-			response["md5"] = md5_hash.hexdigest()
802
-		else:
803
-			# Otherwise try to compute MD5 of the output file
804
-			try:
805
-				response["md5"] = hash_file_md5(stream.name)
806
-			except IOError, e:
807
-				if e.errno != errno.ENOENT:
808
-					warning("Unable to open file: %s: %s" % (stream.name, e))
809
-				warning("Unable to verify MD5. Assume it matches.")
810
-				response["md5"] = response["headers"]["etag"]
811
-
812
-		response["md5match"] = response["headers"]["etag"].find(response["md5"]) >= 0
813
-		response["elapsed"] = timestamp_end - timestamp_start
814
-		response["size"] = current_position
815
-		response["speed"] = response["elapsed"] and float(response["size"]) / response["elapsed"] or float(-1)
816
-		if response["size"] != start_position + long(response["headers"]["content-length"]):
817
-			warning("Reported size (%s) does not match received size (%s)" % (
818
-				start_position + long(response["headers"]["content-length"]), response["size"]))
819
-		debug("ReceiveFile: Computed MD5 = %s" % response["md5"])
820
-		if not response["md5match"]:
821
-			warning("MD5 signatures do not match: computed=%s, received=%s" % (
822
-				response["md5"], response["headers"]["etag"]))
823
-		return response
86
+    http_methods = BidirMap(
87
+        GET = 0x01,
88
+        PUT = 0x02,
89
+        HEAD = 0x04,
90
+        DELETE = 0x08,
91
+        MASK = 0x0F,
92
+        )
93
+
94
+    targets = BidirMap(
95
+        SERVICE = 0x0100,
96
+        BUCKET = 0x0200,
97
+        OBJECT = 0x0400,
98
+        MASK = 0x0700,
99
+        )
100
+
101
+    operations = BidirMap(
102
+        UNDEFINED = 0x0000,
103
+        LIST_ALL_BUCKETS = targets["SERVICE"] | http_methods["GET"],
104
+        BUCKET_CREATE = targets["BUCKET"] | http_methods["PUT"],
105
+        BUCKET_LIST = targets["BUCKET"] | http_methods["GET"],
106
+        BUCKET_DELETE = targets["BUCKET"] | http_methods["DELETE"],
107
+        OBJECT_PUT = targets["OBJECT"] | http_methods["PUT"],
108
+        OBJECT_GET = targets["OBJECT"] | http_methods["GET"],
109
+        OBJECT_HEAD = targets["OBJECT"] | http_methods["HEAD"],
110
+        OBJECT_DELETE = targets["OBJECT"] | http_methods["DELETE"],
111
+    )
112
+
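For illustration, each operation code above packs the HTTP method into the low nibble and the target into the high byte, so a single integer carries both; create_request() later recovers the method with a bitwise AND against the MASK entries. A minimal sketch of that arithmetic with plain ints (no BidirMap needed):

    op = 0x0400 | 0x02              # OBJECT_PUT = targets["OBJECT"] | http_methods["PUT"]
    assert op & 0x0F == 0x02        # http_methods["MASK"] isolates the method (PUT)
    assert op & 0x0700 == 0x0400    # targets["MASK"] isolates the target (OBJECT)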
113
+    codes = {
114
+        "NoSuchBucket" : "Bucket '%s' does not exist",
115
+        "AccessDenied" : "Access to bucket '%s' was denied",
116
+        "BucketAlreadyExists" : "Bucket '%s' already exists",
117
+        }
118
+
119
+    ## S3 sometimes sends HTTP-307 response
120
+    redir_map = {}
121
+
122
+    ## Maximum attempts of re-issuing failed requests
123
+    _max_retries = 5
124
+
125
+    def __init__(self, config):
126
+        self.config = config
127
+
128
+    def get_connection(self, bucket):
129
+        if self.config.proxy_host != "":
130
+            return httplib.HTTPConnection(self.config.proxy_host, self.config.proxy_port)
131
+        else:
132
+            if self.config.use_https:
133
+                return httplib.HTTPSConnection(self.get_hostname(bucket))
134
+            else:
135
+                return httplib.HTTPConnection(self.get_hostname(bucket))
136
+
137
+    def get_hostname(self, bucket):
138
+        if bucket and check_bucket_name_dns_conformity(bucket):
139
+            if self.redir_map.has_key(bucket):
140
+                host = self.redir_map[bucket]
141
+            else:
142
+                host = getHostnameFromBucket(bucket)
143
+        else:
144
+            host = self.config.host_base
145
+        debug('get_hostname(%s): %s' % (bucket, host))
146
+        return host
147
+
148
+    def set_hostname(self, bucket, redir_hostname):
149
+        self.redir_map[bucket] = redir_hostname
150
+
151
+    def format_uri(self, resource):
152
+        if resource['bucket'] and not check_bucket_name_dns_conformity(resource['bucket']):
153
+            uri = "/%s%s" % (resource['bucket'], resource['uri'])
154
+        else:
155
+            uri = resource['uri']
156
+        if self.config.proxy_host != "":
157
+            uri = "http://%s%s" % (self.get_hostname(resource['bucket']), uri)
158
+        debug('format_uri(): ' + uri)
159
+        return uri
160
+
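get_hostname() and format_uri() together implement S3's dual addressing: a DNS-conformant bucket name goes into the hostname (virtual-host style), anything else stays on host_base with the bucket prepended to the path. A sketch with hypothetical bucket names, where dns_ok stands in for check_bucket_name_dns_conformity():

    def example_url(bucket, key, dns_ok):
        # Illustration only; the real code splits this across the two methods above.
        if dns_ok:
            return "http://%s.s3.amazonaws.com/%s" % (bucket, key)   # virtual-host style
        return "http://s3.amazonaws.com/%s/%s" % (bucket, key)       # path style

    assert example_url("my-bucket", "key", True) == "http://my-bucket.s3.amazonaws.com/key"
    assert example_url("My_Bucket", "key", False) == "http://s3.amazonaws.com/My_Bucket/key"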
161
+    ## Commands / Actions
162
+    def list_all_buckets(self):
163
+        request = self.create_request("LIST_ALL_BUCKETS")
164
+        response = self.send_request(request)
165
+        response["list"] = getListFromXml(response["data"], "Bucket")
166
+        return response
167
+
168
+    def bucket_list(self, bucket, prefix = None, recursive = None):
169
+        def _list_truncated(data):
170
+            ## <IsTruncated> can either be "true" or "false" or be missing completely
171
+            is_truncated = getTextFromXml(data, ".//IsTruncated") or "false"
172
+            return is_truncated.lower() != "false"
173
+
174
+        def _get_contents(data):
175
+            return getListFromXml(data, "Contents")
176
+
177
+        def _get_common_prefixes(data):
178
+            return getListFromXml(data, "CommonPrefixes")
179
+
180
+        uri_params = {}
181
+        truncated = True
182
+        list = []
183
+        prefixes = []
184
+
185
+        while truncated:
186
+            response = self.bucket_list_noparse(bucket, prefix, recursive, uri_params)
187
+            current_list = _get_contents(response["data"])
188
+            current_prefixes = _get_common_prefixes(response["data"])
189
+            truncated = _list_truncated(response["data"])
190
+            if truncated:
191
+                if current_list:
192
+                    uri_params['marker'] = self.urlencode_string(current_list[-1]["Key"])
193
+                else:
194
+                    uri_params['marker'] = self.urlencode_string(current_prefixes[-1]["Prefix"])
195
+                debug("Listing continues after '%s'" % uri_params['marker'])
196
+
197
+            list += current_list
198
+            prefixes += current_prefixes
199
+
200
+        response['list'] = list
201
+        response['common_prefixes'] = prefixes
202
+        return response
203
+
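bucket_list() hides S3's page limit (at most 1000 keys per response): while <IsTruncated> is true it re-issues the request with 'marker' set to the last key, or last common prefix, of the current page, then concatenates the pages. The contract in miniature, with made-up pages (the real code also url-encodes the marker):

    pages = [{"keys": ["a", "b"], "truncated": True},
             {"keys": ["c"],      "truncated": False}]
    collected, marker = [], None
    for page in pages:
        if page["truncated"]:
            marker = page["keys"][-1]   # the next request resumes *after* this key
        collected += page["keys"]
    assert collected == ["a", "b", "c"] and marker == "b"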
204
+    def bucket_list_noparse(self, bucket, prefix = None, recursive = None, uri_params = {}):
205
+        if prefix:
206
+            uri_params['prefix'] = self.urlencode_string(prefix)
207
+        if not self.config.recursive and not recursive:
208
+            uri_params['delimiter'] = "/"
209
+        request = self.create_request("BUCKET_LIST", bucket = bucket, **uri_params)
210
+        response = self.send_request(request)
211
+        #debug(response)
212
+        return response
213
+
214
+    def bucket_create(self, bucket, bucket_location = None):
215
+        headers = SortedDict(ignore_case = True)
216
+        body = ""
217
+        if bucket_location and bucket_location.strip().upper() != "US":
218
+            bucket_location = bucket_location.strip()
219
+            if bucket_location.upper() == "EU":
220
+                bucket_location = bucket_location.upper()
221
+            else:
222
+                bucket_location = bucket_location.lower()
223
+            body  = "<CreateBucketConfiguration><LocationConstraint>"
224
+            body += bucket_location
225
+            body += "</LocationConstraint></CreateBucketConfiguration>"
226
+            debug("bucket_location: " + body)
227
+            check_bucket_name(bucket, dns_strict = True)
228
+        else:
229
+            check_bucket_name(bucket, dns_strict = False)
230
+        if self.config.acl_public:
231
+            headers["x-amz-acl"] = "public-read"
232
+        request = self.create_request("BUCKET_CREATE", bucket = bucket, headers = headers)
233
+        response = self.send_request(request, body)
234
+        return response
235
+
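For any non-US location the request body above serialises to a one-line CreateBucketConfiguration document; for the default US region the body stays empty. With a hypothetical location of eu-west-1 the PUT body is:

    body = ("<CreateBucketConfiguration><LocationConstraint>"
            "eu-west-1"
            "</LocationConstraint></CreateBucketConfiguration>")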
236
+    def bucket_delete(self, bucket):
237
+        request = self.create_request("BUCKET_DELETE", bucket = bucket)
238
+        response = self.send_request(request)
239
+        return response
240
+
241
+    def get_bucket_location(self, uri):
242
+        request = self.create_request("BUCKET_LIST", bucket = uri.bucket(), extra = "?location")
243
+        response = self.send_request(request)
244
+        location = getTextFromXml(response['data'], "LocationConstraint")
245
+        if not location or location in [ "", "US" ]:
246
+            location = "us-east-1"
247
+        elif location == "EU":
248
+            location = "eu-west-1"
249
+        return location
250
+
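get_bucket_location() normalises the legacy LocationConstraint values S3 returns into region names. The mapping restated as a self-contained sketch:

    def normalize_location(location):
        if not location or location == "US":
            return "us-east-1"          # empty / "US" is the classic region
        if location == "EU":
            return "eu-west-1"
        return location                 # newer regions come back verbatim

    assert normalize_location("") == "us-east-1"
    assert normalize_location("EU") == "eu-west-1"
    assert normalize_location("ap-southeast-1") == "ap-southeast-1"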
251
+    def bucket_info(self, uri):
252
+        # For now reports only "Location". One day perhaps more.
253
+        response = {}
254
+        response['bucket-location'] = self.get_bucket_location(uri)
255
+        return response
256
+
257
+    def website_info(self, uri, bucket_location = None):
258
+        headers = SortedDict(ignore_case = True)
259
+        bucket = uri.bucket()
260
+        body = ""
261
+
262
+        request = self.create_request("BUCKET_LIST", bucket = bucket, extra="?website")
263
+        try:
264
+            response = self.send_request(request, body)
265
+            response['index_document'] = getTextFromXml(response['data'], ".//IndexDocument//Suffix")
266
+            response['error_document'] = getTextFromXml(response['data'], ".//ErrorDocument//Key")
267
+            response['website_endpoint'] = self.config.website_endpoint % {
268
+                "bucket" : uri.bucket(),
269
+                "location" : self.get_bucket_location(uri)}
270
+            return response
271
+        except S3Error, e:
272
+            if e.status == 404:
273
+                debug("Could not get /?website - website probably not configured for this bucket")
274
+                return None
275
+            raise
276
+
277
+    def website_create(self, uri, bucket_location = None):
278
+        headers = SortedDict(ignore_case = True)
279
+        bucket = uri.bucket()
280
+        body = '<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
281
+        body += '  <IndexDocument>'
282
+        body += ('    <Suffix>%s</Suffix>' % self.config.website_index)
283
+        body += '  </IndexDocument>'
284
+        if self.config.website_error:
285
+            body += '  <ErrorDocument>'
286
+            body += ('    <Key>%s</Key>' % self.config.website_error)
287
+            body += '  </ErrorDocument>'
288
+        body += '</WebsiteConfiguration>'
289
+
290
+        request = self.create_request("BUCKET_CREATE", bucket = bucket, extra="?website")
291
+        debug("About to send request '%s' with body '%s'" % (request, body))
292
+        response = self.send_request(request, body)
293
+        debug("Received response '%s'" % (response))
294
+
295
+        return response
296
+
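The concatenation above produces a single-line document. Assuming hypothetical config values website_index = "index.html" and website_error = "404.html", the body PUT to /?website is:

    ('<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
     '  <IndexDocument>    <Suffix>index.html</Suffix>  </IndexDocument>'
     '  <ErrorDocument>    <Key>404.html</Key>  </ErrorDocument>'
     '</WebsiteConfiguration>')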
297
+    def website_delete(self, uri, bucket_location = None):
298
+        headers = SortedDict(ignore_case = True)
299
+        bucket = uri.bucket()
300
+        body = ""
301
+
302
+        request = self.create_request("BUCKET_DELETE", bucket = bucket, extra="?website")
303
+        debug("About to send request '%s' with body '%s'" % (request, body))
304
+        response = self.send_request(request, body)
305
+        debug("Received response '%s'" % (response))
306
+
307
+        if response['status'] != 204:
308
+            raise S3ResponseError("Expected status 204: %s" % response)
309
+
310
+        return response
311
+
312
+    def object_put(self, filename, uri, extra_headers = None, extra_label = ""):
313
+        # TODO TODO
314
+        # Make it consistent with stream-oriented object_get()
315
+        if uri.type != "s3":
316
+            raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
317
+
318
+        if not os.path.isfile(filename):
319
+            raise InvalidFileError(u"%s is not a regular file" % unicodise(filename))
320
+        try:
321
+            file = open(filename, "rb")
322
+            size = os.stat(filename)[ST_SIZE]
323
+        except (IOError, OSError), e:
324
+            raise InvalidFileError(u"%s: %s" % (unicodise(filename), e.strerror))
325
+        headers = SortedDict(ignore_case = True)
326
+        if extra_headers:
327
+            headers.update(extra_headers)
328
+        headers["content-length"] = size
329
+        content_type = None
330
+        if self.config.guess_mime_type:
331
+            content_type = mimetypes.guess_type(filename)[0]
332
+        if not content_type:
333
+            content_type = self.config.default_mime_type
334
+        debug("Content-Type set to '%s'" % content_type)
335
+        headers["content-type"] = content_type
336
+        if self.config.acl_public:
337
+            headers["x-amz-acl"] = "public-read"
338
+        if self.config.reduced_redundancy:
339
+            headers["x-amz-storage-class"] = "REDUCED_REDUNDANCY"
340
+        request = self.create_request("OBJECT_PUT", uri = uri, headers = headers)
341
+        labels = { 'source' : unicodise(filename), 'destination' : unicodise(uri.uri()), 'extra' : extra_label }
342
+        response = self.send_file(request, file, labels)
343
+        return response
344
+
345
+    def object_get(self, uri, stream, start_position = 0, extra_label = ""):
346
+        if uri.type != "s3":
347
+            raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
348
+        request = self.create_request("OBJECT_GET", uri = uri)
349
+        labels = { 'source' : unicodise(uri.uri()), 'destination' : unicodise(stream.name), 'extra' : extra_label }
350
+        response = self.recv_file(request, stream, labels, start_position)
351
+        return response
352
+
353
+    def object_delete(self, uri):
354
+        if uri.type != "s3":
355
+            raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
356
+        request = self.create_request("OBJECT_DELETE", uri = uri)
357
+        response = self.send_request(request)
358
+        return response
359
+
360
+    def object_copy(self, src_uri, dst_uri, extra_headers = None):
361
+        if src_uri.type != "s3":
362
+            raise ValueError("Expected URI type 's3', got '%s'" % src_uri.type)
363
+        if dst_uri.type != "s3":
364
+            raise ValueError("Expected URI type 's3', got '%s'" % dst_uri.type)
365
+        headers = SortedDict(ignore_case = True)
366
+        headers['x-amz-copy-source'] = "/%s/%s" % (src_uri.bucket(), self.urlencode_string(src_uri.object()))
367
+        ## TODO: For now COPY, later maybe add a switch?
368
+        headers['x-amz-metadata-directive'] = "COPY"
369
+        if self.config.acl_public:
370
+            headers["x-amz-acl"] = "public-read"
371
+        if self.config.reduced_redundancy:
372
+            headers["x-amz-storage-class"] = "REDUCED_REDUNDANCY"
373
+        # if extra_headers:
374
+        #   headers.update(extra_headers)
375
+        request = self.create_request("OBJECT_PUT", uri = dst_uri, headers = headers)
376
+        response = self.send_request(request)
377
+        return response
378
+
379
+    def object_move(self, src_uri, dst_uri, extra_headers = None):
380
+        response_copy = self.object_copy(src_uri, dst_uri, extra_headers)
381
+        debug("Object %s copied to %s" % (src_uri, dst_uri))
382
+        if getRootTagName(response_copy["data"]) == "CopyObjectResult":
383
+            response_delete = self.object_delete(src_uri)
384
+            debug("Object %s deleted" % src_uri)
385
+        return response_copy
386
+
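S3 has no rename primitive, so object_move() is copy-then-delete: the source is deleted only after the copy response's root element confirms success with CopyObjectResult. The copy itself is an OBJECT_PUT whose headers name the source path-style; for a hypothetical source s3://src-bucket/dir/file.txt they would look like:

    headers = {"x-amz-copy-source": "/src-bucket/dir/file.txt",
               "x-amz-metadata-directive": "COPY"}   # destination keeps the source's metadata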
387
+    def object_info(self, uri):
388
+        request = self.create_request("OBJECT_HEAD", uri = uri)
389
+        response = self.send_request(request)
390
+        return response
391
+
392
+    def get_acl(self, uri):
393
+        if uri.has_object():
394
+            request = self.create_request("OBJECT_GET", uri = uri, extra = "?acl")
395
+        else:
396
+            request = self.create_request("BUCKET_LIST", bucket = uri.bucket(), extra = "?acl")
397
+
398
+        response = self.send_request(request)
399
+        acl = ACL(response['data'])
400
+        return acl
401
+
402
+    def set_acl(self, uri, acl):
403
+        if uri.has_object():
404
+            request = self.create_request("OBJECT_PUT", uri = uri, extra = "?acl")
405
+        else:
406
+            request = self.create_request("BUCKET_CREATE", bucket = uri.bucket(), extra = "?acl")
407
+
408
+        body = str(acl)
409
+        debug(u"set_acl(%s): acl-xml: %s" % (uri, body))
410
+        response = self.send_request(request, body)
411
+        return response
412
+
413
+    def get_accesslog(self, uri):
414
+        request = self.create_request("BUCKET_LIST", bucket = uri.bucket(), extra = "?logging")
415
+        response = self.send_request(request)
416
+        accesslog = AccessLog(response['data'])
417
+        return accesslog
418
+
419
+    def set_accesslog_acl(self, uri):
420
+        acl = self.get_acl(uri)
421
+        debug("Current ACL(%s): %s" % (uri.uri(), str(acl)))
422
+        acl.appendGrantee(GranteeLogDelivery("READ_ACP"))
423
+        acl.appendGrantee(GranteeLogDelivery("WRITE"))
424
+        debug("Updated ACL(%s): %s" % (uri.uri(), str(acl)))
425
+        self.set_acl(uri, acl)
426
+
427
+    def set_accesslog(self, uri, enable, log_target_prefix_uri = None, acl_public = False):
428
+        request = self.create_request("BUCKET_CREATE", bucket = uri.bucket(), extra = "?logging")
429
+        accesslog = AccessLog()
430
+        if enable:
431
+            accesslog.enableLogging(log_target_prefix_uri)
432
+            accesslog.setAclPublic(acl_public)
433
+        else:
434
+            accesslog.disableLogging()
435
+        body = str(accesslog)
436
+        debug(u"set_accesslog(%s): accesslog-xml: %s" % (uri, body))
437
+        try:
438
+            response = self.send_request(request, body)
439
+        except S3Error, e:
440
+            if e.info['Code'] == "InvalidTargetBucketForLogging":
441
+                info("Setting up log-delivery ACL for target bucket.")
442
+                self.set_accesslog_acl(S3Uri("s3://%s" % log_target_prefix_uri.bucket()))
443
+                response = self.send_request(request, body)
444
+            else:
445
+                raise
446
+        return accesslog, response
447
+
448
+    ## Low level methods
449
+    def urlencode_string(self, string, urlencoding_mode = None):
450
+        if type(string) == unicode:
451
+            string = string.encode("utf-8")
452
+
453
+        if urlencoding_mode is None:
454
+            urlencoding_mode = self.config.urlencoding_mode
455
+
456
+        if urlencoding_mode == "verbatim":
457
+            ## Don't do any pre-processing
458
+            return string
459
+
460
+        encoded = ""
461
+        ## List of characters that must be escaped for S3
462
+        ## Haven't found this in any official docs
463
+        ## but my tests show it's more or less correct.
464
+        ## If you start getting InvalidSignature errors
465
+        ## from S3 check the error headers returned
466
+        ## from S3 to see whether the list hasn't
467
+        ## changed.
468
+        for c in string:    # I'm not sure how to know in what encoding
469
+                    # 'object' is. Apparently "type(object)==str"
470
+                    # but the contents is a string of unicode
471
+                    # bytes, e.g. '\xc4\x8d\xc5\xafr\xc3\xa1k'
472
+                    # Don't know what it will do on non-utf8
473
+                    # systems.
474
+                    #           [hope that sounds reassuring ;-)]
475
+            o = ord(c)
476
+            if (o < 0x20 or o == 0x7f):
477
+                if urlencoding_mode == "fixbucket":
478
+                    encoded += "%%%02X" % o
479
+                else:
480
+                    error(u"Non-printable character 0x%02x in: %s" % (o, string))
481
+                    error(u"Please report it to s3tools-bugs@lists.sourceforge.net")
482
+                    encoded += replace_nonprintables(c)
483
+            elif (o == 0x20 or  # Space and below
484
+                o == 0x22 or    # "
485
+                o == 0x23 or    # #
486
+                o == 0x25 or    # % (escape character)
487
+                o == 0x26 or    # &
488
+                o == 0x2B or    # + (or it would become <space>)
489
+                o == 0x3C or    # <
490
+                o == 0x3E or    # >
491
+                o == 0x3F or    # ?
492
+                o == 0x60 or    # `
493
+                o >= 123):      # { and above, including >= 128 for UTF-8
494
+                encoded += "%%%02X" % o
495
+            else:
496
+                encoded += c
497
+        debug("String '%s' encoded to '%s'" % (string, encoded))
498
+        return encoded
499
+
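A worked example of the escaping rules above, restated outside the class as a sketch (the fixbucket special case is collapsed into plain %-encoding): unicode input is UTF-8 encoded first, then every reserved or non-ASCII byte becomes %XX while ordinary ASCII passes through.

    def enc(byte_string):
        out = ""
        for c in byte_string:
            o = ord(c)
            if o < 0x20 or o == 0x7f or o == 0x20 or c in '"#%&+<>?`' or o >= 123:
                out += "%%%02X" % o   # simplified: real code may replace control chars instead
            else:
                out += c
        return out

    # u"čaj+tea" is UTF-8 "\xc4\x8daj+tea", so:
    assert enc("\xc4\x8daj+tea") == "%C4%8Daj%2Btea"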
500
+    def create_request(self, operation, uri = None, bucket = None, object = None, headers = None, extra = None, **params):
501
+        resource = { 'bucket' : None, 'uri' : "/" }
502
+
503
+        if uri and (bucket or object):
504
+            raise ValueError("Both 'uri' and either 'bucket' or 'object' parameters supplied")
505
+        ## If URI is given use that instead of bucket/object parameters
506
+        if uri:
507
+            bucket = uri.bucket()
508
+            object = uri.has_object() and uri.object() or None
509
+
510
+        if bucket:
511
+            resource['bucket'] = str(bucket)
512
+            if object:
513
+                resource['uri'] = "/" + self.urlencode_string(object)
514
+        if extra:
515
+            resource['uri'] += extra
516
+
517
+        method_string = S3.http_methods.getkey(S3.operations[operation] & S3.http_methods["MASK"])
518
+
519
+        request = S3Request(self, method_string, resource, headers, params)
520
+
521
+        debug("CreateRequest: resource[uri]=" + resource['uri'])
522
+        return request
523
+
524
+    def _fail_wait(self, retries):
525
+        # Wait a few seconds. The more it fails the more we wait.
526
+        return (self._max_retries - retries + 1) * 3
527
+
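The retry delay grows linearly with the number of failures already seen: with _max_retries = 5 the successive waits are 3, 6, 9, 12 and 15 seconds, 45 seconds of back-off in total before a request is abandoned.

    waits = [(5 - retries + 1) * 3 for retries in (5, 4, 3, 2, 1)]
    assert waits == [3, 6, 9, 12, 15]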
528
+    def send_request(self, request, body = None, retries = _max_retries):
529
+        method_string, resource, headers = request.get_triplet()
530
+        debug("Processing request, please wait...")
531
+        if not headers.has_key('content-length'):
532
+            headers['content-length'] = body and len(body) or 0
533
+        try:
534
+            # "Stringify" all headers
535
+            for header in headers.keys():
536
+                headers[header] = str(headers[header])
537
+            conn = self.get_connection(resource['bucket'])
538
+            conn.request(method_string, self.format_uri(resource), body, headers)
539
+            response = {}
540
+            http_response = conn.getresponse()
541
+            response["status"] = http_response.status
542
+            response["reason"] = http_response.reason
543
+            response["headers"] = convertTupleListToDict(http_response.getheaders())
544
+            response["data"] =  http_response.read()
545
+            debug("Response: " + str(response))
546
+            conn.close()
547
+        except Exception, e:
548
+            if retries:
549
+                warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
550
+                warning("Waiting %d sec..." % self._fail_wait(retries))
551
+                time.sleep(self._fail_wait(retries))
552
+                return self.send_request(request, body, retries - 1)
553
+            else:
554
+                raise S3RequestError("Request failed for: %s" % resource['uri'])
555
+
556
+        if response["status"] == 307:
557
+            ## TemporaryRedirect (HTTP 307)
558
+            redir_bucket = getTextFromXml(response['data'], ".//Bucket")
559
+            redir_hostname = getTextFromXml(response['data'], ".//Endpoint")
560
+            self.set_hostname(redir_bucket, redir_hostname)
561
+            warning("Redirected to: %s" % (redir_hostname))
562
+            return self.send_request(request, body)
563
+
564
+        if response["status"] >= 500:
565
+            e = S3Error(response)
566
+            if retries:
567
+                warning(u"Retrying failed request: %s" % resource['uri'])
568
+                warning(unicode(e))
569
+                warning("Waiting %d sec..." % self._fail_wait(retries))
570
+                time.sleep(self._fail_wait(retries))
571
+                return self.send_request(request, body, retries - 1)
572
+            else:
573
+                raise e
574
+
575
+        if response["status"] < 200 or response["status"] > 299:
576
+            raise S3Error(response)
577
+
578
+        return response
579
+
580
+    def send_file(self, request, file, labels, throttle = 0, retries = _max_retries):
581
+        method_string, resource, headers = request.get_triplet()
582
+        size_left = size_total = headers.get("content-length")
583
+        if self.config.progress_meter:
584
+            progress = self.config.progress_class(labels, size_total)
585
+        else:
586
+            info("Sending file '%s', please wait..." % file.name)
587
+        timestamp_start = time.time()
588
+        try:
589
+            conn = self.get_connection(resource['bucket'])
590
+            conn.connect()
591
+            conn.putrequest(method_string, self.format_uri(resource))
592
+            for header in headers.keys():
593
+                conn.putheader(header, str(headers[header]))
594
+            conn.endheaders()
595
+        except Exception, e:
596
+            if self.config.progress_meter:
597
+                progress.done("failed")
598
+            if retries:
599
+                warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
600
+                warning("Waiting %d sec..." % self._fail_wait(retries))
601
+                time.sleep(self._fail_wait(retries))
602
+                # Connection error -> same throttle value
603
+                return self.send_file(request, file, labels, throttle, retries - 1)
604
+            else:
605
+                raise S3UploadError("Upload failed for: %s" % resource['uri'])
606
+        file.seek(0)
607
+        md5_hash = md5()
608
+        try:
609
+            while (size_left > 0):
610
+                #debug("SendFile: Reading up to %d bytes from '%s'" % (self.config.send_chunk, file.name))
611
+                data = file.read(self.config.send_chunk)
612
+                md5_hash.update(data)
613
+                conn.send(data)
614
+                if self.config.progress_meter:
615
+                    progress.update(delta_position = len(data))
616
+                size_left -= len(data)
617
+                if throttle:
618
+                    time.sleep(throttle)
619
+            md5_computed = md5_hash.hexdigest()
620
+            response = {}
621
+            http_response = conn.getresponse()
622
+            response["status"] = http_response.status
623
+            response["reason"] = http_response.reason
624
+            response["headers"] = convertTupleListToDict(http_response.getheaders())
625
+            response["data"] = http_response.read()
626
+            response["size"] = size_total
627
+            conn.close()
628
+            debug(u"Response: %s" % response)
629
+        except Exception, e:
630
+            if self.config.progress_meter:
631
+                progress.done("failed")
632
+            if retries:
633
+                if retries < self._max_retries:
634
+                    throttle = throttle and throttle * 5 or 0.01
635
+                warning("Upload failed: %s (%s)" % (resource['uri'], e))
636
+                warning("Retrying on lower speed (throttle=%0.2f)" % throttle)
637
+                warning("Waiting %d sec..." % self._fail_wait(retries))
638
+                time.sleep(self._fail_wait(retries))
639
+                # Connection error -> same throttle value
640
+                return self.send_file(request, file, labels, throttle, retries - 1)
641
+            else:
642
+                debug("Giving up on '%s' %s" % (file.name, e))
643
+                raise S3UploadError("Upload failed for: %s" % resource['uri'])
644
+
645
+        timestamp_end = time.time()
646
+        response["elapsed"] = timestamp_end - timestamp_start
647
+        response["speed"] = response["elapsed"] and float(response["size"]) / response["elapsed"] or float(-1)
648
+
649
+        if self.config.progress_meter:
650
+            ## The above conn.close() takes some time -> update() progress meter
651
+            ## to correct the average speed. Otherwise people will complain that
652
+            ## 'progress' and response["speed"] are inconsistent ;-)
653
+            progress.update()
654
+            progress.done("done")
655
+
656
+        if response["status"] == 307:
657
+            ## TemporaryRedirect (HTTP 307)
658
+            redir_bucket = getTextFromXml(response['data'], ".//Bucket")
659
+            redir_hostname = getTextFromXml(response['data'], ".//Endpoint")
660
+            self.set_hostname(redir_bucket, redir_hostname)
661
+            warning("Redirected to: %s" % (redir_hostname))
662
+            return self.send_file(request, file, labels)
663
+
664
+        # S3 from time to time doesn't send ETag back in a response :-(
665
+        # Force re-upload here.
666
+        if not response['headers'].has_key('etag'):
667
+            response['headers']['etag'] = ''
668
+
669
+        if response["status"] < 200 or response["status"] > 299:
670
+            try_retry = False
671
+            if response["status"] >= 500:
672
+                ## AWS internal error - retry
673
+                try_retry = True
674
+            elif response["status"] >= 400:
675
+                err = S3Error(response)
676
+                ## Retriable client error?
677
+                if err.code in [ 'BadDigest', 'OperationAborted', 'TokenRefreshRequired', 'RequestTimeout' ]:
678
+                    try_retry = True
679
+
680
+            if try_retry:
681
+                if retries:
682
+                    warning("Upload failed: %s (%s)" % (resource['uri'], S3Error(response)))
683
+                    warning("Waiting %d sec..." % self._fail_wait(retries))
684
+                    time.sleep(self._fail_wait(retries))
685
+                    return self.send_file(request, file, labels, throttle, retries - 1)
686
+                else:
687
+                    warning("Too many failures. Giving up on '%s'" % (file.name))
688
+                    raise S3UploadError
689
+
690
+            ## Non-recoverable error
691
+            raise S3Error(response)
692
+
693
+        debug("MD5 sums: computed=%s, received=%s" % (md5_computed, response["headers"]["etag"]))
694
+        if response["headers"]["etag"].strip('"\'') != md5_hash.hexdigest():
695
+            warning("MD5 Sums don't match!")
696
+            if retries:
697
+                warning("Retrying upload of %s" % (file.name))
698
+                return self.send_file(request, file, labels, throttle, retries - 1)
699
+            else:
700
+                warning("Too many failures. Giving up on '%s'" % (file.name))
701
+                raise S3UploadError
702
+
703
+        return response
704
+
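The final integrity check in send_file() relies on the ETag of a single-request PUT being the quoted hex MD5 of the body (multipart uploads would break that assumption, but this client does not use them). A sketch with a well-known MD5 test vector:

    import hashlib
    etag = '"9e107d9d372bb6826bd81d3542a419d6"'   # hypothetical ETag response header
    body = "The quick brown fox jumps over the lazy dog"
    assert etag.strip('"\'') == hashlib.md5(body).hexdigest()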
705
+    def recv_file(self, request, stream, labels, start_position = 0, retries = _max_retries):
706
+        method_string, resource, headers = request.get_triplet()
707
+        if self.config.progress_meter:
708
+            progress = self.config.progress_class(labels, 0)
709
+        else:
710
+            info("Receiving file '%s', please wait..." % stream.name)
711
+        timestamp_start = time.time()
712
+        try:
713
+            conn = self.get_connection(resource['bucket'])
714
+            conn.connect()
715
+            conn.putrequest(method_string, self.format_uri(resource))
716
+            for header in headers.keys():
717
+                conn.putheader(header, str(headers[header]))
718
+            if start_position > 0:
719
+                debug("Requesting Range: %d .. end" % start_position)
720
+                conn.putheader("Range", "bytes=%d-" % start_position)
721
+            conn.endheaders()
722
+            response = {}
723
+            http_response = conn.getresponse()
724
+            response["status"] = http_response.status
725
+            response["reason"] = http_response.reason
726
+            response["headers"] = convertTupleListToDict(http_response.getheaders())
727
+            debug("Response: %s" % response)
728
+        except Exception, e:
729
+            if self.config.progress_meter:
730
+                progress.done("failed")
731
+            if retries:
732
+                warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
733
+                warning("Waiting %d sec..." % self._fail_wait(retries))
734
+                time.sleep(self._fail_wait(retries))
735
+                # Connection error -> retry from start_position
736
+                return self.recv_file(request, stream, labels, start_position, retries - 1)
737
+            else:
738
+                raise S3DownloadError("Download failed for: %s" % resource['uri'])
739
+
740
+        if response["status"] == 307:
741
+            ## TemporaryRedirect (HTTP 307)
742
+            response['data'] = http_response.read()
743
+            redir_bucket = getTextFromXml(response['data'], ".//Bucket")
744
+            redir_hostname = getTextFromXml(response['data'], ".//Endpoint")
745
+            self.set_hostname(redir_bucket, redir_hostname)
746
+            warning("Redirected to: %s" % (redir_hostname))
747
+            return self.recv_file(request, stream, labels)
748
+
749
+        if response["status"] < 200 or response["status"] > 299:
750
+            raise S3Error(response)
751
+
752
+        if start_position == 0:
753
+            # Only compute MD5 on the fly if we're downloading from the beginning.
754
+            # Otherwise we'd get nonsense.
755
+            md5_hash = md5()
756
+        size_left = int(response["headers"]["content-length"])
757
+        size_total = start_position + size_left
758
+        current_position = start_position
759
+
760
+        if self.config.progress_meter:
761
+            progress.total_size = size_total
762
+            progress.initial_position = current_position
763
+            progress.current_position = current_position
764
+
765
+        try:
766
+            while (current_position < size_total):
767
+                this_chunk = size_left > self.config.recv_chunk and self.config.recv_chunk or size_left
768
+                data = http_response.read(this_chunk)
769
+                stream.write(data)
770
+                if start_position == 0:
771
+                    md5_hash.update(data)
772
+                current_position += len(data)
773
+                ## Call progress meter from here...
774
+                if self.config.progress_meter:
775
+                    progress.update(delta_position = len(data))
776
+            conn.close()
777
+        except Exception, e:
778
+            if self.config.progress_meter:
779
+                progress.done("failed")
780
+            if retries:
781
+                warning("Retrying failed request: %s (%s)" % (resource['uri'], e))
782
+                warning("Waiting %d sec..." % self._fail_wait(retries))
783
+                time.sleep(self._fail_wait(retries))
784
+                # Connection error -> resume from current_position
785
+                return self.recv_file(request, stream, labels, current_position, retries - 1)
786
+            else:
787
+                raise S3DownloadError("Download failed for: %s" % resource['uri'])
788
+
789
+        stream.flush()
790
+        timestamp_end = time.time()
791
+
792
+        if self.config.progress_meter:
793
+            ## The above stream.flush() may take some time -> update() progress meter
794
+            ## to correct the average speed. Otherwise people will complain that
795
+            ## 'progress' and response["speed"] are inconsistent ;-)
796
+            progress.update()
797
+            progress.done("done")
798
+
799
+        if start_position == 0:
800
+            # Only compute MD5 on the fly if we were downloading from the beginning
801
+            response["md5"] = md5_hash.hexdigest()
802
+        else:
803
+            # Otherwise try to compute MD5 of the output file
804
+            try:
805
+                response["md5"] = hash_file_md5(stream.name)
806
+            except IOError, e:
807
+                if e.errno != errno.ENOENT:
808
+                    warning("Unable to open file: %s: %s" % (stream.name, e))
809
+                warning("Unable to verify MD5. Assume it matches.")
810
+                response["md5"] = response["headers"]["etag"]
811
+
812
+        response["md5match"] = response["headers"]["etag"].find(response["md5"]) >= 0
813
+        response["elapsed"] = timestamp_end - timestamp_start
814
+        response["size"] = current_position
815
+        response["speed"] = response["elapsed"] and float(response["size"]) / response["elapsed"] or float(-1)
816
+        if response["size"] != start_position + long(response["headers"]["content-length"]):
817
+            warning("Reported size (%s) does not match received size (%s)" % (
818
+                start_position + long(response["headers"]["content-length"]), response["size"]))
819
+        debug("ReceiveFile: Computed MD5 = %s" % response["md5"])
820
+        if not response["md5match"]:
821
+            warning("MD5 signatures do not match: computed=%s, received=%s" % (
822
+                response["md5"], response["headers"]["etag"]))
823
+        return response
824 824
 __all__.append("S3")
825
+
826
+# vim:et:ts=4:sts=4:ai
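A note on the resume path in recv_file() above: a restart at byte N sends "Range: bytes=N-", so content-length covers only the remaining tail and an on-the-fly MD5 would be meaningless; the finished file is therefore re-hashed from disk, and if even that fails the ETag is trusted. The header in miniature, for a hypothetical offset:

    start_position = 1024
    assert "bytes=%d-" % start_position == "bytes=1024-"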
... ...
@@ -12,207 +12,208 @@ import S3
12 12
 from Utils import unicodise, check_bucket_name_dns_conformity
13 13
 
14 14
 class S3Uri(object):
15
-	type = None
16
-	_subclasses = None
17
-
18
-	def __new__(self, string):
19
-		if not self._subclasses:
20
-			## Generate a list of all subclasses of S3Uri
21
-			self._subclasses = []
22
-			dict = sys.modules[__name__].__dict__
23
-			for something in dict:
24
-				if type(dict[something]) is not type(self):
25
-					continue
26
-				if issubclass(dict[something], self) and dict[something] != self:
27
-					self._subclasses.append(dict[something])
28
-		for subclass in self._subclasses:
29
-			try:
30
-				instance = object.__new__(subclass)
31
-				instance.__init__(string)
32
-				return instance
33
-			except ValueError, e:
34
-				continue
35
-		raise ValueError("%s: not a recognized URI" % string)
36
-	
37
-	def __str__(self):
38
-		return self.uri()
39
-
40
-	def __unicode__(self):
41
-		return self.uri()
42
-
43
-	def public_url(self):
44
-		raise ValueError("This S3 URI does not have Anonymous URL representation")
45
-
46
-	def basename(self):
47
-		return self.__unicode__().split("/")[-1]
15
+    type = None
16
+    _subclasses = None
17
+
18
+    def __new__(self, string):
19
+        if not self._subclasses:
20
+            ## Generate a list of all subclasses of S3Uri
21
+            self._subclasses = []
22
+            dict = sys.modules[__name__].__dict__
23
+            for something in dict:
24
+                if type(dict[something]) is not type(self):
25
+                    continue
26
+                if issubclass(dict[something], self) and dict[something] != self:
27
+                    self._subclasses.append(dict[something])
28
+        for subclass in self._subclasses:
29
+            try:
30
+                instance = object.__new__(subclass)
31
+                instance.__init__(string)
32
+                return instance
33
+            except ValueError, e:
34
+                continue
35
+        raise ValueError("%s: not a recognized URI" % string)
36
+
37
+    def __str__(self):
38
+        return self.uri()
39
+
40
+    def __unicode__(self):
41
+        return self.uri()
42
+
43
+    def public_url(self):
44
+        raise ValueError("This S3 URI does not have Anonymous URL representation")
45
+
46
+    def basename(self):
47
+        return self.__unicode__().split("/")[-1]
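S3Uri.__new__ above acts as a factory: it instantiates each subclass in turn and returns the first whose __init__ does not raise ValueError, so the URI scheme selects the class. Expected dispatch (a sketch; S3UriFile is effectively the catch-all, since its pattern matches almost any string):

    # S3Uri("s3://bucket/key")      -> S3UriS3
    # S3Uri("s3fs://fs1/some/path") -> S3UriS3FS
    # S3Uri("cf://1234567890ABCD/") -> S3UriCloudFront
    # S3Uri("/tmp/file.txt")        -> S3UriFile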
48 48
 
49 49
 class S3UriS3(S3Uri):
50
-	type = "s3"
51
-	_re = re.compile("^s3://([^/]+)/?(.*)", re.IGNORECASE)
52
-	def __init__(self, string):
53
-		match = self._re.match(string)
54
-		if not match:
55
-			raise ValueError("%s: not a S3 URI" % string)
56
-		groups = match.groups()
57
-		self._bucket = groups[0]
58
-		self._object = unicodise(groups[1])
59
-
60
-	def bucket(self):
61
-		return self._bucket
62
-
63
-	def object(self):
64
-		return self._object
65
-	
66
-	def has_bucket(self):
67
-		return bool(self._bucket)
68
-
69
-	def has_object(self):
70
-		return bool(self._object)
71
-
72
-	def uri(self):
73
-		return "/".join(["s3:/", self._bucket, self._object])
74
-	
75
-	def is_dns_compatible(self):
76
-		return check_bucket_name_dns_conformity(self._bucket)
77
-
78
-	def public_url(self):
79
-		if self.is_dns_compatible():
80
-			return "http://%s.s3.amazonaws.com/%s" % (self._bucket, self._object)
81
-		else:
82
-			return "http://s3.amazonaws.com/%s/%s" % (self._bucket, self._object)
83
-
84
-	def host_name(self):
85
-		if self.is_dns_compatible():
86
-			return "%s.s3.amazonaws.com" % (self._bucket)
87
-		else:
88
-			return "s3.amazonaws.com"
89
-
90
-	@staticmethod
91
-	def compose_uri(bucket, object = ""):
92
-		return "s3://%s/%s" % (bucket, object)
93
-
94
-	@staticmethod
95
-	def httpurl_to_s3uri(http_url):
96
-		m=re.match("(https?://)?([^/]+)/?(.*)", http_url, re.IGNORECASE)
97
-		hostname, object = m.groups()[1:]
98
-		hostname = hostname.lower()
99
-		if hostname == "s3.amazonaws.com":
100
-			## old-style url: http://s3.amazonaws.com/bucket/object
101
-			if object.count("/") == 0:
102
-				## no object given
103
-				bucket = object
104
-				object = ""
105
-			else:
106
-				## bucket/object
107
-				bucket, object = object.split("/", 1)
108
-		elif hostname.endswith(".s3.amazonaws.com"):
109
-			## new-style url: http://bucket.s3.amazonaws.com/object
110
-			bucket = hostname[:-(len(".s3.amazonaws.com"))]
111
-		else:
112
-			raise ValueError("Unable to parse URL: %s" % http_url)
113
-		return S3Uri("s3://%(bucket)s/%(object)s" % { 
114
-			'bucket' : bucket,
115
-			'object' : object })
50
+    type = "s3"
51
+    _re = re.compile("^s3://([^/]+)/?(.*)", re.IGNORECASE)
52
+    def __init__(self, string):
53
+        match = self._re.match(string)
54
+        if not match:
55
+            raise ValueError("%s: not a S3 URI" % string)
56
+        groups = match.groups()
57
+        self._bucket = groups[0]
58
+        self._object = unicodise(groups[1])
59
+
60
+    def bucket(self):
61
+        return self._bucket
62
+
63
+    def object(self):
64
+        return self._object
65
+
66
+    def has_bucket(self):
67
+        return bool(self._bucket)
68
+
69
+    def has_object(self):
70
+        return bool(self._object)
71
+
72
+    def uri(self):
73
+        return "/".join(["s3:/", self._bucket, self._object])
74
+
75
+    def is_dns_compatible(self):
76
+        return check_bucket_name_dns_conformity(self._bucket)
77
+
78
+    def public_url(self):
79
+        if self.is_dns_compatible():
80
+            return "http://%s.s3.amazonaws.com/%s" % (self._bucket, self._object)
81
+        else:
82
+            return "http://s3.amazonaws.com/%s/%s" % (self._bucket, self._object)
83
+
84
+    def host_name(self):
85
+        if self.is_dns_compatible():
86
+            return "%s.s3.amazonaws.com" % (self._bucket)
87
+        else:
88
+            return "s3.amazonaws.com"
89
+
90
+    @staticmethod
91
+    def compose_uri(bucket, object = ""):
92
+        return "s3://%s/%s" % (bucket, object)
93
+
94
+    @staticmethod
95
+    def httpurl_to_s3uri(http_url):
96
+        m=re.match("(https?://)?([^/]+)/?(.*)", http_url, re.IGNORECASE)
97
+        hostname, object = m.groups()[1:]
98
+        hostname = hostname.lower()
99
+        if hostname == "s3.amazonaws.com":
100
+            ## old-style url: http://s3.amazonaws.com/bucket/object
101
+            if object.count("/") == 0:
102
+                ## no object given
103
+                bucket = object
104
+                object = ""
105
+            else:
106
+                ## bucket/object
107
+                bucket, object = object.split("/", 1)
108
+        elif hostname.endswith(".s3.amazonaws.com"):
109
+            ## new-style url: http://bucket.s3.amazonaws.com/object
110
+            bucket = hostname[:-(len(".s3.amazonaws.com"))]
111
+        else:
112
+            raise ValueError("Unable to parse URL: %s" % http_url)
113
+        return S3Uri("s3://%(bucket)s/%(object)s" % {
114
+            'bucket' : bucket,
115
+            'object' : object })
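httpurl_to_s3uri() accepts both addressing styles and maps them to the same URI; assuming the module is importable, both assertions below should hold:

    assert S3UriS3.httpurl_to_s3uri("http://s3.amazonaws.com/bucket/key").uri() == "s3://bucket/key"
    assert S3UriS3.httpurl_to_s3uri("http://bucket.s3.amazonaws.com/key").uri() == "s3://bucket/key"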
116 116
 
117 117
 class S3UriS3FS(S3Uri):
118
-	type = "s3fs"
119
-	_re = re.compile("^s3fs://([^/]*)/?(.*)", re.IGNORECASE)
120
-	def __init__(self, string):
121
-		match = self._re.match(string)
122
-		if not match:
123
-			raise ValueError("%s: not a S3fs URI" % string)
124
-		groups = match.groups()
125
-		self._fsname = groups[0]
126
-		self._path = unicodise(groups[1]).split("/")
118
+    type = "s3fs"
119
+    _re = re.compile("^s3fs://([^/]*)/?(.*)", re.IGNORECASE)
120
+    def __init__(self, string):
121
+        match = self._re.match(string)
122
+        if not match:
123
+            raise ValueError("%s: not a S3fs URI" % string)
124
+        groups = match.groups()
125
+        self._fsname = groups[0]
126
+        self._path = unicodise(groups[1]).split("/")
127 127
 
128
-	def fsname(self):
129
-		return self._fsname
128
+    def fsname(self):
129
+        return self._fsname
130 130
 
131
-	def path(self):
132
-		return "/".join(self._path)
131
+    def path(self):
132
+        return "/".join(self._path)
133 133
 
134
-	def uri(self):
135
-		return "/".join(["s3fs:/", self._fsname, self.path()])
134
+    def uri(self):
135
+        return "/".join(["s3fs:/", self._fsname, self.path()])
136 136
 
137 137
 class S3UriFile(S3Uri):
138
-	type = "file"
139
-	_re = re.compile("^(\w+://)?(.*)")
140
-	def __init__(self, string):
141
-		match = self._re.match(string)
142
-		groups = match.groups()
143
-		if groups[0] not in (None, "file://"):
144
-			raise ValueError("%s: not a file:// URI" % string)
145
-		self._path = unicodise(groups[1]).split("/")
138
+    type = "file"
139
+    _re = re.compile("^(\w+://)?(.*)")
140
+    def __init__(self, string):
141
+        match = self._re.match(string)
142
+        groups = match.groups()
143
+        if groups[0] not in (None, "file://"):
144
+            raise ValueError("%s: not a file:// URI" % string)
145
+        self._path = unicodise(groups[1]).split("/")
146 146
 
147
-	def path(self):
148
-		return "/".join(self._path)
147
+    def path(self):
148
+        return "/".join(self._path)
149 149
 
150
-	def uri(self):
151
-		return "/".join(["file:/", self.path()])
150
+    def uri(self):
151
+        return "/".join(["file:/", self.path()])
152 152
 
153
-	def isdir(self):
154
-		return os.path.isdir(self.path())
153
+    def isdir(self):
154
+        return os.path.isdir(self.path())
155 155
 
156
-	def dirname(self):
157
-		return os.path.dirname(self.path())
156
+    def dirname(self):
157
+        return os.path.dirname(self.path())
158 158
 
159 159
 class S3UriCloudFront(S3Uri):
160
-	type = "cf"
161
-	_re = re.compile("^cf://([^/]*)/*(.*)", re.IGNORECASE)
162
-	def __init__(self, string):
163
-		match = self._re.match(string)
164
-		if not match:
165
-			raise ValueError("%s: not a CloudFront URI" % string)
166
-		groups = match.groups()
167
-		self._dist_id = groups[0]
168
-		self._request_id = groups[1] != "/" and groups[1] or None
169
-
170
-	def dist_id(self):
171
-		return self._dist_id
172
-
173
-	def request_id(self):
174
-		return self._request_id
175
-
176
-	def uri(self):
177
-		uri = "cf://" + self.dist_id()
178
-		if self.request_id():
179
-			uri += "/" + self.request_id()
180
-		return uri
160
+    type = "cf"
161
+    _re = re.compile("^cf://([^/]*)/*(.*)", re.IGNORECASE)
162
+    def __init__(self, string):
163
+        match = self._re.match(string)
164
+        if not match:
165
+            raise ValueError("%s: not a CloudFront URI" % string)
166
+        groups = match.groups()
167
+        self._dist_id = groups[0]
168
+        self._request_id = groups[1] != "/" and groups[1] or None
169
+
170
+    def dist_id(self):
171
+        return self._dist_id
172
+
173
+    def request_id(self):
174
+        return self._request_id
175
+
176
+    def uri(self):
177
+        uri = "cf://" + self.dist_id()
178
+        if self.request_id():
179
+            uri += "/" + self.request_id()
180
+        return uri
181 181
 
182 182
 if __name__ == "__main__":
183
-	uri = S3Uri("s3://bucket/object")
184
-	print "type()  =", type(uri)
185
-	print "uri     =", uri
186
-	print "uri.type=", uri.type
187
-	print "bucket  =", uri.bucket()
188
-	print "object  =", uri.object()
189
-	print
190
-
191
-	uri = S3Uri("s3://bucket")
192
-	print "type()  =", type(uri)
193
-	print "uri     =", uri
194
-	print "uri.type=", uri.type
195
-	print "bucket  =", uri.bucket()
196
-	print
197
-
198
-	uri = S3Uri("s3fs://filesystem1/path/to/remote/file.txt")
199
-	print "type()  =", type(uri)
200
-	print "uri     =", uri
201
-	print "uri.type=", uri.type
202
-	print "path    =", uri.path()
203
-	print
204
-
205
-	uri = S3Uri("/path/to/local/file.txt")
206
-	print "type()  =", type(uri)
207
-	print "uri     =", uri
208
-	print "uri.type=", uri.type
209
-	print "path    =", uri.path()
210
-	print
211
-
212
-	uri = S3Uri("cf://1234567890ABCD/")
213
-	print "type()  =", type(uri)
214
-	print "uri     =", uri
215
-	print "uri.type=", uri.type
216
-	print "dist_id =", uri.dist_id()
217
-	print
218
-
183
+    uri = S3Uri("s3://bucket/object")
184
+    print "type()  =", type(uri)
185
+    print "uri     =", uri
186
+    print "uri.type=", uri.type
187
+    print "bucket  =", uri.bucket()
188
+    print "object  =", uri.object()
189
+    print
190
+
191
+    uri = S3Uri("s3://bucket")
192
+    print "type()  =", type(uri)
193
+    print "uri     =", uri
194
+    print "uri.type=", uri.type
195
+    print "bucket  =", uri.bucket()
196
+    print
197
+
198
+    uri = S3Uri("s3fs://filesystem1/path/to/remote/file.txt")
199
+    print "type()  =", type(uri)
200
+    print "uri     =", uri
201
+    print "uri.type=", uri.type
202
+    print "path    =", uri.path()
203
+    print
204
+
205
+    uri = S3Uri("/path/to/local/file.txt")
206
+    print "type()  =", type(uri)
207
+    print "uri     =", uri
208
+    print "uri.type=", uri.type
209
+    print "path    =", uri.path()
210
+    print
211
+
212
+    uri = S3Uri("cf://1234567890ABCD/")
213
+    print "type()  =", type(uri)
214
+    print "uri     =", uri
215
+    print "uri.type=", uri.type
216
+    print "dist_id =", uri.dist_id()
217
+    print
218
+
219
+# vim:et:ts=4:sts=4:ai
... ...
@@ -20,154 +20,156 @@ from SortedDict import SortedDict
20 20
 from Exceptions import *
21 21
 
22 22
 class SimpleDB(object):
23
-	# API Version
24
-	# See http://docs.amazonwebservices.com/AmazonSimpleDB/2007-11-07/DeveloperGuide/
25
-	Version = "2007-11-07"
26
-	SignatureVersion = 1
27
-
28
-	def __init__(self, config):
29
-		self.config = config
30
-
31
-	## ------------------------------------------------
32
-	## Methods implementing SimpleDB API
33
-	## ------------------------------------------------
34
-
35
-	def ListDomains(self, MaxNumberOfDomains = 100):
36
-		'''
37
-		Lists all domains associated with our Access Key. Returns 
38
-		domain names up to the limit set by MaxNumberOfDomains.
39
-		'''
40
-		parameters = SortedDict()
41
-		parameters['MaxNumberOfDomains'] = MaxNumberOfDomains
42
-		return self.send_request("ListDomains", DomainName = None, parameters = parameters)
43
-
44
-	def CreateDomain(self, DomainName):
45
-		return self.send_request("CreateDomain", DomainName = DomainName)
46
-
47
-	def DeleteDomain(self, DomainName):
48
-		return self.send_request("DeleteDomain", DomainName = DomainName)
49
-
50
-	def PutAttributes(self, DomainName, ItemName, Attributes):
51
-		parameters = SortedDict()
52
-		parameters['ItemName'] = ItemName
53
-		seq = 0
54
-		for attrib in Attributes:
55
-			if type(Attributes[attrib]) == type(list()):
56
-				for value in Attributes[attrib]:
57
-					parameters['Attribute.%d.Name' % seq] = attrib
58
-					parameters['Attribute.%d.Value' % seq] = unicode(value)
59
-					seq += 1
60
-			else:
61
-				parameters['Attribute.%d.Name' % seq] = attrib
62
-				parameters['Attribute.%d.Value' % seq] = unicode(Attributes[attrib])
63
-				seq += 1
64
-		## TODO:
65
-		## - support for Attribute.N.Replace
66
-		## - support for multiple values for one attribute
67
-		return self.send_request("PutAttributes", DomainName = DomainName, parameters = parameters)
68
-
69
-	def GetAttributes(self, DomainName, ItemName, Attributes = []):
70
-		parameters = SortedDict()
71
-		parameters['ItemName'] = ItemName
72
-		seq = 0
73
-		for attrib in Attributes:
74
-			parameters['AttributeName.%d' % seq] = attrib
75
-			seq += 1
76
-		return self.send_request("GetAttributes", DomainName = DomainName, parameters = parameters)
77
-
78
-	def DeleteAttributes(self, DomainName, ItemName, Attributes = {}):
79
-		"""
80
-		Remove specified Attributes from ItemName.
81
-		Attributes parameter can be either:
82
-		- not specified, in which case the whole Item is removed
83
-		- list, e.g. ['Attr1', 'Attr2'] in which case these parameters are removed
84
-		- dict, e.g. {'Attr1' : 'One', 'Attr2' : 'Two'} in which case the
85
-		  specified values are removed from multi-value attributes.
86
-		"""
87
-		parameters = SortedDict()
88
-		parameters['ItemName'] = ItemName
89
-		seq = 0
90
-		for attrib in Attributes:
91
-			parameters['Attribute.%d.Name' % seq] = attrib
92
-			if type(Attributes) == type(dict()):
93
-				parameters['Attribute.%d.Value' % seq] = unicode(Attributes[attrib])
94
-			seq += 1
95
-		return self.send_request("DeleteAttributes", DomainName = DomainName, parameters = parameters)
96
-
97
-	def Query(self, DomainName, QueryExpression = None, MaxNumberOfItems = None, NextToken = None):
98
-		parameters = SortedDict()
99
-		if QueryExpression:
100
-			parameters['QueryExpression'] = QueryExpression
101
-		if MaxNumberOfItems:
102
-			parameters['MaxNumberOfItems'] = MaxNumberOfItems
103
-		if NextToken:
104
-			parameters['NextToken'] = NextToken
105
-		return self.send_request("Query", DomainName = DomainName, parameters = parameters)
106
-		## Handle NextToken? Or maybe not - let the upper level do it
107
-
108
-	## ------------------------------------------------
109
-	## Low-level methods for handling SimpleDB requests
110
-	## ------------------------------------------------
111
-
112
-	def send_request(self, *args, **kwargs):
113
-		request = self.create_request(*args, **kwargs)
114
-		#debug("Request: %s" % repr(request))
115
-		conn = self.get_connection()
116
-		conn.request("GET", self.format_uri(request['uri_params']))
117
-		http_response = conn.getresponse()
118
-		response = {}
119
-		response["status"] = http_response.status
120
-		response["reason"] = http_response.reason
121
-		response["headers"] = convertTupleListToDict(http_response.getheaders())
122
-		response["data"] = http_response.read()
123
-		conn.close()
124
-
125
-		if response["status"] < 200 or response["status"] > 299:
126
-			debug("Response: " + str(response))
127
-			raise S3Error(response)
128
-
129
-		return response
130
-
131
-	def create_request(self, Action, DomainName, parameters = None):
132
-		if not parameters:
133
-			parameters = SortedDict()
134
-		parameters['AWSAccessKeyId'] = self.config.access_key
135
-		parameters['Version'] = self.Version
136
-		parameters['SignatureVersion'] = self.SignatureVersion
137
-		parameters['Action'] = Action
138
-		parameters['Timestamp'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
139
-		if DomainName:
140
-			parameters['DomainName'] = DomainName
141
-		parameters['Signature'] = self.sign_request(parameters)
142
-		parameters.keys_return_lowercase = False
143
-		uri_params = urllib.urlencode(parameters)
144
-		request = {}
145
-		request['uri_params'] = uri_params
146
-		request['parameters'] = parameters
147
-		return request
148
-
149
-	def sign_request(self, parameters):
150
-		h = ""
151
-		parameters.keys_sort_lowercase = True
152
-		parameters.keys_return_lowercase = False
153
-		for key in parameters:
154
-			h += "%s%s" % (key, parameters[key])
155
-		#debug("SignRequest: %s" % h)
156
-		return base64.encodestring(hmac.new(self.config.secret_key, h, sha).digest()).strip()
157
-
158
-	def get_connection(self):
159
-		if self.config.proxy_host != "":
160
-			return httplib.HTTPConnection(self.config.proxy_host, self.config.proxy_port)
161
-		else:
162
-			if self.config.use_https:
163
-				return httplib.HTTPSConnection(self.config.simpledb_host)
164
-			else:
165
-				return httplib.HTTPConnection(self.config.simpledb_host)
166
-
167
-	def format_uri(self, uri_params):
168
-		if self.config.proxy_host != "":
169
-			uri = "http://%s/?%s" % (self.config.simpledb_host, uri_params)
170
-		else:
171
-			uri = "/?%s" % uri_params
172
-		#debug('format_uri(): ' + uri)
173
-		return uri
23
+    # API Version
24
+    # See http://docs.amazonwebservices.com/AmazonSimpleDB/2007-11-07/DeveloperGuide/
25
+    Version = "2007-11-07"
26
+    SignatureVersion = 1
27
+
28
+    def __init__(self, config):
29
+        self.config = config
30
+
31
+    ## ------------------------------------------------
32
+    ## Methods implementing SimpleDB API
33
+    ## ------------------------------------------------
34
+
35
+    def ListDomains(self, MaxNumberOfDomains = 100):
36
+        '''
37
+        Lists all domains associated with our Access Key. Returns
38
+        domain names up to the limit set by MaxNumberOfDomains.
39
+        '''
40
+        parameters = SortedDict()
41
+        parameters['MaxNumberOfDomains'] = MaxNumberOfDomains
42
+        return self.send_request("ListDomains", DomainName = None, parameters = parameters)
43
+
44
+    def CreateDomain(self, DomainName):
45
+        return self.send_request("CreateDomain", DomainName = DomainName)
46
+
47
+    def DeleteDomain(self, DomainName):
48
+        return self.send_request("DeleteDomain", DomainName = DomainName)
49
+
50
+    def PutAttributes(self, DomainName, ItemName, Attributes):
51
+        parameters = SortedDict()
52
+        parameters['ItemName'] = ItemName
53
+        seq = 0
54
+        for attrib in Attributes:
55
+            if type(Attributes[attrib]) == type(list()):
56
+                for value in Attributes[attrib]:
57
+                    parameters['Attribute.%d.Name' % seq] = attrib
58
+                    parameters['Attribute.%d.Value' % seq] = unicode(value)
59
+                    seq += 1
60
+            else:
61
+                parameters['Attribute.%d.Name' % seq] = attrib
62
+                parameters['Attribute.%d.Value' % seq] = unicode(Attributes[attrib])
63
+                seq += 1
64
+        ## TODO:
65
+        ## - support for Attribute.N.Replace
66
+        ## - support for multiple values for one attribute
67
+        return self.send_request("PutAttributes", DomainName = DomainName, parameters = parameters)
68
+
69
+    def GetAttributes(self, DomainName, ItemName, Attributes = []):
70
+        parameters = SortedDict()
71
+        parameters['ItemName'] = ItemName
72
+        seq = 0
73
+        for attrib in Attributes:
74
+            parameters['AttributeName.%d' % seq] = attrib
75
+            seq += 1
76
+        return self.send_request("GetAttributes", DomainName = DomainName, parameters = parameters)
77
+
78
+    def DeleteAttributes(self, DomainName, ItemName, Attributes = {}):
79
+        """
80
+        Remove specified Attributes from ItemName.
81
+        Attributes parameter can be either:
82
+        - not specified, in which case the whole Item is removed
83
+        - list, e.g. ['Attr1', 'Attr2'] in which case these parameters are removed
84
+        - dict, e.g. {'Attr1' : 'One', 'Attr2' : 'Two'} in which case the
85
+          specified values are removed from multi-value attributes.
86
+        """
87
+        parameters = SortedDict()
88
+        parameters['ItemName'] = ItemName
89
+        seq = 0
90
+        for attrib in Attributes:
91
+            parameters['Attribute.%d.Name' % seq] = attrib
92
+            if type(Attributes) == type(dict()):
93
+                parameters['Attribute.%d.Value' % seq] = unicode(Attributes[attrib])
94
+            seq += 1
95
+        return self.send_request("DeleteAttributes", DomainName = DomainName, parameters = parameters)
96
+
97
+    def Query(self, DomainName, QueryExpression = None, MaxNumberOfItems = None, NextToken = None):
98
+        parameters = SortedDict()
99
+        if QueryExpression:
100
+            parameters['QueryExpression'] = QueryExpression
101
+        if MaxNumberOfItems:
102
+            parameters['MaxNumberOfItems'] = MaxNumberOfItems
103
+        if NextToken:
104
+            parameters['NextToken'] = NextToken
105
+        return self.send_request("Query", DomainName = DomainName, parameters = parameters)
106
+        ## Handle NextToken? Or maybe not - let the upper level do it
107
+
108
+    ## ------------------------------------------------
109
+    ## Low-level methods for handling SimpleDB requests
110
+    ## ------------------------------------------------
111
+
112
+    def send_request(self, *args, **kwargs):
113
+        request = self.create_request(*args, **kwargs)
114
+        #debug("Request: %s" % repr(request))
115
+        conn = self.get_connection()
116
+        conn.request("GET", self.format_uri(request['uri_params']))
117
+        http_response = conn.getresponse()
118
+        response = {}
119
+        response["status"] = http_response.status
120
+        response["reason"] = http_response.reason
121
+        response["headers"] = convertTupleListToDict(http_response.getheaders())
122
+        response["data"] =  http_response.read()
123
+        conn.close()
124
+
125
+        if response["status"] < 200 or response["status"] > 299:
126
+            debug("Response: " + str(response))
127
+            raise S3Error(response)
128
+
129
+        return response
130
+
131
+    def create_request(self, Action, DomainName, parameters = None):
132
+        if not parameters:
133
+            parameters = SortedDict()
134
+        parameters['AWSAccessKeyId'] = self.config.access_key
135
+        parameters['Version'] = self.Version
136
+        parameters['SignatureVersion'] = self.SignatureVersion
137
+        parameters['Action'] = Action
138
+        parameters['Timestamp'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
139
+        if DomainName:
140
+            parameters['DomainName'] = DomainName
141
+        parameters['Signature'] = self.sign_request(parameters)
142
+        parameters.keys_return_lowercase = False
143
+        uri_params = urllib.urlencode(parameters)
144
+        request = {}
145
+        request['uri_params'] = uri_params
146
+        request['parameters'] = parameters
147
+        return request
148
+
149
+    def sign_request(self, parameters):
150
+        h = ""
151
+        parameters.keys_sort_lowercase = True
152
+        parameters.keys_return_lowercase = False
153
+        for key in parameters:
154
+            h += "%s%s" % (key, parameters[key])
155
+        #debug("SignRequest: %s" % h)
156
+        return base64.encodestring(hmac.new(self.config.secret_key, h, sha).digest()).strip()
157
+
158
+    def get_connection(self):
159
+        if self.config.proxy_host != "":
160
+            return httplib.HTTPConnection(self.config.proxy_host, self.config.proxy_port)
161
+        else:
162
+            if self.config.use_https:
163
+                return httplib.HTTPSConnection(self.config.simpledb_host)
164
+            else:
165
+                return httplib.HTTPConnection(self.config.simpledb_host)
166
+
167
+    def format_uri(self, uri_params):
168
+        if self.config.proxy_host != "":
169
+            uri = "http://%s/?%s" % (self.config.simpledb_host, uri_params)
170
+        else:
171
+            uri = "/?%s" % uri_params
172
+        #debug('format_uri(): ' + uri)
173
+        return uri
174
+
175
+# vim:et:ts=4:sts=4:ai
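
The SignatureVersion 1 scheme used by sign_request() above, as a standalone sketch: "NameValue" pairs are concatenated in case-insensitively sorted key order, HMAC-SHA1-signed with the secret key, then base64-encoded. The key and parameters below are illustrative only:

    import hmac, base64
    from hashlib import sha1

    def sign_v1(parameters, secret_key):
        # concatenate key+value pairs, keys sorted case-insensitively
        h = "".join("%s%s" % (k, parameters[k])
                    for k in sorted(parameters, key=str.lower))
        return base64.encodestring(hmac.new(secret_key, h, sha1).digest()).strip()

    print sign_v1({'Action' : 'ListDomains', 'AWSAccessKeyId' : 'EXAMPLEKEY'}, 'example-secret')
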
... ...
@@ -6,56 +6,58 @@
6 6
 from BidirMap import BidirMap
7 7
 
8 8
 class SortedDictIterator(object):
9
-	def __init__(self, sorted_dict, keys):
10
-		self.sorted_dict = sorted_dict
11
-		self.keys = keys
9
+    def __init__(self, sorted_dict, keys):
10
+        self.sorted_dict = sorted_dict
11
+        self.keys = keys
12 12
 
13
-	def next(self):
14
-		try:
15
-			return self.keys.pop(0)
16
-		except IndexError:
17
-			raise StopIteration
13
+    def next(self):
14
+        try:
15
+            return self.keys.pop(0)
16
+        except IndexError:
17
+            raise StopIteration
18 18
 
19 19
 class SortedDict(dict):
20
-	def __init__(self, mapping = {}, ignore_case = True, **kwargs):
21
-		"""
22
-		WARNING: SortedDict() with ignore_case==True will
23
-		         drop entries differing only in capitalisation!
24
-				 Eg: SortedDict({'auckland':1, 'Auckland':2}).keys() => ['Auckland']
25
-				 With ignore_case==False it's all right
26
-		"""
27
-		dict.__init__(self, mapping, **kwargs)
28
-		self.ignore_case = ignore_case
29
-
30
-	def keys(self):
31
-		keys = dict.keys(self)
32
-		if self.ignore_case:
33
-			# Translation map
34
-			xlat_map = BidirMap()
35
-			for key in keys:
36
-				xlat_map[key.lower()] = key
37
-			# Lowercase keys
38
-			lc_keys = xlat_map.keys()
39
-			lc_keys.sort()
40
-			return [xlat_map[k] for k in lc_keys]
41
-		else:
42
-			keys.sort()
43
-			return keys
44
-
45
-	def __iter__(self):
46
-		return SortedDictIterator(self, self.keys())
20
+    def __init__(self, mapping = {}, ignore_case = True, **kwargs):
21
+        """
22
+        WARNING: SortedDict() with ignore_case==True will
23
+                 drop entries differing only in capitalisation!
24
+                 Eg: SortedDict({'auckland':1, 'Auckland':2}).keys() => ['Auckland']
25
+                 With ignore_case==False it's all right
26
+        """
27
+        dict.__init__(self, mapping, **kwargs)
28
+        self.ignore_case = ignore_case
29
+
30
+    def keys(self):
31
+        keys = dict.keys(self)
32
+        if self.ignore_case:
33
+            # Translation map
34
+            xlat_map = BidirMap()
35
+            for key in keys:
36
+                xlat_map[key.lower()] = key
37
+            # Lowercase keys
38
+            lc_keys = xlat_map.keys()
39
+            lc_keys.sort()
40
+            return [xlat_map[k] for k in lc_keys]
41
+        else:
42
+            keys.sort()
43
+            return keys
44
+
45
+    def __iter__(self):
46
+        return SortedDictIterator(self, self.keys())
47 47
 
48 48
 if __name__ == "__main__":
49
-	d = { 'AWS' : 1, 'Action' : 2, 'america' : 3, 'Auckland' : 4, 'America' : 5 }
50
-	sd = SortedDict(d)
51
-	print "Wanted: Action, america, Auckland, AWS,    [ignore case]"
52
-	print "Got:   ",
53
-	for key in sd:
54
-		print "%s," % key,
55
-	print "   [used: __iter__()]"
56
-	d = SortedDict(d, ignore_case = False)
57
-	print "Wanted: AWS, Action, Auckland, america,    [case sensitive]"
58
-	print "Got:   ",
59
-	for key in d.keys():
60
-		print "%s," % key,
61
-	print "   [used: keys()]"
49
+    d = { 'AWS' : 1, 'Action' : 2, 'america' : 3, 'Auckland' : 4, 'America' : 5 }
50
+    sd = SortedDict(d)
51
+    print "Wanted: Action, america, Auckland, AWS,    [ignore case]"
52
+    print "Got:   ",
53
+    for key in sd:
54
+        print "%s," % key,
55
+    print "   [used: __iter__()]"
56
+    d = SortedDict(d, ignore_case = False)
57
+    print "Wanted: AWS, Action, Auckland, america,    [case sensitive]"
58
+    print "Got:   ",
59
+    for key in d.keys():
60
+        print "%s," % key,
61
+    print "   [used: keys()]"
62
+
63
+# vim:et:ts=4:sts=4:ai
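
The docstring's capitalisation caveat in action -- with ignore_case enabled, keys() silently collapses entries that differ only in case:

    sd = SortedDict({'auckland' : 1, 'Auckland' : 2})
    print sd.keys()    # only one of the two keys survives
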
... ...
@@ -10,10 +10,10 @@ import string
10 10
 import random
11 11
 import rfc822
12 12
 try:
13
-	from hashlib import md5, sha1
13
+    from hashlib import md5, sha1
14 14
 except ImportError:
15
-	from md5 import md5
16
-	import sha as sha1
15
+    from md5 import md5
16
+    import sha as sha1
17 17
 import hmac
18 18
 import base64
19 19
 import errno
... ...
@@ -24,357 +24,359 @@ import Config
24 24
 import Exceptions
25 25
 
26 26
 try:
27
-	import xml.etree.ElementTree as ET
27
+    import xml.etree.ElementTree as ET
28 28
 except ImportError:
29
-	import elementtree.ElementTree as ET
29
+    import elementtree.ElementTree as ET
30 30
 from xml.parsers.expat import ExpatError
31 31
 
32 32
 __all__ = []
33 33
 def parseNodes(nodes):
34
-	## WARNING: Ignores text nodes from mixed xml/text.
35
-	## For instance <tag1>some text<tag2>other text</tag2></tag1>
36
-	## will ignore the "some text" node
37
-	retval = []
38
-	for node in nodes:
39
-		retval_item = {}
40
-		for child in node.getchildren():
41
-			name = child.tag
42
-			if child.getchildren():
43
-				retval_item[name] = parseNodes([child])
44
-			else:
45
-				retval_item[name] = node.findtext(".//%s" % child.tag)
46
-		retval.append(retval_item)
47
-	return retval
34
+    ## WARNING: Ignores text nodes from mixed xml/text.
35
+    ## For instance <tag1>some text<tag2>other text</tag2></tag1>
36
+    ## will ignore the "some text" node
37
+    retval = []
38
+    for node in nodes:
39
+        retval_item = {}
40
+        for child in node.getchildren():
41
+            name = child.tag
42
+            if child.getchildren():
43
+                retval_item[name] = parseNodes([child])
44
+            else:
45
+                retval_item[name] = node.findtext(".//%s" % child.tag)
46
+        retval.append(retval_item)
47
+    return retval
48 48
 __all__.append("parseNodes")
49 49
 
50 50
 def stripNameSpace(xml):
51
-	"""
52
-	stripNameSpace(xml) -- remove top-level AWS namespace
53
-	"""
54
-	r = re.compile('^(<?[^>]+?>\s?)(<\w+) xmlns=[\'"](http://[^\'"]+)[\'"](.*)', re.MULTILINE)
55
-	if r.match(xml):
56
-		xmlns = r.match(xml).groups()[2]
57
-		xml = r.sub("\\1\\2\\4", xml)
58
-	else:
59
-		xmlns = None
60
-	return xml, xmlns
51
+    """
52
+    stripNameSpace(xml) -- remove top-level AWS namespace
53
+    """
54
+    r = re.compile('^(<?[^>]+?>\s?)(<\w+) xmlns=[\'"](http://[^\'"]+)[\'"](.*)', re.MULTILINE)
55
+    if r.match(xml):
56
+        xmlns = r.match(xml).groups()[2]
57
+        xml = r.sub("\\1\\2\\4", xml)
58
+    else:
59
+        xmlns = None
60
+    return xml, xmlns
61 61
 __all__.append("stripNameSpace")
62 62
 
63 63
 def getTreeFromXml(xml):
64
-	xml, xmlns = stripNameSpace(xml)
65
-	try:
66
-		tree = ET.fromstring(xml)
67
-		if xmlns:
68
-			tree.attrib['xmlns'] = xmlns
69
-		return tree
70
-	except ExpatError, e:
71
-		error(e)
72
-		raise Exceptions.ParameterError("Bucket contains invalid filenames. Please run: s3cmd fixbucket s3://your-bucket/")
64
+    xml, xmlns = stripNameSpace(xml)
65
+    try:
66
+        tree = ET.fromstring(xml)
67
+        if xmlns:
68
+            tree.attrib['xmlns'] = xmlns
69
+        return tree
70
+    except ExpatError, e:
71
+        error(e)
72
+        raise Exceptions.ParameterError("Bucket contains invalid filenames. Please run: s3cmd fixbucket s3://your-bucket/")
73 73
 __all__.append("getTreeFromXml")
74
-	
74
+
75 75
 def getListFromXml(xml, node):
76
-	tree = getTreeFromXml(xml)
77
-	nodes = tree.findall('.//%s' % (node))
78
-	return parseNodes(nodes)
76
+    tree = getTreeFromXml(xml)
77
+    nodes = tree.findall('.//%s' % (node))
78
+    return parseNodes(nodes)
79 79
 __all__.append("getListFromXml")
80 80
 
81 81
 def getDictFromTree(tree):
82
-	ret_dict = {}
83
-	for child in tree.getchildren():
84
-		if child.getchildren():
85
-			## Complex-type child. Recurse
86
-			content = getDictFromTree(child)
87
-		else:
88
-			content = child.text
89
-		if ret_dict.has_key(child.tag):
90
-			if not type(ret_dict[child.tag]) == list:
91
-				ret_dict[child.tag] = [ret_dict[child.tag]]
92
-			ret_dict[child.tag].append(content or "")
93
-		else:
94
-			ret_dict[child.tag] = content or ""
95
-	return ret_dict
82
+    ret_dict = {}
83
+    for child in tree.getchildren():
84
+        if child.getchildren():
85
+            ## Complex-type child. Recurse
86
+            content = getDictFromTree(child)
87
+        else:
88
+            content = child.text
89
+        if ret_dict.has_key(child.tag):
90
+            if not type(ret_dict[child.tag]) == list:
91
+                ret_dict[child.tag] = [ret_dict[child.tag]]
92
+            ret_dict[child.tag].append(content or "")
93
+        else:
94
+            ret_dict[child.tag] = content or ""
95
+    return ret_dict
96 96
 __all__.append("getDictFromTree")
97 97
 
98 98
 def getTextFromXml(xml, xpath):
99
-	tree = getTreeFromXml(xml)
100
-	if tree.tag.endswith(xpath):
101
-		return tree.text
102
-	else:
103
-		return tree.findtext(xpath)
99
+    tree = getTreeFromXml(xml)
100
+    if tree.tag.endswith(xpath):
101
+        return tree.text
102
+    else:
103
+        return tree.findtext(xpath)
104 104
 __all__.append("getTextFromXml")
105 105
 
106 106
 def getRootTagName(xml):
107
-	tree = getTreeFromXml(xml)
108
-	return tree.tag
107
+    tree = getTreeFromXml(xml)
108
+    return tree.tag
109 109
 __all__.append("getRootTagName")
110 110
 
111 111
 def xmlTextNode(tag_name, text):
112
-	el = ET.Element(tag_name)
113
-	el.text = unicode(text)
114
-	return el
112
+    el = ET.Element(tag_name)
113
+    el.text = unicode(text)
114
+    return el
115 115
 __all__.append("xmlTextNode")
116 116
 
117 117
 def appendXmlTextNode(tag_name, text, parent):
118
-	"""
119
-	Creates a new <tag_name> Node and sets
120
-	its content to 'text'. Then appends the
121
-	created Node to the 'parent' element.
122
-	Returns the newly created Node.
123
-	"""
124
-	el = xmlTextNode(tag_name, text)
125
-	parent.append(el)
126
-	return el
118
+    """
119
+    Creates a new <tag_name> Node and sets
120
+    its content to 'text'. Then appends the
121
+    created Node to the 'parent' element.
122
+    Returns the newly created Node.
123
+    """
124
+    el = xmlTextNode(tag_name, text)
125
+    parent.append(el)
126
+    return el
127 127
 __all__.append("appendXmlTextNode")
128 128
 
129 129
 def dateS3toPython(date):
130
-	date = re.compile("(\.\d*)?Z").sub(".000Z", date)
131
-	return time.strptime(date, "%Y-%m-%dT%H:%M:%S.000Z")
130
+    date = re.compile("(\.\d*)?Z").sub(".000Z", date)
131
+    return time.strptime(date, "%Y-%m-%dT%H:%M:%S.000Z")
132 132
 __all__.append("dateS3toPython")
133 133
 
134 134
 def dateS3toUnix(date):
135
-	## FIXME: This should be timezone-aware.
136
-	## Currently the argument to strptime() is GMT but mktime() 
137
-	## treats it as "localtime". Anyway...
138
-	return time.mktime(dateS3toPython(date))
135
+    ## FIXME: This should be timezone-aware.
136
+    ## Currently the argument to strptime() is GMT but mktime()
137
+    ## treats it as "localtime". Anyway...
138
+    return time.mktime(dateS3toPython(date))
139 139
 __all__.append("dateS3toUnix")
140 140
 
141 141
 def dateRFC822toPython(date):
142
-	return rfc822.parsedate(date)
142
+    return rfc822.parsedate(date)
143 143
 __all__.append("dateRFC822toPython")
144 144
 
145 145
 def dateRFC822toUnix(date):
146
-	return time.mktime(dateRFC822toPython(date))
146
+    return time.mktime(dateRFC822toPython(date))
147 147
 __all__.append("dateRFC822toUnix")
148 148
 
149 149
 def formatSize(size, human_readable = False, floating_point = False):
150
-	size = floating_point and float(size) or int(size)
151
-	if human_readable:
152
-		coeffs = ['k', 'M', 'G', 'T']
153
-		coeff = ""
154
-		while size > 2048:
155
-			size /= 1024
156
-			coeff = coeffs.pop(0)
157
-		return (size, coeff)
158
-	else:
159
-		return (size, "")
150
+    size = floating_point and float(size) or int(size)
151
+    if human_readable:
152
+        coeffs = ['k', 'M', 'G', 'T']
153
+        coeff = ""
154
+        while size > 2048:
155
+            size /= 1024
156
+            coeff = coeffs.pop(0)
157
+        return (size, coeff)
158
+    else:
159
+        return (size, "")
160 160
 __all__.append("formatSize")
161 161
 
162 162
 def formatDateTime(s3timestamp):
163
-	return time.strftime("%Y-%m-%d %H:%M", dateS3toPython(s3timestamp))
163
+    return time.strftime("%Y-%m-%d %H:%M", dateS3toPython(s3timestamp))
164 164
 __all__.append("formatDateTime")
165 165
 
166 166
 def convertTupleListToDict(list):
167
-	retval = {}
168
-	for tuple in list:
169
-		retval[tuple[0]] = tuple[1]
170
-	return retval
167
+    retval = {}
168
+    for tuple in list:
169
+        retval[tuple[0]] = tuple[1]
170
+    return retval
171 171
 __all__.append("convertTupleListToDict")
172 172
 
173 173
 _rnd_chars = string.ascii_letters+string.digits
174 174
 _rnd_chars_len = len(_rnd_chars)
175 175
 def rndstr(len):
176
-	retval = ""
177
-	while len > 0:
178
-		retval += _rnd_chars[random.randint(0, _rnd_chars_len-1)]
179
-		len -= 1
180
-	return retval
176
+    retval = ""
177
+    while len > 0:
178
+        retval += _rnd_chars[random.randint(0, _rnd_chars_len-1)]
179
+        len -= 1
180
+    return retval
181 181
 __all__.append("rndstr")
182 182
 
183 183
 def mktmpsomething(prefix, randchars, createfunc):
184
-	old_umask = os.umask(0077)
185
-	tries = 5
186
-	while tries > 0:
187
-		dirname = prefix + rndstr(randchars)
188
-		try:
189
-			createfunc(dirname)
190
-			break
191
-		except OSError, e:
192
-			if e.errno != errno.EEXIST:
193
-				os.umask(old_umask)
194
-				raise
195
-		tries -= 1
196
-
197
-	os.umask(old_umask)
198
-	return dirname
184
+    old_umask = os.umask(0077)
185
+    tries = 5
186
+    while tries > 0:
187
+        dirname = prefix + rndstr(randchars)
188
+        try:
189
+            createfunc(dirname)
190
+            break
191
+        except OSError, e:
192
+            if e.errno != errno.EEXIST:
193
+                os.umask(old_umask)
194
+                raise
195
+        tries -= 1
196
+
197
+    os.umask(old_umask)
198
+    return dirname
199 199
 __all__.append("mktmpsomething")
200 200
 
201 201
 def mktmpdir(prefix = "/tmp/tmpdir-", randchars = 10):
202
-	return mktmpsomething(prefix, randchars, os.mkdir)
202
+    return mktmpsomething(prefix, randchars, os.mkdir)
203 203
 __all__.append("mktmpdir")
204 204
 
205 205
 def mktmpfile(prefix = "/tmp/tmpfile-", randchars = 20):
206
-	createfunc = lambda filename : os.close(os.open(filename, os.O_CREAT | os.O_EXCL))
207
-	return mktmpsomething(prefix, randchars, createfunc)
206
+    createfunc = lambda filename : os.close(os.open(filename, os.O_CREAT | os.O_EXCL))
207
+    return mktmpsomething(prefix, randchars, createfunc)
208 208
 __all__.append("mktmpfile")
209 209
 
210 210
 def hash_file_md5(filename):
211
-	h = md5()
212
-	f = open(filename, "rb")
213
-	while True:
214
-		# Hash 32kB chunks
215
-		data = f.read(32*1024)
216
-		if not data:
217
-			break
218
-		h.update(data)
219
-	f.close()
220
-	return h.hexdigest()
211
+    h = md5()
212
+    f = open(filename, "rb")
213
+    while True:
214
+        # Hash 32kB chunks
215
+        data = f.read(32*1024)
216
+        if not data:
217
+            break
218
+        h.update(data)
219
+    f.close()
220
+    return h.hexdigest()
221 221
 __all__.append("hash_file_md5")
222 222
 
223 223
 def mkdir_with_parents(dir_name):
224
-	"""
225
-	mkdir_with_parents(dir_name)
226
-	
227
-	Create directory 'dir_name' with all parent directories
228
-
229
-	Returns True on success, False otherwise.
230
-	"""
231
-	pathmembers = dir_name.split(os.sep)
232
-	tmp_stack = []
233
-	while pathmembers and not os.path.isdir(os.sep.join(pathmembers)):
234
-		tmp_stack.append(pathmembers.pop())
235
-	while tmp_stack:
236
-		pathmembers.append(tmp_stack.pop())
237
-		cur_dir = os.sep.join(pathmembers)
238
-		try:
239
-			debug("mkdir(%s)" % cur_dir)
240
-			os.mkdir(cur_dir)
241
-		except (OSError, IOError), e:
242
-			warning("%s: can not make directory: %s" % (cur_dir, e.strerror))
243
-			return False
244
-		except Exception, e:
245
-			warning("%s: %s" % (cur_dir, e))
246
-			return False
247
-	return True
224
+    """
225
+    mkdir_with_parents(dir_name)
226
+
227
+    Create directory 'dir_name' with all parent directories
228
+
229
+    Returns True on success, False otherwise.
230
+    """
231
+    pathmembers = dir_name.split(os.sep)
232
+    tmp_stack = []
233
+    while pathmembers and not os.path.isdir(os.sep.join(pathmembers)):
234
+        tmp_stack.append(pathmembers.pop())
235
+    while tmp_stack:
236
+        pathmembers.append(tmp_stack.pop())
237
+        cur_dir = os.sep.join(pathmembers)
238
+        try:
239
+            debug("mkdir(%s)" % cur_dir)
240
+            os.mkdir(cur_dir)
241
+        except (OSError, IOError), e:
242
+            warning("%s: can not make directory: %s" % (cur_dir, e.strerror))
243
+            return False
244
+        except Exception, e:
245
+            warning("%s: %s" % (cur_dir, e))
246
+            return False
247
+    return True
248 248
 __all__.append("mkdir_with_parents")
249 249
 
250 250
 def unicodise(string, encoding = None, errors = "replace"):
251
-	"""
252
-	Convert 'string' to Unicode or raise an exception.
253
-	"""
254
-
255
-	if not encoding:
256
-		encoding = Config.Config().encoding
257
-
258
-	if type(string) == unicode:
259
-		return string
260
-	debug("Unicodising %r using %s" % (string, encoding))
261
-	try:
262
-		return string.decode(encoding, errors)
263
-	except UnicodeDecodeError:
264
-		raise UnicodeDecodeError("Conversion to unicode failed: %r" % string)
251
+    """
252
+    Convert 'string' to Unicode or raise an exception.
253
+    """
254
+
255
+    if not encoding:
256
+        encoding = Config.Config().encoding
257
+
258
+    if type(string) == unicode:
259
+        return string
260
+    debug("Unicodising %r using %s" % (string, encoding))
261
+    try:
262
+        return string.decode(encoding, errors)
263
+    except UnicodeDecodeError:
264
+        raise UnicodeDecodeError("Conversion to unicode failed: %r" % string)
265 265
 __all__.append("unicodise")
266 266
 
267 267
 def deunicodise(string, encoding = None, errors = "replace"):
268
-	"""
269
-	Convert unicode 'string' to <type str>, by default replacing
270
-	all invalid characters with '?' or raise an exception.
271
-	"""
272
-
273
-	if not encoding:
274
-		encoding = Config.Config().encoding
275
-
276
-	if type(string) != unicode:
277
-		return str(string)
278
-	debug("DeUnicodising %r using %s" % (string, encoding))
279
-	try:
280
-		return string.encode(encoding, errors)
281
-	except UnicodeEncodeError:
282
-		raise UnicodeEncodeError("Conversion from unicode failed: %r" % string)
268
+    """
269
+    Convert unicode 'string' to <type str>, by default replacing
270
+    all invalid characters with '?' or raise an exception.
271
+    """
272
+
273
+    if not encoding:
274
+        encoding = Config.Config().encoding
275
+
276
+    if type(string) != unicode:
277
+        return str(string)
278
+    debug("DeUnicodising %r using %s" % (string, encoding))
279
+    try:
280
+        return string.encode(encoding, errors)
281
+    except UnicodeEncodeError:
282
+        raise UnicodeEncodeError("Conversion from unicode failed: %r" % string)
283 283
 __all__.append("deunicodise")
284 284
 
285 285
 def unicodise_safe(string, encoding = None):
286
-	"""
287
-	Convert 'string' to Unicode according to current encoding 
288
-	and replace all invalid characters with '?'
289
-	"""
286
+    """
287
+    Convert 'string' to Unicode according to current encoding
288
+    and replace all invalid characters with '?'
289
+    """
290 290
 
291
-	return unicodise(deunicodise(string, encoding), encoding).replace(u'\ufffd', '?')
291
+    return unicodise(deunicodise(string, encoding), encoding).replace(u'\ufffd', '?')
292 292
 __all__.append("unicodise_safe")
293 293
 
294 294
 def replace_nonprintables(string):
295
-	"""
296
-	replace_nonprintables(string)
297
-
298
-	Replaces all non-printable characters 'ch' in 'string'
299
-	where ord(ch) <= 31 with ^@, ^A, ... ^_ and chr(127) with ^?
300
-	"""
301
-	new_string = ""
302
-	modified = 0
303
-	for c in string:
304
-		o = ord(c)
305
-		if (o <= 31):
306
-			new_string += "^" + chr(ord('@') + o)
307
-			modified += 1
308
-		elif (o == 127):
309
-			new_string += "^?"
310
-			modified += 1
311
-		else:
312
-			new_string += c
313
-	if modified and Config.Config().urlencoding_mode != "fixbucket":
314
-		warning("%d non-printable characters replaced in: %s" % (modified, new_string))
315
-	return new_string
295
+    """
296
+    replace_nonprintables(string)
297
+
298
+    Replaces all non-printable characters 'ch' in 'string'
299
+    where ord(ch) <= 31 with ^@, ^A, ... ^_ and chr(127) with ^?
300
+    """
301
+    new_string = ""
302
+    modified = 0
303
+    for c in string:
304
+        o = ord(c)
305
+        if (o <= 31):
306
+            new_string += "^" + chr(ord('@') + o)
307
+            modified += 1
308
+        elif (o == 127):
309
+            new_string += "^?"
310
+            modified += 1
311
+        else:
312
+            new_string += c
313
+    if modified and Config.Config().urlencoding_mode != "fixbucket":
314
+        warning("%d non-printable characters replaced in: %s" % (modified, new_string))
315
+    return new_string
316 316
 __all__.append("replace_nonprintables")
317 317
 
318 318
 def sign_string(string_to_sign):
319
-	#debug("string_to_sign: %s" % string_to_sign)
320
-	signature = base64.encodestring(hmac.new(Config.Config().secret_key, string_to_sign, sha1).digest()).strip()
321
-	#debug("signature: %s" % signature)
322
-	return signature
319
+    #debug("string_to_sign: %s" % string_to_sign)
320
+    signature = base64.encodestring(hmac.new(Config.Config().secret_key, string_to_sign, sha1).digest()).strip()
321
+    #debug("signature: %s" % signature)
322
+    return signature
323 323
 __all__.append("sign_string")
324 324
 
325 325
 def check_bucket_name(bucket, dns_strict = True):
326
-	if dns_strict:
327
-		invalid = re.search("([^a-z0-9\.-])", bucket)
328
-		if invalid:
329
-			raise Exceptions.ParameterError("Bucket name '%s' contains disallowed character '%s'. The only supported ones are: lowercase us-ascii letters (a-z), digits (0-9), dot (.) and hyphen (-)." % (bucket, invalid.groups()[0]))
330
-	else:
331
-		invalid = re.search("([^A-Za-z0-9\._-])", bucket)
332
-		if invalid:
333
-			raise Exceptions.ParameterError("Bucket name '%s' contains disallowed character '%s'. The only supported ones are: us-ascii letters (a-z, A-Z), digits (0-9), dot (.), hyphen (-) and underscore (_)." % (bucket, invalid.groups()[0]))
334
-
335
-	if len(bucket) < 3:
336
-		raise Exceptions.ParameterError("Bucket name '%s' is too short (min 3 characters)" % bucket)
337
-	if len(bucket) > 255:
338
-		raise Exceptions.ParameterError("Bucket name '%s' is too long (max 255 characters)" % bucket)
339
-	if dns_strict:
340
-		if len(bucket) > 63:
341
-			raise Exceptions.ParameterError("Bucket name '%s' is too long (max 63 characters)" % bucket)
342
-		if re.search("-\.", bucket):
343
-			raise Exceptions.ParameterError("Bucket name '%s' must not contain sequence '-.' for DNS compatibility" % bucket)
344
-		if re.search("\.\.", bucket):
345
-			raise Exceptions.ParameterError("Bucket name '%s' must not contain sequence '..' for DNS compatibility" % bucket)
346
-		if not re.search("^[0-9a-z]", bucket):
347
-			raise Exceptions.ParameterError("Bucket name '%s' must start with a letter or a digit" % bucket)
348
-		if not re.search("[0-9a-z]$", bucket):
349
-			raise Exceptions.ParameterError("Bucket name '%s' must end with a letter or a digit" % bucket)
350
-	return True
326
+    if dns_strict:
327
+        invalid = re.search("([^a-z0-9\.-])", bucket)
328
+        if invalid:
329
+            raise Exceptions.ParameterError("Bucket name '%s' contains disallowed character '%s'. The only supported ones are: lowercase us-ascii letters (a-z), digits (0-9), dot (.) and hyphen (-)." % (bucket, invalid.groups()[0]))
330
+    else:
331
+        invalid = re.search("([^A-Za-z0-9\._-])", bucket)
332
+        if invalid:
333
+            raise Exceptions.ParameterError("Bucket name '%s' contains disallowed character '%s'. The only supported ones are: us-ascii letters (a-z, A-Z), digits (0-9), dot (.), hyphen (-) and underscore (_)." % (bucket, invalid.groups()[0]))
334
+
335
+    if len(bucket) < 3:
336
+        raise Exceptions.ParameterError("Bucket name '%s' is too short (min 3 characters)" % bucket)
337
+    if len(bucket) > 255:
338
+        raise Exceptions.ParameterError("Bucket name '%s' is too long (max 255 characters)" % bucket)
339
+    if dns_strict:
340
+        if len(bucket) > 63:
341
+            raise Exceptions.ParameterError("Bucket name '%s' is too long (max 63 characters)" % bucket)
342
+        if re.search("-\.", bucket):
343
+            raise Exceptions.ParameterError("Bucket name '%s' must not contain sequence '-.' for DNS compatibility" % bucket)
344
+        if re.search("\.\.", bucket):
345
+            raise Exceptions.ParameterError("Bucket name '%s' must not contain sequence '..' for DNS compatibility" % bucket)
346
+        if not re.search("^[0-9a-z]", bucket):
347
+            raise Exceptions.ParameterError("Bucket name '%s' must start with a letter or a digit" % bucket)
348
+        if not re.search("[0-9a-z]$", bucket):
349
+            raise Exceptions.ParameterError("Bucket name '%s' must end with a letter or a digit" % bucket)
350
+    return True
351 351
 __all__.append("check_bucket_name")
352 352
 
353 353
 def check_bucket_name_dns_conformity(bucket):
354
-	try:
355
-		return check_bucket_name(bucket, dns_strict = True)
356
-	except Exceptions.ParameterError:
357
-		return False
354
+    try:
355
+        return check_bucket_name(bucket, dns_strict = True)
356
+    except Exceptions.ParameterError:
357
+        return False
358 358
 __all__.append("check_bucket_name_dns_conformity")
359 359
 
360 360
 def getBucketFromHostname(hostname):
361
-	"""
362
-	bucket, success = getBucketFromHostname(hostname)
361
+    """
362
+    bucket, success = getBucketFromHostname(hostname)
363 363
 
364
-	Only works for hostnames derived from bucket names
365
-	using Config.host_bucket pattern.
364
+    Only works for hostnames derived from bucket names
365
+    using Config.host_bucket pattern.
366 366
 
367
-	Returns bucket name and a boolean success flag.
368
-	"""
367
+    Returns bucket name and a boolean success flag.
368
+    """
369 369
 
370
-	# Create RE pattern from Config.host_bucket
371
-	pattern = Config.Config().host_bucket % { 'bucket' : '(?P<bucket>.*)' }
372
-	m = re.match(pattern, hostname)
373
-	if not m:
374
-		return (hostname, False)
375
-	return m.groups()[0], True
370
+    # Create RE pattern from Config.host_bucket
371
+    pattern = Config.Config().host_bucket % { 'bucket' : '(?P<bucket>.*)' }
372
+    m = re.match(pattern, hostname)
373
+    if not m:
374
+        return (hostname, False)
375
+    return m.groups()[0], True
376 376
 __all__.append("getBucketFromHostname")
377 377
 
378 378
 def getHostnameFromBucket(bucket):
379
-	return Config.Config().host_bucket % { 'bucket' : bucket }
379
+    return Config.Config().host_bucket % { 'bucket' : bucket }
380 380
 __all__.append("getHostnameFromBucket")
381
+
382
+# vim:et:ts=4:sts=4:ai
... ...
@@ -24,25 +24,25 @@ exclude_tests = []
24 24
 verbose = False
25 25
 
26 26
 if os.name == "posix":
27
-	have_wget = True
27
+    have_wget = True
28 28
 elif os.name == "nt":
29
-	have_wget = False
29
+    have_wget = False
30 30
 else:
31
-	print "Unknown platform: %s" % os.name
32
-	sys.exit(1)
31
+    print "Unknown platform: %s" % os.name
32
+    sys.exit(1)
33 33
 
34 34
 ## Unpack testsuite/ directory
35 35
 if not os.path.isdir('testsuite') and os.path.isfile('testsuite.tar.gz'):
36
-	os.system("tar -xz -f testsuite.tar.gz")
36
+    os.system("tar -xz -f testsuite.tar.gz")
37 37
 if not os.path.isdir('testsuite'):
38
-	print "Something went wrong while unpacking testsuite.tar.gz"
39
-	sys.exit(1)
38
+    print "Something went wrong while unpacking testsuite.tar.gz"
39
+    sys.exit(1)
40 40
 
41 41
 os.system("tar -xf testsuite/checksum.tar -C testsuite")
42 42
 if not os.path.isfile('testsuite/checksum/cksum33.txt'):
43
-	print "Something went wrong while unpacking testsuite/checksum.tar"
44
-	sys.exit(1)
45
-	
43
+    print "Something went wrong while unpacking testsuite/checkum.tar"
44
+    sys.exit(1)
45
+
46 46
 ## Fix up permissions for permission-denied tests
47 47
 os.chmod("testsuite/permission-tests/permission-denied-dir", 0444)
48 48
 os.chmod("testsuite/permission-tests/permission-denied.txt", 0000)
... ...
@@ -54,200 +54,200 @@ patterns['GBK'] = u"12月31日/1-特色條目"
54 54
 
55 55
 encoding = locale.getpreferredencoding()
56 56
 if not encoding:
57
-	print "Guessing current system encoding failed. Consider setting the $LANG variable."
58
-	sys.exit(1)
57
+    print "Guessing current system encoding failed. Consider setting $LANG variable."
58
+    sys.exit(1)
59 59
 else:
60
-	print "System encoding: " + encoding
60
+    print "System encoding: " + encoding
61 61
 
62 62
 have_encoding = os.path.isdir('testsuite/encodings/' + encoding)
63 63
 if not have_encoding and os.path.isfile('testsuite/encodings/%s.tar.gz' % encoding):
64
-	os.system("tar xvz -C testsuite/encodings -f testsuite/encodings/%s.tar.gz" % encoding)
65
-	have_encoding = os.path.isdir('testsuite/encodings/' + encoding)
64
+    os.system("tar xvz -C testsuite/encodings -f testsuite/encodings/%s.tar.gz" % encoding)
65
+    have_encoding = os.path.isdir('testsuite/encodings/' + encoding)
66 66
 
67 67
 if have_encoding:
68
-	#enc_base_remote = "%s/xyz/%s/" % (pbucket(1), encoding)
69
-	enc_pattern = patterns[encoding]
68
+    #enc_base_remote = "%s/xyz/%s/" % (pbucket(1), encoding)
69
+    enc_pattern = patterns[encoding]
70 70
 else:
71
-	print encoding + " specific files not found."
71
+    print encoding + " specific files not found."
72 72
 
73 73
 if not os.path.isdir('testsuite/crappy-file-name'):
74
-	os.system("tar xvz -C testsuite -f testsuite/crappy-file-name.tar.gz")
75
-	# TODO: also unpack if the tarball is newer than the directory timestamp
76
-	#       for instance when a new version was pulled from SVN.
74
+    os.system("tar xvz -C testsuite -f testsuite/crappy-file-name.tar.gz")
75
+    # TODO: also unpack if the tarball is newer than the directory timestamp
76
+    #       for instance when a new version was pulled from SVN.
77 77
 
78 78
 def test(label, cmd_args = [], retcode = 0, must_find = [], must_not_find = [], must_find_re = [], must_not_find_re = []):
79
-	def command_output():
80
-		print "----"
81
-		print " ".join([arg.find(" ")>=0 and "'%s'" % arg or arg for arg in cmd_args])
82
-		print "----"
83
-		print stdout
84
-		print "----"
85
-
86
-	def failure(message = ""):
87
-		global count_fail
88
-		if message:
89
-			message = "  (%r)" % message
90
-		print "\x1b[31;1mFAIL%s\x1b[0m" % (message)
91
-		count_fail += 1
92
-		command_output()
93
-		#return 1
94
-		sys.exit(1)
95
-	def success(message = ""):
96
-		global count_pass
97
-		if message:
98
-			message = "  (%r)" % message
99
-		print "\x1b[32;1mOK\x1b[0m%s" % (message)
100
-		count_pass += 1
101
-		if verbose:
102
-			command_output()
103
-		return 0
104
-	def skip(message = ""):
105
-		global count_skip
106
-		if message:
107
-			message = "  (%r)" % message
108
-		print "\x1b[33;1mSKIP\x1b[0m%s" % (message)
109
-		count_skip += 1
110
-		return 0
111
-	def compile_list(_list, regexps = False):
112
-		if regexps == False:
113
-			_list = [re.escape(item.encode(encoding, "replace")) for item in _list]
114
-
115
-		return [re.compile(item, re.MULTILINE) for item in _list]
116
-
117
-	global test_counter
118
-	test_counter += 1
119
-	print ("%3d  %s " % (test_counter, label)).ljust(30, "."),
120
-	sys.stdout.flush()
121
-
122
-	if run_tests.count(test_counter) == 0 or exclude_tests.count(test_counter) > 0:
123
-		return skip()
124
-
125
-	if not cmd_args:
126
-		return skip()
127
-
128
-	p = Popen(cmd_args, stdout = PIPE, stderr = STDOUT, universal_newlines = True)
129
-	stdout, stderr = p.communicate()
130
-	if retcode != p.returncode:
131
-		return failure("retcode: %d, expected: %d" % (p.returncode, retcode))
132
-
133
-	if type(must_find) not in [ list, tuple ]: must_find = [must_find]
134
-	if type(must_find_re) not in [ list, tuple ]: must_find_re = [must_find_re]
135
-	if type(must_not_find) not in [ list, tuple ]: must_not_find = [must_not_find]
136
-	if type(must_not_find_re) not in [ list, tuple ]: must_not_find_re = [must_not_find_re]
137
-
138
-	find_list = []
139
-	find_list.extend(compile_list(must_find))
140
-	find_list.extend(compile_list(must_find_re, regexps = True))
141
-	find_list_patterns = []
142
-	find_list_patterns.extend(must_find)
143
-	find_list_patterns.extend(must_find_re)
144
-
145
-	not_find_list = []
146
-	not_find_list.extend(compile_list(must_not_find))
147
-	not_find_list.extend(compile_list(must_not_find_re, regexps = True))
148
-	not_find_list_patterns = []
149
-	not_find_list_patterns.extend(must_not_find)
150
-	not_find_list_patterns.extend(must_not_find_re)
151
-
152
-	for index in range(len(find_list)):
153
-		match = find_list[index].search(stdout)
154
-		if not match:
155
-			return failure("pattern not found: %s" % find_list_patterns[index])
156
-	for index in range(len(not_find_list)):
157
-		match = not_find_list[index].search(stdout)
158
-		if match:
159
-			return failure("pattern found: %s (match: %s)" % (not_find_list_patterns[index], match.group(0)))
160
-
161
-	return success()
79
+    def command_output():
80
+        print "----"
81
+        print " ".join([arg.find(" ")>=0 and "'%s'" % arg or arg for arg in cmd_args])
82
+        print "----"
83
+        print stdout
84
+        print "----"
85
+
86
+    def failure(message = ""):
87
+        global count_fail
88
+        if message:
89
+            message = "  (%r)" % message
90
+        print "\x1b[31;1mFAIL%s\x1b[0m" % (message)
91
+        count_fail += 1
92
+        command_output()
93
+        #return 1
94
+        sys.exit(1)
95
+    def success(message = ""):
96
+        global count_pass
97
+        if message:
98
+            message = "  (%r)" % message
99
+        print "\x1b[32;1mOK\x1b[0m%s" % (message)
100
+        count_pass += 1
101
+        if verbose:
102
+            command_output()
103
+        return 0
104
+    def skip(message = ""):
105
+        global count_skip
106
+        if message:
107
+            message = "  (%r)" % message
108
+        print "\x1b[33;1mSKIP\x1b[0m%s" % (message)
109
+        count_skip += 1
110
+        return 0
111
+    def compile_list(_list, regexps = False):
112
+        if regexps == False:
113
+            _list = [re.escape(item.encode(encoding, "replace")) for item in _list]
114
+
115
+        return [re.compile(item, re.MULTILINE) for item in _list]
116
+
117
+    global test_counter
118
+    test_counter += 1
119
+    print ("%3d  %s " % (test_counter, label)).ljust(30, "."),
120
+    sys.stdout.flush()
121
+
122
+    if run_tests.count(test_counter) == 0 or exclude_tests.count(test_counter) > 0:
123
+        return skip()
124
+
125
+    if not cmd_args:
126
+        return skip()
127
+
128
+    p = Popen(cmd_args, stdout = PIPE, stderr = STDOUT, universal_newlines = True)
129
+    stdout, stderr = p.communicate()
130
+    if retcode != p.returncode:
131
+        return failure("retcode: %d, expected: %d" % (p.returncode, retcode))
132
+
133
+    if type(must_find) not in [ list, tuple ]: must_find = [must_find]
134
+    if type(must_find_re) not in [ list, tuple ]: must_find_re = [must_find_re]
135
+    if type(must_not_find) not in [ list, tuple ]: must_not_find = [must_not_find]
136
+    if type(must_not_find_re) not in [ list, tuple ]: must_not_find_re = [must_not_find_re]
137
+
138
+    find_list = []
139
+    find_list.extend(compile_list(must_find))
140
+    find_list.extend(compile_list(must_find_re, regexps = True))
141
+    find_list_patterns = []
142
+    find_list_patterns.extend(must_find)
143
+    find_list_patterns.extend(must_find_re)
144
+
145
+    not_find_list = []
146
+    not_find_list.extend(compile_list(must_not_find))
147
+    not_find_list.extend(compile_list(must_not_find_re, regexps = True))
148
+    not_find_list_patterns = []
149
+    not_find_list_patterns.extend(must_not_find)
150
+    not_find_list_patterns.extend(must_not_find_re)
151
+
152
+    for index in range(len(find_list)):
153
+        match = find_list[index].search(stdout)
154
+        if not match:
155
+            return failure("pattern not found: %s" % find_list_patterns[index])
156
+    for index in range(len(not_find_list)):
157
+        match = not_find_list[index].search(stdout)
158
+        if match:
159
+            return failure("pattern found: %s (match: %s)" % (not_find_list_patterns[index], match.group(0)))
160
+
161
+    return success()
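
For illustration, the harness can drive any command -- a throwaway example, not part of the real suite:

    test("Echo works", ['echo', 'Hello'], must_find = ['Hello'])
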
162 162
 
163 163
 def test_s3cmd(label, cmd_args = [], **kwargs):
164
-	if not cmd_args[0].endswith("s3cmd"):
165
-		cmd_args.insert(0, "python")
166
-		cmd_args.insert(1, "s3cmd")
164
+    if not cmd_args[0].endswith("s3cmd"):
165
+        cmd_args.insert(0, "python")
166
+        cmd_args.insert(1, "s3cmd")
167 167
 
168
-	return test(label, cmd_args, **kwargs)
168
+    return test(label, cmd_args, **kwargs)
169 169
 
170 170
 def test_mkdir(label, dir_name):
171
-	if os.name in ("posix", "nt"):
172
-		cmd = ['mkdir', '-p']
173
-	else:
174
-		print "Unknown platform: %s" % os.name
175
-		sys.exit(1)
176
-	cmd.append(dir_name)
177
-	return test(label, cmd)
171
+    if os.name in ("posix", "nt"):
172
+        cmd = ['mkdir', '-p']
173
+    else:
174
+        print "Unknown platform: %s" % os.name
175
+        sys.exit(1)
176
+    cmd.append(dir_name)
177
+    return test(label, cmd)
178 178
 
179 179
 def test_rmdir(label, dir_name):
180
-	if os.path.isdir(dir_name):
181
-		if os.name == "posix":
182
-			cmd = ['rm', '-rf']
183
-		elif os.name == "nt":
184
-			cmd = ['rmdir', '/s/q']
185
-		else:
186
-			print "Unknown platform: %s" % os.name
187
-			sys.exit(1)
188
-		cmd.append(dir_name)
189
-		return test(label, cmd)
190
-	else:
191
-		return test(label, [])
180
+    if os.path.isdir(dir_name):
181
+        if os.name == "posix":
182
+            cmd = ['rm', '-rf']
183
+        elif os.name == "nt":
184
+            cmd = ['rmdir', '/s/q']
185
+        else:
186
+            print "Unknown platform: %s" % os.name
187
+            sys.exit(1)
188
+        cmd.append(dir_name)
189
+        return test(label, cmd)
190
+    else:
191
+        return test(label, [])
192 192
 
193 193
 def test_flushdir(label, dir_name):
194
-	test_rmdir(label + "(rm)", dir_name)
195
-	return test_mkdir(label + "(mk)", dir_name)
194
+    test_rmdir(label + "(rm)", dir_name)
195
+    return test_mkdir(label + "(mk)", dir_name)
196 196
 
197 197
 def test_copy(label, src_file, dst_file):
198
-	if os.name == "posix":
199
-		cmd = ['cp', '-f']
200
-	elif os.name == "nt":
201
-		cmd = ['copy']
202
-	else:
203
-		print "Unknown platform: %s" % os.name
204
-		sys.exit(1)
205
-	cmd.append(src_file)
206
-	cmd.append(dst_file)
207
-	return test(label, cmd)
198
+    if os.name == "posix":
199
+        cmd = ['cp', '-f']
200
+    elif os.name == "nt":
201
+        cmd = ['copy']
202
+    else:
203
+        print "Unknown platform: %s" % os.name
204
+        sys.exit(1)
205
+    cmd.append(src_file)
206
+    cmd.append(dst_file)
207
+    return test(label, cmd)
208 208
 
209 209
 try:
210
-	pwd = pwd.getpwuid(os.getuid())
211
-	bucket_prefix = "%s.%s-" % (pwd.pw_name, pwd.pw_uid)
210
+    pwd = pwd.getpwuid(os.getuid())
211
+    bucket_prefix = "%s.%s-" % (pwd.pw_name, pwd.pw_uid)
212 212
 except:
213
-	bucket_prefix = ''
213
+    bucket_prefix = ''
214 214
 print "Using bucket prefix: '%s'" % bucket_prefix
215 215
 
216 216
 argv = sys.argv[1:]
217 217
 while argv:
218
-	arg = argv.pop(0)
219
-	if arg.startswith('--bucket-prefix='):
220
-		print "Usage: '--bucket-prefix PREFIX', not '--bucket-prefix=PREFIX'"
221
-		sys.exit(0)
222
-	if arg in ("-h", "--help"):
223
-		print "%s A B K..O -N" % sys.argv[0]
224
-		print "Run tests number A, B and K through to O, except for N"
225
-		sys.exit(0)
226
-	if arg in ("-l", "--list"):
227
-		exclude_tests = range(0, 999)
228
-		break
229
-	if arg in ("-v", "--verbose"):
230
-		verbose = True
231
-		continue
232
-	if arg in ("-p", "--bucket-prefix"):
233
-		try:
234
-			bucket_prefix = argv.pop(0)
235
-		except IndexError:
236
-			print "Bucket prefix option must explicitly supply a bucket name prefix"
237
-			sys.exit(0)
238
-		continue
239
-	if arg.find("..") >= 0:
240
-		range_idx = arg.find("..")
241
-		range_start = arg[:range_idx] or 0
242
-		range_end = arg[range_idx+2:] or 999
243
-		run_tests.extend(range(int(range_start), int(range_end) + 1))
244
-	elif arg.startswith("-"):
245
-		exclude_tests.append(int(arg[1:]))
246
-	else:
247
-		run_tests.append(int(arg))
218
+    arg = argv.pop(0)
219
+    if arg.startswith('--bucket-prefix='):
220
+        print "Usage: '--bucket-prefix PREFIX', not '--bucket-prefix=PREFIX'"
221
+        sys.exit(0)
222
+    if arg in ("-h", "--help"):
223
+        print "%s A B K..O -N" % sys.argv[0]
224
+        print "Run tests number A, B and K through to O, except for N"
225
+        sys.exit(0)
226
+    if arg in ("-l", "--list"):
227
+        exclude_tests = range(0, 999)
228
+        break
229
+    if arg in ("-v", "--verbose"):
230
+        verbose = True
231
+        continue
232
+    if arg in ("-p", "--bucket-prefix"):
233
+        try:
234
+            bucket_prefix = argv.pop(0)
235
+        except IndexError:
236
+            print "Bucket prefix option must explicitly supply a bucket name prefix"
237
+            sys.exit(0)
238
+        continue
239
+    if arg.find("..") >= 0:
240
+        range_idx = arg.find("..")
241
+        range_start = arg[:range_idx] or 0
242
+        range_end = arg[range_idx+2:] or 999
243
+        run_tests.extend(range(int(range_start), int(range_end) + 1))
244
+    elif arg.startswith("-"):
245
+        exclude_tests.append(int(arg[1:]))
246
+    else:
247
+        run_tests.append(int(arg))
248 248
 
249 249
 if not run_tests:
250
-	run_tests = range(0, 999)
250
+    run_tests = range(0, 999)
251 251
 
252 252
 # helper functions for generating bucket names
253 253
 def bucket(tail):
... ...
@@ -263,61 +263,61 @@ def pbucket(tail):
263 263
 
264 264
 ## ====== Remove test buckets
265 265
 test_s3cmd("Remove test buckets", ['rb', '-r', pbucket(1), pbucket(2), pbucket(3)],
266
-	must_find = [ "Bucket '%s/' removed" % pbucket(1),
267
-		      "Bucket '%s/' removed" % pbucket(2),
268
-		      "Bucket '%s/' removed" % pbucket(3) ])
266
+    must_find = [ "Bucket '%s/' removed" % pbucket(1),
267
+              "Bucket '%s/' removed" % pbucket(2),
268
+              "Bucket '%s/' removed" % pbucket(3) ])
269 269
 
270 270
 
271 271
 ## ====== Create one bucket (EU)
272
-test_s3cmd("Create one bucket (EU)", ['mb', '--bucket-location=EU', pbucket(1)], 
273
-	must_find = "Bucket '%s/' created" % pbucket(1))
272
+test_s3cmd("Create one bucket (EU)", ['mb', '--bucket-location=EU', pbucket(1)],
273
+    must_find = "Bucket '%s/' created" % pbucket(1))
274 274
 
275 275
 
276 276
 
277 277
 ## ====== Create multiple buckets
278
-test_s3cmd("Create multiple buckets", ['mb', pbucket(2), pbucket(3)], 
279
-	must_find = [ "Bucket '%s/' created" % pbucket(2), "Bucket '%s/' created" % pbucket(3)])
278
+test_s3cmd("Create multiple buckets", ['mb', pbucket(2), pbucket(3)],
279
+    must_find = [ "Bucket '%s/' created" % pbucket(2), "Bucket '%s/' created" % pbucket(3)])
280 280
 
281 281
 
282 282
 ## ====== Invalid bucket name
283
-test_s3cmd("Invalid bucket name", ["mb", "--bucket-location=EU", pbucket('EU')], 
284
-	retcode = 1,
285
-	must_find = "ERROR: Parameter problem: Bucket name '%s' contains disallowed character" % bucket('EU'), 
286
-	must_not_find_re = "Bucket.*created")
283
+test_s3cmd("Invalid bucket name", ["mb", "--bucket-location=EU", pbucket('EU')],
284
+    retcode = 1,
285
+    must_find = "ERROR: Parameter problem: Bucket name '%s' contains disallowed character" % bucket('EU'),
286
+    must_not_find_re = "Bucket.*created")
287 287
 
288 288
 
289 289
 ## ====== Buckets list
290
-test_s3cmd("Buckets list", ["ls"], 
291
-	must_find = [ "autotest-1", "autotest-2", "Autotest-3" ], must_not_find_re = "autotest-EU")
290
+test_s3cmd("Buckets list", ["ls"],
291
+    must_find = [ "autotest-1", "autotest-2", "Autotest-3" ], must_not_find_re = "autotest-EU")
292 292
 
293 293
 
294 294
 ## ====== Sync to S3
295 295
 test_s3cmd("Sync to S3", ['sync', 'testsuite/', pbucket(1) + '/xyz/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings' ],
296
-	must_find = [ "WARNING: 32 non-printable characters replaced in: crappy-file-name/non-printables ^A^B^C^D^E^F^G^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z^[^\^]^^^_^? +-[\]^<>%%\"'#{}`&?.end",
297
-				  "WARNING: File can not be uploaded: testsuite/permission-tests/permission-denied.txt: Permission denied",
298
-	              "stored as '%s/xyz/crappy-file-name/non-printables ^A^B^C^D^E^F^G^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z^[^\^]^^^_^? +-[\\]^<>%%%%\"'#{}`&?.end'" % pbucket(1) ],
299
-	must_not_find_re = [ "demo/", "\.png$", "permission-denied-dir" ])
296
+    must_find = [ "WARNING: 32 non-printable characters replaced in: crappy-file-name/non-printables ^A^B^C^D^E^F^G^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z^[^\^]^^^_^? +-[\]^<>%%\"'#{}`&?.end",
297
+                  "WARNING: File can not be uploaded: testsuite/permission-tests/permission-denied.txt: Permission denied",
298
+                  "stored as '%s/xyz/crappy-file-name/non-printables ^A^B^C^D^E^F^G^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z^[^\^]^^^_^? +-[\\]^<>%%%%\"'#{}`&?.end'" % pbucket(1) ],
299
+    must_not_find_re = [ "demo/", "\.png$", "permission-denied-dir" ])
300 300
 
301 301
 if have_encoding:
302
-	## ====== Sync UTF-8 / GBK / ... to S3
303
-	test_s3cmd("Sync %s to S3" % encoding, ['sync', 'testsuite/encodings/' + encoding, '%s/xyz/encodings/' % pbucket(1), '--exclude', 'demo/*', '--no-encrypt' ],
304
-		must_find = [ u"File 'testsuite/encodings/%(encoding)s/%(pattern)s' stored as '%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s'" % { 'encoding' : encoding, 'pattern' : enc_pattern , 'pbucket' : pbucket(1)} ])
302
+    ## ====== Sync UTF-8 / GBK / ... to S3
303
+    test_s3cmd("Sync %s to S3" % encoding, ['sync', 'testsuite/encodings/' + encoding, '%s/xyz/encodings/' % pbucket(1), '--exclude', 'demo/*', '--no-encrypt' ],
304
+        must_find = [ u"File 'testsuite/encodings/%(encoding)s/%(pattern)s' stored as '%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s'" % { 'encoding' : encoding, 'pattern' : enc_pattern , 'pbucket' : pbucket(1)} ])
305 305
 
306 306
 
307 307
 ## ====== List bucket content
308 308
 test_s3cmd("List bucket content", ['ls', '%s/xyz/' % pbucket(1) ],
309
-	must_find_re = [ u"DIR   %s/xyz/binary/$" % pbucket(1) , u"DIR   %s/xyz/etc/$" % pbucket(1) ],
310
-	must_not_find = [ u"random-crap.md5", u"/demo" ])
309
+    must_find_re = [ u"DIR   %s/xyz/binary/$" % pbucket(1) , u"DIR   %s/xyz/etc/$" % pbucket(1) ],
310
+    must_not_find = [ u"random-crap.md5", u"/demo" ])
311 311
 
312 312
 
313 313
 ## ====== List bucket recursive
314 314
 must_find = [ u"%s/xyz/binary/random-crap.md5" % pbucket(1) ]
315 315
 if have_encoding:
316
-	must_find.append(u"%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s" % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) })
316
+    must_find.append(u"%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s" % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) })
317 317
 
318 318
 test_s3cmd("List bucket recursive", ['ls', '--recursive', pbucket(1)],
319
-	must_find = must_find,
320
-	must_not_find = [ "logo.png" ])
319
+    must_find = must_find,
320
+    must_not_find = [ "logo.png" ])
321 321
 
322 322
 ## ====== FIXME
323 323
 # test_s3cmd("Recursive put", ['put', '--recursive', 'testsuite/etc', '%s/xyz/' % pbucket(1) ])
... ...
@@ -330,9 +330,9 @@ test_flushdir("Clean testsuite-out/", "testsuite-out")
330 330
 ## ====== Sync from S3
331 331
 must_find = [ "File '%s/xyz/binary/random-crap.md5' stored as 'testsuite-out/xyz/binary/random-crap.md5'" % pbucket(1) ]
332 332
 if have_encoding:
333
-	must_find.append(u"File '%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s' stored as 'testsuite-out/xyz/encodings/%(encoding)s/%(pattern)s' " % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) })
333
+    must_find.append(u"File '%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s' stored as 'testsuite-out/xyz/encodings/%(encoding)s/%(pattern)s' " % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) })
334 334
 test_s3cmd("Sync from S3", ['sync', '%s/xyz' % pbucket(1), 'testsuite-out'],
335
-	must_find = must_find)
335
+    must_find = must_find)
336 336
 
337 337
 
338 338
 ## ====== Remove 'demo' directory
... ...
@@ -345,7 +345,7 @@ test_mkdir("Create file-dir dir", "testsuite-out/xyz/dir-test/file-dir")
345 345
 
346 346
 ## ====== Skip dst dirs
347 347
 test_s3cmd("Skip over dir", ['sync', '%s/xyz' % pbucket(1), 'testsuite-out'],
348
-	must_find = "WARNING: testsuite-out/xyz/dir-test/file-dir is a directory - skipping over")
348
+    must_find = "WARNING: testsuite-out/xyz/dir-test/file-dir is a directory - skipping over")
349 349
 
350 350
 
351 351
 ## ====== Clean up local destination dir
... ...
@@ -354,75 +354,75 @@ test_flushdir("Clean testsuite-out/", "testsuite-out")
354 354
 
355 355
 ## ====== Put public, guess MIME
356 356
 test_s3cmd("Put public, guess MIME", ['put', '--guess-mime-type', '--acl-public', 'testsuite/etc/logo.png', '%s/xyz/etc/logo.png' % pbucket(1)],
357
-	must_find = [ "stored as '%s/xyz/etc/logo.png'" % pbucket(1) ])
357
+    must_find = [ "stored as '%s/xyz/etc/logo.png'" % pbucket(1) ])
358 358
 
359 359
 
360 360
 ## ====== Retrieve from URL
361 361
 if have_wget:
362
-	test("Retrieve from URL", ['wget', '-O', 'testsuite-out/logo.png', 'http://%s.s3.amazonaws.com/xyz/etc/logo.png' % bucket(1)],
363
-		must_find_re = [ 'logo.png.*saved \[22059/22059\]' ])
362
+    test("Retrieve from URL", ['wget', '-O', 'testsuite-out/logo.png', 'http://%s.s3.amazonaws.com/xyz/etc/logo.png' % bucket(1)],
363
+        must_find_re = [ 'logo.png.*saved \[22059/22059\]' ])
364 364
 
365 365
 
366 366
 ## ====== Change ACL to Private
367 367
 test_s3cmd("Change ACL to Private", ['setacl', '--acl-private', '%s/xyz/etc/l*.png' % pbucket(1)],
368
-	must_find = [ "logo.png: ACL set to Private" ])
368
+    must_find = [ "logo.png: ACL set to Private" ])
369 369
 
370 370
 
371 371
 ## ====== Verify Private ACL
372 372
 if have_wget:
373
-	test("Verify Private ACL", ['wget', '-O', 'testsuite-out/logo.png', 'http://%s.s3.amazonaws.com/xyz/etc/logo.png' % bucket(1)],
374
-		retcode = 8,
375
-		must_find_re = [ 'ERROR 403: Forbidden' ])
373
+    test("Verify Private ACL", ['wget', '-O', 'testsuite-out/logo.png', 'http://%s.s3.amazonaws.com/xyz/etc/logo.png' % bucket(1)],
374
+        retcode = 8,
375
+        must_find_re = [ 'ERROR 403: Forbidden' ])
376 376
 
377 377
 
378 378
 ## ====== Change ACL to Public
379 379
 test_s3cmd("Change ACL to Public", ['setacl', '--acl-public', '--recursive', '%s/xyz/etc/' % pbucket(1) , '-v'],
380
-	must_find = [ "logo.png: ACL set to Public" ])
380
+    must_find = [ "logo.png: ACL set to Public" ])
381 381
 
382 382
 
383 383
 ## ====== Verify Public ACL
384 384
 if have_wget:
385
-	test("Verify Public ACL", ['wget', '-O', 'testsuite-out/logo.png', 'http://%s.s3.amazonaws.com/xyz/etc/logo.png' % bucket(1)],
386
-		must_find_re = [ 'logo.png.*saved \[22059/22059\]' ])
385
+    test("Verify Public ACL", ['wget', '-O', 'testsuite-out/logo.png', 'http://%s.s3.amazonaws.com/xyz/etc/logo.png' % bucket(1)],
386
+        must_find_re = [ 'logo.png.*saved \[22059/22059\]' ])
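
The two wget checks above lean on wget's exit-code convention: 8 means the server issued an error response, which is exactly what fetching the object over plain HTTP should produce after --acl-private (ERROR 403: Forbidden), while the public fetch expects the default retcode of 0 plus the "saved [22059/22059]" pattern, confirming that all 22059 bytes of logo.png come back once the ACL is public again.
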
387 387
 
388 388
 
389 389
 ## ====== Sync more to S3
390 390
 test_s3cmd("Sync more to S3", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt' ],
391
-	must_find = [ "File 'testsuite/demo/some-file.xml' stored as '%s/xyz/demo/some-file.xml' " % pbucket(1) ],
392
-	must_not_find = [ "File 'testsuite/etc/linked.png' stored as '%s/xyz/etc/linked.png" % pbucket(1) ])
393
-           
391
+    must_find = [ "File 'testsuite/demo/some-file.xml' stored as '%s/xyz/demo/some-file.xml' " % pbucket(1) ],
392
+    must_not_find = [ "File 'testsuite/etc/linked.png' stored as '%s/xyz/etc/linked.png" % pbucket(1) ])
393
+
394 394
 
395 395
 ## ====== Don't check MD5 sum on Sync
396 396
 test_copy("Change file cksum1.txt", "testsuite/checksum/cksum2.txt", "testsuite/checksum/cksum1.txt")
397 397
 test_copy("Change file cksum33.txt", "testsuite/checksum/cksum2.txt", "testsuite/checksum/cksum33.txt")
398 398
 test_s3cmd("Don't check MD5", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--no-check-md5'],
399
-	must_find = [ "cksum33.txt" ],
400
-	must_not_find = [ "cksum1.txt" ])
399
+    must_find = [ "cksum33.txt" ],
400
+    must_not_find = [ "cksum1.txt" ])
401 401
 
402 402
 
403 403
 ## ====== Check MD5 sum on Sync
404 404
 test_s3cmd("Check MD5", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--check-md5'],
405
-	must_find = [ "cksum1.txt" ])
405
+    must_find = [ "cksum1.txt" ])
406 406
 
407 407
 
408 408
 ## ====== Rename within S3
409 409
 test_s3cmd("Rename within S3", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
410
-	must_find = [ 'File %s/xyz/etc/logo.png moved to %s/xyz/etc2/Logo.PNG' % (pbucket(1), pbucket(1))])
410
+    must_find = [ 'File %s/xyz/etc/logo.png moved to %s/xyz/etc2/Logo.PNG' % (pbucket(1), pbucket(1))])
411 411
 
412 412
 
413 413
 ## ====== Rename (NoSuchKey)
414 414
 test_s3cmd("Rename (NoSuchKey)", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
415
-	retcode = 1,
416
-	must_find_re = [ 'ERROR:.*NoSuchKey' ],
417
-	must_not_find = [ 'File %s/xyz/etc/logo.png moved to %s/xyz/etc2/Logo.PNG' % (pbucket(1), pbucket(1)) ])
415
+    retcode = 1,
416
+    must_find_re = [ 'ERROR:.*NoSuchKey' ],
417
+    must_not_find = [ 'File %s/xyz/etc/logo.png moved to %s/xyz/etc2/Logo.PNG' % (pbucket(1), pbucket(1)) ])
418 418
 
419 419
 
420 420
 ## ====== Sync more from S3
421 421
 test_s3cmd("Sync more from S3", ['sync', '--delete-removed', '%s/xyz' % pbucket(1), 'testsuite-out'],
422
-	must_find = [ "deleted: testsuite-out/logo.png",
423
-	              "File '%s/xyz/etc2/Logo.PNG' stored as 'testsuite-out/xyz/etc2/Logo.PNG' (22059 bytes" % pbucket(1), 
424
-	              "File '%s/xyz/demo/some-file.xml' stored as 'testsuite-out/xyz/demo/some-file.xml' " % pbucket(1) ],
425
-	must_not_find_re = [ "not-deleted.*etc/logo.png" ])
422
+    must_find = [ "deleted: testsuite-out/logo.png",
423
+                  "File '%s/xyz/etc2/Logo.PNG' stored as 'testsuite-out/xyz/etc2/Logo.PNG' (22059 bytes" % pbucket(1),
424
+                  "File '%s/xyz/demo/some-file.xml' stored as 'testsuite-out/xyz/demo/some-file.xml' " % pbucket(1) ],
425
+    must_not_find_re = [ "not-deleted.*etc/logo.png" ])
426 426
 
427 427
 
428 428
 ## ====== Make dst dir for get
... ...
@@ -431,8 +431,8 @@ test_rmdir("Remove dst dir for get", "testsuite-out")
431 431
 
432 432
 ## ====== Get multiple files
433 433
 test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'],
434
-	retcode = 1,
435
-	must_find = [ 'Destination must be a directory when downloading multiple sources.' ])
434
+    retcode = 1,
435
+    must_find = [ 'Destination must be a directory when downloading multiple sources.' ])
436 436
 
437 437
 
438 438
 ## ====== Make dst dir for get
... ...
@@ -441,43 +441,43 @@ test_mkdir("Make dst dir for get", "testsuite-out")
441 441
 
442 442
 ## ====== Get multiple files
443 443
 test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'],
444
-	must_find = [ u"saved as 'testsuite-out/Logo.PNG'", u"saved as 'testsuite-out/AtomicClockRadio.ttf'" ])
444
+    must_find = [ u"saved as 'testsuite-out/Logo.PNG'", u"saved as 'testsuite-out/AtomicClockRadio.ttf'" ])
445 445
 
446 446
 ## ====== Upload files differing in capitalisation
447 447
 test_s3cmd("blah.txt / Blah.txt", ['put', '-r', 'testsuite/blahBlah', pbucket(1)],
448
-	must_find = [ '%s/blahBlah/Blah.txt' % pbucket(1), '%s/blahBlah/blah.txt' % pbucket(1)])
448
+    must_find = [ '%s/blahBlah/Blah.txt' % pbucket(1), '%s/blahBlah/blah.txt' % pbucket(1)])
449 449
 
450 450
 ## ====== Copy between buckets
451 451
 test_s3cmd("Copy between buckets", ['cp', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc2/logo.png' % pbucket(3)],
452
-	must_find = [ "File %s/xyz/etc2/Logo.PNG copied to %s/xyz/etc2/logo.png" % (pbucket(1), pbucket(3)) ])
452
+    must_find = [ "File %s/xyz/etc2/Logo.PNG copied to %s/xyz/etc2/logo.png" % (pbucket(1), pbucket(3)) ])
453 453
 
454 454
 ## ====== Recursive copy
455 455
 test_s3cmd("Recursive copy, set ACL", ['cp', '-r', '--acl-public', '%s/xyz/' % pbucket(1), '%s/copy' % pbucket(2), '--exclude', 'demo/dir?/*.txt', '--exclude', 'non-printables*'],
456
-	must_find = [ "File %s/xyz/etc2/Logo.PNG copied to %s/copy/etc2/Logo.PNG" % (pbucket(1), pbucket(2)),
457
-	              "File %s/xyz/blahBlah/Blah.txt copied to %s/copy/blahBlah/Blah.txt" % (pbucket(1), pbucket(2)),
458
-	              "File %s/xyz/blahBlah/blah.txt copied to %s/copy/blahBlah/blah.txt" % (pbucket(1), pbucket(2)) ],
459
-	must_not_find = [ "demo/dir1/file1-1.txt" ])
456
+    must_find = [ "File %s/xyz/etc2/Logo.PNG copied to %s/copy/etc2/Logo.PNG" % (pbucket(1), pbucket(2)),
457
+                  "File %s/xyz/blahBlah/Blah.txt copied to %s/copy/blahBlah/Blah.txt" % (pbucket(1), pbucket(2)),
458
+                  "File %s/xyz/blahBlah/blah.txt copied to %s/copy/blahBlah/blah.txt" % (pbucket(1), pbucket(2)) ],
459
+    must_not_find = [ "demo/dir1/file1-1.txt" ])
460 460
 
461 461
 ## ====== Verify ACL and MIME type
462 462
 test_s3cmd("Verify ACL and MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
463
-	must_find_re = [ "MIME type:.*image/png", 
464
-	                 "ACL:.*\*anon\*: READ",
465
-					 "URL:.*http://%s.s3.amazonaws.com/copy/etc2/Logo.PNG" % bucket(2) ])
463
+    must_find_re = [ "MIME type:.*image/png",
464
+                     "ACL:.*\*anon\*: READ",
465
+                     "URL:.*http://%s.s3.amazonaws.com/copy/etc2/Logo.PNG" % bucket(2) ])
466 466
 
467 467
 ## ====== Rename within S3
468 468
 test_s3cmd("Rename within S3", ['mv', '%s/copy/etc2/Logo.PNG' % pbucket(2), '%s/copy/etc/logo.png' % pbucket(2)],
469
-	must_find = [ 'File %s/copy/etc2/Logo.PNG moved to %s/copy/etc/logo.png' % (pbucket(2), pbucket(2))])
469
+    must_find = [ 'File %s/copy/etc2/Logo.PNG moved to %s/copy/etc/logo.png' % (pbucket(2), pbucket(2))])
470 470
 
471 471
 ## ====== Sync between buckets
472 472
 test_s3cmd("Sync remote2remote", ['sync', '%s/xyz/' % pbucket(1), '%s/copy/' % pbucket(2), '--delete-removed', '--exclude', 'non-printables*'],
473
-	must_find = [ "File %s/xyz/demo/dir1/file1-1.txt copied to %s/copy/demo/dir1/file1-1.txt" % (pbucket(1), pbucket(2)),
474
-	              "File %s/xyz/etc2/Logo.PNG copied to %s/copy/etc2/Logo.PNG" % (pbucket(1), pbucket(2)),
475
-	              "deleted: '%s/copy/etc/logo.png'" % pbucket(2) ],
476
-	must_not_find = [ "blah.txt" ])
473
+    must_find = [ "File %s/xyz/demo/dir1/file1-1.txt copied to %s/copy/demo/dir1/file1-1.txt" % (pbucket(1), pbucket(2)),
474
+                  "File %s/xyz/etc2/Logo.PNG copied to %s/copy/etc2/Logo.PNG" % (pbucket(1), pbucket(2)),
475
+                  "deleted: '%s/copy/etc/logo.png'" % pbucket(2) ],
476
+    must_not_find = [ "blah.txt" ])
477 477
 
478 478
 ## ====== Don't Put symbolic link
479 479
 test_s3cmd("Don't put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/xyz/' % bucket(1),],
480
-	must_not_find_re = [ "linked1.png"])
480
+    must_not_find_re = [ "linked1.png"])
481 481
 
482 482
 ## ====== Put symbolic link
483 483
 test_s3cmd("Put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/xyz/' % bucket(1),'--follow-symlinks' ],
... ...
@@ -485,7 +485,7 @@ test_s3cmd("Put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/x
485 485
 
486 486
 ## ====== Sync symbolic links
487 487
 test_s3cmd("Sync symbolic links", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--follow-symlinks' ],
488
-	must_find = ["File 'testsuite/etc/linked.png' stored as '%s/xyz/etc/linked.png'" % pbucket(1)],
488
+    must_find = ["File 'testsuite/etc/linked.png' stored as '%s/xyz/etc/linked.png'" % pbucket(1)],
489 489
            # Don't want to recursively copy linked directories!
490 490
            must_not_find_re = ["etc/more/linked-dir/more/give-me-more.txt",
491 491
                                "etc/brokenlink.png"],
... ...
@@ -493,43 +493,45 @@ test_s3cmd("Sync symbolic links", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket
493 493
 
494 494
 ## ====== Multi source move
495 495
 test_s3cmd("Multi-source move", ['mv', '-r', '%s/copy/blahBlah/Blah.txt' % pbucket(2), '%s/copy/etc/' % pbucket(2), '%s/moved/' % pbucket(2)],
496
-	must_find = [ "File %s/copy/blahBlah/Blah.txt moved to %s/moved/Blah.txt" % (pbucket(2), pbucket(2)),
497
-	              "File %s/copy/etc/AtomicClockRadio.ttf moved to %s/moved/AtomicClockRadio.ttf" % (pbucket(2), pbucket(2)),
498
-				  "File %s/copy/etc/TypeRa.ttf moved to %s/moved/TypeRa.ttf" % (pbucket(2), pbucket(2)) ],
499
-	must_not_find = [ "blah.txt" ])
496
+    must_find = [ "File %s/copy/blahBlah/Blah.txt moved to %s/moved/Blah.txt" % (pbucket(2), pbucket(2)),
497
+                  "File %s/copy/etc/AtomicClockRadio.ttf moved to %s/moved/AtomicClockRadio.ttf" % (pbucket(2), pbucket(2)),
498
+                  "File %s/copy/etc/TypeRa.ttf moved to %s/moved/TypeRa.ttf" % (pbucket(2), pbucket(2)) ],
499
+    must_not_find = [ "blah.txt" ])
500 500
 
501 501
 ## ====== Verify move
502 502
 test_s3cmd("Verify move", ['ls', '-r', pbucket(2)],
503
-	must_find = [ "%s/moved/Blah.txt" % pbucket(2),
504
-	              "%s/moved/AtomicClockRadio.ttf" % pbucket(2),
505
-				  "%s/moved/TypeRa.ttf" % pbucket(2),
506
-				  "%s/copy/blahBlah/blah.txt" % pbucket(2) ],
507
-	must_not_find = [ "%s/copy/blahBlah/Blah.txt" % pbucket(2),
508
-					  "%s/copy/etc/AtomicClockRadio.ttf" % pbucket(2),
509
-					  "%s/copy/etc/TypeRa.ttf" % pbucket(2) ])
503
+    must_find = [ "%s/moved/Blah.txt" % pbucket(2),
504
+                  "%s/moved/AtomicClockRadio.ttf" % pbucket(2),
505
+                  "%s/moved/TypeRa.ttf" % pbucket(2),
506
+                  "%s/copy/blahBlah/blah.txt" % pbucket(2) ],
507
+    must_not_find = [ "%s/copy/blahBlah/Blah.txt" % pbucket(2),
508
+                      "%s/copy/etc/AtomicClockRadio.ttf" % pbucket(2),
509
+                      "%s/copy/etc/TypeRa.ttf" % pbucket(2) ])
510 510
 
511 511
 ## ====== Simple delete
512 512
 test_s3cmd("Simple delete", ['del', '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
513
-	must_find = [ "File %s/xyz/etc2/Logo.PNG deleted" % pbucket(1) ])
513
+    must_find = [ "File %s/xyz/etc2/Logo.PNG deleted" % pbucket(1) ])
514 514
 
515 515
 
516 516
 ## ====== Recursive delete
517 517
 test_s3cmd("Recursive delete", ['del', '--recursive', '--exclude', 'Atomic*', '%s/xyz/etc' % pbucket(1)],
518
-	must_find = [ "File %s/xyz/etc/TypeRa.ttf deleted" % pbucket(1) ],
519
-	must_find_re = [ "File .*/etc/logo.png deleted" ],
520
-	must_not_find = [ "AtomicClockRadio.ttf" ])
518
+    must_find = [ "File %s/xyz/etc/TypeRa.ttf deleted" % pbucket(1) ],
519
+    must_find_re = [ "File .*/etc/logo.png deleted" ],
520
+    must_not_find = [ "AtomicClockRadio.ttf" ])
521 521
 
522 522
 ## ====== Recursive delete all
523 523
 test_s3cmd("Recursive delete all", ['del', '--recursive', '--force', pbucket(1)],
524
-	must_find_re = [ "File .*binary/random-crap deleted" ])
524
+    must_find_re = [ "File .*binary/random-crap deleted" ])
525 525
 
526 526
 
527 527
 ## ====== Remove empty bucket
528 528
 test_s3cmd("Remove empty bucket", ['rb', pbucket(1)],
529
-	must_find = [ "Bucket '%s/' removed" % pbucket(1) ])
529
+    must_find = [ "Bucket '%s/' removed" % pbucket(1) ])
530 530
 
531 531
 
532 532
 ## ====== Remove remaining buckets
533 533
 test_s3cmd("Remove remaining buckets", ['rb', '--recursive', pbucket(2), pbucket(3)],
534
-	must_find = [ "Bucket '%s/' removed" % pbucket(2),
535
-		      "Bucket '%s/' removed" % pbucket(3) ])
534
+    must_find = [ "Bucket '%s/' removed" % pbucket(2),
535
+              "Bucket '%s/' removed" % pbucket(3) ])
536
+
537
+# vim:et:ts=4:sts=4:ai
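
The freshly added vim modeline is the enforcement half of this whitespace conversion: "et" (expandtab) makes vim insert spaces rather than tabs, "ts=4" and "sts=4" set the tab and soft-tab width to four columns, and "ai" enables autoindent, so later edits to this file stay consistent with the new indentation. The hunks below move on to the next file, evidently the main s3cmd script, starting with its Python version guard.
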
... ...
@@ -8,8 +8,8 @@
8 8
 import sys
9 9
 
10 10
 if float("%d.%d" %(sys.version_info[0], sys.version_info[1])) < 2.4:
11
-	sys.stderr.write("ERROR: Python 2.4 or higher required, sorry.\n")
12
-	sys.exit(1)
11
+    sys.stderr.write("ERROR: Python 2.4 or higher required, sorry.\n")
12
+    sys.exit(1)
13 13
 
14 14
 import logging
15 15
 import time
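
One aside on the version guard above: building a float from the version components works for every 2.x release that existed at the time, but it would misfire on a hypothetical 2.10, since float("2.10") == 2.1, which compares below 2.4. Comparing the version tuple directly avoids that; a sketch, not part of this patch:

    # Equivalent guard using tuple comparison instead of the float trick.
    import sys

    if sys.version_info[:2] < (2, 4):
        sys.stderr.write("ERROR: Python 2.4 or higher required, sorry.\n")
        sys.exit(1)
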
... ...
@@ -30,1690 +30,1690 @@ from logging import debug, info, warning, error
30 30
 from distutils.spawn import find_executable
31 31
 
32 32
 def output(message):
33
-	sys.stdout.write(message + "\n")
33
+    sys.stdout.write(message + "\n")
34 34
 
35 35
 def check_args_type(args, type, verbose_type):
36
-	for arg in args:
37
-		if S3Uri(arg).type != type:
38
-			raise ParameterError("Expecting %s instead of '%s'" % (verbose_type, arg))
36
+    for arg in args:
37
+        if S3Uri(arg).type != type:
38
+            raise ParameterError("Expecting %s instead of '%s'" % (verbose_type, arg))
39 39
 
40 40
 def cmd_du(args):
41
-	s3 = S3(Config())
42
-	if len(args) > 0:
43
-		uri = S3Uri(args[0])
44
-		if uri.type == "s3" and uri.has_bucket():
45
-			subcmd_bucket_usage(s3, uri)
46
-			return
47
-	subcmd_bucket_usage_all(s3)
41
+    s3 = S3(Config())
42
+    if len(args) > 0:
43
+        uri = S3Uri(args[0])
44
+        if uri.type == "s3" and uri.has_bucket():
45
+            subcmd_bucket_usage(s3, uri)
46
+            return
47
+    subcmd_bucket_usage_all(s3)
48 48
 
49 49
 def subcmd_bucket_usage_all(s3):
50
-	response = s3.list_all_buckets()
51
-
52
-	buckets_size = 0
53
-	for bucket in response["list"]:
54
-		size = subcmd_bucket_usage(s3, S3Uri("s3://" + bucket["Name"]))
55
-		if size != None:
56
-			buckets_size += size
57
-	total_size, size_coeff = formatSize(buckets_size, Config().human_readable_sizes)
58
-	total_size_str = str(total_size) + size_coeff 
59
-	output(u"".rjust(8, "-"))
60
-	output(u"%s Total" % (total_size_str.ljust(8)))
50
+    response = s3.list_all_buckets()
51
+
52
+    buckets_size = 0
53
+    for bucket in response["list"]:
54
+        size = subcmd_bucket_usage(s3, S3Uri("s3://" + bucket["Name"]))
55
+        if size != None:
56
+            buckets_size += size
57
+    total_size, size_coeff = formatSize(buckets_size, Config().human_readable_sizes)
58
+    total_size_str = str(total_size) + size_coeff
59
+    output(u"".rjust(8, "-"))
60
+    output(u"%s Total" % (total_size_str.ljust(8)))
61 61
 
62 62
 def subcmd_bucket_usage(s3, uri):
63
-	bucket = uri.bucket()
64
-	object = uri.object()
65
-
66
-	if object.endswith('*'):
67
-		object = object[:-1]
68
-	try:
69
-		response = s3.bucket_list(bucket, prefix = object, recursive = True)
70
-	except S3Error, e:
71
-		if S3.codes.has_key(e.info["Code"]):
72
-			error(S3.codes[e.info["Code"]] % bucket)
73
-			return
74
-		else:
75
-			raise
76
-	bucket_size = 0
77
-	for object in response["list"]:
78
-		size, size_coeff = formatSize(object["Size"], False)
79
-		bucket_size += size
80
-	total_size, size_coeff = formatSize(bucket_size, Config().human_readable_sizes)
81
-	total_size_str = str(total_size) + size_coeff 
82
-	output(u"%s %s" % (total_size_str.ljust(8), uri))
83
-	return bucket_size
63
+    bucket = uri.bucket()
64
+    object = uri.object()
65
+
66
+    if object.endswith('*'):
67
+        object = object[:-1]
68
+    try:
69
+        response = s3.bucket_list(bucket, prefix = object, recursive = True)
70
+    except S3Error, e:
71
+        if S3.codes.has_key(e.info["Code"]):
72
+            error(S3.codes[e.info["Code"]] % bucket)
73
+            return
74
+        else:
75
+            raise
76
+    bucket_size = 0
77
+    for object in response["list"]:
78
+        size, size_coeff = formatSize(object["Size"], False)
79
+        bucket_size += size
80
+    total_size, size_coeff = formatSize(bucket_size, Config().human_readable_sizes)
81
+    total_size_str = str(total_size) + size_coeff
82
+    output(u"%s %s" % (total_size_str.ljust(8), uri))
83
+    return bucket_size
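
subcmd_bucket_usage() sums the raw Size of every object under the given prefix; formatSize(..., False) evidently hands the byte count back unchanged (it is added straight into bucket_size), and only the per-bucket and grand totals are rendered human-readable when the config asks for it. formatSize() itself lives in the Utils module and is not shown in this commit; a rough illustration of what such a helper typically does, not the real implementation:

    # Illustration only -- the real formatSize() is in the Utils module and
    # may differ in rounding and suffix choice.
    def format_size(size, human_readable):
        if not human_readable:
            return size, ""
        for suffix in ("", "k", "M", "G", "T"):
            if size < 1024:
                return size, suffix
            size = size / 1024
        return size, "P"

    print "%s%s" % format_size(22059, True)    # -> "21k"
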
84 84
 
85 85
 def cmd_ls(args):
86
-	s3 = S3(Config())
87
-	if len(args) > 0:
88
-		uri = S3Uri(args[0])
89
-		if uri.type == "s3" and uri.has_bucket():
90
-			subcmd_bucket_list(s3, uri)
91
-			return
92
-	subcmd_buckets_list_all(s3)
86
+    s3 = S3(Config())
87
+    if len(args) > 0:
88
+        uri = S3Uri(args[0])
89
+        if uri.type == "s3" and uri.has_bucket():
90
+            subcmd_bucket_list(s3, uri)
91
+            return
92
+    subcmd_buckets_list_all(s3)
93 93
 
94 94
 def cmd_buckets_list_all_all(args):
95
-	s3 = S3(Config())
95
+    s3 = S3(Config())
96 96
 
97
-	response = s3.list_all_buckets()
97
+    response = s3.list_all_buckets()
98 98
 
99
-	for bucket in response["list"]:
100
-		subcmd_bucket_list(s3, S3Uri("s3://" + bucket["Name"]))
101
-		output(u"")
99
+    for bucket in response["list"]:
100
+        subcmd_bucket_list(s3, S3Uri("s3://" + bucket["Name"]))
101
+        output(u"")
102 102
 
103 103
 
104 104
 def subcmd_buckets_list_all(s3):
105
-	response = s3.list_all_buckets()
106
-	for bucket in response["list"]:
107
-		output(u"%s  s3://%s" % (
108
-			formatDateTime(bucket["CreationDate"]),
109
-			bucket["Name"],
110
-			))
105
+    response = s3.list_all_buckets()
106
+    for bucket in response["list"]:
107
+        output(u"%s  s3://%s" % (
108
+            formatDateTime(bucket["CreationDate"]),
109
+            bucket["Name"],
110
+            ))
111 111
 
112 112
 def subcmd_bucket_list(s3, uri):
113
-	bucket = uri.bucket()
114
-	prefix = uri.object()
115
-
116
-	debug(u"Bucket 's3://%s':" % bucket)
117
-	if prefix.endswith('*'):
118
-		prefix = prefix[:-1]
119
-	try:
120
-		response = s3.bucket_list(bucket, prefix = prefix)
121
-	except S3Error, e:
122
-		if S3.codes.has_key(e.info["Code"]):
123
-			error(S3.codes[e.info["Code"]] % bucket)
124
-			return
125
-		else:
126
-			raise
127
-
128
-	if cfg.list_md5:
129
-		format_string = u"%(timestamp)16s %(size)9s%(coeff)1s  %(md5)32s  %(uri)s"
130
-	else:
131
-		format_string = u"%(timestamp)16s %(size)9s%(coeff)1s  %(uri)s"
132
-
133
-	for prefix in response['common_prefixes']:
134
-		output(format_string % {
135
-			"timestamp": "",
136
-			"size": "DIR",
137
-			"coeff": "",
138
-			"md5": "",
139
-			"uri": uri.compose_uri(bucket, prefix["Prefix"])})
140
-
141
-	for object in response["list"]:
142
-		size, size_coeff = formatSize(object["Size"], Config().human_readable_sizes)
143
-		output(format_string % {
144
-			"timestamp": formatDateTime(object["LastModified"]),
145
-			"size" : str(size), 
146
-			"coeff": size_coeff,
147
-			"md5" : object['ETag'].strip('"'),
148
-			"uri": uri.compose_uri(bucket, object["Key"]),
149
-			})
113
+    bucket = uri.bucket()
114
+    prefix = uri.object()
115
+
116
+    debug(u"Bucket 's3://%s':" % bucket)
117
+    if prefix.endswith('*'):
118
+        prefix = prefix[:-1]
119
+    try:
120
+        response = s3.bucket_list(bucket, prefix = prefix)
121
+    except S3Error, e:
122
+        if S3.codes.has_key(e.info["Code"]):
123
+            error(S3.codes[e.info["Code"]] % bucket)
124
+            return
125
+        else:
126
+            raise
127
+
128
+    if cfg.list_md5:
129
+        format_string = u"%(timestamp)16s %(size)9s%(coeff)1s  %(md5)32s  %(uri)s"
130
+    else:
131
+        format_string = u"%(timestamp)16s %(size)9s%(coeff)1s  %(uri)s"
132
+
133
+    for prefix in response['common_prefixes']:
134
+        output(format_string % {
135
+            "timestamp": "",
136
+            "size": "DIR",
137
+            "coeff": "",
138
+            "md5": "",
139
+            "uri": uri.compose_uri(bucket, prefix["Prefix"])})
140
+
141
+    for object in response["list"]:
142
+        size, size_coeff = formatSize(object["Size"], Config().human_readable_sizes)
143
+        output(format_string % {
144
+            "timestamp": formatDateTime(object["LastModified"]),
145
+            "size" : str(size),
146
+            "coeff": size_coeff,
147
+            "md5" : object['ETag'].strip('"'),
148
+            "uri": uri.compose_uri(bucket, object["Key"]),
149
+            })
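
The two format strings above define the familiar "s3cmd ls" layout: a 16-column timestamp, a 9-column size plus a one-character suffix, optionally the 32-character MD5 taken from the ETag, and finally the URI; common prefixes become DIR rows with the other columns left blank. A quick illustration, with an invented bucket name and timestamp:

    fmt = u"%(timestamp)16s %(size)9s%(coeff)1s  %(uri)s"
    print fmt % { "timestamp": "", "size": "DIR", "coeff": "",
                  "uri": "s3://some-bucket/xyz/etc/" }
    print fmt % { "timestamp": "2011-06-07 20:18", "size": "22059", "coeff": "",
                  "uri": "s3://some-bucket/xyz/etc/logo.png" }
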
150 150
 
151 151
 def cmd_bucket_create(args):
152
-	s3 = S3(Config())
153
-	for arg in args:
154
-		uri = S3Uri(arg)
155
-		if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
156
-			raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
157
-		try:
158
-			response = s3.bucket_create(uri.bucket(), cfg.bucket_location)
159
-			output(u"Bucket '%s' created" % uri.uri())
160
-		except S3Error, e:
161
-			if S3.codes.has_key(e.info["Code"]):
162
-				error(S3.codes[e.info["Code"]] % uri.bucket())
163
-				return
164
-			else:
165
-				raise
152
+    s3 = S3(Config())
153
+    for arg in args:
154
+        uri = S3Uri(arg)
155
+        if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
156
+            raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
157
+        try:
158
+            response = s3.bucket_create(uri.bucket(), cfg.bucket_location)
159
+            output(u"Bucket '%s' created" % uri.uri())
160
+        except S3Error, e:
161
+            if S3.codes.has_key(e.info["Code"]):
162
+                error(S3.codes[e.info["Code"]] % uri.bucket())
163
+                return
164
+            else:
165
+                raise
166 166
 
167 167
 def cmd_website_info(args):
168
-	s3 = S3(Config())
169
-	for arg in args:
170
-		uri = S3Uri(arg)
171
-		if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
172
-			raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
173
-		try:
174
-			response = s3.website_info(uri, cfg.bucket_location)
175
-			if response:
176
-				output(u"Bucket %s: Website configuration" % uri.uri())
177
-				output(u"Website endpoint: %s" % response['website_endpoint'])
178
-				output(u"Index document:   %s" % response['index_document'])
179
-				output(u"Error document:   %s" % response['error_document'])
180
-			else:
181
-				output(u"Bucket %s: Unable to receive website configuration." % (uri.uri()))
182
-		except S3Error, e:
183
-			if S3.codes.has_key(e.info["Code"]):
184
-				error(S3.codes[e.info["Code"]] % uri.bucket())
185
-				return
186
-			else:
187
-				raise
168
+    s3 = S3(Config())
169
+    for arg in args:
170
+        uri = S3Uri(arg)
171
+        if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
172
+            raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
173
+        try:
174
+            response = s3.website_info(uri, cfg.bucket_location)
175
+            if response:
176
+                output(u"Bucket %s: Website configuration" % uri.uri())
177
+                output(u"Website endpoint: %s" % response['website_endpoint'])
178
+                output(u"Index document:   %s" % response['index_document'])
179
+                output(u"Error document:   %s" % response['error_document'])
180
+            else:
181
+                output(u"Bucket %s: Unable to receive website configuration." % (uri.uri()))
182
+        except S3Error, e:
183
+            if S3.codes.has_key(e.info["Code"]):
184
+                error(S3.codes[e.info["Code"]] % uri.bucket())
185
+                return
186
+            else:
187
+                raise
188 188
 
189 189
 def cmd_website_create(args):
190
-	s3 = S3(Config())
191
-	for arg in args:
192
-		uri = S3Uri(arg)
193
-		if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
194
-			raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
195
-		try:
196
-			response = s3.website_create(uri, cfg.bucket_location)
197
-			output(u"Bucket '%s': website configuration created." % (uri.uri()))
198
-		except S3Error, e:
199
-			if S3.codes.has_key(e.info["Code"]):
200
-				error(S3.codes[e.info["Code"]] % uri.bucket())
201
-				return
202
-			else:
203
-				raise
190
+    s3 = S3(Config())
191
+    for arg in args:
192
+        uri = S3Uri(arg)
193
+        if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
194
+            raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
195
+        try:
196
+            response = s3.website_create(uri, cfg.bucket_location)
197
+            output(u"Bucket '%s': website configuration created." % (uri.uri()))
198
+        except S3Error, e:
199
+            if S3.codes.has_key(e.info["Code"]):
200
+                error(S3.codes[e.info["Code"]] % uri.bucket())
201
+                return
202
+            else:
203
+                raise
204 204
 
205 205
 def cmd_website_delete(args):
206
-	s3 = S3(Config())
207
-	for arg in args:
208
-		uri = S3Uri(arg)
209
-		if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
210
-			raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
211
-		try:
212
-			response = s3.website_delete(uri, cfg.bucket_location)
213
-			output(u"Bucket '%s': website configuration deleted." % (uri.uri()))
214
-		except S3Error, e:
215
-			if S3.codes.has_key(e.info["Code"]):
216
-				error(S3.codes[e.info["Code"]] % uri.bucket())
217
-				return
218
-			else:
219
-				raise
206
+    s3 = S3(Config())
207
+    for arg in args:
208
+        uri = S3Uri(arg)
209
+        if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
210
+            raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
211
+        try:
212
+            response = s3.website_delete(uri, cfg.bucket_location)
213
+            output(u"Bucket '%s': website configuration deleted." % (uri.uri()))
214
+        except S3Error, e:
215
+            if S3.codes.has_key(e.info["Code"]):
216
+                error(S3.codes[e.info["Code"]] % uri.bucket())
217
+                return
218
+            else:
219
+                raise
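
cmd_bucket_create() and the three website commands above all open with the same guard: the argument must be an s3:// URI naming a bucket and nothing more. Should the family grow further, the check could be folded into a helper along these lines; require_bucket_uri() is hypothetical, not something this commit introduces:

    # Hypothetical refactor of the repeated guard above; not part of s3cmd.
    def require_bucket_uri(arg):
        uri = S3Uri(arg)
        if uri.type != "s3" or not uri.has_bucket() or uri.has_object():
            raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
        return uri
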
220 220
 
221 221
 def cmd_bucket_delete(args):
222
-	def _bucket_delete_one(uri):
223
-		try:
224
-			response = s3.bucket_delete(uri.bucket())
225
-		except S3Error, e:
226
-			if e.info['Code'] == 'BucketNotEmpty' and (cfg.force or cfg.recursive):
227
-				warning(u"Bucket is not empty. Removing all the objects from it first. This may take some time...")
228
-				subcmd_object_del_uri(uri.uri(), recursive = True)
229
-				return _bucket_delete_one(uri)
230
-			elif S3.codes.has_key(e.info["Code"]):
231
-				error(S3.codes[e.info["Code"]] % uri.bucket())
232
-				return
233
-			else:
234
-				raise
235
-		
236
-	s3 = S3(Config())
237
-	for arg in args:
238
-		uri = S3Uri(arg)
239
-		if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
240
-			raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
241
-		_bucket_delete_one(uri)
242
-		output(u"Bucket '%s' removed" % uri.uri())
222
+    def _bucket_delete_one(uri):
223
+        try:
224
+            response = s3.bucket_delete(uri.bucket())
225
+        except S3Error, e:
226
+            if e.info['Code'] == 'BucketNotEmpty' and (cfg.force or cfg.recursive):
227
+                warning(u"Bucket is not empty. Removing all the objects from it first. This may take some time...")
228
+                subcmd_object_del_uri(uri.uri(), recursive = True)
229
+                return _bucket_delete_one(uri)
230
+            elif S3.codes.has_key(e.info["Code"]):
231
+                error(S3.codes[e.info["Code"]] % uri.bucket())
232
+                return
233
+            else:
234
+                raise
235
+
236
+    s3 = S3(Config())
237
+    for arg in args:
238
+        uri = S3Uri(arg)
239
+        if not uri.type == "s3" or not uri.has_bucket() or uri.has_object():
240
+            raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg)
241
+        _bucket_delete_one(uri)
242
+        output(u"Bucket '%s' removed" % uri.uri())
243 243
 
244 244
 def cmd_object_put(args):
245
-	cfg = Config()
246
-	s3 = S3(cfg)
247
-
248
-	if len(args) == 0:
249
-		raise ParameterError("Nothing to upload. Expecting a local file or directory and a S3 URI destination.")
250
-
251
-	## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash)
252
-	destination_base_uri = S3Uri(args.pop())
253
-	if destination_base_uri.type != 's3':
254
-		raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri)
255
-	destination_base = str(destination_base_uri)
256
-
257
-	if len(args) == 0:
258
-		raise ParameterError("Nothing to upload. Expecting a local file or directory.")
259
-
260
-	local_list, single_file_local = fetch_local_list(args)
261
-
262
-	local_list, exclude_list = filter_exclude_include(local_list)
263
-
264
-	local_count = len(local_list)
265
-
266
-	info(u"Summary: %d local files to upload" % local_count)
267
-
268
-	if local_count > 0:
269
-		if not destination_base.endswith("/"):
270
-			if not single_file_local:
271
-				raise ParameterError("Destination S3 URI must end with '/' (ie must refer to a directory on the remote side).")
272
-			local_list[local_list.keys()[0]]['remote_uri'] = unicodise(destination_base)
273
-		else:
274
-			for key in local_list:
275
-				local_list[key]['remote_uri'] = unicodise(destination_base + key)
276
-
277
-	if cfg.dry_run:
278
-		for key in exclude_list:
279
-			output(u"exclude: %s" % unicodise(key))
280
-		for key in local_list:
281
-			output(u"upload: %s -> %s" % (local_list[key]['full_name_unicode'], local_list[key]['remote_uri']))
282
-
283
-		warning(u"Exitting now because of --dry-run")
284
-		return
285
-
286
-	seq = 0
287
-	for key in local_list:
288
-		seq += 1
289
-
290
-		uri_final = S3Uri(local_list[key]['remote_uri'])
291
-
292
-		extra_headers = copy(cfg.extra_headers)
293
-		full_name_orig = local_list[key]['full_name']
294
-		full_name = full_name_orig
295
-		seq_label = "[%d of %d]" % (seq, local_count)
296
-		if Config().encrypt:
297
-			exitcode, full_name, extra_headers["x-amz-meta-s3tools-gpgenc"] = gpg_encrypt(full_name_orig)
298
-		try:
299
-			response = s3.object_put(full_name, uri_final, extra_headers, extra_label = seq_label)
300
-		except S3UploadError, e:
301
-			error(u"Upload of '%s' failed too many times. Skipping that file." % full_name_orig)
302
-			continue
303
-		except InvalidFileError, e:
304
-			warning(u"File can not be uploaded: %s" % e)
305
-			continue
306
-		speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
307
-		if not Config().progress_meter:
308
-			output(u"File '%s' stored as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
309
-				(unicodise(full_name_orig), uri_final, response["size"], response["elapsed"], 
310
-				speed_fmt[0], speed_fmt[1], seq_label))
311
-		if Config().acl_public:
312
-			output(u"Public URL of the object is: %s" %
313
-				(uri_final.public_url()))
314
-		if Config().encrypt and full_name != full_name_orig:
315
-			debug(u"Removing temporary encrypted file: %s" % unicodise(full_name))
316
-			os.remove(full_name)
245
+    cfg = Config()
246
+    s3 = S3(cfg)
247
+
248
+    if len(args) == 0:
249
+        raise ParameterError("Nothing to upload. Expecting a local file or directory and a S3 URI destination.")
250
+
251
+    ## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash)
252
+    destination_base_uri = S3Uri(args.pop())
253
+    if destination_base_uri.type != 's3':
254
+        raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri)
255
+    destination_base = str(destination_base_uri)
256
+
257
+    if len(args) == 0:
258
+        raise ParameterError("Nothing to upload. Expecting a local file or directory.")
259
+
260
+    local_list, single_file_local = fetch_local_list(args)
261
+
262
+    local_list, exclude_list = filter_exclude_include(local_list)
263
+
264
+    local_count = len(local_list)
265
+
266
+    info(u"Summary: %d local files to upload" % local_count)
267
+
268
+    if local_count > 0:
269
+        if not destination_base.endswith("/"):
270
+            if not single_file_local:
271
+                raise ParameterError("Destination S3 URI must end with '/' (ie must refer to a directory on the remote side).")
272
+            local_list[local_list.keys()[0]]['remote_uri'] = unicodise(destination_base)
273
+        else:
274
+            for key in local_list:
275
+                local_list[key]['remote_uri'] = unicodise(destination_base + key)
276
+
277
+    if cfg.dry_run:
278
+        for key in exclude_list:
279
+            output(u"exclude: %s" % unicodise(key))
280
+        for key in local_list:
281
+            output(u"upload: %s -> %s" % (local_list[key]['full_name_unicode'], local_list[key]['remote_uri']))
282
+
283
+        warning(u"Exitting now because of --dry-run")
284
+        return
285
+
286
+    seq = 0
287
+    for key in local_list:
288
+        seq += 1
289
+
290
+        uri_final = S3Uri(local_list[key]['remote_uri'])
291
+
292
+        extra_headers = copy(cfg.extra_headers)
293
+        full_name_orig = local_list[key]['full_name']
294
+        full_name = full_name_orig
295
+        seq_label = "[%d of %d]" % (seq, local_count)
296
+        if Config().encrypt:
297
+            exitcode, full_name, extra_headers["x-amz-meta-s3tools-gpgenc"] = gpg_encrypt(full_name_orig)
298
+        try:
299
+            response = s3.object_put(full_name, uri_final, extra_headers, extra_label = seq_label)
300
+        except S3UploadError, e:
301
+            error(u"Upload of '%s' failed too many times. Skipping that file." % full_name_orig)
302
+            continue
303
+        except InvalidFileError, e:
304
+            warning(u"File can not be uploaded: %s" % e)
305
+            continue
306
+        speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
307
+        if not Config().progress_meter:
308
+            output(u"File '%s' stored as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
309
+                (unicodise(full_name_orig), uri_final, response["size"], response["elapsed"],
310
+                speed_fmt[0], speed_fmt[1], seq_label))
311
+        if Config().acl_public:
312
+            output(u"Public URL of the object is: %s" %
313
+                (uri_final.public_url()))
314
+        if Config().encrypt and full_name != full_name_orig:
315
+            debug(u"Removing temporary encrypted file: %s" % unicodise(full_name))
316
+            os.remove(full_name)
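
The destination handling at the top of cmd_object_put() is the part worth internalising: with several local files the S3 URI must end in "/" and each file's list key is appended to that base, while a single file may be stored under an exact object name. Illustrated with invented paths:

    # Invented examples of the mapping above (not taken from the test suite):
    #   s3cmd put a.txt s3://bkt/dir/        -> s3://bkt/dir/a.txt
    #   s3cmd put a.txt b.txt s3://bkt/dir/  -> s3://bkt/dir/a.txt, s3://bkt/dir/b.txt
    #   s3cmd put a.txt s3://bkt/name.txt    -> s3://bkt/name.txt  (single file only)
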
317 317
 
318 318
 def cmd_object_get(args):
319
-	cfg = Config()
320
-	s3 = S3(cfg)
321
-
322
-	## Check arguments:
323
-	## if not --recursive:
324
-	##   - first N arguments must be S3Uri
325
-	##   - if the last one is S3 make current dir the destination_base
326
-	##   - if the last one is a directory:
327
-	##       - take all 'basenames' of the remote objects and
328
-	##         make the destination name be 'destination_base'+'basename'
329
-	##   - if the last one is a file or not existing:
330
-	##       - if the number of sources (N, above) == 1 treat it
331
-	##         as a filename and save the object there.
332
-	##       - if there's more sources -> Error
333
-	## if --recursive:
334
-	##   - first N arguments must be S3Uri
335
-	##       - for each Uri get a list of remote objects with that Uri as a prefix
336
-	##       - apply exclude/include rules
337
-	##       - each list item will have MD5sum, Timestamp and pointer to S3Uri
338
-	##         used as a prefix.
339
-	##   - the last arg may be a local directory - destination_base
340
-	##   - if the last one is S3 make current dir the destination_base
341
-	##   - if the last one doesn't exist check remote list:
342
-	##       - if there is only one item and its_prefix==its_name 
343
-	##         download that item to the name given in last arg.
344
-	##       - if there are more remote items use the last arg as a destination_base
345
-	##         and try to create the directory (incl. all parents).
346
-	##
347
-	## In both cases we end up with a list mapping remote object names (keys) to local file names.
348
-
349
-	## Each item will be a dict with the following attributes
350
-	# {'remote_uri', 'local_filename'}
351
-	download_list = []
352
-
353
-	if len(args) == 0:
354
-		raise ParameterError("Nothing to download. Expecting S3 URI.")
355
-
356
-	if S3Uri(args[-1]).type == 'file':
357
-		destination_base = args.pop()
358
-	else:
359
-		destination_base = "."
360
-
361
-	if len(args) == 0:
362
-		raise ParameterError("Nothing to download. Expecting S3 URI.")
363
-
364
-	remote_list = fetch_remote_list(args, require_attribs = False)
365
-	remote_list, exclude_list = filter_exclude_include(remote_list)
366
-
367
-	remote_count = len(remote_list)
368
-
369
-	info(u"Summary: %d remote files to download" % remote_count)
370
-
371
-	if remote_count > 0:
372
-		if not os.path.isdir(destination_base) or destination_base == '-':
373
-			## We were either given a file name (existing or not) or want STDOUT
374
-			if remote_count > 1:
375
-				raise ParameterError("Destination must be a directory when downloading multiple sources.")
376
-			remote_list[remote_list.keys()[0]]['local_filename'] = deunicodise(destination_base)
377
-		elif os.path.isdir(destination_base):
378
-			if destination_base[-1] != os.path.sep:
379
-				destination_base += os.path.sep
380
-			for key in remote_list:
381
-				remote_list[key]['local_filename'] = destination_base + key
382
-		else:
383
-			raise InternalError("WTF? Is it a dir or not? -- %s" % destination_base)
384
-
385
-	if cfg.dry_run:
386
-		for key in exclude_list:
387
-			output(u"exclude: %s" % unicodise(key))
388
-		for key in remote_list:
389
-			output(u"download: %s -> %s" % (remote_list[key]['object_uri_str'], remote_list[key]['local_filename']))
390
-
391
-		warning(u"Exitting now because of --dry-run")
392
-		return
393
-
394
-	seq = 0
395
-	for key in remote_list:
396
-		seq += 1
397
-		item = remote_list[key]
398
-		uri = S3Uri(item['object_uri_str'])
399
-		## Encode / Decode destination with "replace" to make sure it's compatible with current encoding
400
-		destination = unicodise_safe(item['local_filename'])
401
-		seq_label = "[%d of %d]" % (seq, remote_count)
402
-
403
-		start_position = 0
404
-
405
-		if destination == "-":
406
-			## stdout
407
-			dst_stream = sys.__stdout__
408
-		else:
409
-			## File
410
-			try:
411
-				file_exists = os.path.exists(destination)
412
-				try:
413
-					dst_stream = open(destination, "ab")
414
-				except IOError, e:
415
-					if e.errno == errno.ENOENT:
416
-						basename = destination[:destination.rindex(os.path.sep)]
417
-						info(u"Creating directory: %s" % basename)
418
-						os.makedirs(basename)
419
-						dst_stream = open(destination, "ab")
420
-					else:
421
-						raise
422
-				if file_exists:
423
-					if Config().get_continue:
424
-						start_position = dst_stream.tell()
425
-					elif Config().force:
426
-						start_position = 0L
427
-						dst_stream.seek(0L)
428
-						dst_stream.truncate()
429
-					elif Config().skip_existing:
430
-						info(u"Skipping over existing file: %s" % (destination))
431
-						continue
432
-					else:
433
-						dst_stream.close()
434
-						raise ParameterError(u"File %s already exists. Use either of --force / --continue / --skip-existing or give it a new name." % destination)
435
-			except IOError, e:
436
-				error(u"Skipping %s: %s" % (destination, e.strerror))
437
-				continue
438
-		response = s3.object_get(uri, dst_stream, start_position = start_position, extra_label = seq_label)
439
-		if response["headers"].has_key("x-amz-meta-s3tools-gpgenc"):
440
-			gpg_decrypt(destination, response["headers"]["x-amz-meta-s3tools-gpgenc"])
441
-			response["size"] = os.stat(destination)[6]
442
-		if not Config().progress_meter and destination != "-":
443
-			speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
444
-			output(u"File %s saved as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s)" %
445
-				(uri, destination, response["size"], response["elapsed"], speed_fmt[0], speed_fmt[1]))
319
+    cfg = Config()
320
+    s3 = S3(cfg)
321
+
322
+    ## Check arguments:
323
+    ## if not --recursive:
324
+    ##   - first N arguments must be S3Uri
325
+    ##   - if the last one is S3 make current dir the destination_base
326
+    ##   - if the last one is a directory:
327
+    ##       - take all 'basenames' of the remote objects and
328
+    ##         make the destination name be 'destination_base'+'basename'
329
+    ##   - if the last one is a file or not existing:
330
+    ##       - if the number of sources (N, above) == 1 treat it
331
+    ##         as a filename and save the object there.
332
+    ##       - if there's more sources -> Error
333
+    ## if --recursive:
334
+    ##   - first N arguments must be S3Uri
335
+    ##       - for each Uri get a list of remote objects with that Uri as a prefix
336
+    ##       - apply exclude/include rules
337
+    ##       - each list item will have MD5sum, Timestamp and pointer to S3Uri
338
+    ##         used as a prefix.
339
+    ##   - the last arg may be a local directory - destination_base
340
+    ##   - if the last one is S3 make current dir the destination_base
341
+    ##   - if the last one doesn't exist check remote list:
342
+    ##       - if there is only one item and its_prefix==its_name
343
+    ##         download that item to the name given in last arg.
344
+    ##       - if there are more remote items use the last arg as a destination_base
345
+    ##         and try to create the directory (incl. all parents).
346
+    ##
347
+    ## In both cases we end up with a list mapping remote object names (keys) to local file names.
348
+
349
+    ## Each item will be a dict with the following attributes
350
+    # {'remote_uri', 'local_filename'}
351
+    download_list = []
352
+
353
+    if len(args) == 0:
354
+        raise ParameterError("Nothing to download. Expecting S3 URI.")
355
+
356
+    if S3Uri(args[-1]).type == 'file':
357
+        destination_base = args.pop()
358
+    else:
359
+        destination_base = "."
360
+
361
+    if len(args) == 0:
362
+        raise ParameterError("Nothing to download. Expecting S3 URI.")
363
+
364
+    remote_list = fetch_remote_list(args, require_attribs = False)
365
+    remote_list, exclude_list = filter_exclude_include(remote_list)
366
+
367
+    remote_count = len(remote_list)
368
+
369
+    info(u"Summary: %d remote files to download" % remote_count)
370
+
371
+    if remote_count > 0:
372
+        if not os.path.isdir(destination_base) or destination_base == '-':
373
+            ## We were either given a file name (existing or not) or want STDOUT
374
+            if remote_count > 1:
375
+                raise ParameterError("Destination must be a directory when downloading multiple sources.")
376
+            remote_list[remote_list.keys()[0]]['local_filename'] = deunicodise(destination_base)
377
+        elif os.path.isdir(destination_base):
378
+            if destination_base[-1] != os.path.sep:
379
+                destination_base += os.path.sep
380
+            for key in remote_list:
381
+                remote_list[key]['local_filename'] = destination_base + key
382
+        else:
383
+            raise InternalError("WTF? Is it a dir or not? -- %s" % destination_base)
384
+
385
+    if cfg.dry_run:
386
+        for key in exclude_list:
387
+            output(u"exclude: %s" % unicodise(key))
388
+        for key in remote_list:
389
+            output(u"download: %s -> %s" % (remote_list[key]['object_uri_str'], remote_list[key]['local_filename']))
390
+
391
+        warning(u"Exitting now because of --dry-run")
392
+        return
393
+
394
+    seq = 0
395
+    for key in remote_list:
396
+        seq += 1
397
+        item = remote_list[key]
398
+        uri = S3Uri(item['object_uri_str'])
399
+        ## Encode / Decode destination with "replace" to make sure it's compatible with current encoding
400
+        destination = unicodise_safe(item['local_filename'])
401
+        seq_label = "[%d of %d]" % (seq, remote_count)
402
+
403
+        start_position = 0
404
+
405
+        if destination == "-":
406
+            ## stdout
407
+            dst_stream = sys.__stdout__
408
+        else:
409
+            ## File
410
+            try:
411
+                file_exists = os.path.exists(destination)
412
+                try:
413
+                    dst_stream = open(destination, "ab")
414
+                except IOError, e:
415
+                    if e.errno == errno.ENOENT:
416
+                        basename = destination[:destination.rindex(os.path.sep)]
417
+                        info(u"Creating directory: %s" % basename)
418
+                        os.makedirs(basename)
419
+                        dst_stream = open(destination, "ab")
420
+                    else:
421
+                        raise
422
+                if file_exists:
423
+                    if Config().get_continue:
424
+                        start_position = dst_stream.tell()
425
+                    elif Config().force:
426
+                        start_position = 0L
427
+                        dst_stream.seek(0L)
428
+                        dst_stream.truncate()
429
+                    elif Config().skip_existing:
430
+                        info(u"Skipping over existing file: %s" % (destination))
431
+                        continue
432
+                    else:
433
+                        dst_stream.close()
434
+                        raise ParameterError(u"File %s already exists. Use either of --force / --continue / --skip-existing or give it a new name." % destination)
435
+            except IOError, e:
436
+                error(u"Skipping %s: %s" % (destination, e.strerror))
437
+                continue
438
+        response = s3.object_get(uri, dst_stream, start_position = start_position, extra_label = seq_label)
439
+        if response["headers"].has_key("x-amz-meta-s3tools-gpgenc"):
440
+            gpg_decrypt(destination, response["headers"]["x-amz-meta-s3tools-gpgenc"])
441
+            response["size"] = os.stat(destination)[6]
442
+        if not Config().progress_meter and destination != "-":
443
+            speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
444
+            output(u"File %s saved as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s)" %
445
+                (uri, destination, response["size"], response["elapsed"], speed_fmt[0], speed_fmt[1]))
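
Two details of cmd_object_get() are easy to miss in the long body above. The comment block's destination rules reduce to: the last argument is the local destination only if it is not an S3 URI, and multiple sources demand a directory. And resuming is done purely through the file mode: the destination is opened with "ab", so --continue can take tell() as the start offset while --force rewinds and truncates. A condensed sketch under the same Config() flags:

    # Condensed sketch of the resume handling above.
    dst_stream = open(destination, "ab")    # append mode keeps existing bytes
    start_position = 0
    if file_exists:
        if cfg.get_continue:
            start_position = dst_stream.tell()   # resume where the file ends
        elif cfg.force:
            dst_stream.seek(0)
            dst_stream.truncate()                # overwrite from scratch
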
446 446
 
447 447
 def cmd_object_del(args):
448
-	for uri_str in args:
449
-		uri = S3Uri(uri_str)
450
-		if uri.type != "s3":
451
-			raise ParameterError("Expecting S3 URI instead of '%s'" % uri_str)
452
-		if not uri.has_object():
453
-			if Config().recursive and not Config().force:
454
-				raise ParameterError("Please use --force to delete ALL contents of %s" % uri_str)
455
-			elif not Config().recursive:
456
-				raise ParameterError("File name required, not only the bucket name. Alternatively use --recursive")
457
-		subcmd_object_del_uri(uri_str)
448
+    for uri_str in args:
449
+        uri = S3Uri(uri_str)
450
+        if uri.type != "s3":
451
+            raise ParameterError("Expecting S3 URI instead of '%s'" % uri_str)
452
+        if not uri.has_object():
453
+            if Config().recursive and not Config().force:
454
+                raise ParameterError("Please use --force to delete ALL contents of %s" % uri_str)
455
+            elif not Config().recursive:
456
+                raise ParameterError("File name required, not only the bucket name. Alternatively use --recursive")
457
+        subcmd_object_del_uri(uri_str)
458 458
 
459 459
 def subcmd_object_del_uri(uri_str, recursive = None):
460
-	s3 = S3(cfg)
460
+    s3 = S3(cfg)
461 461
 
462
-	if recursive is None:
463
-		recursive = cfg.recursive
462
+    if recursive is None:
463
+        recursive = cfg.recursive
464 464
 
465
-	remote_list = fetch_remote_list(uri_str, require_attribs = False, recursive = recursive)
466
-	remote_list, exclude_list = filter_exclude_include(remote_list)
465
+    remote_list = fetch_remote_list(uri_str, require_attribs = False, recursive = recursive)
466
+    remote_list, exclude_list = filter_exclude_include(remote_list)
467 467
 
468
-	remote_count = len(remote_list)
468
+    remote_count = len(remote_list)
469 469
 
470
-	info(u"Summary: %d remote files to delete" % remote_count)
470
+    info(u"Summary: %d remote files to delete" % remote_count)
471 471
 
472
-	if cfg.dry_run:
473
-		for key in exclude_list:
474
-			output(u"exclude: %s" % unicodise(key))
475
-		for key in remote_list:
476
-			output(u"delete: %s" % remote_list[key]['object_uri_str'])
472
+    if cfg.dry_run:
473
+        for key in exclude_list:
474
+            output(u"exclude: %s" % unicodise(key))
475
+        for key in remote_list:
476
+            output(u"delete: %s" % remote_list[key]['object_uri_str'])
477 477
 
478
-		warning(u"Exitting now because of --dry-run")
479
-		return
478
+        warning(u"Exitting now because of --dry-run")
479
+        return
480 480
 
481
-	for key in remote_list:
482
-		item = remote_list[key]
483
-		response = s3.object_delete(S3Uri(item['object_uri_str']))
484
-		output(u"File %s deleted" % item['object_uri_str'])
481
+    for key in remote_list:
482
+        item = remote_list[key]
483
+        response = s3.object_delete(S3Uri(item['object_uri_str']))
484
+        output(u"File %s deleted" % item['object_uri_str'])
485 485
 
486 486
 def subcmd_cp_mv(args, process_fce, action_str, message):
487
-	if len(args) < 2:
488
-		raise ParameterError("Expecting two or more S3 URIs for " + action_str)
489
-	dst_base_uri = S3Uri(args.pop())
490
-	if dst_base_uri.type != "s3":
491
-		raise ParameterError("Destination must be S3 URI. To download a file use 'get' or 'sync'.")
492
-	destination_base = dst_base_uri.uri()
493
-
494
-	remote_list = fetch_remote_list(args, require_attribs = False)
495
-	remote_list, exclude_list = filter_exclude_include(remote_list)
496
-
497
-	remote_count = len(remote_list)
498
-
499
-	info(u"Summary: %d remote files to %s" % (remote_count, action_str))
500
-
501
-	if cfg.recursive:
502
-		if not destination_base.endswith("/"):
503
-			destination_base += "/"
504
-		for key in remote_list:
505
-			remote_list[key]['dest_name'] = destination_base + key
506
-	else:
507
-		key = remote_list.keys()[0]
508
-		if destination_base.endswith("/"):
509
-			remote_list[key]['dest_name'] = destination_base + key
510
-		else:
511
-			remote_list[key]['dest_name'] = destination_base
512
-
513
-	if cfg.dry_run:
514
-		for key in exclude_list:
515
-			output(u"exclude: %s" % unicodise(key))
516
-		for key in remote_list:
517
-			output(u"%s: %s -> %s" % (action_str, remote_list[key]['object_uri_str'], remote_list[key]['dest_name']))
518
-
519
-		warning(u"Exitting now because of --dry-run")
520
-		return
521
-
522
-	seq = 0
523
-	for key in remote_list:
524
-		seq += 1
525
-		seq_label = "[%d of %d]" % (seq, remote_count)
526
-
527
-		item = remote_list[key]
528
-		src_uri = S3Uri(item['object_uri_str'])
529
-		dst_uri = S3Uri(item['dest_name'])
530
-
531
-		extra_headers = copy(cfg.extra_headers)
532
-		response = process_fce(src_uri, dst_uri, extra_headers) 
533
-		output(message % { "src" : src_uri, "dst" : dst_uri })
534
-		if Config().acl_public:
535
-			info(u"Public URL is: %s" % dst_uri.public_url())
487
+    if len(args) < 2:
488
+        raise ParameterError("Expecting two or more S3 URIs for " + action_str)
489
+    dst_base_uri = S3Uri(args.pop())
490
+    if dst_base_uri.type != "s3":
491
+        raise ParameterError("Destination must be S3 URI. To download a file use 'get' or 'sync'.")
492
+    destination_base = dst_base_uri.uri()
493
+
494
+    remote_list = fetch_remote_list(args, require_attribs = False)
495
+    remote_list, exclude_list = filter_exclude_include(remote_list)
496
+
497
+    remote_count = len(remote_list)
498
+
499
+    info(u"Summary: %d remote files to %s" % (remote_count, action_str))
500
+
501
+    if cfg.recursive:
502
+        if not destination_base.endswith("/"):
503
+            destination_base += "/"
504
+        for key in remote_list:
505
+            remote_list[key]['dest_name'] = destination_base + key
506
+    else:
507
+        key = remote_list.keys()[0]
508
+        if destination_base.endswith("/"):
509
+            remote_list[key]['dest_name'] = destination_base + key
510
+        else:
511
+            remote_list[key]['dest_name'] = destination_base
512
+
513
+    if cfg.dry_run:
514
+        for key in exclude_list:
515
+            output(u"exclude: %s" % unicodise(key))
516
+        for key in remote_list:
517
+            output(u"%s: %s -> %s" % (action_str, remote_list[key]['object_uri_str'], remote_list[key]['dest_name']))
518
+
519
+        warning(u"Exitting now because of --dry-run")
520
+        return
521
+
522
+    seq = 0
523
+    for key in remote_list:
524
+        seq += 1
525
+        seq_label = "[%d of %d]" % (seq, remote_count)
526
+
527
+        item = remote_list[key]
528
+        src_uri = S3Uri(item['object_uri_str'])
529
+        dst_uri = S3Uri(item['dest_name'])
530
+
531
+        extra_headers = copy(cfg.extra_headers)
532
+        response = process_fce(src_uri, dst_uri, extra_headers)
533
+        output(message % { "src" : src_uri, "dst" : dst_uri })
534
+        if Config().acl_public:
535
+            info(u"Public URL is: %s" % dst_uri.public_url())
536 536
 
537 537
 def cmd_cp(args):
538
-	s3 = S3(Config())
539
-	subcmd_cp_mv(args, s3.object_copy, "copy", "File %(src)s copied to %(dst)s")
538
+    s3 = S3(Config())
539
+    subcmd_cp_mv(args, s3.object_copy, "copy", "File %(src)s copied to %(dst)s")
540 540
 
541 541
 def cmd_mv(args):
542
-	s3 = S3(Config())
543
-	subcmd_cp_mv(args, s3.object_move, "move", "File %(src)s moved to %(dst)s")
542
+    s3 = S3(Config())
543
+    subcmd_cp_mv(args, s3.object_move, "move", "File %(src)s moved to %(dst)s")
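
The naming rule in subcmd_cp_mv is worth spelling out: with --recursive every source key is appended to the destination (which is forced to end with '/'); for a single source the destination is taken verbatim unless it ends with '/', in which case the key is appended. A sketch of just that rule (hypothetical function, mirroring the code above):

    def dest_names(keys, destination_base, recursive):
        if recursive:
            if not destination_base.endswith("/"):
                destination_base += "/"
            return dict((key, destination_base + key) for key in keys)
        key = list(keys)[0]                  # non-recursive: only the first key is mapped
        if destination_base.endswith("/"):
            return {key: destination_base + key}
        return {key: destination_base}

    print(dest_names(["a.txt"], "s3://bkt/renamed.txt", False))
    # {'a.txt': 's3://bkt/renamed.txt'}
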
544 544
 
545 545
 def cmd_info(args):
546
-	s3 = S3(Config())
547
-
548
-	while (len(args)):
549
-		uri_arg = args.pop(0)
550
-		uri = S3Uri(uri_arg)
551
-		if uri.type != "s3" or not uri.has_bucket():
552
-			raise ParameterError("Expecting S3 URI instead of '%s'" % uri_arg)
553
-
554
-		try:
555
-			if uri.has_object():
556
-				info = s3.object_info(uri)
557
-				output(u"%s (object):" % uri.uri())
558
-				output(u"   File size: %s" % info['headers']['content-length'])
559
-				output(u"   Last mod:  %s" % info['headers']['last-modified'])
560
-				output(u"   MIME type: %s" % info['headers']['content-type'])
561
-				output(u"   MD5 sum:   %s" % info['headers']['etag'].strip('"'))
562
-			else:
563
-				info = s3.bucket_info(uri)
564
-				output(u"%s (bucket):" % uri.uri())
565
-				output(u"   Location:  %s" % info['bucket-location'])
566
-			acl = s3.get_acl(uri)
567
-			acl_grant_list = acl.getGrantList()
568
-			for grant in acl_grant_list:
569
-				output(u"   ACL:       %s: %s" % (grant['grantee'], grant['permission']))
570
-			if acl.isAnonRead():
571
-				output(u"   URL:       %s" % uri.public_url())
572
-		except S3Error, e:
573
-			if S3.codes.has_key(e.info["Code"]):
574
-				error(S3.codes[e.info["Code"]] % uri.bucket())
575
-				return
576
-			else:
577
-				raise
546
+    s3 = S3(Config())
547
+
548
+    while (len(args)):
549
+        uri_arg = args.pop(0)
550
+        uri = S3Uri(uri_arg)
551
+        if uri.type != "s3" or not uri.has_bucket():
552
+            raise ParameterError("Expecting S3 URI instead of '%s'" % uri_arg)
553
+
554
+        try:
555
+            if uri.has_object():
556
+                info = s3.object_info(uri)
557
+                output(u"%s (object):" % uri.uri())
558
+                output(u"   File size: %s" % info['headers']['content-length'])
559
+                output(u"   Last mod:  %s" % info['headers']['last-modified'])
560
+                output(u"   MIME type: %s" % info['headers']['content-type'])
561
+                output(u"   MD5 sum:   %s" % info['headers']['etag'].strip('"'))
562
+            else:
563
+                info = s3.bucket_info(uri)
564
+                output(u"%s (bucket):" % uri.uri())
565
+                output(u"   Location:  %s" % info['bucket-location'])
566
+            acl = s3.get_acl(uri)
567
+            acl_grant_list = acl.getGrantList()
568
+            for grant in acl_grant_list:
569
+                output(u"   ACL:       %s: %s" % (grant['grantee'], grant['permission']))
570
+            if acl.isAnonRead():
571
+                output(u"   URL:       %s" % uri.public_url())
572
+        except S3Error, e:
573
+            if S3.codes.has_key(e.info["Code"]):
574
+                error(S3.codes[e.info["Code"]] % uri.bucket())
575
+                return
576
+            else:
577
+                raise
578 578
 
579 579
 def cmd_sync_remote2remote(args):
580
-	s3 = S3(Config())
581
-
582
-	# Normalise s3://uri (e.g. assert trailing slash)
583
-	destination_base = unicode(S3Uri(args[-1]))
584
-
585
-	src_list = fetch_remote_list(args[:-1], recursive = True, require_attribs = True)
586
-	dst_list = fetch_remote_list(destination_base, recursive = True, require_attribs = True)
587
-	 
588
-	src_count = len(src_list)
589
-	dst_count = len(dst_list)
590
-
591
-	info(u"Found %d source files, %d destination files" % (src_count, dst_count))
592
-
593
-	src_list, exclude_list = filter_exclude_include(src_list)
594
-
595
-	src_list, dst_list, existing_list = compare_filelists(src_list, dst_list, src_remote = True, dst_remote = True)
596
-
597
-	src_count = len(src_list)
598
-	dst_count = len(dst_list)
599
-
600
-	print(u"Summary: %d source files to copy, %d files at destination to delete" % (src_count, dst_count))
601
-
602
-	if src_count > 0:
603
-		### Populate 'remote_uri' only if we've got something to sync from src to dst
604
-		for key in src_list:
605
-			src_list[key]['target_uri'] = destination_base + key
606
-
607
-	if cfg.dry_run:
608
-		for key in exclude_list:
609
-			output(u"exclude: %s" % unicodise(key))
610
-		if cfg.delete_removed:
611
-			for key in dst_list:
612
-				output(u"delete: %s" % dst_list[key]['object_uri_str'])
613
-		for key in src_list:
614
-			output(u"Sync: %s -> %s" % (src_list[key]['object_uri_str'], src_list[key]['target_uri']))
615
-		warning(u"Exitting now because of --dry-run")
616
-		return
617
-
618
-	# Delete items in destination that are not in source
619
-	if cfg.delete_removed:
620
-		if cfg.dry_run:
621
-			for key in dst_list:
622
-				output(u"delete: %s" % dst_list[key]['object_uri_str'])
623
-		else:
624
-			for key in dst_list:
625
-				uri = S3Uri(dst_list[key]['object_uri_str'])
626
-				s3.object_delete(uri)
627
-				output(u"deleted: '%s'" % uri)
628
-	
629
-	# Perform the synchronization of files
630
-	timestamp_start = time.time()
631
-	seq = 0
632
-	file_list = src_list.keys()
633
-	file_list.sort()
634
-	for file in file_list:
635
-		seq += 1
636
-		item = src_list[file]
637
-		src_uri = S3Uri(item['object_uri_str'])
638
-		dst_uri = S3Uri(item['target_uri'])
639
-		seq_label = "[%d of %d]" % (seq, src_count)
640
-		extra_headers = copy(cfg.extra_headers)
641
-		try:
642
-			response = s3.object_copy(src_uri, dst_uri, extra_headers)
643
-			output("File %(src)s copied to %(dst)s" % { "src" : src_uri, "dst" : dst_uri })
644
-		except S3Error, e:
645
-			error("File %(src)s could not be copied: %(e)s" % { "src" : src_uri, "e" : e })
646
-	total_elapsed = time.time() - timestamp_start
647
-	outstr = "Done. Copied %d files in %0.1f seconds, %0.2f files/s" % (seq, total_elapsed, seq/total_elapsed)
648
-	if seq > 0:
649
-		output(outstr)
650
-	else:
651
-		info(outstr)
580
+    s3 = S3(Config())
581
+
582
+    # Normalise s3://uri (i.e. ensure a trailing slash)
583
+    destination_base = unicode(S3Uri(args[-1]))
584
+
585
+    src_list = fetch_remote_list(args[:-1], recursive = True, require_attribs = True)
586
+    dst_list = fetch_remote_list(destination_base, recursive = True, require_attribs = True)
587
+
588
+    src_count = len(src_list)
589
+    dst_count = len(dst_list)
590
+
591
+    info(u"Found %d source files, %d destination files" % (src_count, dst_count))
592
+
593
+    src_list, exclude_list = filter_exclude_include(src_list)
594
+
595
+    src_list, dst_list, existing_list = compare_filelists(src_list, dst_list, src_remote = True, dst_remote = True)
596
+
597
+    src_count = len(src_list)
598
+    dst_count = len(dst_list)
599
+
600
+    print(u"Summary: %d source files to copy, %d files at destination to delete" % (src_count, dst_count))
601
+
602
+    if src_count > 0:
603
+        ### Populate 'remote_uri' only if we've got something to sync from src to dst
604
+        for key in src_list:
605
+            src_list[key]['target_uri'] = destination_base + key
606
+
607
+    if cfg.dry_run:
608
+        for key in exclude_list:
609
+            output(u"exclude: %s" % unicodise(key))
610
+        if cfg.delete_removed:
611
+            for key in dst_list:
612
+                output(u"delete: %s" % dst_list[key]['object_uri_str'])
613
+        for key in src_list:
614
+            output(u"Sync: %s -> %s" % (src_list[key]['object_uri_str'], src_list[key]['target_uri']))
615
+        warning(u"Exitting now because of --dry-run")
616
+        return
617
+
618
+    # Delete items in destination that are not in source
619
+    if cfg.delete_removed:
620
+        if cfg.dry_run:
621
+            for key in dst_list:
622
+                output(u"delete: %s" % dst_list[key]['object_uri_str'])
623
+        else:
624
+            for key in dst_list:
625
+                uri = S3Uri(dst_list[key]['object_uri_str'])
626
+                s3.object_delete(uri)
627
+                output(u"deleted: '%s'" % uri)
628
+
629
+    # Perform the synchronization of files
630
+    timestamp_start = time.time()
631
+    seq = 0
632
+    file_list = src_list.keys()
633
+    file_list.sort()
634
+    for file in file_list:
635
+        seq += 1
636
+        item = src_list[file]
637
+        src_uri = S3Uri(item['object_uri_str'])
638
+        dst_uri = S3Uri(item['target_uri'])
639
+        seq_label = "[%d of %d]" % (seq, src_count)
640
+        extra_headers = copy(cfg.extra_headers)
641
+        try:
642
+            response = s3.object_copy(src_uri, dst_uri, extra_headers)
643
+            output("File %(src)s copied to %(dst)s" % { "src" : src_uri, "dst" : dst_uri })
644
+        except S3Error, e:
645
+            error("File %(src)s could not be copied: %(e)s" % { "src" : src_uri, "e" : e })
646
+    total_elapsed = time.time() - timestamp_start
647
+    outstr = "Done. Copied %d files in %0.1f seconds, %0.2f files/s" % (seq, total_elapsed, seq/total_elapsed)
648
+    if seq > 0:
649
+        output(outstr)
650
+    else:
651
+        info(outstr)
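
Both sync directions lean on compare_filelists, which conceptually partitions the two listings into three sets: sources to transfer, destination-only items that --delete-removed may remove, and items present on both sides. A simplified illustration (the real function also compares attributes such as size and checksum, so treat this as a sketch):

    def split_filelists(src, dst):
        transfer = dict((k, v) for k, v in src.items() if k not in dst)
        delete   = dict((k, v) for k, v in dst.items() if k not in src)
        existing = dict((k, v) for k, v in src.items() if k in dst)
        return transfer, delete, existing

    src = {"a": 1, "b": 2}
    dst = {"b": 2, "c": 3}
    print(split_filelists(src, dst))
    # ({'a': 1}, {'c': 3}, {'b': 2})
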
652 652
 
653 653
 def cmd_sync_remote2local(args):
654
-	def _parse_attrs_header(attrs_header):
655
-		attrs = {}
656
-		for attr in attrs_header.split("/"):
657
-			key, val = attr.split(":")
658
-			attrs[key] = val
659
-		return attrs
660
-		
661
-	s3 = S3(Config())
662
-
663
-	destination_base = args[-1]
664
-	local_list, single_file_local = fetch_local_list(destination_base, recursive = True)
665
-	remote_list = fetch_remote_list(args[:-1], recursive = True, require_attribs = True)
666
-
667
-	local_count = len(local_list)
668
-	remote_count = len(remote_list)
669
-
670
-	info(u"Found %d remote files, %d local files" % (remote_count, local_count))
671
-
672
-	remote_list, exclude_list = filter_exclude_include(remote_list)
673
-
674
-	remote_list, local_list, existing_list = compare_filelists(remote_list, local_list, src_remote = True, dst_remote = False)
675
-
676
-	local_count = len(local_list)
677
-	remote_count = len(remote_list)
678
-
679
-	info(u"Summary: %d remote files to download, %d local files to delete" % (remote_count, local_count))
680
-
681
-	if not os.path.isdir(destination_base):
682
-		## We were either given a file name (existing or not) or want STDOUT
683
-		if remote_count > 1:
684
-			raise ParameterError("Destination must be a directory when downloading multiple sources.")
685
-		remote_list[remote_list.keys()[0]]['local_filename'] = deunicodise(destination_base)
686
-	else:
687
-		if destination_base[-1] != os.path.sep:
688
-			destination_base += os.path.sep
689
-		for key in remote_list:
690
-			local_filename = destination_base + key
691
-			if os.path.sep != "/":
692
-				local_filename = os.path.sep.join(local_filename.split("/"))
693
-			remote_list[key]['local_filename'] = deunicodise(local_filename)
694
-
695
-	if cfg.dry_run:
696
-		for key in exclude_list:
697
-			output(u"exclude: %s" % unicodise(key))
698
-		if cfg.delete_removed:
699
-			for key in local_list:
700
-				output(u"delete: %s" % local_list[key]['full_name_unicode'])
701
-		for key in remote_list:
702
-			output(u"download: %s -> %s" % (remote_list[key]['object_uri_str'], remote_list[key]['local_filename']))
703
-
704
-		warning(u"Exitting now because of --dry-run")
705
-		return
706
-
707
-	if cfg.delete_removed:
708
-		for key in local_list:
709
-			os.unlink(local_list[key]['full_name'])
710
-			output(u"deleted: %s" % local_list[key]['full_name_unicode'])
711
-
712
-	total_size = 0
713
-	total_elapsed = 0.0
714
-	timestamp_start = time.time()
715
-	seq = 0
716
-	dir_cache = {}
717
-	file_list = remote_list.keys()
718
-	file_list.sort()
719
-	for file in file_list:
720
-		seq += 1
721
-		item = remote_list[file]
722
-		uri = S3Uri(item['object_uri_str'])
723
-		dst_file = item['local_filename']
724
-		seq_label = "[%d of %d]" % (seq, remote_count)
725
-		try:
726
-			dst_dir = os.path.dirname(dst_file)
727
-			if not dir_cache.has_key(dst_dir):
728
-				dir_cache[dst_dir] = Utils.mkdir_with_parents(dst_dir)
729
-			if dir_cache[dst_dir] == False:
730
-				warning(u"%s: destination directory not writable: %s" % (file, dst_dir))
731
-				continue
732
-			try:
733
-				open_flags = os.O_CREAT
734
-				open_flags |= os.O_TRUNC
735
-				# open_flags |= os.O_EXCL
736
-
737
-				debug(u"dst_file=%s" % unicodise(dst_file))
738
-				# This will have failed should the file exist
739
-				os.close(os.open(dst_file, open_flags))
740
-				# Yeah I know there is a race condition here. Sadly I don't know how to open() in exclusive mode.
741
-				dst_stream = open(dst_file, "wb")
742
-				response = s3.object_get(uri, dst_stream, extra_label = seq_label)
743
-				dst_stream.close()
744
-				if response['headers'].has_key('x-amz-meta-s3cmd-attrs') and cfg.preserve_attrs:
745
-					attrs = _parse_attrs_header(response['headers']['x-amz-meta-s3cmd-attrs'])
746
-					if attrs.has_key('mode'):
747
-						os.chmod(dst_file, int(attrs['mode']))
748
-					if attrs.has_key('mtime') or attrs.has_key('atime'):
749
-						mtime = attrs.has_key('mtime') and int(attrs['mtime']) or int(time.time())
750
-						atime = attrs.has_key('atime') and int(attrs['atime']) or int(time.time())
751
-						os.utime(dst_file, (atime, mtime))
752
-					## FIXME: uid/gid / uname/gname handling comes here! TODO
753
-			except OSError, e:
754
-				try: dst_stream.close()
755
-				except: pass
756
-				if e.errno == errno.EEXIST:
757
-					warning(u"%s exists - not overwriting" % (dst_file))
758
-					continue
759
-				if e.errno in (errno.EPERM, errno.EACCES):
760
-					warning(u"%s not writable: %s" % (dst_file, e.strerror))
761
-					continue
762
-				if e.errno == errno.EISDIR:
763
-					warning(u"%s is a directory - skipping over" % dst_file)
764
-					continue
765
-				raise e
766
-			except KeyboardInterrupt:
767
-				try: dst_stream.close()
768
-				except: pass
769
-				warning(u"Exiting after keyboard interrupt")
770
-				return
771
-			except Exception, e:
772
-				try: dst_stream.close()
773
-				except: pass
774
-				error(u"%s: %s" % (file, e))
775
-				continue
776
-			# We have to keep repeating this call because 
777
-			# Python 2.4 doesn't support try/except/finally
778
-			# construction :-(
779
-			try: dst_stream.close()
780
-			except: pass
781
-		except S3DownloadError, e:
782
-			error(u"%s: download failed too many times. Skipping that file." % file)
783
-			continue
784
-		speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
785
-		if not Config().progress_meter:
786
-			output(u"File '%s' stored as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
787
-				(uri, unicodise(dst_file), response["size"], response["elapsed"], speed_fmt[0], speed_fmt[1],
788
-				seq_label))
789
-		total_size += response["size"]
790
-
791
-	total_elapsed = time.time() - timestamp_start
792
-	speed_fmt = formatSize(total_size/total_elapsed, human_readable = True, floating_point = True)
793
-
794
-	# Only print out the result if any work has been done or 
795
-	# if the user asked for verbose output
796
-	outstr = "Done. Downloaded %d bytes in %0.1f seconds, %0.2f %sB/s" % (total_size, total_elapsed, speed_fmt[0], speed_fmt[1])
797
-	if total_size > 0:
798
-		output(outstr)
799
-	else:
800
-		info(outstr)
654
+    def _parse_attrs_header(attrs_header):
655
+        attrs = {}
656
+        for attr in attrs_header.split("/"):
657
+            key, val = attr.split(":")
658
+            attrs[key] = val
659
+        return attrs
660
+
661
+    s3 = S3(Config())
662
+
663
+    destination_base = args[-1]
664
+    local_list, single_file_local = fetch_local_list(destination_base, recursive = True)
665
+    remote_list = fetch_remote_list(args[:-1], recursive = True, require_attribs = True)
666
+
667
+    local_count = len(local_list)
668
+    remote_count = len(remote_list)
669
+
670
+    info(u"Found %d remote files, %d local files" % (remote_count, local_count))
671
+
672
+    remote_list, exclude_list = filter_exclude_include(remote_list)
673
+
674
+    remote_list, local_list, existing_list = compare_filelists(remote_list, local_list, src_remote = True, dst_remote = False)
675
+
676
+    local_count = len(local_list)
677
+    remote_count = len(remote_list)
678
+
679
+    info(u"Summary: %d remote files to download, %d local files to delete" % (remote_count, local_count))
680
+
681
+    if not os.path.isdir(destination_base):
682
+        ## We were either given a file name (existing or not) or want STDOUT
683
+        if remote_count > 1:
684
+            raise ParameterError("Destination must be a directory when downloading multiple sources.")
685
+        remote_list[remote_list.keys()[0]]['local_filename'] = deunicodise(destination_base)
686
+    else:
687
+        if destination_base[-1] != os.path.sep:
688
+            destination_base += os.path.sep
689
+        for key in remote_list:
690
+            local_filename = destination_base + key
691
+            if os.path.sep != "/":
692
+                local_filename = os.path.sep.join(local_filename.split("/"))
693
+            remote_list[key]['local_filename'] = deunicodise(local_filename)
694
+
695
+    if cfg.dry_run:
696
+        for key in exclude_list:
697
+            output(u"exclude: %s" % unicodise(key))
698
+        if cfg.delete_removed:
699
+            for key in local_list:
700
+                output(u"delete: %s" % local_list[key]['full_name_unicode'])
701
+        for key in remote_list:
702
+            output(u"download: %s -> %s" % (remote_list[key]['object_uri_str'], remote_list[key]['local_filename']))
703
+
704
+        warning(u"Exitting now because of --dry-run")
705
+        return
706
+
707
+    if cfg.delete_removed:
708
+        for key in local_list:
709
+            os.unlink(local_list[key]['full_name'])
710
+            output(u"deleted: %s" % local_list[key]['full_name_unicode'])
711
+
712
+    total_size = 0
713
+    total_elapsed = 0.0
714
+    timestamp_start = time.time()
715
+    seq = 0
716
+    dir_cache = {}
717
+    file_list = remote_list.keys()
718
+    file_list.sort()
719
+    for file in file_list:
720
+        seq += 1
721
+        item = remote_list[file]
722
+        uri = S3Uri(item['object_uri_str'])
723
+        dst_file = item['local_filename']
724
+        seq_label = "[%d of %d]" % (seq, remote_count)
725
+        try:
726
+            dst_dir = os.path.dirname(dst_file)
727
+            if not dir_cache.has_key(dst_dir):
728
+                dir_cache[dst_dir] = Utils.mkdir_with_parents(dst_dir)
729
+            if dir_cache[dst_dir] == False:
730
+                warning(u"%s: destination directory not writable: %s" % (file, dst_dir))
731
+                continue
732
+            try:
733
+                open_flags = os.O_CREAT
734
+                open_flags |= os.O_TRUNC
735
+                # open_flags |= os.O_EXCL
736
+
737
+                debug(u"dst_file=%s" % unicodise(dst_file))
738
+                # Note: with O_EXCL commented out above, this open() will NOT fail if the file exists
739
+                os.close(os.open(dst_file, open_flags))
740
+                # Yeah I know there is a race condition here. Sadly I don't know how to open() in exclusive mode.
741
+                dst_stream = open(dst_file, "wb")
742
+                response = s3.object_get(uri, dst_stream, extra_label = seq_label)
743
+                dst_stream.close()
744
+                if response['headers'].has_key('x-amz-meta-s3cmd-attrs') and cfg.preserve_attrs:
745
+                    attrs = _parse_attrs_header(response['headers']['x-amz-meta-s3cmd-attrs'])
746
+                    if attrs.has_key('mode'):
747
+                        os.chmod(dst_file, int(attrs['mode']))
748
+                    if attrs.has_key('mtime') or attrs.has_key('atime'):
749
+                        mtime = attrs.has_key('mtime') and int(attrs['mtime']) or int(time.time())
750
+                        atime = attrs.has_key('atime') and int(attrs['atime']) or int(time.time())
751
+                        os.utime(dst_file, (atime, mtime))
752
+                    ## FIXME: uid/gid / uname/gname handling comes here! TODO
753
+            except OSError, e:
754
+                try: dst_stream.close()
755
+                except: pass
756
+                if e.errno == errno.EEXIST:
757
+                    warning(u"%s exists - not overwriting" % (dst_file))
758
+                    continue
759
+                if e.errno in (errno.EPERM, errno.EACCES):
760
+                    warning(u"%s not writable: %s" % (dst_file, e.strerror))
761
+                    continue
762
+                if e.errno == errno.EISDIR:
763
+                    warning(u"%s is a directory - skipping over" % dst_file)
764
+                    continue
765
+                raise e
766
+            except KeyboardInterrupt:
767
+                try: dst_stream.close()
768
+                except: pass
769
+                warning(u"Exiting after keyboard interrupt")
770
+                return
771
+            except Exception, e:
772
+                try: dst_stream.close()
773
+                except: pass
774
+                error(u"%s: %s" % (file, e))
775
+                continue
776
+            # We have to keep repeating this call because
777
+            # Python 2.4 doesn't support try/except/finally
778
+            # construction :-(
779
+            try: dst_stream.close()
780
+            except: pass
781
+        except S3DownloadError, e:
782
+            error(u"%s: download failed too many times. Skipping that file." % file)
783
+            continue
784
+        speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
785
+        if not Config().progress_meter:
786
+            output(u"File '%s' stored as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
787
+                (uri, unicodise(dst_file), response["size"], response["elapsed"], speed_fmt[0], speed_fmt[1],
788
+                seq_label))
789
+        total_size += response["size"]
790
+
791
+    total_elapsed = time.time() - timestamp_start
792
+    speed_fmt = formatSize(total_size/total_elapsed, human_readable = True, floating_point = True)
793
+
794
+    # Only print out the result if any work has been done or
795
+    # if the user asked for verbose output
796
+    outstr = "Done. Downloaded %d bytes in %0.1f seconds, %0.2f %sB/s" % (total_size, total_elapsed, speed_fmt[0], speed_fmt[1])
797
+    if total_size > 0:
798
+        output(outstr)
799
+    else:
800
+        info(outstr)
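
The "race condition" comment in the download loop above deserves a footnote: an exclusive create is available through the os module, even though plain open() has no such mode in Python 2. A sketch (not what s3cmd does - it deliberately leaves O_EXCL commented out so --force and repeated syncs keep working):

    import os, errno

    def create_exclusive(path):
        # O_EXCL makes os.open fail with EEXIST instead of reusing the file.
        fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o600)
        return os.fdopen(fd, "wb")

    try:
        stream = create_exclusive("downloaded.bin")
    except OSError as e:
        if e.errno == errno.EEXIST:
            print("downloaded.bin exists - not overwriting")
        else:
            raise
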
801 801
 
802 802
 def cmd_sync_local2remote(args):
803
-	def _build_attr_header(src):
804
-		import pwd, grp
805
-		attrs = {}
806
-		src = deunicodise(src)
807
-		try:
808
-			st = os.stat_result(os.stat(src))
809
-		except OSError, e:
810
-			raise InvalidFileError(u"%s: %s" % (unicodise(src), e.strerror))
811
-		for attr in cfg.preserve_attrs_list:
812
-			if attr == 'uname':
813
-				try:
814
-					val = pwd.getpwuid(st.st_uid).pw_name
815
-				except KeyError:
816
-					attr = "uid"
817
-					val = st.st_uid
818
-					warning(u"%s: Owner username not known. Storing UID=%d instead." % (unicodise(src), val))
819
-			elif attr == 'gname':
820
-				try:
821
-					val = grp.getgrgid(st.st_gid).gr_name
822
-				except KeyError:
823
-					attr = "gid"
824
-					val = st.st_gid
825
-					warning(u"%s: Owner groupname not known. Storing GID=%d instead." % (unicodise(src), val))
826
-			else:
827
-				val = getattr(st, 'st_' + attr)
828
-			attrs[attr] = val
829
-		result = ""
830
-		for k in attrs: result += "%s:%s/" % (k, attrs[k])
831
-		return { 'x-amz-meta-s3cmd-attrs' : result[:-1] }
832
-
833
-	s3 = S3(cfg)
834
-
835
-	if cfg.encrypt:
836
-		error(u"S3cmd 'sync' doesn't yet support GPG encryption, sorry.")
837
-		error(u"Either use unconditional 's3cmd put --recursive'")
838
-		error(u"or disable encryption with --no-encrypt parameter.")
839
-		sys.exit(1)
840
-
841
-	## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash)
842
-	destination_base_uri = S3Uri(args[-1])
843
-	if destination_base_uri.type != 's3':
844
-		raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri)
845
-	destination_base = str(destination_base_uri)
846
-
847
-	local_list, single_file_local = fetch_local_list(args[:-1], recursive = True)
848
-	remote_list = fetch_remote_list(destination_base, recursive = True, require_attribs = True)
849
-
850
-	local_count = len(local_list)
851
-	remote_count = len(remote_list)
852
-
853
-	info(u"Found %d local files, %d remote files" % (local_count, remote_count))
854
-
855
-	local_list, exclude_list = filter_exclude_include(local_list)
856
-
857
-	if single_file_local and len(local_list) == 1 and len(remote_list) == 1:
858
-		## Make remote_key same as local_key for comparison if we're dealing with only one file
859
-		remote_list_entry = remote_list[remote_list.keys()[0]]
860
-		# Flush remote_list, by the way
861
-		remote_list = { local_list.keys()[0] : remote_list_entry }
862
-
863
-	local_list, remote_list, existing_list = compare_filelists(local_list, remote_list, src_remote = False, dst_remote = True)
864
-
865
-	local_count = len(local_list)
866
-	remote_count = len(remote_list)
867
-
868
-	info(u"Summary: %d local files to upload, %d remote files to delete" % (local_count, remote_count))
869
-
870
-	if local_count > 0:
871
-		## Populate 'remote_uri' only if we've got something to upload
872
-		if not destination_base.endswith("/"):
873
-			if not single_file_local:
874
-				raise ParameterError("Destination S3 URI must end with '/' (ie must refer to a directory on the remote side).")
875
-			local_list[local_list.keys()[0]]['remote_uri'] = unicodise(destination_base)
876
-		else:
877
-			for key in local_list:
878
-				local_list[key]['remote_uri'] = unicodise(destination_base + key)
879
-
880
-	if cfg.dry_run:
881
-		for key in exclude_list:
882
-			output(u"exclude: %s" % unicodise(key))
883
-		if cfg.delete_removed:
884
-			for key in remote_list:
885
-				output(u"delete: %s" % remote_list[key]['object_uri_str'])
886
-		for key in local_list:
887
-			output(u"upload: %s -> %s" % (local_list[key]['full_name_unicode'], local_list[key]['remote_uri']))
888
-
889
-		warning(u"Exitting now because of --dry-run")
890
-		return
891
-
892
-	if cfg.delete_removed:
893
-		for key in remote_list:
894
-			uri = S3Uri(remote_list[key]['object_uri_str'])
895
-			s3.object_delete(uri)
896
-			output(u"deleted: '%s'" % uri)
897
-
898
-	uploaded_objects_list = []
899
-	total_size = 0
900
-	total_elapsed = 0.0
901
-	timestamp_start = time.time()
902
-	seq = 0
903
-	file_list = local_list.keys()
904
-	file_list.sort()
905
-	for file in file_list:
906
-		seq += 1
907
-		item = local_list[file]
908
-		src = item['full_name']
909
-		uri = S3Uri(item['remote_uri'])
910
-		seq_label = "[%d of %d]" % (seq, local_count)
911
-		extra_headers = copy(cfg.extra_headers)
912
-		try:
913
-			if cfg.preserve_attrs:
914
-				attr_header = _build_attr_header(src)
915
-				debug(u"attr_header: %s" % attr_header)
916
-				extra_headers.update(attr_header)
917
-			response = s3.object_put(src, uri, extra_headers, extra_label = seq_label)
918
-		except InvalidFileError, e:
919
-			warning(u"File can not be uploaded: %s" % e)
920
-			continue
921
-		except S3UploadError, e:
922
-			error(u"%s: upload failed too many times. Skipping that file." % item['full_name_unicode'])
923
-			continue
924
-		speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
925
-		if not cfg.progress_meter:
926
-			output(u"File '%s' stored as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
927
-				(item['full_name_unicode'], uri, response["size"], response["elapsed"], 
928
-				speed_fmt[0], speed_fmt[1], seq_label))
929
-		total_size += response["size"]
930
-		uploaded_objects_list.append(uri.object())
931
-
932
-	total_elapsed = time.time() - timestamp_start
933
-	total_speed = total_elapsed and total_size/total_elapsed or 0.0
934
-	speed_fmt = formatSize(total_speed, human_readable = True, floating_point = True)
935
-
936
-	# Only print out the result if any work has been done or 
937
-	# if the user asked for verbose output
938
-	outstr = "Done. Uploaded %d bytes in %0.1f seconds, %0.2f %sB/s" % (total_size, total_elapsed, speed_fmt[0], speed_fmt[1])
939
-	if total_size > 0:
940
-		output(outstr)
941
-	else:
942
-		info(outstr)
943
-
944
-	if cfg.invalidate_on_cf:
945
-		if len(uploaded_objects_list) == 0:
946
-			info("Nothing to invalidate in CloudFront")
947
-		else:
948
-			# 'uri' from the last iteration is still valid at this point
949
-			cf = CloudFront(cfg)
950
-			result = cf.InvalidateObjects(uri, uploaded_objects_list)
951
-			if result['status'] == 201:
952
-				output("Created invalidation request for %d paths" % len(uploaded_objects_list)) 
953
-				output("Check progress with: s3cmd cfinvalinfo cf://%s/%s" % (result['dist_id'], result['request_id']))
803
+    def _build_attr_header(src):
804
+        import pwd, grp
805
+        attrs = {}
806
+        src = deunicodise(src)
807
+        try:
808
+            st = os.stat_result(os.stat(src))
809
+        except OSError, e:
810
+            raise InvalidFileError(u"%s: %s" % (unicodise(src), e.strerror))
811
+        for attr in cfg.preserve_attrs_list:
812
+            if attr == 'uname':
813
+                try:
814
+                    val = pwd.getpwuid(st.st_uid).pw_name
815
+                except KeyError:
816
+                    attr = "uid"
817
+                    val = st.st_uid
818
+                    warning(u"%s: Owner username not known. Storing UID=%d instead." % (unicodise(src), val))
819
+            elif attr == 'gname':
820
+                try:
821
+                    val = grp.getgrgid(st.st_gid).gr_name
822
+                except KeyError:
823
+                    attr = "gid"
824
+                    val = st.st_gid
825
+                    warning(u"%s: Owner groupname not known. Storing GID=%d instead." % (unicodise(src), val))
826
+            else:
827
+                val = getattr(st, 'st_' + attr)
828
+            attrs[attr] = val
829
+        result = ""
830
+        for k in attrs: result += "%s:%s/" % (k, attrs[k])
831
+        return { 'x-amz-meta-s3cmd-attrs' : result[:-1] }
832
+
833
+    s3 = S3(cfg)
834
+
835
+    if cfg.encrypt:
836
+        error(u"S3cmd 'sync' doesn't yet support GPG encryption, sorry.")
837
+        error(u"Either use unconditional 's3cmd put --recursive'")
838
+        error(u"or disable encryption with --no-encrypt parameter.")
839
+        sys.exit(1)
840
+
841
+    ## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash)
842
+    destination_base_uri = S3Uri(args[-1])
843
+    if destination_base_uri.type != 's3':
844
+        raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri)
845
+    destination_base = str(destination_base_uri)
846
+
847
+    local_list, single_file_local = fetch_local_list(args[:-1], recursive = True)
848
+    remote_list = fetch_remote_list(destination_base, recursive = True, require_attribs = True)
849
+
850
+    local_count = len(local_list)
851
+    remote_count = len(remote_list)
852
+
853
+    info(u"Found %d local files, %d remote files" % (local_count, remote_count))
854
+
855
+    local_list, exclude_list = filter_exclude_include(local_list)
856
+
857
+    if single_file_local and len(local_list) == 1 and len(remote_list) == 1:
858
+        ## Make remote_key same as local_key for comparison if we're dealing with only one file
859
+        remote_list_entry = remote_list[remote_list.keys()[0]]
860
+        # Flush remote_list, by the way
861
+        remote_list = { local_list.keys()[0] : remote_list_entry }
862
+
863
+    local_list, remote_list, existing_list = compare_filelists(local_list, remote_list, src_remote = False, dst_remote = True)
864
+
865
+    local_count = len(local_list)
866
+    remote_count = len(remote_list)
867
+
868
+    info(u"Summary: %d local files to upload, %d remote files to delete" % (local_count, remote_count))
869
+
870
+    if local_count > 0:
871
+        ## Populate 'remote_uri' only if we've got something to upload
872
+        if not destination_base.endswith("/"):
873
+            if not single_file_local:
874
+                raise ParameterError("Destination S3 URI must end with '/' (ie must refer to a directory on the remote side).")
875
+            local_list[local_list.keys()[0]]['remote_uri'] = unicodise(destination_base)
876
+        else:
877
+            for key in local_list:
878
+                local_list[key]['remote_uri'] = unicodise(destination_base + key)
879
+
880
+    if cfg.dry_run:
881
+        for key in exclude_list:
882
+            output(u"exclude: %s" % unicodise(key))
883
+        if cfg.delete_removed:
884
+            for key in remote_list:
885
+                output(u"delete: %s" % remote_list[key]['object_uri_str'])
886
+        for key in local_list:
887
+            output(u"upload: %s -> %s" % (local_list[key]['full_name_unicode'], local_list[key]['remote_uri']))
888
+
889
+        warning(u"Exitting now because of --dry-run")
890
+        return
891
+
892
+    if cfg.delete_removed:
893
+        for key in remote_list:
894
+            uri = S3Uri(remote_list[key]['object_uri_str'])
895
+            s3.object_delete(uri)
896
+            output(u"deleted: '%s'" % uri)
897
+
898
+    uploaded_objects_list = []
899
+    total_size = 0
900
+    total_elapsed = 0.0
901
+    timestamp_start = time.time()
902
+    seq = 0
903
+    file_list = local_list.keys()
904
+    file_list.sort()
905
+    for file in file_list:
906
+        seq += 1
907
+        item = local_list[file]
908
+        src = item['full_name']
909
+        uri = S3Uri(item['remote_uri'])
910
+        seq_label = "[%d of %d]" % (seq, local_count)
911
+        extra_headers = copy(cfg.extra_headers)
912
+        try:
913
+            if cfg.preserve_attrs:
914
+                attr_header = _build_attr_header(src)
915
+                debug(u"attr_header: %s" % attr_header)
916
+                extra_headers.update(attr_header)
917
+            response = s3.object_put(src, uri, extra_headers, extra_label = seq_label)
918
+        except InvalidFileError, e:
919
+            warning(u"File can not be uploaded: %s" % e)
920
+            continue
921
+        except S3UploadError, e:
922
+            error(u"%s: upload failed too many times. Skipping that file." % item['full_name_unicode'])
923
+            continue
924
+        speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
925
+        if not cfg.progress_meter:
926
+            output(u"File '%s' stored as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
927
+                (item['full_name_unicode'], uri, response["size"], response["elapsed"],
928
+                speed_fmt[0], speed_fmt[1], seq_label))
929
+        total_size += response["size"]
930
+        uploaded_objects_list.append(uri.object())
931
+
932
+    total_elapsed = time.time() - timestamp_start
933
+    total_speed = total_elapsed and total_size/total_elapsed or 0.0
934
+    speed_fmt = formatSize(total_speed, human_readable = True, floating_point = True)
935
+
936
+    # Only print out the result if any work has been done or
937
+    # if the user asked for verbose output
938
+    outstr = "Done. Uploaded %d bytes in %0.1f seconds, %0.2f %sB/s" % (total_size, total_elapsed, speed_fmt[0], speed_fmt[1])
939
+    if total_size > 0:
940
+        output(outstr)
941
+    else:
942
+        info(outstr)
943
+
944
+    if cfg.invalidate_on_cf:
945
+        if len(uploaded_objects_list) == 0:
946
+            info("Nothing to invalidate in CloudFront")
947
+        else:
948
+            # 'uri' from the last iteration is still valid at this point
949
+            cf = CloudFront(cfg)
950
+            result = cf.InvalidateObjects(uri, uploaded_objects_list)
951
+            if result['status'] == 201:
952
+                output("Created invalidation request for %d paths" % len(uploaded_objects_list))
953
+                output("Check progress with: s3cmd cfinvalinfo cf://%s/%s" % (result['dist_id'], result['request_id']))
954 954
 
955 955
 def cmd_sync(args):
956
-	if (len(args) < 2):
957
-		raise ParameterError("Too few parameters! Expected: %s" % commands['sync']['param'])
956
+    if (len(args) < 2):
957
+        raise ParameterError("Too few parameters! Expected: %s" % commands['sync']['param'])
958 958
 
959
-	if S3Uri(args[0]).type == "file" and S3Uri(args[-1]).type == "s3":
960
-		return cmd_sync_local2remote(args)
961
-	if S3Uri(args[0]).type == "s3" and S3Uri(args[-1]).type == "file":
962
-		return cmd_sync_remote2local(args)
963
-	if S3Uri(args[0]).type == "s3" and S3Uri(args[-1]).type == "s3":
964
-		return cmd_sync_remote2remote(args)
965
-	raise ParameterError("Invalid source/destination: '%s'" % "' '".join(args))
959
+    if S3Uri(args[0]).type == "file" and S3Uri(args[-1]).type == "s3":
960
+        return cmd_sync_local2remote(args)
961
+    if S3Uri(args[0]).type == "s3" and S3Uri(args[-1]).type == "file":
962
+        return cmd_sync_remote2local(args)
963
+    if S3Uri(args[0]).type == "s3" and S3Uri(args[-1]).type == "s3":
964
+        return cmd_sync_remote2remote(args)
965
+    raise ParameterError("Invalid source/destination: '%s'" % "' '".join(args))
966 966
 
967 967
 def cmd_setacl(args):
968
-	def _update_acl(uri, seq_label = ""):
969
-		something_changed = False
970
-		acl = s3.get_acl(uri)
971
-		debug(u"acl: %s - %r" % (uri, acl.grantees))
972
-		if cfg.acl_public == True:
973
-			if acl.isAnonRead():
974
-				info(u"%s: already Public, skipping %s" % (uri, seq_label))
975
-			else:
976
-				acl.grantAnonRead()
977
-				something_changed = True
978
-		elif cfg.acl_public == False: # we explicitely check for False, because it could be None
979
-			if not acl.isAnonRead():
980
-				info(u"%s: already Private, skipping %s" % (uri, seq_label))
981
-			else:
982
-				acl.revokeAnonRead()
983
-				something_changed = True
984
-
985
-		# update acl with arguments
986
-		# grant first and revoke later, because revoke has priority
987
-		if cfg.acl_grants:
988
-			something_changed = True
989
-			for grant in cfg.acl_grants:
990
-				acl.grant(**grant);
991
-
992
-		if cfg.acl_revokes:
993
-			something_changed = True
994
-			for revoke in cfg.acl_revokes:
995
-				acl.revoke(**revoke);
996
-
997
-		if not something_changed:
998
-			return
999
-
1000
-		retsponse = s3.set_acl(uri, acl)
1001
-		if retsponse['status'] == 200:
1002
-			if cfg.acl_public in (True, False):
1003
-				output(u"%s: ACL set to %s  %s" % (uri, set_to_acl, seq_label))
1004
-			else:
1005
-				output(u"%s: ACL updated" % uri)
1006
-
1007
-	s3 = S3(cfg)
1008
-
1009
-	set_to_acl = cfg.acl_public and "Public" or "Private"
1010
-
1011
-	if not cfg.recursive:
1012
-		old_args = args
1013
-		args = []
1014
-		for arg in old_args:
1015
-			uri = S3Uri(arg)
1016
-			if not uri.has_object():
1017
-				if cfg.acl_public != None:
1018
-					info("Setting bucket-level ACL for %s to %s" % (uri.uri(), set_to_acl))
1019
-				else:
1020
-					info("Setting bucket-level ACL for %s" % (uri.uri()))
1021
-				if not cfg.dry_run:
1022
-					_update_acl(uri)
1023
-			else:
1024
-				args.append(arg)
1025
-
1026
-	remote_list = fetch_remote_list(args)
1027
-	remote_list, exclude_list = filter_exclude_include(remote_list)
1028
-
1029
-	remote_count = len(remote_list)
1030
-
1031
-	info(u"Summary: %d remote files to update" % remote_count)
1032
-
1033
-	if cfg.dry_run:
1034
-		for key in exclude_list:
1035
-			output(u"exclude: %s" % unicodise(key))
1036
-		for key in remote_list:
1037
-			output(u"setacl: %s" % remote_list[key]['object_uri_str'])
1038
-
1039
-		warning(u"Exitting now because of --dry-run")
1040
-		return
1041
-
1042
-	seq = 0
1043
-	for key in remote_list:
1044
-		seq += 1
1045
-		seq_label = "[%d of %d]" % (seq, remote_count)
1046
-		uri = S3Uri(remote_list[key]['object_uri_str'])
1047
-		_update_acl(uri, seq_label)
968
+    def _update_acl(uri, seq_label = ""):
969
+        something_changed = False
970
+        acl = s3.get_acl(uri)
971
+        debug(u"acl: %s - %r" % (uri, acl.grantees))
972
+        if cfg.acl_public == True:
973
+            if acl.isAnonRead():
974
+                info(u"%s: already Public, skipping %s" % (uri, seq_label))
975
+            else:
976
+                acl.grantAnonRead()
977
+                something_changed = True
978
+        elif cfg.acl_public == False: # we explicitly check for False, because it could be None
979
+            if not acl.isAnonRead():
980
+                info(u"%s: already Private, skipping %s" % (uri, seq_label))
981
+            else:
982
+                acl.revokeAnonRead()
983
+                something_changed = True
984
+
985
+        # update acl with arguments
986
+        # grant first and revoke later, because revoke has priority
987
+        if cfg.acl_grants:
988
+            something_changed = True
989
+            for grant in cfg.acl_grants:
990
+                acl.grant(**grant);
991
+
992
+        if cfg.acl_revokes:
993
+            something_changed = True
994
+            for revoke in cfg.acl_revokes:
995
+                acl.revoke(**revoke);
996
+
997
+        if not something_changed:
998
+            return
999
+
1000
+        response = s3.set_acl(uri, acl)
1001
+        if response['status'] == 200:
1002
+            if cfg.acl_public in (True, False):
1003
+                output(u"%s: ACL set to %s  %s" % (uri, set_to_acl, seq_label))
1004
+            else:
1005
+                output(u"%s: ACL updated" % uri)
1006
+
1007
+    s3 = S3(cfg)
1008
+
1009
+    set_to_acl = cfg.acl_public and "Public" or "Private"
1010
+
1011
+    if not cfg.recursive:
1012
+        old_args = args
1013
+        args = []
1014
+        for arg in old_args:
1015
+            uri = S3Uri(arg)
1016
+            if not uri.has_object():
1017
+                if cfg.acl_public != None:
1018
+                    info("Setting bucket-level ACL for %s to %s" % (uri.uri(), set_to_acl))
1019
+                else:
1020
+                    info("Setting bucket-level ACL for %s" % (uri.uri()))
1021
+                if not cfg.dry_run:
1022
+                    _update_acl(uri)
1023
+            else:
1024
+                args.append(arg)
1025
+
1026
+    remote_list = fetch_remote_list(args)
1027
+    remote_list, exclude_list = filter_exclude_include(remote_list)
1028
+
1029
+    remote_count = len(remote_list)
1030
+
1031
+    info(u"Summary: %d remote files to update" % remote_count)
1032
+
1033
+    if cfg.dry_run:
1034
+        for key in exclude_list:
1035
+            output(u"exclude: %s" % unicodise(key))
1036
+        for key in remote_list:
1037
+            output(u"setacl: %s" % remote_list[key]['object_uri_str'])
1038
+
1039
+        warning(u"Exitting now because of --dry-run")
1040
+        return
1041
+
1042
+    seq = 0
1043
+    for key in remote_list:
1044
+        seq += 1
1045
+        seq_label = "[%d of %d]" % (seq, remote_count)
1046
+        uri = S3Uri(remote_list[key]['object_uri_str'])
1047
+        _update_acl(uri, seq_label)
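
The "grant first and revoke later, because revoke has priority" comment in _update_acl encodes a deliberate conflict rule: if a single invocation both grants and revokes the same permission, the revoke wins. A toy model of that ordering (a hypothetical stand-in for the real ACL class):

    class ToyAcl(object):
        def __init__(self):
            self.perms = set()
        def grant(self, name, permission):
            self.perms.add((name, permission))
        def revoke(self, name, permission):
            self.perms.discard((name, permission))

    acl = ToyAcl()
    acl.grant(name="alice", permission="READ")
    acl.revoke(name="alice", permission="READ")   # applied second, so it wins
    print(acl.perms)                              # set([]) - alice ends up revoked
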
1048 1048
 
1049 1049
 def cmd_accesslog(args):
1050
-	s3 = S3(cfg)
1051
-	bucket_uri = S3Uri(args.pop())
1052
-	if bucket_uri.object():
1053
-		raise ParameterError("Only bucket name is required for [accesslog] command")
1054
-	if cfg.log_target_prefix == False:
1055
-		accesslog, response = s3.set_accesslog(bucket_uri, enable = False)
1056
-	elif cfg.log_target_prefix:
1057
-		log_target_prefix_uri = S3Uri(cfg.log_target_prefix)
1058
-		if log_target_prefix_uri.type != "s3":
1059
-			raise ParameterError("--log-target-prefix must be a S3 URI")
1060
-		accesslog, response = s3.set_accesslog(bucket_uri, enable = True, log_target_prefix_uri = log_target_prefix_uri, acl_public = cfg.acl_public)
1061
-	else:	# cfg.log_target_prefix == None
1062
-		accesslog = s3.get_accesslog(bucket_uri)
1063
-
1064
-	output(u"Access logging for: %s" % bucket_uri.uri())
1065
-	output(u"   Logging Enabled: %s" % accesslog.isLoggingEnabled())
1066
-	if accesslog.isLoggingEnabled():
1067
-		output(u"     Target prefix: %s" % accesslog.targetPrefix().uri())
1068
-		#output(u"   Public Access:   %s" % accesslog.isAclPublic())
1069
-		
1050
+    s3 = S3(cfg)
1051
+    bucket_uri = S3Uri(args.pop())
1052
+    if bucket_uri.object():
1053
+        raise ParameterError("Only bucket name is required for [accesslog] command")
1054
+    if cfg.log_target_prefix == False:
1055
+        accesslog, response = s3.set_accesslog(bucket_uri, enable = False)
1056
+    elif cfg.log_target_prefix:
1057
+        log_target_prefix_uri = S3Uri(cfg.log_target_prefix)
1058
+        if log_target_prefix_uri.type != "s3":
1059
+            raise ParameterError("--log-target-prefix must be a S3 URI")
1060
+        accesslog, response = s3.set_accesslog(bucket_uri, enable = True, log_target_prefix_uri = log_target_prefix_uri, acl_public = cfg.acl_public)
1061
+    else:   # cfg.log_target_prefix == None
1062
+        accesslog = s3.get_accesslog(bucket_uri)
1063
+
1064
+    output(u"Access logging for: %s" % bucket_uri.uri())
1065
+    output(u"   Logging Enabled: %s" % accesslog.isLoggingEnabled())
1066
+    if accesslog.isLoggingEnabled():
1067
+        output(u"     Target prefix: %s" % accesslog.targetPrefix().uri())
1068
+        #output(u"   Public Access:   %s" % accesslog.isAclPublic())
1069
+
1070 1070
 def cmd_sign(args):
1071
-	string_to_sign = args.pop()
1072
-	debug("string-to-sign: %r" % string_to_sign)
1073
-	signature = Utils.sign_string(string_to_sign)
1074
-	output("Signature: %s" % signature)
1071
+    string_to_sign = args.pop()
1072
+    debug("string-to-sign: %r" % string_to_sign)
1073
+    signature = Utils.sign_string(string_to_sign)
1074
+    output("Signature: %s" % signature)
1075 1075
 
1076 1076
 def cmd_fixbucket(args):
1077
-	def _unescape(text):
1078
-		##
1079
-		# Removes HTML or XML character references and entities from a text string.
1080
-		#
1081
-		# @param text The HTML (or XML) source text.
1082
-		# @return The plain text, as a Unicode string, if necessary.
1083
-		# 
1084
-		# From: http://effbot.org/zone/re-sub.htm#unescape-html
1085
-		def _unescape_fixup(m):
1086
-			text = m.group(0)
1087
-			if not htmlentitydefs.name2codepoint.has_key('apos'):
1088
-				htmlentitydefs.name2codepoint['apos'] = ord("'")
1089
-			if text[:2] == "&#":
1090
-				# character reference
1091
-				try:
1092
-					if text[:3] == "&#x":
1093
-						return unichr(int(text[3:-1], 16))
1094
-					else:
1095
-						return unichr(int(text[2:-1]))
1096
-				except ValueError:
1097
-					pass
1098
-			else:
1099
-				# named entity
1100
-				try:
1101
-					text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
1102
-				except KeyError:
1103
-					pass
1104
-			return text # leave as is
1105
-			text = text.encode('ascii', 'xmlcharrefreplace')
1106
-		return re.sub("&#?\w+;", _unescape_fixup, text)
1107
-
1108
-	cfg.urlencoding_mode = "fixbucket"
1109
-	s3 = S3(cfg)
1110
-
1111
-	count = 0
1112
-	for arg in args:
1113
-		culprit = S3Uri(arg)
1114
-		if culprit.type != "s3":
1115
-			raise ParameterError("Expecting S3Uri instead of: %s" % arg)
1116
-		response = s3.bucket_list_noparse(culprit.bucket(), culprit.object(), recursive = True)
1117
-		r_xent = re.compile("&#x[\da-fA-F]+;")
1118
-		response['data'] = unicode(response['data'], 'UTF-8')
1119
-		keys = re.findall("<Key>(.*?)</Key>", response['data'], re.MULTILINE)
1120
-		debug("Keys: %r" % keys)
1121
-		for key in keys:
1122
-			if r_xent.search(key):
1123
-				info("Fixing: %s" % key)
1124
-				debug("Step 1: Transforming %s" % key)
1125
-				key_bin = _unescape(key)
1126
-				debug("Step 2:       ... to %s" % key_bin)
1127
-				key_new = replace_nonprintables(key_bin)
1128
-				debug("Step 3:  ... then to %s" % key_new)
1129
-				src = S3Uri("s3://%s/%s" % (culprit.bucket(), key_bin))
1130
-				dst = S3Uri("s3://%s/%s" % (culprit.bucket(), key_new))
1131
-				resp_move = s3.object_move(src, dst)
1132
-				if resp_move['status'] == 200:
1133
-					output("File %r renamed to %s" % (key_bin, key_new))
1134
-					count += 1
1135
-				else:
1136
-					error("Something went wrong for: %r" % key)
1137
-					error("Please report the problem to s3tools-bugs@lists.sourceforge.net")
1138
-	if count > 0:
1139
-		warning("Fixed %d files' names. Their ACL were reset to Private." % count)
1140
-		warning("Use 's3cmd setacl --acl-public s3://...' to make")
1141
-		warning("them publicly readable if required.")
1077
+    def _unescape(text):
1078
+        ##
1079
+        # Removes HTML or XML character references and entities from a text string.
1080
+        #
1081
+        # @param text The HTML (or XML) source text.
1082
+        # @return The plain text, as a Unicode string, if necessary.
1083
+        #
1084
+        # From: http://effbot.org/zone/re-sub.htm#unescape-html
1085
+        def _unescape_fixup(m):
1086
+            text = m.group(0)
1087
+            if not htmlentitydefs.name2codepoint.has_key('apos'):
1088
+                htmlentitydefs.name2codepoint['apos'] = ord("'")
1089
+            if text[:2] == "&#":
1090
+                # character reference
1091
+                try:
1092
+                    if text[:3] == "&#x":
1093
+                        return unichr(int(text[3:-1], 16))
1094
+                    else:
1095
+                        return unichr(int(text[2:-1]))
1096
+                except ValueError:
1097
+                    pass
1098
+            else:
1099
+                # named entity
1100
+                try:
1101
+                    text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
1102
+                except KeyError:
1103
+                    pass
1104
+            return text # leave as is
1106
+        return re.sub("&#?\w+;", _unescape_fixup, text)
1107
+
1108
+    cfg.urlencoding_mode = "fixbucket"
1109
+    s3 = S3(cfg)
1110
+
1111
+    count = 0
1112
+    for arg in args:
1113
+        culprit = S3Uri(arg)
1114
+        if culprit.type != "s3":
1115
+            raise ParameterError("Expecting S3Uri instead of: %s" % arg)
1116
+        response = s3.bucket_list_noparse(culprit.bucket(), culprit.object(), recursive = True)
1117
+        r_xent = re.compile("&#x[\da-fA-F]+;")
1118
+        response['data'] = unicode(response['data'], 'UTF-8')
1119
+        keys = re.findall("<Key>(.*?)</Key>", response['data'], re.MULTILINE)
1120
+        debug("Keys: %r" % keys)
1121
+        for key in keys:
1122
+            if r_xent.search(key):
1123
+                info("Fixing: %s" % key)
1124
+                debug("Step 1: Transforming %s" % key)
1125
+                key_bin = _unescape(key)
1126
+                debug("Step 2:       ... to %s" % key_bin)
1127
+                key_new = replace_nonprintables(key_bin)
1128
+                debug("Step 3:  ... then to %s" % key_new)
1129
+                src = S3Uri("s3://%s/%s" % (culprit.bucket(), key_bin))
1130
+                dst = S3Uri("s3://%s/%s" % (culprit.bucket(), key_new))
1131
+                resp_move = s3.object_move(src, dst)
1132
+                if resp_move['status'] == 200:
1133
+                    output("File %r renamed to %s" % (key_bin, key_new))
1134
+                    count += 1
1135
+                else:
1136
+                    error("Something went wrong for: %r" % key)
1137
+                    error("Please report the problem to s3tools-bugs@lists.sourceforge.net")
1138
+    if count > 0:
1139
+        warning("Fixed %d files' names. Their ACL were reset to Private." % count)
1140
+        warning("Use 's3cmd setacl --acl-public s3://...' to make")
1141
+        warning("them publicly readable if required.")
1142 1142
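A minimal standalone sketch (Python 2, to match the codebase) of the entity decoding the fixbucket hunk above relies on: hexadecimal and decimal character references plus named entities are folded back into Unicode characters, and anything unparseable is left as-is. The sample key is made up; the real implementation is the _unescape() helper shown above.

    import re
    import htmlentitydefs

    def unescape(text):
        def fixup(m):
            ref = m.group(0)
            try:
                if ref.startswith("&#x"):
                    return unichr(int(ref[3:-1], 16))   # hex reference, e.g. &#x08;
                elif ref.startswith("&#"):
                    return unichr(int(ref[2:-1]))       # decimal reference, e.g. &#8;
                return unichr(htmlentitydefs.name2codepoint[ref[1:-1]])  # named entity
            except (ValueError, KeyError):
                return ref                              # leave as-is
        return re.sub(r"&#?\w+;", fixup, text)

    print repr(unescape(u"backup&#x08;file&amp;log"))
    # u'backup\x08file&log' -- the control character is what replace_nonprintables()
    # then rewrites to a printable replacement before the object is renamed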
 
1143 1143
 def resolve_list(lst, args):
1144
-	retval = []
1145
-	for item in lst:
1146
-		retval.append(item % args)
1147
-	return retval
1144
+    retval = []
1145
+    for item in lst:
1146
+        retval.append(item % args)
1147
+    return retval
1148 1148
 
1149 1149
 def gpg_command(command, passphrase = ""):
1150
-	debug("GPG command: " + " ".join(command))
1151
-	p = subprocess.Popen(command, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
1152
-	p_stdout, p_stderr = p.communicate(passphrase + "\n")
1153
-	debug("GPG output:")
1154
-	for line in p_stdout.split("\n"):
1155
-		debug("GPG: " + line)
1156
-	p_exitcode = p.wait()
1157
-	return p_exitcode
1150
+    debug("GPG command: " + " ".join(command))
1151
+    p = subprocess.Popen(command, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
1152
+    p_stdout, p_stderr = p.communicate(passphrase + "\n")
1153
+    debug("GPG output:")
1154
+    for line in p_stdout.split("\n"):
1155
+        debug("GPG: " + line)
1156
+    p_exitcode = p.wait()
1157
+    return p_exitcode
1158 1158
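For context: resolve_list() above simply %-interpolates each token of a command template, and gpg_command() runs the result with the passphrase written to fd 0. A rough illustration -- the template merely resembles a typical gpg_encrypt setting and the paths are made up:

    template = "%(gpg_command)s -c --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s"
    args = {
        "gpg_command"   : "/usr/bin/gpg",
        "passphrase_fd" : "0",
        "input_file"    : "report.txt",
        "output_file"   : "/tmp/tmpfile-abc",
    }
    # equivalent to resolve_list(template.split(" "), args)
    command = [token % args for token in template.split(" ")]
    print command
    # ['/usr/bin/gpg', '-c', '--passphrase-fd', '0', '-o', '/tmp/tmpfile-abc', 'report.txt']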
 
1159 1159
 def gpg_encrypt(filename):
1160
-	tmp_filename = Utils.mktmpfile()
1161
-	args = {
1162
-		"gpg_command" : cfg.gpg_command,
1163
-		"passphrase_fd" : "0",
1164
-		"input_file" : filename, 
1165
-		"output_file" : tmp_filename,
1166
-	}
1167
-	info(u"Encrypting file %(input_file)s to %(output_file)s..." % args)
1168
-	command = resolve_list(cfg.gpg_encrypt.split(" "), args)
1169
-	code = gpg_command(command, cfg.gpg_passphrase)
1170
-	return (code, tmp_filename, "gpg")
1160
+    tmp_filename = Utils.mktmpfile()
1161
+    args = {
1162
+        "gpg_command" : cfg.gpg_command,
1163
+        "passphrase_fd" : "0",
1164
+        "input_file" : filename,
1165
+        "output_file" : tmp_filename,
1166
+    }
1167
+    info(u"Encrypting file %(input_file)s to %(output_file)s..." % args)
1168
+    command = resolve_list(cfg.gpg_encrypt.split(" "), args)
1169
+    code = gpg_command(command, cfg.gpg_passphrase)
1170
+    return (code, tmp_filename, "gpg")
1171 1171
 
1172 1172
 def gpg_decrypt(filename, gpgenc_header = "", in_place = True):
1173
-	tmp_filename = Utils.mktmpfile(filename)
1174
-	args = {
1175
-		"gpg_command" : cfg.gpg_command,
1176
-		"passphrase_fd" : "0",
1177
-		"input_file" : filename, 
1178
-		"output_file" : tmp_filename,
1179
-	}
1180
-	info(u"Decrypting file %(input_file)s to %(output_file)s..." % args)
1181
-	command = resolve_list(cfg.gpg_decrypt.split(" "), args)
1182
-	code = gpg_command(command, cfg.gpg_passphrase)
1183
-	if code == 0 and in_place:
1184
-		debug(u"Renaming %s to %s" % (tmp_filename, filename))
1185
-		os.unlink(filename)
1186
-		os.rename(tmp_filename, filename)
1187
-		tmp_filename = filename
1188
-	return (code, tmp_filename)
1173
+    tmp_filename = Utils.mktmpfile(filename)
1174
+    args = {
1175
+        "gpg_command" : cfg.gpg_command,
1176
+        "passphrase_fd" : "0",
1177
+        "input_file" : filename,
1178
+        "output_file" : tmp_filename,
1179
+    }
1180
+    info(u"Decrypting file %(input_file)s to %(output_file)s..." % args)
1181
+    command = resolve_list(cfg.gpg_decrypt.split(" "), args)
1182
+    code = gpg_command(command, cfg.gpg_passphrase)
1183
+    if code == 0 and in_place:
1184
+        debug(u"Renaming %s to %s" % (tmp_filename, filename))
1185
+        os.unlink(filename)
1186
+        os.rename(tmp_filename, filename)
1187
+        tmp_filename = filename
1188
+    return (code, tmp_filename)
1189 1189
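These two helpers are exercised by the encryption self-test in run_configure() further below; the essence of that round trip, assuming cfg already holds a working gpg_command and gpg_passphrase (the input file name is hypothetical):

    code_enc, enc_file, fmt = gpg_encrypt("sample.txt")
    code_dec, dec_file = gpg_decrypt(enc_file, fmt, in_place = False)
    sums = [Utils.hash_file_md5(f) for f in ("sample.txt", enc_file, dec_file)]
    # plaintext and decrypted copy must match; the ciphertext must differ
    assert sums[0] == sums[2] and sums[0] != sums[1]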
 
1190 1190
 def run_configure(config_file):
1191
-	cfg = Config()
1192
-	options = [
1193
-		("access_key", "Access Key", "Access key and Secret key are your identifiers for Amazon S3"),
1194
-		("secret_key", "Secret Key"),
1195
-		("gpg_passphrase", "Encryption password", "Encryption password is used to protect your files from reading\nby unauthorized persons while in transfer to S3"),
1196
-		("gpg_command", "Path to GPG program"),
1197
-		("use_https", "Use HTTPS protocol", "When using secure HTTPS protocol all communication with Amazon S3\nservers is protected from 3rd party eavesdropping. This method is\nslower than plain HTTP and can't be used if you're behind a proxy"),
1198
-		("proxy_host", "HTTP Proxy server name", "On some networks all internet access must go through an HTTP proxy.\nTry setting it here if you can't connect to S3 directly"),
1199
-		("proxy_port", "HTTP Proxy server port"),
1200
-		]
1201
-	## Option-specific defaults
1202
-	if getattr(cfg, "gpg_command") == "":
1203
-		setattr(cfg, "gpg_command", find_executable("gpg"))
1204
-
1205
-	if getattr(cfg, "proxy_host") == "" and os.getenv("http_proxy"):
1206
-		re_match=re.match("(http://)?([^:]+):(\d+)", os.getenv("http_proxy"))
1207
-		if re_match:
1208
-			setattr(cfg, "proxy_host", re_match.groups()[1])
1209
-			setattr(cfg, "proxy_port", re_match.groups()[2])
1210
-
1211
-	try:
1212
-		while 1:
1213
-			output(u"\nEnter new values or accept defaults in brackets with Enter.")
1214
-			output(u"Refer to user manual for detailed description of all options.")
1215
-			for option in options:
1216
-				prompt = option[1]
1217
-				## Option-specific handling
1218
-				if option[0] == 'proxy_host' and getattr(cfg, 'use_https') == True:
1219
-					setattr(cfg, option[0], "")
1220
-					continue
1221
-				if option[0] == 'proxy_port' and getattr(cfg, 'proxy_host') == "":
1222
-					setattr(cfg, option[0], 0)
1223
-					continue
1224
-
1225
-				try:
1226
-					val = getattr(cfg, option[0])
1227
-					if type(val) is bool:
1228
-						val = val and "Yes" or "No"
1229
-					if val not in (None, ""):
1230
-						prompt += " [%s]" % val
1231
-				except AttributeError:
1232
-					pass
1233
-
1234
-				if len(option) >= 3:
1235
-					output(u"\n%s" % option[2])
1236
-
1237
-				val = raw_input(prompt + ": ")
1238
-				if val != "":
1239
-					if type(getattr(cfg, option[0])) is bool:
1240
-						# Turn 'Yes' into True, everything else into False
1241
-						val = val.lower().startswith('y')
1242
-					setattr(cfg, option[0], val)
1243
-			output(u"\nNew settings:")
1244
-			for option in options:
1245
-				output(u"  %s: %s" % (option[1], getattr(cfg, option[0])))
1246
-			val = raw_input("\nTest access with supplied credentials? [Y/n] ")
1247
-			if val.lower().startswith("y") or val == "":
1248
-				try:
1249
-					output(u"Please wait...")
1250
-					S3(Config()).bucket_list("", "")
1251
-					output(u"Success. Your access key and secret key worked fine :-)")
1252
-
1253
-					output(u"\nNow verifying that encryption works...")
1254
-					if not getattr(cfg, "gpg_command") or not getattr(cfg, "gpg_passphrase"):
1255
-						output(u"Not configured. Never mind.")
1256
-					else:
1257
-						if not getattr(cfg, "gpg_command"):
1258
-							raise Exception("Path to GPG program not set")
1259
-						if not os.path.isfile(getattr(cfg, "gpg_command")):
1260
-							raise Exception("GPG program not found")
1261
-						filename = Utils.mktmpfile()
1262
-						f = open(filename, "w")
1263
-						f.write(os.sys.copyright)
1264
-						f.close()
1265
-						ret_enc = gpg_encrypt(filename)
1266
-						ret_dec = gpg_decrypt(ret_enc[1], ret_enc[2], False)
1267
-						hash = [
1268
-							Utils.hash_file_md5(filename),
1269
-							Utils.hash_file_md5(ret_enc[1]),
1270
-							Utils.hash_file_md5(ret_dec[1]),
1271
-						]
1272
-						os.unlink(filename)
1273
-						os.unlink(ret_enc[1])
1274
-						os.unlink(ret_dec[1])
1275
-						if hash[0] == hash[2] and hash[0] != hash[1]:
1276
-							output("Success. Encryption and decryption worked fine :-)")
1277
-						else:
1278
-							raise Exception("Encryption verification error.")
1279
-
1280
-				except Exception, e:
1281
-					error(u"Test failed: %s" % (e))
1282
-					val = raw_input("\nRetry configuration? [Y/n] ")
1283
-					if val.lower().startswith("y") or val == "":
1284
-						continue
1285
-					
1286
-
1287
-			val = raw_input("\nSave settings? [y/N] ")
1288
-			if val.lower().startswith("y"):
1289
-				break
1290
-			val = raw_input("Retry configuration? [Y/n] ")
1291
-			if val.lower().startswith("n"):
1292
-				raise EOFError()
1293
-
1294
-		## Overwrite existing config file, make it user-readable only
1295
-		old_mask = os.umask(0077)
1296
-		try:
1297
-			os.remove(config_file)
1298
-		except OSError, e:
1299
-			if e.errno != errno.ENOENT:
1300
-				raise
1301
-		f = open(config_file, "w")
1302
-		os.umask(old_mask)
1303
-		cfg.dump_config(f)
1304
-		f.close()
1305
-		output(u"Configuration saved to '%s'" % config_file)
1306
-
1307
-	except (EOFError, KeyboardInterrupt):
1308
-		output(u"\nConfiguration aborted. Changes were NOT saved.")
1309
-		return
1310
-	
1311
-	except IOError, e:
1312
-		error(u"Writing config file failed: %s: %s" % (config_file, e.strerror))
1313
-		sys.exit(1)
1191
+    cfg = Config()
1192
+    options = [
1193
+        ("access_key", "Access Key", "Access key and Secret key are your identifiers for Amazon S3"),
1194
+        ("secret_key", "Secret Key"),
1195
+        ("gpg_passphrase", "Encryption password", "Encryption password is used to protect your files from reading\nby unauthorized persons while in transfer to S3"),
1196
+        ("gpg_command", "Path to GPG program"),
1197
+        ("use_https", "Use HTTPS protocol", "When using secure HTTPS protocol all communication with Amazon S3\nservers is protected from 3rd party eavesdropping. This method is\nslower than plain HTTP and can't be used if you're behind a proxy"),
1198
+        ("proxy_host", "HTTP Proxy server name", "On some networks all internet access must go through a HTTP proxy.\nTry setting it here if you can't conect to S3 directly"),
1199
+        ("proxy_port", "HTTP Proxy server port"),
1200
+        ]
1201
+    ## Option-specific defaults
1202
+    if getattr(cfg, "gpg_command") == "":
1203
+        setattr(cfg, "gpg_command", find_executable("gpg"))
1204
+
1205
+    if getattr(cfg, "proxy_host") == "" and os.getenv("http_proxy"):
1206
+        re_match=re.match("(http://)?([^:]+):(\d+)", os.getenv("http_proxy"))
1207
+        if re_match:
1208
+            setattr(cfg, "proxy_host", re_match.groups()[1])
1209
+            setattr(cfg, "proxy_port", re_match.groups()[2])
1210
+
1211
+    try:
1212
+        while 1:
1213
+            output(u"\nEnter new values or accept defaults in brackets with Enter.")
1214
+            output(u"Refer to user manual for detailed description of all options.")
1215
+            for option in options:
1216
+                prompt = option[1]
1217
+                ## Option-specific handling
1218
+                if option[0] == 'proxy_host' and getattr(cfg, 'use_https') == True:
1219
+                    setattr(cfg, option[0], "")
1220
+                    continue
1221
+                if option[0] == 'proxy_port' and getattr(cfg, 'proxy_host') == "":
1222
+                    setattr(cfg, option[0], 0)
1223
+                    continue
1224
+
1225
+                try:
1226
+                    val = getattr(cfg, option[0])
1227
+                    if type(val) is bool:
1228
+                        val = val and "Yes" or "No"
1229
+                    if val not in (None, ""):
1230
+                        prompt += " [%s]" % val
1231
+                except AttributeError:
1232
+                    pass
1233
+
1234
+                if len(option) >= 3:
1235
+                    output(u"\n%s" % option[2])
1236
+
1237
+                val = raw_input(prompt + ": ")
1238
+                if val != "":
1239
+                    if type(getattr(cfg, option[0])) is bool:
1240
+                        # Turn 'Yes' into True, everything else into False
1241
+                        val = val.lower().startswith('y')
1242
+                    setattr(cfg, option[0], val)
1243
+            output(u"\nNew settings:")
1244
+            for option in options:
1245
+                output(u"  %s: %s" % (option[1], getattr(cfg, option[0])))
1246
+            val = raw_input("\nTest access with supplied credentials? [Y/n] ")
1247
+            if val.lower().startswith("y") or val == "":
1248
+                try:
1249
+                    output(u"Please wait...")
1250
+                    S3(Config()).bucket_list("", "")
1251
+                    output(u"Success. Your access key and secret key worked fine :-)")
1252
+
1253
+                    output(u"\nNow verifying that encryption works...")
1254
+                    if not getattr(cfg, "gpg_command") or not getattr(cfg, "gpg_passphrase"):
1255
+                        output(u"Not configured. Never mind.")
1256
+                    else:
1257
+                        if not getattr(cfg, "gpg_command"):
1258
+                            raise Exception("Path to GPG program not set")
1259
+                        if not os.path.isfile(getattr(cfg, "gpg_command")):
1260
+                            raise Exception("GPG program not found")
1261
+                        filename = Utils.mktmpfile()
1262
+                        f = open(filename, "w")
1263
+                        f.write(os.sys.copyright)
1264
+                        f.close()
1265
+                        ret_enc = gpg_encrypt(filename)
1266
+                        ret_dec = gpg_decrypt(ret_enc[1], ret_enc[2], False)
1267
+                        hash = [
1268
+                            Utils.hash_file_md5(filename),
1269
+                            Utils.hash_file_md5(ret_enc[1]),
1270
+                            Utils.hash_file_md5(ret_dec[1]),
1271
+                        ]
1272
+                        os.unlink(filename)
1273
+                        os.unlink(ret_enc[1])
1274
+                        os.unlink(ret_dec[1])
1275
+                        if hash[0] == hash[2] and hash[0] != hash[1]:
1276
+                            output("Success. Encryption and decryption worked fine :-)")
1277
+                        else:
1278
+                            raise Exception("Encryption verification error.")
1279
+
1280
+                except Exception, e:
1281
+                    error(u"Test failed: %s" % (e))
1282
+                    val = raw_input("\nRetry configuration? [Y/n] ")
1283
+                    if val.lower().startswith("y") or val == "":
1284
+                        continue
1285
+
1286
+
1287
+            val = raw_input("\nSave settings? [y/N] ")
1288
+            if val.lower().startswith("y"):
1289
+                break
1290
+            val = raw_input("Retry configuration? [Y/n] ")
1291
+            if val.lower().startswith("n"):
1292
+                raise EOFError()
1293
+
1294
+        ## Overwrite existing config file, make it user-readable only
1295
+        old_mask = os.umask(0077)
1296
+        try:
1297
+            os.remove(config_file)
1298
+        except OSError, e:
1299
+            if e.errno != errno.ENOENT:
1300
+                raise
1301
+        f = open(config_file, "w")
1302
+        os.umask(old_mask)
1303
+        cfg.dump_config(f)
1304
+        f.close()
1305
+        output(u"Configuration saved to '%s'" % config_file)
1306
+
1307
+    except (EOFError, KeyboardInterrupt):
1308
+        output(u"\nConfiguration aborted. Changes were NOT saved.")
1309
+        return
1310
+
1311
+    except IOError, e:
1312
+        error(u"Writing config file failed: %s: %s" % (config_file, e.strerror))
1313
+        sys.exit(1)
1314 1314
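One detail of the save path above worth noting is the umask dance that makes the config file readable by its owner only; a minimal reproduction (the path is hypothetical):

    import os

    old_mask = os.umask(0077)             # new files get permissions masked down to 0700
    f = open("/tmp/example-s3cfg", "w")   # created as -rw-------
    os.umask(old_mask)                    # restore the previous umask right away
    f.write("[default]\n")
    f.close()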
 
1315 1315
 def process_patterns_from_file(fname, patterns_list):
1316
-	try:
1317
-		fn = open(fname, "rt")
1318
-	except IOError, e:
1319
-		error(e)
1320
-		sys.exit(1)
1321
-	for pattern in fn:
1322
-		pattern = pattern.strip()
1323
-		if re.match("^#", pattern) or re.match("^\s*$", pattern):
1324
-			continue
1325
-		debug(u"%s: adding rule: %s" % (fname, pattern))
1326
-		patterns_list.append(pattern)
1327
-
1328
-	return patterns_list
1316
+    try:
1317
+        fn = open(fname, "rt")
1318
+    except IOError, e:
1319
+        error(e)
1320
+        sys.exit(1)
1321
+    for pattern in fn:
1322
+        pattern = pattern.strip()
1323
+        if re.match("^#", pattern) or re.match("^\s*$", pattern):
1324
+            continue
1325
+        debug(u"%s: adding rule: %s" % (fname, pattern))
1326
+        patterns_list.append(pattern)
1327
+
1328
+    return patterns_list
1329 1329
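For reference, a file passed via --exclude-from (the name is made up) is consumed by the loop above one pattern per line, with '#' comment lines and blank lines skipped:

    # excludes.txt
    *.swp
    *~

    # build output
    build/*

Running this file through process_patterns_from_file() appends ['*.swp', '*~', 'build/*'] to the pattern list.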
 
1330 1330
 def process_patterns(patterns_list, patterns_from, is_glob, option_txt = ""):
1331
-	"""
1332
-	process_patterns(patterns, patterns_from, is_glob, option_txt = "")
1333
-	Process --exclude / --include GLOB and REGEXP patterns.
1334
-	'option_txt' is 'exclude' / 'include' / 'rexclude' / 'rinclude'
1335
-	Returns: patterns_compiled, patterns_text
1336
-	"""
1337
-
1338
-	patterns_compiled = []
1339
-	patterns_textual = {}
1340
-
1341
-	if patterns_list is None:
1342
-		patterns_list = []
1343
-
1344
-	if patterns_from:
1345
-		## Append patterns from patterns_from
1346
-		for fname in patterns_from:
1347
-			debug(u"processing --%s-from %s" % (option_txt, fname))
1348
-			patterns_list = process_patterns_from_file(fname, patterns_list)
1349
-
1350
-	for pattern in patterns_list:
1351
-		debug(u"processing %s rule: %s" % (option_txt, pattern))
1352
-		if is_glob:
1353
-			pattern = glob.fnmatch.translate(pattern)
1354
-		r = re.compile(pattern)
1355
-		patterns_compiled.append(r)
1356
-		patterns_textual[r] = pattern
1357
-
1358
-	return patterns_compiled, patterns_textual
1331
+    """
1332
+    process_patterns(patterns, patterns_from, is_glob, option_txt = "")
1333
+    Process --exclude / --include GLOB and REGEXP patterns.
1334
+    'option_txt' is 'exclude' / 'include' / 'rexclude' / 'rinclude'
1335
+    Returns: patterns_compiled, patterns_text
1336
+    """
1337
+
1338
+    patterns_compiled = []
1339
+    patterns_textual = {}
1340
+
1341
+    if patterns_list is None:
1342
+        patterns_list = []
1343
+
1344
+    if patterns_from:
1345
+        ## Append patterns from patterns_from
1346
+        for fname in patterns_from:
1347
+            debug(u"processing --%s-from %s" % (option_txt, fname))
1348
+            patterns_list = process_patterns_from_file(fname, patterns_list)
1349
+
1350
+    for pattern in patterns_list:
1351
+        debug(u"processing %s rule: %s" % (option_txt, patterns_list))
1352
+        if is_glob:
1353
+            pattern = glob.fnmatch.translate(pattern)
1354
+        r = re.compile(pattern)
1355
+        patterns_compiled.append(r)
1356
+        patterns_textual[r] = pattern
1357
+
1358
+    return patterns_compiled, patterns_textual
1359 1359
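The GLOB branch above leans on fnmatch to turn shell-style patterns into regular expressions; a quick sketch of the translation (the exact output of translate() differs slightly between Python versions):

    import fnmatch
    import re

    pattern = fnmatch.translate("*.jpg")
    print pattern                              # .*\.jpg$ on Python 2.x
    r = re.compile(pattern)
    print bool(r.match("holiday/photo.jpg"))   # True -- note that '*' matches
                                               # across '/' too, unlike the shell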
 
1360 1360
 def get_commands_list():
1361
-	return [
1362
-	{"cmd":"mb", "label":"Make bucket", "param":"s3://BUCKET", "func":cmd_bucket_create, "argc":1},
1363
-	{"cmd":"rb", "label":"Remove bucket", "param":"s3://BUCKET", "func":cmd_bucket_delete, "argc":1},
1364
-	{"cmd":"ls", "label":"List objects or buckets", "param":"[s3://BUCKET[/PREFIX]]", "func":cmd_ls, "argc":0},
1365
-	{"cmd":"la", "label":"List all objects in all buckets", "param":"", "func":cmd_buckets_list_all_all, "argc":0},
1366
-	{"cmd":"put", "label":"Put file into bucket", "param":"FILE [FILE...] s3://BUCKET[/PREFIX]", "func":cmd_object_put, "argc":2},
1367
-	{"cmd":"get", "label":"Get file from bucket", "param":"s3://BUCKET/OBJECT LOCAL_FILE", "func":cmd_object_get, "argc":1},
1368
-	{"cmd":"del", "label":"Delete file from bucket", "param":"s3://BUCKET/OBJECT", "func":cmd_object_del, "argc":1},
1369
-	#{"cmd":"mkdir", "label":"Make a virtual S3 directory", "param":"s3://BUCKET/path/to/dir", "func":cmd_mkdir, "argc":1},
1370
-	{"cmd":"sync", "label":"Synchronize a directory tree to S3", "param":"LOCAL_DIR s3://BUCKET[/PREFIX] or s3://BUCKET[/PREFIX] LOCAL_DIR", "func":cmd_sync, "argc":2},
1371
-	{"cmd":"du", "label":"Disk usage by buckets", "param":"[s3://BUCKET[/PREFIX]]", "func":cmd_du, "argc":0},
1372
-	{"cmd":"info", "label":"Get various information about Buckets or Files", "param":"s3://BUCKET[/OBJECT]", "func":cmd_info, "argc":1},
1373
-	{"cmd":"cp", "label":"Copy object", "param":"s3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]", "func":cmd_cp, "argc":2},
1374
-	{"cmd":"mv", "label":"Move object", "param":"s3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]", "func":cmd_mv, "argc":2},
1375
-	{"cmd":"setacl", "label":"Modify Access control list for Bucket or Files", "param":"s3://BUCKET[/OBJECT]", "func":cmd_setacl, "argc":1},
1376
-	{"cmd":"accesslog", "label":"Enable/disable bucket access logging", "param":"s3://BUCKET", "func":cmd_accesslog, "argc":1},
1377
-	{"cmd":"sign", "label":"Sign arbitrary string using the secret key", "param":"STRING-TO-SIGN", "func":cmd_sign, "argc":1},
1378
-	{"cmd":"fixbucket", "label":"Fix invalid file names in a bucket", "param":"s3://BUCKET[/PREFIX]", "func":cmd_fixbucket, "argc":1},
1379
-
1380
-	## Website commands
1381
-	{"cmd":"ws-create", "label":"Create Website from bucket", "param":"s3://BUCKET", "func":cmd_website_create, "argc":1},
1382
-	{"cmd":"ws-delete", "label":"Delete Website", "param":"s3://BUCKET", "func":cmd_website_delete, "argc":1},
1383
-	{"cmd":"ws-info", "label":"Info about Website", "param":"s3://BUCKET", "func":cmd_website_info, "argc":1},
1384
-
1385
-	## CloudFront commands
1386
-	{"cmd":"cflist", "label":"List CloudFront distribution points", "param":"", "func":CfCmd.info, "argc":0},
1387
-	{"cmd":"cfinfo", "label":"Display CloudFront distribution point parameters", "param":"[cf://DIST_ID]", "func":CfCmd.info, "argc":0},
1388
-	{"cmd":"cfcreate", "label":"Create CloudFront distribution point", "param":"s3://BUCKET", "func":CfCmd.create, "argc":1},
1389
-	{"cmd":"cfdelete", "label":"Delete CloudFront distribution point", "param":"cf://DIST_ID", "func":CfCmd.delete, "argc":1},
1390
-	{"cmd":"cfmodify", "label":"Change CloudFront distribution point parameters", "param":"cf://DIST_ID", "func":CfCmd.modify, "argc":1},
1391
-	#{"cmd":"cfinval", "label":"Invalidate CloudFront objects", "param":"s3://BUCKET/OBJECT [s3://BUCKET/OBJECT ...]", "func":CfCmd.invalidate, "argc":1},
1392
-	{"cmd":"cfinvalinfo", "label":"Display CloudFront invalidation request(s) status", "param":"cf://DIST_ID[/INVAL_ID]", "func":CfCmd.invalinfo, "argc":1},
1393
-	]
1361
+    return [
1362
+    {"cmd":"mb", "label":"Make bucket", "param":"s3://BUCKET", "func":cmd_bucket_create, "argc":1},
1363
+    {"cmd":"rb", "label":"Remove bucket", "param":"s3://BUCKET", "func":cmd_bucket_delete, "argc":1},
1364
+    {"cmd":"ls", "label":"List objects or buckets", "param":"[s3://BUCKET[/PREFIX]]", "func":cmd_ls, "argc":0},
1365
+    {"cmd":"la", "label":"List all object in all buckets", "param":"", "func":cmd_buckets_list_all_all, "argc":0},
1366
+    {"cmd":"put", "label":"Put file into bucket", "param":"FILE [FILE...] s3://BUCKET[/PREFIX]", "func":cmd_object_put, "argc":2},
1367
+    {"cmd":"get", "label":"Get file from bucket", "param":"s3://BUCKET/OBJECT LOCAL_FILE", "func":cmd_object_get, "argc":1},
1368
+    {"cmd":"del", "label":"Delete file from bucket", "param":"s3://BUCKET/OBJECT", "func":cmd_object_del, "argc":1},
1369
+    #{"cmd":"mkdir", "label":"Make a virtual S3 directory", "param":"s3://BUCKET/path/to/dir", "func":cmd_mkdir, "argc":1},
1370
+    {"cmd":"sync", "label":"Synchronize a directory tree to S3", "param":"LOCAL_DIR s3://BUCKET[/PREFIX] or s3://BUCKET[/PREFIX] LOCAL_DIR", "func":cmd_sync, "argc":2},
1371
+    {"cmd":"du", "label":"Disk usage by buckets", "param":"[s3://BUCKET[/PREFIX]]", "func":cmd_du, "argc":0},
1372
+    {"cmd":"info", "label":"Get various information about Buckets or Files", "param":"s3://BUCKET[/OBJECT]", "func":cmd_info, "argc":1},
1373
+    {"cmd":"cp", "label":"Copy object", "param":"s3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]", "func":cmd_cp, "argc":2},
1374
+    {"cmd":"mv", "label":"Move object", "param":"s3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]", "func":cmd_mv, "argc":2},
1375
+    {"cmd":"setacl", "label":"Modify Access control list for Bucket or Files", "param":"s3://BUCKET[/OBJECT]", "func":cmd_setacl, "argc":1},
1376
+    {"cmd":"accesslog", "label":"Enable/disable bucket access logging", "param":"s3://BUCKET", "func":cmd_accesslog, "argc":1},
1377
+    {"cmd":"sign", "label":"Sign arbitrary string using the secret key", "param":"STRING-TO-SIGN", "func":cmd_sign, "argc":1},
1378
+    {"cmd":"fixbucket", "label":"Fix invalid file names in a bucket", "param":"s3://BUCKET[/PREFIX]", "func":cmd_fixbucket, "argc":1},
1379
+
1380
+    ## Website commands
1381
+    {"cmd":"ws-create", "label":"Create Website from bucket", "param":"s3://BUCKET", "func":cmd_website_create, "argc":1},
1382
+    {"cmd":"ws-delete", "label":"Delete Website", "param":"s3://BUCKET", "func":cmd_website_delete, "argc":1},
1383
+    {"cmd":"ws-info", "label":"Info about Website", "param":"s3://BUCKET", "func":cmd_website_info, "argc":1},
1384
+
1385
+    ## CloudFront commands
1386
+    {"cmd":"cflist", "label":"List CloudFront distribution points", "param":"", "func":CfCmd.info, "argc":0},
1387
+    {"cmd":"cfinfo", "label":"Display CloudFront distribution point parameters", "param":"[cf://DIST_ID]", "func":CfCmd.info, "argc":0},
1388
+    {"cmd":"cfcreate", "label":"Create CloudFront distribution point", "param":"s3://BUCKET", "func":CfCmd.create, "argc":1},
1389
+    {"cmd":"cfdelete", "label":"Delete CloudFront distribution point", "param":"cf://DIST_ID", "func":CfCmd.delete, "argc":1},
1390
+    {"cmd":"cfmodify", "label":"Change CloudFront distribution point parameters", "param":"cf://DIST_ID", "func":CfCmd.modify, "argc":1},
1391
+    #{"cmd":"cfinval", "label":"Invalidate CloudFront objects", "param":"s3://BUCKET/OBJECT [s3://BUCKET/OBJECT ...]", "func":CfCmd.invalidate, "argc":1},
1392
+    {"cmd":"cfinvalinfo", "label":"Display CloudFront invalidation request(s) status", "param":"cf://DIST_ID[/INVAL_ID]", "func":CfCmd.invalinfo, "argc":1},
1393
+    ]
1394 1394
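A sketch of how main() further below consumes this table: the list is folded into a dict keyed on "cmd", the command-line verb selects an entry, "argc" gates the parameter count, and "func" receives the remaining arguments (the bucket name is made up):

    commands = {}
    for cmd in get_commands_list():
        commands[cmd["cmd"]] = cmd

    entry = commands["mb"]             # as in: s3cmd mb s3://my-bucket
    args = [u"s3://my-bucket"]
    if len(args) >= entry["argc"]:
        entry["func"](args)            # dispatches to cmd_bucket_create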
 
1395 1395
 def format_commands(progname, commands_list):
1396
-	help = "Commands:\n"
1397
-	for cmd in commands_list:
1398
-		help += "  %s\n      %s %s %s\n" % (cmd["label"], progname, cmd["cmd"], cmd["param"])
1399
-	return help
1396
+    help = "Commands:\n"
1397
+    for cmd in commands_list:
1398
+        help += "  %s\n      %s %s %s\n" % (cmd["label"], progname, cmd["cmd"], cmd["param"])
1399
+    return help
1400 1400
 
1401 1401
 class OptionMimeType(Option):
1402
-	def check_mimetype(option, opt, value):
1403
-		if re.compile("^[a-z0-9]+/[a-z0-9+\.-]+$", re.IGNORECASE).match(value):
1404
-			return value
1405
-		raise OptionValueError("option %s: invalid MIME-Type format: %r" % (opt, value))
1402
+    def check_mimetype(option, opt, value):
1403
+        if re.compile("^[a-z0-9]+/[a-z0-9+\.-]+$", re.IGNORECASE).match(value):
1404
+            return value
1405
+        raise OptionValueError("option %s: invalid MIME-Type format: %r" % (opt, value))
1406 1406
 
1407 1407
 class OptionS3ACL(Option):
1408
-	def check_s3acl(option, opt, value):
1409
-		permissions = ('read', 'write', 'read_acp', 'write_acp', 'full_control', 'all')
1410
-		try:
1411
-			permission, grantee = re.compile("^(\w+):(.+)$", re.IGNORECASE).match(value).groups()
1412
-			if not permission or not grantee:
1413
-				raise
1414
-			if permission in permissions:
1415
-				return { 'name' : grantee, 'permission' : permission.upper() }
1416
-			else:
1417
-				raise OptionValueError("option %s: invalid S3 ACL permission: %s (valid values: %s)" % 
1418
-					(opt, permission, ", ".join(permissions)))
1419
-		except:
1420
-			raise OptionValueError("option %s: invalid S3 ACL format: %r" % (opt, value))
1408
+    def check_s3acl(option, opt, value):
1409
+        permissions = ('read', 'write', 'read_acp', 'write_acp', 'full_control', 'all')
1410
+        try:
1411
+            permission, grantee = re.compile("^(\w+):(.+)$", re.IGNORECASE).match(value).groups()
1412
+            if not permission or not grantee:
1413
+                raise
1414
+            if permission in permissions:
1415
+                return { 'name' : grantee, 'permission' : permission.upper() }
1416
+            else:
1417
+                raise OptionValueError("option %s: invalid S3 ACL permission: %s (valid values: %s)" %
1418
+                    (opt, permission, ", ".join(permissions)))
1419
+        except:
1420
+            raise OptionValueError("option %s: invalid S3 ACL format: %r" % (opt, value))
1421 1421
 
1422 1422
 class OptionAll(OptionMimeType, OptionS3ACL):
1423
-	TYPE_CHECKER = copy(Option.TYPE_CHECKER)
1424
-	TYPE_CHECKER["mimetype"] = OptionMimeType.check_mimetype
1425
-	TYPE_CHECKER["s3acl"] = OptionS3ACL.check_s3acl
1426
-	TYPES = Option.TYPES + ("mimetype", "s3acl")
1423
+    TYPE_CHECKER = copy(Option.TYPE_CHECKER)
1424
+    TYPE_CHECKER["mimetype"] = OptionMimeType.check_mimetype
1425
+    TYPE_CHECKER["s3acl"] = OptionS3ACL.check_s3acl
1426
+    TYPES = Option.TYPES + ("mimetype", "s3acl")
1427 1427
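The two checker classes exist so optparse can validate these custom value types at parse time, and OptionAll merges them so a single option class serves the whole parser. A minimal usage sketch (the grantee address is made up):

    from optparse import OptionParser

    parser = OptionParser(option_class = OptionAll)
    parser.add_option("-m", "--mime-type", type = "mimetype", dest = "default_mime_type")
    parser.add_option("--acl-grant", type = "s3acl", action = "append", dest = "acl_grants")

    opts, _ = parser.parse_args(["-m", "text/plain", "--acl-grant", "read:john@example.net"])
    print opts.acl_grants
    # [{'name': 'john@example.net', 'permission': 'READ'}]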
 
1428 1428
 class MyHelpFormatter(IndentedHelpFormatter):
1429
-	def format_epilog(self, epilog):
1430
-		if epilog:
1431
-			return "\n" + epilog + "\n"
1432
-		else:
1433
-			return ""
1429
+    def format_epilog(self, epilog):
1430
+        if epilog:
1431
+            return "\n" + epilog + "\n"
1432
+        else:
1433
+            return ""
1434 1434
 
1435 1435
 def main():
1436
-	global cfg
1437
-
1438
-	commands_list = get_commands_list()
1439
-	commands = {}
1440
-
1441
-	## Populate "commands" from "commands_list"
1442
-	for cmd in commands_list:
1443
-		if cmd.has_key("cmd"):
1444
-			commands[cmd["cmd"]] = cmd
1445
-
1446
-	default_verbosity = Config().verbosity
1447
-	optparser = OptionParser(option_class=OptionAll, formatter=MyHelpFormatter())
1448
-	#optparser.disable_interspersed_args()
1449
-
1450
-	config_file = None
1451
-	if os.getenv("HOME"):
1452
-		config_file = os.path.join(os.getenv("HOME"), ".s3cfg")
1453
-	elif os.name == "nt" and os.getenv("USERPROFILE"):
1454
-		config_file = os.path.join(os.getenv("USERPROFILE").decode('mbcs'), "Application Data", "s3cmd.ini")
1455
-
1456
-	preferred_encoding = locale.getpreferredencoding() or "UTF-8"
1457
-
1458
-	optparser.set_defaults(encoding = preferred_encoding)
1459
-	optparser.set_defaults(config = config_file)
1460
-	optparser.set_defaults(verbosity = default_verbosity)
1461
-
1462
-	optparser.add_option(      "--configure", dest="run_configure", action="store_true", help="Invoke interactive (re)configuration tool.")
1463
-	optparser.add_option("-c", "--config", dest="config", metavar="FILE", help="Config file name. Defaults to %default")
1464
-	optparser.add_option(      "--dump-config", dest="dump_config", action="store_true", help="Dump current configuration after parsing config files and command line options and exit.")
1465
-
1466
-	optparser.add_option("-n", "--dry-run", dest="dry_run", action="store_true", help="Only show what should be uploaded or downloaded but don't actually do it. May still perform S3 requests to get bucket listings and other information though (only for file transfer commands)")
1467
-
1468
-	optparser.add_option("-e", "--encrypt", dest="encrypt", action="store_true", help="Encrypt files before uploading to S3.")
1469
-	optparser.add_option(      "--no-encrypt", dest="encrypt", action="store_false", help="Don't encrypt files.")
1470
-	optparser.add_option("-f", "--force", dest="force", action="store_true", help="Force overwrite and other dangerous operations.")
1471
-	optparser.add_option(      "--continue", dest="get_continue", action="store_true", help="Continue getting a partially downloaded file (only for [get] command).")
1472
-	optparser.add_option(      "--skip-existing", dest="skip_existing", action="store_true", help="Skip over files that exist at the destination (only for [get] and [sync] commands).")
1473
-	optparser.add_option("-r", "--recursive", dest="recursive", action="store_true", help="Recursive upload, download or removal.")
1474
-	optparser.add_option(      "--check-md5", dest="check_md5", action="store_true", help="Check MD5 sums when comparing files for [sync]. (default)")
1475
-	optparser.add_option(      "--no-check-md5", dest="check_md5", action="store_false", help="Do not check MD5 sums when comparing files for [sync]. Only size will be compared. May significantly speed up transfer but may also miss some changed files.")
1476
-	optparser.add_option("-P", "--acl-public", dest="acl_public", action="store_true", help="Store objects with ACL allowing read for anyone.")
1477
-	optparser.add_option(      "--acl-private", dest="acl_public", action="store_false", help="Store objects with default ACL allowing access for you only.")
1478
-	optparser.add_option(      "--acl-grant", dest="acl_grants", type="s3acl", action="append", metavar="PERMISSION:EMAIL or USER_CANONICAL_ID", help="Grant stated permission to a given amazon user. Permission is one of: read, write, read_acp, write_acp, full_control, all")
1479
-	optparser.add_option(      "--acl-revoke", dest="acl_revokes", type="s3acl", action="append", metavar="PERMISSION:USER_CANONICAL_ID", help="Revoke stated permission for a given amazon user. Permission is one of: read, write, read_acp, write_acp, full_control, all")
1480
-
1481
-	optparser.add_option(      "--delete-removed", dest="delete_removed", action="store_true", help="Delete remote objects with no corresponding local file [sync]")
1482
-	optparser.add_option(      "--no-delete-removed", dest="delete_removed", action="store_false", help="Don't delete remote objects.")
1483
-	optparser.add_option("-p", "--preserve", dest="preserve_attrs", action="store_true", help="Preserve filesystem attributes (mode, ownership, timestamps). Default for [sync] command.")
1484
-	optparser.add_option(      "--no-preserve", dest="preserve_attrs", action="store_false", help="Don't store FS attributes")
1485
-	optparser.add_option(      "--exclude", dest="exclude", action="append", metavar="GLOB", help="Filenames and paths matching GLOB will be excluded from sync")
1486
-	optparser.add_option(      "--exclude-from", dest="exclude_from", action="append", metavar="FILE", help="Read --exclude GLOBs from FILE")
1487
-	optparser.add_option(      "--rexclude", dest="rexclude", action="append", metavar="REGEXP", help="Filenames and paths matching REGEXP (regular expression) will be excluded from sync")
1488
-	optparser.add_option(      "--rexclude-from", dest="rexclude_from", action="append", metavar="FILE", help="Read --rexclude REGEXPs from FILE")
1489
-	optparser.add_option(      "--include", dest="include", action="append", metavar="GLOB", help="Filenames and paths matching GLOB will be included even if previously excluded by one of --(r)exclude(-from) patterns")
1490
-	optparser.add_option(      "--include-from", dest="include_from", action="append", metavar="FILE", help="Read --include GLOBs from FILE")
1491
-	optparser.add_option(      "--rinclude", dest="rinclude", action="append", metavar="REGEXP", help="Same as --include but uses REGEXP (regular expression) instead of GLOB")
1492
-	optparser.add_option(      "--rinclude-from", dest="rinclude_from", action="append", metavar="FILE", help="Read --rinclude REGEXPs from FILE")
1493
-
1494
-	optparser.add_option(      "--bucket-location", dest="bucket_location", help="Datacenter to create bucket in. As of now the datacenters are: US (default), EU, us-west-1, and ap-southeast-1")
1495
-	optparser.add_option(      "--reduced-redundancy", "--rr", dest="reduced_redundancy", action="store_true", help="Store object with 'Reduced redundancy'. Lower per-GB price. [put, cp, mv]")
1496
-
1497
-	optparser.add_option(      "--access-logging-target-prefix", dest="log_target_prefix", help="Target prefix for access logs (S3 URI) (for [cfmodify] and [accesslog] commands)")
1498
-	optparser.add_option(      "--no-access-logging", dest="log_target_prefix", action="store_false", help="Disable access logging (for [cfmodify] and [accesslog] commands)")
1499
-
1500
-	optparser.add_option("-m", "--mime-type", dest="default_mime_type", type="mimetype", metavar="MIME/TYPE", help="Default MIME-type to be set for objects stored.")
1501
-	optparser.add_option("-M", "--guess-mime-type", dest="guess_mime_type", action="store_true", help="Guess MIME-type of files by their extension. Falls back to default MIME-Type as specified by --mime-type option")
1502
-
1503
-	optparser.add_option(      "--add-header", dest="add_header", action="append", metavar="NAME:VALUE", help="Add a given HTTP header to the upload request. Can be used multiple times. For instance set 'Expires' or 'Cache-Control' headers (or both) using this option if you like.")
1504
-
1505
-	optparser.add_option(      "--encoding", dest="encoding", metavar="ENCODING", help="Override autodetected terminal and filesystem encoding (character set). Autodetected: %s" % preferred_encoding)
1506
-	optparser.add_option(      "--verbatim", dest="urlencoding_mode", action="store_const", const="verbatim", help="Use the S3 name as given on the command line. No pre-processing, encoding, etc. Use with caution!")
1507
-
1508
-	optparser.add_option(      "--list-md5", dest="list_md5", action="store_true", help="Include MD5 sums in bucket listings (only for 'ls' command).")
1509
-	optparser.add_option("-H", "--human-readable-sizes", dest="human_readable_sizes", action="store_true", help="Print sizes in human readable form (e.g. 1kB instead of 1234).")
1510
-
1511
-	optparser.add_option(      "--ws-index", dest="website_index", action="store", help="Name of index-document (only for [ws-create] command)")
1512
-	optparser.add_option(      "--ws-error", dest="website_error", action="store", help="Name of error-document (only for [ws-create] command)")
1513
-
1514
-	optparser.add_option(      "--progress", dest="progress_meter", action="store_true", help="Display progress meter (default on TTY).")
1515
-	optparser.add_option(      "--no-progress", dest="progress_meter", action="store_false", help="Don't display progress meter (default on non-TTY).")
1516
-	optparser.add_option(      "--enable", dest="enable", action="store_true", help="Enable given CloudFront distribution (only for [cfmodify] command)")
1517
-	optparser.add_option(      "--disable", dest="enable", action="store_false", help="Disable given CloudFront distribution (only for [cfmodify] command)")
1518
-	optparser.add_option(      "--cf-invalidate", dest="invalidate_on_cf", action="store_true", help="Invalidate the uploaded files in CloudFront. Also see [cfinval] command.")
1519
-	optparser.add_option(      "--cf-add-cname", dest="cf_cnames_add", action="append", metavar="CNAME", help="Add given CNAME to a CloudFront distribution (only for [cfcreate] and [cfmodify] commands)")
1520
-	optparser.add_option(      "--cf-remove-cname", dest="cf_cnames_remove", action="append", metavar="CNAME", help="Remove given CNAME from a CloudFront distribution (only for [cfmodify] command)")
1521
-	optparser.add_option(      "--cf-comment", dest="cf_comment", action="store", metavar="COMMENT", help="Set COMMENT for a given CloudFront distribution (only for [cfcreate] and [cfmodify] commands)")
1522
-	optparser.add_option(      "--cf-default-root-object", dest="cf_default_root_object", action="store", metavar="DEFAULT_ROOT_OBJECT", help="Set the default root object to return when no object is specified in the URL. Use a relative path, i.e. default/index.html instead of /default/index.html or s3://bucket/default/index.html (only for [cfcreate] and [cfmodify] commands)")
1523
-	optparser.add_option("-v", "--verbose", dest="verbosity", action="store_const", const=logging.INFO, help="Enable verbose output.")
1524
-	optparser.add_option("-d", "--debug", dest="verbosity", action="store_const", const=logging.DEBUG, help="Enable debug output.")
1525
-	optparser.add_option(      "--version", dest="show_version", action="store_true", help="Show s3cmd version (%s) and exit." % (PkgInfo.version))
1526
-	optparser.add_option("-F", "--follow-symlinks", dest="follow_symlinks", action="store_true", default=False, help="Follow symbolic links as if they are regular files")
1527
-
1528
-	optparser.set_usage(optparser.usage + " COMMAND [parameters]")
1529
-	optparser.set_description('S3cmd is a tool for managing objects in '+
1530
-		'Amazon S3 storage. It allows for making and removing '+
1531
-		'"buckets" and uploading, downloading and removing '+
1532
-		'"objects" from these buckets.')
1533
-	optparser.epilog = format_commands(optparser.get_prog_name(), commands_list)
1534
-	optparser.epilog += ("\nFor more information see the project homepage:\n%s\n" % PkgInfo.url)
1535
-	optparser.epilog += ("\nConsider a donation if you have found s3cmd useful:\n%s/donate\n" % PkgInfo.url)
1536
-
1537
-	(options, args) = optparser.parse_args()
1538
-
1539
-	## Some mucking with logging levels to enable 
1540
-	## debugging/verbose output for config file parser on request
1541
-	logging.basicConfig(level=options.verbosity,
1542
-	                    format='%(levelname)s: %(message)s',
1543
-	                    stream = sys.stderr)
1544
-	
1545
-	if options.show_version:
1546
-		output(u"s3cmd version %s" % PkgInfo.version)
1547
-		sys.exit(0)
1548
-
1549
-	## Now finally parse the config file
1550
-	if not options.config:
1551
-		error(u"Can't find a config file. Please use --config option.")
1552
-		sys.exit(1)
1553
-
1554
-	try:
1555
-		cfg = Config(options.config)
1556
-	except IOError, e:
1557
-		if options.run_configure:
1558
-			cfg = Config()
1559
-		else:
1560
-			error(u"%s: %s"  % (options.config, e.strerror))
1561
-			error(u"Configuration file not available.")
1562
-			error(u"Consider using --configure parameter to create one.")
1563
-			sys.exit(1)
1564
-
1565
-	## And again some logging level adjustments
1566
-	## according to configfile and command line parameters
1567
-	if options.verbosity != default_verbosity:
1568
-		cfg.verbosity = options.verbosity
1569
-	logging.root.setLevel(cfg.verbosity)
1570
-
1571
-	## Default to --progress on TTY devices, --no-progress elsewhere
1572
-	## Can be overridden by actual --(no-)progress parameter
1573
-	cfg.update_option('progress_meter', sys.stdout.isatty())
1574
-
1575
-	## Unsupported features on Win32 platform
1576
-	if os.name == "nt":
1577
-		if cfg.preserve_attrs:
1578
-			error(u"Option --preserve is not yet supported on MS Windows platform. Assuming --no-preserve.")
1579
-			cfg.preserve_attrs = False
1580
-		if cfg.progress_meter:
1581
-			error(u"Option --progress is not yet supported on MS Windows platform. Assuming --no-progress.")
1582
-			cfg.progress_meter = False
1583
-
1584
-	## Pre-process --add-header's and put them into Config.extra_headers SortedDict()
1585
-	if options.add_header:
1586
-		for hdr in options.add_header:
1587
-			try:
1588
-				key, val = hdr.split(":", 1)
1589
-			except ValueError:
1590
-				raise ParameterError("Invalid header format: %s" % hdr)
1591
-			key_inval = re.sub("[a-zA-Z0-9-.]", "", key)
1592
-			if key_inval:
1593
-				key_inval = key_inval.replace(" ", "<space>")
1594
-				key_inval = key_inval.replace("\t", "<tab>")
1595
-				raise ParameterError("Invalid character(s) in header name '%s': \"%s\"" % (key, key_inval))
1596
-			debug(u"Updating Config.Config extra_headers[%s] -> %s" % (key.strip(), val.strip()))
1597
-			cfg.extra_headers[key.strip()] = val.strip()
1598
-
1599
-	## --acl-grant/--acl-revoke arguments are pre-parsed by OptionS3ACL()
1600
-	if options.acl_grants:
1601
-		for grant in options.acl_grants:
1602
-			cfg.acl_grants.append(grant)
1603
-
1604
-	if options.acl_revokes:
1605
-		for grant in options.acl_revokes:
1606
-			cfg.acl_revokes.append(grant)
1607
-
1608
-	## Process --(no-)check-md5
1609
-	if options.check_md5 == False:
1610
-		try:
1611
-			cfg.sync_checks.remove("md5")
1612
-		except:
1613
-			pass
1614
-	if options.check_md5 == True and cfg.sync_checks.count("md5") == 0:
1615
-		cfg.sync_checks.append("md5")
1616
-
1617
-	## Update Config with other parameters
1618
-	for option in cfg.option_list():
1619
-		try:
1620
-			if getattr(options, option) != None:
1621
-				debug(u"Updating Config.Config %s -> %s" % (option, getattr(options, option)))
1622
-				cfg.update_option(option, getattr(options, option))
1623
-		except AttributeError:
1624
-			## Some Config() options are not settable from command line
1625
-			pass
1626
-
1627
-	## Special handling for tri-state options (True, False, None)
1628
-	cfg.update_option("enable", options.enable)
1629
-	cfg.update_option("acl_public", options.acl_public)
1630
-
1631
-	## CloudFront's cf_enable and Config's enable share the same --enable switch
1632
-	options.cf_enable = options.enable
1633
-
1634
-	## CloudFront's cf_logging and Config's log_target_prefix share the same --log-target-prefix switch
1635
-	options.cf_logging = options.log_target_prefix
1636
-
1637
-	## Update CloudFront options if some were set
1638
-	for option in CfCmd.options.option_list():
1639
-		try:
1640
-			if getattr(options, option) != None:
1641
-				debug(u"Updating CloudFront.Cmd %s -> %s" % (option, getattr(options, option)))
1642
-				CfCmd.options.update_option(option, getattr(options, option))
1643
-		except AttributeError:
1644
-			## Some CloudFront.Cmd.Options() options are not settable from command line
1645
-			pass
1646
-
1647
-	## Set output and filesystem encoding for printing out filenames.
1648
-	sys.stdout = codecs.getwriter(cfg.encoding)(sys.stdout, "replace")
1649
-	sys.stderr = codecs.getwriter(cfg.encoding)(sys.stderr, "replace")
1650
-
1651
-	## Process --exclude and --exclude-from
1652
-	patterns_list, patterns_textual = process_patterns(options.exclude, options.exclude_from, is_glob = True, option_txt = "exclude")
1653
-	cfg.exclude.extend(patterns_list)
1654
-	cfg.debug_exclude.update(patterns_textual)
1655
-
1656
-	## Process --rexclude and --rexclude-from
1657
-	patterns_list, patterns_textual = process_patterns(options.rexclude, options.rexclude_from, is_glob = False, option_txt = "rexclude")
1658
-	cfg.exclude.extend(patterns_list)
1659
-	cfg.debug_exclude.update(patterns_textual)
1660
-
1661
-	## Process --include and --include-from
1662
-	patterns_list, patterns_textual = process_patterns(options.include, options.include_from, is_glob = True, option_txt = "include")
1663
-	cfg.include.extend(patterns_list)
1664
-	cfg.debug_include.update(patterns_textual)
1665
-
1666
-	## Process --rinclude and --rinclude-from
1667
-	patterns_list, patterns_textual = process_patterns(options.rinclude, options.rinclude_from, is_glob = False, option_txt = "rinclude")
1668
-	cfg.include.extend(patterns_list)
1669
-	cfg.debug_include.update(patterns_textual)
1670
-
1671
-	## Set socket read()/write() timeout
1672
-	socket.setdefaulttimeout(cfg.socket_timeout)
1673
-
1674
-	if cfg.encrypt and cfg.gpg_passphrase == "":
1675
-		error(u"Encryption requested but no passphrase set in config file.")
1676
-		error(u"Please re-run 's3cmd --configure' and supply it.")
1677
-		sys.exit(1)
1678
-
1679
-	if options.dump_config:
1680
-		cfg.dump_config(sys.stdout)
1681
-		sys.exit(0)
1682
-
1683
-	if options.run_configure:
1684
-		run_configure(options.config)
1685
-		sys.exit(0)
1686
-
1687
-	if len(args) < 1:
1688
-		error(u"Missing command. Please run with --help for more information.")
1689
-		sys.exit(1)
1690
-
1691
-	## Unicodise all remaining arguments:
1692
-	args = [unicodise(arg) for arg in args]
1693
-
1694
-	command = args.pop(0)
1695
-	try:
1696
-		debug(u"Command: %s" % commands[command]["cmd"])
1697
-		## We must do this lookup in an extra step to
1698
-		## avoid catching all KeyError exceptions
1699
-		## from inner functions.
1700
-		cmd_func = commands[command]["func"]
1701
-	except KeyError, e:
1702
-		error(u"Invalid command: %s" % e)
1703
-		sys.exit(1)
1704
-
1705
-	if len(args) < commands[command]["argc"]:
1706
-		error(u"Not enough parameters for command '%s'" % command)
1707
-		sys.exit(1)
1708
-
1709
-	try:
1710
-		cmd_func(args)
1711
-	except S3Error, e:
1712
-		error(u"S3 error: %s" % e)
1713
-		sys.exit(1)
1436
+    global cfg
1437
+
1438
+    commands_list = get_commands_list()
1439
+    commands = {}
1440
+
1441
+    ## Populate "commands" from "commands_list"
1442
+    for cmd in commands_list:
1443
+        if cmd.has_key("cmd"):
1444
+            commands[cmd["cmd"]] = cmd
1445
+
1446
+    default_verbosity = Config().verbosity
1447
+    optparser = OptionParser(option_class=OptionAll, formatter=MyHelpFormatter())
1448
+    #optparser.disable_interspersed_args()
1449
+
1450
+    config_file = None
1451
+    if os.getenv("HOME"):
1452
+        config_file = os.path.join(os.getenv("HOME"), ".s3cfg")
1453
+    elif os.name == "nt" and os.getenv("USERPROFILE"):
1454
+        config_file = os.path.join(os.getenv("USERPROFILE").decode('mbcs'), "Application Data", "s3cmd.ini")
1455
+
1456
+    preferred_encoding = locale.getpreferredencoding() or "UTF-8"
1457
+
1458
+    optparser.set_defaults(encoding = preferred_encoding)
1459
+    optparser.set_defaults(config = config_file)
1460
+    optparser.set_defaults(verbosity = default_verbosity)
1461
+
1462
+    optparser.add_option(      "--configure", dest="run_configure", action="store_true", help="Invoke interactive (re)configuration tool.")
1463
+    optparser.add_option("-c", "--config", dest="config", metavar="FILE", help="Config file name. Defaults to %default")
1464
+    optparser.add_option(      "--dump-config", dest="dump_config", action="store_true", help="Dump current configuration after parsing config files and command line options and exit.")
1465
+
1466
+    optparser.add_option("-n", "--dry-run", dest="dry_run", action="store_true", help="Only show what should be uploaded or downloaded but don't actually do it. May still perform S3 requests to get bucket listings and other information though (only for file transfer commands)")
1467
+
1468
+    optparser.add_option("-e", "--encrypt", dest="encrypt", action="store_true", help="Encrypt files before uploading to S3.")
1469
+    optparser.add_option(      "--no-encrypt", dest="encrypt", action="store_false", help="Don't encrypt files.")
1470
+    optparser.add_option("-f", "--force", dest="force", action="store_true", help="Force overwrite and other dangerous operations.")
1471
+    optparser.add_option(      "--continue", dest="get_continue", action="store_true", help="Continue getting a partially downloaded file (only for [get] command).")
1472
+    optparser.add_option(      "--skip-existing", dest="skip_existing", action="store_true", help="Skip over files that exist at the destination (only for [get] and [sync] commands).")
1473
+    optparser.add_option("-r", "--recursive", dest="recursive", action="store_true", help="Recursive upload, download or removal.")
1474
+    optparser.add_option(      "--check-md5", dest="check_md5", action="store_true", help="Check MD5 sums when comparing files for [sync]. (default)")
1475
+    optparser.add_option(      "--no-check-md5", dest="check_md5", action="store_false", help="Do not check MD5 sums when comparing files for [sync]. Only size will be compared. May significantly speed up transfer but may also miss some changed files.")
1476
+    optparser.add_option("-P", "--acl-public", dest="acl_public", action="store_true", help="Store objects with ACL allowing read for anyone.")
1477
+    optparser.add_option(      "--acl-private", dest="acl_public", action="store_false", help="Store objects with default ACL allowing access for you only.")
1478
+    optparser.add_option(      "--acl-grant", dest="acl_grants", type="s3acl", action="append", metavar="PERMISSION:EMAIL or USER_CANONICAL_ID", help="Grant stated permission to a given amazon user. Permission is one of: read, write, read_acp, write_acp, full_control, all")
1479
+    optparser.add_option(      "--acl-revoke", dest="acl_revokes", type="s3acl", action="append", metavar="PERMISSION:USER_CANONICAL_ID", help="Revoke stated permission for a given amazon user. Permission is one of: read, write, read_acp, write_acp, full_control, all")
1480
+
1481
+    optparser.add_option(      "--delete-removed", dest="delete_removed", action="store_true", help="Delete remote objects with no corresponding local file [sync]")
1482
+    optparser.add_option(      "--no-delete-removed", dest="delete_removed", action="store_false", help="Don't delete remote objects.")
1483
+    optparser.add_option("-p", "--preserve", dest="preserve_attrs", action="store_true", help="Preserve filesystem attributes (mode, ownership, timestamps). Default for [sync] command.")
1484
+    optparser.add_option(      "--no-preserve", dest="preserve_attrs", action="store_false", help="Don't store FS attributes")
1485
+    optparser.add_option(      "--exclude", dest="exclude", action="append", metavar="GLOB", help="Filenames and paths matching GLOB will be excluded from sync")
1486
+    optparser.add_option(      "--exclude-from", dest="exclude_from", action="append", metavar="FILE", help="Read --exclude GLOBs from FILE")
1487
+    optparser.add_option(      "--rexclude", dest="rexclude", action="append", metavar="REGEXP", help="Filenames and paths matching REGEXP (regular expression) will be excluded from sync")
1488
+    optparser.add_option(      "--rexclude-from", dest="rexclude_from", action="append", metavar="FILE", help="Read --rexclude REGEXPs from FILE")
1489
+    optparser.add_option(      "--include", dest="include", action="append", metavar="GLOB", help="Filenames and paths matching GLOB will be included even if previously excluded by one of --(r)exclude(-from) patterns")
1490
+    optparser.add_option(      "--include-from", dest="include_from", action="append", metavar="FILE", help="Read --include GLOBs from FILE")
1491
+    optparser.add_option(      "--rinclude", dest="rinclude", action="append", metavar="REGEXP", help="Same as --include but uses REGEXP (regular expression) instead of GLOB")
1492
+    optparser.add_option(      "--rinclude-from", dest="rinclude_from", action="append", metavar="FILE", help="Read --rinclude REGEXPs from FILE")
1493
+
1494
+    optparser.add_option(      "--bucket-location", dest="bucket_location", help="Datacenter to create bucket in. As of now the datacenters are: US (default), EU, us-west-1, and ap-southeast-1")
1495
+    optparser.add_option(      "--reduced-redundancy", "--rr", dest="reduced_redundancy", action="store_true", help="Store object with 'Reduced redundancy'. Lower per-GB price. [put, cp, mv]")
1496
+
1497
+    optparser.add_option(      "--access-logging-target-prefix", dest="log_target_prefix", help="Target prefix for access logs (S3 URI) (for [cfmodify] and [accesslog] commands)")
1498
+    optparser.add_option(      "--no-access-logging", dest="log_target_prefix", action="store_false", help="Disable access logging (for [cfmodify] and [accesslog] commands)")
1499
+
1500
+    optparser.add_option("-m", "--mime-type", dest="default_mime_type", type="mimetype", metavar="MIME/TYPE", help="Default MIME-type to be set for objects stored.")
1501
+    optparser.add_option("-M", "--guess-mime-type", dest="guess_mime_type", action="store_true", help="Guess MIME-type of files by their extension. Falls back to default MIME-Type as specified by --mime-type option")
1502
+
1503
+    optparser.add_option(      "--add-header", dest="add_header", action="append", metavar="NAME:VALUE", help="Add a given HTTP header to the upload request. Can be used multiple times. For instance set 'Expires' or 'Cache-Control' headers (or both) using this option if you like.")
1504
+
1505
+    optparser.add_option(      "--encoding", dest="encoding", metavar="ENCODING", help="Override autodetected terminal and filesystem encoding (character set). Autodetected: %s" % preferred_encoding)
1506
+    optparser.add_option(      "--verbatim", dest="urlencoding_mode", action="store_const", const="verbatim", help="Use the S3 name as given on the command line. No pre-processing, encoding, etc. Use with caution!")
1507
+
1508
+    optparser.add_option(      "--list-md5", dest="list_md5", action="store_true", help="Include MD5 sums in bucket listings (only for 'ls' command).")
1509
+    optparser.add_option("-H", "--human-readable-sizes", dest="human_readable_sizes", action="store_true", help="Print sizes in human readable form (eg 1kB instead of 1234).")
1510
+
1511
+    optparser.add_option(      "--ws-index", dest="website_index", action="store", help="Name of error-document (only for [ws-create] command)")
1512
+    optparser.add_option(      "--ws-error", dest="website_error", action="store", help="Name of index-document (only for [ws-create] command)")
1513
+
1514
+    optparser.add_option(      "--progress", dest="progress_meter", action="store_true", help="Display progress meter (default on TTY).")
1515
+    optparser.add_option(      "--no-progress", dest="progress_meter", action="store_false", help="Don't display progress meter (default on non-TTY).")
1516
+    optparser.add_option(      "--enable", dest="enable", action="store_true", help="Enable given CloudFront distribution (only for [cfmodify] command)")
1517
+    optparser.add_option(      "--disable", dest="enable", action="store_false", help="Enable given CloudFront distribution (only for [cfmodify] command)")
1518
+    optparser.add_option(      "--cf-invalidate", dest="invalidate_on_cf", action="store_true", help="Invalidate the uploaded filed in CloudFront. Also see [cfinval] command.")
1519
+    optparser.add_option(      "--cf-add-cname", dest="cf_cnames_add", action="append", metavar="CNAME", help="Add given CNAME to a CloudFront distribution (only for [cfcreate] and [cfmodify] commands)")
1520
+    optparser.add_option(      "--cf-remove-cname", dest="cf_cnames_remove", action="append", metavar="CNAME", help="Remove given CNAME from a CloudFront distribution (only for [cfmodify] command)")
1521
+    optparser.add_option(      "--cf-comment", dest="cf_comment", action="store", metavar="COMMENT", help="Set COMMENT for a given CloudFront distribution (only for [cfcreate] and [cfmodify] commands)")
1522
+    optparser.add_option(      "--cf-default-root-object", dest="cf_default_root_object", action="store", metavar="DEFAULT_ROOT_OBJECT", help="Set the default root object to return when no object is specified in the URL. Use a relative path, i.e. default/index.html instead of /default/index.html or s3://bucket/default/index.html (only for [cfcreate] and [cfmodify] commands)")
1523
+    optparser.add_option("-v", "--verbose", dest="verbosity", action="store_const", const=logging.INFO, help="Enable verbose output.")
1524
+    optparser.add_option("-d", "--debug", dest="verbosity", action="store_const", const=logging.DEBUG, help="Enable debug output.")
1525
+    optparser.add_option(      "--version", dest="show_version", action="store_true", help="Show s3cmd version (%s) and exit." % (PkgInfo.version))
1526
+    optparser.add_option("-F", "--follow-symlinks", dest="follow_symlinks", action="store_true", default=False, help="Follow symbolic links as if they are regular files")
1527
+
1528
+    optparser.set_usage(optparser.usage + " COMMAND [parameters]")
1529
+    optparser.set_description('S3cmd is a tool for managing objects in '+
1530
+        'Amazon S3 storage. It allows for making and removing '+
1531
+        '"buckets" and uploading, downloading and removing '+
1532
+        '"objects" from these buckets.')
1533
+    optparser.epilog = format_commands(optparser.get_prog_name(), commands_list)
1534
+    optparser.epilog += ("\nFor more informations see the progect homepage:\n%s\n" % PkgInfo.url)
1535
+    optparser.epilog += ("\nConsider a donation if you have found s3cmd useful:\n%s/donate\n" % PkgInfo.url)
1536
+
1537
+    (options, args) = optparser.parse_args()
1538
+
1539
+    ## Some mucking with logging levels to enable
1540
+    ## debugging/verbose output for the config file parser on request
1541
+    logging.basicConfig(level=options.verbosity,
1542
+                        format='%(levelname)s: %(message)s',
1543
+                        stream = sys.stderr)
1544
+
1545
+    if options.show_version:
1546
+        output(u"s3cmd version %s" % PkgInfo.version)
1547
+        sys.exit(0)
1548
+
1549
+    ## Now finally parse the config file
1550
+    if not options.config:
1551
+        error(u"Can't find a config file. Please use --config option.")
1552
+        sys.exit(1)
1553
+
1554
+    try:
1555
+        cfg = Config(options.config)
1556
+    except IOError, e:
1557
+        if options.run_configure:
1558
+            cfg = Config()
1559
+        else:
1560
+            error(u"%s: %s"  % (options.config, e.strerror))
1561
+            error(u"Configuration file not available.")
1562
+            error(u"Consider using --configure parameter to create one.")
1563
+            sys.exit(1)
1564
+
1565
+    ## And again some logging level adjustments
1566
+    ## according to configfile and command line parameters
1567
+    if options.verbosity != default_verbosity:
1568
+        cfg.verbosity = options.verbosity
1569
+    logging.root.setLevel(cfg.verbosity)
1570
+
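A quick self-contained illustration of the two-step logging dance above: the early basicConfig honours the command-line verbosity so that config-file parsing itself can be traced with -d, and the root logger is re-pointed once the effective level from the config file is known (the messages here are illustrative only):

    import logging, sys
    logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s',
                        stream=sys.stderr)     # early: honour -d while parsing config
    logging.debug("visible while the config file is being parsed")
    logging.root.setLevel(logging.WARNING)     # later: effective level from the config
    logging.debug("suppressed once the final level is applied")
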
1571
+    ## Default to --progress on TTY devices, --no-progress elsewhere
1572
+    ## Can be overridden by actual --(no-)progress parameter
1573
+    cfg.update_option('progress_meter', sys.stdout.isatty())
1574
+
1575
+    ## Unsupported features on Win32 platform
1576
+    if os.name == "nt":
1577
+        if cfg.preserve_attrs:
1578
+            error(u"Option --preserve is not yet supported on MS Windows platform. Assuming --no-preserve.")
1579
+            cfg.preserve_attrs = False
1580
+        if cfg.progress_meter:
1581
+            error(u"Option --progress is not yet supported on MS Windows platform. Assuming --no-progress.")
1582
+            cfg.progress_meter = False
1583
+
1584
+    ## Pre-process --add-header's and put them into the Config.extra_headers SortedDict()
1585
+    if options.add_header:
1586
+        for hdr in options.add_header:
1587
+            try:
1588
+                key, val = hdr.split(":", 1)
1589
+            except ValueError:
1590
+                raise ParameterError("Invalid header format: %s" % hdr)
1591
+            key_inval = re.sub("[a-zA-Z0-9-.]", "", key)
1592
+            if key_inval:
1593
+                key_inval = key_inval.replace(" ", "<space>")
1594
+                key_inval = key_inval.replace("\t", "<tab>")
1595
+                raise ParameterError("Invalid character(s) in header name '%s': \"%s\"" % (key, key_inval))
1596
+            debug(u"Updating Config.Config extra_headers[%s] -> %s" % (key.strip(), val.strip()))
1597
+            cfg.extra_headers[key.strip()] = val.strip()
1598
+
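For reference, the --add-header pre-processing above reduces to a single split on the first colon plus a character whitelist; a minimal standalone sketch (the helper name parse_extra_header is hypothetical, not part of s3cmd):

    import re

    def parse_extra_header(hdr):
        try:
            key, val = hdr.split(":", 1)        # split on the first colon only,
        except ValueError:                      # so values may contain ":" themselves
            raise ValueError("Invalid header format: %s" % hdr)
        if re.sub("[a-zA-Z0-9-.]", "", key):    # anything left outside the whitelist?
            raise ValueError("Invalid character(s) in header name '%s'" % key)
        return key.strip(), val.strip()

    # parse_extra_header("Cache-Control: max-age=300") -> ("Cache-Control", "max-age=300")
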
1599
+    ## --acl-grant/--acl-revoke arguments are pre-parsed by OptionS3ACL()
1600
+    if options.acl_grants:
1601
+        for grant in options.acl_grants:
1602
+            cfg.acl_grants.append(grant)
1603
+
1604
+    if options.acl_revokes:
1605
+        for grant in options.acl_revokes:
1606
+            cfg.acl_revokes.append(grant)
1607
+
1608
+    ## Process --(no-)check-md5
1609
+    if options.check_md5 == False:
1610
+        try:
1611
+            cfg.sync_checks.remove("md5")
1612
+        except ValueError:
1613
+            pass
1614
+    if options.check_md5 == True and cfg.sync_checks.count("md5") == 0:
1615
+        cfg.sync_checks.append("md5")
1616
+
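The --(no-)check-md5 handling above is a tri-state pattern: None (option not given) leaves the config file's sync_checks list untouched. A compact equivalent, under that reading:

    def apply_md5_option(sync_checks, check_md5):
        if check_md5 is False and "md5" in sync_checks:
            sync_checks.remove("md5")
        elif check_md5 is True and "md5" not in sync_checks:
            sync_checks.append("md5")
        return sync_checks

    # apply_md5_option(["size", "md5"], False) -> ["size"]
    # apply_md5_option(["size"], None)         -> ["size"]   (unchanged)
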
1617
+    ## Update Config with other parameters
1618
+    for option in cfg.option_list():
1619
+        try:
1620
+            if getattr(options, option) != None:
1621
+                debug(u"Updating Config.Config %s -> %s" % (option, getattr(options, option)))
1622
+                cfg.update_option(option, getattr(options, option))
1623
+        except AttributeError:
1624
+            ## Some Config() options are not settable from command line
1625
+            pass
1626
+
1627
+    ## Special handling for tri-state options (True, False, None)
1628
+    cfg.update_option("enable", options.enable)
1629
+    cfg.update_option("acl_public", options.acl_public)
1630
+
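The same tri-state convention drives --enable/--disable and --acl-public/--acl-private: the explicit update_option calls push the value through even when it is None, which here means "not specified on the command line, keep the current state". In sketch form:

    def resolve_tristate(flag, current):
        # None = option absent; True/False = explicit command-line override
        return current if flag is None else flag
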
1631
+    ## CloudFront's cf_enable and Config's enable share the same --enable switch
1632
+    options.cf_enable = options.enable
1633
+
1634
+    ## CloudFront's cf_logging and Config's log_target_prefix share the same --log-target-prefix switch
1635
+    options.cf_logging = options.log_target_prefix
1636
+
1637
+    ## Update CloudFront options if some were set
1638
+    for option in CfCmd.options.option_list():
1639
+        try:
1640
+            if getattr(options, option) != None:
1641
+                debug(u"Updating CloudFront.Cmd %s -> %s" % (option, getattr(options, option)))
1642
+                CfCmd.options.update_option(option, getattr(options, option))
1643
+        except AttributeError:
1644
+            ## Some CloudFront.Cmd.Options() options are not settable from command line
1645
+            pass
1646
+
1647
+    ## Set output and filesystem encoding for printing out filenames.
1648
+    sys.stdout = codecs.getwriter(cfg.encoding)(sys.stdout, "replace")
1649
+    sys.stderr = codecs.getwriter(cfg.encoding)(sys.stderr, "replace")
1650
+
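What the codecs wrappers above buy: unicode filenames print without raising UnicodeEncodeError, and characters the terminal cannot represent are replaced. A small demonstration of the same idea (forcing an ascii writer for the sake of the example):

    import codecs, sys
    out = codecs.getwriter("ascii")(sys.stdout, "replace")
    out.write(u"na\u00efve file.txt\n")   # prints "na?ve file.txt" instead of crashing
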
1651
+    ## Process --exclude and --exclude-from
1652
+    patterns_list, patterns_textual = process_patterns(options.exclude, options.exclude_from, is_glob = True, option_txt = "exclude")
1653
+    cfg.exclude.extend(patterns_list)
1654
+    cfg.debug_exclude.update(patterns_textual)
1655
+
1656
+    ## Process --rexclude and --rexclude-from
1657
+    patterns_list, patterns_textual = process_patterns(options.rexclude, options.rexclude_from, is_glob = False, option_txt = "rexclude")
1658
+    cfg.exclude.extend(patterns_list)
1659
+    cfg.debug_exclude.update(patterns_textual)
1660
+
1661
+    ## Process --include and --include-from
1662
+    patterns_list, patterns_textual = process_patterns(options.include, options.include_from, is_glob = True, option_txt = "include")
1663
+    cfg.include.extend(patterns_list)
1664
+    cfg.debug_include.update(patterns_textual)
1665
+
1666
+    ## Process --rinclude and --rinclude-from
1667
+    patterns_list, patterns_textual = process_patterns(options.rinclude, options.rinclude_from, is_glob = False, option_txt = "rinclude")
1668
+    cfg.include.extend(patterns_list)
1669
+    cfg.debug_include.update(patterns_textual)
1670
+
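All four blocks above funnel into process_patterns, whose implementation is not shown in this hunk; presumably the is_glob flag decides whether a pattern is first translated from a shell GLOB into a regular expression. A hedged sketch of that translation step:

    import fnmatch, re

    def compile_pattern(pattern, is_glob):
        if is_glob:
            pattern = fnmatch.translate(pattern)   # GLOB -> equivalent regexp
        return re.compile(pattern)

    # compile_pattern("*.tmp", is_glob=True).match("cache.tmp")   -> match
    # compile_pattern("^build/", is_glob=False).match("build/x")  -> match
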
1671
+    ## Set socket read()/write() timeout
1672
+    socket.setdefaulttimeout(cfg.socket_timeout)
1673
+
1674
+    if cfg.encrypt and cfg.gpg_passphrase == "":
1675
+        error(u"Encryption requested but no passphrase set in config file.")
1676
+        error(u"Please re-run 's3cmd --configure' and supply it.")
1677
+        sys.exit(1)
1678
+
1679
+    if options.dump_config:
1680
+        cfg.dump_config(sys.stdout)
1681
+        sys.exit(0)
1682
+
1683
+    if options.run_configure:
1684
+        run_configure(options.config)
1685
+        sys.exit(0)
1686
+
1687
+    if len(args) < 1:
1688
+        error(u"Missing command. Please run with --help for more information.")
1689
+        sys.exit(1)
1690
+
1691
+    ## Unicodise all remaining arguments:
1692
+    args = [unicodise(arg) for arg in args]
1693
+
1694
+    command = args.pop(0)
1695
+    try:
1696
+        debug(u"Command: %s" % commands[command]["cmd"])
1697
+        ## We must do this lookup in an extra step to
1698
+        ## avoid catching all KeyError exceptions
1699
+        ## from inner functions.
1700
+        cmd_func = commands[command]["func"]
1701
+    except KeyError, e:
1702
+        error(u"Invalid command: %s" % e)
1703
+        sys.exit(1)
1704
+
1705
+    if len(args) < commands[command]["argc"]:
1706
+        error(u"Not enough paramters for command '%s'" % command)
1707
+        sys.exit(1)
1708
+
1709
+    try:
1710
+        cmd_func(args)
1711
+    except S3Error, e:
1712
+        error(u"S3 error: %s" % e)
1713
+        sys.exit(1)
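For context, the dispatcher above reads three keys from the commands registry defined earlier in s3cmd; an illustrative entry, with everything beyond the visible "cmd"/"func"/"argc" fields hypothetical:

    commands = {
        "ls": {"cmd": "ls",                  # logged on dispatch
               "func": lambda args: None,    # the real entry points at a cmd_* function
               "argc": 0},                   # minimum number of positional arguments
    }
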
1714 1714
 
1715 1715
 def report_exception(e):
1716
-		sys.stderr.write("""
1716
+        sys.stderr.write("""
1717 1717
 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1718 1718
     An unexpected error has occurred.
1719 1719
   Please report the following lines to:
... ...
@@ -1721,25 +1721,25 @@ def report_exception(e):
1721 1721
 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1722 1722
 
1723 1723
 """)
1724
-		tb = traceback.format_exc(sys.exc_info())
1725
-		e_class = str(e.__class__)
1726
-		e_class = e_class[e_class.rfind(".")+1 : -2]
1727
-		sys.stderr.write(u"Problem: %s: %s\n" % (e_class, e))
1728
-		try:
1729
-			sys.stderr.write("S3cmd:   %s\n" % PkgInfo.version)
1730
-		except NameError:
1731
-			sys.stderr.write("S3cmd:   unknown version. Module import problem?\n")
1732
-		sys.stderr.write("\n")
1733
-		sys.stderr.write(unicode(tb, errors="replace"))
1734
-
1735
-		if type(e) == ImportError:
1736
-			sys.stderr.write("\n")
1737
-			sys.stderr.write("Your sys.path contains these entries:\n")
1738
-			for path in sys.path:
1739
-				sys.stderr.write(u"\t%s\n" % path)
1740
-			sys.stderr.write("Now the question is where have the s3cmd modules been installed?\n")
1741
-
1742
-		sys.stderr.write("""
1724
+        tb = traceback.format_exc(sys.exc_info())
1725
+        e_class = str(e.__class__)
1726
+        e_class = e_class[e_class.rfind(".")+1 : -2]
1727
+        sys.stderr.write(u"Problem: %s: %s\n" % (e_class, e))
1728
+        try:
1729
+            sys.stderr.write("S3cmd:   %s\n" % PkgInfo.version)
1730
+        except NameError:
1731
+            sys.stderr.write("S3cmd:   unknown version. Module import problem?\n")
1732
+        sys.stderr.write("\n")
1733
+        sys.stderr.write(unicode(tb, errors="replace"))
1734
+
1735
+        if type(e) == ImportError:
1736
+            sys.stderr.write("\n")
1737
+            sys.stderr.write("Your sys.path contains these entries:\n")
1738
+            for path in sys.path:
1739
+                sys.stderr.write(u"\t%s\n" % path)
1740
+            sys.stderr.write("Now the question is where have the s3cmd modules been installed?\n")
1741
+
1742
+        sys.stderr.write("""
1743 1743
 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1744 1744
     An unexpected error has occurred.
1745 1745
     Please report the above lines to:
... ...
@@ -1748,41 +1748,43 @@ def report_exception(e):
1748 1748
 """)
1749 1749
 
1750 1750
 if __name__ == '__main__':
1751
-	try:
1752
-		## Our modules
1753
-		## Keep them in try/except block to 
1754
-		## detect any syntax errors in there
1755
-		from S3.Exceptions import *
1756
-		from S3 import PkgInfo
1757
-		from S3.S3 import S3
1758
-		from S3.Config import Config
1759
-		from S3.SortedDict import SortedDict
1760
-		from S3.S3Uri import S3Uri
1761
-		from S3 import Utils
1762
-		from S3.Utils import *
1763
-		from S3.Progress import Progress
1764
-		from S3.CloudFront import Cmd as CfCmd
1765
-		from S3.CloudFront import CloudFront
1766
-		from S3.FileLists import *
1767
-
1768
-		main()
1769
-		sys.exit(0)
1770
-
1771
-	except ImportError, e:
1772
-		report_exception(e)
1773
-		sys.exit(1)
1774
-		
1775
-	except ParameterError, e:
1776
-		error(u"Parameter problem: %s" % e)
1777
-		sys.exit(1)
1778
-
1779
-	except SystemExit, e:
1780
-		sys.exit(e.code)
1781
-
1782
-	except KeyboardInterrupt:
1783
-		sys.stderr.write("See ya!\n")
1784
-		sys.exit(1)
1785
-
1786
-	except Exception, e:
1787
-		report_exception(e)
1788
-		sys.exit(1)
1751
+    try:
1752
+        ## Our modules
1753
+        ## Keep them in try/except block to
1754
+        ## detect any syntax errors in there
1755
+        from S3.Exceptions import *
1756
+        from S3 import PkgInfo
1757
+        from S3.S3 import S3
1758
+        from S3.Config import Config
1759
+        from S3.SortedDict import SortedDict
1760
+        from S3.S3Uri import S3Uri
1761
+        from S3 import Utils
1762
+        from S3.Utils import *
1763
+        from S3.Progress import Progress
1764
+        from S3.CloudFront import Cmd as CfCmd
1765
+        from S3.CloudFront import CloudFront
1766
+        from S3.FileLists import *
1767
+
1768
+        main()
1769
+        sys.exit(0)
1770
+
1771
+    except ImportError, e:
1772
+        report_exception(e)
1773
+        sys.exit(1)
1774
+
1775
+    except ParameterError, e:
1776
+        error(u"Parameter problem: %s" % e)
1777
+        sys.exit(1)
1778
+
1779
+    except SystemExit, e:
1780
+        sys.exit(e.code)
1781
+
1782
+    except KeyboardInterrupt:
1783
+        sys.stderr.write("See ya!\n")
1784
+        sys.exit(1)
1785
+
1786
+    except Exception, e:
1787
+        report_exception(e)
1788
+        sys.exit(1)
1789
+
1790
+# vim:et:ts=4:sts=4:ai
... ...
@@ -19,35 +19,37 @@ from S3.Config import Config
19 19
 from S3.Exceptions import *
20 20
 
21 21
 def display_response(response):
22
-	print "%s\n%s\n%s" % ('-'*40, response['data'], '-'*40)
23
-	
22
+    print "%s\n%s\n%s" % ('-'*40, response['data'], '-'*40)
23
+
24 24
 if __name__ == '__main__':
25
-	if float("%d.%d" %(sys.version_info[0], sys.version_info[1])) < 2.4:
26
-		sys.stderr.write("ERROR: Python 2.4 or higher required, sorry.\n")
27
-		sys.exit(1)
28
-	cfg = Config(os.getenv("HOME")+"/.s3cfg")
25
+    if float("%d.%d" %(sys.version_info[0], sys.version_info[1])) < 2.4:
26
+        sys.stderr.write("ERROR: Python 2.4 or higher required, sorry.\n")
27
+        sys.exit(1)
28
+    cfg = Config(os.getenv("HOME")+"/.s3cfg")
29
+
30
+    logging.root.setLevel(logging.DEBUG)
31
+    sdb = SimpleDB(cfg)
29 32
 
30
-	logging.root.setLevel(logging.DEBUG)
31
-	sdb = SimpleDB(cfg)
33
+    try:
34
+        display_response(sdb.ListDomains())
32 35
 
33
-	try:
34
-		display_response(sdb.ListDomains())
36
+        display_response(sdb.CreateDomain("logix.cz-test"))
35 37
 
36
-		display_response(sdb.CreateDomain("logix.cz-test"))
38
+        display_response(sdb.ListDomains())
37 39
 
38
-		display_response(sdb.ListDomains())
40
+        display_response(sdb.PutAttributes("logix.cz-test", "AbCd", {'First': "One", "Second" : 2, "Third" : u"drei"}))
41
+        display_response(sdb.PutAttributes("logix.cz-test", "XyZ", {'xyz' : ['x', 'y', 'z'], 'Third' : u'traja'}))
39 42
 
40
-		display_response(sdb.PutAttributes("logix.cz-test", "AbCd", {'First': "One", "Second" : 2, "Third" : u"drei"}))
41
-		display_response(sdb.PutAttributes("logix.cz-test", "XyZ", {'xyz' : ['x', 'y', 'z'], 'Third' : u'traja'}))
43
+        display_response(sdb.GetAttributes("logix.cz-test", "AbCd", ['Second', 'Third']))
44
+        display_response(sdb.GetAttributes("logix.cz-test", "XyZ"))
42 45
 
43
-		display_response(sdb.GetAttributes("logix.cz-test", "AbCd", ['Second', 'Third']))
44
-		display_response(sdb.GetAttributes("logix.cz-test", "XyZ"))
46
+        display_response(sdb.Query("logix.cz-test", "['xyz' = 'z']"))
45 47
 
46
-		display_response(sdb.Query("logix.cz-test", "['xyz' = 'z']"))
48
+        display_response(sdb.DeleteDomain("logix.cz-test"))
47 49
 
48
-		display_response(sdb.DeleteDomain("logix.cz-test"))
50
+        display_response(sdb.ListDomains())
51
+    except S3Error, e:
52
+        error(e)
53
+        error(e.info)
49 54
 
50
-		display_response(sdb.ListDomains())
51
-	except S3Error, e:
52
-		error(e)
53
-		error(e.info)
55
+# vim:et:ts=4:sts=4:ai
... ...
@@ -5,74 +5,76 @@ import os
5 5
 import S3.PkgInfo
6 6
 
7 7
 if float("%d.%d" % sys.version_info[:2]) < 2.4:
8
-	sys.stderr.write("Your Python version %d.%d.%d is not supported.\n" % sys.version_info[:3])
9
-	sys.stderr.write("S3cmd requires Python 2.4 or newer.\n")
10
-	sys.exit(1)
8
+    sys.stderr.write("Your Python version %d.%d.%d is not supported.\n" % sys.version_info[:3])
9
+    sys.stderr.write("S3cmd requires Python 2.4 or newer.\n")
10
+    sys.exit(1)
11 11
 
12 12
 try:
13
-	import xml.etree.ElementTree as ET
14
-	print "Using xml.etree.ElementTree for XML processing"
13
+    import xml.etree.ElementTree as ET
14
+    print "Using xml.etree.ElementTree for XML processing"
15 15
 except ImportError, e:
16
-	sys.stderr.write(str(e) + "\n")
17
-	try:
18
-		import elementtree.ElementTree as ET
19
-		print "Using elementtree.ElementTree for XML processing"
20
-	except ImportError, e:
21
-		sys.stderr.write(str(e) + "\n")
22
-		sys.stderr.write("Please install ElementTree module from\n")
23
-		sys.stderr.write("http://effbot.org/zone/element-index.htm\n")
24
-		sys.exit(1)
16
+    sys.stderr.write(str(e) + "\n")
17
+    try:
18
+        import elementtree.ElementTree as ET
19
+        print "Using elementtree.ElementTree for XML processing"
20
+    except ImportError, e:
21
+        sys.stderr.write(str(e) + "\n")
22
+        sys.stderr.write("Please install ElementTree module from\n")
23
+        sys.stderr.write("http://effbot.org/zone/element-index.htm\n")
24
+        sys.exit(1)
25 25
 
26 26
 try:
27
-	## Remove 'MANIFEST' file to force
28
-	## distutils to recreate it.
29
-	## Only in "sdist" stage. Otherwise 
30
-	## it makes life difficult to packagers.
31
-	if sys.argv[1] == "sdist":
32
-		os.unlink("MANIFEST")
27
+    ## Remove 'MANIFEST' file to force
28
+    ## distutils to recreate it.
29
+    ## Only in "sdist" stage. Otherwise
30
+    ## it makes life difficult to packagers.
31
+    if sys.argv[1] == "sdist":
32
+        os.unlink("MANIFEST")
33 33
 except:
34
-	pass
34
+    pass
35 35
 
36 36
 ## Re-create the manpage
37 37
 ## (Beware! Perl script on the loose!!)
38 38
 if sys.argv[1] == "sdist":
39
-	if os.stat_result(os.stat("s3cmd.1")).st_mtime < os.stat_result(os.stat("s3cmd")).st_mtime:
40
-		sys.stderr.write("Re-create man page first!\n")
41
-		sys.stderr.write("Run: ./s3cmd --help | ./format-manpage.pl > s3cmd.1\n")
42
-		sys.exit(1)
39
+    if os.stat_result(os.stat("s3cmd.1")).st_mtime < os.stat_result(os.stat("s3cmd")).st_mtime:
40
+        sys.stderr.write("Re-create man page first!\n")
41
+        sys.stderr.write("Run: ./s3cmd --help | ./format-manpage.pl > s3cmd.1\n")
42
+        sys.exit(1)
43 43
 
44 44
 ## Don't install manpages and docs when $S3CMD_PACKAGING is set
45
-## This was a requirement of Debian package maintainer. 
45
+## This was a requirement of Debian package maintainer.
46 46
 if not os.getenv("S3CMD_PACKAGING"):
47
-	man_path = os.getenv("S3CMD_INSTPATH_MAN") or "share/man"
48
-	doc_path = os.getenv("S3CMD_INSTPATH_DOC") or "share/doc/packages"
49
-	data_files = [	
50
-		(doc_path+"/s3cmd", [ "README", "INSTALL", "NEWS" ]),
51
-		(man_path+"/man1", [ "s3cmd.1" ] ),
52
-	]
47
+    man_path = os.getenv("S3CMD_INSTPATH_MAN") or "share/man"
48
+    doc_path = os.getenv("S3CMD_INSTPATH_DOC") or "share/doc/packages"
49
+    data_files = [
50
+        (doc_path+"/s3cmd", [ "README", "INSTALL", "NEWS" ]),
51
+        (man_path+"/man1", [ "s3cmd.1" ] ),
52
+    ]
53 53
 else:
54
-	data_files = None
54
+    data_files = None
55 55
 
56 56
 ## Main distutils info
57 57
 setup(
58
-	## Content description
59
-	name = S3.PkgInfo.package,
60
-	version = S3.PkgInfo.version,
61
-	packages = [ 'S3' ],
62
-	scripts = ['s3cmd'],
63
-	data_files = data_files,
58
+    ## Content description
59
+    name = S3.PkgInfo.package,
60
+    version = S3.PkgInfo.version,
61
+    packages = [ 'S3' ],
62
+    scripts = ['s3cmd'],
63
+    data_files = data_files,
64 64
 
65
-	## Packaging details
66
-	author = "Michal Ludvig",
67
-	author_email = "michal@logix.cz",
68
-	url = S3.PkgInfo.url,
69
-	license = S3.PkgInfo.license,
70
-	description = S3.PkgInfo.short_description,
71
-	long_description = """
65
+    ## Packaging details
66
+    author = "Michal Ludvig",
67
+    author_email = "michal@logix.cz",
68
+    url = S3.PkgInfo.url,
69
+    license = S3.PkgInfo.license,
70
+    description = S3.PkgInfo.short_description,
71
+    long_description = """
72 72
 %s
73 73
 
74 74
 Authors:
75 75
 --------
76 76
     Michal Ludvig  <michal@logix.cz>
77 77
 """ % (S3.PkgInfo.long_description)
78
-	)
78
+    )
79
+
80
+# vim:et:ts=4:sts=4:ai