git-svn-id: https://s3tools.svn.sourceforge.net/svnroot/s3tools/s3py/trunk@43 830e0280-6d2a-0410-9c65-932aecc39d9d
Michal Ludvig authored on 2007/01/13 22:05:294 | 8 |
deleted file mode 100644 |
... | ... |
@@ -1,31 +0,0 @@ |
1 |
class BidirMap:
    """One-to-one bidirectional mapping between keys and values.

    Maintains two dictionaries (key->value and value->key) so lookups
    are O(1) in both directions.  Binding a value that already belongs
    to a different key raises KeyError.
    """

    def __init__(self, **map):
        self.k2v = {}   # key -> value
        self.v2k = {}   # value -> key
        for key in map:
            self.__setitem__(key, map[key])

    def __setitem__(self, key, value):
        # 'value in self.v2k' replaces dict.has_key(), which was
        # deprecated in Python 2 and removed in Python 3.
        if value in self.v2k:
            if self.v2k[value] != key:
                raise KeyError("Value '"+str(value)+"' already in use with key '"+str(self.v2k[value])+"'")
        try:
            # Drop the stale reverse entry if 'key' was already mapped.
            del self.v2k[self.k2v[key]]
        except KeyError:
            pass
        self.k2v[key] = value
        self.v2k[value] = key

    def __getitem__(self, key):
        return self.k2v[key]

    def __str__(self):
        # NOTE(review): renders the value->key direction; presumably
        # intentional, but confirm callers expect v2k here.
        return self.v2k.__str__()

    def getkey(self, value):
        return self.v2k[value]

    def getvalue(self, key):
        return self.k2v[key]
|
30 |
- |
|
31 |
- |
32 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,48 +0,0 @@ |
1 |
-import logging |
|
2 |
-from logging import debug, info, warning, error |
|
3 |
-import re |
|
4 |
- |
|
5 |
class ConfigParser:
    """Minimal INI-style configuration file parser.

    Reads "key = value" pairs from the given file, optionally limited
    to the named [section]s, and stores them in self.cfg.
    """

    def __init__(self, file, sections = None):
        self.cfg = {}
        self.parse_file(file, sections)

    def parse_file(self, file, sections = None):
        """Parse 'file', keeping data only from 'sections'.

        sections may be a single section name, a list of names, or
        None/[] meaning "accept every section".
        """
        # None default instead of a mutable [] default argument.
        if sections is None:
            sections = []
        info("ConfigParser: Reading file '%s'" % file)
        if type(sections) != type([]):
            sections = [sections]
        in_our_section = True
        # Raw strings avoid invalid-escape-sequence warnings.
        r_comment = re.compile(r"^\s*#.*")
        r_empty = re.compile(r"^\s*$")
        r_section = re.compile(r"^\[([^\]]+)\]")
        r_data = re.compile(r"^\s*(?P<key>\w+)\s*=\s*(?P<value>.*)")
        r_quotes = re.compile(r"^\"(.*)\"\s*$")
        f = open(file, "r")
        try:
            for line in f:
                if r_comment.match(line) or r_empty.match(line):
                    continue
                is_section = r_section.match(line)
                if is_section:
                    section = is_section.groups()[0]
                    # Empty 'sections' means every section is wanted.
                    in_our_section = (section in sections) or (len(sections) == 0)
                    continue
                is_data = r_data.match(line)
                if is_data and in_our_section:
                    data = is_data.groupdict()
                    if r_quotes.match(data["value"]):
                        # Strip surrounding double quotes.
                        data["value"] = data["value"][1:-1]
                    debug("ConfigParser: %s->%s" % (data["key"], data["value"]))
                    self.__setitem__(data["key"], data["value"])
                    continue
                warning("Ignoring invalid line in '%s': %s" % (file, line))
        finally:
            # The original leaked the file handle; always close it.
            f.close()

    def __getitem__(self, name):
        return self.cfg[name]

    def __setitem__(self, name, value):
        self.cfg[name] = value

    def get(self, name, default = None):
        # 'in' replaces dict.has_key(), removed in Python 3.
        if name in self.cfg:
            return self.cfg[name]
        return default
49 | 1 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,31 @@ |
0 |
class BidirMap:
    """One-to-one bidirectional mapping between keys and values.

    Maintains two dictionaries (key->value and value->key) so lookups
    are O(1) in both directions.  Binding a value that already belongs
    to a different key raises KeyError.
    """

    def __init__(self, **map):
        self.k2v = {}   # key -> value
        self.v2k = {}   # value -> key
        for key in map:
            self.__setitem__(key, map[key])

    def __setitem__(self, key, value):
        # 'value in self.v2k' replaces dict.has_key(), which was
        # deprecated in Python 2 and removed in Python 3.
        if value in self.v2k:
            if self.v2k[value] != key:
                raise KeyError("Value '"+str(value)+"' already in use with key '"+str(self.v2k[value])+"'")
        try:
            # Drop the stale reverse entry if 'key' was already mapped.
            del self.v2k[self.k2v[key]]
        except KeyError:
            pass
        self.k2v[key] = value
        self.v2k[value] = key

    def __getitem__(self, key):
        return self.k2v[key]

    def __str__(self):
        # NOTE(review): renders the value->key direction; presumably
        # intentional, but confirm callers expect v2k here.
        return self.v2k.__str__()

    def getkey(self, value):
        return self.v2k[value]

    def getvalue(self, key):
        return self.k2v[key]
|
29 |
+ |
|
30 |
+ |
0 | 31 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,48 @@ |
0 |
+import logging |
|
1 |
+from logging import debug, info, warning, error |
|
2 |
+import re |
|
3 |
+ |
|
4 |
class ConfigParser:
    """Minimal INI-style configuration file parser.

    Reads "key = value" pairs from the given file, optionally limited
    to the named [section]s, and stores them in self.cfg.
    """

    def __init__(self, file, sections = None):
        self.cfg = {}
        self.parse_file(file, sections)

    def parse_file(self, file, sections = None):
        """Parse 'file', keeping data only from 'sections'.

        sections may be a single section name, a list of names, or
        None/[] meaning "accept every section".
        """
        # None default instead of a mutable [] default argument.
        if sections is None:
            sections = []
        info("ConfigParser: Reading file '%s'" % file)
        if type(sections) != type([]):
            sections = [sections]
        in_our_section = True
        # Raw strings avoid invalid-escape-sequence warnings.
        r_comment = re.compile(r"^\s*#.*")
        r_empty = re.compile(r"^\s*$")
        r_section = re.compile(r"^\[([^\]]+)\]")
        r_data = re.compile(r"^\s*(?P<key>\w+)\s*=\s*(?P<value>.*)")
        r_quotes = re.compile(r"^\"(.*)\"\s*$")
        f = open(file, "r")
        try:
            for line in f:
                if r_comment.match(line) or r_empty.match(line):
                    continue
                is_section = r_section.match(line)
                if is_section:
                    section = is_section.groups()[0]
                    # Empty 'sections' means every section is wanted.
                    in_our_section = (section in sections) or (len(sections) == 0)
                    continue
                is_data = r_data.match(line)
                if is_data and in_our_section:
                    data = is_data.groupdict()
                    if r_quotes.match(data["value"]):
                        # Strip surrounding double quotes.
                        data["value"] = data["value"][1:-1]
                    debug("ConfigParser: %s->%s" % (data["key"], data["value"]))
                    self.__setitem__(data["key"], data["value"])
                    continue
                warning("Ignoring invalid line in '%s': %s" % (file, line))
        finally:
            # The original leaked the file handle; always close it.
            f.close()

    def __getitem__(self, name):
        return self.cfg[name]

    def __setitem__(self, name, value):
        self.cfg[name] = value

    def get(self, name, default = None):
        # 'in' replaces dict.has_key(), removed in Python 3.
        if name in self.cfg:
            return self.cfg[name]
        return default
0 | 48 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,42 @@ |
0 |
class SortedDictIterator:
    """Iterator over a dictionary's keys, in the order keys() returns.

    Takes a snapshot of the key list at construction time, so the
    dictionary may be modified while iterating.
    """

    def __init__(self, dict):
        self.dict = dict
        # list() guarantees an indexable snapshot even when keys()
        # returns a view object (Python 3).
        self.keys = list(dict.keys())
        self.index = 0
        self.length = len(self.keys)

    def next(self):
        if self.length <= self.index:
            raise StopIteration

        retval = self.keys[self.index]
        self.index += 1
        return retval

    # Python 3 iterator protocol looks for __next__.
    __next__ = next
14 |
+ |
|
15 |
+ |
|
16 |
class SortedDict(dict):
    """Case-insensitive dict that reports its keys in sorted order.

    Keys are lower-cased on insertion and string values are stripped
    of surrounding whitespace.
    """

    def __setitem__(self, name, value):
        try:
            value = value.strip()
        except AttributeError:
            # Non-string values have no strip(); store them as-is.
            # (Narrowed from the original bare 'except:'.)
            pass
        dict.__setitem__(self, name.lower(), value)

    def __iter__(self):
        return SortedDictIterator(self)

    def keys(self):
        # sorted() works on both the Py2 list and the Py3 view returned
        # by dict.keys(), unlike the original in-place list.sort().
        return sorted(dict.keys(self))

    def popitem(self):
        """Remove and return the (key, value) pair with the smallest key."""
        keys = self.keys()
        if len(keys) < 1:
            raise KeyError("popitem(): dictionary is empty")
        retval = (keys[0], dict.__getitem__(self, keys[0]))
        dict.__delitem__(self, keys[0])
        return retval
|
40 |
+ |
|
41 |
+ |
0 | 42 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,65 @@ |
0 |
+import time |
|
1 |
+import re |
|
2 |
+import elementtree.ElementTree as ET |
|
3 |
+ |
|
4 |
def parseNodes(nodes, xmlns = ""):
    """Convert a list of XML element nodes into a list of dicts.

    Each returned dict maps a child tag name (with the 'xmlns' prefix
    stripped) to that child's text content.
    """
    retval = []
    ## Take regexp compilation out of the loop -- the original compiled
    ## it once per node despite the comment saying otherwise.
    if xmlns != "":
        r = re.compile(xmlns)
        fixup = lambda string : r.sub("", string)
    else:
        ## Do-nothing function
        fixup = lambda string : string

    for node in nodes:
        retval_item = {}
        # Iterating the element directly replaces getchildren(),
        # which was deprecated and removed in Python 3.9.
        for child in node:
            name = fixup(child.tag)
            retval_item[name] = node.findtext(".//%s" % child.tag)

        retval.append(retval_item)
    return retval
|
22 |
+ |
|
23 |
def getNameSpace(element):
    """Return the leading "{uri}" namespace prefix of element's tag,
    or "" when the tag carries no namespace."""
    tag = element.tag
    if not tag.startswith("{"):
        return ""
    return re.compile(r"^(\{[^}]+\})").match(tag).groups()[0]
|
27 |
+ |
|
28 |
def getListFromXml(xml, node):
    """Parse the 'xml' string and return a list of dicts, one per
    occurrence of the 'node' element (namespace-aware)."""
    tree = ET.fromstring(xml)
    xmlns = getNameSpace(tree)
    matches = tree.findall('.//%s%s' % (xmlns, node))
    return parseNodes(matches, xmlns)
|
33 |
+ |
|
34 |
def dateS3toPython(date):
    """Parse an S3 timestamp string into a time.struct_time (GMT)."""
    # Normalise the millisecond part to ".000" so a single strptime
    # format string matches every S3 timestamp.
    normalized = re.compile(r"\.\d\d\dZ").sub(".000Z", date)
    return time.strptime(normalized, "%Y-%m-%dT%H:%M:%S.000Z")
|
37 |
+ |
|
38 |
def dateS3toUnix(date):
    """Convert an S3 timestamp string to a Unix timestamp (seconds)."""
    ## FIXME: This should be timezone-aware.
    ## Currently the argument to strptime() is GMT but mktime()
    ## treats it as "localtime". Anyway...
    parsed = dateS3toPython(date)
    return time.mktime(parsed)
|
43 |
+ |
|
44 |
def formatSize(size, human_readable = False):
    """Return a (size, coefficient) tuple.

    Without human_readable the size is returned as-is with an empty
    coefficient.  With human_readable the size is repeatedly divided
    by 1024 and paired with the matching suffix ('k', 'M', 'G', 'T').
    """
    size = int(size)
    if not human_readable:
        return (size, "")
    coeffs = ['k', 'M', 'G', 'T']
    coeff = ""
    # Floor division keeps the result an int on Python 3 too ('/='
    # would drift to float); 'and coeffs' stops at 'T' instead of
    # raising IndexError for absurdly large sizes.
    while size > 2048 and coeffs:
        size //= 1024
        coeff = coeffs.pop(0)
    return (size, coeff)
|
55 |
+ |
|
56 |
def formatDateTime(s3timestamp):
    """Format an S3 timestamp as 'YYYY-MM-DD HH:MM'."""
    parsed = dateS3toPython(s3timestamp)
    return time.strftime("%Y-%m-%d %H:%M", parsed)
|
58 |
+ |
|
59 |
def convertTupleListToDict(list):
    """Build a dictionary from a list of (key, value) tuples.

    Later pairs override earlier ones with the same key.
    """
    # Parameter name 'list' is kept for interface compatibility even
    # though it shadows the builtin; locals avoid shadowing 'tuple'.
    return dict((item[0], item[1]) for item in list)
|
64 |
+ |
1 | 66 |
deleted file mode 100644 |
... | ... |
@@ -1,42 +0,0 @@ |
1 |
class SortedDictIterator:
    """Iterator over a dictionary's keys, in the order keys() returns.

    Takes a snapshot of the key list at construction time, so the
    dictionary may be modified while iterating.
    """

    def __init__(self, dict):
        self.dict = dict
        # list() guarantees an indexable snapshot even when keys()
        # returns a view object (Python 3).
        self.keys = list(dict.keys())
        self.index = 0
        self.length = len(self.keys)

    def next(self):
        if self.length <= self.index:
            raise StopIteration

        retval = self.keys[self.index]
        self.index += 1
        return retval

    # Python 3 iterator protocol looks for __next__.
    __next__ = next
|
15 |
- |
|
16 |
- |
|
17 |
class SortedDict(dict):
    """Case-insensitive dict that reports its keys in sorted order.

    Keys are lower-cased on insertion and string values are stripped
    of surrounding whitespace.
    """

    def __setitem__(self, name, value):
        try:
            value = value.strip()
        except AttributeError:
            # Non-string values have no strip(); store them as-is.
            # (Narrowed from the original bare 'except:'.)
            pass
        dict.__setitem__(self, name.lower(), value)

    def __iter__(self):
        return SortedDictIterator(self)

    def keys(self):
        # sorted() works on both the Py2 list and the Py3 view returned
        # by dict.keys(), unlike the original in-place list.sort().
        return sorted(dict.keys(self))

    def popitem(self):
        """Remove and return the (key, value) pair with the smallest key."""
        keys = self.keys()
        if len(keys) < 1:
            raise KeyError("popitem(): dictionary is empty")
        retval = (keys[0], dict.__getitem__(self, keys[0]))
        dict.__delitem__(self, keys[0])
        return retval
|
41 |
- |
|
42 |
- |
... | ... |
@@ -1,5 +1,10 @@ |
1 | 1 |
#!/usr/bin/env python |
2 | 2 |
|
3 |
+## Amazon S3 manager |
|
4 |
+## Author: Michal Ludvig <michal@logix.cz> |
|
5 |
+## http://www.logix.cz/michal |
|
6 |
+## License: GPL Version 2 |
|
7 |
+ |
|
3 | 8 |
import httplib2 |
4 | 9 |
import sys |
5 | 10 |
import os, os.path |
... | ... |
@@ -16,10 +21,10 @@ from stat import ST_SIZE |
16 | 16 |
import elementtree.ElementTree as ET |
17 | 17 |
|
18 | 18 |
## Our modules |
19 |
-from utils import * |
|
20 |
-from SortedDict import SortedDict |
|
21 |
-from BidirMap import BidirMap |
|
22 |
-from ConfigParser import ConfigParser |
|
19 |
+from S3.utils import * |
|
20 |
+from S3.SortedDict import SortedDict |
|
21 |
+from S3.BidirMap import BidirMap |
|
22 |
+from S3.ConfigParser import ConfigParser |
|
23 | 23 |
|
24 | 24 |
class AwsConfig: |
25 | 25 |
parsed_files = [] |
26 | 26 |
deleted file mode 100644 |
... | ... |
@@ -1,65 +0,0 @@ |
1 |
-import time |
|
2 |
-import re |
|
3 |
-import elementtree.ElementTree as ET |
|
4 |
- |
|
5 |
def parseNodes(nodes, xmlns = ""):
    """Convert a list of XML element nodes into a list of dicts.

    Each returned dict maps a child tag name (with the 'xmlns' prefix
    stripped) to that child's text content.
    """
    retval = []
    ## Take regexp compilation out of the loop -- the original compiled
    ## it once per node despite the comment saying otherwise.
    if xmlns != "":
        r = re.compile(xmlns)
        fixup = lambda string : r.sub("", string)
    else:
        ## Do-nothing function
        fixup = lambda string : string

    for node in nodes:
        retval_item = {}
        # Iterating the element directly replaces getchildren(),
        # which was deprecated and removed in Python 3.9.
        for child in node:
            name = fixup(child.tag)
            retval_item[name] = node.findtext(".//%s" % child.tag)

        retval.append(retval_item)
    return retval

def getNameSpace(element):
    """Return the "{uri}" namespace prefix of element's tag, or ""."""
    if not element.tag.startswith("{"):
        return ""
    return re.compile(r"^(\{[^}]+\})").match(element.tag).groups()[0]

def getListFromXml(xml, node):
    """Parse the 'xml' string and return a list of dicts, one per
    occurrence of the 'node' element (namespace-aware)."""
    tree = ET.fromstring(xml)
    xmlns = getNameSpace(tree)
    nodes = tree.findall('.//%s%s' % (xmlns, node))
    return parseNodes(nodes, xmlns)

def dateS3toPython(date):
    """Parse an S3 timestamp string into a time.struct_time (GMT)."""
    # Normalise the millisecond part so one strptime format matches.
    date = re.compile(r"\.\d\d\dZ").sub(".000Z", date)
    return time.strptime(date, "%Y-%m-%dT%H:%M:%S.000Z")

def dateS3toUnix(date):
    """Convert an S3 timestamp string to a Unix timestamp (seconds)."""
    ## FIXME: This should be timezone-aware.
    ## Currently the argument to strptime() is GMT but mktime()
    ## treats it as "localtime". Anyway...
    return time.mktime(dateS3toPython(date))

def formatSize(size, human_readable = False):
    """Return a (size, coefficient) tuple; with human_readable the
    size is repeatedly divided by 1024 and paired with 'k'/'M'/'G'/'T'."""
    size = int(size)
    if not human_readable:
        return (size, "")
    coeffs = ['k', 'M', 'G', 'T']
    coeff = ""
    # Floor division keeps the result an int on Python 3 ('/=' drifts
    # to float); 'and coeffs' stops at 'T' instead of IndexError.
    while size > 2048 and coeffs:
        size //= 1024
        coeff = coeffs.pop(0)
    return (size, coeff)

def formatDateTime(s3timestamp):
    """Format an S3 timestamp as 'YYYY-MM-DD HH:MM'."""
    return time.strftime("%Y-%m-%d %H:%M", dateS3toPython(s3timestamp))

def convertTupleListToDict(list):
    """Build a dict from a list of (key, value) tuples; later pairs
    override earlier ones.  'list' kept for interface compatibility."""
    retval = {}
    for item in list:
        retval[item[0]] = item[1]
    return retval
|
65 |
- |