git-svn-id: https://s3tools.svn.sourceforge.net/svnroot/s3tools/s3cmd/trunk@79 830e0280-6d2a-0410-9c65-932aecc39d9d
Michal Ludvig authored on 2007/02/19 11:32:171 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,316 +0,0 @@ |
1 |
-## Amazon S3 manager |
|
2 |
-## Author: Michal Ludvig <michal@logix.cz> |
|
3 |
-## http://www.logix.cz/michal |
|
4 |
-## License: GPL Version 2 |
|
5 |
- |
|
6 |
-import os, os.path |
|
7 |
-import errno |
|
8 |
-import random |
|
9 |
-import pickle |
|
10 |
-import sqlite3 |
|
11 |
-import string |
|
12 |
-import stat |
|
13 |
-import time |
|
14 |
- |
|
15 |
class S3fs(object):
    """Filesystem frontend: ties together the object store (provided by a
    subclass such as S3fsLocalDir), the directory tree and the inodes."""

    def __init__(self, fsname=None):
        # Object-name generator shared by all components of this filesystem.
        self.n = S3fsObjectName()
        if fsname:
            self.openfs(fsname)

    def mkfs(self, fsname):
        """Create a new filesystem called `fsname` and open it.

        Raises S3fsError(EEXIST) when the filesystem already exists.
        """
        self.fsname = fsname
        self._object_name = self.n.fs(fsname)
        if self.object_exists(self._object_name):
            raise S3fsError("Filesystem '%s' already exists" % fsname, errno.EEXIST)
        root_inode = S3fsInode(self)
        root_inode.store()
        local_name = self.object_create(self._object_name)
        # Initialise the tree database with the freshly created root inode.
        S3fsTree(local_name, root_inode_id=root_inode.inode_id)
        self.store()

        self.openfs(fsname)

    def openfs(self, fsname):
        """Open an existing filesystem; raises S3fsError(ENOENT) if missing."""
        self.fsname = fsname
        self._object_name = self.n.fs(fsname)
        if not self.object_exists(self._object_name):
            raise S3fsError("Filesystem '%s' does not exist" % fsname, errno.ENOENT)
        self.tree = S3fsTree(self.object_fetch(self._object_name))
        print(self.tree)

    def get_inode(self, path):
        """Resolve absolute `path` to an S3fsInode instance."""
        (inode_num, id) = self.tree.get_inode(path)
        inode = S3fsInode(self, id)
        return inode

    def store(self):
        # NOTE(review): stores under the plain fsname, not self._object_name
        # ("fs-<name>") as openfs() checks for -- looks suspicious; verify.
        self.object_store(self.fsname)

    def mknod(self, name, props=None):
        """Create node `name` with optional inode property overrides `props`."""
        # BUG FIX: `props={}` was a shared mutable default argument.
        inode = S3fsInode(self, props=props or {})
        self.tree.mknod(name, inode.inode_id)
        inode.store()
        return inode
56 |
class S3fsTree(object):
    """Directory tree of one filesystem, backed by a SQLite database.

    Maps absolute paths ("/", "/data", ...) to (tree-row-number, inode-id)
    tuples via a 'tree' table of (parent, name, id) rows; the root row has
    parent == -1 and an empty name.
    """

    def __init__(self, fsfilename, root_inode_id=None):
        print("S3fsTree(%s) opening database" % fsfilename)
        # path -> (row number, inode id); populated by mknod() only.
        self._cache = {}
        self.conn = sqlite3.connect(fsfilename)
        self.conn.isolation_level = None  ## Auto-Commit mode
        self.c = self.conn.cursor()
        if root_inode_id:
            self.mkfs(root_inode_id)
        print("Dumping filesystem:")
        for row in self.c.execute("SELECT * FROM tree"):
            print(row)
        print("Done.")

    def mkfs(self, root_inode_id):
        """Create the 'tree' table and its root row; no-op if already there."""
        try:
            self.c.execute("""
                CREATE TABLE tree (
                    inode INTEGER PRIMARY KEY AUTOINCREMENT,
                    parent INTEGER,
                    name TEXT,
                    id TEXT,
                    UNIQUE (parent, name)
                )
            """)
            print("Table 'tree' created")
            # Root row: parent -1, empty basename.
            root_inode = self.mknod("/", root_inode_id, -1)
        except sqlite3.OperationalError as e:
            # Tolerate re-opening an already initialised database.
            if str(e) != "table tree already exists":
                raise

    def mknod(self, name, id, parent=None):
        """Insert node `name` with inode-id `id`; resolves parent if omitted.

        Returns the cached (row number, inode id) tuple; raises IOError(EEXIST)
        when a node of that name already exists under the same parent.
        """
        # BUG FIX: use 'is None' -- row number 0 would be treated as missing
        # by the old truthiness test.
        if parent is None:
            parent = self.get_inode(os.path.dirname(name))[0]
        print("mknod(name=%s, id=%s, parent=%s)" % (name, id, parent))
        try:
            r = self.c.execute("""
                INSERT INTO tree (parent, name, id)
                VALUES (?, ?, ?)
            """, (parent, os.path.basename(name), id))
            self._cache[name] = (r.lastrowid, id)
        except sqlite3.IntegrityError:
            # UNIQUE(parent, name) violated -> the node already exists.
            raise IOError(errno.EEXIST, "Node '%s' already exists" % name)
        print("mknod('%s'): %s" % (name, str(self._cache[name])))
        return self._cache[name]

    def get_inode(self, path):
        """Resolve absolute `path` to a (row number, inode id) tuple.

        Raises ValueError for non-absolute paths and S3fsError(ENOENT)
        when the path does not exist.
        """
        print("get_inode(%s)" % path)
        print("_cache = %s" % str(self._cache))
        if path in self._cache:
            return self._cache[path]
        if not path.startswith("/"):
            raise ValueError("get_inode() requires path beginning with '/'")
        if path in ("/", ""):
            pathparts = []
        else:
            # BUG FIX: the old code stripped the leading "/" and then ALSO
            # dropped the first component (split("/")[1:]), so "/data"
            # resolved as the root unless it happened to be cached.
            pathparts = path[1:].split("/")
        # Build one self-join of 'tree' per path component, anchored at root.
        query_from = "tree as t0"
        query_where = "t0.parent == -1 AND t0.name == ''"
        join_index = 0
        for p in pathparts:
            join_index += 1
            query_from += " LEFT JOIN tree as t%d" % join_index
            query_where += " AND t%d.parent == t%d.inode AND t%d.name == ?" % \
                           (join_index, join_index - 1, join_index)

        query = "SELECT t%d.inode, t%d.id FROM %s WHERE %s" % \
                (join_index, join_index, query_from, query_where)

        print(query)
        retval = self.c.execute(query, pathparts).fetchone()
        if not retval:
            raise S3fsError("get_inode(%s): not found" % path, errno.ENOENT)
        print(retval)
        return retval
134 |
class S3fsSync(object):
    """Mixin that pickles selected attributes of an object to/from a storage
    backend.

    Subclasses declare `_sync_attrs` (list of attribute names) and
    `_object_name` (default storage key).
    """

    def store(self, fs, object_name=None):
        """Pickle the attributes listed in `_sync_attrs` into `object_name`."""
        if not object_name:
            object_name = self._object_name
        to_sync = {}
        for attr in self._sync_attrs:
            if hasattr(self, attr):
                to_sync[attr] = getattr(self, attr)
        fs.object_write(object_name, pickle.dumps(to_sync))
        print("Stored object: %s" % (object_name))

    def load(self, fs, object_name=None):
        """Restore the attributes previously store()d under `object_name`."""
        if not object_name:
            object_name = self._object_name
        # NOTE(review): pickle.loads is unsafe on untrusted backend content.
        from_sync = pickle.loads(fs.object_read(object_name))
        for attr in self._sync_attrs:
            if attr in from_sync:
                setattr(self, attr, from_sync[attr])
        print("Loaded object: %s" % (object_name))

    def try_load(self, fs, object_name=None):
        """Like load(), but return False instead of failing when missing."""
        if not object_name:
            object_name = self._object_name
        if fs.object_exists(object_name):
            self.load(fs, object_name)
            return True
        else:
            print("Nonexist object: %s" % (object_name))
            return False
165 |
class S3fsInode(S3fsSync):
    """One inode of the filesystem: a property dictionary synced to storage."""

    _fs = None

    ## Interface for S3fsSync
    _sync_attrs = ["properties"]
    # _object_name is set per instance in __init__()

    ## Property defaults -- copied per instance in __init__().
    inode_id = None
    properties = {
        "ctime": 0,
        "mtime": 0,
        "uid": 0,
        "gid": 0,
        "mode": 0,
    }

    def __init__(self, fs, inode_id=None, props=None):
        # BUG FIX: give every instance its own property dict -- the old code
        # mutated the class-level dict via setprop(), sharing state across
        # all inodes that were not loaded from storage.
        self.properties = dict(S3fsInode.properties)
        if not inode_id:
            inode_id = fs.n.rndstr(10)
        self.inode_id = inode_id
        self._object_name = fs.n.inode(fs.fsname, inode_id)
        self._fs = fs
        print("S3fsInode._object_name=" + self._object_name)
        if not self.try_load(self._fs):
            self.setprop("ctime", time.time())
        # BUG FIX: `props={}` was a shared mutable default argument.
        props = props or {}
        for prop in props:
            self.setprop(prop, props[prop])

    def setprop(self, property, value):
        """Set `property` to `value` and return the value."""
        self.assert_property_name(property)
        self.properties[property] = value
        return value

    def getprop(self, property):
        """Return the current value of `property`."""
        self.assert_property_name(property)
        return self.properties[property]

    def assert_property_name(self, property):
        """Raise ValueError for property names outside the known set."""
        if property not in self.properties:
            # BUG FIX: the old message lacked the '%' argument.
            raise ValueError("Property '%s' not known to S3fsInode" % property)

    def store(self, object_name=None):
        """Refresh mtime and persist this inode through S3fsSync."""
        self.setprop("mtime", time.time())
        S3fsSync.store(self, self._fs, object_name)
211 |
class S3fsLocalDir(S3fs):
    """S3fs backend that keeps all objects as files in a local directory
    (stands in for real S3 storage)."""

    def __init__(self, directory, fsname=None):
        if not os.path.isdir(directory):
            # BUG FIX: errno must be the first IOError argument for the
            # exception's .errno attribute to be populated.
            raise IOError(errno.ENOENT, "Directory %s does not exist" % directory)
        self._dir = directory

        ## SubClass must be set to go before calling parent constructor!
        S3fs.__init__(self, fsname)

    def object_exists(self, object_name):
        """Return True if the object exists as a regular file, False if absent.

        Raises S3fsError(EINVAL) when the path exists but is not a file.
        """
        real_path = os.path.join(self._dir, object_name)
        if os.path.isfile(real_path):  ## Is file, all good
            return True
        if os.path.exists(real_path):  ## Exists but is not file!
            raise S3fsError("Object %s (%s) is not a regular file" % (object_name, real_path), errno.EINVAL)
        return False

    def object_write(self, object_name, contents):
        """Write `contents` (bytes) to the object's backing file."""
        real_path = os.path.join(self._dir, object_name)
        # Context manager closes the file even if write() fails.
        with open(real_path, "wb") as f:
            f.write(contents)

    def object_read(self, object_name):
        """Return the object's contents as bytes."""
        real_path = os.path.join(self._dir, object_name)
        with open(real_path, "rb") as f:
            return f.read()

    def object_create(self, object_name):
        """Create object in a temporary directory.

        Returns: real file name on the local filesystem.
        """
        real_path = os.path.join(self._dir, object_name)
        # Load object from S3 to a temporary directory
        return real_path

    def object_fetch(self, object_name):
        """Load object from S3 to a local directory.

        Returns: real file name on the local filesystem.
        """
        real_path = os.path.join(self._dir, object_name)
        return real_path

    def object_store(self, object_name):
        """Store object from a local directory to S3.

        Returns: real file name on the local filesystem.
        """
        real_path = os.path.join(self._dir, object_name)
        # Store file from temporary directory to S3
        return real_path

    def object_real_path(self, object_name):
        """Absolute path of `object_name` inside the backing directory."""
        return os.path.join(self._dir, object_name)
268 |
class S3fsObjectName(object):
    """Generates storage-object names for filesystems and inodes."""

    _rnd_chars = string.ascii_letters + string.digits
    _rnd_chars_len = len(_rnd_chars)

    def __init__(self):
        random.seed()

    def rndstr(self, len):
        """Return a random alphanumeric string of the given length.

        The parameter keeps its historical name 'len' (shadowing the
        builtin) for call compatibility; the builtin is not needed here.
        """
        return "".join(random.choice(self._rnd_chars) for _ in range(len))

    def fs(self, fsname):
        """Object name under which filesystem `fsname` is stored."""
        return "fs-%s" % fsname

    def inode(self, fsname, inode_id):
        """Object name of inode `inode_id` within filesystem `fsname`."""
        return "%s-i-%s" % (fsname, inode_id)
288 |
class S3fsError(Exception):
    """Error raised by S3fs operations.

    Carries an errno-style code (default -1) alongside the message.
    """

    def __init__(self, message, errno=-1):
        super(S3fsError, self).__init__(message)
        self.errno = errno
293 |
if __name__ == "__main__":
    # Demo / smoke test: open (or create) a filesystem backed by /tmp/s3fs
    # and populate it with a couple of directory nodes.
    local_dir = "/tmp/s3fs"
    fsname = "testFs"
    if not os.path.isdir(local_dir):
        os.mkdir(local_dir)

    try:
        fs = S3fsLocalDir(local_dir, fsname)
        print("Filesystem '%s' opened." % fsname)
    except S3fsError as e:
        if e.errno == errno.ENOENT:
            print("Filesystem %s does not exist -> mkfs()" % fsname)
            fs = S3fsLocalDir(local_dir)
            fs.mkfs(fsname)
        else:
            raise
    root_inode = fs.get_inode("/")
    print("root_inode(%s).mode = 0%o" % (root_inode.inode_id, root_inode.getprop("mode")))
    if root_inode.getprop("mode") == 0:
        root_inode.setprop("mode", 0o755)
        root_inode.store()
    fs.mknod("/data", props={"mode": 0o755 | stat.S_IFDIR, "uid": 11022})
    fs.mknod("/data/share", props={"mode": 0o755 | stat.S_IFDIR, "uid": 11022})