git-svn-id: https://s3tools.svn.sourceforge.net/svnroot/s3tools/s3cmd/trunk@77 830e0280-6d2a-0410-9c65-932aecc39d9d
Michal Ludvig authored on 2007/02/08 11:42:08... | ... |
@@ -9,16 +9,10 @@ import random |
9 | 9 |
import pickle |
10 | 10 |
import sqlite3 |
11 | 11 |
import string |
12 |
+import stat |
|
13 |
+import time |
|
12 | 14 |
|
13 | 15 |
class S3fs(object): |
14 |
- _sync_attrs = [ "tree" ] |
|
15 |
- |
|
16 |
- ## These are instance variables - we must |
|
17 |
- ## catch when they are used uninitialized |
|
18 |
- ### _object_name = ... |
|
19 |
- ### fsname = ... |
|
20 |
- ### tree = ... |
|
21 |
- |
|
22 | 16 |
def __init__(self, fsname = None): |
23 | 17 |
self.n = S3fsObjectName() |
24 | 18 |
if fsname: |
... | ... |
@@ -29,10 +23,10 @@ class S3fs(object): |
29 | 29 |
self._object_name = self.n.fs(fsname) |
30 | 30 |
if self.object_exists(self._object_name): |
31 | 31 |
raise S3fsError("Filesystem '%s' already exists" % fsname, errno.EEXIST) |
32 |
- tree = S3fsTree(self.object_create(self._object_name)) |
|
33 | 32 |
root_inode = S3fsInode(self) |
34 |
- S3fsSync.store(self, root_inode) |
|
35 |
- tree.mkrootdir(root_inode.inode_id) |
|
33 |
+ root_inode.store() |
|
34 |
+ local_name = self.object_create(self._object_name) |
|
35 |
+ S3fsTree(local_name, root_inode_id = root_inode.inode_id) |
|
36 | 36 |
self.store() |
37 | 37 |
|
38 | 38 |
self.openfs(fsname) |
... | ... |
@@ -53,14 +47,28 @@ class S3fs(object): |
53 | 53 |
def store(self): |
54 | 54 |
self.object_store(self.fsname) |
55 | 55 |
|
56 |
+ def mknod(self, name, props = {}): |
|
57 |
+ inode = S3fsInode(self, props = props) |
|
58 |
+ self.tree.mknod(name, inode.inode_id) |
|
59 |
+ inode.store() |
|
60 |
+ return inode |
|
56 | 61 |
|
57 | 62 |
class S3fsTree(object): |
58 |
- def __init__(self, fsfilename): |
|
63 |
+ def __init__(self, fsfilename, root_inode_id = None): |
|
59 | 64 |
print "S3fsTree(%s) opening database" % fsfilename |
60 | 65 |
self._cache = {} |
61 | 66 |
self.conn = sqlite3.connect(fsfilename) |
62 | 67 |
self.conn.isolation_level = None ## Auto-Commit mode |
63 | 68 |
self.c = self.conn.cursor() |
69 |
+ if root_inode_id: |
|
70 |
+ self.mkfs(root_inode_id) |
|
71 |
+ print "Dumping filesystem:" |
|
72 |
+ for row in self.c.execute("SELECT * FROM tree"): |
|
73 |
+ print row |
|
74 |
+ print "Done." |
|
75 |
+ |
|
76 |
+ |
|
77 |
+ def mkfs(self, root_inode_id): |
|
64 | 78 |
try: |
65 | 79 |
self.c.execute(""" |
66 | 80 |
CREATE TABLE tree ( |
... | ... |
@@ -72,22 +80,25 @@ class S3fsTree(object): |
72 | 72 |
) |
73 | 73 |
""") |
74 | 74 |
print "Table 'tree' created" |
75 |
+ root_inode = self.mknod("/", root_inode_id, -1) |
|
75 | 76 |
except sqlite3.OperationalError, e: |
76 | 77 |
if e.message != "table tree already exists": |
77 | 78 |
raise |
78 |
- print "Dumping filesystem:" |
|
79 |
- r = self.c.execute("SELECT * FROM tree") |
|
80 |
- for row in r.fetchall(): |
|
81 |
- print row |
|
82 |
- print "Done." |
|
83 | 79 |
|
84 |
- def mkrootdir(self, id): |
|
85 |
- r = self.c.execute(""" |
|
86 |
- INSERT INTO tree (parent, name, id) |
|
87 |
- VALUES (-1, "/", ?) |
|
88 |
- """, (id,)) |
|
89 |
- self._cache["/"] = (r.lastrowid, id) |
|
90 |
- print "Stored '/': %s" % str(self._cache["/"]) |
|
80 |
+ def mknod(self, name, id, parent = None): |
|
81 |
+ if not parent: |
|
82 |
+ parent = self.get_inode(os.path.dirname(name))[0] |
|
83 |
+ print "mknod(name=%s, id=%s, parent=%s)" % (name, id, parent) |
|
84 |
+ try: |
|
85 |
+ r = self.c.execute(""" |
|
86 |
+ INSERT INTO tree (parent, name, id) |
|
87 |
+ VALUES (?, ?, ?) |
|
88 |
+ """, (parent, os.path.basename(name), id)) |
|
89 |
+ self._cache[name] = (r.lastrowid, id) |
|
90 |
+ except sqlite3.IntegrityError, e: |
|
91 |
+ raise IOError(errno.EEXIST, "Node '%s' already exists" % name) |
|
92 |
+ print "mknod('%s'): %s" % (name, str(self._cache[name])) |
|
93 |
+ return self._cache[name] |
|
91 | 94 |
|
92 | 95 |
def get_inode(self, path): |
93 | 96 |
print "get_inode(%s)" % path |
... | ... |
@@ -96,10 +107,13 @@ class S3fsTree(object): |
96 | 96 |
return self._cache[path] |
97 | 97 |
if not path.startswith("/"): |
98 | 98 |
raise ValueError("get_inode() requires path beginning with '/'") |
99 |
- path = path[1:] |
|
100 |
- pathparts = path.split("/")[1:] |
|
99 |
+ if path in ("/", ""): |
|
100 |
+ pathparts = [] |
|
101 |
+ else: |
|
102 |
+ path = path[1:] |
|
103 |
+ pathparts = path.split("/")[1:] |
|
101 | 104 |
query_from = "tree as t0" |
102 |
- query_where = "t0.parent == -1 AND t0.name == '/'" |
|
105 |
+ query_where = "t0.parent == -1 AND t0.name == ''" |
|
103 | 106 |
join_index = 0 |
104 | 107 |
for p in pathparts: |
105 | 108 |
join_index += 1 |
... | ... |
@@ -112,13 +126,43 @@ class S3fsTree(object): |
112 | 112 |
|
113 | 113 |
print query |
114 | 114 |
retval = self.c.execute(query, pathparts).fetchone() |
115 |
+ if not retval: |
|
116 |
+ raise S3fsError("get_inode(%s): not found" % path, errno.ENOENT) |
|
115 | 117 |
print retval |
116 | 118 |
return retval |
117 | 119 |
|
118 |
-#class S3fsDb(object): |
|
120 |
+class S3fsSync(object): |
|
121 |
+ def store(self, fs, object_name = None): |
|
122 |
+ if not object_name: |
|
123 |
+ object_name = self._object_name |
|
124 |
+ to_sync = {} |
|
125 |
+ for attr in self._sync_attrs: |
|
126 |
+ if hasattr(self, attr): |
|
127 |
+ to_sync[attr] = getattr(self, attr) |
|
128 |
+ fs.object_write(object_name, pickle.dumps(to_sync)) |
|
129 |
+ print "Stored object: %s" % (object_name) |
|
130 |
+ |
|
131 |
+ def load(self, fs, object_name = None): |
|
132 |
+ if not object_name: |
|
133 |
+ object_name = self._object_name |
|
134 |
+ from_sync = pickle.loads(fs.object_read(object_name)) |
|
135 |
+ for attr in self._sync_attrs: |
|
136 |
+ if from_sync.has_key(attr): |
|
137 |
+ setattr(self, attr, from_sync[attr]) |
|
138 |
+ print "Loaded object: %s" % (object_name) |
|
139 |
+ |
|
140 |
+ def try_load(self, fs, object_name = None): |
|
141 |
+ if not object_name: |
|
142 |
+ object_name = self._object_name |
|
143 |
+ if fs.object_exists(object_name): |
|
144 |
+ self.load(fs, object_name) |
|
145 |
+ return True |
|
146 |
+ else: |
|
147 |
+ print "Nonexist object: %s" % (object_name) |
|
148 |
+ return False |
|
119 | 149 |
|
120 | 150 |
|
121 |
-class S3fsInode(object): |
|
151 |
+class S3fsInode(S3fsSync): |
|
122 | 152 |
_fs = None |
123 | 153 |
|
124 | 154 |
## Interface for S3fsSync |
... | ... |
@@ -135,13 +179,17 @@ class S3fsInode(object): |
135 | 135 |
"mode" : 0, |
136 | 136 |
} |
137 | 137 |
|
138 |
- def __init__(self, fs, inode_id = None): |
|
138 |
+ def __init__(self, fs, inode_id = None, props = {}): |
|
139 | 139 |
if not inode_id: |
140 | 140 |
inode_id = fs.n.rndstr(10) |
141 | 141 |
self.inode_id = inode_id |
142 | 142 |
self._object_name = fs.n.inode(fs.fsname, inode_id) |
143 | 143 |
self._fs = fs |
144 |
- S3fsSync.try_load(self._fs, self) |
|
144 |
+ print "S3fsInode._object_name="+self._object_name |
|
145 |
+ if not self.try_load(self._fs): |
|
146 |
+ self.setprop("ctime", time.time()) |
|
147 |
+ for prop in props: |
|
148 |
+ self.setprop(prop, props[prop]) |
|
145 | 149 |
|
146 | 150 |
def setprop(self, property, value): |
147 | 151 |
self.assert_property_name(property) |
... | ... |
@@ -155,6 +203,10 @@ class S3fsInode(object): |
155 | 155 |
def assert_property_name(self, property): |
156 | 156 |
if not self.properties.has_key(property): |
157 | 157 |
raise ValueError("Property '%s' not known to S3fsInode") |
158 |
+ |
|
159 |
+ def store(self, object_name = None): |
|
160 |
+ self.setprop("mtime", time.time()) |
|
161 |
+ S3fsSync.store(self, self._fs, object_name) |
|
158 | 162 |
|
159 | 163 |
class S3fsLocalDir(S3fs): |
160 | 164 |
def __init__(self, directory, fsname = None): |
... | ... |
@@ -233,37 +285,6 @@ class S3fsObjectName(object): |
233 | 233 |
def inode(self, fsname, inode_id): |
234 | 234 |
return "%s-i-%s" % (fsname, inode_id) |
235 | 235 |
|
236 |
-class S3fsSync(object): |
|
237 |
- @staticmethod |
|
238 |
- def store(fs, instance, object_name = None): |
|
239 |
- if not object_name: |
|
240 |
- object_name = instance._object_name |
|
241 |
- to_sync = {} |
|
242 |
- for attr in instance._sync_attrs: |
|
243 |
- if hasattr(instance, attr): |
|
244 |
- to_sync[attr] = getattr(instance, attr) |
|
245 |
- fs.object_write(object_name, pickle.dumps(to_sync)) |
|
246 |
- print "Stored object: %s" % (object_name) |
|
247 |
- |
|
248 |
- @staticmethod |
|
249 |
- def load(fs, instance, object_name = None): |
|
250 |
- if not object_name: |
|
251 |
- object_name = instance._object_name |
|
252 |
- from_sync = pickle.loads(fs.object_read(object_name)) |
|
253 |
- for attr in instance._sync_attrs: |
|
254 |
- if from_sync.has_key(attr): |
|
255 |
- setattr(instance, attr, from_sync[attr]) |
|
256 |
- print "Loaded object: %s" % (object_name) |
|
257 |
- |
|
258 |
- @staticmethod |
|
259 |
- def try_load(fs, instance, object_name = None): |
|
260 |
- if not object_name: |
|
261 |
- object_name = instance._object_name |
|
262 |
- if fs.object_exists(object_name): |
|
263 |
- S3fsSync.load(fs, instance, object_name) |
|
264 |
- else: |
|
265 |
- print "Nonexist object: %s" % (object_name) |
|
266 |
- |
|
267 | 236 |
class S3fsError(Exception): |
268 | 237 |
def __init__(self, message, errno = -1): |
269 | 238 |
Exception.__init__(self, message) |
... | ... |
@@ -289,4 +310,7 @@ if __name__ == "__main__": |
289 | 289 |
print "root_inode(%s).mode = 0%o" % (root_inode.inode_id, root_inode.getprop("mode")) |
290 | 290 |
if root_inode.getprop("mode") == 0: |
291 | 291 |
root_inode.setprop("mode", 0755) |
292 |
- S3fsSync.store(fs, root_inode) |
|
292 |
+ root_inode.store() |
|
293 |
+ fs.mknod("/data", props = {"mode":0755 | stat.S_IFDIR, "uid":11022}) |
|
294 |
+ fs.mknod("/data/share", props = {"mode":0755 | stat.S_IFDIR, "uid":11022}) |
|
295 |
+ |