@@ -34,313 +34,183 @@
 #include "fmap.h"
 
 
-/* struct cache_key { */
-/* char digest[16]; */
-/* uint32_t size; /\* 0 is used to mark an empty hash slot! *\/ */
-/* struct cache_key *lru_next, *lru_prev; */
-/* }; */
-
-/* struct cache_set { */
-/* struct cache_key *data; */
-/* size_t capacity; */
-/* size_t maxelements; /\* considering load factor *\/ */
-/* size_t elements; */
-/* size_t version; */
-/* struct cache_key *lru_head, *lru_tail; */
-/* pthread_mutex_t mutex; */
-/* }; */
-
-/* #define CACHE_INVALID_VERSION ~0u */
-/* #define CACHE_KEY_DELETED ~0u */
-/* #define CACHE_KEY_EMPTY 0 */
-
-/* /\* size must be power of 2! *\/ */
-/* static int cacheset_init(struct cache_set* map, size_t maxsize, uint8_t loadfactor) */
-/* { */
-/* map->data = cli_calloc(maxsize, sizeof(*map->data)); */
-/* if (!map->data) */
-/* return CL_EMEM; */
-/* map->capacity = maxsize; */
-/* map->maxelements = loadfactor*maxsize / 100; */
-/* map->elements = 0; */
-/* map->version = CACHE_INVALID_VERSION; */
-/* map->lru_head = map->lru_tail = NULL; */
-/* if (pthread_mutex_init(&map->mutex, NULL)) { */
-/* cli_errmsg("mutex init fail\n"); */
-/* return CL_EMEM; */
-/* } */
-/* } */
-
-/* static void cacheset_destroy(struct cache_set *map) */
-/* { */
-/* pthread_mutex_destroy(&map->mutex); */
-/* free(map->data); */
-/* } */
-
-/* static void cacheset_acquire(struct cache_set *map) */
-/* { */
-/* pthread_mutex_lock(&map->mutex); */
-/* } */
-
-/* static void cache_setversion(struct cache_set* map, uint32_t version) */
-/* { */
-/* unsigned i; */
-/* if (map->version == version) */
-/* return; */
-/* map->version = version; */
-/* map->elements = 0; /\* all elements have expired now *\/ */
-/* for (i=0;i<map->capacity;i++) */
-/* map->data[i].size = 0; */
-/* map->lru_head = map->lru_tail = NULL; */
-/* } */
-
-/* static void cacheset_lru_remove(struct cache_set *map, size_t howmany) */
-/* { */
-/* while (howmany--) { */
-/* struct cache_key *old; */
-/* assert(map->lru_head); */
-/* assert(!old->lru_prev); */
-/* // Remove a key from the head of the list */
-/* old = map->lru_head; */
-/* map->lru_head = old->lru_next; */
-/* old->size = CACHE_KEY_DELETED; */
-/* /\* This slot is now deleted, it is not empty, */
-/* * because previously we could have inserted a key that has seen this */
-/* * slot as occupied, to find that key we need to ensure that all keys */
-/* * that were occupied when the key was inserted, are seen as occupied */
-/* * when searching too. */
-/* * Of course when inserting a new value, we treat deleted slots as */
-/* * empty. */
-/* * We only replace old values with new values, but there is no guarantee */
-/* * that the newly inserted value would hash to same place as the value */
-/* * we remove due to LRU! *\/ */
-/* if (old == map->lru_tail) */
-/* map->lru_tail = 0; */
-/* } */
-/* } */
-
-/* static inline uint32_t hash32shift(uint32_t key) */
-/* { */
-/* key = ~key + (key << 15); */
-/* key = key ^ (key >> 12); */
-/* key = key + (key << 2); */
-/* key = key ^ (key >> 4); */
-/* key = (key + (key << 3)) + (key << 11); */
-/* key = key ^ (key >> 16); */
-/* return key; */
-/* } */
-
-/* static inline size_t hash(const unsigned char* k,const size_t len,const size_t SIZE) */
-/* { */
-/* size_t Hash = 1; */
-/* size_t i; */
-/* for(i=0;i<len;i++) { */
-/* /\* a simple add is good, because we use the mixing function below *\/ */
-/* Hash += k[i]; */
-/* /\* mixing function *\/ */
-/* Hash = hash32shift(Hash); */
-/* } */
-/* /\* SIZE is power of 2 *\/ */
-/* return Hash & (SIZE - 1); */
-/* } */
-
-/* int cacheset_lookup_internal(struct cache_set *map, const struct cache_key *key, */
-/* uint32_t *insert_pos, int deletedok) */
-/* { */
-/* uint32_t idx = hash((const unsigned char*)key, sizeof(*key), map->capacity); */
-/* uint32_t tries = 0; */
-/* struct cache_key *k = &map->data[idx]; */
-/* while (k->size != CACHE_KEY_EMPTY) { */
-/* if (k->size == key->size && */
-/* !memcmp(k->digest, key, 16)) { */
-/* /\* found key *\/ */
-/* *insert_pos = idx; */
-/* return 1; */
-/* } */
-/* if (deletedok && k->size == CACHE_KEY_DELETED) { */
-/* /\* treat deleted slot as empty *\/ */
-/* *insert_pos = idx; */
-/* return 0; */
-/* } */
-/* idx = (idx + tries++)&(map->capacity-1); */
-/* k = &map->data[idx]; */
-/* } */
-/* /\* found empty pos *\/ */
-/* *insert_pos = idx; */
-/* return 0; */
-/* } */
-
-/* static inline void lru_remove(struct cache_set *map, struct cache_key *newkey) */
-/* { */
-/* if (newkey->lru_next) */
-/* newkey->lru_next->lru_prev = newkey->lru_prev; */
-/* if (newkey->lru_prev) */
-/* newkey->lru_prev->lru_next = newkey->lru_next; */
-/* if (newkey == map->lru_head) */
-/* map->lru_head = newkey->lru_next; */
-/* } */
-
-/* static inline void lru_addtail(struct cache_set *map, struct cache_key *newkey) */
-/* { */
-/* if (!map->lru_head) */
-/* map->lru_head = newkey; */
-/* if (map->lru_tail) */
-/* map->lru_tail->lru_next = newkey; */
-/* newkey->lru_next = NULL; */
-/* newkey->lru_prev = map->lru_tail; */
-/* map->lru_tail = newkey; */
-/* } */
-
-/* static void cacheset_add(struct cache_set *map, const struct cache_key *key) */
-/* { */
-/* int ret; */
-/* uint32_t pos; */
-/* struct cache_key *newkey; */
-/* if (map->elements >= map->maxelements) */
-/* cacheset_lru_remove(map, 1); */
-/* assert(map->elements < map->maxelements); */
-
-/* ret = cacheset_lookup_internal(map, key, &pos, 1); */
-/* newkey = &map->data[pos]; */
-/* if (ret) { */
-/* /\* was already added, remove from LRU list *\/ */
-/* lru_remove(map, newkey); */
-/* } */
-/* /\* add new key to tail of LRU list *\/ */
-/* lru_addtail(map, newkey); */
-
-/* map->elements++; */
-
-/* assert(pos < map->maxelements); */
-
-/* memcpy(&map->data[pos], key, sizeof(*key)); */
-/* } */
-
-/* static int cacheset_lookup(struct cache_set *map, const struct cache_key *key) */
-/* { */
-/* struct cache_key *newkey; */
-/* int ret; */
-/* uint32_t pos; */
-/* ret = cacheset_lookup_internal(map, key, &pos, 0); */
-/* if (!ret) */
-/* return CACHE_INVALID_VERSION; */
-/* newkey = &map->data[pos]; */
-/* /\* update LRU position: move to tail *\/ */
-/* lru_remove(map, newkey); */
-/* lru_addtail(map, newkey); */
-
-/* return map->version; */
-/* } */
-
-/* static void cacheset_release(struct cache_set *map) */
-/* { */
-/* pthread_mutex_unlock(&map->mutex); */
-/* } */
-
-/* #if 0 */
-/* int main(int argc, char **argv) */
-/* { */
-/* struct cache_key key; */
-/* struct cache_set map; */
-/* cacheset_init(&map, 256, 80); */
-/* cacheset_acquire(&map); */
-/* cache_setversion(&map, 10); */
-
-/* key.size = 1024; */
-/* memcpy(key.digest, "1234567890123456", 16); */
-/* cacheset_add(&map, &key); */
-/* memcpy(key.digest, "1234567890123457", 16); */
-/* cacheset_add(&map, &key); */
-/* memcpy(key.digest, "0123456789012345", 16); */
-/* cacheset_add(&map, &key); */
-
-/* key.size = 1024; */
-/* memcpy(key.digest, "1234567890123456", 16); */
-/* if (cacheset_lookup(&map, &key) != 10) */
-/* abort(); */
-/* memcpy(key.digest, "1234567890123456", 16); */
-/* if (cacheset_lookup(&map, &key) != 10) */
-/* abort(); */
-/* memcpy(key.digest, "1234567890123457", 16); */
-/* if (cacheset_lookup(&map, &key) != 10) */
-/* abort(); */
-/* memcpy(key.digest, "0123456789012345", 16); */
-/* if (cacheset_lookup(&map, &key) != 10) */
-/* abort(); */
-/* memcpy(key.digest, "0123456789012346", 16); */
-/* if (cacheset_lookup(&map, &key) == 10) */
-/* abort(); */
-
-/* cache_setversion(&map, 1); */
-/* memcpy(key.digest, "1234567890123456", 16); */
-/* if (cacheset_lookup(&map, &key) != CACHE_INVALID_VERSION) */
-/* abort(); */
-/* memcpy(key.digest, "1234567890123456", 16); */
-/* if (cacheset_lookup(&map, &key) != CACHE_INVALID_VERSION) */
-/* abort(); */
-/* memcpy(key.digest, "1234567890123457", 16); */
-/* if (cacheset_lookup(&map, &key) != CACHE_INVALID_VERSION) */
-/* abort(); */
-/* memcpy(key.digest, "0123456789012345", 16); */
-/* if (cacheset_lookup(&map, &key) != CACHE_INVALID_VERSION) */
-/* abort(); */
-
-/* cacheset_release(&map); */
-
-/* cacheset_destroy(&map); */
-
-/* cacheset_init(&map, 8, 50); */
-/* cacheset_acquire(&map); */
-/* cache_setversion(&map, 10); */
-
-/* key.size = 416; */
-/* memcpy(key.digest, "1234567890123456", 16); */
-/* cacheset_add(&map, &key); */
-/* memcpy(key.digest, "1234567890123457", 16); */
-/* cacheset_add(&map, &key); */
-/* memcpy(key.digest, "1234567890123459", 16); */
-/* cacheset_add(&map, &key); */
-/* key.size = 400; */
-/* memcpy(key.digest, "1234567890123450", 16); */
-/* cacheset_add(&map, &key); */
-
-/* key.size = 416; */
-/* memcpy(key.digest, "1234567890123456", 16); */
-/* if (cacheset_lookup(&map, &key) != 10) */
-/* abort(); */
-/* if (cacheset_lookup(&map, &key) != 10) */
-/* abort(); */
-/* if (cacheset_lookup(&map, &key) != 10) */
-/* abort(); */
-
-/* key.size = 500; */
-/* cacheset_add(&map, &key); */
-/* memcpy(key.digest, "1234567890123457", 16); */
-/* if (cacheset_lookup(&map, &key) == 10) */
-/* abort(); */
-
-/* cacheset_release(&map); */
-/* cacheset_destroy(&map); */
-
-/* return 0; */
-/* } */
-/* #endif */
-
-#define CACHE_PERTURB 8
-/* 1/10th */
+struct cache_key {
+    char digest[16];
+    uint32_t size; /* 0 is used to mark an empty hash slot! */
+    struct cache_key *lru_next, *lru_prev;
+};
+
+struct cache_set {
+    struct cache_key *data;
+    size_t capacity;
+    size_t maxelements; /* considering load factor */
+    size_t elements;
+    size_t version;
+    struct cache_key *lru_head, *lru_tail;
+};
+
+#define CACHE_INVALID_VERSION ~0u
+#define CACHE_KEY_DELETED ~0u
+#define CACHE_KEY_EMPTY 0
+
+static void cache_setversion(struct cache_set* map, uint32_t version)
+{
+    unsigned i;
+    if (map->version == version)
+        return;
+    map->version = version;
+    map->elements = 0; /* all elements have expired now */
+    for (i=0;i<map->capacity;i++)
+        map->data[i].size = 0;
+    map->lru_head = map->lru_tail = NULL;
+}
+
+static void cacheset_lru_remove(struct cache_set *map, size_t howmany)
+{
+    while (howmany--) {
+        struct cache_key *old;
+        assert(map->lru_head);
+        /* Remove a key from the head of the list */
+        old = map->lru_head;
+        assert(!old->lru_prev);
+        map->lru_head = old->lru_next;
+        old->size = CACHE_KEY_DELETED;
+        /* This slot is now deleted, it is not empty,
+         * because previously we could have inserted a key that has seen this
+         * slot as occupied; to find that key we need to ensure that all slots
+         * that were occupied when the key was inserted are still seen as
+         * occupied when searching.
+         * Of course when inserting a new value, we treat deleted slots as
+         * empty.
+         * We only replace old values with new values, but there is no guarantee
+         * that the newly inserted value would hash to the same place as the
+         * value we remove due to LRU! */
+        if (old == map->lru_tail)
+            map->lru_tail = 0;
+    }
+}
+
+static inline uint32_t hash32shift(uint32_t key)
+{
+    key = ~key + (key << 15);
+    key = key ^ (key >> 12);
+    key = key + (key << 2);
+    key = key ^ (key >> 4);
+    key = (key + (key << 3)) + (key << 11);
+    key = key ^ (key >> 16);
+    return key;
+}
+
+static inline size_t hash(const unsigned char* k,const size_t len,const size_t SIZE)
+{
+    size_t Hash = 1;
+    size_t i;
+    for(i=0;i<len;i++) {
+        /* a simple add is good, because we use the mixing function below */
+        Hash += k[i];
+        /* mixing function */
+        Hash = hash32shift(Hash);
+    }
+    /* SIZE is power of 2 */
+    return Hash & (SIZE - 1);
+}
+
+int cacheset_lookup_internal(struct cache_set *map, const struct cache_key *key,
+                             uint32_t *insert_pos, int deletedok)
+{
+    uint32_t idx = hash((const unsigned char*)key, sizeof(*key), map->capacity);
+    uint32_t tries = 0;
+    struct cache_key *k = &map->data[idx];
+    while (k->size != CACHE_KEY_EMPTY) {
+        if (k->size == key->size &&
+            !memcmp(k->digest, key, 16)) {
+            /* found key */
+            *insert_pos = idx;
+            return 1;
+        }
+        if (deletedok && k->size == CACHE_KEY_DELETED) {
+            /* treat deleted slot as empty */
+            *insert_pos = idx;
+            return 0;
+        }
+        idx = (idx + tries++)&(map->capacity-1);
+        k = &map->data[idx];
+    }
+    /* found empty pos */
+    *insert_pos = idx;
+    return 0;
+}
+
+static inline void lru_remove(struct cache_set *map, struct cache_key *newkey)
+{
+    if (newkey->lru_next)
+        newkey->lru_next->lru_prev = newkey->lru_prev;
+    if (newkey->lru_prev)
+        newkey->lru_prev->lru_next = newkey->lru_next;
+    if (newkey == map->lru_head)
+        map->lru_head = newkey->lru_next;
+}
+
+static inline void lru_addtail(struct cache_set *map, struct cache_key *newkey)
+{
+    if (!map->lru_head)
+        map->lru_head = newkey;
+    if (map->lru_tail)
+        map->lru_tail->lru_next = newkey;
+    newkey->lru_next = NULL;
+    newkey->lru_prev = map->lru_tail;
+    map->lru_tail = newkey;
+}
+
+static void cacheset_add(struct cache_set *map, const struct cache_key *key)
+{
+    int ret;
+    uint32_t pos;
+    struct cache_key *newkey;
+    if (map->elements >= map->maxelements)
+        cacheset_lru_remove(map, 1);
+    assert(map->elements < map->maxelements);
+
+    ret = cacheset_lookup_internal(map, key, &pos, 1);
+    newkey = &map->data[pos];
+    if (ret) {
+        /* was already added, remove from LRU list */
+        lru_remove(map, newkey);
+    }
+    /* add new key to tail of LRU list */
+    memcpy(&map->data[pos], key, sizeof(*key));
+    lru_addtail(map, newkey);
+
+    map->elements++;
+
+    assert(pos < map->maxelements);
+
+}
+
+static int cacheset_lookup(struct cache_set *map, const struct cache_key *key)
+{
+    struct cache_key *newkey;
+    int ret;
+    uint32_t pos;
+    ret = cacheset_lookup_internal(map, key, &pos, 0);
+    if (!ret)
+        return CACHE_INVALID_VERSION;
+    newkey = &map->data[pos];
+    /* update LRU position: move to tail */
+    lru_remove(map, newkey);
+    lru_addtail(map, newkey);
+
+    return map->version;
+}
+
 static mpool_t *mempool = NULL;
 static struct CACHE {
-    struct CACHE_ENTRY {
-        unsigned char hash[15];
-        uint32_t dbver;
-        uint32_t hits;
-    } *items;
+    struct cache_set cacheset;
     pthread_mutex_t mutex;
     uint32_t lastdb;
 } *cache = NULL;
 static unsigned int cache_entries = 0;
 
-
 #define TREES 256
 static inline unsigned int getkey(uint8_t *hash) { return *hash; }
 
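The replacement table is open-addressed: cacheset_lookup_internal probes with a growing step, `idx = (idx + tries++) & (capacity - 1)`, and LRU eviction marks a slot CACHE_KEY_DELETED rather than CACHE_KEY_EMPTY, so probe chains built while the slot was occupied still find their keys. A minimal standalone sketch of that probe-and-tombstone behaviour (hypothetical names, trivial "hash", not part of the patch):

/* sketch only: mirrors the probing and tombstone idea from the patch */
#include <assert.h>
#include <stdint.h>

#define SLOTS        8           /* power of two, like map->capacity */
#define SLOT_EMPTY   0
#define SLOT_DELETED UINT32_MAX  /* tombstone, like CACHE_KEY_DELETED */

struct slot { uint32_t tag; };   /* tag plays the role of cache_key.size */

/* Return 1 if tag is present; otherwise 0 with *pos set to the slot where it
 * would go (an empty slot, or a deleted one when deletedok is set). */
static int probe(const struct slot *t, uint32_t tag, uint32_t start,
                 uint32_t *pos, int deletedok)
{
    uint32_t idx = start & (SLOTS - 1), tries = 0;
    while (t[idx].tag != SLOT_EMPTY) {
        if (t[idx].tag == tag) { *pos = idx; return 1; }
        if (deletedok && t[idx].tag == SLOT_DELETED) { *pos = idx; return 0; }
        idx = (idx + tries++) & (SLOTS - 1);  /* step grows: 0, 1, 2, ... */
    }
    *pos = idx;
    return 0;
}

int main(void)
{
    struct slot t[SLOTS] = {{0}};
    uint32_t pos;

    probe(t, 42, 3, &pos, 1); t[pos].tag = 42;  /* insert into the free slot */
    assert(probe(t, 42, 3, &pos, 0) == 1);      /* lookup hits */
    t[pos].tag = SLOT_DELETED;                  /* "evict": tombstone, not empty */
    assert(probe(t, 42, 3, &pos, 0) == 0);      /* miss, but probing walks past it */
    return 0;
}

Note the first probe step is 0, so the starting slot is re-checked once before the stride starts growing; the patch's loop behaves the same way.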
@@ -367,15 +237,6 @@ int cl_cache_init(unsigned int entries) {
     }
 
     for(i=0; i<TREES; i++) {
-        struct CACHE_ENTRY *e = mpool_calloc(mempool, sizeof(struct CACHE_ENTRY), entries);
-        if(!e) {
-            cli_errmsg("mpool calloc fail\n");
-            mpool_destroy(mempool);
-            mempool = NULL;
-            return 1;
-        }
-        cache[i].items = e;
-        cache[i].lastdb = 0;
         if(pthread_mutex_init(&cache[i].mutex, NULL)) {
             cli_errmsg("mutex init fail\n");
             mpool_destroy(mempool);
@@ -383,112 +244,57 @@ int cl_cache_init(unsigned int entries) {
             cache = NULL;
             return 1;
         }
+
+        cache[i].cacheset.data = mpool_calloc(mempool, 256, sizeof(*cache[i].cacheset.data));
+        if (!cache[i].cacheset.data)
+            return CL_EMEM;
+        cache[i].cacheset.capacity = 256;
+        cache[i].cacheset.maxelements = 80*256 / 100;
+        cache[i].cacheset.elements = 0;
+        cache[i].cacheset.version = CACHE_INVALID_VERSION;
+        cache[i].cacheset.lru_head = cache[i].cacheset.lru_tail = NULL;
+        cache_setversion(&cache[i].cacheset, 1337);
     }
     cache_entries = entries;
     return 0;
 }
 
-void cache_swap(struct CACHE_ENTRY *e, unsigned int a) {
-    struct CACHE_ENTRY t;
-    unsigned int b = a-1;
-
-    if(!a || e[a].hits <= e[b].hits)
-        return;
-
-    do {
-        if(e[a].hits > e[b].hits)
-            continue;
-        break;
-    } while(b--);
-    b++;
-
-    memcpy(&t, &e[a], sizeof(t));
-    memcpy(&e[a], &e[b], sizeof(t));
-    memcpy(&e[b], &t, sizeof(t));
-}
-
-static void updb(uint32_t db, unsigned int skip) {
-    unsigned int i;
-    for(i=0; i<TREES; i++) {
-        if(i==skip) continue;
-        if(pthread_mutex_lock(&cache[i].mutex)) {
-            cli_errmsg("mutex lock fail\n");
-            continue;
-        }
-        cache[i].lastdb = db;
-        pthread_mutex_unlock(&cache[i].mutex);
-    }
-}
-
 static int cache_lookup_hash(unsigned char *md5, cli_ctx *ctx) {
-    unsigned int i;
+    struct cache_key entry;
     int ret = CL_VIRUS;
     unsigned int key = getkey(md5);
-    struct CACHE_ENTRY *e;
     struct CACHE *c;
 
     if(!cache) return ret;
 
     c = &cache[key];
-    e = c->items;
     if(pthread_mutex_lock(&c->mutex)) {
        cli_errmsg("mutex lock fail\n");
        return ret;
     }
-    if(c->lastdb <= ctx->engine->dbversion[0]) {
-        if(c->lastdb < ctx->engine->dbversion[0]) {
-            c->lastdb = ctx->engine->dbversion[0];
-            updb(c->lastdb, key);
-        } else {
-            for(i=0; i<cache_entries; i++) {
-                if(!e[i].hits) break;
-                if(e[i].dbver == c->lastdb && !memcmp(e[i].hash, md5 + 1, 15)) {
-                    e[i].hits++;
-                    cache_swap(e, i);
-                    ret = CL_CLEAN;
-                    break;
-                }
-            }
-        }
-    }
+    entry.size = 1024;
+    memcpy(entry.digest, md5, 16);
+    ret = (cacheset_lookup(&c->cacheset, &entry) == 1337) ? CL_CLEAN : CL_VIRUS;
     pthread_mutex_unlock(&c->mutex);
+    if(ret == CL_CLEAN) cli_warnmsg("cached\n");
     return ret;
 }
 
 void cache_add(unsigned char *md5, cli_ctx *ctx) {
+    struct cache_key entry;
     unsigned int key = getkey(md5);
-    unsigned int i, replace;
-    struct CACHE_ENTRY *e;
     struct CACHE *c;
 
     if(!cache) return;
 
     c = &cache[key];
-    e = c->items;
     if(pthread_mutex_lock(&c->mutex)) {
        cli_errmsg("mutex lock fail\n");
        return;
     }
-    if(c->lastdb == ctx->engine->dbversion[0]) {
-        replace = cache_entries;
-        for(i=0; i<cache_entries; i++) {
-            if(!e[i].hits) break;
-            if(replace == cache_entries && e[i].dbver < c->lastdb) {
-                replace = i;
-            } else if(e[i].hits && !memcmp(e[i].hash, md5 + 1, 15)) {
-                e[i].hits++;
-                cache_swap(e, i);
-                pthread_mutex_unlock(&c->mutex);
-                return;
-            }
-        }
-        if(replace == cache_entries)
-            replace = cache_entries - 1 - (rand() % (cache_entries / CACHE_PERTURB));
-        e[replace].hits = 1;
-        e[replace].dbver = c->lastdb;
-        memcpy(e[replace].hash, md5 + 1, 15);
-        cache_swap(e, replace);
-    }
+    entry.size = 1024;
+    memcpy(entry.digest, md5, 16);
+    cacheset_add(&c->cacheset, &entry);
     pthread_mutex_unlock(&c->mutex);
     return;
 }
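For reference, the mixing step in the new code looks like Thomas Wang's widely circulated hash32shift: the key is folded into it a byte at a time and the result is masked with the power-of-two capacity, while the top-level tree is picked by the first digest byte (getkey). A small standalone demo of that folding; fold() and the demo digests are hypothetical, and note the patch itself hashes sizeof(struct cache_key) bytes of the whole key rather than just the 16 digest bytes:

/* sketch only: demonstrates the byte-folding hash, not part of the patch */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t hash32shift(uint32_t key)   /* same mix as in the patch */
{
    key = ~key + (key << 15);
    key = key ^ (key >> 12);
    key = key + (key << 2);
    key = key ^ (key >> 4);
    key = (key + (key << 3)) + (key << 11);
    key = key ^ (key >> 16);
    return key;
}

static size_t fold(const unsigned char *k, size_t len, size_t size)
{
    size_t h = 1, i;
    for (i = 0; i < len; i++)
        h = hash32shift((uint32_t)(h + k[i]));  /* add a byte, then mix */
    return h & (size - 1);                      /* size must be a power of two */
}

int main(void)
{
    unsigned char d1[16] = "1234567890123456";
    unsigned char d2[16] = "1234567890123457";  /* differs only in the last byte */

    /* tree = first digest byte (getkey), slot = folded hash within 256 entries */
    printf("d1: tree %u, slot %zu\n", (unsigned)d1[0], fold(d1, sizeof d1, 256));
    printf("d2: tree %u, slot %zu\n", (unsigned)d2[0], fold(d2, sizeof d2, 256));
    return 0;
}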