
performance testing

aCaB authored on 2010/01/08 09:39:25
Showing 1 changed file
@@ -33,193 +33,6 @@
 #include "cache.h"
 #include "fmap.h"
 
-#define CACHE_PERTURB 8
-/* 1/10th */
-static mpool_t *mempool = NULL;
-static struct CACHE {
-    struct CACHE_ENTRY {
-	unsigned char hash[15];
-	uint32_t dbver;
-	uint32_t hits;
-    } *items;
-    pthread_mutex_t mutex;
-    uint32_t lastdb;
-} *cache = NULL;
-static unsigned int cache_entries = 0;
-
-
-#define TREES 256
-static inline unsigned int getkey(uint8_t *hash) { return *hash; }
-
-/* #define TREES 4096 */
-/* static inline unsigned int getkey(uint8_t *hash) { return hash[0] | ((unsigned int)(hash[1] & 0xf)<<8) ; } */
-
-/* #define TREES 65536 */
-/* static inline unsigned int getkey(uint8_t *hash) { return hash[0] | (((unsigned int)hash[1])<<8) ; } */
-
-
-int cl_cache_init(unsigned int entries) {
-    unsigned int i;
-
-    entries = MAX(entries / (TREES / 256), 10);
-    if(!(mempool = mpool_create())) {
-	cli_errmsg("mpool init fail\n");
-	return 1;
-    }
-    if(!(cache = mpool_malloc(mempool, sizeof(struct CACHE) * TREES))) {
-	cli_errmsg("mpool malloc fail\n");
-	mpool_destroy(mempool);
-	mempool = NULL;
-	return 1;
-    }
-
-    for(i=0; i<TREES; i++) {
-	struct CACHE_ENTRY *e = mpool_calloc(mempool, sizeof(struct CACHE_ENTRY), entries);
-	if(!e) {
-	    cli_errmsg("mpool calloc fail\n");
-	    mpool_destroy(mempool);
-	    mempool = NULL;
-	    return 1;
-	}
-	cache[i].items = e;
-	cache[i].lastdb = 0;
-	if(pthread_mutex_init(&cache[i].mutex, NULL)) {
-	    cli_errmsg("mutex init fail\n");
-	    mpool_destroy(mempool);
-	    mempool = NULL;
-	    cache = NULL;
-	    return 1;
-	}
-    }
-    cache_entries = entries;
-    return 0;
-}
-
-void cache_swap(struct CACHE_ENTRY *e, unsigned int a) {
-    struct CACHE_ENTRY t;
-    unsigned int b = a-1;
-
-    if(!a || e[a].hits <= e[b].hits)
-	return;
-
-    do {
-	if(e[a].hits > e[b].hits)
-	    continue;
-	break;
-    } while(b--);
-    b++;
-
-    memcpy(&t, &e[a], sizeof(t));
-    memcpy(&e[a], &e[b], sizeof(t));
-    memcpy(&e[b], &t, sizeof(t));
-}
-
-static void updb(uint32_t db, unsigned int skip) {
-    unsigned int i;
-    for(i=0; i<TREES; i++) {
-	if(i==skip) continue;
-	if(pthread_mutex_lock(&cache[i].mutex)) {
-	    cli_errmsg("mutex lock fail\n");
-	    continue;
-	}
-	cache[i].lastdb = db;
-	pthread_mutex_unlock(&cache[i].mutex);
-    }
-}
-
-static int cache_lookup_hash(unsigned char *md5, cli_ctx *ctx) {
-    unsigned int i;
-    int ret = CL_VIRUS;
-    unsigned int key = getkey(md5);
-    struct CACHE_ENTRY *e;
-    struct CACHE *c;
-
-    if(!cache) return ret;
-
-    c = &cache[key];
-    e = c->items;
-    if(pthread_mutex_lock(&c->mutex)) {
-	cli_errmsg("mutex lock fail\n");
-	return ret;
-    }
-    if(c->lastdb <= ctx->engine->dbversion[0]) {
-	if(c->lastdb < ctx->engine->dbversion[0]) {
-	    c->lastdb = ctx->engine->dbversion[0];
-	    updb(c->lastdb, key);
-	} else {
-	    for(i=0; i<cache_entries; i++) {
-		if(!e[i].hits) break;
-		if(e[i].dbver == c->lastdb && !memcmp(e[i].hash, md5 + 1, 15)) {
-		    e[i].hits++;
-		    cache_swap(e, i);
-		    ret = CL_CLEAN;
-		    break;
-		}
-	    }
-	}
-    }
-    pthread_mutex_unlock(&c->mutex);
-    return ret;
-}
-
-void cache_add(unsigned char *md5, cli_ctx *ctx) {
-    unsigned int key = getkey(md5);
-    unsigned int i, replace;
-    struct CACHE_ENTRY *e;
-    struct CACHE *c;
-
-    if(!cache) return;
-
-    c = &cache[key];
-    e = c->items;
-    if(pthread_mutex_lock(&c->mutex)) {
-	cli_errmsg("mutex lock fail\n");
-	return;
-    }
-    if(c->lastdb == ctx->engine->dbversion[0]) {
-	replace = cache_entries;
-	for(i=0; i<cache_entries; i++) {
-	    if(!e[i].hits) break;
-	    if(replace == cache_entries && e[i].dbver < c->lastdb) {
-		replace = i;
-	    } else if(e[i].hits && !memcmp(e[i].hash, md5 + 1, 15)) {
-		e[i].hits++;
-		cache_swap(e, i);
-		pthread_mutex_unlock(&c->mutex);
-		return;
-	    }
-	}
-	if(replace == cache_entries)
-	    replace = cache_entries - 1 - (rand() % (cache_entries / CACHE_PERTURB));
-	e[replace].hits = 1;
-	e[replace].dbver = c->lastdb;
-	memcpy(e[replace].hash, md5 + 1, 15);
-	cache_swap(e, replace);
-    }
-    pthread_mutex_unlock(&c->mutex);
-    return;
-}
-
-int cache_check(unsigned char *hash, cli_ctx *ctx) {
-    fmap_t *map = *ctx->fmap;
-    size_t todo = map->len, at = 0;
-    cli_md5_ctx md5;
-
-    if(!cache) return CL_VIRUS;
-
-    cli_md5_init(&md5);
-    while(todo) {
-	void *buf;
-	size_t readme = todo < FILEBUFF ? todo : FILEBUFF;
-	if(!(buf = fmap_need_off_once(map, at, readme)))
-	    return CL_VIRUS;
-	todo -= readme;
-	at += readme;
-	cli_md5_update(&md5, buf, readme);
-    }
-    cli_md5_final(hash, &md5);
-    return cache_lookup_hash(hash, ctx);
-}
 
 /* struct cache_key { */
 /*     char digest[16]; */
@@ -512,3 +325,191 @@ int cache_check(unsigned char *hash, cli_ctx *ctx) {
 /*     return 0; */
 /* } */
 /* #endif */
+
+#define CACHE_PERTURB 8
+/* 1/10th */
+static mpool_t *mempool = NULL;
+static struct CACHE {
+    struct CACHE_ENTRY {
+	unsigned char hash[15];
+	uint32_t dbver;
+	uint32_t hits;
+    } *items;
+    pthread_mutex_t mutex;
+    uint32_t lastdb;
+} *cache = NULL;
+static unsigned int cache_entries = 0;
+
+
+#define TREES 256
+static inline unsigned int getkey(uint8_t *hash) { return *hash; }
+
+/* #define TREES 4096 */
+/* static inline unsigned int getkey(uint8_t *hash) { return hash[0] | ((unsigned int)(hash[1] & 0xf)<<8) ; } */
+
+/* #define TREES 65536 */
+/* static inline unsigned int getkey(uint8_t *hash) { return hash[0] | (((unsigned int)hash[1])<<8) ; } */
+
+
+int cl_cache_init(unsigned int entries) {
+    unsigned int i;
+
+    entries = MAX(entries / (TREES / 256), 10);
+    if(!(mempool = mpool_create())) {
+	cli_errmsg("mpool init fail\n");
+	return 1;
+    }
+    if(!(cache = mpool_malloc(mempool, sizeof(struct CACHE) * TREES))) {
+	cli_errmsg("mpool malloc fail\n");
+	mpool_destroy(mempool);
+	mempool = NULL;
+	return 1;
+    }
+
+    for(i=0; i<TREES; i++) {
+	struct CACHE_ENTRY *e = mpool_calloc(mempool, sizeof(struct CACHE_ENTRY), entries);
+	if(!e) {
+	    cli_errmsg("mpool calloc fail\n");
+	    mpool_destroy(mempool);
+	    mempool = NULL;
+	    return 1;
+	}
+	cache[i].items = e;
+	cache[i].lastdb = 0;
+	if(pthread_mutex_init(&cache[i].mutex, NULL)) {
+	    cli_errmsg("mutex init fail\n");
+	    mpool_destroy(mempool);
+	    mempool = NULL;
+	    cache = NULL;
+	    return 1;
+	}
+    }
+    cache_entries = entries;
+    return 0;
+}
+
+void cache_swap(struct CACHE_ENTRY *e, unsigned int a) {
+    struct CACHE_ENTRY t;
+    unsigned int b = a-1;
+
+    if(!a || e[a].hits <= e[b].hits)
+	return;
+
+    do {
+	if(e[a].hits > e[b].hits)
+	    continue;
+	break;
+    } while(b--);
+    b++;
+
+    memcpy(&t, &e[a], sizeof(t));
+    memcpy(&e[a], &e[b], sizeof(t));
+    memcpy(&e[b], &t, sizeof(t));
+}
+
+static void updb(uint32_t db, unsigned int skip) {
+    unsigned int i;
+    for(i=0; i<TREES; i++) {
+	if(i==skip) continue;
+	if(pthread_mutex_lock(&cache[i].mutex)) {
+	    cli_errmsg("mutex lock fail\n");
+	    continue;
+	}
+	cache[i].lastdb = db;
+	pthread_mutex_unlock(&cache[i].mutex);
+    }
+}
+
+static int cache_lookup_hash(unsigned char *md5, cli_ctx *ctx) {
+    unsigned int i;
+    int ret = CL_VIRUS;
+    unsigned int key = getkey(md5);
+    struct CACHE_ENTRY *e;
+    struct CACHE *c;
+
+    if(!cache) return ret;
+
+    c = &cache[key];
+    e = c->items;
+    if(pthread_mutex_lock(&c->mutex)) {
+	cli_errmsg("mutex lock fail\n");
+	return ret;
+    }
+    if(c->lastdb <= ctx->engine->dbversion[0]) {
+	if(c->lastdb < ctx->engine->dbversion[0]) {
+	    c->lastdb = ctx->engine->dbversion[0];
+	    updb(c->lastdb, key);
+	} else {
+	    for(i=0; i<cache_entries; i++) {
+		if(!e[i].hits) break;
+		if(e[i].dbver == c->lastdb && !memcmp(e[i].hash, md5 + 1, 15)) {
+		    e[i].hits++;
+		    cache_swap(e, i);
+		    ret = CL_CLEAN;
+		    break;
+		}
+	    }
+	}
+    }
+    pthread_mutex_unlock(&c->mutex);
+    return ret;
+}
+
+void cache_add(unsigned char *md5, cli_ctx *ctx) {
+    unsigned int key = getkey(md5);
+    unsigned int i, replace;
+    struct CACHE_ENTRY *e;
+    struct CACHE *c;
+
+    if(!cache) return;
+
+    c = &cache[key];
+    e = c->items;
+    if(pthread_mutex_lock(&c->mutex)) {
+	cli_errmsg("mutex lock fail\n");
+	return;
+    }
+    if(c->lastdb == ctx->engine->dbversion[0]) {
+	replace = cache_entries;
+	for(i=0; i<cache_entries; i++) {
+	    if(!e[i].hits) break;
+	    if(replace == cache_entries && e[i].dbver < c->lastdb) {
+		replace = i;
+	    } else if(e[i].hits && !memcmp(e[i].hash, md5 + 1, 15)) {
+		e[i].hits++;
+		cache_swap(e, i);
+		pthread_mutex_unlock(&c->mutex);
+		return;
+	    }
+	}
+	if(replace == cache_entries)
+	    replace = cache_entries - 1 - (rand() % (cache_entries / CACHE_PERTURB));
+	e[replace].hits = 1;
+	e[replace].dbver = c->lastdb;
+	memcpy(e[replace].hash, md5 + 1, 15);
+	cache_swap(e, replace);
+    }
+    pthread_mutex_unlock(&c->mutex);
+    return;
+}
+
+int cache_check(unsigned char *hash, cli_ctx *ctx) {
+    fmap_t *map = *ctx->fmap;
+    size_t todo = map->len, at = 0;
+    cli_md5_ctx md5;
+
+    if(!cache) return CL_VIRUS;
+
+    cli_md5_init(&md5);
+    while(todo) {
+	void *buf;
+	size_t readme = todo < FILEBUFF ? todo : FILEBUFF;
+	if(!(buf = fmap_need_off_once(map, at, readme)))
+	    return CL_VIRUS;
+	todo -= readme;
+	at += readme;
+	cli_md5_update(&md5, buf, readme);
+    }
+    cli_md5_final(hash, &md5);
+    return cache_lookup_hash(hash, ctx);
+}
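
For context, the relocated code keys each MD5 into one of TREES buckets by its first byte (getkey) and, on every hit, promotes the matching entry toward the front of its bucket via cache_swap, so popular hashes are found early in the linear scan. The standalone sketch below is only an illustration of that promotion step in isolation; the struct name, function name, and sample values are made up for the example and are not part of this commit.

/* Standalone illustration of the hit-count promotion performed by
 * cache_swap() above; struct entry and the sample values are hypothetical. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct entry {
    unsigned char hash[15];
    uint32_t dbver;
    uint32_t hits;
};

/* Swap e[a] with the slot just after the first predecessor holding at
 * least as many hits, keeping the bucket roughly ordered by popularity. */
static void swap_towards_front(struct entry *e, unsigned int a) {
    struct entry t;
    unsigned int b = a - 1;

    if (!a || e[a].hits <= e[b].hits)
        return;

    do {
        if (e[a].hits > e[b].hits)
            continue;
        break;
    } while (b--);
    b++;

    memcpy(&t, &e[a], sizeof(t));
    memcpy(&e[a], &e[b], sizeof(t));
    memcpy(&e[b], &t, sizeof(t));
}

int main(void) {
    struct entry bucket[4] = {
        { "aaaaaaaaaaaaaa", 1, 9 },
        { "bbbbbbbbbbbbbb", 1, 5 },
        { "cccccccccccccc", 1, 2 },
        { "dddddddddddddd", 1, 1 },
    };
    unsigned int i;

    bucket[3].hits = 7;            /* the last entry just became popular */
    swap_towards_front(bucket, 3); /* it moves to index 1, right behind the 9-hit entry */

    for (i = 0; i < 4; i++)
        printf("%u: %u hits\n", i, bucket[i].hits);
    return 0;
}

Replacement victims in cache_add() are then picked near the tail of the bucket, perturbed by rand() % (cache_entries / CACHE_PERTURB), so the least popular slots are recycled first.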