fill the header while the mutex is held; same for prefaulting the pages

acab authored on 2009/09/12 05:58:31
Showing 1 changed file
... ...
@@ -59,9 +59,6 @@
 #define UNPAGE_THRSHLD_HI 8*1024*1024
 #define READAHEAD_PAGES 8
 
-/* FIXME: remove the malloc fallback, it only makes thing slower */
-#define DUMB_SIZE 0
-
 /* DON'T ASK ME */
 pthread_mutex_t fmap_mutex = PTHREAD_MUTEX_INITIALIZER;
 
... ...
@@ -105,24 +102,25 @@ struct F_MAP *fmap(int fd, off_t offset, size_t len) {
     pages = fmap_align_items(len, pgsz);
     hdrsz = fmap_align_to(sizeof(struct F_MAP) + pages * sizeof(uint32_t), pgsz);
     mapsz = pages * pgsz + hdrsz;
+    pthread_mutex_lock(&fmap_mutex);
 #if HAVE_MMAP
-    if(mapsz >= DUMB_SIZE) {
-	pthread_mutex_lock(&fmap_mutex);
-	if ((m = (struct F_MAP *)mmap(NULL, mapsz, PROT_READ | PROT_WRITE, MAP_PRIVATE|/*FIXME: MAP_POPULATE is ~8% faster but more memory intensive */ANONYMOUS_MAP, -1, 0)) == MAP_FAILED)
-	    m = NULL;
-	else {
-	    dumb = 0;
-	    madvise(m, mapsz, MADV_RANDOM|MADV_DONTFORK);
-	    madvise(m, hdrsz, MADV_WILLNEED);
-	}
-	pthread_mutex_unlock(&fmap_mutex);
-    } else
+    if ((m = (struct F_MAP *)mmap(NULL, mapsz, PROT_READ | PROT_WRITE, MAP_PRIVATE|/*FIXME: MAP_POPULATE is ~8% faster but more memory intensive */ANONYMOUS_MAP, -1, 0)) == MAP_FAILED) {
+	m = NULL;
+    } else {
+	dumb = 0;
+	madvise(m, mapsz, MADV_RANDOM|MADV_DONTFORK);
+    }
+#else
+    m = (struct F_MAP *)cli_malloc(mapsz);
 #endif
-	m = (struct F_MAP *)cli_malloc(mapsz);
     if(!m) {
 	cli_warnmsg("fmap: map allocation failed\n");
+	pthread_mutex_unlock(&fmap_mutex);
 	return NULL;
     }
+    /* fault the header while we still have the lock - we DO context switch here a lot here :@ */
+    memset(m->bitmap, 0, sizeof(uint32_t) * pages);
+    pthread_mutex_unlock(&fmap_mutex);
     m->fd = fd;
     m->dumb = dumb;
     m->mtime = st.st_mtime;
... ...
@@ -132,7 +130,6 @@ struct F_MAP *fmap(int fd, off_t offset, size_t len) {
     m->hdrsz = hdrsz;
     m->pgsz = pgsz;
     m->paged = 0;
-    memset(m->bitmap, 0, sizeof(uint32_t) * pages);
 #ifdef FMAPDEBUG
     m->page_needs = 0;
     m->page_reads = 0;
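
Taken together, the two hunks above move the bitmap memset inside the critical section: the allocation now happens with fmap_mutex already held, the error path releases the lock before returning, and zeroing the header (which faults its pages in) completes before the mutex is dropped. A minimal sketch of that pattern, assuming POSIX threads; map_hdr, alloc_map, map_mutex and HDR_WORDS are illustrative names, not part of this source.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define HDR_WORDS 1024 /* illustrative header size */

static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER; /* stands in for fmap_mutex */

struct map_hdr {
    uint32_t bitmap[HDR_WORDS];
};

static struct map_hdr *alloc_map(void) {
    struct map_hdr *m;

    pthread_mutex_lock(&map_mutex);
    m = malloc(sizeof(*m)); /* the real code uses anonymous mmap when available */
    if (!m) {
        pthread_mutex_unlock(&map_mutex); /* the error path must unlock too */
        return NULL;
    }
    /* zero (and thereby fault in) the header pages while the lock is held */
    memset(m->bitmap, 0, sizeof(m->bitmap));
    pthread_mutex_unlock(&map_mutex);
    return m;
}
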
... ...
@@ -216,12 +213,16 @@ static void fmap_aging(struct F_MAP *m) {
 static int fmap_readpage(struct F_MAP *m, unsigned int first_page, unsigned int count, unsigned int lock_count) {
     size_t readsz = 0, got;
     char *pptr = NULL;
-    volatile uint32_t s;
+    uint32_t s;
     unsigned int i, page = first_page, force_read = 0;
 
-    for(i=0; i<count; i++) { /* REAL MEN DON'T MADVISE: seriously, it sucks! */
-	volatile char faultme = ((char *)m)[(first_page+i) * m->pgsz + m->hdrsz];
+    pthread_mutex_lock(&fmap_mutex);
+    for(i=0; i<count; i++) { /* prefault */
+    	/* Not worth checking if the page is already paged, just ping each */
+	/* Also not worth reusing the loop below */
+    	volatile char faultme = ((char *)m)[(first_page+i) * m->pgsz + m->hdrsz];
     }
+    pthread_mutex_unlock(&fmap_mutex);
 #ifdef FMAPDEBUG
     m->page_needs += count;
     m->page_locks += lock_count;
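
The readpage hunk keeps the prefault loop, which reads one byte from every requested page through a volatile lvalue so the compiler cannot discard the otherwise-dead load and the kernel faults each page in; what changes is that the loop now runs with fmap_mutex held. A sketch of the loop in isolation, assuming a contiguous mapping; prefault, base and map_mutex are illustrative names, not part of this source.

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER; /* stands in for fmap_mutex */

/* Touch one byte per page so each page is faulted in before use;
 * volatile keeps the compiler from optimising the read away. */
static void prefault(const char *base, size_t first_page, size_t count, size_t pgsz) {
    size_t i;

    pthread_mutex_lock(&map_mutex);
    for (i = 0; i < count; i++) {
        volatile char faultme = base[(first_page + i) * pgsz];
        (void)faultme; /* the value itself is unused */
    }
    pthread_mutex_unlock(&map_mutex);
}
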