Specifically this fixes use of cli_map_scandesc().
The cli_map_scandesc() function used to override the current fmap
settings with a new size and offset, performing a scan of the embedded
content. This broke the ability to iterate backwards through the fmap
recursion array when an alert occurs to check each map's hash for
whitelist matches.
In order to fix this issue, it needed to be possible to duplicate an
fmap header for the scan of the embedded file without duplicating the
actual map/data. This wasn't feasible with the posix fmap handle
implementation where the fmap header, bitmap array, and memory map
were all contiguous. This commit makes it possible by extracting the
fmap header and bitmap array from the mmap region, using instead a
pointer for both the bitmap array and mmap/data. The resulting posix
fmap handle implementation ended up working more similarly to the
existing Windows implementation.
In addition to the above changes, this commit fixes:
- fmap recursion tracking for cli_scandesc()
- a recursion tracking issue in cli_scanembpe() error handling
... | ... |
@@ -48,10 +48,55 @@ |
48 | 48 |
#include "others.h" |
49 | 49 |
#include "str.h" |
50 | 50 |
|
51 |
+#define FM_MASK_COUNT 0x3fffffff |
|
52 |
+#define FM_MASK_PAGED 0x40000000 |
|
53 |
+#define FM_MASK_SEEN 0x80000000 |
|
54 |
+#define FM_MASK_LOCKED FM_MASK_SEEN |
|
55 |
+/* 2 high bits: |
|
56 |
+00 - not seen - not paged - N/A |
|
57 |
+01 - N/A - paged - not locked |
|
58 |
+10 - seen - not paged - N/A |
|
59 |
+11 - N/A - paged - locked |
|
60 |
+*/ |
|
61 |
+ |
|
62 |
+/* FIXME: tune this stuff */ |
|
63 |
+#define UNPAGE_THRSHLD_LO 4 * 1024 * 1024 |
|
64 |
+#define UNPAGE_THRSHLD_HI 8 * 1024 * 1024 |
|
65 |
+#define READAHEAD_PAGES 8 |
|
66 |
+ |
|
67 |
+#if defined(ANONYMOUS_MAP) && defined(C_LINUX) && defined(CL_THREAD_SAFE) |
|
68 |
+/* |
|
69 |
+ WORKAROUND |
|
70 |
+ Relieve some stress on mmap_sem. |
|
71 |
+ When mmap_sem is heavily hammered, the scheduler |
|
72 |
+ tends to fail to wake us up properly. |
|
73 |
+*/ |
|
74 |
+pthread_mutex_t fmap_mutex = PTHREAD_MUTEX_INITIALIZER; |
|
75 |
+#define fmap_lock pthread_mutex_lock(&fmap_mutex) |
|
76 |
+#define fmap_unlock pthread_mutex_unlock(&fmap_mutex); |
|
77 |
+#else |
|
78 |
+#define fmap_lock |
|
79 |
+#define fmap_unlock |
|
80 |
+#endif |
|
81 |
+ |
|
82 |
+#ifndef MADV_DONTFORK |
|
83 |
+#define MADV_DONTFORK 0 |
|
84 |
+#endif |
|
85 |
+ |
|
86 |
+#define fmap_bitmap (m->bitmap) |
|
87 |
+ |
|
51 | 88 |
static inline unsigned int fmap_align_items(unsigned int sz, unsigned int al); |
52 | 89 |
static inline unsigned int fmap_align_to(unsigned int sz, unsigned int al); |
53 | 90 |
static inline unsigned int fmap_which_page(fmap_t *m, size_t at); |
54 | 91 |
|
92 |
+static const void *handle_need(fmap_t *m, size_t at, size_t len, int lock); |
|
93 |
+static void handle_unneed_off(fmap_t *m, size_t at, size_t len); |
|
94 |
+static const void *handle_need_offstr(fmap_t *m, size_t at, size_t len_hint); |
|
95 |
+static const void *handle_gets(fmap_t *m, char *dst, size_t *at, size_t max_len); |
|
96 |
+ |
|
97 |
+static void unmap_mmap(fmap_t *m); |
|
98 |
+static void unmap_malloc(fmap_t *m); |
|
99 |
+ |
|
55 | 100 |
#ifndef _WIN32 |
56 | 101 |
/* pread proto here in order to avoid the use of XOPEN and BSD_SOURCE |
57 | 102 |
which may in turn prevent some mmap constants to be defined */ |
... | ... |
@@ -66,7 +111,7 @@ static off_t pread_cb(void *handle, void *buf, size_t count, off_t offset) |
66 | 66 |
fmap_t *fmap_check_empty(int fd, off_t offset, size_t len, int *empty) |
67 | 67 |
{ |
68 | 68 |
STATBUF st; |
69 |
- fmap_t *m; |
|
69 |
+ fmap_t *m = NULL; |
|
70 | 70 |
unsigned char hash[16] = {'\0'}; |
71 | 71 |
|
72 | 72 |
*empty = 0; |
... | ... |
@@ -93,6 +138,7 @@ fmap_t *fmap_check_empty(int fd, off_t offset, size_t len, int *empty) |
93 | 93 |
|
94 | 94 |
/* Calculate the fmap hash to be used by the FP check later */ |
95 | 95 |
if (CL_SUCCESS != fmap_get_MD5(hash, m)) { |
96 |
+ funmap(m); |
|
96 | 97 |
return NULL; |
97 | 98 |
} |
98 | 99 |
memcpy(m->maphash, hash, 16); |
... | ... |
@@ -102,21 +148,28 @@ fmap_t *fmap_check_empty(int fd, off_t offset, size_t len, int *empty) |
102 | 102 |
#else |
103 | 103 |
/* vvvvv WIN32 STUFF BELOW vvvvv */ |
104 | 104 |
static void unmap_win32(fmap_t *m) |
105 |
-{ /* WIN32 */ |
|
106 |
- UnmapViewOfFile(m->data); |
|
107 |
- CloseHandle(m->mh); |
|
108 |
- free((void *)m); |
|
105 |
+{ |
|
106 |
+ if (NULL != m) { |
|
107 |
+ if (NULL != m->data) { |
|
108 |
+ UnmapViewOfFile(m->data); |
|
109 |
+ } |
|
110 |
+ if (NULL != m->mh) { |
|
111 |
+ CloseHandle(m->mh); |
|
112 |
+ } |
|
113 |
+ free((void *)m); |
|
114 |
+ } |
|
109 | 115 |
} |
110 | 116 |
|
111 | 117 |
fmap_t *fmap_check_empty(int fd, off_t offset, size_t len, int *empty) |
112 | 118 |
{ /* WIN32 */ |
113 |
- unsigned int pages, mapsz, hdrsz; |
|
119 |
+ unsigned int pages, mapsz; |
|
114 | 120 |
int pgsz = cli_getpagesize(); |
115 | 121 |
STATBUF st; |
116 |
- fmap_t *m; |
|
122 |
+ fmap_t *m = NULL; |
|
117 | 123 |
const void *data; |
118 | 124 |
HANDLE fh; |
119 | 125 |
HANDLE mh; |
126 |
+ unsigned char hash[16] = {'\0'}; |
|
120 | 127 |
|
121 | 128 |
*empty = 0; |
122 | 129 |
if (FSTAT(fd, &st)) { |
... | ... |
@@ -139,7 +192,6 @@ fmap_t *fmap_check_empty(int fd, off_t offset, size_t len, int *empty) |
139 | 139 |
} |
140 | 140 |
|
141 | 141 |
pages = fmap_align_items(len, pgsz); |
142 |
- hdrsz = fmap_align_to(sizeof(fmap_t), pgsz); |
|
143 | 142 |
|
144 | 143 |
if ((fh = (HANDLE)_get_osfhandle(fd)) == INVALID_HANDLE_VALUE) { |
145 | 144 |
cli_errmsg("fmap: cannot get a valid handle for descriptor %d\n", fd); |
... | ... |
@@ -158,6 +210,7 @@ fmap_t *fmap_check_empty(int fd, off_t offset, size_t len, int *empty) |
158 | 158 |
} |
159 | 159 |
if (!(m = cl_fmap_open_memory(data, len))) { |
160 | 160 |
cli_errmsg("fmap: cannot allocate fmap_t\n", fd); |
161 |
+ UnmapViewOfFile(data); |
|
161 | 162 |
CloseHandle(mh); |
162 | 163 |
CloseHandle(fh); |
163 | 164 |
return NULL; |
... | ... |
@@ -170,6 +223,7 @@ fmap_t *fmap_check_empty(int fd, off_t offset, size_t len, int *empty) |
170 | 170 |
|
171 | 171 |
/* Calculate the fmap hash to be used by the FP check later */ |
172 | 172 |
if (CL_SUCCESS != fmap_get_MD5(hash, m)) { |
173 |
+ funmap(m); |
|
173 | 174 |
return NULL; |
174 | 175 |
} |
175 | 176 |
memcpy(m->maphash, hash, 16); |
... | ... |
@@ -180,73 +234,117 @@ fmap_t *fmap_check_empty(int fd, off_t offset, size_t len, int *empty) |
180 | 180 |
|
181 | 181 |
/* vvvvv SHARED STUFF BELOW vvvvv */ |
182 | 182 |
|
183 |
-#define FM_MASK_COUNT 0x3fffffff |
|
184 |
-#define FM_MASK_PAGED 0x40000000 |
|
185 |
-#define FM_MASK_SEEN 0x80000000 |
|
186 |
-#define FM_MASK_LOCKED FM_MASK_SEEN |
|
187 |
-/* 2 high bits: |
|
188 |
-00 - not seen - not paged - N/A |
|
189 |
-01 - N/A - paged - not locked |
|
190 |
-10 - seen - not paged - N/A |
|
191 |
-11 - N/A - paged - locked |
|
192 |
-*/ |
|
183 |
+fmap_t *fmap_duplicate(cl_fmap_t *map, off_t offset, size_t length) |
|
184 |
+{ |
|
185 |
+ cl_error_t status = CL_ERROR; |
|
186 |
+ cl_fmap_t *duplicate_map = NULL; |
|
187 |
+ unsigned char hash[16] = {'\0'}; |
|
188 |
+ |
|
189 |
+ duplicate_map = cli_malloc(sizeof(cl_fmap_t)); |
|
190 |
+ if (!duplicate_map) { |
|
191 |
+ cli_warnmsg("fmap_duplicate: map allocation failed\n"); |
|
192 |
+ goto done; |
|
193 |
+ } |
|
193 | 194 |
|
194 |
-/* FIXME: tune this stuff */ |
|
195 |
-#define UNPAGE_THRSHLD_LO 4 * 1024 * 1024 |
|
196 |
-#define UNPAGE_THRSHLD_HI 8 * 1024 * 1024 |
|
197 |
-#define READAHEAD_PAGES 8 |
|
195 |
+ /* Duplicate the state of the original map */ |
|
196 |
+ memcpy(duplicate_map, map, sizeof(cl_fmap_t)); |
|
197 |
+ |
|
198 |
+ /* Set the new offset and length for the new map */ |
|
199 |
+ /* can't change offset because then we'd have to discard/move cached |
|
200 |
+ * data, instead use another offset to reuse the already cached data */ |
|
201 |
+ duplicate_map->nested_offset += offset; |
|
202 |
+ duplicate_map->len = length; |
|
203 |
+ duplicate_map->real_len = duplicate_map->nested_offset + length; |
|
204 |
+ |
|
205 |
+ if (!CLI_ISCONTAINED(map->nested_offset, map->len, |
|
206 |
+ duplicate_map->nested_offset, duplicate_map->len)) { |
|
207 |
+ uint64_t len1, len2; |
|
208 |
+ len1 = map->nested_offset + map->len; |
|
209 |
+ len2 = duplicate_map->nested_offset + duplicate_map->len; |
|
210 |
+ cli_warnmsg("fmap_duplicate: internal map error: %zu, " STDu64 "; %zu, " STDu64 "\n", |
|
211 |
+ (size_t)map->nested_offset, |
|
212 |
+ (uint64_t)len1, |
|
213 |
+ (size_t)duplicate_map->offset, |
|
214 |
+ (uint64_t)len2); |
|
215 |
+ } |
|
198 | 216 |
|
199 |
-#if defined(ANONYMOUS_MAP) && defined(C_LINUX) && defined(CL_THREAD_SAFE) |
|
200 |
-/* |
|
201 |
- WORKAROUND |
|
202 |
- Relieve some stress on mmap_sem. |
|
203 |
- When mmap_sem is heavily hammered, the scheduler |
|
204 |
- tends to fail to wake us up properly. |
|
205 |
-*/ |
|
206 |
-pthread_mutex_t fmap_mutex = PTHREAD_MUTEX_INITIALIZER; |
|
207 |
-#define fmap_lock pthread_mutex_lock(&fmap_mutex) |
|
208 |
-#define fmap_unlock pthread_mutex_unlock(&fmap_mutex); |
|
209 |
-#else |
|
210 |
-#define fmap_lock |
|
211 |
-#define fmap_unlock |
|
212 |
-#endif |
|
217 |
+ /* Calculate the fmap hash to be used by the FP check later */ |
|
218 |
+ if (CL_SUCCESS != fmap_get_MD5(hash, duplicate_map)) { |
|
219 |
+ cli_warnmsg("fmap_duplicate: failed to get fmap MD5\n"); |
|
220 |
+ goto done; |
|
221 |
+ } |
|
222 |
+ memcpy(duplicate_map->maphash, hash, 16); |
|
213 | 223 |
|
214 |
-#ifndef MADV_DONTFORK |
|
215 |
-#define MADV_DONTFORK 0 |
|
216 |
-#endif |
|
224 |
+ status = CL_SUCCESS; |
|
217 | 225 |
|
218 |
-#define fmap_bitmap (&m->placeholder_for_bitmap) |
|
226 |
+done: |
|
227 |
+ if (CL_SUCCESS != status) { |
|
228 |
+ if (NULL != duplicate_map) { |
|
229 |
+ free(duplicate_map); |
|
230 |
+ duplicate_map = NULL; |
|
231 |
+ } |
|
232 |
+ } |
|
219 | 233 |
|
220 |
-static const void *handle_need(fmap_t *m, size_t at, size_t len, int lock); |
|
221 |
-static void handle_unneed_off(fmap_t *m, size_t at, size_t len); |
|
222 |
-static const void *handle_need_offstr(fmap_t *m, size_t at, size_t len_hint); |
|
223 |
-static const void *handle_gets(fmap_t *m, char *dst, size_t *at, size_t max_len); |
|
224 |
-static void unmap_mmap(fmap_t *m); |
|
225 |
-static void unmap_malloc(fmap_t *m); |
|
234 |
+ return duplicate_map; |
|
235 |
+} |
|
236 |
+ |
|
237 |
+static void unmap_handle(fmap_t *m) |
|
238 |
+{ |
|
239 |
+ if (NULL != m) { |
|
240 |
+ if (NULL != m->data) { |
|
241 |
+ if (m->aging) { |
|
242 |
+ unmap_mmap(m); |
|
243 |
+ } else { |
|
244 |
+ free((void *)m->data); |
|
245 |
+ } |
|
246 |
+ m->data = NULL; |
|
247 |
+ } |
|
248 |
+ if (NULL != m->bitmap) { |
|
249 |
+ free(m->bitmap); |
|
250 |
+ m->bitmap = NULL; |
|
251 |
+ } |
|
252 |
+ free((void *)m); |
|
253 |
+ } |
|
254 |
+} |
|
226 | 255 |
|
227 | 256 |
extern cl_fmap_t *cl_fmap_open_handle(void *handle, size_t offset, size_t len, |
228 | 257 |
clcb_pread pread_cb, int use_aging) |
229 | 258 |
{ |
230 |
- unsigned int pages, mapsz, hdrsz; |
|
231 |
- cl_fmap_t *m; |
|
232 |
- int pgsz = cli_getpagesize(); |
|
259 |
+ cl_error_t status = CL_EMEM; |
|
260 |
+ unsigned int pages; |
|
261 |
+ size_t mapsz, bitmap_size; |
|
262 |
+ cl_fmap_t *m = NULL; |
|
263 |
+ int pgsz = cli_getpagesize(); |
|
233 | 264 |
|
234 | 265 |
if ((off_t)offset < 0 || offset != fmap_align_to(offset, pgsz)) { |
235 | 266 |
cli_warnmsg("fmap: attempted mapping with unaligned offset\n"); |
236 |
- return NULL; |
|
267 |
+ goto done; |
|
237 | 268 |
} |
238 | 269 |
if (!len) { |
239 | 270 |
cli_dbgmsg("fmap: attempted void mapping\n"); |
240 |
- return NULL; |
|
271 |
+ goto done; |
|
241 | 272 |
} |
242 | 273 |
if (offset >= len) { |
243 | 274 |
cli_warnmsg("fmap: attempted oof mapping\n"); |
244 |
- return NULL; |
|
275 |
+ goto done; |
|
245 | 276 |
} |
246 | 277 |
|
247 | 278 |
pages = fmap_align_items(len, pgsz); |
248 |
- hdrsz = fmap_align_to(sizeof(fmap_t) + (pages - 1) * sizeof(uint64_t), pgsz); /* fmap_t includes 1 bitmap slot, hence (pages-1) */ |
|
249 |
- mapsz = pages * pgsz + hdrsz; |
|
279 |
+ |
|
280 |
+ bitmap_size = pages * sizeof(uint32_t); |
|
281 |
+ mapsz = pages * pgsz; |
|
282 |
+ |
|
283 |
+ m = cli_calloc(1, sizeof(fmap_t)); |
|
284 |
+ if (!m) { |
|
285 |
+ cli_warnmsg("fmap: map header allocation failed\n"); |
|
286 |
+ goto done; |
|
287 |
+ } |
|
288 |
+ |
|
289 |
+ m->bitmap = cli_calloc(1, bitmap_size); |
|
290 |
+ if (!m->bitmap) { |
|
291 |
+ cli_warnmsg("fmap: map bitmap allocation failed\n"); |
|
292 |
+ goto done; |
|
293 |
+ } |
|
250 | 294 |
|
251 | 295 |
#ifndef ANONYMOUS_MAP |
252 | 296 |
use_aging = 0; |
... | ... |
@@ -254,29 +352,30 @@ extern cl_fmap_t *cl_fmap_open_handle(void *handle, size_t offset, size_t len, |
254 | 254 |
#ifdef ANONYMOUS_MAP |
255 | 255 |
if (use_aging) { |
256 | 256 |
fmap_lock; |
257 |
- if ((m = (fmap_t *)mmap(NULL, mapsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | /*FIXME: MAP_POPULATE is ~8% faster but more memory intensive */ ANONYMOUS_MAP, -1, 0)) == MAP_FAILED) { |
|
258 |
- m = NULL; |
|
257 |
+ if ((m->data = (fmap_t *)mmap(NULL, |
|
258 |
+ mapsz, |
|
259 |
+ PROT_READ | PROT_WRITE, MAP_PRIVATE | /* FIXME: MAP_POPULATE is ~8% faster but more memory intensive */ ANONYMOUS_MAP, |
|
260 |
+ -1, |
|
261 |
+ 0)) == MAP_FAILED) { |
|
262 |
+ m->data = NULL; |
|
259 | 263 |
} else { |
260 | 264 |
#if HAVE_MADVISE |
261 |
- madvise((void *)m, mapsz, MADV_RANDOM | MADV_DONTFORK); |
|
265 |
+ madvise((void *)m->data, mapsz, MADV_RANDOM | MADV_DONTFORK); |
|
262 | 266 |
#endif /* madvise */ |
263 |
- /* fault the header while we still have the lock - we DO context switch here a lot here :@ */ |
|
264 |
- memset(fmap_bitmap, 0, sizeof(uint32_t) * pages); |
|
265 | 267 |
} |
266 | 268 |
fmap_unlock; |
267 | 269 |
} |
268 | 270 |
#endif /* ANONYMOUS_MAP */ |
269 | 271 |
if (!use_aging) { |
270 |
- m = (fmap_t *)cli_malloc(mapsz); |
|
271 |
- if (!(m)) { |
|
272 |
+ m->data = (fmap_t *)cli_malloc(mapsz); |
|
273 |
+ if (!(m->data)) { |
|
272 | 274 |
cli_warnmsg("fmap: map allocation failed\n"); |
273 |
- return NULL; |
|
275 |
+ goto done; |
|
274 | 276 |
} |
275 |
- memset(m, 0, hdrsz); |
|
276 | 277 |
} |
277 |
- if (!m) { |
|
278 |
+ if (!m->data) { |
|
278 | 279 |
cli_warnmsg("fmap: map allocation failed\n"); |
279 |
- return NULL; |
|
280 |
+ goto done; |
|
280 | 281 |
} |
281 | 282 |
m->handle = handle; |
282 | 283 |
m->pread_cb = pread_cb; |
... | ... |
@@ -286,15 +385,22 @@ extern cl_fmap_t *cl_fmap_open_handle(void *handle, size_t offset, size_t len, |
286 | 286 |
m->len = len; /* m->nested_offset + m->len = m->real_len */ |
287 | 287 |
m->real_len = len; |
288 | 288 |
m->pages = pages; |
289 |
- m->hdrsz = hdrsz; |
|
290 | 289 |
m->pgsz = pgsz; |
291 | 290 |
m->paged = 0; |
292 | 291 |
m->dont_cache_flag = 0; |
293 |
- m->unmap = use_aging ? unmap_mmap : unmap_malloc; |
|
292 |
+ m->unmap = unmap_handle; |
|
294 | 293 |
m->need = handle_need; |
295 | 294 |
m->need_offstr = handle_need_offstr; |
296 | 295 |
m->gets = handle_gets; |
297 | 296 |
m->unneed_off = handle_unneed_off; |
297 |
+ |
|
298 |
+ status = CL_SUCCESS; |
|
299 |
+ |
|
300 |
+done: |
|
301 |
+ if (CL_SUCCESS != status) { |
|
302 |
+ unmap_handle(m); |
|
303 |
+ m = NULL; |
|
304 |
+ } |
|
298 | 305 |
return m; |
299 | 306 |
} |
300 | 307 |
|
... | ... |
@@ -333,7 +439,7 @@ static void fmap_aging(fmap_t *m) |
333 | 333 |
char *lastpage = NULL; |
334 | 334 |
char *firstpage = NULL; |
335 | 335 |
for (i = 0; i < avail; i++) { |
336 |
- char *pptr = (char *)m + freeme[i] * m->pgsz + m->hdrsz; |
|
336 |
+ char *pptr = (char *)m->data + freeme[i] * m->pgsz; |
|
337 | 337 |
/* we mark the page as seen */ |
338 | 338 |
fmap_bitmap[freeme[i]] = FM_MASK_SEEN; |
339 | 339 |
/* and we mmap the page over so the kernel knows there's nothing good in there */ |
... | ... |
@@ -383,7 +489,7 @@ static int fmap_readpage(fmap_t *m, uint64_t first_page, uint32_t count, uint32_ |
383 | 383 |
/* Not worth checking if the page is already paged, just ping each */ |
384 | 384 |
/* Also not worth reusing the loop below */ |
385 | 385 |
volatile char faultme; |
386 |
- faultme = ((char *)m)[(first_page + i) * m->pgsz + m->hdrsz]; |
|
386 |
+ faultme = ((char *)m->data)[(first_page + i) * m->pgsz]; |
|
387 | 387 |
} |
388 | 388 |
fmap_unlock; |
389 | 389 |
for (i = 0; i <= count; i++, page++) { |
... | ... |
@@ -480,7 +586,7 @@ static int fmap_readpage(fmap_t *m, uint64_t first_page, uint32_t count, uint32_ |
480 | 480 |
/* page is not already paged */ |
481 | 481 |
if (!pptr) { |
482 | 482 |
/* set a new start for pending reads if we don't have one */ |
483 |
- pptr = (char *)m + page * m->pgsz + m->hdrsz; |
|
483 |
+ pptr = (char *)m->data + page * m->pgsz; |
|
484 | 484 |
first_page = page; |
485 | 485 |
} |
486 | 486 |
if ((page == m->pages - 1) && (m->real_len % m->pgsz)) |
... | ... |
@@ -521,8 +627,7 @@ static const void *handle_need(fmap_t *m, size_t at, size_t len, int lock) |
521 | 521 |
if (fmap_readpage(m, first_page, last_page - first_page + 1, lock_count)) |
522 | 522 |
return NULL; |
523 | 523 |
|
524 |
- ret = (char *)m; |
|
525 |
- ret += at + m->hdrsz; |
|
524 |
+ ret = (char *)m->data + at; |
|
526 | 525 |
return (void *)ret; |
527 | 526 |
} |
528 | 527 |
|
... | ... |
@@ -571,23 +676,25 @@ static void handle_unneed_off(fmap_t *m, size_t at, size_t len) |
571 | 571 |
static void unmap_mmap(fmap_t *m) |
572 | 572 |
{ |
573 | 573 |
#ifdef ANONYMOUS_MAP |
574 |
- size_t len = m->pages * m->pgsz + m->hdrsz; |
|
574 |
+ size_t len = m->pages * m->pgsz; |
|
575 | 575 |
fmap_lock; |
576 |
- if (munmap((void *)m, len) == -1) /* munmap() failed */ |
|
577 |
- cli_warnmsg("funmap: unable to unmap memory segment at address: %p with length: %zu\n", (void *)m, len); |
|
576 |
+ if (munmap((void *)m->data, len) == -1) /* munmap() failed */ |
|
577 |
+ cli_warnmsg("funmap: unable to unmap memory segment at address: %p with length: %zu\n", (void *)m->data, len); |
|
578 | 578 |
fmap_unlock; |
579 | 579 |
#endif |
580 | 580 |
} |
581 | 581 |
|
582 | 582 |
static void unmap_malloc(fmap_t *m) |
583 | 583 |
{ |
584 |
- free((void *)m); |
|
584 |
+ if (NULL != m) { |
|
585 |
+ free((void *)m); |
|
586 |
+ } |
|
585 | 587 |
} |
586 | 588 |
|
587 | 589 |
static const void *handle_need_offstr(fmap_t *m, size_t at, size_t len_hint) |
588 | 590 |
{ |
589 | 591 |
unsigned int i, first_page, last_page; |
590 |
- void *ptr = (void *)((char *)m + m->hdrsz + at); |
|
592 |
+ void *ptr = (void *)((char *)m->data + at); |
|
591 | 593 |
|
592 | 594 |
if (!len_hint || len_hint > m->real_len - at) |
593 | 595 |
len_hint = m->real_len - at; |
... | ... |
@@ -601,7 +708,7 @@ static const void *handle_need_offstr(fmap_t *m, size_t at, size_t len_hint) |
601 | 601 |
last_page = fmap_which_page(m, at + len_hint - 1); |
602 | 602 |
|
603 | 603 |
for (i = first_page; i <= last_page; i++) { |
604 |
- char *thispage = (char *)m + m->hdrsz + i * m->pgsz; |
|
604 |
+ char *thispage = (char *)m->data + i * m->pgsz; |
|
605 | 605 |
unsigned int scanat, scansz; |
606 | 606 |
|
607 | 607 |
if (fmap_readpage(m, i, 1, 1)) { |
... | ... |
@@ -627,8 +734,10 @@ static const void *handle_need_offstr(fmap_t *m, size_t at, size_t len_hint) |
627 | 627 |
static const void *handle_gets(fmap_t *m, char *dst, size_t *at, size_t max_len) |
628 | 628 |
{ |
629 | 629 |
unsigned int i, first_page, last_page; |
630 |
- char *src = (void *)((char *)m + m->hdrsz + *at), *endptr = NULL; |
|
631 |
- size_t len = MIN(max_len - 1, m->real_len - *at), fullen = len; |
|
630 |
+ char *src = (void *)((char *)m->data + *at); |
|
631 |
+ char *endptr = NULL; |
|
632 |
+ size_t len = MIN(max_len - 1, m->real_len - *at); |
|
633 |
+ size_t fullen = len; |
|
632 | 634 |
|
633 | 635 |
if (!len || !CLI_ISCONTAINED(0, m->real_len, *at, len)) |
634 | 636 |
return NULL; |
... | ... |
@@ -639,7 +748,7 @@ static const void *handle_gets(fmap_t *m, char *dst, size_t *at, size_t max_len) |
639 | 639 |
last_page = fmap_which_page(m, *at + len - 1); |
640 | 640 |
|
641 | 641 |
for (i = first_page; i <= last_page; i++) { |
642 |
- char *thispage = (char *)m + m->hdrsz + i * m->pgsz; |
|
642 |
+ char *thispage = (char *)m->data + i * m->pgsz; |
|
643 | 643 |
unsigned int scanat, scansz; |
644 | 644 |
|
645 | 645 |
if (fmap_readpage(m, i, 1, 0)) |
... | ... |
@@ -47,7 +47,6 @@ struct cl_fmap { |
47 | 47 |
/* internal */ |
48 | 48 |
time_t mtime; |
49 | 49 |
unsigned int pages; |
50 |
- uint64_t hdrsz; |
|
51 | 50 |
uint64_t pgsz; |
52 | 51 |
unsigned int paged; |
53 | 52 |
unsigned short aging; |
... | ... |
@@ -82,12 +81,22 @@ struct cl_fmap { |
82 | 82 |
HANDLE mh; |
83 | 83 |
#endif |
84 | 84 |
unsigned char maphash[16]; |
85 |
- uint32_t placeholder_for_bitmap; |
|
85 |
+ uint32_t *bitmap; |
|
86 | 86 |
}; |
87 | 87 |
|
88 | 88 |
fmap_t *fmap(int fd, off_t offset, size_t len); |
89 | 89 |
fmap_t *fmap_check_empty(int fd, off_t offset, size_t len, int *empty); |
90 | 90 |
|
91 |
+/** |
|
92 |
+ * @brief Create a new fmap view into another fmap. |
|
93 |
+ * |
|
94 |
+ * @param map The parent fmap. |
|
95 |
+ * @param offset Offset for the start of the new fmap into the parent fmap. |
|
96 |
+ * @param length Length of the data from the offset for the new fmap. |
|
97 |
+ * @return fmap_t* NULL if failure, an allocated fmap that must be free'd if success. |
|
98 |
+ */ |
|
99 |
+fmap_t *fmap_duplicate(cl_fmap_t *map, off_t offset, size_t length); |
|
100 |
+ |
|
91 | 101 |
static inline void funmap(fmap_t *m) |
92 | 102 |
{ |
93 | 103 |
m->unmap(m); |
... | ... |
@@ -105,9 +114,7 @@ static inline const void *fmap_need_off_once(fmap_t *m, size_t at, size_t len) |
105 | 105 |
|
106 | 106 |
static inline size_t fmap_ptr2off(const fmap_t *m, const void *ptr) |
107 | 107 |
{ |
108 |
- return (m->data ? (const char *)ptr - (const char *)m->data |
|
109 |
- : (const char *)ptr - (const char *)m - m->hdrsz) - |
|
110 |
- m->nested_offset; |
|
108 |
+ return ((const char *)ptr - (const char *)m->data) - m->nested_offset; |
|
111 | 109 |
} |
112 | 110 |
|
113 | 111 |
static inline const void *fmap_need_ptr(fmap_t *m, const void *ptr, size_t len) |
... | ... |
@@ -763,15 +763,16 @@ cl_error_t cli_scandesc(int desc, cli_ctx *ctx, cli_file_t ftype, uint8_t ftonly |
763 | 763 |
{ |
764 | 764 |
cl_error_t ret = CL_EMEM; |
765 | 765 |
int empty; |
766 |
- fmap_t *map = *ctx->fmap; /* Store off the parent fmap */ |
|
766 |
+ fmap_t *map = *ctx->fmap; /* Store off the parent fmap for easy reference */ |
|
767 | 767 |
|
768 |
- /* Perform scan with child fmap */ |
|
769 |
- if ((*ctx->fmap = fmap_check_empty(desc, 0, 0, &empty))) { |
|
768 |
+ ctx->fmap++; /* Perform scan with child fmap */ |
|
769 |
+ if (NULL != (*ctx->fmap = fmap_check_empty(desc, 0, 0, &empty))) { |
|
770 | 770 |
ret = cli_fmap_scandesc(ctx, ftype, ftonly, ftoffset, acmode, acres, NULL); |
771 | 771 |
map->dont_cache_flag = (*ctx->fmap)->dont_cache_flag; |
772 | 772 |
funmap(*ctx->fmap); |
773 | 773 |
} |
774 |
- *ctx->fmap = map; /* Restore the parent fmap */ |
|
774 |
+ ctx->fmap--; /* Restore the parent fmap */ |
|
775 |
+ |
|
775 | 776 |
if (empty) |
776 | 777 |
return CL_CLEAN; |
777 | 778 |
return ret; |
... | ... |
@@ -167,7 +167,7 @@ typedef struct cli_ctx_tag { |
167 | 167 |
cli_ctx_container *containers; /* set container type after recurse */ |
168 | 168 |
unsigned char handlertype_hash[16]; |
169 | 169 |
struct cli_dconf *dconf; |
170 |
- fmap_t **fmap; |
|
170 |
+ fmap_t **fmap; /* pointer to current fmap in an allocated array, incremented with recursion depth */ |
|
171 | 171 |
bitset_t *hook_lsig_matches; |
172 | 172 |
void *cb_ctx; |
173 | 173 |
cli_events_t *perf; |
... | ... |
@@ -2612,10 +2612,12 @@ static cl_error_t cli_scanembpe(cli_ctx *ctx, off_t offset) |
2612 | 2612 |
if (!ctx->engine->keeptmp) { |
2613 | 2613 |
if (cli_unlink(tmpname)) { |
2614 | 2614 |
free(tmpname); |
2615 |
+ ctx->recursion--; |
|
2615 | 2616 |
return CL_EUNLINK; |
2616 | 2617 |
} |
2617 | 2618 |
} |
2618 | 2619 |
free(tmpname); |
2620 |
+ ctx->recursion--; |
|
2619 | 2621 |
return CL_VIRUS; |
2620 | 2622 |
} |
2621 | 2623 |
ctx->recursion--; |
... | ... |
@@ -3314,7 +3316,7 @@ static int magic_scandesc(cli_ctx *ctx, cli_file_t type) |
3314 | 3314 |
early_ret_from_magicscan(CL_CLEAN); |
3315 | 3315 |
} |
3316 | 3316 |
|
3317 |
- hash = (*ctx->fmap)->maphash; |
|
3317 |
+ hash = (*ctx->fmap)->maphash; |
|
3318 | 3318 |
hashed_size = (*ctx->fmap)->len; |
3319 | 3319 |
|
3320 | 3320 |
old_hook_lsig_matches = ctx->hook_lsig_matches; |
... | ... |
@@ -3500,7 +3502,6 @@ static int magic_scandesc(cli_ctx *ctx, cli_file_t type) |
3500 | 3500 |
} |
3501 | 3501 |
} |
3502 | 3502 |
|
3503 |
- |
|
3504 | 3503 |
ctx->recursion++; |
3505 | 3504 |
perf_nested_start(ctx, PERFT_CONTAINER, PERFT_SCAN); |
3506 | 3505 |
/* set current level as container AFTER recursing */ |
... | ... |
@@ -4183,25 +4184,22 @@ int cli_map_scan(cl_fmap_t *map, off_t offset, size_t length, cli_ctx *ctx, cli_ |
4183 | 4183 |
/* For map scans that are not forced to disk */ |
4184 | 4184 |
int cli_map_scandesc(cl_fmap_t *map, off_t offset, size_t length, cli_ctx *ctx, cli_file_t type) |
4185 | 4185 |
{ |
4186 |
- off_t old_off = map->nested_offset; |
|
4187 |
- size_t old_len = map->len; |
|
4188 |
- size_t old_real_len = map->real_len; |
|
4189 |
- int ret = CL_CLEAN; |
|
4186 |
+ int ret = CL_CLEAN; |
|
4190 | 4187 |
|
4191 |
- cli_dbgmsg("cli_map_scandesc: [%ld, +%lu), [%ld, +%lu)\n", |
|
4192 |
- (long)old_off, (unsigned long)old_len, |
|
4193 |
- (long)offset, (unsigned long)length); |
|
4194 |
- if (offset < 0 || (size_t)offset >= old_len) { |
|
4188 |
+ cli_dbgmsg("cli_map_scandesc: [%zu, +%zu), [" STDi64 ", +%zu)\n", |
|
4189 |
+ map->nested_offset, map->len, |
|
4190 |
+ (int64_t)offset, length); |
|
4191 |
+ if (offset < 0 || (size_t)offset >= map->len) { |
|
4195 | 4192 |
cli_dbgmsg("Invalid offset: %ld\n", (long)offset); |
4196 | 4193 |
return CL_CLEAN; |
4197 | 4194 |
} |
4198 | 4195 |
|
4199 | 4196 |
if (!length) |
4200 |
- length = old_len - offset; |
|
4201 |
- if (length > old_len - offset) { |
|
4197 |
+ length = map->len - offset; |
|
4198 |
+ if (length > map->len - offset) { |
|
4202 | 4199 |
cli_dbgmsg("Data truncated: %zu -> %zu\n", |
4203 |
- length, old_len - (size_t)offset); |
|
4204 |
- length = old_len - (size_t)offset; |
|
4200 |
+ length, map->len - (size_t)offset); |
|
4201 |
+ length = map->len - (size_t)offset; |
|
4205 | 4202 |
} |
4206 | 4203 |
|
4207 | 4204 |
if (length <= 5) { |
... | ... |
@@ -4209,26 +4207,19 @@ int cli_map_scandesc(cl_fmap_t *map, off_t offset, size_t length, cli_ctx *ctx, |
4209 | 4209 |
return CL_CLEAN; |
4210 | 4210 |
} |
4211 | 4211 |
ctx->fmap++; |
4212 |
- *ctx->fmap = map; |
|
4213 |
- /* can't change offset because then we'd have to discard/move cached |
|
4214 |
- * data, instead use another offset to reuse the already cached data */ |
|
4215 |
- map->nested_offset += offset; |
|
4216 |
- map->len = length; |
|
4217 |
- map->real_len = map->nested_offset + length; |
|
4218 |
- if (CLI_ISCONTAINED(old_off, old_len, map->nested_offset, map->len)) { |
|
4219 |
- ret = magic_scandesc(ctx, type); |
|
4220 |
- } else { |
|
4221 |
- long long len1, len2; |
|
4222 |
- len1 = old_off + old_len; |
|
4223 |
- len2 = map->nested_offset + map->len; |
|
4224 |
- cli_warnmsg("internal map error: %lu, %llu; %lu, %llu\n", (long unsigned)old_off, |
|
4225 |
- (long long unsigned)len1, (long unsigned)map->offset, (long long unsigned)len2); |
|
4212 |
+ *ctx->fmap = fmap_duplicate(map, offset, length); |
|
4213 |
+ if (NULL == *ctx->fmap) { |
|
4214 |
+ cli_dbgmsg("Failed to duplicate fmap for scan of fmap subsection\n"); |
|
4215 |
+ ctx->fmap--; |
|
4216 |
+ return CL_CLEAN; |
|
4226 | 4217 |
} |
4227 | 4218 |
|
4219 |
+ ret = magic_scandesc(ctx, type); |
|
4220 |
+ |
|
4221 |
+ free(*ctx->fmap); /* This fmap is just a duplicate, free with free() */ |
|
4222 |
+ *ctx->fmap = NULL; |
|
4228 | 4223 |
ctx->fmap--; |
4229 |
- map->nested_offset = old_off; |
|
4230 |
- map->len = old_len; |
|
4231 |
- map->real_len = old_real_len; |
|
4224 |
+ |
|
4232 | 4225 |
return ret; |
4233 | 4226 |
} |
4234 | 4227 |
|