This is needed to allow clamdtop to watch in real time what is happening in
clamd; otherwise STATS would always lag behind 50 other multiscan commands.
... | ... |
@@ -163,7 +163,7 @@ int scan_callback(struct stat *sb, char *filename, const char *msg, enum cli_ftw |
163 | 163 |
pthread_mutex_lock(&reload_mutex); |
164 | 164 |
client_conn->engine_timestamp = reloaded_time; |
165 | 165 |
pthread_mutex_unlock(&reload_mutex); |
166 |
- if(!thrmgr_group_dispatch(scandata->thr_pool, scandata->group, client_conn)) { |
|
166 |
+ if(!thrmgr_group_dispatch(scandata->thr_pool, scandata->group, client_conn, 1)) { |
|
167 | 167 |
logg("!thread dispatch failed\n"); |
168 | 168 |
free(filename); |
169 | 169 |
return CL_EMEM; |
... | ... |
@@ -368,6 +368,7 @@ int command(client_conn_t *conn, int *virus) |
368 | 368 |
static int dispatch_command(client_conn_t *conn, enum commands cmd, const char *argument) |
369 | 369 |
{ |
370 | 370 |
int ret = 0; |
371 |
+ int bulk; |
|
371 | 372 |
client_conn_t *dup_conn = (client_conn_t *) malloc(sizeof(struct client_conn_tag)); |
372 | 373 |
|
373 | 374 |
if(!dup_conn) { |
... | ... |
@@ -382,6 +383,7 @@ static int dispatch_command(client_conn_t *conn, enum commands cmd, const char * |
382 | 382 |
return -1; |
383 | 383 |
} |
384 | 384 |
dup_conn->scanfd = -1; |
385 |
+ bulk = 1; |
|
385 | 386 |
switch (cmd) { |
386 | 387 |
case COMMAND_FILDES: |
387 | 388 |
if (conn->scanfd == -1) { |
... | ... |
@@ -407,10 +409,14 @@ static int dispatch_command(client_conn_t *conn, enum commands cmd, const char * |
407 | 407 |
break; |
408 | 408 |
case COMMAND_STREAM: |
409 | 409 |
case COMMAND_STATS: |
410 |
+ /* not a scan command, don't queue to bulk */ |
|
411 |
+ bulk = 0; |
|
410 | 412 |
/* just dispatch the command */ |
411 | 413 |
break; |
412 | 414 |
} |
413 |
- if(!ret && !thrmgr_group_dispatch(dup_conn->thrpool, dup_conn->group, dup_conn)) { |
|
415 |
+ if (!dup_conn->group) |
|
416 |
+ bulk = 0; |
|
417 |
+ if(!ret && !thrmgr_group_dispatch(dup_conn->thrpool, dup_conn->group, dup_conn, bulk)) { |
|
414 | 418 |
logg("!thread dispatch failed\n"); |
415 | 419 |
ret = -2; |
416 | 420 |
} |
... | ... |
@@ -745,7 +745,7 @@ int thrmgr_dispatch(threadpool_t *threadpool, void *user_data) |
745 | 745 |
return thrmgr_dispatch_internal(threadpool, user_data, 0); |
746 | 746 |
} |
747 | 747 |
|
748 |
-int thrmgr_group_dispatch(threadpool_t *threadpool, jobgroup_t *group, void *user_data) |
|
748 |
+int thrmgr_group_dispatch(threadpool_t *threadpool, jobgroup_t *group, void *user_data, int bulk) |
|
749 | 749 |
{ |
750 | 750 |
int ret; |
751 | 751 |
if (group) { |
... | ... |
@@ -754,7 +754,7 @@ int thrmgr_group_dispatch(threadpool_t *threadpool, jobgroup_t *group, void *use |
754 | 754 |
logg("$THRMGR: active jobs for %p: %d\n", group, group->jobs); |
755 | 755 |
pthread_mutex_unlock(&group->mutex); |
756 | 756 |
} |
757 |
- if (!(ret = thrmgr_dispatch_internal(threadpool, user_data, group ? 1 : 0)) && group) { |
|
757 |
+ if (!(ret = thrmgr_dispatch_internal(threadpool, user_data, bulk)) && group) { |
|
758 | 758 |
pthread_mutex_lock(&group->mutex); |
759 | 759 |
group->jobs--; |
760 | 760 |
logg("$THRMGR: active jobs for %p: %d\n", group, group->jobs); |
... | ... |
@@ -99,7 +99,7 @@ enum thrmgr_exit { |
99 | 99 |
threadpool_t *thrmgr_new(int max_threads, int idle_timeout, int max_queue, void (*handler)(void *)); |
100 | 100 |
void thrmgr_destroy(threadpool_t *threadpool); |
101 | 101 |
int thrmgr_dispatch(threadpool_t *threadpool, void *user_data); |
102 |
-int thrmgr_group_dispatch(threadpool_t *threadpool, jobgroup_t *group, void *user_data); |
|
102 |
+int thrmgr_group_dispatch(threadpool_t *threadpool, jobgroup_t *group, void *user_data, int bulk); |
|
103 | 103 |
void thrmgr_group_waitforall(jobgroup_t *group, unsigned *ok, unsigned *error, unsigned *total); |
104 | 104 |
int thrmgr_group_finished(jobgroup_t *group, enum thrmgr_exit exitc); |
105 | 105 |
int thrmgr_group_need_terminate(jobgroup_t *group); |