/*
 * Copyright (C) 2015-2020 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*
* Authors: Mickey Sola
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#if HAVE_CONFIG_H
#include "clamav-config.h"
#endif
#include <stdio.h> |
3941428b |
#include <errno.h> |
7ee85372 |
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <signal.h>
#include <pthread.h> |
7bc021ff |
|
9e20cdf6 |
#if defined(HAVE_SYS_FANOTIFY_H) |
3e5b1b8b |
#include <sys/fanotify.h> |
7bc021ff |
#endif |
7ee85372 |
|
9e20cdf6 |
// libclamav
#include "others.h"
// shared
#include "optparser.h"
#include "output.h" |
7ee85372 |
|
3733a879 |
#include "../misc/priv_fts.h" |
78b1b1b4 |
#include "../misc/utils.h"
#include "../client/client.h" |
72c10bd1 |
#include "thread.h" |
b71960cd |
|
3e5b1b8b |
/* Serializes access to the clamd socket: only one worker thread may scan at a time. */
static pthread_mutex_t onas_scan_lock = PTHREAD_MUTEX_INITIALIZER;

/* Scan entry point with optional retry-on-error handling; returns the client scan result. */
static int onas_scan(struct onas_scan_event *event_data, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code);
/* Mutex-protected wrapper around onas_client_scan(). */
static cl_error_t onas_scan_safe(struct onas_scan_event *event_data, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code);
/* Scans one file and, under fanotify, writes the allow/deny response and closes the event fd. */
static cl_error_t onas_scan_thread_scanfile(struct onas_scan_event *event_data, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code);
/* Walks a directory tree (fts) and scans every non-directory entry found. */
static cl_error_t onas_scan_thread_handle_dir(struct onas_scan_event *event_data, const char *pathname);
/* Stats and scans a single file, honoring the configured size limit. */
static cl_error_t onas_scan_thread_handle_file(struct onas_scan_event *event_data, const char *pathname);
d98d6fdb |
|
3e5b1b8b |
/** |
4c1f1522 |
* @brief Safe-scan wrapper, originally used by inotify and fanotify threads, now exists for error checking/convenience.
*
* Owned by scanthread to try and force multithreaded client archtiecture which better avoids kernel level deadlocks from
* fanotify blocking/prevention. |
3e5b1b8b |
*/ |
7bc021ff |
static int onas_scan(struct onas_scan_event *event_data, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code) |
b71960cd |
{ |
4fee702f |
int ret = 0;
int i = 0; |
b365aa58 |
uint8_t retry_on_error = event_data->bool_opts & ONAS_SCTH_B_RETRY_ON_E; |
3e5b1b8b |
|
b365aa58 |
ret = onas_scan_safe(event_data, fname, sb, infected, err, ret_code); |
3e5b1b8b |
if (*err) {
switch (*ret_code) {
case CL_EACCES:
case CL_ESTAT:
logg("*ClamMisc: internal issue (daemon could not access directory/file %s)\n", fname);
break;
/* TODO: handle other errors */
case CL_EPARSE:
case CL_EREAD:
case CL_EWRITE:
case CL_EMEM:
case CL_ENULLARG:
default:
logg("~ClamMisc: internal issue (client failed to scan)\n");
} |
4fee702f |
if (retry_on_error) {
logg("*ClamMisc: reattempting scan ... \n");
while (err) {
ret = onas_scan_safe(event_data, fname, sb, infected, err, ret_code);
i++;
if (*err && i == event_data->retry_attempts) {
*err = 0;
}
}
} |
3e5b1b8b |
} |
b71960cd |
|
497b72ea |
return ret; |
b71960cd |
}
|
3e5b1b8b |
/** |
4c1f1522 |
* @brief Thread-safe scan wrapper to ensure there's no processs contention over use of the socket.
*
* This is noticeably slower, and I had no issues running smaller scale tests with it off, but better than sorry until more testing can be done.
*
* TODO: make this configurable? |
3e5b1b8b |
*/ |
4fee702f |
static cl_error_t onas_scan_safe(struct onas_scan_event *event_data, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code)
{ |
b365aa58 |
|
4fee702f |
int ret = 0;
int fd = 0; |
7bc021ff |
|
9e20cdf6 |
#if defined(HAVE_SYS_FANOTIFY_H) |
4fee702f |
uint8_t b_fanotify; |
128da45d |
|
4fee702f |
b_fanotify = event_data->bool_opts & ONAS_SCTH_B_FANOTIFY ? 1 : 0; |
128da45d |
|
4fee702f |
if (b_fanotify) {
fd = event_data->fmd->fd;
} |
7bc021ff |
#endif |
3e5b1b8b |
|
4fee702f |
pthread_mutex_lock(&onas_scan_lock); |
3e5b1b8b |
|
4fee702f |
ret = onas_client_scan(event_data->tcpaddr, event_data->portnum, event_data->scantype, event_data->maxstream,
fname, fd, event_data->timeout, sb, infected, err, ret_code); |
128da45d |
|
4fee702f |
pthread_mutex_unlock(&onas_scan_lock); |
3e5b1b8b |
|
4fee702f |
return ret; |
3e5b1b8b |
}
|
4fee702f |
/**
 * @brief Scan a single file on behalf of a worker thread; under fanotify, also write
 *        the allow/deny verdict back to the kernel and close the event fd.
 *
 * @param event_data scan context (options, fds, connection info); must be non-NULL
 * @param fname      path of the object to scan
 * @param sb         stat buffer for the object (already filled by caller)
 * @param infected   out: non-zero if the object was found infected
 * @param err        out: non-zero if the scan errored
 * @param ret_code   out: clamav status code from the scan
 * @return CL_SUCCESS/0 on success, CL_ENULLARG / CL_EWRITE / CL_EUNLINK on failure
 */
static cl_error_t onas_scan_thread_scanfile(struct onas_scan_event *event_data, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code)
{

#if defined(HAVE_SYS_FANOTIFY_H)
    struct fanotify_response res;
    uint8_t b_fanotify;
#endif

    int ret = 0;

    uint8_t b_scan;
    uint8_t b_deny_on_error;

    if (NULL == event_data || NULL == fname || NULL == infected || NULL == err || NULL == ret_code) {
        logg("!ClamWorker: scan failed (NULL arg given)\n");
        return CL_ENULLARG;
    }

    b_scan          = event_data->bool_opts & ONAS_SCTH_B_SCAN ? 1 : 0;
    b_deny_on_error = event_data->bool_opts & ONAS_SCTH_B_DENY_ON_E ? 1 : 0;

#if defined(HAVE_SYS_FANOTIFY_H)
    b_fanotify = event_data->bool_opts & ONAS_SCTH_B_FANOTIFY ? 1 : 0;
    if (b_fanotify) {
        res.fd       = event_data->fmd->fd;
        res.response = FAN_ALLOW; /* default verdict unless the scan says otherwise */
    }
#endif

    if (b_scan) {
        ret = onas_scan(event_data, fname, sb, infected, err, ret_code);

        if (*err && *ret_code != CL_SUCCESS) {
            logg("*ClamWorker: scan failed with error code %d\n", *ret_code);
        }

#if defined(HAVE_SYS_FANOTIFY_H)
        if (b_fanotify) {
            if ((*err && *ret_code && b_deny_on_error) || *infected) {
                res.response = FAN_DENY;
            }
        }
#endif
    }

#if defined(HAVE_SYS_FANOTIFY_H)
    if (b_fanotify) {
        if (event_data->fmd->mask & FAN_ALL_PERM_EVENTS) {
            /* BUG FIX: don't store write()'s return in ret — a successful write
             * previously clobbered ret with the byte count (sizeof(res)), which
             * callers would then interpret as a cl_error_t; likewise the
             * recoverable ENOENT path used to leak ret == -1. */
            if (-1 == write(event_data->fan_fd, &res, sizeof(res))) {
                logg("!ClamWorker: internal error (can't write to fanotify)\n");
                if (errno == ENOENT) {
                    logg("*ClamWorker: permission event has already been written ... recovering ...\n");
                } else {
                    ret = CL_EWRITE;
                }
            }
        }

#ifdef ONAS_DEBUG
        logg("*ClamWorker: closing fd, %d)\n", event_data->fmd->fd);
#endif
        if (-1 == close(event_data->fmd->fd)) {
            logg("!ClamWorker: internal error (can't close fanotify meta fd, %d)\n", event_data->fmd->fd);
            if (errno == EBADF) {
                logg("*ClamWorker: fd already closed ... recovering ...\n");
            } else {
                ret = CL_EUNLINK;
            }
        }
    }
#endif
    return ret;
}
|
4fee702f |
static cl_error_t onas_scan_thread_handle_dir(struct onas_scan_event *event_data, const char *pathname)
{
FTS *ftsp = NULL;
int32_t ftspopts = FTS_NOCHDIR | FTS_PHYSICAL | FTS_XDEV;
FTSENT *curr = NULL; |
b365aa58 |
|
4fee702f |
int32_t infected = 0;
int32_t err = 0;
cl_error_t ret_code = CL_SUCCESS;
cl_error_t ret = CL_SUCCESS; |
b365aa58 |
|
4fee702f |
int32_t fres = 0;
STATBUF sb; |
b71960cd |
|
4fee702f |
char *const pathargv[] = {(char *)pathname, NULL}; |
b365aa58 |
|
4fee702f |
if (!(ftsp = _priv_fts_open(pathargv, ftspopts, NULL))) {
ret = CL_EOPEN;
goto out;
} |
7ee85372 |
|
4fee702f |
while ((curr = _priv_fts_read(ftsp))) {
if (curr->fts_info != FTS_D) { |
b71960cd |
|
4fee702f |
fres = CLAMSTAT(curr->fts_path, &sb); |
3e5b1b8b |
|
4fee702f |
if (event_data->sizelimit) { |
e2f59af3 |
if (fres != 0 || (uint64_t)sb.st_size > event_data->sizelimit) { |
4fee702f |
/* okay to skip w/o allow/deny since dir comes from inotify |
b365aa58 |
* events and (probably) won't block w/ protection enabled */ |
4fee702f |
event_data->bool_opts &= ((uint16_t)~ONAS_SCTH_B_SCAN);
logg("*ClamWorker: size limit surpassed while doing extra scanning ... skipping object ...\n");
}
} |
b71960cd |
|
4fee702f |
ret = onas_scan_thread_scanfile(event_data, curr->fts_path, sb, &infected, &err, &ret_code);
}
} |
7ee85372 |
|
7ad7211e |
out: |
4fee702f |
if (ftsp) {
_priv_fts_close(ftsp);
} |
7ad7211e |
|
4fee702f |
return ret; |
7ee85372 |
}
|
4fee702f |
static cl_error_t onas_scan_thread_handle_file(struct onas_scan_event *event_data, const char *pathname)
{ |
7ee85372 |
|
4fee702f |
STATBUF sb;
int32_t infected = 0;
int32_t err = 0;
cl_error_t ret_code = CL_SUCCESS;
int fres = 0;
cl_error_t ret = 0; |
7ee85372 |
|
4fee702f |
if (NULL == pathname || NULL == event_data) {
return CL_ENULLARG;
} |
7ee85372 |
|
4fee702f |
fres = CLAMSTAT(pathname, &sb);
if (event_data->sizelimit) { |
e2f59af3 |
if (fres != 0 || (uint64_t)sb.st_size > event_data->sizelimit) { |
4fee702f |
/* don't skip so we avoid lockups, but don't scan either; |
b365aa58 |
* while it should be obvious, this will unconditionally set
* the bit in the map to 0 regardless of original orientation */ |
4fee702f |
event_data->bool_opts &= ((uint16_t)~ONAS_SCTH_B_SCAN);
}
} |
674a4aa5 |
|
4fee702f |
ret = onas_scan_thread_scanfile(event_data, pathname, sb, &infected, &err, &ret_code); |
7d83fa29 |
#ifdef ONAS_DEBUG |
4fee702f |
/* very noisy, debug only */
if (event_data->bool_opts & ONAS_SCTH_B_INOTIFY) {
logg("*ClamWorker: Inotify Scan Results ...\n\tret = %d ...\n\tinfected = %d ...\n\terr = %d ...\n\tret_code = %d\n",
ret, infected, err, ret_code);
} else {
logg("*ClamWorker: Fanotify Scan Results ...\n\tret = %d ...\n\tinfected = %d ...\n\terr = %d ...\n\tret_code = %d\n\tfd = %d\n",
ret, infected, err, ret_code, event_data->fmd->fd);
} |
7d83fa29 |
#endif |
b71960cd |
|
4fee702f |
return ret; |
7ee85372 |
}
|
4c1f1522 |
/**
* @brief worker thread designed to work with the lovely c-thread-pool library to handle our scanning jobs after our queue thread consumes an event
*
* @param arg this should always be an onas_scan_event struct
*/ |
4fee702f |
void *onas_scan_worker(void *arg)
{ |
b365aa58 |
|
4fee702f |
struct onas_scan_event *event_data = (struct onas_scan_event *)arg; |
b365aa58 |
|
4fee702f |
uint8_t b_dir;
uint8_t b_file;
uint8_t b_inotify;
uint8_t b_fanotify; |
b365aa58 |
|
4fee702f |
if (NULL == event_data || NULL == event_data->pathname) {
logg("ClamWorker: invalid worker arguments for scanning thread\n");
if (event_data) {
logg("ClamWorker: pathname is null\n");
}
goto done;
} |
72fd33c8 |
|
4fee702f |
/* load in boolean info from event struct; makes for easier reading--you're welcome */
b_dir = event_data->bool_opts & ONAS_SCTH_B_DIR ? 1 : 0;
b_file = event_data->bool_opts & ONAS_SCTH_B_FILE ? 1 : 0;
b_inotify = event_data->bool_opts & ONAS_SCTH_B_INOTIFY ? 1 : 0;
b_fanotify = event_data->bool_opts & ONAS_SCTH_B_FANOTIFY ? 1 : 0; |
b365aa58 |
|
9e20cdf6 |
#if defined(HAVE_SYS_FANOTIFY_H) |
4fee702f |
if (b_inotify) {
logg("*ClamWorker: handling inotify event ...\n");
if (b_dir) {
logg("*ClamWorker: performing (extra) scanning on directory '%s'\n", event_data->pathname);
onas_scan_thread_handle_dir(event_data, event_data->pathname);
} else if (b_file) {
logg("*ClamWorker: performing (extra) scanning on file '%s'\n", event_data->pathname);
onas_scan_thread_handle_file(event_data, event_data->pathname);
}
} else if (b_fanotify) {
logg("*ClamWorker: performing scanning on file '%s'\n", event_data->pathname);
onas_scan_thread_handle_file(event_data, event_data->pathname);
} else {
/* something went very wrong, so check if we have an open fd, |
7bc021ff |
* try to close it to resolve any potential lingering permissions event,
* then move to cleanup */ |
4fee702f |
if (event_data->fmd) {
if (event_data->fmd->fd) {
close(event_data->fmd->fd);
goto done;
}
}
} |
7bc021ff |
#endif |
d7979d4f |
done: |
4fee702f |
/* our job to cleanup event data: worker queue just kicks us off in a thread pool, drops the event object |
b365aa58 |
* from the queue and forgets about us */ |
3e5b1b8b |
|
4fee702f |
if (NULL != event_data) {
if (NULL != event_data->pathname) {
free(event_data->pathname);
event_data->pathname = NULL;
} |
3941428b |
|
9e20cdf6 |
#if defined(HAVE_SYS_FANOTIFY_H) |
4fee702f |
if (NULL != event_data->fmd) {
free(event_data->fmd);
event_data->fmd = NULL;
} |
7bc021ff |
#endif |
4fee702f |
free(event_data);
event_data = NULL;
} |
72fd33c8 |
|
4fee702f |
return NULL; |
7ee85372 |
} |
b365aa58 |
|
4c1f1522 |
/**
* @brief Simple utility function for external interfaces to add relevant context information to scan_event struct.
*
* Doing this mapping cuts down significantly on memory overhead when queueing hundreds of these scan_event structs
* especially vs using a copy of a raw context struct.
*
* Other potential design options include giving the event access to the "global" context struct address instead,
* to further cut down on space used, but (among other thread safety concerns) I'd prefer the worker threads not
* have the ability to modify it at all to keep down on potential maintenance headaches in the future.
*/ |
4fee702f |
cl_error_t onas_map_context_info_to_event_data(struct onas_context *ctx, struct onas_scan_event **event_data)
{ |
b365aa58 |
|
4fee702f |
if (NULL == ctx || NULL == event_data || NULL == *event_data) { |
b365aa58 |
logg("*ClamScThread: context and scan event struct are null ...\n");
return CL_ENULLARG;
}
|
4fee702f |
(*event_data)->scantype = ctx->scantype;
(*event_data)->timeout = ctx->timeout;
(*event_data)->maxstream = ctx->maxstream;
(*event_data)->fan_fd = ctx->fan_fd;
(*event_data)->sizelimit = ctx->sizelimit; |
b365aa58 |
(*event_data)->retry_attempts = ctx->retry_attempts;
if (ctx->retry_on_error) {
(*event_data)->bool_opts |= ONAS_SCTH_B_RETRY_ON_E;
}
if (ctx->deny_on_error) {
(*event_data)->bool_opts |= ONAS_SCTH_B_DENY_ON_E;
}
|
7d83fa29 |
if (ctx->isremote) {
(*event_data)->bool_opts |= ONAS_SCTH_B_REMOTE;
(*event_data)->tcpaddr = optget(ctx->clamdopts, "TCPAddr")->strarg;
(*event_data)->portnum = ctx->portnum;
} else {
(*event_data)->tcpaddr = optget(ctx->clamdopts, "LocalSocket")->strarg;
}
|
b365aa58 |
return CL_SUCCESS;
} |