git-svn-id: http://svn.openvpn.net/projects/openvpn/branches/BETA21/openvpn@901 e7ae566f-a301-0410-adde-c780ea21d3b5
james authored on 2006/02/18 19:33:41... | ... |
@@ -25,7 +25,7 @@ dnl Process this file with autoconf to produce a configure script. |
25 | 25 |
|
26 | 26 |
AC_PREREQ(2.50) |
27 | 27 |
|
28 |
-AC_INIT([OpenVPN], [2.1_beta10], [openvpn-users@lists.sourceforge.net], [openvpn]) |
|
28 |
+AC_INIT([OpenVPN], [2.1_beta10a], [openvpn-users@lists.sourceforge.net], [openvpn]) |
|
29 | 29 |
AM_CONFIG_HEADER(config.h) |
30 | 30 |
AC_CONFIG_SRCDIR(syshead.h) |
31 | 31 |
|
... | ... |
@@ -81,7 +81,7 @@ headc (const struct buffer *buf) |
81 | 81 |
} |
82 | 82 |
#endif |
83 | 83 |
|
84 |
-static void |
|
84 |
+static inline void |
|
85 | 85 |
close_socket_if_defined (const socket_descriptor_t sd) |
86 | 86 |
{ |
87 | 87 |
if (socket_defined (sd)) |
... | ... |
@@ -154,6 +154,13 @@ send_control (const socket_descriptor_t fd, int code) |
154 | 154 |
return -1; |
155 | 155 |
} |
156 | 156 |
|
157 |
+/* |
|
158 |
+ * Send a command (char), data (head), and a file descriptor (sd_send) to a local process |
|
159 |
+ * over unix socket sd. Unfortunately, there's no portable way to send file descriptors |
|
160 |
+ * to other processes, so this code, as well as its analog (control_message_from_parent below), |
|
161 |
+ * is Linux-specific. This function runs in the context of the main process and is used to |
|
162 |
+ * send commands, data, and file descriptors to the background process. |
|
163 |
+ */ |
|
157 | 164 |
static void |
158 | 165 |
port_share_sendmsg (const socket_descriptor_t sd, |
159 | 166 |
const char command, |
... | ... |
@@ -231,7 +238,9 @@ pc_list_len (struct proxy_connection *pc) |
231 | 231 |
return count; |
232 | 232 |
} |
233 | 233 |
|
234 |
-/* mark a proxy entry and its counterpart for close */ |
|
234 |
+/* |
|
235 |
+ * Mark a proxy entry and its counterpart for close. |
|
236 |
+ */ |
|
235 | 237 |
static void |
236 | 238 |
proxy_entry_mark_for_close (struct proxy_connection *pc, struct event_set *es) |
237 | 239 |
{ |
... | ... |
@@ -256,6 +265,10 @@ proxy_entry_mark_for_close (struct proxy_connection *pc, struct event_set *es) |
256 | 256 |
} |
257 | 257 |
} |
258 | 258 |
|
259 |
+/* |
|
260 |
+ * Run through the proxy entry list and delete all entries marked |
|
261 |
+ * for close. |
|
262 |
+ */ |
|
259 | 263 |
static void |
260 | 264 |
proxy_list_housekeeping (struct proxy_connection **list) |
261 | 265 |
{ |
... | ... |
@@ -282,6 +295,9 @@ proxy_list_housekeeping (struct proxy_connection **list) |
282 | 282 |
} |
283 | 283 |
} |
284 | 284 |
|
285 |
+/* |
|
286 |
+ * Cleanup function, on proxy process exit. |
|
287 |
+ */ |
|
285 | 288 |
static void |
286 | 289 |
proxy_list_close (struct proxy_connection **list) |
287 | 290 |
{ |
... | ... |
@@ -318,6 +334,13 @@ proxy_connection_io_requeue (struct proxy_connection *pc, const int rwflags_new, |
318 | 318 |
} |
319 | 319 |
} |
320 | 320 |
|
321 |
+/* |
|
322 |
+ * Create a new pair of proxy_connection entries, one for each |
|
323 |
+ * socket file descriptor involved in the proxy. We are given |
|
324 |
+ * the client fd, and we should derive our own server fd by connecting |
|
325 |
+ * to the server given by server_addr/server_port. Return true |
|
326 |
+ * on success and false on failure to connect to server. |
|
327 |
+ */ |
|
321 | 328 |
static bool |
322 | 329 |
proxy_entry_new (struct proxy_connection **list, |
323 | 330 |
struct event_set *es, |
... | ... |
@@ -381,6 +404,12 @@ proxy_entry_new (struct proxy_connection **list, |
381 | 381 |
return true; |
382 | 382 |
} |
383 | 383 |
|
384 |
+/* |
|
385 |
+ * This function runs in the context of the background proxy process. |
|
386 |
+ * Receive a control message from the parent (sent by the port_share_sendmsg |
|
387 |
+ * function above) and act on it. Return false if the proxy process should |
|
388 |
+ * exit, true otherwise. |
|
389 |
+ */ |
|
384 | 390 |
static bool |
385 | 391 |
control_message_from_parent (const socket_descriptor_t sd_control, |
386 | 392 |
struct proxy_connection **list, |
... | ... |
@@ -466,7 +495,14 @@ control_message_from_parent (const socket_descriptor_t sd_control, |
466 | 466 |
#define IOSTAT_EAGAIN_ON_WRITE 1 |
467 | 467 |
#define IOSTAT_ERROR 2 |
468 | 468 |
|
469 |
-/* forward data from pc to pc->counterpart */ |
|
469 |
+/* |
|
470 |
+ * Forward data from pc to pc->counterpart. |
|
471 |
+ * Return values: |
|
472 |
+ * |
|
473 |
+ * IOSTAT_EAGAIN_ON_READ -- recv returned EAGAIN |
|
474 |
+ * IOSTAT_EAGAIN_ON_WRITE -- send returned EAGAIN |
|
475 |
+ * IOSTAT_ERROR -- the other end of one of our sockets was closed |
|
476 |
+ */ |
|
470 | 477 |
static int |
471 | 478 |
proxy_connection_io_xfer (struct proxy_connection *pc) |
472 | 479 |
{ |
... | ... |
@@ -516,6 +552,9 @@ proxy_connection_io_xfer (struct proxy_connection *pc) |
516 | 516 |
return IOSTAT_ERROR; |
517 | 517 |
} |
518 | 518 |
|
519 |
+/* |
|
520 |
+ * Decide how the receipt of an EAGAIN status should affect our next IO queueing. |
|
521 |
+ */ |
|
519 | 522 |
static inline bool |
520 | 523 |
proxy_connection_io_status (const int status, int *rwflags_pc, int *rwflags_cp) |
521 | 524 |
{ |
... | ... |
@@ -534,6 +573,10 @@ proxy_connection_io_status (const int status, int *rwflags_pc, int *rwflags_cp) |
534 | 534 |
} |
535 | 535 |
} |
536 | 536 |
|
537 |
+/* |
|
538 |
+ * Dispatch function for forwarding data between the two socket fds involved |
|
539 |
+ * in the proxied connection. |
|
540 |
+ */ |
|
537 | 541 |
static bool |
538 | 542 |
proxy_connection_io_dispatch (struct proxy_connection *pc, |
539 | 543 |
const int rwflags, |
... | ... |
@@ -566,6 +609,9 @@ proxy_connection_io_dispatch (struct proxy_connection *pc, |
566 | 566 |
return false; |
567 | 567 |
} |
568 | 568 |
|
569 |
+/* |
|
570 |
+ * This is the main function for the port share proxy background process. |
|
571 |
+ */ |
|
569 | 572 |
static void |
570 | 573 |
port_share_proxy (const in_addr_t hostaddr, const int port, const socket_descriptor_t sd_control) |
571 | 574 |
{ |
... | ... |
@@ -629,6 +675,10 @@ port_share_proxy (const in_addr_t hostaddr, const int port, const socket_descrip |
629 | 629 |
msg (D_PS_PROXY, "PORT SHARE PROXY: proxy exiting"); |
630 | 630 |
} |
631 | 631 |
|
632 |
+/* |
|
633 |
+ * Called from the main OpenVPN process to enable the port |
|
634 |
+ * share proxy. |
|
635 |
+ */ |
|
632 | 636 |
struct port_share * |
633 | 637 |
port_share_open (const char *host, const int port) |
634 | 638 |
{ |
... | ... |
@@ -754,6 +804,11 @@ port_share_abort (struct port_share *ps) |
754 | 754 |
} |
755 | 755 |
} |
756 | 756 |
|
757 |
+/* |
|
758 |
+ * Given either the first 2 or 3 bytes of an initial client -> server |
|
759 |
+ * data payload, return true if the protocol is that of an OpenVPN |
|
760 |
+ * client attempting to connect with an OpenVPN server. |
|
761 |
+ */ |
|
757 | 762 |
bool |
758 | 763 |
is_openvpn_protocol (const struct buffer *buf) |
759 | 764 |
{ |
... | ... |
@@ -773,6 +828,11 @@ is_openvpn_protocol (const struct buffer *buf) |
773 | 773 |
return true; |
774 | 774 |
} |
775 | 775 |
|
776 |
+/* |
|
777 |
+ * Called from the foreground process. Send a message to the background process that it |
|
778 |
+ * should proxy the TCP client on sd to the host/port defined in the initial port_share_open |
|
779 |
+ * call. |
|
780 |
+ */ |
|
776 | 781 |
void |
777 | 782 |
port_share_redirect (struct port_share *ps, const struct buffer *head, socket_descriptor_t sd) |
778 | 783 |
{ |