Sebastian Krahmer from the SuSE security team reported that the buffer
overflow check in openvpn_decrypt() was too strict according to the
cipher update function contract:
"The amount of data written depends on the block alignment of the
encrypted data: as a result the amount of data written may be anything
from zero bytes to (inl + cipher_block_size - 1) so outl should contain
sufficient room."
This stems from the way CBC mode works, which caches input and 'flushes'
it block-wise to the output buffer. We do allocate enough space for this
extra block in the output buffer for CBC mode, but not for CFB/OFB modes.
This patch:
* updates the overflow check to also verify that the extra block required
according to the function contract is available.
* uses buf_inc_len() to double-check for overflows during en/decryption.
* also reserves the extra block for non-CBC cipher modes.
In practice, I could not find a way in which this would fail. The plaintext
is never longer than the ciphertext, and the implementations of CBC/OFB/CFB
for AES and BF in both OpenSSL and PolarSSL/mbed TLS do not use the buffer
beyond the plaintext length when decrypting. However, some funky OpenSSL
engine I did not check *might* use the buffer space required by the
function contract. So we should still make sure we have enough room
anyway.
v2 - always ASSERT() on buf_inc_len(). It is a double-check so should
really not fail, but if it fails there has been a buffer overflow.
At that point the best thing we can do is assert out. (The primary
check *is* handled gracefully, and just drops the packet.)
Signed-off-by: Steffan Karger <steffan.karger@fox-it.com>
Acked-by: Arne Schwabe <arne@rfc2549.org>
Message-Id: <1438165826-32762-1-git-send-email-steffan.karger@fox-it.com>
URL: http://article.gmane.org/gmane.network.openvpn.devel/9974
Signed-off-by: Gert Doering <gert@greenie.muc.de>
@@ -166,11 +166,11 @@ openvpn_encrypt (struct buffer *buf, struct buffer work,

	  /* Encrypt packet ID, payload */
	  ASSERT (cipher_ctx_update (ctx->cipher, BPTR (&work), &outlen, BPTR (buf), BLEN (buf)));
-	  work.len += outlen;
+	  ASSERT (buf_inc_len(&work, outlen));

	  /* Flush the encryption buffer */
-	  ASSERT(cipher_ctx_final(ctx->cipher, BPTR (&work) + outlen, &outlen));
-	  work.len += outlen;
+	  ASSERT (cipher_ctx_final(ctx->cipher, BPTR (&work) + outlen, &outlen));
+	  ASSERT (buf_inc_len(&work, outlen));

	  /* For all CBC mode ciphers, check the last block is complete */
	  ASSERT (cipher_kt_mode (cipher_kt) != OPENVPN_MODE_CBC ||
@@ -305,18 +305,18 @@ openvpn_decrypt (struct buffer *buf, struct buffer work,
	    CRYPT_ERROR ("cipher init failed");

	  /* Buffer overflow check (should never happen) */
-	  if (!buf_safe (&work, buf->len))
-	    CRYPT_ERROR ("buffer overflow");
+	  if (!buf_safe (&work, buf->len + cipher_ctx_block_size(ctx->cipher)))
+	    CRYPT_ERROR ("potential buffer overflow");

	  /* Decrypt packet ID, payload */
	  if (!cipher_ctx_update (ctx->cipher, BPTR (&work), &outlen, BPTR (buf), BLEN (buf)))
	    CRYPT_ERROR ("cipher update failed");
-	  work.len += outlen;
+	  ASSERT (buf_inc_len(&work, outlen));

	  /* Flush the decryption buffer */
	  if (!cipher_ctx_final (ctx->cipher, BPTR (&work) + outlen, &outlen))
	    CRYPT_ERROR ("cipher final failed");
-	  work.len += outlen;
+	  ASSERT (buf_inc_len(&work, outlen));

	  dmsg (D_PACKET_CONTENT, "DECRYPT TO: %s",
		format_hex (BPTR (&work), BLEN (&work), 80, &gc));
@@ -413,9 +413,8 @@ crypto_adjust_frame_parameters(struct frame *frame,
      if (use_iv)
	crypto_overhead += cipher_kt_iv_size (kt->cipher);

-      if (cipher_kt_mode_cbc (kt->cipher))
-	/* worst case padding expansion */
-	crypto_overhead += cipher_kt_block_size (kt->cipher);
+      /* extra block required by cipher_ctx_update() */
+      crypto_overhead += cipher_kt_block_size (kt->cipher);
    }

  crypto_overhead += kt->hmac_length;
@@ -333,7 +333,7 @@ int cipher_ctx_reset (cipher_ctx_t *ctx, uint8_t *iv_buf);
 * Note that if a complete block cannot be written, data is cached in the
 * context, and emitted at a later call to \c cipher_ctx_update, or by a call
 * to \c cipher_ctx_final(). This implies that dst should have enough room for
- * src_len + \c cipher_ctx_block_size() - 1.
+ * src_len + \c cipher_ctx_block_size().
 *
 * @param ctx Cipher's context. May not be NULL.
 * @param dst Destination buffer