Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer authored on 2014/05/25 10:39:08
@@ -235,15 +235,15 @@ void ff_ivi_recompose_haar(const IVIPlaneDesc *plane, uint8_t *dst,
 
 /** butterfly operation for the inverse Haar transform */
 #define IVI_HAAR_BFLY(s1, s2, o1, o2, t) \
-    t  = (s1 - s2) >> 1;\
-    o1 = (s1 + s2) >> 1;\
-    o2 = t;\
+    t  = ((s1) - (s2)) >> 1;\
+    o1 = ((s1) + (s2)) >> 1;\
+    o2 = (t);\
 
 /** inverse 8-point Haar transform */
 #define INV_HAAR8(s1, s5, s3, s7, s2, s4, s6, s8,\
                   d1, d2, d3, d4, d5, d6, d7, d8,\
                   t0, t1, t2, t3, t4, t5, t6, t7, t8) {\
-    t1 = s1 << 1; t5 = s5 << 1;\
+    t1 = (s1) << 1; t5 = (s5) << 1;\
     IVI_HAAR_BFLY(t1, t5, t1, t5, t0); IVI_HAAR_BFLY(t1, s3, t1, t3, t0);\
     IVI_HAAR_BFLY(t5, s7, t5, t7, t0); IVI_HAAR_BFLY(t1, s2, t1, t2, t0);\
     IVI_HAAR_BFLY(t3, s4, t3, t4, t0); IVI_HAAR_BFLY(t5, s6, t5, t6, t0);\
...
@@ -485,21 +485,21 @@ void ff_ivi_dc_haar_2d(const int32_t *in, int16_t *out, uint32_t pitch,
 
 /** butterfly operation for the inverse slant transform */
 #define IVI_SLANT_BFLY(s1, s2, o1, o2, t) \
-    t  = s1 - s2;\
-    o1 = s1 + s2;\
-    o2 = t;\
+    t  = (s1) - (s2);\
+    o1 = (s1) + (s2);\
+    o2 = (t);\
 
 /** This is a reflection a,b = 1/2, 5/4 for the inverse slant transform */
 #define IVI_IREFLECT(s1, s2, o1, o2, t) \
-    t  = ((s1 + s2*2 + 2) >> 2) + s1;\
-    o2 = ((s1*2 - s2 + 2) >> 2) - s2;\
-    o1 = t;\
+    t  = (((s1) + (s2)*2 + 2) >> 2) + (s1);\
+    o2 = (((s1)*2 - (s2) + 2) >> 2) - (s2);\
+    o1 = (t);\
 
 /** This is a reflection a,b = 1/2, 7/8 for the inverse slant transform */
 #define IVI_SLANT_PART4(s1, s2, o1, o2, t) \
-    t  = s2 + ((s1*4 - s2 + 4) >> 3);\
-    o2 = s1 + ((-s1 - s2*4 + 4) >> 3);\
-    o1 = t;\
+    t  = (s2) + (((s1)*4 - (s2) + 4) >> 3);\
+    o2 = (s1) + ((-(s1) - (s2)*4 + 4) >> 3);\
+    o1 = (t);\
 
 /** inverse slant8 transform */
 #define IVI_INV_SLANT8(s1, s4, s8, s5, s2, s6, s3, s7,\
...
@@ -557,7 +557,7 @@ void ff_ivi_inverse_slant_8x8(const int32_t *in, int16_t *out, uint32_t pitch, c
     }
 #undef COMPENSATE
 
-#define COMPENSATE(x) ((x + 1)>>1)
+#define COMPENSATE(x) (((x) + 1)>>1)
     src = tmp;
     for (i = 0; i < 8; i++) {
         if (!src[0] && !src[1] && !src[2] && !src[3] && !src[4] && !src[5] && !src[6] && !src[7]) {
...
@@ -597,7 +597,7 @@ void ff_ivi_inverse_slant_4x4(const int32_t *in, int16_t *out, uint32_t pitch, c
     }
 #undef COMPENSATE
 
-#define COMPENSATE(x) ((x + 1)>>1)
+#define COMPENSATE(x) (((x) + 1)>>1)
     src = tmp;
     for (i = 0; i < 4; i++) {
         if (!src[0] && !src[1] && !src[2] && !src[3]) {
...
@@ -631,7 +631,7 @@ void ff_ivi_row_slant8(const int32_t *in, int16_t *out, uint32_t pitch, const ui
     int i;
     int t0, t1, t2, t3, t4, t5, t6, t7, t8;
 
-#define COMPENSATE(x) ((x + 1)>>1)
+#define COMPENSATE(x) (((x) + 1)>>1)
     for (i = 0; i < 8; i++) {
         if (!in[0] && !in[1] && !in[2] && !in[3] && !in[4] && !in[5] && !in[6] && !in[7]) {
             memset(out, 0, 8*sizeof(out[0]));
...
@@ -673,7 +673,7 @@ void ff_ivi_col_slant8(const int32_t *in, int16_t *out, uint32_t pitch, const ui
     row4 = pitch << 2;
     row8 = pitch << 3;
 
-#define COMPENSATE(x) ((x + 1)>>1)
+#define COMPENSATE(x) (((x) + 1)>>1)
     for (i = 0; i < 8; i++) {
         if (flags[i]) {
             IVI_INV_SLANT8(in[0], in[8], in[16], in[24], in[32], in[40], in[48], in[56],
...
@@ -710,7 +710,7 @@ void ff_ivi_row_slant4(const int32_t *in, int16_t *out, uint32_t pitch, const ui
    int i;
    int t0, t1, t2, t3, t4;

-#define COMPENSATE(x) ((x + 1)>>1)
+#define COMPENSATE(x) (((x) + 1)>>1)
    for (i = 0; i < 4; i++) {
        if (!in[0] && !in[1] && !in[2] && !in[3]) {
            memset(out, 0, 4*sizeof(out[0]));
...
@@ -732,7 +732,7 @@ void ff_ivi_col_slant4(const int32_t *in, int16_t *out, uint32_t pitch, const ui
 
     row2 = pitch << 1;
 
-#define COMPENSATE(x) ((x + 1)>>1)
+#define COMPENSATE(x) (((x) + 1)>>1)
    for (i = 0; i < 4; i++) {
        if (flags[i]) {
            IVI_INV_SLANT4(in[0], in[4], in[8], in[12],