Patch by Reimar Döffinger <latinize($name) at (MN's favourite mail provider).de>
Originally committed as revision 18677 to svn://svn.ffmpeg.org/ffmpeg/trunk
... | ... |
@@ -365,10 +365,10 @@ static inline int GET_TOK(TM2Context *ctx,int type) {
 
 /* recalculate last and delta values for next blocks */
 #define TM2_RECALC_BLOCK(CHR, stride, last, CD) {\
-    CD[0] = (CHR[1] - 128) - last[1];\
+    CD[0] = CHR[1] - last[1];\
     CD[1] = (int)CHR[stride + 1] - (int)CHR[1];\
-    last[0] = (int)CHR[stride + 0] - 128;\
-    last[1] = (int)CHR[stride + 1] - 128;}
+    last[0] = (int)CHR[stride + 0];\
+    last[1] = (int)CHR[stride + 1];}
 
 /* common operations - add deltas to 4x4 block of luma or 2x2 blocks of chroma */
 static inline void tm2_apply_deltas(TM2Context *ctx, int* Y, int stride, int *deltas, int *last)
... | ... |
@@ -396,7 +396,7 @@ static inline void tm2_high_chroma(int *data, int stride, int *last, int *CD, in
         for(i = 0; i < 2; i++){
             CD[j] += deltas[i + j * 2];
             last[i] += CD[j];
-            data[i] = last[i] + 128;
+            data[i] = last[i];
         }
         data += stride;
     }
... | ... |
@@ -675,8 +675,8 @@ static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
     int bw, bh;
     int type;
     int keyframe = 1;
-    uint8_t *Y, *U, *V;
-    int *src;
+    int *Y, *U, *V;
+    uint8_t *dst;
 
     bw = ctx->avctx->width >> 2;
    bh = ctx->avctx->height >> 2;
... | ... |
@@ -729,29 +729,23 @@ static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
     }
 
     /* copy data from our buffer to AVFrame */
-    Y = p->data[0];
-    src = (ctx->cur?ctx->Y2:ctx->Y1);
+    Y = (ctx->cur?ctx->Y2:ctx->Y1);
+    U = (ctx->cur?ctx->U2:ctx->U1);
+    V = (ctx->cur?ctx->V2:ctx->V1);
+    dst = p->data[0];
     for(j = 0; j < ctx->avctx->height; j++){
         for(i = 0; i < ctx->avctx->width; i++){
-            Y[i] = av_clip_uint8(*src++);
+            int y = Y[i], u = U[i >> 1], v = V[i >> 1];
+            dst[3*i+0] = av_clip_uint8(y + v);
+            dst[3*i+1] = av_clip_uint8(y);
+            dst[3*i+2] = av_clip_uint8(y + u);
         }
-        Y += p->linesize[0];
-    }
-    U = p->data[2];
-    src = (ctx->cur?ctx->U2:ctx->U1);
-    for(j = 0; j < (ctx->avctx->height + 1) >> 1; j++){
-        for(i = 0; i < (ctx->avctx->width + 1) >> 1; i++){
-            U[i] = av_clip_uint8(*src++);
-        }
-        U += p->linesize[2];
-    }
-    V = p->data[1];
-    src = (ctx->cur?ctx->V2:ctx->V1);
-    for(j = 0; j < (ctx->avctx->height + 1) >> 1; j++){
-        for(i = 0; i < (ctx->avctx->width + 1) >> 1; i++){
-            V[i] = av_clip_uint8(*src++);
+        Y += ctx->avctx->width;
+        if (j & 1) {
+            U += ctx->avctx->width >> 1;
+            V += ctx->avctx->width >> 1;
         }
-        V += p->linesize[1];
+        dst += p->linesize[0];
     }
 
     return keyframe;
... | ... |
@@ -829,7 +823,7 @@ static av_cold int decode_init(AVCodecContext *avctx){
 
     l->avctx = avctx;
     l->pic.data[0]=NULL;
-    avctx->pix_fmt = PIX_FMT_YUV420P;
+    avctx->pix_fmt = PIX_FMT_BGR24;
 
     dsputil_init(&l->dsp, avctx);
 