Increase performance to match mp=unsharp.
Stefano Sabatini authored on 2013/02/17 00:55:55
@@ -105,8 +105,13 @@ static void apply_unsharp( uint8_t *dst, int dst_stride, |
105 | 105 |
int32_t res; |
106 | 106 |
int x, y, z; |
107 | 107 |
const uint8_t *src2 = NULL; //silence a warning |
108 |
+ const int amount = fp->amount; |
|
109 |
+ const int steps_x = fp->steps_x; |
|
110 |
+ const int steps_y = fp->steps_y; |
|
111 |
+ const int scalebits = fp->scalebits; |
|
112 |
+ const int32_t halfscale = fp->halfscale; |
|
108 | 113 |
|
109 |
- if (!fp->amount) { |
|
114 |
+ if (!amount) { |
|
110 | 115 |
if (dst_stride == src_stride) |
111 | 116 |
memcpy(dst, src, src_stride * height); |
112 | 117 |
else |
... | ... |
@@ -115,29 +120,29 @@ static void apply_unsharp( uint8_t *dst, int dst_stride, |
115 | 115 |
return; |
116 | 116 |
} |
117 | 117 |
|
118 |
- for (y = 0; y < 2 * fp->steps_y; y++) |
|
119 |
- memset(sc[y], 0, sizeof(sc[y][0]) * (width + 2 * fp->steps_x)); |
|
118 |
+ for (y = 0; y < 2 * steps_y; y++) |
|
119 |
+ memset(sc[y], 0, sizeof(sc[y][0]) * (width + 2 * steps_x)); |
|
120 | 120 |
|
121 |
- for (y = -fp->steps_y; y < height + fp->steps_y; y++) { |
|
121 |
+ for (y = -steps_y; y < height + steps_y; y++) { |
|
122 | 122 |
if (y < height) |
123 | 123 |
src2 = src; |
124 | 124 |
|
125 |
- memset(sr, 0, sizeof(sr[0]) * (2 * fp->steps_x - 1)); |
|
126 |
- for (x = -fp->steps_x; x < width + fp->steps_x; x++) { |
|
125 |
+ memset(sr, 0, sizeof(sr[0]) * (2 * steps_x - 1)); |
|
126 |
+ for (x = -steps_x; x < width + steps_x; x++) { |
|
127 | 127 |
tmp1 = x <= 0 ? src2[0] : x >= width ? src2[width-1] : src2[x]; |
128 |
- for (z = 0; z < fp->steps_x * 2; z += 2) { |
|
128 |
+ for (z = 0; z < steps_x * 2; z += 2) { |
|
129 | 129 |
tmp2 = sr[z + 0] + tmp1; sr[z + 0] = tmp1; |
130 | 130 |
tmp1 = sr[z + 1] + tmp2; sr[z + 1] = tmp2; |
131 | 131 |
} |
132 |
- for (z = 0; z < fp->steps_y * 2; z += 2) { |
|
133 |
- tmp2 = sc[z + 0][x + fp->steps_x] + tmp1; sc[z + 0][x + fp->steps_x] = tmp1; |
|
134 |
- tmp1 = sc[z + 1][x + fp->steps_x] + tmp2; sc[z + 1][x + fp->steps_x] = tmp2; |
|
132 |
+ for (z = 0; z < steps_y * 2; z += 2) { |
|
133 |
+ tmp2 = sc[z + 0][x + steps_x] + tmp1; sc[z + 0][x + steps_x] = tmp1; |
|
134 |
+ tmp1 = sc[z + 1][x + steps_x] + tmp2; sc[z + 1][x + steps_x] = tmp2; |
|
135 | 135 |
} |
136 |
- if (x >= fp->steps_x && y >= fp->steps_y) { |
|
137 |
- const uint8_t *srx = src - fp->steps_y * src_stride + x - fp->steps_x; |
|
138 |
- uint8_t *dsx = dst - fp->steps_y * dst_stride + x - fp->steps_x; |
|
136 |
+ if (x >= steps_x && y >= steps_y) { |
|
137 |
+ const uint8_t *srx = src - steps_y * src_stride + x - steps_x; |
|
138 |
+ uint8_t *dsx = dst - steps_y * dst_stride + x - steps_x; |
|
139 | 139 |
|
140 |
- res = (int32_t)*srx + ((((int32_t) * srx - (int32_t)((tmp1 + fp->halfscale) >> fp->scalebits)) * fp->amount) >> 16); |
|
140 |
+ res = (int32_t)*srx + ((((int32_t) * srx - (int32_t)((tmp1 + halfscale) >> scalebits)) * amount) >> 16); |
|
141 | 141 |
*dsx = av_clip_uint8(res); |
142 | 142 |
} |
143 | 143 |
} |