/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_config.h"

#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/vpx_once.h"

#include "./vp9_rtcd.h"

#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_onyxc_int.h"

const TX_TYPE intra_mode_to_tx_type_lookup[INTRA_MODES] = {
  DCT_DCT,    // DC
  ADST_DCT,   // V
  DCT_ADST,   // H
  DCT_DCT,    // D45
  ADST_ADST,  // D135
  ADST_DCT,   // D117
  DCT_ADST,   // D153
  DCT_ADST,   // D207
  ADST_DCT,   // D63
  ADST_ADST,  // TM
};

// This serves as a wrapper function, so that all the prediction functions
// can be unified and accessed as a pointer array. Note that the above and
// left boundaries are not always used.
#define intra_pred_sized(type, size) \
  void vp9_##type##_predictor_##size##x##size##_c(uint8_t *dst, \
                                                  ptrdiff_t stride, \
                                                  const uint8_t *above, \
                                                  const uint8_t *left) { \
    type##_predictor(dst, stride, size, above, left); \
  }

#define intra_pred_allsizes(type) \
  intra_pred_sized(type, 4) \
  intra_pred_sized(type, 8) \
  intra_pred_sized(type, 16) \
  intra_pred_sized(type, 32)

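// As a sketch, one instantiation of the macros above expands to (taking
// intra_pred_sized(d207, 4) as the example):
//
//   void vp9_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
//                                 const uint8_t *above, const uint8_t *left) {
//     d207_predictor(dst, stride, 4, above, left);
//   }
//
// D207: directional prediction at roughly 207 degrees (down and to the
// left), built entirely from the left column; the above row is unused.
// ROUND_POWER_OF_TWO(v, n) is a rounding right shift,
// (v + (1 << (n - 1))) >> n, so the 2-tap and 3-tap sums below become
// rounded averages.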
static INLINE void d207_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                  const uint8_t *above, const uint8_t *left) {
  int r, c;
  (void) above;
  // first column
  for (r = 0; r < bs - 1; ++r)
    dst[r * stride] = ROUND_POWER_OF_TWO(left[r] + left[r + 1], 1);
  dst[(bs - 1) * stride] = left[bs - 1];
  dst++;

  // second column
  for (r = 0; r < bs - 2; ++r)
    dst[r * stride] = ROUND_POWER_OF_TWO(left[r] + left[r + 1] * 2 +
                                         left[r + 2], 2);
  dst[(bs - 2) * stride] = ROUND_POWER_OF_TWO(left[bs - 2] +
                                              left[bs - 1] * 3, 2);
  dst[(bs - 1) * stride] = left[bs - 1];
  dst++;

  // rest of last row
  for (c = 0; c < bs - 2; ++c)
    dst[(bs - 1) * stride + c] = left[bs - 1];

  for (r = bs - 2; r >= 0; --r)
    for (c = 0; c < bs - 2; ++c)
      dst[r * stride + c] = dst[(r + 1) * stride + c - 2];
}
intra_pred_allsizes(d207)

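// D63: directional prediction at roughly 63 degrees from the above row
// only; odd rows use a 3-tap filter, even rows a 2-tap average, with the
// source window advancing one sample every two rows (r/2).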
static INLINE void d63_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                 const uint8_t *above, const uint8_t *left) {
  int r, c;
  (void) left;
  for (r = 0; r < bs; ++r) {
    for (c = 0; c < bs; ++c)
      dst[c] = r & 1 ? ROUND_POWER_OF_TWO(above[r/2 + c] +
                                          above[r/2 + c + 1] * 2 +
                                          above[r/2 + c + 2], 2)
                     : ROUND_POWER_OF_TWO(above[r/2 + c] +
                                          above[r/2 + c + 1], 1);
    dst += stride;
  }
}
intra_pred_allsizes(d63)

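// D45: diagonal (down-left) prediction from the above row, which must hold
// 2 * bs samples; pixels that would read past the end repeat the last
// sample, above[bs * 2 - 1].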
static INLINE void d45_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                 const uint8_t *above, const uint8_t *left) {
  int r, c;
  (void) left;
  for (r = 0; r < bs; ++r) {
    for (c = 0; c < bs; ++c)
      dst[c] = r + c + 2 < bs * 2 ? ROUND_POWER_OF_TWO(above[r + c] +
                                                       above[r + c + 1] * 2 +
                                                       above[r + c + 2], 2)
                                  : above[bs * 2 - 1];
    dst += stride;
  }
}
intra_pred_allsizes(d45)

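// D117: prediction between vertical and diagonal using both borders: the
// first two rows come filtered from the above row, the first column from
// the left column, and every remaining pixel copies the value two rows up
// and one column to the left.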
static INLINE void d117_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                  const uint8_t *above, const uint8_t *left) {
  int r, c;

  // first row
  for (c = 0; c < bs; c++)
    dst[c] = ROUND_POWER_OF_TWO(above[c - 1] + above[c], 1);
  dst += stride;

  // second row
  dst[0] = ROUND_POWER_OF_TWO(left[0] + above[-1] * 2 + above[0], 2);
  for (c = 1; c < bs; c++)
    dst[c] = ROUND_POWER_OF_TWO(above[c - 2] + above[c - 1] * 2 + above[c], 2);
  dst += stride;

  // the rest of first col
  dst[0] = ROUND_POWER_OF_TWO(above[-1] + left[0] * 2 + left[1], 2);
  for (r = 3; r < bs; ++r)
    dst[(r - 2) * stride] = ROUND_POWER_OF_TWO(left[r - 3] + left[r - 2] * 2 +
                                               left[r - 1], 2);

  // the rest of the block
  for (r = 2; r < bs; ++r) {
    for (c = 1; c < bs; c++)
      dst[c] = dst[-2 * stride + c - 1];
    dst += stride;
  }
}
intra_pred_allsizes(d117)

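// D135: diagonal (down-right) prediction from both borders; once the top
// row and left column are filtered, each interior pixel copies its up-left
// neighbor.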
static INLINE void d135_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                  const uint8_t *above, const uint8_t *left) {
  int r, c;
  dst[0] = ROUND_POWER_OF_TWO(left[0] + above[-1] * 2 + above[0], 2);
  for (c = 1; c < bs; c++)
    dst[c] = ROUND_POWER_OF_TWO(above[c - 2] + above[c - 1] * 2 + above[c], 2);

  dst[stride] = ROUND_POWER_OF_TWO(above[-1] + left[0] * 2 + left[1], 2);
  for (r = 2; r < bs; ++r)
    dst[r * stride] = ROUND_POWER_OF_TWO(left[r - 2] + left[r - 1] * 2 +
                                         left[r], 2);

  dst += stride;
  for (r = 1; r < bs; ++r) {
    for (c = 1; c < bs; c++)
      dst[c] = dst[-stride + c - 1];
    dst += stride;
  }
}
intra_pred_allsizes(d135)

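// D153: prediction between horizontal and diagonal using both borders; the
// interior copies the pixel one row up and two columns to the left.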
static INLINE void d153_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                  const uint8_t *above, const uint8_t *left) {
  int r, c;
  dst[0] = ROUND_POWER_OF_TWO(above[-1] + left[0], 1);
  for (r = 1; r < bs; r++)
    dst[r * stride] = ROUND_POWER_OF_TWO(left[r - 1] + left[r], 1);
  dst++;

  dst[0] = ROUND_POWER_OF_TWO(left[0] + above[-1] * 2 + above[0], 2);
  dst[stride] = ROUND_POWER_OF_TWO(above[-1] + left[0] * 2 + left[1], 2);
  for (r = 2; r < bs; r++)
    dst[r * stride] = ROUND_POWER_OF_TWO(left[r - 2] + left[r - 1] * 2 +
                                         left[r], 2);
  dst++;

  for (c = 0; c < bs - 2; c++)
    dst[c] = ROUND_POWER_OF_TWO(above[c - 1] + above[c] * 2 + above[c + 1], 2);
  dst += stride;

  for (r = 1; r < bs; ++r) {
    for (c = 0; c < bs - 2; c++)
      dst[c] = dst[-stride + c - 2];
    dst += stride;
  }
}
intra_pred_allsizes(d153)

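// V: vertical prediction; every row is a copy of the above row.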
static INLINE void v_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                               const uint8_t *above, const uint8_t *left) {
  int r;
  (void) left;

  for (r = 0; r < bs; r++) {
    vpx_memcpy(dst, above, bs);
    dst += stride;
  }
}
intra_pred_allsizes(v)

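// H: horizontal prediction; row r is filled with left[r].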
static INLINE void h_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                               const uint8_t *above, const uint8_t *left) {
  int r;
  (void) above;

  for (r = 0; r < bs; r++) {
    vpx_memset(dst, left[r], bs);
    dst += stride;
  }
}
intra_pred_allsizes(h)

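// TM (TrueMotion): each pixel is predicted as left[r] + above[c] -
// above[-1], clipped to [0, 255]. For example, with above[-1] = 100,
// above[c] = 110 and left[r] = 90, the prediction is 90 + 110 - 100 = 100.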
static INLINE void tm_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                const uint8_t *above, const uint8_t *left) {
  int r, c;
  int ytop_left = above[-1];

  for (r = 0; r < bs; r++) {
    for (c = 0; c < bs; c++)
      dst[c] = clip_pixel(left[r] + above[c] - ytop_left);
    dst += stride;
  }
}
intra_pred_allsizes(tm)

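// DC fallback when neither border is available: fill with mid-gray (128).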
static INLINE void dc_128_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                    const uint8_t *above, const uint8_t *left) {
  int r;
  (void) above;
  (void) left;

  for (r = 0; r < bs; r++) {
    vpx_memset(dst, 128, bs);
    dst += stride;
  }
}
intra_pred_allsizes(dc_128)

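// The remaining DC variants fill the block with a rounded average,
// (sum + count / 2) / count, of whichever border samples are available:
// only the left column here, only the above row in dc_top_predictor, and
// both in dc_predictor.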
static INLINE void dc_left_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                     const uint8_t *above,
                                     const uint8_t *left) {
  int i, r, expected_dc, sum = 0;
  (void) above;

  for (i = 0; i < bs; i++)
    sum += left[i];
  expected_dc = (sum + (bs >> 1)) / bs;

  for (r = 0; r < bs; r++) {
    vpx_memset(dst, expected_dc, bs);
    dst += stride;
  }
}
intra_pred_allsizes(dc_left)

static INLINE void dc_top_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                    const uint8_t *above, const uint8_t *left) {
  int i, r, expected_dc, sum = 0;
  (void) left;

  for (i = 0; i < bs; i++)
    sum += above[i];
  expected_dc = (sum + (bs >> 1)) / bs;

  for (r = 0; r < bs; r++) {
    vpx_memset(dst, expected_dc, bs);
    dst += stride;
  }
}
intra_pred_allsizes(dc_top)

static INLINE void dc_predictor(uint8_t *dst, ptrdiff_t stride, int bs,
                                const uint8_t *above, const uint8_t *left) {
  int i, r, expected_dc, sum = 0;
  const int count = 2 * bs;

  for (i = 0; i < bs; i++) {
    sum += above[i];
    sum += left[i];
  }

  expected_dc = (sum + (count >> 1)) / count;

  for (r = 0; r < bs; r++) {
    vpx_memset(dst, expected_dc, bs);
    dst += stride;
  }
}
intra_pred_allsizes(dc)
#undef intra_pred_allsizes

typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left);

static intra_pred_fn pred[INTRA_MODES][4];
static intra_pred_fn dc_pred[2][2][4];
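// dc_pred is indexed as [left_available][up_available][tx_size]: [0][0] is
// the 128-fill fallback, [0][1] averages only the above row, [1][0] only
// the left column, and [1][1] both. pred[] is indexed by mode and tx_size.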

static void init_intra_pred_fn_ptrs(void) {
#define intra_pred_allsizes(l, type) \
  l[0] = vp9_##type##_predictor_4x4; \
  l[1] = vp9_##type##_predictor_8x8; \
  l[2] = vp9_##type##_predictor_16x16; \
  l[3] = vp9_##type##_predictor_32x32

  intra_pred_allsizes(pred[V_PRED], v);
  intra_pred_allsizes(pred[H_PRED], h);
  intra_pred_allsizes(pred[D207_PRED], d207);
  intra_pred_allsizes(pred[D45_PRED], d45);
  intra_pred_allsizes(pred[D63_PRED], d63);
  intra_pred_allsizes(pred[D117_PRED], d117);
  intra_pred_allsizes(pred[D135_PRED], d135);
  intra_pred_allsizes(pred[D153_PRED], d153);
  intra_pred_allsizes(pred[TM_PRED], tm);

  intra_pred_allsizes(dc_pred[0][0], dc_128);
  intra_pred_allsizes(dc_pred[0][1], dc_top);
  intra_pred_allsizes(dc_pred[1][0], dc_left);
  intra_pred_allsizes(dc_pred[1][1], dc);

#undef intra_pred_allsizes
}

static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
                                   int ref_stride, uint8_t *dst, int dst_stride,
                                   PREDICTION_MODE mode, TX_SIZE tx_size,
                                   int up_available, int left_available,
                                   int right_available, int x, int y,
                                   int plane) {
  int i;
  DECLARE_ALIGNED_ARRAY(16, uint8_t, left_col, 64);
  DECLARE_ALIGNED_ARRAY(16, uint8_t, above_data, 128 + 16);
  uint8_t *above_row = above_data + 16;
  const uint8_t *const_above_row = above_row;
  const int bs = 4 << tx_size;
  int frame_width, frame_height;
  int x0, y0;
  const struct macroblockd_plane *const pd = &xd->plane[plane];

  // 127 127 127 .. 127 127 127 127 127 127
  // 129  A   B  ..  Y   Z
  // 129  C   D  ..  W   X
  // 129  E   F  ..  U   V
  // 129  G   H  ..  S   T   T   T   T   T
  // ..
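  // 127 is the fill value used when the above row is unavailable and 129
  // the fill for a missing left column (matching the vpx_memset calls
  // below); the repeated trailing values sketch the right-edge replication.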

  once(init_intra_pred_fn_ptrs);

  // Get current frame pointer, width and height.
  if (plane == 0) {
    frame_width = xd->cur_buf->y_width;
    frame_height = xd->cur_buf->y_height;
  } else {
    frame_width = xd->cur_buf->uv_width;
    frame_height = xd->cur_buf->uv_height;
  }

  // Get block position in current frame.
  x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
  y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
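  // mb_to_left_edge and mb_to_top_edge are kept in 1/8-pel units, hence
  // the >> 3 before applying the chroma subsampling shift.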

  vpx_memset(left_col, 129, 64);

  // left
  if (left_available) {
    if (xd->mb_to_bottom_edge < 0) {
      /* slower path if the block needs border extension */
      if (y0 + bs <= frame_height) {
        for (i = 0; i < bs; ++i)
          left_col[i] = ref[i * ref_stride - 1];
      } else {
        const int extend_bottom = frame_height - y0;
        for (i = 0; i < extend_bottom; ++i)
          left_col[i] = ref[i * ref_stride - 1];
        for (; i < bs; ++i)
          left_col[i] = ref[(extend_bottom - 1) * ref_stride - 1];
      }
    } else {
      /* faster path if the block does not need extension */
      for (i = 0; i < bs; ++i)
        left_col[i] = ref[i * ref_stride - 1];
    }
  }

  // TODO(hkuang) do not extend 2*bs pixels for all modes.
  // above
  if (up_available) {
    const uint8_t *above_ref = ref - ref_stride;
    if (xd->mb_to_right_edge < 0) {
      /* slower path if the block needs border extension */
      if (x0 + 2 * bs <= frame_width) {
        if (right_available && bs == 4) {
          vpx_memcpy(above_row, above_ref, 2 * bs);
        } else {
          vpx_memcpy(above_row, above_ref, bs);
          vpx_memset(above_row + bs, above_row[bs - 1], bs);
        }
      } else if (x0 + bs <= frame_width) {
        const int r = frame_width - x0;
        if (right_available && bs == 4) {
          vpx_memcpy(above_row, above_ref, r);
          vpx_memset(above_row + r, above_row[r - 1],
                     x0 + 2 * bs - frame_width);
        } else {
          vpx_memcpy(above_row, above_ref, bs);
          vpx_memset(above_row + bs, above_row[bs - 1], bs);
        }
      } else if (x0 <= frame_width) {
        const int r = frame_width - x0;
        if (right_available && bs == 4) {
          vpx_memcpy(above_row, above_ref, r);
          vpx_memset(above_row + r, above_row[r - 1],
                     x0 + 2 * bs - frame_width);
        } else {
          vpx_memcpy(above_row, above_ref, r);
          vpx_memset(above_row + r, above_row[r - 1],
                     x0 + 2 * bs - frame_width);
        }
      }
      above_row[-1] = left_available ? above_ref[-1] : 129;
    } else {
      /* faster path if the block does not need extension */
      if (bs == 4 && right_available && left_available) {
        const_above_row = above_ref;
      } else {
        vpx_memcpy(above_row, above_ref, bs);
        if (bs == 4 && right_available)
          vpx_memcpy(above_row + bs, above_ref + bs, bs);
        else
          vpx_memset(above_row + bs, above_row[bs - 1], bs);
        above_row[-1] = left_available ? above_ref[-1] : 129;
      }
    }
  } else {
    vpx_memset(above_row, 127, bs * 2);
    above_row[-1] = 127;
  }

  // predict
  if (mode == DC_PRED) {
    dc_pred[left_available][up_available][tx_size](dst, dst_stride,
                                                   const_above_row, left_col);
  } else {
    pred[mode][tx_size](dst, dst_stride, const_above_row, left_col);
  }
}

void vp9_predict_intra_block(const MACROBLOCKD *xd, int block_idx, int bwl_in,
                             TX_SIZE tx_size, PREDICTION_MODE mode,
                             const uint8_t *ref, int ref_stride,
                             uint8_t *dst, int dst_stride,
                             int aoff, int loff, int plane) {
  const int bwl = bwl_in - tx_size;
  const int wmask = (1 << bwl) - 1;
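  // A transform block has usable top pixels if it is not in the first row
  // of its partition (block_idx >> bwl gives its row) or the partition
  // itself has an above neighbor; likewise for left. Above-right samples
  // exist only when the block is not in the rightmost column.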
  const int have_top = (block_idx >> bwl) || xd->up_available;
  const int have_left = (block_idx & wmask) || xd->left_available;
  const int have_right = ((block_idx & wmask) != wmask);
  const int x = aoff * 4;
  const int y = loff * 4;

  assert(bwl >= 0);
  build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode, tx_size,
                         have_top, have_left, have_right, x, y, plane);
}
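// A minimal usage sketch (hypothetical values; real callers pass the block
// coordinates produced by the decode loop). Predicting the first 4x4
// sub-block of an 8x8 luma partition in place, with ref and dst sharing
// the reconstruction buffer, might look like:
//
//   vp9_predict_intra_block(xd, 0 /* block_idx */, 1 /* bwl_in */,
//                           TX_4X4, DC_PRED, dst, dst_stride,
//                           dst, dst_stride, 0 /* aoff */, 0 /* loff */,
//                           0 /* plane */);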