/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */


#include "./vp9_rtcd.h"
#include "./vpx_config.h"

#include "vpx_mem/vpx_mem.h"

#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_systemdependent.h"

#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_quantize.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_tokenize.h"

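// Above (ta) and left (tl) entropy contexts for each plane, captured per
// block so the trellis coefficient optimizer can evaluate token costs.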
struct optimize_ctx {
  ENTROPY_CONTEXT ta[MAX_MB_PLANE][16];
  ENTROPY_CONTEXT tl[MAX_MB_PLANE][16];
};

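// Arguments threaded through the per-transform-block encode callbacks; *skip
// is cleared whenever a block produces nonzero coefficients.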
struct encode_b_args {
  MACROBLOCK *x;
  struct optimize_ctx *ctx;
  int8_t *skip;
};

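// C reference implementation of the pixel-domain residual:
// diff[r * diff_stride + c] = src - pred for a rows x cols block. SIMD
// versions may replace it through the vp9_rtcd dispatch.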
void vp9_subtract_block_c(int rows, int cols,
                          int16_t *diff, ptrdiff_t diff_stride,
                          const uint8_t *src, ptrdiff_t src_stride,
                          const uint8_t *pred, ptrdiff_t pred_stride) {
  int r, c;

  for (r = 0; r < rows; r++) {
    for (c = 0; c < cols; c++)
      diff[c] = src[c] - pred[c];

    diff += diff_stride;
    pred += pred_stride;
    src  += src_stride;
  }
}

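// Compute the residual for one whole plane of the block at the given size,
// writing it into p->src_diff with stride bw.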
void vp9_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
  struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane];
  const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
  const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];

  vp9_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
                     pd->dst.buf, pd->dst.stride);
}

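// Tie-break used by UPDATE_RD_COST(): when two candidates have identical RD
// cost, only the low-order bits of the rate term scaled by RM are compared
// (the DM and D arguments are unused).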
#define RDTRUNC(RM, DM, R, D) ((128 + (R) * (RM)) & 0xFF)

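// One trellis node: accumulated rate and squared error along the best path,
// the scan index of the next nonzero coefficient on that path, the token for
// this position, and its candidate quantized value.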
typedef struct vp9_token_state {
  int           rate;
  int           error;
  int           next;
  signed char   token;
  short         qc;
} vp9_token_state;

// TODO(jimbankoski): experiment to find optimal RD numbers.
static const int plane_rd_mult[PLANE_TYPES] = { 4, 2 };

#define UPDATE_RD_COST()\
{\
  rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);\
  rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);\
  if (rd_cost0 == rd_cost1) {\
    rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);\
    rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);\
  }\
}

// This function is a placeholder for now but may ultimately need
// to scan previous tokens to work out the correct context.
static int trellis_get_coeff_context(const int16_t *scan,
                                     const int16_t *nb,
                                     int idx, int token,
                                     uint8_t *token_cache) {
  int bak = token_cache[scan[idx]], pt;
  token_cache[scan[idx]] = vp9_pt_energy_class[token];
  pt = get_coef_context(nb, token_cache, idx + 1);
  token_cache[scan[idx]] = bak;
  return pt;
}

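// Trellis (Viterbi) optimization of one block's quantized coefficients. For
// each nonzero coefficient two candidates are tracked: the original quantized
// value and, where the rounding permits, that value moved one step toward
// zero. The lowest rate-distortion path determines the final qcoeff/dqcoeff
// values and the updated eob, which is also returned.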
static int optimize_b(MACROBLOCK *mb, int plane, int block,
                      TX_SIZE tx_size, int ctx) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  struct macroblock_plane *const p = &mb->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int ref = is_inter_block(&xd->mi[0]->mbmi);
  vp9_token_state tokens[1025][2];
  unsigned best_index[1025][2];
  uint8_t token_cache[1024];
  const int16_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
  int16_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  const int eob = p->eobs[block];
  const PLANE_TYPE type = pd->plane_type;
  const int default_eob = 16 << (tx_size << 1);
  const int mul = 1 + (tx_size == TX_32X32);
  const int16_t *dequant_ptr = pd->dequant;
  const uint8_t *const band_translate = get_band_translate(tx_size);
  const scan_order *const so = get_scan(xd, tx_size, type, block);
  const int16_t *const scan = so->scan;
  const int16_t *const nb = so->neighbors;
  int next = eob, sz = 0;
  int64_t rdmult = mb->rdmult * plane_rd_mult[type], rddiv = mb->rddiv;
  int64_t rd_cost0, rd_cost1;
  int rate0, rate1, error0, error1, t0, t1;
  int best, band, pt, i, final_eob;

  assert((!type && !plane) || (type && plane));
  assert(eob <= default_eob);

  /* Now set up a Viterbi trellis to evaluate alternative roundings. */
  if (!ref)
    rdmult = (rdmult * 9) >> 4;

  /* Initialize the sentinel node of the trellis. */
  tokens[eob][0].rate = 0;
  tokens[eob][0].error = 0;
  tokens[eob][0].next = default_eob;
  tokens[eob][0].token = EOB_TOKEN;
  tokens[eob][0].qc = 0;
  tokens[eob][1] = tokens[eob][0];

  for (i = 0; i < eob; i++)
    token_cache[scan[i]] =
        vp9_pt_energy_class[vp9_dct_value_tokens_ptr[qcoeff[scan[i]]].token];

  for (i = eob; i-- > 0;) {
    int base_bits, d2, dx;
    const int rc = scan[i];
    int x = qcoeff[rc];
    /* Only add a trellis state for non-zero coefficients. */
    if (x) {
      int shortcut = 0;
      error0 = tokens[next][0].error;
      error1 = tokens[next][1].error;
      /* Evaluate the first possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;
      t0 = (vp9_dct_value_tokens_ptr + x)->token;
      /* Consider both possible successor states. */
      if (next < default_eob) {
        band = band_translate[i + 1];
        pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
        rate0 += mb->token_costs[tx_size][type][ref][band][0][pt]
                                [tokens[next][0].token];
        rate1 += mb->token_costs[tx_size][type][ref][band][0][pt]
                                [tokens[next][1].token];
      }
      UPDATE_RD_COST();
      /* And pick the best. */
      best = rd_cost1 < rd_cost0;
      base_bits = vp9_dct_value_cost_ptr[x];
      dx = mul * (dqcoeff[rc] - coeff[rc]);
      d2 = dx * dx;
      tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
      tokens[i][0].error = d2 + (best ? error1 : error0);
      tokens[i][0].next = next;
      tokens[i][0].token = t0;
      tokens[i][0].qc = x;
      best_index[i][0] = best;

      /* Evaluate the second possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;

      if ((abs(x) * dequant_ptr[rc != 0] > abs(coeff[rc]) * mul) &&
          (abs(x) * dequant_ptr[rc != 0] < abs(coeff[rc]) * mul +
                                               dequant_ptr[rc != 0]))
        shortcut = 1;
      else
        shortcut = 0;

      if (shortcut) {
        sz = -(x < 0);
        x -= 2 * sz + 1;
      }

      /* Consider both possible successor states. */
      if (!x) {
        /* If we reduced this coefficient to zero, check to see if
         *  we need to move the EOB back here.
         */
        t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
        t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
      } else {
        t0 = t1 = (vp9_dct_value_tokens_ptr + x)->token;
      }
      if (next < default_eob) {
        band = band_translate[i + 1];
        if (t0 != EOB_TOKEN) {
          pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
          rate0 += mb->token_costs[tx_size][type][ref][band][!x][pt]
                                  [tokens[next][0].token];
        }
        if (t1 != EOB_TOKEN) {
          pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache);
          rate1 += mb->token_costs[tx_size][type][ref][band][!x][pt]
                                  [tokens[next][1].token];
        }
      }

      UPDATE_RD_COST();
      /* And pick the best. */
      best = rd_cost1 < rd_cost0;
      base_bits = vp9_dct_value_cost_ptr[x];

      if (shortcut) {
        dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
        d2 = dx * dx;
      }
      tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
      tokens[i][1].error = d2 + (best ? error1 : error0);
      tokens[i][1].next = next;
      tokens[i][1].token = best ? t1 : t0;
      tokens[i][1].qc = x;
      best_index[i][1] = best;
      /* Finally, make this the new head of the trellis. */
      next = i;
    } else {
      /* There's no choice to make for a zero coefficient, so we don't
       *  add a new trellis node, but we do need to update the costs.
       */
      band = band_translate[i + 1];
      t0 = tokens[next][0].token;
      t1 = tokens[next][1].token;
      /* Update the cost of each path if we're past the EOB token. */
      if (t0 != EOB_TOKEN) {
        tokens[next][0].rate +=
            mb->token_costs[tx_size][type][ref][band][1][0][t0];
        tokens[next][0].token = ZERO_TOKEN;
      }
      if (t1 != EOB_TOKEN) {
        tokens[next][1].rate +=
            mb->token_costs[tx_size][type][ref][band][1][0][t1];
        tokens[next][1].token = ZERO_TOKEN;
      }
      best_index[i][0] = best_index[i][1] = 0;
      /* Don't update next, because we didn't add a new node. */
    }
  }

  /* Now pick the best path through the whole trellis. */
  band = band_translate[i + 1];
  rate0 = tokens[next][0].rate;
  rate1 = tokens[next][1].rate;
  error0 = tokens[next][0].error;
  error1 = tokens[next][1].error;
  t0 = tokens[next][0].token;
  t1 = tokens[next][1].token;
  rate0 += mb->token_costs[tx_size][type][ref][band][0][ctx][t0];
  rate1 += mb->token_costs[tx_size][type][ref][band][0][ctx][t1];
  UPDATE_RD_COST();
  best = rd_cost1 < rd_cost0;
  final_eob = -1;
  vpx_memset(qcoeff, 0, sizeof(*qcoeff) * (16 << (tx_size * 2)));
  vpx_memset(dqcoeff, 0, sizeof(*dqcoeff) * (16 << (tx_size * 2)));
  for (i = next; i < eob; i = next) {
    const int x = tokens[i][best].qc;
    const int rc = scan[i];
    if (x) {
      final_eob = i;
    }

    qcoeff[rc] = x;
    dqcoeff[rc] = (x * dequant_ptr[rc != 0]) / mul;

    next = tokens[i][best].next;
    best = best_index[i][best];
  }
  final_eob++;

  mb->plane[plane].eobs[block] = final_eob;
  return final_eob;
}

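// Select the reduced-precision (rd) or full-precision 32x32 forward DCT,
// depending on the encoder's use_lp32x32fdct setting.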
static INLINE void fdct32x32(int rd_transform,
                             const int16_t *src, int16_t *dst, int src_stride) {
  if (rd_transform)
    vp9_fdct32x32_rd(src, dst, src_stride);
  else
    vp9_fdct32x32(src, dst, src_stride);
}

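// Forward transform and quantization of one transform block using the "fp"
// quantizer variant; encode_block() takes this path when x->quant_fp is set.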
void vp9_xform_quant_fp(MACROBLOCK *x, int plane, int block,
                        BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const scan_order *const scan_order = &vp9_default_scan_orders[tx_size];
  int16_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  int16_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint16_t *const eob = &p->eobs[block];
  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  int i, j;
  const int16_t *src_diff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
  src_diff = &p->src_diff[4 * (j * diff_stride + i)];

  switch (tx_size) {
    case TX_32X32:
      fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
      vp9_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, p->round_fp,
                            p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                            pd->dequant, p->zbin_extra, eob, scan_order->scan,
                            scan_order->iscan);
      break;
    case TX_16X16:
      vp9_fdct16x16(src_diff, coeff, diff_stride);
      vp9_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
                      p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                      pd->dequant, p->zbin_extra, eob,
                      scan_order->scan, scan_order->iscan);
      break;
    case TX_8X8:
      vp9_fdct8x8(src_diff, coeff, diff_stride);
      vp9_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
                      p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                      pd->dequant, p->zbin_extra, eob,
                      scan_order->scan, scan_order->iscan);
      break;
    case TX_4X4:
      x->fwd_txm4x4(src_diff, coeff, diff_stride);
      vp9_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
                      p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                      pd->dequant, p->zbin_extra, eob,
                      scan_order->scan, scan_order->iscan);
      break;
    default:
      assert(0);
      break;
  }
}

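// Fast path: transform and quantize only the DC coefficient, leaving all AC
// coefficients zero. Used by the skip_txfm == 2 branch in encode_block().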
void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block,
                        BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  int16_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  int16_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint16_t *const eob = &p->eobs[block];
  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  int i, j;
  const int16_t *src_diff;

  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
  src_diff = &p->src_diff[4 * (j * diff_stride + i)];

  switch (tx_size) {
    case TX_32X32:
      vp9_fdct32x32_1(src_diff, coeff, diff_stride);
      vp9_quantize_dc_32x32(coeff, x->skip_block, p->round,
                            p->quant_fp[0], qcoeff, dqcoeff,
                            pd->dequant[0], eob);
      break;
    case TX_16X16:
      vp9_fdct16x16_1(src_diff, coeff, diff_stride);
      vp9_quantize_dc(coeff, x->skip_block, p->round,
                      p->quant_fp[0], qcoeff, dqcoeff,
                      pd->dequant[0], eob);
      break;
    case TX_8X8:
      vp9_fdct8x8_1(src_diff, coeff, diff_stride);
      vp9_quantize_dc(coeff, x->skip_block, p->round,
                      p->quant_fp[0], qcoeff, dqcoeff,
                      pd->dequant[0], eob);
      break;
    case TX_4X4:
      x->fwd_txm4x4(src_diff, coeff, diff_stride);
      vp9_quantize_dc(coeff, x->skip_block, p->round,
                      p->quant_fp[0], qcoeff, dqcoeff,
                      pd->dequant[0], eob);
      break;
    default:
      assert(0);
      break;
  }
}

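// Full forward transform and regular (vp9_quantize_b) quantization of one
// transform block.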
void vp9_xform_quant(MACROBLOCK *x, int plane, int block,
                     BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const scan_order *const scan_order = &vp9_default_scan_orders[tx_size];
  int16_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  int16_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint16_t *const eob = &p->eobs[block];
  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  int i, j;
  const int16_t *src_diff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
  src_diff = &p->src_diff[4 * (j * diff_stride + i)];

  switch (tx_size) {
    case TX_32X32:
      fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
      vp9_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
                           p->quant, p->quant_shift, qcoeff, dqcoeff,
                           pd->dequant, p->zbin_extra, eob, scan_order->scan,
                           scan_order->iscan);
      break;
    case TX_16X16:
      vp9_fdct16x16(src_diff, coeff, diff_stride);
      vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
                     p->quant, p->quant_shift, qcoeff, dqcoeff,
                     pd->dequant, p->zbin_extra, eob,
                     scan_order->scan, scan_order->iscan);
      break;
    case TX_8X8:
      vp9_fdct8x8(src_diff, coeff, diff_stride);
      vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
                     p->quant, p->quant_shift, qcoeff, dqcoeff,
                     pd->dequant, p->zbin_extra, eob,
                     scan_order->scan, scan_order->iscan);
      break;
    case TX_4X4:
      x->fwd_txm4x4(src_diff, coeff, diff_stride);
      vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
                     p->quant, p->quant_shift, qcoeff, dqcoeff,
                     pd->dequant, p->zbin_extra, eob,
                     scan_order->scan, scan_order->iscan);
      break;
    default:
      assert(0);
      break;
  }
}

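// Per-transform-block callback used by vp9_encode_sb(): forward transform and
// quantize (unless recoding is skipped), optionally run the trellis
// optimizer, update the above/left entropy contexts, and add the inverse
// transform back into the reconstruction buffer.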
static void encode_block(int plane, int block, BLOCK_SIZE plane_bsize,
                         TX_SIZE tx_size, void *arg) {
  struct encode_b_args *const args = arg;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct optimize_ctx *const ctx = args->ctx;
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  int i, j;
  uint8_t *dst;
  ENTROPY_CONTEXT *a, *l;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
  dst = &pd->dst.buf[4 * j * pd->dst.stride + 4 * i];
  a = &ctx->ta[plane][i];
  l = &ctx->tl[plane][j];

  // TODO(jingning): Per-transform-block zero forcing is only enabled for the
  // luma component; the chroma components will be integrated as well.
  if (x->zcoeff_blk[tx_size][block] && plane == 0) {
    p->eobs[block] = 0;
    *a = *l = 0;
    return;
  }

  if (!x->skip_recode) {
    if (x->skip_txfm[plane] == 0) {
      // full forward transform and quantization
      if (x->quant_fp)
        vp9_xform_quant_fp(x, plane, block, plane_bsize, tx_size);
      else
        vp9_xform_quant(x, plane, block, plane_bsize, tx_size);
    } else if (x->skip_txfm[plane] == 2) {
      // fast path forward transform and quantization
      vp9_xform_quant_dc(x, plane, block, plane_bsize, tx_size);
    } else {
      // skip forward transform
      p->eobs[block] = 0;
      *a = *l = 0;
      return;
    }
  }

  if (x->optimize && (!x->skip_recode || !x->skip_optimize)) {
    const int ctx = combine_entropy_contexts(*a, *l);
    *a = *l = optimize_b(x, plane, block, tx_size, ctx) > 0;
  } else {
    *a = *l = p->eobs[block] > 0;
  }

  if (p->eobs[block])
    *(args->skip) = 0;

  if (x->skip_encode || p->eobs[block] == 0)
    return;

  switch (tx_size) {
    case TX_32X32:
      vp9_idct32x32_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
      break;
    case TX_16X16:
      vp9_idct16x16_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
      break;
    case TX_8X8:
      vp9_idct8x8_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
      break;
    case TX_4X4:
      // This is like vp9_short_idct4x4 but has a special case around eob<=1,
      // which is significant (not just an optimization) for the lossless case.
      x->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
      break;
    default:
      assert(0 && "Invalid transform size");
      break;
  }
}

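// Simplified per-block encode used by vp9_encode_sby_pass1(): transform,
// quantize, and reconstruct without the skip or trellis-optimization logic.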
static void encode_block_pass1(int plane, int block, BLOCK_SIZE plane_bsize,
                               TX_SIZE tx_size, void *arg) {
  MACROBLOCK *const x = (MACROBLOCK *)arg;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  int i, j;
  uint8_t *dst;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
  dst = &pd->dst.buf[4 * j * pd->dst.stride + 4 * i];

  vp9_xform_quant(x, plane, block, plane_bsize, tx_size);

  if (p->eobs[block] > 0)
    x->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
}

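// Encode only the luma plane: subtract the prediction, then transform,
// quantize, and reconstruct each transform block via encode_block_pass1().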
void vp9_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
  vp9_subtract_plane(x, bsize, 0);
  vp9_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
                                         encode_block_pass1, x);
}

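// Encode the residual of all planes of a block: subtract the prediction, set
// up entropy contexts when the trellis optimizer is enabled, then process
// each transform block with encode_block().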
void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  struct optimize_ctx ctx;
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  struct encode_b_args arg = {x, &ctx, &mbmi->skip};
  int plane;

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    if (!x->skip_recode)
      vp9_subtract_plane(x, bsize, plane);

    if (x->optimize && (!x->skip_recode || !x->skip_optimize)) {
      const struct macroblockd_plane* const pd = &xd->plane[plane];
      const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
      vp9_get_entropy_contexts(bsize, tx_size, pd,
                               ctx.ta[plane], ctx.tl[plane]);
    }

    vp9_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
                                           &arg);
  }
}

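// Per-transform-block intra encode: predict, subtract, transform, quantize,
// and reconstruct in place so that later blocks predict from reconstructed
// pixels.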
static void encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
                               TX_SIZE tx_size, void *arg) {
  struct encode_b_args* const args = arg;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  int16_t *coeff = BLOCK_OFFSET(p->coeff, block);
  int16_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  int16_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  const scan_order *scan_order;
  TX_TYPE tx_type;
  PREDICTION_MODE mode;
  const int bwl = b_width_log2(plane_bsize);
  const int diff_stride = 4 * (1 << bwl);
  uint8_t *src, *dst;
  int16_t *src_diff;
  uint16_t *eob = &p->eobs[block];
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  int i, j;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
  dst = &pd->dst.buf[4 * (j * dst_stride + i)];
  src = &p->src.buf[4 * (j * src_stride + i)];
  src_diff = &p->src_diff[4 * (j * diff_stride + i)];

  switch (tx_size) {
    case TX_32X32:
      scan_order = &vp9_default_scan_orders[TX_32X32];
      mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
      vp9_predict_intra_block(xd, block >> 6, bwl, TX_32X32, mode,
                              x->skip_encode ? src : dst,
                              x->skip_encode ? src_stride : dst_stride,
                              dst, dst_stride, i, j, plane);
      if (!x->skip_recode) {
        vp9_subtract_block(32, 32, src_diff, diff_stride,
                           src, src_stride, dst, dst_stride);
        fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
        vp9_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
                             p->quant, p->quant_shift, qcoeff, dqcoeff,
                             pd->dequant, p->zbin_extra, eob, scan_order->scan,
                             scan_order->iscan);
      }
      if (!x->skip_encode && *eob)
        vp9_idct32x32_add(dqcoeff, dst, dst_stride, *eob);
      break;
    case TX_16X16:
      tx_type = get_tx_type(pd->plane_type, xd);
      scan_order = &vp9_scan_orders[TX_16X16][tx_type];
      mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
      vp9_predict_intra_block(xd, block >> 4, bwl, TX_16X16, mode,
                              x->skip_encode ? src : dst,
                              x->skip_encode ? src_stride : dst_stride,
                              dst, dst_stride, i, j, plane);
      if (!x->skip_recode) {
        vp9_subtract_block(16, 16, src_diff, diff_stride,
                           src, src_stride, dst, dst_stride);
        vp9_fht16x16(src_diff, coeff, diff_stride, tx_type);
        vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
                       p->quant, p->quant_shift, qcoeff, dqcoeff,
                       pd->dequant, p->zbin_extra, eob, scan_order->scan,
                       scan_order->iscan);
      }
      if (!x->skip_encode && *eob)
        vp9_iht16x16_add(tx_type, dqcoeff, dst, dst_stride, *eob);
      break;
    case TX_8X8:
      tx_type = get_tx_type(pd->plane_type, xd);
      scan_order = &vp9_scan_orders[TX_8X8][tx_type];
      mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
      vp9_predict_intra_block(xd, block >> 2, bwl, TX_8X8, mode,
                              x->skip_encode ? src : dst,
                              x->skip_encode ? src_stride : dst_stride,
                              dst, dst_stride, i, j, plane);
      if (!x->skip_recode) {
        vp9_subtract_block(8, 8, src_diff, diff_stride,
                           src, src_stride, dst, dst_stride);
        vp9_fht8x8(src_diff, coeff, diff_stride, tx_type);
        vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
                       p->quant_shift, qcoeff, dqcoeff,
                       pd->dequant, p->zbin_extra, eob, scan_order->scan,
                       scan_order->iscan);
      }
      if (!x->skip_encode && *eob)
        vp9_iht8x8_add(tx_type, dqcoeff, dst, dst_stride, *eob);
      break;
    case TX_4X4:
      tx_type = get_tx_type_4x4(pd->plane_type, xd, block);
      scan_order = &vp9_scan_orders[TX_4X4][tx_type];
      mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
      vp9_predict_intra_block(xd, block, bwl, TX_4X4, mode,
                              x->skip_encode ? src : dst,
                              x->skip_encode ? src_stride : dst_stride,
                              dst, dst_stride, i, j, plane);

      if (!x->skip_recode) {
        vp9_subtract_block(4, 4, src_diff, diff_stride,
                           src, src_stride, dst, dst_stride);
        if (tx_type != DCT_DCT)
          vp9_fht4x4(src_diff, coeff, diff_stride, tx_type);
        else
          x->fwd_txm4x4(src_diff, coeff, diff_stride);
        vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
                       p->quant_shift, qcoeff, dqcoeff,
                       pd->dequant, p->zbin_extra, eob, scan_order->scan,
                       scan_order->iscan);
      }

      if (!x->skip_encode && *eob) {
        if (tx_type == DCT_DCT)
          // this is like vp9_short_idct4x4 but has a special case around eob<=1
          // which is significant (not just an optimization) for the lossless
          // case.
          x->itxm_add(dqcoeff, dst, dst_stride, *eob);
        else
          vp9_iht4x4_16_add(dqcoeff, dst, dst_stride, tx_type);
      }
      break;
    default:
      assert(0);
      break;
  }
  if (*eob)
    *(args->skip) = 0;
}

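// Encode a single intra-coded block through encode_block_intra(); no
// optimize_ctx is supplied.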
void vp9_encode_block_intra(MACROBLOCK *x, int plane, int block,
                            BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                            int8_t *skip) {
  struct encode_b_args arg = {x, NULL, skip};
  encode_block_intra(plane, block, plane_bsize, tx_size, &arg);
}


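// Intra-encode every transform block in one plane of the given block size.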
void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
  const MACROBLOCKD *const xd = &x->e_mbd;
  struct encode_b_args arg = {x, NULL, &xd->mi[0]->mbmi.skip};

  vp9_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block_intra,
                                         &arg);
}