/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <limits.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/system_state.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"

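// Runs a full-pixel motion search (temporarily forcing the HEX method)
// followed by sub-pixel refinement for one 16x16 block, writes the result
// to *dst_mv, and returns the SAD of the resulting inter prediction.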
static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, const MV *ref_mv,
                                              MV *dst_mv, int mb_row,
                                              int mb_col) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
  const SEARCH_METHODS old_search_method = mv_sf->search_method;
  const vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
  const MvLimits tmp_mv_limits = x->mv_limits;
  MV ref_full;
  int cost_list[5];

  // Further step/diamond searches as necessary
  int step_param = mv_sf->reduce_first_step_size;
  step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);

  vp9_set_mv_search_range(&x->mv_limits, ref_mv);

  ref_full.col = ref_mv->col >> 3;
  ref_full.row = ref_mv->row >> 3;

  mv_sf->search_method = HEX;
  vp9_full_pixel_search(cpi, x, BLOCK_16X16, &ref_full, step_param,
                        cpi->sf.mv.search_method, x->errorperbit,
                        cond_cost_list(cpi, cost_list), ref_mv, dst_mv, 0, 0);
  mv_sf->search_method = old_search_method;

  /* restore UMV window */
  x->mv_limits = tmp_mv_limits;

  // Try sub-pixel MC
  // if (bestsme > error_thresh && bestsme < INT_MAX)
  {
    uint32_t distortion;
    uint32_t sse;
    cpi->find_fractional_mv_step(
        x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
        &v_fn_ptr, 0, mv_sf->subpel_iters_per_step,
        cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0,
        0);
  }

  xd->mi[0]->mode = NEWMV;
  xd->mi[0]->mv[0].as_mv = *dst_mv;

  vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);

  return vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                      xd->plane[0].dst.buf, xd->plane[0].dst.stride);
}

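// Evaluates the zero MV, a search seeded with ref_mv and, if ref_mv is
// non-zero, a search seeded with the zero MV; keeps whichever gives the
// lowest SAD in *dst_mv and returns that error.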
static int do_16x16_motion_search(VP9_COMP *cpi, const MV *ref_mv,
                                  int_mv *dst_mv, int mb_row, int mb_col) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  unsigned int err, tmp_err;
  MV tmp_mv;

  // Try zero MV first
  // FIXME should really use something like near/nearest MV and/or MV prediction
  err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                     xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride);
  dst_mv->as_int = 0;

  // Test last reference frame using the previous best mv as the
  // starting point (best reference) for the search
  tmp_err = do_16x16_motion_iteration(cpi, ref_mv, &tmp_mv, mb_row, mb_col);
  if (tmp_err < err) {
    err = tmp_err;
    dst_mv->as_mv = tmp_mv;
  }

  // If the current best reference mv is not centered on 0,0 then do a 0,0
  // based search as well.
  if (ref_mv->row != 0 || ref_mv->col != 0) {
    unsigned int tmp_err;
    MV zero_ref_mv = { 0, 0 }, tmp_mv;

    tmp_err =
        do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv, mb_row, mb_col);
    if (tmp_err < err) {
      dst_mv->as_mv = tmp_mv;
      err = tmp_err;
    }
  }

  return err;
}

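// Returns the 16x16 SAD of the (0,0) motion vector against the current
// reference; used below for the alt-ref zero-motion check.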
static int do_16x16_zerozero_search(VP9_COMP *cpi, int_mv *dst_mv) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  unsigned int err;

  // Try zero MV first
  // FIXME should really use something like near/nearest MV and/or MV prediction
  err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                     xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride);

  dst_mv->as_int = 0;

  return err;
}
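
// Measures each 16x16 intra prediction mode by SAD and returns the best
// error, optionally reporting the winning mode via *pbest_mode.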
static int find_best_16x16_intra(VP9_COMP *cpi, PREDICTION_MODE *pbest_mode) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  PREDICTION_MODE best_mode = -1, mode;
  unsigned int best_err = INT_MAX;

  // calculate SAD for each intra prediction mode;
  // we're intentionally not doing 4x4, we just want a rough estimate
  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
    unsigned int err;

    xd->mi[0]->mode = mode;
    vp9_predict_intra_block(xd, 2, TX_16X16, mode, x->plane[0].src.buf,
                            x->plane[0].src.stride, xd->plane[0].dst.buf,
                            xd->plane[0].dst.stride, 0, 0, 0);
    err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                       xd->plane[0].dst.buf, xd->plane[0].dst.stride);

    // find best
    if (err < best_err) {
      best_err = err;
      best_mode = mode;
    }
  }

  if (pbest_mode) *pbest_mode = best_mode;

  return best_err;
}

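// Collects the statistics for one macroblock: best intra error, golden
// reference motion search error, and alt-ref zero-motion error.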
static void update_mbgraph_mb_stats(VP9_COMP *cpi, MBGRAPH_MB_STATS *stats,
                                    YV12_BUFFER_CONFIG *buf, int mb_y_offset,
                                    YV12_BUFFER_CONFIG *golden_ref,
                                    const MV *prev_golden_ref_mv,
                                    YV12_BUFFER_CONFIG *alt_ref, int mb_row,
                                    int mb_col) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  int intra_error;
  VP9_COMMON *cm = &cpi->common;

  // FIXME in practice we're completely ignoring chroma here
  x->plane[0].src.buf = buf->y_buffer + mb_y_offset;
  x->plane[0].src.stride = buf->y_stride;

  xd->plane[0].dst.buf = get_frame_new_buffer(cm)->y_buffer + mb_y_offset;
  xd->plane[0].dst.stride = get_frame_new_buffer(cm)->y_stride;

  // do intra 16x16 prediction
  intra_error = find_best_16x16_intra(cpi, &stats->ref[INTRA_FRAME].m.mode);
  if (intra_error <= 0) intra_error = 1;
  stats->ref[INTRA_FRAME].err = intra_error;

  // Golden frame MV search, if it exists and is different than last frame
  if (golden_ref) {
    int g_motion_error;
    xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset;
    xd->plane[0].pre[0].stride = golden_ref->y_stride;
    g_motion_error =
        do_16x16_motion_search(cpi, prev_golden_ref_mv,
                               &stats->ref[GOLDEN_FRAME].m.mv, mb_row, mb_col);
    stats->ref[GOLDEN_FRAME].err = g_motion_error;
  } else {
    stats->ref[GOLDEN_FRAME].err = INT_MAX;
    stats->ref[GOLDEN_FRAME].m.mv.as_int = 0;
  }

  // Do an Alt-ref frame MV search, if it exists and is different than
  // last/golden frame.
  if (alt_ref) {
    int a_motion_error;
    xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
    xd->plane[0].pre[0].stride = alt_ref->y_stride;
    a_motion_error =
        do_16x16_zerozero_search(cpi, &stats->ref[ALTREF_FRAME].m.mv);

    stats->ref[ALTREF_FRAME].err = a_motion_error;
  } else {
    stats->ref[ALTREF_FRAME].err = INT_MAX;
    stats->ref[ALTREF_FRAME].m.mv.as_int = 0;
  }
}

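// Walks every macroblock in the frame, propagating the golden-frame MV
// predictor across rows and columns and filling in the per-MB stats.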
static void update_mbgraph_frame_stats(VP9_COMP *cpi,
                                       MBGRAPH_FRAME_STATS *stats,
                                       YV12_BUFFER_CONFIG *buf,
                                       YV12_BUFFER_CONFIG *golden_ref,
                                       YV12_BUFFER_CONFIG *alt_ref) {
  MACROBLOCK *const x = &cpi->td.mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  VP9_COMMON *const cm = &cpi->common;

  int mb_col, mb_row, offset = 0;
  int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
  MV gld_top_mv = { 0, 0 };
  MODE_INFO mi_local;
  MODE_INFO mi_above, mi_left;

  vp9_zero(mi_local);
  // Set up limit values for motion vectors to prevent them extending outside
  // the UMV borders.
  x->mv_limits.row_min = -BORDER_MV_PIXELS_B16;
  x->mv_limits.row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16;
  // Signal to vp9_predict_intra_block() that above is not available
  xd->above_mi = NULL;

  xd->plane[0].dst.stride = buf->y_stride;
  xd->plane[0].pre[0].stride = buf->y_stride;
  xd->plane[1].dst.stride = buf->uv_stride;
  xd->mi[0] = &mi_local;
  mi_local.sb_type = BLOCK_16X16;
  mi_local.ref_frame[0] = LAST_FRAME;
  mi_local.ref_frame[1] = NONE;

  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
    MV gld_left_mv = gld_top_mv;
    int mb_y_in_offset = mb_y_offset;
    int arf_y_in_offset = arf_y_offset;
    int gld_y_in_offset = gld_y_offset;

    // Set up limit values for motion vectors to prevent them extending outside
    // the UMV borders.
    x->mv_limits.col_min = -BORDER_MV_PIXELS_B16;
    x->mv_limits.col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16;
    // Signal to vp9_predict_intra_block() that left is not available
    xd->left_mi = NULL;

    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
      MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];

      update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset, golden_ref,
                              &gld_left_mv, alt_ref, mb_row, mb_col);
      gld_left_mv = mb_stats->ref[GOLDEN_FRAME].m.mv.as_mv;
      if (mb_col == 0) {
        gld_top_mv = gld_left_mv;
      }
      // Signal to vp9_predict_intra_block() that left is available
      xd->left_mi = &mi_left;

      mb_y_in_offset += 16;
      gld_y_in_offset += 16;
      arf_y_in_offset += 16;
      x->mv_limits.col_min -= 16;
      x->mv_limits.col_max -= 16;
    }

    // Signal to vp9_predict_intra_block() that above is available
    xd->above_mi = &mi_above;

    mb_y_offset += buf->y_stride * 16;
    gld_y_offset += golden_ref->y_stride * 16;
    if (alt_ref) arf_y_offset += alt_ref->y_stride * 16;
    x->mv_limits.row_min -= 16;
    x->mv_limits.row_max -= 16;
    offset += cm->mb_cols;
  }
}

// void separate_arf_mbs_byzz
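// Marks macroblocks whose alt-ref zero-motion prediction is not clearly the
// best choice, builds the segmentation map from that, and records the
// percentage of static macroblocks.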
static void separate_arf_mbs(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  int mb_col, mb_row, offset, i;
  int mi_row, mi_col;
  int ncnt[4] = { 0 };
  int n_frames = cpi->mbgraph_n_frames;

  int *arf_not_zz;

  CHECK_MEM_ERROR(
      cm, arf_not_zz,
      vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));

  // We are not interested in results beyond the alt ref itself.
  if (n_frames > cpi->rc.frames_till_gf_update_due)
    n_frames = cpi->rc.frames_till_gf_update_due;

  // defer cost to reference frames
  for (i = n_frames - 1; i >= 0; i--) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];

    for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
         offset += cm->mb_cols, mb_row++) {
      for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
        MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col];

        int altref_err = mb_stats->ref[ALTREF_FRAME].err;
        int intra_err = mb_stats->ref[INTRA_FRAME].err;
        int golden_err = mb_stats->ref[GOLDEN_FRAME].err;

        // Test for altref vs intra and gf and that its mv was 0,0.
        if (altref_err > 1000 || altref_err > intra_err ||
            altref_err > golden_err) {
          arf_not_zz[offset + mb_col]++;
        }
      }
    }
  }

  // arf_not_zz is indexed by MB, but this loop is indexed by MI to avoid
  // out-of-bounds access in segmentation_map
  for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) {
    for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) {
      // If any of the blocks in the sequence failed then the MB
      // goes in segment 0
      if (arf_not_zz[mi_row / 2 * cm->mb_cols + mi_col / 2]) {
        ncnt[0]++;
        cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 0;
      } else {
        cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 1;
        ncnt[1]++;
      }
    }
  }

  // Only bother with segmentation if over 10% of the MBs in static segment
  // if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) )
  if (1) {
    // Note % of blocks that are marked as static
    if (cm->MBs)
      cpi->static_mb_pct = (ncnt[1] * 100) / (cm->mi_rows * cm->mi_cols);

    // This error case should not be reachable as this function should
    // never be called with the common data structure uninitialized.
    else
      cpi->static_mb_pct = 0;

    vp9_enable_segmentation(&cm->seg);
  } else {
    cpi->static_mb_pct = 0;
    vp9_disable_segmentation(&cm->seg);
  }

  // Free locally allocated storage
  vpx_free(arf_not_zz);
}

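// Top-level entry point: runs the MB-graph analysis over the lookahead
// buffer (when it extends past the next ARF) and uses the results to build
// the static-region segmentation map.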
void vp9_update_mbgraph_stats(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  int i, n_frames = vp9_lookahead_depth(cpi->lookahead);
  YV12_BUFFER_CONFIG *golden_ref = get_ref_frame_buffer(cpi, GOLDEN_FRAME);

  assert(golden_ref != NULL);

  // we need to look ahead beyond where the ARF transitions into
  // being a GF - so exit if we don't look ahead beyond that
  if (n_frames <= cpi->rc.frames_till_gf_update_due) return;

  if (n_frames > MAX_LAG_BUFFERS) n_frames = MAX_LAG_BUFFERS;

  cpi->mbgraph_n_frames = n_frames;
  for (i = 0; i < n_frames; i++) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
    memset(frame_stats->mb_stats, 0,
           cm->mb_rows * cm->mb_cols * sizeof(*cpi->mbgraph_stats[i].mb_stats));
  }

  // do motion search to find contribution of each reference to data
  // later on in this GF group
  // FIXME really, the GF/last MC search should be done forward, and
  // the ARF MC search backwards, to get optimal results for MV caching
  for (i = 0; i < n_frames; i++) {
    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
    struct lookahead_entry *q_cur = vp9_lookahead_peek(cpi->lookahead, i);

    assert(q_cur != NULL);

    update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, golden_ref,
                               cpi->Source);
  }

  vpx_clear_system_state();

  separate_arf_mbs(cpi);
}