1 /*
2  * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3  *
4  * This source code is subject to the terms of the BSD 2 Clause License and
5  * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6  * was not distributed with this source code in the LICENSE file, you can
7  * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8  * Media Patent License 1.0 was not distributed with this source code in the
9  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10  */
11 
12 #include <assert.h>
13 #include <math.h>
14 #include <stdbool.h>
15 
16 #include "config/aom_config.h"
17 #include "config/aom_dsp_rtcd.h"
18 #include "config/av1_rtcd.h"
19 
20 #include "aom_dsp/aom_dsp_common.h"
21 #include "aom_dsp/blend.h"
22 #include "aom_mem/aom_mem.h"
23 #include "aom_ports/aom_timer.h"
24 #include "aom_ports/mem.h"
25 #include "aom_ports/system_state.h"
26 
27 #include "av1/common/av1_common_int.h"
28 #include "av1/common/cfl.h"
29 #include "av1/common/common.h"
30 #include "av1/common/common_data.h"
31 #include "av1/common/entropy.h"
32 #include "av1/common/entropymode.h"
33 #include "av1/common/idct.h"
34 #include "av1/common/mvref_common.h"
35 #include "av1/common/obmc.h"
36 #include "av1/common/pred_common.h"
37 #include "av1/common/quant_common.h"
38 #include "av1/common/reconinter.h"
39 #include "av1/common/reconintra.h"
40 #include "av1/common/scan.h"
41 #include "av1/common/seg_common.h"
42 #include "av1/common/txb_common.h"
43 #include "av1/common/warped_motion.h"
44 
45 #include "av1/encoder/aq_variance.h"
46 #include "av1/encoder/av1_quantize.h"
47 #include "av1/encoder/cost.h"
48 #include "av1/encoder/compound_type.h"
49 #include "av1/encoder/encodemb.h"
50 #include "av1/encoder/encodemv.h"
51 #include "av1/encoder/encoder.h"
52 #include "av1/encoder/encodetxb.h"
53 #include "av1/encoder/hybrid_fwd_txfm.h"
54 #include "av1/encoder/interp_search.h"
55 #include "av1/encoder/intra_mode_search.h"
56 #include "av1/encoder/mcomp.h"
57 #include "av1/encoder/ml.h"
58 #include "av1/encoder/mode_prune_model_weights.h"
59 #include "av1/encoder/model_rd.h"
60 #include "av1/encoder/motion_search_facade.h"
61 #include "av1/encoder/palette.h"
62 #include "av1/encoder/pustats.h"
63 #include "av1/encoder/random.h"
64 #include "av1/encoder/ratectrl.h"
65 #include "av1/encoder/rd.h"
66 #include "av1/encoder/rdopt.h"
67 #include "av1/encoder/reconinter_enc.h"
68 #include "av1/encoder/tokenize.h"
69 #include "av1/encoder/tpl_model.h"
70 #include "av1/encoder/tx_search.h"
71 
#define LAST_NEW_MV_INDEX 6

// Mode_threshold multiplication factor table for prune_inter_modes_if_skippable
// The values are kept in Q12 format and equation used to derive is
// (2.5 - ((float)x->qindex / MAXQ) * 1.5)
// i.e. the factor falls linearly from 2.5 (Q12: 10240) at qindex 0 down to
// 1.0 (Q12: 4096) at qindex MAXQ, indexed directly by qindex.
#define MODE_THRESH_QBITS 12
static const int mode_threshold_mul_factor[QINDEX_RANGE] = {
  10240, 10216, 10192, 10168, 10144, 10120, 10095, 10071, 10047, 10023, 9999,
  9975,  9951,  9927,  9903,  9879,  9854,  9830,  9806,  9782,  9758,  9734,
  9710,  9686,  9662,  9638,  9614,  9589,  9565,  9541,  9517,  9493,  9469,
  9445,  9421,  9397,  9373,  9349,  9324,  9300,  9276,  9252,  9228,  9204,
  9180,  9156,  9132,  9108,  9083,  9059,  9035,  9011,  8987,  8963,  8939,
  8915,  8891,  8867,  8843,  8818,  8794,  8770,  8746,  8722,  8698,  8674,
  8650,  8626,  8602,  8578,  8553,  8529,  8505,  8481,  8457,  8433,  8409,
  8385,  8361,  8337,  8312,  8288,  8264,  8240,  8216,  8192,  8168,  8144,
  8120,  8096,  8072,  8047,  8023,  7999,  7975,  7951,  7927,  7903,  7879,
  7855,  7831,  7806,  7782,  7758,  7734,  7710,  7686,  7662,  7638,  7614,
  7590,  7566,  7541,  7517,  7493,  7469,  7445,  7421,  7397,  7373,  7349,
  7325,  7301,  7276,  7252,  7228,  7204,  7180,  7156,  7132,  7108,  7084,
  7060,  7035,  7011,  6987,  6963,  6939,  6915,  6891,  6867,  6843,  6819,
  6795,  6770,  6746,  6722,  6698,  6674,  6650,  6626,  6602,  6578,  6554,
  6530,  6505,  6481,  6457,  6433,  6409,  6385,  6361,  6337,  6313,  6289,
  6264,  6240,  6216,  6192,  6168,  6144,  6120,  6096,  6072,  6048,  6024,
  5999,  5975,  5951,  5927,  5903,  5879,  5855,  5831,  5807,  5783,  5758,
  5734,  5710,  5686,  5662,  5638,  5614,  5590,  5566,  5542,  5518,  5493,
  5469,  5445,  5421,  5397,  5373,  5349,  5325,  5301,  5277,  5253,  5228,
  5204,  5180,  5156,  5132,  5108,  5084,  5060,  5036,  5012,  4987,  4963,
  4939,  4915,  4891,  4867,  4843,  4819,  4795,  4771,  4747,  4722,  4698,
  4674,  4650,  4626,  4602,  4578,  4554,  4530,  4506,  4482,  4457,  4433,
  4409,  4385,  4361,  4337,  4313,  4289,  4265,  4241,  4216,  4192,  4168,
  4144,  4120,  4096
};
104 
// Default evaluation order for the inter/intra mode RD search: modes earlier
// in this list are tried first. Single-reference inter modes come first,
// then compound modes grouped by reference-frame pair, then intra modes.
static const THR_MODES av1_default_mode_order[MAX_MODES] = {
  // Single-reference NEARESTMV, one entry per reference frame.
  THR_NEARESTMV,
  THR_NEARESTL2,
  THR_NEARESTL3,
  THR_NEARESTB,
  THR_NEARESTA2,
  THR_NEARESTA,
  THR_NEARESTG,

  // Single-reference NEWMV.
  THR_NEWMV,
  THR_NEWL2,
  THR_NEWL3,
  THR_NEWB,
  THR_NEWA2,
  THR_NEWA,
  THR_NEWG,

  // Single-reference NEARMV.
  THR_NEARMV,
  THR_NEARL2,
  THR_NEARL3,
  THR_NEARB,
  THR_NEARA2,
  THR_NEARA,
  THR_NEARG,

  // Single-reference GLOBALMV.
  THR_GLOBALMV,
  THR_GLOBALL2,
  THR_GLOBALL3,
  THR_GLOBALB,
  THR_GLOBALA2,
  THR_GLOBALA,
  THR_GLOBALG,

  // Compound NEAREST_NEAREST modes for each reference pair.
  THR_COMP_NEAREST_NEARESTLA,
  THR_COMP_NEAREST_NEARESTL2A,
  THR_COMP_NEAREST_NEARESTL3A,
  THR_COMP_NEAREST_NEARESTGA,
  THR_COMP_NEAREST_NEARESTLB,
  THR_COMP_NEAREST_NEARESTL2B,
  THR_COMP_NEAREST_NEARESTL3B,
  THR_COMP_NEAREST_NEARESTGB,
  THR_COMP_NEAREST_NEARESTLA2,
  THR_COMP_NEAREST_NEARESTL2A2,
  THR_COMP_NEAREST_NEARESTL3A2,
  THR_COMP_NEAREST_NEARESTGA2,
  THR_COMP_NEAREST_NEARESTLL2,
  THR_COMP_NEAREST_NEARESTLL3,
  THR_COMP_NEAREST_NEARESTLG,
  THR_COMP_NEAREST_NEARESTBA,

  // Remaining compound modes, grouped by reference-frame pair
  // (NEAR_NEAR, NEW_NEAREST, NEAREST_NEW, NEW_NEAR, NEAR_NEW, NEW_NEW,
  // GLOBAL_GLOBAL for each pair).
  THR_COMP_NEAR_NEARLA,
  THR_COMP_NEW_NEARESTLA,
  THR_COMP_NEAREST_NEWLA,
  THR_COMP_NEW_NEARLA,
  THR_COMP_NEAR_NEWLA,
  THR_COMP_NEW_NEWLA,
  THR_COMP_GLOBAL_GLOBALLA,

  THR_COMP_NEAR_NEARL2A,
  THR_COMP_NEW_NEARESTL2A,
  THR_COMP_NEAREST_NEWL2A,
  THR_COMP_NEW_NEARL2A,
  THR_COMP_NEAR_NEWL2A,
  THR_COMP_NEW_NEWL2A,
  THR_COMP_GLOBAL_GLOBALL2A,

  THR_COMP_NEAR_NEARL3A,
  THR_COMP_NEW_NEARESTL3A,
  THR_COMP_NEAREST_NEWL3A,
  THR_COMP_NEW_NEARL3A,
  THR_COMP_NEAR_NEWL3A,
  THR_COMP_NEW_NEWL3A,
  THR_COMP_GLOBAL_GLOBALL3A,

  THR_COMP_NEAR_NEARGA,
  THR_COMP_NEW_NEARESTGA,
  THR_COMP_NEAREST_NEWGA,
  THR_COMP_NEW_NEARGA,
  THR_COMP_NEAR_NEWGA,
  THR_COMP_NEW_NEWGA,
  THR_COMP_GLOBAL_GLOBALGA,

  THR_COMP_NEAR_NEARLB,
  THR_COMP_NEW_NEARESTLB,
  THR_COMP_NEAREST_NEWLB,
  THR_COMP_NEW_NEARLB,
  THR_COMP_NEAR_NEWLB,
  THR_COMP_NEW_NEWLB,
  THR_COMP_GLOBAL_GLOBALLB,

  THR_COMP_NEAR_NEARL2B,
  THR_COMP_NEW_NEARESTL2B,
  THR_COMP_NEAREST_NEWL2B,
  THR_COMP_NEW_NEARL2B,
  THR_COMP_NEAR_NEWL2B,
  THR_COMP_NEW_NEWL2B,
  THR_COMP_GLOBAL_GLOBALL2B,

  THR_COMP_NEAR_NEARL3B,
  THR_COMP_NEW_NEARESTL3B,
  THR_COMP_NEAREST_NEWL3B,
  THR_COMP_NEW_NEARL3B,
  THR_COMP_NEAR_NEWL3B,
  THR_COMP_NEW_NEWL3B,
  THR_COMP_GLOBAL_GLOBALL3B,

  THR_COMP_NEAR_NEARGB,
  THR_COMP_NEW_NEARESTGB,
  THR_COMP_NEAREST_NEWGB,
  THR_COMP_NEW_NEARGB,
  THR_COMP_NEAR_NEWGB,
  THR_COMP_NEW_NEWGB,
  THR_COMP_GLOBAL_GLOBALGB,

  THR_COMP_NEAR_NEARLA2,
  THR_COMP_NEW_NEARESTLA2,
  THR_COMP_NEAREST_NEWLA2,
  THR_COMP_NEW_NEARLA2,
  THR_COMP_NEAR_NEWLA2,
  THR_COMP_NEW_NEWLA2,
  THR_COMP_GLOBAL_GLOBALLA2,

  THR_COMP_NEAR_NEARL2A2,
  THR_COMP_NEW_NEARESTL2A2,
  THR_COMP_NEAREST_NEWL2A2,
  THR_COMP_NEW_NEARL2A2,
  THR_COMP_NEAR_NEWL2A2,
  THR_COMP_NEW_NEWL2A2,
  THR_COMP_GLOBAL_GLOBALL2A2,

  THR_COMP_NEAR_NEARL3A2,
  THR_COMP_NEW_NEARESTL3A2,
  THR_COMP_NEAREST_NEWL3A2,
  THR_COMP_NEW_NEARL3A2,
  THR_COMP_NEAR_NEWL3A2,
  THR_COMP_NEW_NEWL3A2,
  THR_COMP_GLOBAL_GLOBALL3A2,

  THR_COMP_NEAR_NEARGA2,
  THR_COMP_NEW_NEARESTGA2,
  THR_COMP_NEAREST_NEWGA2,
  THR_COMP_NEW_NEARGA2,
  THR_COMP_NEAR_NEWGA2,
  THR_COMP_NEW_NEWGA2,
  THR_COMP_GLOBAL_GLOBALGA2,

  THR_COMP_NEAR_NEARLL2,
  THR_COMP_NEW_NEARESTLL2,
  THR_COMP_NEAREST_NEWLL2,
  THR_COMP_NEW_NEARLL2,
  THR_COMP_NEAR_NEWLL2,
  THR_COMP_NEW_NEWLL2,
  THR_COMP_GLOBAL_GLOBALLL2,

  THR_COMP_NEAR_NEARLL3,
  THR_COMP_NEW_NEARESTLL3,
  THR_COMP_NEAREST_NEWLL3,
  THR_COMP_NEW_NEARLL3,
  THR_COMP_NEAR_NEWLL3,
  THR_COMP_NEW_NEWLL3,
  THR_COMP_GLOBAL_GLOBALLL3,

  THR_COMP_NEAR_NEARLG,
  THR_COMP_NEW_NEARESTLG,
  THR_COMP_NEAREST_NEWLG,
  THR_COMP_NEW_NEARLG,
  THR_COMP_NEAR_NEWLG,
  THR_COMP_NEW_NEWLG,
  THR_COMP_GLOBAL_GLOBALLG,

  THR_COMP_NEAR_NEARBA,
  THR_COMP_NEW_NEARESTBA,
  THR_COMP_NEAREST_NEWBA,
  THR_COMP_NEW_NEARBA,
  THR_COMP_NEAR_NEWBA,
  THR_COMP_NEW_NEWBA,
  THR_COMP_GLOBAL_GLOBALBA,

  // Intra modes, evaluated last.
  THR_DC,
  THR_PAETH,
  THR_SMOOTH,
  THR_SMOOTH_V,
  THR_SMOOTH_H,
  THR_H_PRED,
  THR_V_PRED,
  THR_D135_PRED,
  THR_D203_PRED,
  THR_D157_PRED,
  THR_D67_PRED,
  THR_D113_PRED,
  THR_D45_PRED,
};
297 
find_last_single_ref_mode_idx(const THR_MODES * mode_order)298 static int find_last_single_ref_mode_idx(const THR_MODES *mode_order) {
299   uint8_t mode_found[NUM_SINGLE_REF_MODES];
300   av1_zero(mode_found);
301   int num_single_ref_modes_left = NUM_SINGLE_REF_MODES;
302 
303   for (int idx = 0; idx < MAX_MODES; idx++) {
304     const THR_MODES curr_mode = mode_order[idx];
305     if (curr_mode < SINGLE_REF_MODE_END) {
306       num_single_ref_modes_left--;
307     }
308     if (!num_single_ref_modes_left) {
309       return idx;
310     }
311   }
312   return -1;
313 }
314 
// Result of evaluating one single-reference inter mode against one
// reference frame.
typedef struct SingleInterModeState {
  int64_t rd;                    // RD cost of this mode/reference combination
  MV_REFERENCE_FRAME ref_frame;  // reference frame the mode was evaluated with
  int valid;                     // nonzero once this entry has been populated
} SingleInterModeState;
320 
// State carried across the inter mode RD search for one block: the
// best-so-far result plus caches that let later mode evaluations reuse
// earlier work (e.g. NEWMV motion search results and modelled RD costs).
typedef struct InterModeSearchState {
  int64_t best_rd;           // best overall RD cost found so far
  int64_t best_skip_rd[2];   // best skip-path RD costs (two contexts)
  MB_MODE_INFO best_mbmode;  // mode info of the current best candidate
  int best_rate_y;           // luma rate of the best candidate
  int best_rate_uv;          // chroma rate of the best candidate
  int best_mode_skippable;
  int best_skip2;
  THR_MODES best_mode_index;  // position of the best mode in the mode order
  int num_available_refs;
  int64_t dist_refs[REF_FRAMES];
  int dist_order_refs[REF_FRAMES];
  int64_t mode_threshold[MAX_MODES];  // per-mode RD pruning thresholds
  int64_t best_intra_rd;
  unsigned int best_pred_sse;
  int64_t best_pred_diff[REFERENCE_MODES];
  // Save a set of single_newmv for each checked ref_mv.
  int_mv single_newmv[MAX_REF_MV_SEARCH][REF_FRAMES];
  int single_newmv_rate[MAX_REF_MV_SEARCH][REF_FRAMES];
  int single_newmv_valid[MAX_REF_MV_SEARCH][REF_FRAMES];
  int64_t modelled_rd[MB_MODE_COUNT][MAX_REF_MV_SEARCH][REF_FRAMES];
  // The rd of simple translation in single inter modes
  int64_t simple_rd[MB_MODE_COUNT][MAX_REF_MV_SEARCH][REF_FRAMES];

  // Single search results by [directions][modes][reference frames]
  SingleInterModeState single_state[2][SINGLE_INTER_MODE_NUM][FWD_REFS];
  int single_state_cnt[2][SINGLE_INTER_MODE_NUM];
  SingleInterModeState single_state_modelled[2][SINGLE_INTER_MODE_NUM]
                                            [FWD_REFS];
  int single_state_modelled_cnt[2][SINGLE_INTER_MODE_NUM];
  MV_REFERENCE_FRAME single_rd_order[2][SINGLE_INTER_MODE_NUM][FWD_REFS];
  IntraModeSearchState intra_search_state;  // nested intra-search state
} InterModeSearchState;
354 
// Reset every per-block-size inter mode RD model in this tile to its empty,
// untrained state (no samples accumulated, model not ready).
void av1_inter_mode_data_init(TileDataEnc *tile_data) {
  for (int bsize = 0; bsize < BLOCK_SIZES_ALL; ++bsize) {
    InterModeRdModel *const model = &tile_data->inter_mode_rd_models[bsize];
    model->ready = 0;
    model->num = 0;
    model->dist_sum = 0;
    model->ld_sum = 0;
    model->sse_sum = 0;
    model->sse_sse_sum = 0;
    model->sse_ld_sum = 0;
  }
}
367 
// Estimate residue rate and distortion for a block of the given size and sse
// using the tile's fitted linear model (ld ~= a * sse + b). Writes the
// estimates and returns 1 when the model is trained; returns 0 otherwise.
static int get_est_rate_dist(const TileDataEnc *tile_data, BLOCK_SIZE bsize,
                             int64_t sse, int *est_residue_cost,
                             int64_t *est_dist) {
  aom_clear_system_state();
  const InterModeRdModel *model = &tile_data->inter_mode_rd_models[bsize];
  if (!model->ready) return 0;

  if (sse < model->dist_mean) {
    // Below the mean distortion the model predicts a zero-rate block.
    *est_residue_cost = 0;
    *est_dist = sse;
    return 1;
  }

  *est_dist = (int64_t)round(model->dist_mean);
  const double est_ld = model->a * sse + model->b;
  // Clamp estimated rate cost by INT_MAX / 2.
  // TODO(angiebird@google.com): find better solution than clamping.
  if (fabs(est_ld) < 1e-2) {
    // Near-zero slope would blow up the division below.
    *est_residue_cost = INT_MAX / 2;
  } else {
    const double cost_dbl = (sse - model->dist_mean) / est_ld;
    if (cost_dbl < 0) {
      *est_residue_cost = 0;
    } else {
      *est_residue_cost =
          (int)AOMMIN((int64_t)round(cost_dbl), INT_MAX / 2);
    }
  }
  // A non-positive rate estimate means the model predicts no coded residue.
  if (*est_residue_cost <= 0) {
    *est_residue_cost = 0;
    *est_dist = sse;
  }
  return 1;
}
402 
// (Re)fit the per-block-size linear rate-distortion models from the samples
// accumulated by inter_mode_data_push(). A model needs at least 200 samples
// for its first fit and 64 for each subsequent refresh; after fitting, the
// sample accumulators are reset for the next batch.
void av1_inter_mode_data_fit(TileDataEnc *tile_data, int rdmult) {
  aom_clear_system_state();
  for (int bsize = 0; bsize < BLOCK_SIZES_ALL; ++bsize) {
    const int block_idx = inter_mode_data_block_idx(bsize);
    InterModeRdModel *md = &tile_data->inter_mode_rd_models[bsize];
    // Only a subset of block sizes is modelled.
    if (block_idx == -1) continue;
    if ((md->ready == 0 && md->num < 200) || (md->ready == 1 && md->num < 64)) {
      continue;
    } else {
      if (md->ready == 0) {
        // First fit: means come straight from the accumulated sums.
        md->dist_mean = md->dist_sum / md->num;
        md->ld_mean = md->ld_sum / md->num;
        md->sse_mean = md->sse_sum / md->num;
        md->sse_sse_mean = md->sse_sse_sum / md->num;
        md->sse_ld_mean = md->sse_ld_sum / md->num;
      } else {
        // Refresh: blend old means with the new batch means, weighting the
        // existing model 3:1 (exponential moving average).
        const double factor = 3;
        md->dist_mean =
            (md->dist_mean * factor + (md->dist_sum / md->num)) / (factor + 1);
        md->ld_mean =
            (md->ld_mean * factor + (md->ld_sum / md->num)) / (factor + 1);
        md->sse_mean =
            (md->sse_mean * factor + (md->sse_sum / md->num)) / (factor + 1);
        md->sse_sse_mean =
            (md->sse_sse_mean * factor + (md->sse_sse_sum / md->num)) /
            (factor + 1);
        md->sse_ld_mean =
            (md->sse_ld_mean * factor + (md->sse_ld_sum / md->num)) /
            (factor + 1);
      }

      // Least-squares fit of the line ld ~= a * sse + b from the first and
      // second moments of (sse, ld).
      const double my = md->ld_mean;
      const double mx = md->sse_mean;
      const double dx = sqrt(md->sse_sse_mean);
      const double dxy = md->sse_ld_mean;

      md->a = (dxy - mx * my) / (dx * dx - mx * mx);
      md->b = my - md->a * mx;
      md->ready = 1;

      // Start accumulating a fresh batch for the next refresh.
      md->num = 0;
      md->dist_sum = 0;
      md->ld_sum = 0;
      md->sse_sum = 0;
      md->sse_sse_sum = 0;
      md->sse_ld_sum = 0;
    }
    (void)rdmult;
  }
}
453 
inter_mode_data_push(TileDataEnc * tile_data,BLOCK_SIZE bsize,int64_t sse,int64_t dist,int residue_cost)454 static AOM_INLINE void inter_mode_data_push(TileDataEnc *tile_data,
455                                             BLOCK_SIZE bsize, int64_t sse,
456                                             int64_t dist, int residue_cost) {
457   if (residue_cost == 0 || sse == dist) return;
458   const int block_idx = inter_mode_data_block_idx(bsize);
459   if (block_idx == -1) return;
460   InterModeRdModel *rd_model = &tile_data->inter_mode_rd_models[bsize];
461   if (rd_model->num < INTER_MODE_RD_DATA_OVERALL_SIZE) {
462     aom_clear_system_state();
463     const double ld = (sse - dist) * 1. / residue_cost;
464     ++rd_model->num;
465     rd_model->dist_sum += dist;
466     rd_model->ld_sum += ld;
467     rd_model->sse_sum += sse;
468     rd_model->sse_sse_sum += (double)sse * (double)sse;
469     rd_model->sse_ld_sum += sse * ld;
470   }
471 }
472 
inter_modes_info_push(InterModesInfo * inter_modes_info,int mode_rate,int64_t sse,int64_t rd,RD_STATS * rd_cost,RD_STATS * rd_cost_y,RD_STATS * rd_cost_uv,const MB_MODE_INFO * mbmi)473 static AOM_INLINE void inter_modes_info_push(InterModesInfo *inter_modes_info,
474                                              int mode_rate, int64_t sse,
475                                              int64_t rd, RD_STATS *rd_cost,
476                                              RD_STATS *rd_cost_y,
477                                              RD_STATS *rd_cost_uv,
478                                              const MB_MODE_INFO *mbmi) {
479   const int num = inter_modes_info->num;
480   assert(num < MAX_INTER_MODES);
481   inter_modes_info->mbmi_arr[num] = *mbmi;
482   inter_modes_info->mode_rate_arr[num] = mode_rate;
483   inter_modes_info->sse_arr[num] = sse;
484   inter_modes_info->est_rd_arr[num] = rd;
485   inter_modes_info->rd_cost_arr[num] = *rd_cost;
486   inter_modes_info->rd_cost_y_arr[num] = *rd_cost_y;
487   inter_modes_info->rd_cost_uv_arr[num] = *rd_cost_uv;
488   ++inter_modes_info->num;
489 }
490 
compare_rd_idx_pair(const void * a,const void * b)491 static int compare_rd_idx_pair(const void *a, const void *b) {
492   if (((RdIdxPair *)a)->rd == ((RdIdxPair *)b)->rd) {
493     return 0;
494   } else if (((const RdIdxPair *)a)->rd > ((const RdIdxPair *)b)->rd) {
495     return 1;
496   } else {
497     return -1;
498   }
499 }
500 
inter_modes_info_sort(const InterModesInfo * inter_modes_info,RdIdxPair * rd_idx_pair_arr)501 static AOM_INLINE void inter_modes_info_sort(
502     const InterModesInfo *inter_modes_info, RdIdxPair *rd_idx_pair_arr) {
503   if (inter_modes_info->num == 0) {
504     return;
505   }
506   for (int i = 0; i < inter_modes_info->num; ++i) {
507     rd_idx_pair_arr[i].idx = i;
508     rd_idx_pair_arr[i].rd = inter_modes_info->est_rd_arr[i];
509   }
510   qsort(rd_idx_pair_arr, inter_modes_info->num, sizeof(rd_idx_pair_arr[0]),
511         compare_rd_idx_pair);
512 }
513 
514 // Similar to get_horver_correlation, but also takes into account first
515 // row/column, when computing horizontal/vertical correlation.
av1_get_horver_correlation_full_c(const int16_t * diff,int stride,int width,int height,float * hcorr,float * vcorr)516 void av1_get_horver_correlation_full_c(const int16_t *diff, int stride,
517                                        int width, int height, float *hcorr,
518                                        float *vcorr) {
519   // The following notation is used:
520   // x - current pixel
521   // y - left neighbor pixel
522   // z - top neighbor pixel
523   int64_t x_sum = 0, x2_sum = 0, xy_sum = 0, xz_sum = 0;
524   int64_t x_firstrow = 0, x_finalrow = 0, x_firstcol = 0, x_finalcol = 0;
525   int64_t x2_firstrow = 0, x2_finalrow = 0, x2_firstcol = 0, x2_finalcol = 0;
526 
527   // First, process horizontal correlation on just the first row
528   x_sum += diff[0];
529   x2_sum += diff[0] * diff[0];
530   x_firstrow += diff[0];
531   x2_firstrow += diff[0] * diff[0];
532   for (int j = 1; j < width; ++j) {
533     const int16_t x = diff[j];
534     const int16_t y = diff[j - 1];
535     x_sum += x;
536     x_firstrow += x;
537     x2_sum += x * x;
538     x2_firstrow += x * x;
539     xy_sum += x * y;
540   }
541 
542   // Process vertical correlation in the first column
543   x_firstcol += diff[0];
544   x2_firstcol += diff[0] * diff[0];
545   for (int i = 1; i < height; ++i) {
546     const int16_t x = diff[i * stride];
547     const int16_t z = diff[(i - 1) * stride];
548     x_sum += x;
549     x_firstcol += x;
550     x2_sum += x * x;
551     x2_firstcol += x * x;
552     xz_sum += x * z;
553   }
554 
555   // Now process horiz and vert correlation through the rest unit
556   for (int i = 1; i < height; ++i) {
557     for (int j = 1; j < width; ++j) {
558       const int16_t x = diff[i * stride + j];
559       const int16_t y = diff[i * stride + j - 1];
560       const int16_t z = diff[(i - 1) * stride + j];
561       x_sum += x;
562       x2_sum += x * x;
563       xy_sum += x * y;
564       xz_sum += x * z;
565     }
566   }
567 
568   for (int j = 0; j < width; ++j) {
569     x_finalrow += diff[(height - 1) * stride + j];
570     x2_finalrow +=
571         diff[(height - 1) * stride + j] * diff[(height - 1) * stride + j];
572   }
573   for (int i = 0; i < height; ++i) {
574     x_finalcol += diff[i * stride + width - 1];
575     x2_finalcol += diff[i * stride + width - 1] * diff[i * stride + width - 1];
576   }
577 
578   int64_t xhor_sum = x_sum - x_finalcol;
579   int64_t xver_sum = x_sum - x_finalrow;
580   int64_t y_sum = x_sum - x_firstcol;
581   int64_t z_sum = x_sum - x_firstrow;
582   int64_t x2hor_sum = x2_sum - x2_finalcol;
583   int64_t x2ver_sum = x2_sum - x2_finalrow;
584   int64_t y2_sum = x2_sum - x2_firstcol;
585   int64_t z2_sum = x2_sum - x2_firstrow;
586 
587   const float num_hor = (float)(height * (width - 1));
588   const float num_ver = (float)((height - 1) * width);
589 
590   const float xhor_var_n = x2hor_sum - (xhor_sum * xhor_sum) / num_hor;
591   const float xver_var_n = x2ver_sum - (xver_sum * xver_sum) / num_ver;
592 
593   const float y_var_n = y2_sum - (y_sum * y_sum) / num_hor;
594   const float z_var_n = z2_sum - (z_sum * z_sum) / num_ver;
595 
596   const float xy_var_n = xy_sum - (xhor_sum * y_sum) / num_hor;
597   const float xz_var_n = xz_sum - (xver_sum * z_sum) / num_ver;
598 
599   if (xhor_var_n > 0 && y_var_n > 0) {
600     *hcorr = xy_var_n / sqrtf(xhor_var_n * y_var_n);
601     *hcorr = *hcorr < 0 ? 0 : *hcorr;
602   } else {
603     *hcorr = 1.0;
604   }
605   if (xver_var_n > 0 && z_var_n > 0) {
606     *vcorr = xz_var_n / sqrtf(xver_var_n * z_var_n);
607     *vcorr = *vcorr < 0 ? 0 : *vcorr;
608   } else {
609     *vcorr = 1.0;
610   }
611 }
612 
// Sum of squared errors between source and reconstructed prediction over all
// coded planes of the current block; optionally returns the luma-only SSE
// through sse_y. The total is scaled up by 16 before returning.
static int64_t get_sse(const AV1_COMP *cpi, const MACROBLOCK *x,
                       int64_t *sse_y) {
  const AV1_COMMON *cm = &cpi->common;
  const int num_planes = av1_num_planes(cm);
  const MACROBLOCKD *xd = &x->e_mbd;
  const MB_MODE_INFO *mbmi = xd->mi[0];
  int64_t total_sse = 0;

  for (int plane = 0; plane < num_planes; ++plane) {
    // Chroma planes only contribute when this block carries chroma data.
    if (plane && !xd->is_chroma_ref) break;
    const struct macroblock_plane *const p = &x->plane[plane];
    const struct macroblockd_plane *const pd = &xd->plane[plane];
    const BLOCK_SIZE plane_bsize = get_plane_block_size(
        mbmi->sb_type, pd->subsampling_x, pd->subsampling_y);
    unsigned int plane_sse;
    cpi->fn_ptr[plane_bsize].vf(p->src.buf, p->src.stride, pd->dst.buf,
                                pd->dst.stride, &plane_sse);
    if (plane == 0 && sse_y != NULL) *sse_y = plane_sse;
    total_sse += plane_sse;
  }
  return total_sse << 4;
}
636 
av1_block_error_c(const tran_low_t * coeff,const tran_low_t * dqcoeff,intptr_t block_size,int64_t * ssz)637 int64_t av1_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
638                           intptr_t block_size, int64_t *ssz) {
639   int i;
640   int64_t error = 0, sqcoeff = 0;
641 
642   for (i = 0; i < block_size; i++) {
643     const int diff = coeff[i] - dqcoeff[i];
644     error += diff * diff;
645     sqcoeff += coeff[i] * coeff[i];
646   }
647 
648   *ssz = sqcoeff;
649   return error;
650 }
651 
av1_block_error_lp_c(const int16_t * coeff,const int16_t * dqcoeff,intptr_t block_size)652 int64_t av1_block_error_lp_c(const int16_t *coeff, const int16_t *dqcoeff,
653                              intptr_t block_size) {
654   int64_t error = 0;
655 
656   for (int i = 0; i < block_size; i++) {
657     const int diff = coeff[i] - dqcoeff[i];
658     error += diff * diff;
659   }
660 
661   return error;
662 }
663 
664 #if CONFIG_AV1_HIGHBITDEPTH
// High-bitdepth block error: like av1_block_error_c but accumulates in
// int64 and rescales both outputs to 8-bit-equivalent precision (shift of
// 2 * (bd - 8) with rounding).
int64_t av1_highbd_block_error_c(const tran_low_t *coeff,
                                 const tran_low_t *dqcoeff, intptr_t block_size,
                                 int64_t *ssz, int bd) {
  int64_t error = 0;
  int64_t sqcoeff = 0;
  const int shift = 2 * (bd - 8);
  const int rounding = shift > 0 ? 1 << (shift - 1) : 0;

  for (intptr_t i = 0; i < block_size; i++) {
    const int64_t diff = coeff[i] - dqcoeff[i];
    error += diff * diff;
    sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
  }
  assert(error >= 0 && sqcoeff >= 0);

  *ssz = (sqcoeff + rounding) >> shift;
  return (error + rounding) >> shift;
}
685 #endif
686 
// Returns 1 if the diagonal intra mode `mode` can be skipped because neither
// of its two closest directional neighbors is the current best intra mode;
// returns 0 for all other modes.
static int conditional_skipintra(PREDICTION_MODE mode,
                                 PREDICTION_MODE best_intra_mode) {
  switch (mode) {
    case D113_PRED:
      return best_intra_mode != V_PRED && best_intra_mode != D135_PRED;
    case D67_PRED:
      return best_intra_mode != V_PRED && best_intra_mode != D45_PRED;
    case D203_PRED:
      return best_intra_mode != H_PRED && best_intra_mode != D45_PRED;
    case D157_PRED:
      return best_intra_mode != H_PRED && best_intra_mode != D135_PRED;
    default: return 0;
  }
}
703 
// Rate cost (in bits, scaled) of signalling the given inter prediction mode
// under the supplied mode context. Compound modes use a single table lookup;
// single-reference modes accumulate costs down the NEWMV / GLOBALMV / REFMV
// signalling tree.
static int cost_mv_ref(const MACROBLOCK *const x, PREDICTION_MODE mode,
                       int16_t mode_context) {
  if (is_inter_compound_mode(mode)) {
    return x
        ->inter_compound_mode_cost[mode_context][INTER_COMPOUND_OFFSET(mode)];
  }

  assert(is_inter_mode(mode));

  // Level 0: is this NEWMV?
  int16_t ctx = mode_context & NEWMV_CTX_MASK;
  if (mode == NEWMV) return x->newmv_mode_cost[ctx][0];
  int cost = x->newmv_mode_cost[ctx][1];

  // Level 1: is this GLOBALMV?
  ctx = (mode_context >> GLOBALMV_OFFSET) & GLOBALMV_CTX_MASK;
  if (mode == GLOBALMV) return cost + x->zeromv_mode_cost[ctx][0];
  cost += x->zeromv_mode_cost[ctx][1];

  // Level 2: NEARESTMV vs NEARMV.
  ctx = (mode_context >> REFMV_OFFSET) & REFMV_CTX_MASK;
  return cost + x->refmv_mode_cost[ctx][mode != NEARESTMV];
}
734 
get_single_mode(PREDICTION_MODE this_mode,int ref_idx)735 static INLINE PREDICTION_MODE get_single_mode(PREDICTION_MODE this_mode,
736                                               int ref_idx) {
737   return ref_idx ? compound_ref1_mode(this_mode)
738                  : compound_ref0_mode(this_mode);
739 }
740 
estimate_ref_frame_costs(const AV1_COMMON * cm,const MACROBLOCKD * xd,const MACROBLOCK * x,int segment_id,unsigned int * ref_costs_single,unsigned int (* ref_costs_comp)[REF_FRAMES])741 static AOM_INLINE void estimate_ref_frame_costs(
742     const AV1_COMMON *cm, const MACROBLOCKD *xd, const MACROBLOCK *x,
743     int segment_id, unsigned int *ref_costs_single,
744     unsigned int (*ref_costs_comp)[REF_FRAMES]) {
745   int seg_ref_active =
746       segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
747   if (seg_ref_active) {
748     memset(ref_costs_single, 0, REF_FRAMES * sizeof(*ref_costs_single));
749     int ref_frame;
750     for (ref_frame = 0; ref_frame < REF_FRAMES; ++ref_frame)
751       memset(ref_costs_comp[ref_frame], 0,
752              REF_FRAMES * sizeof((*ref_costs_comp)[0]));
753   } else {
754     int intra_inter_ctx = av1_get_intra_inter_context(xd);
755     ref_costs_single[INTRA_FRAME] = x->intra_inter_cost[intra_inter_ctx][0];
756     unsigned int base_cost = x->intra_inter_cost[intra_inter_ctx][1];
757 
758     for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i)
759       ref_costs_single[i] = base_cost;
760 
761     const int ctx_p1 = av1_get_pred_context_single_ref_p1(xd);
762     const int ctx_p2 = av1_get_pred_context_single_ref_p2(xd);
763     const int ctx_p3 = av1_get_pred_context_single_ref_p3(xd);
764     const int ctx_p4 = av1_get_pred_context_single_ref_p4(xd);
765     const int ctx_p5 = av1_get_pred_context_single_ref_p5(xd);
766     const int ctx_p6 = av1_get_pred_context_single_ref_p6(xd);
767 
768     // Determine cost of a single ref frame, where frame types are represented
769     // by a tree:
770     // Level 0: add cost whether this ref is a forward or backward ref
771     ref_costs_single[LAST_FRAME] += x->single_ref_cost[ctx_p1][0][0];
772     ref_costs_single[LAST2_FRAME] += x->single_ref_cost[ctx_p1][0][0];
773     ref_costs_single[LAST3_FRAME] += x->single_ref_cost[ctx_p1][0][0];
774     ref_costs_single[GOLDEN_FRAME] += x->single_ref_cost[ctx_p1][0][0];
775     ref_costs_single[BWDREF_FRAME] += x->single_ref_cost[ctx_p1][0][1];
776     ref_costs_single[ALTREF2_FRAME] += x->single_ref_cost[ctx_p1][0][1];
777     ref_costs_single[ALTREF_FRAME] += x->single_ref_cost[ctx_p1][0][1];
778 
779     // Level 1: if this ref is forward ref,
780     // add cost whether it is last/last2 or last3/golden
781     ref_costs_single[LAST_FRAME] += x->single_ref_cost[ctx_p3][2][0];
782     ref_costs_single[LAST2_FRAME] += x->single_ref_cost[ctx_p3][2][0];
783     ref_costs_single[LAST3_FRAME] += x->single_ref_cost[ctx_p3][2][1];
784     ref_costs_single[GOLDEN_FRAME] += x->single_ref_cost[ctx_p3][2][1];
785 
786     // Level 1: if this ref is backward ref
787     // then add cost whether this ref is altref or backward ref
788     ref_costs_single[BWDREF_FRAME] += x->single_ref_cost[ctx_p2][1][0];
789     ref_costs_single[ALTREF2_FRAME] += x->single_ref_cost[ctx_p2][1][0];
790     ref_costs_single[ALTREF_FRAME] += x->single_ref_cost[ctx_p2][1][1];
791 
792     // Level 2: further add cost whether this ref is last or last2
793     ref_costs_single[LAST_FRAME] += x->single_ref_cost[ctx_p4][3][0];
794     ref_costs_single[LAST2_FRAME] += x->single_ref_cost[ctx_p4][3][1];
795 
796     // Level 2: last3 or golden
797     ref_costs_single[LAST3_FRAME] += x->single_ref_cost[ctx_p5][4][0];
798     ref_costs_single[GOLDEN_FRAME] += x->single_ref_cost[ctx_p5][4][1];
799 
800     // Level 2: bwdref or altref2
801     ref_costs_single[BWDREF_FRAME] += x->single_ref_cost[ctx_p6][5][0];
802     ref_costs_single[ALTREF2_FRAME] += x->single_ref_cost[ctx_p6][5][1];
803 
804     if (cm->current_frame.reference_mode != SINGLE_REFERENCE) {
805       // Similar to single ref, determine cost of compound ref frames.
806       // cost_compound_refs = cost_first_ref + cost_second_ref
807       const int bwdref_comp_ctx_p = av1_get_pred_context_comp_bwdref_p(xd);
808       const int bwdref_comp_ctx_p1 = av1_get_pred_context_comp_bwdref_p1(xd);
809       const int ref_comp_ctx_p = av1_get_pred_context_comp_ref_p(xd);
810       const int ref_comp_ctx_p1 = av1_get_pred_context_comp_ref_p1(xd);
811       const int ref_comp_ctx_p2 = av1_get_pred_context_comp_ref_p2(xd);
812 
813       const int comp_ref_type_ctx = av1_get_comp_reference_type_context(xd);
814       unsigned int ref_bicomp_costs[REF_FRAMES] = { 0 };
815 
816       ref_bicomp_costs[LAST_FRAME] = ref_bicomp_costs[LAST2_FRAME] =
817           ref_bicomp_costs[LAST3_FRAME] = ref_bicomp_costs[GOLDEN_FRAME] =
818               base_cost + x->comp_ref_type_cost[comp_ref_type_ctx][1];
819       ref_bicomp_costs[BWDREF_FRAME] = ref_bicomp_costs[ALTREF2_FRAME] = 0;
820       ref_bicomp_costs[ALTREF_FRAME] = 0;
821 
822       // cost of first ref frame
823       ref_bicomp_costs[LAST_FRAME] += x->comp_ref_cost[ref_comp_ctx_p][0][0];
824       ref_bicomp_costs[LAST2_FRAME] += x->comp_ref_cost[ref_comp_ctx_p][0][0];
825       ref_bicomp_costs[LAST3_FRAME] += x->comp_ref_cost[ref_comp_ctx_p][0][1];
826       ref_bicomp_costs[GOLDEN_FRAME] += x->comp_ref_cost[ref_comp_ctx_p][0][1];
827 
828       ref_bicomp_costs[LAST_FRAME] += x->comp_ref_cost[ref_comp_ctx_p1][1][0];
829       ref_bicomp_costs[LAST2_FRAME] += x->comp_ref_cost[ref_comp_ctx_p1][1][1];
830 
831       ref_bicomp_costs[LAST3_FRAME] += x->comp_ref_cost[ref_comp_ctx_p2][2][0];
832       ref_bicomp_costs[GOLDEN_FRAME] += x->comp_ref_cost[ref_comp_ctx_p2][2][1];
833 
834       // cost of second ref frame
835       ref_bicomp_costs[BWDREF_FRAME] +=
836           x->comp_bwdref_cost[bwdref_comp_ctx_p][0][0];
837       ref_bicomp_costs[ALTREF2_FRAME] +=
838           x->comp_bwdref_cost[bwdref_comp_ctx_p][0][0];
839       ref_bicomp_costs[ALTREF_FRAME] +=
840           x->comp_bwdref_cost[bwdref_comp_ctx_p][0][1];
841 
842       ref_bicomp_costs[BWDREF_FRAME] +=
843           x->comp_bwdref_cost[bwdref_comp_ctx_p1][1][0];
844       ref_bicomp_costs[ALTREF2_FRAME] +=
845           x->comp_bwdref_cost[bwdref_comp_ctx_p1][1][1];
846 
847       // cost: if one ref frame is forward ref, the other ref is backward ref
848       int ref0, ref1;
849       for (ref0 = LAST_FRAME; ref0 <= GOLDEN_FRAME; ++ref0) {
850         for (ref1 = BWDREF_FRAME; ref1 <= ALTREF_FRAME; ++ref1) {
851           ref_costs_comp[ref0][ref1] =
852               ref_bicomp_costs[ref0] + ref_bicomp_costs[ref1];
853         }
854       }
855 
856       // cost: if both ref frames are the same side.
857       const int uni_comp_ref_ctx_p = av1_get_pred_context_uni_comp_ref_p(xd);
858       const int uni_comp_ref_ctx_p1 = av1_get_pred_context_uni_comp_ref_p1(xd);
859       const int uni_comp_ref_ctx_p2 = av1_get_pred_context_uni_comp_ref_p2(xd);
860       ref_costs_comp[LAST_FRAME][LAST2_FRAME] =
861           base_cost + x->comp_ref_type_cost[comp_ref_type_ctx][0] +
862           x->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][0] +
863           x->uni_comp_ref_cost[uni_comp_ref_ctx_p1][1][0];
864       ref_costs_comp[LAST_FRAME][LAST3_FRAME] =
865           base_cost + x->comp_ref_type_cost[comp_ref_type_ctx][0] +
866           x->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][0] +
867           x->uni_comp_ref_cost[uni_comp_ref_ctx_p1][1][1] +
868           x->uni_comp_ref_cost[uni_comp_ref_ctx_p2][2][0];
869       ref_costs_comp[LAST_FRAME][GOLDEN_FRAME] =
870           base_cost + x->comp_ref_type_cost[comp_ref_type_ctx][0] +
871           x->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][0] +
872           x->uni_comp_ref_cost[uni_comp_ref_ctx_p1][1][1] +
873           x->uni_comp_ref_cost[uni_comp_ref_ctx_p2][2][1];
874       ref_costs_comp[BWDREF_FRAME][ALTREF_FRAME] =
875           base_cost + x->comp_ref_type_cost[comp_ref_type_ctx][0] +
876           x->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][1];
877     } else {
878       int ref0, ref1;
879       for (ref0 = LAST_FRAME; ref0 <= GOLDEN_FRAME; ++ref0) {
880         for (ref1 = BWDREF_FRAME; ref1 <= ALTREF_FRAME; ++ref1)
881           ref_costs_comp[ref0][ref1] = 512;
882       }
883       ref_costs_comp[LAST_FRAME][LAST2_FRAME] = 512;
884       ref_costs_comp[LAST_FRAME][LAST3_FRAME] = 512;
885       ref_costs_comp[LAST_FRAME][GOLDEN_FRAME] = 512;
886       ref_costs_comp[BWDREF_FRAME][ALTREF_FRAME] = 512;
887     }
888   }
889 }
890 
store_coding_context(MACROBLOCK * x,PICK_MODE_CONTEXT * ctx,int mode_index,int64_t comp_pred_diff[REFERENCE_MODES],int skippable)891 static AOM_INLINE void store_coding_context(
892 #if CONFIG_INTERNAL_STATS
893     MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int mode_index,
894 #else
895     MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
896 #endif  // CONFIG_INTERNAL_STATS
897     int64_t comp_pred_diff[REFERENCE_MODES], int skippable) {
898   MACROBLOCKD *const xd = &x->e_mbd;
899 
900   // Take a snapshot of the coding context so it can be
901   // restored if we decide to encode this way
902   ctx->rd_stats.skip = x->force_skip;
903   ctx->skippable = skippable;
904 #if CONFIG_INTERNAL_STATS
905   ctx->best_mode_index = mode_index;
906 #endif  // CONFIG_INTERNAL_STATS
907   ctx->mic = *xd->mi[0];
908   av1_copy_mbmi_ext_to_mbmi_ext_frame(&ctx->mbmi_ext_best, x->mbmi_ext,
909                                       av1_ref_frame_type(xd->mi[0]->ref_frame));
910   ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
911   ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
912   ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
913 }
914 
setup_buffer_ref_mvs_inter(const AV1_COMP * const cpi,MACROBLOCK * x,MV_REFERENCE_FRAME ref_frame,BLOCK_SIZE block_size,struct buf_2d yv12_mb[REF_FRAMES][MAX_MB_PLANE])915 static AOM_INLINE void setup_buffer_ref_mvs_inter(
916     const AV1_COMP *const cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
917     BLOCK_SIZE block_size, struct buf_2d yv12_mb[REF_FRAMES][MAX_MB_PLANE]) {
918   const AV1_COMMON *cm = &cpi->common;
919   const int num_planes = av1_num_planes(cm);
920   const YV12_BUFFER_CONFIG *scaled_ref_frame =
921       av1_get_scaled_ref_frame(cpi, ref_frame);
922   MACROBLOCKD *const xd = &x->e_mbd;
923   MB_MODE_INFO *const mbmi = xd->mi[0];
924   MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
925   const struct scale_factors *const sf =
926       get_ref_scale_factors_const(cm, ref_frame);
927   const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_yv12_buf(cm, ref_frame);
928   assert(yv12 != NULL);
929 
930   if (scaled_ref_frame) {
931     // Setup pred block based on scaled reference, because av1_mv_pred() doesn't
932     // support scaling.
933     av1_setup_pred_block(xd, yv12_mb[ref_frame], scaled_ref_frame, NULL, NULL,
934                          num_planes);
935   } else {
936     av1_setup_pred_block(xd, yv12_mb[ref_frame], yv12, sf, sf, num_planes);
937   }
938 
939   // Gets an initial list of candidate vectors from neighbours and orders them
940   av1_find_mv_refs(cm, xd, mbmi, ref_frame, mbmi_ext->ref_mv_count,
941                    xd->ref_mv_stack, xd->weight, NULL, mbmi_ext->global_mvs,
942                    mbmi_ext->mode_context);
943   // TODO(Ravi): Populate mbmi_ext->ref_mv_stack[ref_frame][4] and
944   // mbmi_ext->weight[ref_frame][4] inside av1_find_mv_refs.
945   av1_copy_usable_ref_mv_stack_and_weight(xd, mbmi_ext, ref_frame);
946   // Further refinement that is encode side only to test the top few candidates
947   // in full and choose the best as the center point for subsequent searches.
948   // The current implementation doesn't support scaling.
949   av1_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12_mb[ref_frame][0].stride,
950               ref_frame, block_size);
951 
952   // Go back to unscaled reference.
953   if (scaled_ref_frame) {
954     // We had temporarily setup pred block based on scaled reference above. Go
955     // back to unscaled reference now, for subsequent use.
956     av1_setup_pred_block(xd, yv12_mb[ref_frame], yv12, sf, sf, num_planes);
957   }
958 }
959 
960 #define LEFT_TOP_MARGIN ((AOM_BORDER_IN_PIXELS - AOM_INTERP_EXTEND) << 3)
961 #define RIGHT_BOTTOM_MARGIN ((AOM_BORDER_IN_PIXELS - AOM_INTERP_EXTEND) << 3)
962 
963 // TODO(jingning): this mv clamping function should be block size dependent.
clamp_mv2(MV * mv,const MACROBLOCKD * xd)964 static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
965   const SubpelMvLimits mv_limits = { xd->mb_to_left_edge - LEFT_TOP_MARGIN,
966                                      xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
967                                      xd->mb_to_top_edge - LEFT_TOP_MARGIN,
968                                      xd->mb_to_bottom_edge +
969                                          RIGHT_BOTTOM_MARGIN };
970   clamp_mv(mv, &mv_limits);
971 }
972 
973 /* If the current mode shares the same mv with other modes with higher cost,
974  * skip this mode. */
skip_repeated_mv(const AV1_COMMON * const cm,const MACROBLOCK * const x,PREDICTION_MODE this_mode,const MV_REFERENCE_FRAME ref_frames[2],InterModeSearchState * search_state)975 static int skip_repeated_mv(const AV1_COMMON *const cm,
976                             const MACROBLOCK *const x,
977                             PREDICTION_MODE this_mode,
978                             const MV_REFERENCE_FRAME ref_frames[2],
979                             InterModeSearchState *search_state) {
980   const int is_comp_pred = ref_frames[1] > INTRA_FRAME;
981   const uint8_t ref_frame_type = av1_ref_frame_type(ref_frames);
982   const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
983   const int ref_mv_count = mbmi_ext->ref_mv_count[ref_frame_type];
984   PREDICTION_MODE compare_mode = MB_MODE_COUNT;
985   if (!is_comp_pred) {
986     if (this_mode == NEARMV) {
987       if (ref_mv_count == 0) {
988         // NEARMV has the same motion vector as NEARESTMV
989         compare_mode = NEARESTMV;
990       }
991       if (ref_mv_count == 1 &&
992           cm->global_motion[ref_frames[0]].wmtype <= TRANSLATION) {
993         // NEARMV has the same motion vector as GLOBALMV
994         compare_mode = GLOBALMV;
995       }
996     }
997     if (this_mode == GLOBALMV) {
998       if (ref_mv_count == 0 &&
999           cm->global_motion[ref_frames[0]].wmtype <= TRANSLATION) {
1000         // GLOBALMV has the same motion vector as NEARESTMV
1001         compare_mode = NEARESTMV;
1002       }
1003       if (ref_mv_count == 1) {
1004         // GLOBALMV has the same motion vector as NEARMV
1005         compare_mode = NEARMV;
1006       }
1007     }
1008 
1009     if (compare_mode != MB_MODE_COUNT) {
1010       // Use modelled_rd to check whether compare mode was searched
1011       if (search_state->modelled_rd[compare_mode][0][ref_frames[0]] !=
1012           INT64_MAX) {
1013         const int16_t mode_ctx =
1014             av1_mode_context_analyzer(mbmi_ext->mode_context, ref_frames);
1015         const int compare_cost = cost_mv_ref(x, compare_mode, mode_ctx);
1016         const int this_cost = cost_mv_ref(x, this_mode, mode_ctx);
1017 
1018         // Only skip if the mode cost is larger than compare mode cost
1019         if (this_cost > compare_cost) {
1020           search_state->modelled_rd[this_mode][0][ref_frames[0]] =
1021               search_state->modelled_rd[compare_mode][0][ref_frames[0]];
1022           return 1;
1023         }
1024       }
1025     }
1026   }
1027   return 0;
1028 }
1029 
clamp_and_check_mv(int_mv * out_mv,int_mv in_mv,const AV1_COMMON * cm,const MACROBLOCK * x)1030 static INLINE int clamp_and_check_mv(int_mv *out_mv, int_mv in_mv,
1031                                      const AV1_COMMON *cm,
1032                                      const MACROBLOCK *x) {
1033   const MACROBLOCKD *const xd = &x->e_mbd;
1034   *out_mv = in_mv;
1035   lower_mv_precision(&out_mv->as_mv, cm->features.allow_high_precision_mv,
1036                      cm->features.cur_frame_force_integer_mv);
1037   clamp_mv2(&out_mv->as_mv, xd);
1038   return av1_is_fullmv_in_range(&x->mv_limits,
1039                                 get_fullmv_from_mv(&out_mv->as_mv));
1040 }
1041 
1042 // To use single newmv directly for compound modes, need to clamp the mv to the
1043 // valid mv range. Without this, encoder would generate out of range mv, and
1044 // this is seen in 8k encoding.
clamp_mv_in_range(MACROBLOCK * const x,int_mv * mv,int ref_idx)1045 static INLINE void clamp_mv_in_range(MACROBLOCK *const x, int_mv *mv,
1046                                      int ref_idx) {
1047   const int_mv ref_mv = av1_get_ref_mv(x, ref_idx);
1048   SubpelMvLimits mv_limits;
1049 
1050   av1_set_subpel_mv_search_range(&mv_limits, &x->mv_limits, &ref_mv.as_mv);
1051   clamp_mv(&mv->as_mv, &mv_limits);
1052 }
1053 
// Computes the NEWMV motion vector(s) for the current mode and writes them to
// cur_mv, along with the MV signaling rate in *rate_mv. For compound modes it
// reuses cached single-reference NEWMV results from 'args' when available;
// for single-reference modes it runs (and caches) a fresh motion search.
// Returns 0 on success, or INT64_MAX if the single-ref search found no valid
// MV (callers treat that as "skip this mode").
static int64_t handle_newmv(const AV1_COMP *const cpi, MACROBLOCK *const x,
                            const BLOCK_SIZE bsize, int_mv *cur_mv,
                            int *const rate_mv, HandleInterModeArgs *const args,
                            inter_mode_info *mode_info) {
  const MACROBLOCKD *const xd = &x->e_mbd;
  const MB_MODE_INFO *const mbmi = xd->mi[0];
  const int is_comp_pred = has_second_ref(mbmi);
  const PREDICTION_MODE this_mode = mbmi->mode;
  // refs[1] is forced to 0 when there is no second reference.
  const int refs[2] = { mbmi->ref_frame[0],
                        mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1] };
  const int ref_mv_idx = mbmi->ref_mv_idx;

  if (is_comp_pred) {
    // Cached single-ref NEWMV results for this ref_mv_idx, if any.
    const int valid_mv0 = args->single_newmv_valid[ref_mv_idx][refs[0]];
    const int valid_mv1 = args->single_newmv_valid[ref_mv_idx][refs[1]];

    if (this_mode == NEW_NEWMV) {
      // Seed both MVs from the cached single-ref results where valid; the
      // clamp keeps reused MVs inside the compound-mode search range.
      if (valid_mv0) {
        cur_mv[0].as_int = args->single_newmv[ref_mv_idx][refs[0]].as_int;
        clamp_mv_in_range(x, &cur_mv[0], 0);
      }
      if (valid_mv1) {
        cur_mv[1].as_int = args->single_newmv[ref_mv_idx][refs[1]].as_int;
        clamp_mv_in_range(x, &cur_mv[1], 1);
      }

      // aomenc1
      // Run a joint search when the block is large enough (per speed feature
      // threshold) or either seed MV is missing; otherwise just price the
      // reused MVs.
      if (cpi->sf.inter_sf.comp_inter_joint_search_thresh <= bsize ||
          !valid_mv0 || !valid_mv1) {
        av1_joint_motion_search(cpi, x, bsize, cur_mv, NULL, 0, rate_mv);
      } else {
        *rate_mv = 0;
        for (int i = 0; i < 2; ++i) {
          const int_mv ref_mv = av1_get_ref_mv(x, i);
          *rate_mv +=
              av1_mv_bit_cost(&cur_mv[i].as_mv, &ref_mv.as_mv, x->nmv_vec_cost,
                              x->mv_cost_stack, MV_COST_WEIGHT);
        }
      }
    } else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
      // Only the second reference carries a NEWMV; seed it from the cache.
      if (valid_mv1) {
        cur_mv[1].as_int = args->single_newmv[ref_mv_idx][refs[1]].as_int;
        clamp_mv_in_range(x, &cur_mv[1], 1);
      }

      // aomenc2
      if (cpi->sf.inter_sf.comp_inter_joint_search_thresh <= bsize ||
          !valid_mv1) {
        // Search only the second (index 1) MV of the compound pair.
        av1_compound_single_motion_search_interinter(cpi, x, bsize, cur_mv,
                                                     NULL, 0, rate_mv, 1);
      } else {
        const int_mv ref_mv = av1_get_ref_mv(x, 1);
        *rate_mv =
            av1_mv_bit_cost(&cur_mv[1].as_mv, &ref_mv.as_mv, x->nmv_vec_cost,
                            x->mv_cost_stack, MV_COST_WEIGHT);
      }
    } else {
      assert(this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV);
      // Only the first reference carries a NEWMV; seed it from the cache.
      if (valid_mv0) {
        cur_mv[0].as_int = args->single_newmv[ref_mv_idx][refs[0]].as_int;
        clamp_mv_in_range(x, &cur_mv[0], 0);
      }

      // aomenc3
      if (cpi->sf.inter_sf.comp_inter_joint_search_thresh <= bsize ||
          !valid_mv0) {
        // Search only the first (index 0) MV of the compound pair.
        av1_compound_single_motion_search_interinter(cpi, x, bsize, cur_mv,
                                                     NULL, 0, rate_mv, 0);
      } else {
        const int_mv ref_mv = av1_get_ref_mv(x, 0);
        *rate_mv =
            av1_mv_bit_cost(&cur_mv[0].as_mv, &ref_mv.as_mv, x->nmv_vec_cost,
                            x->mv_cost_stack, MV_COST_WEIGHT);
      }
    }
  } else {
    // Single ref case.
    const int ref_idx = 0;
    int search_range = INT_MAX;

    // Optionally shrink the search range for secondary ref MV candidates by
    // measuring how far previous candidates' searches moved from their refs.
    if (cpi->sf.mv_sf.reduce_search_range && mbmi->ref_mv_idx > 0) {
      const MV ref_mv = av1_get_ref_mv(x, ref_idx).as_mv;
      int min_mv_diff = INT_MAX;
      int best_match = -1;
      MV prev_ref_mv[2] = { { 0 } };
      // Find the earlier ref MV candidate closest to the current one
      // (Chebyshev distance, in 1/8-pel units).
      for (int idx = 0; idx < mbmi->ref_mv_idx; ++idx) {
        prev_ref_mv[idx] = av1_get_ref_mv_from_stack(ref_idx, mbmi->ref_frame,
                                                     idx, x->mbmi_ext)
                               .as_mv;
        const int ref_mv_diff = AOMMAX(abs(ref_mv.row - prev_ref_mv[idx].row),
                                       abs(ref_mv.col - prev_ref_mv[idx].col));

        if (min_mv_diff > ref_mv_diff) {
          min_mv_diff = ref_mv_diff;
          best_match = idx;
        }
      }

      // Only restrict the range when the closest candidate is within 16 pels.
      if (min_mv_diff < (16 << 3)) {
        if (args->single_newmv_valid[best_match][refs[0]]) {
          // Range = candidate distance + how far that candidate's search
          // moved from its own ref MV.
          search_range = min_mv_diff;
          search_range +=
              AOMMAX(abs(args->single_newmv[best_match][refs[0]].as_mv.row -
                         prev_ref_mv[best_match].row),
                     abs(args->single_newmv[best_match][refs[0]].as_mv.col -
                         prev_ref_mv[best_match].col));
          // Get full pixel search range.
          search_range = (search_range + 4) >> 3;
        }
      }
    }

    int_mv best_mv;
    av1_single_motion_search(cpi, x, bsize, ref_idx, rate_mv, search_range,
                             mode_info, &best_mv);
    if (best_mv.as_int == INVALID_MV) return INT64_MAX;

    // Cache the result so compound modes can reuse it later.
    args->single_newmv[ref_mv_idx][refs[0]] = best_mv;
    args->single_newmv_rate[ref_mv_idx][refs[0]] = *rate_mv;
    args->single_newmv_valid[ref_mv_idx][refs[0]] = 1;
    cur_mv[0].as_int = best_mv.as_int;
  }

  return 0;
}
1179 
1180 // If number of valid neighbours is 1,
1181 // 1) ROTZOOM parameters can be obtained reliably (2 parameters from
1182 // one neighbouring MV)
1183 // 2) For IDENTITY/TRANSLATION cases, warp can perform better due to
1184 // a different interpolation filter being used. However the quality
1185 // gains (due to the same) may not be much
1186 // For above 2 cases warp evaluation is skipped
1187 
check_if_optimal_warp(const AV1_COMP * cpi,WarpedMotionParams * wm_params,int num_proj_ref)1188 static int check_if_optimal_warp(const AV1_COMP *cpi,
1189                                  WarpedMotionParams *wm_params,
1190                                  int num_proj_ref) {
1191   int is_valid_warp = 1;
1192   if (cpi->sf.inter_sf.prune_warp_using_wmtype) {
1193     TransformationType wmtype = get_wmtype(wm_params);
1194     if (num_proj_ref == 1) {
1195       if (wmtype != ROTZOOM) is_valid_warp = 0;
1196     } else {
1197       if (wmtype < ROTZOOM) is_valid_warp = 0;
1198     }
1199   }
1200   return is_valid_warp;
1201 }
1202 
update_mode_start_end_index(const AV1_COMP * const cpi,int * mode_index_start,int * mode_index_end,int last_motion_mode_allowed,int interintra_allowed,int eval_motion_mode)1203 static INLINE void update_mode_start_end_index(const AV1_COMP *const cpi,
1204                                                int *mode_index_start,
1205                                                int *mode_index_end,
1206                                                int last_motion_mode_allowed,
1207                                                int interintra_allowed,
1208                                                int eval_motion_mode) {
1209   *mode_index_start = (int)SIMPLE_TRANSLATION;
1210   *mode_index_end = (int)last_motion_mode_allowed + interintra_allowed;
1211   if (cpi->sf.winner_mode_sf.motion_mode_for_winner_cand) {
1212     if (!eval_motion_mode) {
1213       *mode_index_end = (int)SIMPLE_TRANSLATION;
1214     } else {
1215       // Set the start index appropriately to process motion modes other than
1216       // simple translation
1217       *mode_index_start = 1;
1218     }
1219   }
1220 }
1221 
1222 // TODO(afergs): Refactor the MBMI references in here - there's four
1223 // TODO(afergs): Refactor optional args - add them to a struct or remove
motion_mode_rd(const AV1_COMP * const cpi,TileDataEnc * tile_data,MACROBLOCK * const x,BLOCK_SIZE bsize,RD_STATS * rd_stats,RD_STATS * rd_stats_y,RD_STATS * rd_stats_uv,int * disable_skip,HandleInterModeArgs * const args,int64_t ref_best_rd,int64_t * ref_skip_rd,int * rate_mv,const BUFFER_SET * orig_dst,int64_t * best_est_rd,int do_tx_search,InterModesInfo * inter_modes_info,int eval_motion_mode)1224 static int64_t motion_mode_rd(
1225     const AV1_COMP *const cpi, TileDataEnc *tile_data, MACROBLOCK *const x,
1226     BLOCK_SIZE bsize, RD_STATS *rd_stats, RD_STATS *rd_stats_y,
1227     RD_STATS *rd_stats_uv, int *disable_skip, HandleInterModeArgs *const args,
1228     int64_t ref_best_rd, int64_t *ref_skip_rd, int *rate_mv,
1229     const BUFFER_SET *orig_dst, int64_t *best_est_rd, int do_tx_search,
1230     InterModesInfo *inter_modes_info, int eval_motion_mode) {
1231   const AV1_COMMON *const cm = &cpi->common;
1232   const FeatureFlags *const features = &cm->features;
1233   const int num_planes = av1_num_planes(cm);
1234   MACROBLOCKD *xd = &x->e_mbd;
1235   MB_MODE_INFO *mbmi = xd->mi[0];
1236   const int is_comp_pred = has_second_ref(mbmi);
1237   const PREDICTION_MODE this_mode = mbmi->mode;
1238   const int rate2_nocoeff = rd_stats->rate;
1239   int best_xskip = 0, best_disable_skip = 0;
1240   RD_STATS best_rd_stats, best_rd_stats_y, best_rd_stats_uv;
1241   uint8_t best_blk_skip[MAX_MIB_SIZE * MAX_MIB_SIZE];
1242   uint8_t best_tx_type_map[MAX_MIB_SIZE * MAX_MIB_SIZE];
1243   const int rate_mv0 = *rate_mv;
1244   const int interintra_allowed = cm->seq_params.enable_interintra_compound &&
1245                                  is_interintra_allowed(mbmi) &&
1246                                  mbmi->compound_idx;
1247   int pts0[SAMPLES_ARRAY_SIZE], pts_inref0[SAMPLES_ARRAY_SIZE];
1248 
1249   assert(mbmi->ref_frame[1] != INTRA_FRAME);
1250   const MV_REFERENCE_FRAME ref_frame_1 = mbmi->ref_frame[1];
1251   (void)tile_data;
1252   av1_invalid_rd_stats(&best_rd_stats);
1253   aom_clear_system_state();
1254   mbmi->num_proj_ref = 1;  // assume num_proj_ref >=1
1255   MOTION_MODE last_motion_mode_allowed = SIMPLE_TRANSLATION;
1256   if (features->switchable_motion_mode) {
1257     last_motion_mode_allowed = motion_mode_allowed(
1258         xd->global_motion, xd, mbmi, features->allow_warped_motion);
1259   }
1260 
1261   if (last_motion_mode_allowed == WARPED_CAUSAL) {
1262     mbmi->num_proj_ref = av1_findSamples(cm, xd, pts0, pts_inref0);
1263   }
1264   const int total_samples = mbmi->num_proj_ref;
1265   if (total_samples == 0) {
1266     last_motion_mode_allowed = OBMC_CAUSAL;
1267   }
1268 
1269   const MB_MODE_INFO base_mbmi = *mbmi;
1270   MB_MODE_INFO best_mbmi;
1271   SimpleRDState *const simple_states = &args->simple_rd_state[mbmi->ref_mv_idx];
1272   const int interp_filter = features->interp_filter;
1273   const int switchable_rate =
1274       av1_is_interp_needed(xd) ? av1_get_switchable_rate(x, xd, interp_filter)
1275                                : 0;
1276   int64_t best_rd = INT64_MAX;
1277   int best_rate_mv = rate_mv0;
1278   const int mi_row = xd->mi_row;
1279   const int mi_col = xd->mi_col;
1280   int mode_index_start, mode_index_end;
1281   update_mode_start_end_index(cpi, &mode_index_start, &mode_index_end,
1282                               last_motion_mode_allowed, interintra_allowed,
1283                               eval_motion_mode);
1284   for (int mode_index = mode_index_start; mode_index <= mode_index_end;
1285        mode_index++) {
1286     if (args->skip_motion_mode && mode_index) continue;
1287     if (cpi->sf.inter_sf.prune_single_motion_modes_by_simple_trans &&
1288         args->single_ref_first_pass && mode_index)
1289       break;
1290     int tmp_rate2 = rate2_nocoeff;
1291     const int is_interintra_mode = mode_index > (int)last_motion_mode_allowed;
1292     int tmp_rate_mv = rate_mv0;
1293 
1294     *mbmi = base_mbmi;
1295     if (is_interintra_mode) {
1296       mbmi->motion_mode = SIMPLE_TRANSLATION;
1297     } else {
1298       mbmi->motion_mode = (MOTION_MODE)mode_index;
1299       assert(mbmi->ref_frame[1] != INTRA_FRAME);
1300     }
1301 
1302     const FRAME_UPDATE_TYPE update_type = get_frame_update_type(&cpi->gf_group);
1303     const int prune_obmc = cpi->frame_probs.obmc_probs[update_type][bsize] <
1304                            cpi->sf.inter_sf.prune_obmc_prob_thresh;
1305     if ((cpi->oxcf.enable_obmc == 0 || cpi->sf.inter_sf.disable_obmc ||
1306          cpi->sf.rt_sf.use_nonrd_pick_mode || prune_obmc) &&
1307         mbmi->motion_mode == OBMC_CAUSAL)
1308       continue;
1309 
1310     if (mbmi->motion_mode == SIMPLE_TRANSLATION && !is_interintra_mode) {
1311       // SIMPLE_TRANSLATION mode: no need to recalculate.
1312       // The prediction is calculated before motion_mode_rd() is called in
1313       // handle_inter_mode()
1314       if (cpi->sf.inter_sf.prune_single_motion_modes_by_simple_trans &&
1315           !is_comp_pred) {
1316         if (args->single_ref_first_pass == 0) {
1317           if (simple_states->early_skipped) {
1318             assert(simple_states->rd_stats.rdcost == INT64_MAX);
1319             return INT64_MAX;
1320           }
1321           if (simple_states->rd_stats.rdcost != INT64_MAX) {
1322             best_rd = simple_states->rd_stats.rdcost;
1323             best_rd_stats = simple_states->rd_stats;
1324             best_rd_stats_y = simple_states->rd_stats_y;
1325             best_rd_stats_uv = simple_states->rd_stats_uv;
1326             memcpy(best_blk_skip, simple_states->blk_skip,
1327                    sizeof(x->blk_skip[0]) * xd->height * xd->width);
1328             av1_copy_array(best_tx_type_map, simple_states->tx_type_map,
1329                            xd->height * xd->width);
1330             best_xskip = simple_states->skip;
1331             best_disable_skip = simple_states->disable_skip;
1332             best_mbmi = *mbmi;
1333           }
1334           continue;
1335         }
1336         simple_states->early_skipped = 0;
1337       }
1338     } else if (mbmi->motion_mode == OBMC_CAUSAL) {
1339       const uint32_t cur_mv = mbmi->mv[0].as_int;
1340       assert(!is_comp_pred);
1341       if (have_newmv_in_inter_mode(this_mode)) {
1342         av1_single_motion_search(cpi, x, bsize, 0, &tmp_rate_mv, INT_MAX, NULL,
1343                                  &mbmi->mv[0]);
1344         tmp_rate2 = rate2_nocoeff - rate_mv0 + tmp_rate_mv;
1345       }
1346       if ((mbmi->mv[0].as_int != cur_mv) || eval_motion_mode) {
1347         av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, orig_dst, bsize,
1348                                       0, av1_num_planes(cm) - 1);
1349       }
1350       av1_build_obmc_inter_prediction(
1351           cm, xd, args->above_pred_buf, args->above_pred_stride,
1352           args->left_pred_buf, args->left_pred_stride);
1353     } else if (mbmi->motion_mode == WARPED_CAUSAL) {
1354       int pts[SAMPLES_ARRAY_SIZE], pts_inref[SAMPLES_ARRAY_SIZE];
1355       mbmi->motion_mode = WARPED_CAUSAL;
1356       mbmi->wm_params.wmtype = DEFAULT_WMTYPE;
1357       mbmi->interp_filters =
1358           av1_broadcast_interp_filter(av1_unswitchable_filter(interp_filter));
1359 
1360       memcpy(pts, pts0, total_samples * 2 * sizeof(*pts0));
1361       memcpy(pts_inref, pts_inref0, total_samples * 2 * sizeof(*pts_inref0));
1362       // Select the samples according to motion vector difference
1363       if (mbmi->num_proj_ref > 1) {
1364         mbmi->num_proj_ref = av1_selectSamples(
1365             &mbmi->mv[0].as_mv, pts, pts_inref, mbmi->num_proj_ref, bsize);
1366       }
1367 
1368       if (!av1_find_projection(mbmi->num_proj_ref, pts, pts_inref, bsize,
1369                                mbmi->mv[0].as_mv.row, mbmi->mv[0].as_mv.col,
1370                                &mbmi->wm_params, mi_row, mi_col)) {
1371         // Refine MV for NEWMV mode
1372         assert(!is_comp_pred);
1373         if (have_newmv_in_inter_mode(this_mode)) {
1374           const int_mv mv0 = mbmi->mv[0];
1375           const WarpedMotionParams wm_params0 = mbmi->wm_params;
1376           const int num_proj_ref0 = mbmi->num_proj_ref;
1377 
1378           if (cpi->sf.inter_sf.prune_warp_using_wmtype) {
1379             TransformationType wmtype = get_wmtype(&mbmi->wm_params);
1380             if (wmtype < ROTZOOM) continue;
1381           }
1382 
1383           const int_mv ref_mv = av1_get_ref_mv(x, 0);
1384           SUBPEL_MOTION_SEARCH_PARAMS ms_params;
1385           av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize,
1386                                             &ref_mv.as_mv, NULL);
1387 
1388           // Refine MV in a small range.
1389           av1_refine_warped_mv(xd, cm, &ms_params, bsize, pts0, pts_inref0,
1390                                total_samples);
1391 
1392           // Keep the refined MV and WM parameters.
1393           if (mv0.as_int != mbmi->mv[0].as_int) {
1394             tmp_rate_mv = av1_mv_bit_cost(&mbmi->mv[0].as_mv, &ref_mv.as_mv,
1395                                           x->nmv_vec_cost, x->mv_cost_stack,
1396                                           MV_COST_WEIGHT);
1397             if (cpi->sf.mv_sf.adaptive_motion_search) {
1398               x->pred_mv[mbmi->ref_frame[0]] = mbmi->mv[0].as_mv;
1399             }
1400             tmp_rate2 = rate2_nocoeff - rate_mv0 + tmp_rate_mv;
1401           } else {
1402             // Restore the old MV and WM parameters.
1403             mbmi->mv[0] = mv0;
1404             mbmi->wm_params = wm_params0;
1405             mbmi->num_proj_ref = num_proj_ref0;
1406           }
1407         } else {
1408           if (!check_if_optimal_warp(cpi, &mbmi->wm_params, mbmi->num_proj_ref))
1409             continue;
1410         }
1411 
1412         av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, 0,
1413                                       av1_num_planes(cm) - 1);
1414       } else {
1415         continue;
1416       }
1417     } else if (is_interintra_mode) {
1418       const int ret =
1419           av1_handle_inter_intra_mode(cpi, x, bsize, mbmi, args, ref_best_rd,
1420                                       &tmp_rate_mv, &tmp_rate2, orig_dst);
1421       if (ret < 0) continue;
1422     }
1423 
1424     // If we are searching newmv and the mv is the same as refmv, skip the
1425     // current mode
1426     if (this_mode == NEW_NEWMV) {
1427       const int_mv ref_mv_0 = av1_get_ref_mv(x, 0);
1428       const int_mv ref_mv_1 = av1_get_ref_mv(x, 1);
1429       if (mbmi->mv[0].as_int == ref_mv_0.as_int ||
1430           mbmi->mv[1].as_int == ref_mv_1.as_int) {
1431         continue;
1432       }
1433     } else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
1434       const int_mv ref_mv_1 = av1_get_ref_mv(x, 1);
1435       if (mbmi->mv[1].as_int == ref_mv_1.as_int) {
1436         continue;
1437       }
1438     } else if (this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV) {
1439       const int_mv ref_mv_0 = av1_get_ref_mv(x, 0);
1440       if (mbmi->mv[0].as_int == ref_mv_0.as_int) {
1441         continue;
1442       }
1443     } else if (this_mode == NEWMV) {
1444       const int_mv ref_mv_0 = av1_get_ref_mv(x, 0);
1445       if (mbmi->mv[0].as_int == ref_mv_0.as_int) {
1446         continue;
1447       }
1448     }
1449 
1450     x->force_skip = 0;
1451     rd_stats->dist = 0;
1452     rd_stats->sse = 0;
1453     rd_stats->skip = 1;
1454     rd_stats->rate = tmp_rate2;
1455     if (mbmi->motion_mode != WARPED_CAUSAL) rd_stats->rate += switchable_rate;
1456     if (interintra_allowed) {
1457       rd_stats->rate += x->interintra_cost[size_group_lookup[bsize]]
1458                                           [mbmi->ref_frame[1] == INTRA_FRAME];
1459     }
1460     if ((last_motion_mode_allowed > SIMPLE_TRANSLATION) &&
1461         (mbmi->ref_frame[1] != INTRA_FRAME)) {
1462       if (last_motion_mode_allowed == WARPED_CAUSAL) {
1463         rd_stats->rate += x->motion_mode_cost[bsize][mbmi->motion_mode];
1464       } else {
1465         rd_stats->rate += x->motion_mode_cost1[bsize][mbmi->motion_mode];
1466       }
1467     }
1468 
1469     if (!do_tx_search) {
1470       int64_t curr_sse = -1;
1471       int64_t sse_y = -1;
1472       int est_residue_cost = 0;
1473       int64_t est_dist = 0;
1474       int64_t est_rd = 0;
1475       if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1) {
1476         curr_sse = get_sse(cpi, x, &sse_y);
1477         // Scale luma SSE as per bit depth so as to be consistent with
1478         // model_rd_sb_fn and compound type rd
1479         sse_y = ROUND_POWER_OF_TWO(sse_y, (xd->bd - 8) * 2);
1480         const int has_est_rd = get_est_rate_dist(tile_data, bsize, curr_sse,
1481                                                  &est_residue_cost, &est_dist);
1482         (void)has_est_rd;
1483         assert(has_est_rd);
1484       } else if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 2 ||
1485                  cpi->sf.rt_sf.use_nonrd_pick_mode) {
1486         model_rd_sb_fn[MODELRD_TYPE_MOTION_MODE_RD](
1487             cpi, bsize, x, xd, 0, num_planes - 1, &est_residue_cost, &est_dist,
1488             NULL, &curr_sse, NULL, NULL, NULL);
1489         sse_y = x->pred_sse[xd->mi[0]->ref_frame[0]];
1490       }
1491       est_rd = RDCOST(x->rdmult, rd_stats->rate + est_residue_cost, est_dist);
1492       if (est_rd * 0.80 > *best_est_rd) {
1493         mbmi->ref_frame[1] = ref_frame_1;
1494         continue;
1495       }
1496       const int mode_rate = rd_stats->rate;
1497       rd_stats->rate += est_residue_cost;
1498       rd_stats->dist = est_dist;
1499       rd_stats->rdcost = est_rd;
1500       if (rd_stats->rdcost < *best_est_rd) {
1501         *best_est_rd = rd_stats->rdcost;
1502         assert(sse_y >= 0);
1503         ref_skip_rd[1] = cpi->sf.inter_sf.txfm_rd_gate_level
1504                              ? RDCOST(x->rdmult, mode_rate, (sse_y << 4))
1505                              : INT64_MAX;
1506       }
1507       if (cm->current_frame.reference_mode == SINGLE_REFERENCE) {
1508         if (!is_comp_pred) {
1509           assert(curr_sse >= 0);
1510           inter_modes_info_push(inter_modes_info, mode_rate, curr_sse,
1511                                 rd_stats->rdcost, rd_stats, rd_stats_y,
1512                                 rd_stats_uv, mbmi);
1513         }
1514       } else {
1515         assert(curr_sse >= 0);
1516         inter_modes_info_push(inter_modes_info, mode_rate, curr_sse,
1517                               rd_stats->rdcost, rd_stats, rd_stats_y,
1518                               rd_stats_uv, mbmi);
1519       }
1520       mbmi->skip = 0;
1521     } else {
1522       int64_t skip_rd = INT64_MAX;
1523       int64_t skip_rdy = INT64_MAX;
1524       if (cpi->sf.inter_sf.txfm_rd_gate_level) {
1525         // Check if the mode is good enough based on skip RD
1526         int64_t sse_y = INT64_MAX;
1527         int64_t curr_sse = get_sse(cpi, x, &sse_y);
1528         // Scale luma SSE as per bit depth so as to be consistent with
1529         // model_rd_sb_fn and compound type rd
1530         sse_y = ROUND_POWER_OF_TWO(sse_y, (xd->bd - 8) * 2);
1531         skip_rd = RDCOST(x->rdmult, rd_stats->rate, curr_sse);
1532         skip_rdy = RDCOST(x->rdmult, rd_stats->rate, (sse_y << 4));
1533         int eval_txfm = check_txfm_eval(x, bsize, ref_skip_rd[0], skip_rd,
1534                                         cpi->sf.inter_sf.txfm_rd_gate_level, 0);
1535         if (!eval_txfm) continue;
1536       }
1537 
1538       if (!av1_txfm_search(cpi, x, bsize, rd_stats, rd_stats_y, rd_stats_uv,
1539                            rd_stats->rate, ref_best_rd)) {
1540         if (rd_stats_y->rate == INT_MAX && mode_index == 0) {
1541           if (cpi->sf.inter_sf.prune_single_motion_modes_by_simple_trans &&
1542               !is_comp_pred) {
1543             simple_states->early_skipped = 1;
1544           }
1545           return INT64_MAX;
1546         }
1547         continue;
1548       }
1549 
1550       const int64_t curr_rd = RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist);
1551       if (curr_rd < ref_best_rd) {
1552         ref_best_rd = curr_rd;
1553         ref_skip_rd[0] = skip_rd;
1554         ref_skip_rd[1] = skip_rdy;
1555       }
1556       *disable_skip = 0;
1557       if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1) {
1558         const int skip_ctx = av1_get_skip_context(xd);
1559         inter_mode_data_push(tile_data, mbmi->sb_type, rd_stats->sse,
1560                              rd_stats->dist,
1561                              rd_stats_y->rate + rd_stats_uv->rate +
1562                                  x->skip_cost[skip_ctx][mbmi->skip]);
1563       }
1564     }
1565 
1566     if (this_mode == GLOBALMV || this_mode == GLOBAL_GLOBALMV) {
1567       if (is_nontrans_global_motion(xd, xd->mi[0])) {
1568         mbmi->interp_filters =
1569             av1_broadcast_interp_filter(av1_unswitchable_filter(interp_filter));
1570       }
1571     }
1572 
1573     const int64_t tmp_rd = RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist);
1574     if (mode_index == 0) {
1575       args->simple_rd[this_mode][mbmi->ref_mv_idx][mbmi->ref_frame[0]] = tmp_rd;
1576       if (!is_comp_pred) {
1577         simple_states->rd_stats = *rd_stats;
1578         simple_states->rd_stats.rdcost = tmp_rd;
1579         simple_states->rd_stats_y = *rd_stats_y;
1580         simple_states->rd_stats_uv = *rd_stats_uv;
1581         memcpy(simple_states->blk_skip, x->blk_skip,
1582                sizeof(x->blk_skip[0]) * xd->height * xd->width);
1583         av1_copy_array(simple_states->tx_type_map, xd->tx_type_map,
1584                        xd->height * xd->width);
1585         simple_states->skip = mbmi->skip;
1586         simple_states->disable_skip = *disable_skip;
1587       }
1588     }
1589     if (mode_index == 0 || tmp_rd < best_rd) {
1590       best_mbmi = *mbmi;
1591       best_rd = tmp_rd;
1592       best_rd_stats = *rd_stats;
1593       best_rd_stats_y = *rd_stats_y;
1594       best_rate_mv = tmp_rate_mv;
1595       if (num_planes > 1) best_rd_stats_uv = *rd_stats_uv;
1596       memcpy(best_blk_skip, x->blk_skip,
1597              sizeof(x->blk_skip[0]) * xd->height * xd->width);
1598       av1_copy_array(best_tx_type_map, xd->tx_type_map, xd->height * xd->width);
1599       best_xskip = mbmi->skip;
1600       best_disable_skip = *disable_skip;
1601       // TODO(anyone): evaluate the quality and speed trade-off of the early
1602       // termination logic below.
1603       // if (best_xskip) break;
1604     }
1605   }
1606   mbmi->ref_frame[1] = ref_frame_1;
1607   *rate_mv = best_rate_mv;
1608   if (best_rd == INT64_MAX) {
1609     av1_invalid_rd_stats(rd_stats);
1610     restore_dst_buf(xd, *orig_dst, num_planes);
1611     return INT64_MAX;
1612   }
1613   *mbmi = best_mbmi;
1614   *rd_stats = best_rd_stats;
1615   *rd_stats_y = best_rd_stats_y;
1616   if (num_planes > 1) *rd_stats_uv = best_rd_stats_uv;
1617   memcpy(x->blk_skip, best_blk_skip,
1618          sizeof(x->blk_skip[0]) * xd->height * xd->width);
1619   av1_copy_array(xd->tx_type_map, best_tx_type_map, xd->height * xd->width);
1620   x->force_skip = best_xskip;
1621   *disable_skip = best_disable_skip;
1622 
1623   restore_dst_buf(xd, *orig_dst, num_planes);
1624   return 0;
1625 }
1626 
// Computes the RD cost of coding the current block in skip mode, i.e.
// transmitting no residual: distortion is the (scaled) prediction SSE over
// all coded planes, and rate is just the cost of the skip-mode flag.
static int64_t skip_mode_rd(RD_STATS *rd_stats, const AV1_COMP *const cpi,
                            MACROBLOCK *const x, BLOCK_SIZE bsize,
                            const BUFFER_SET *const orig_dst) {
  assert(bsize < BLOCK_SIZES_ALL);
  const AV1_COMMON *cm = &cpi->common;
  const int num_planes = av1_num_planes(cm);
  MACROBLOCKD *const xd = &x->e_mbd;

  // Build the inter prediction for every coded plane.
  av1_enc_build_inter_predictor(cm, xd, xd->mi_row, xd->mi_col, orig_dst,
                                bsize, 0, av1_num_planes(cm) - 1);

  // Accumulate the prediction error over all planes.
  int64_t sse_sum = 0;
  for (int plane_idx = 0; plane_idx < num_planes; ++plane_idx) {
    const struct macroblock_plane *const p = &x->plane[plane_idx];
    const struct macroblockd_plane *const pd = &xd->plane[plane_idx];
    const BLOCK_SIZE plane_bsize =
        get_plane_block_size(bsize, pd->subsampling_x, pd->subsampling_y);
    const int blk_w = block_size_wide[plane_bsize];
    const int blk_h = block_size_high[plane_bsize];

    av1_subtract_plane(x, plane_bsize, plane_idx);
    // The SSE is scaled by 16 (<< 4), matching the distortion scale used by
    // the RDCOST computations elsewhere in this file.
    sse_sum += aom_sum_squares_2d_i16(p->src_diff, blk_w, blk_w, blk_h) << 4;
  }

  const int skip_mode_ctx = av1_get_skip_mode_context(xd);
  rd_stats->dist = rd_stats->sse = sse_sum;
  rd_stats->rate = x->skip_mode_cost[skip_mode_ctx][1];
  rd_stats->rdcost = RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist);

  restore_dst_buf(xd, *orig_dst, num_planes);
  return 0;
}
1660 
1661 // Check NEARESTMV, NEARMV, GLOBALMV ref mvs for duplicate and skip the relevant
1662 // mode
check_repeat_ref_mv(const MB_MODE_INFO_EXT * mbmi_ext,int ref_idx,const MV_REFERENCE_FRAME * ref_frame,PREDICTION_MODE single_mode)1663 static INLINE int check_repeat_ref_mv(const MB_MODE_INFO_EXT *mbmi_ext,
1664                                       int ref_idx,
1665                                       const MV_REFERENCE_FRAME *ref_frame,
1666                                       PREDICTION_MODE single_mode) {
1667   const uint8_t ref_frame_type = av1_ref_frame_type(ref_frame);
1668   const int ref_mv_count = mbmi_ext->ref_mv_count[ref_frame_type];
1669   assert(single_mode != NEWMV);
1670   if (single_mode == NEARESTMV) {
1671     return 0;
1672   } else if (single_mode == NEARMV) {
1673     // when ref_mv_count = 0, NEARESTMV and NEARMV are same as GLOBALMV
1674     // when ref_mv_count = 1, NEARMV is same as GLOBALMV
1675     if (ref_mv_count < 2) return 1;
1676   } else if (single_mode == GLOBALMV) {
1677     // when ref_mv_count == 0, GLOBALMV is same as NEARESTMV
1678     if (ref_mv_count == 0) return 1;
1679     // when ref_mv_count == 1, NEARMV is same as GLOBALMV
1680     else if (ref_mv_count == 1)
1681       return 0;
1682 
1683     int stack_size = AOMMIN(USABLE_REF_MV_STACK_SIZE, ref_mv_count);
1684     // Check GLOBALMV is matching with any mv in ref_mv_stack
1685     for (int ref_mv_idx = 0; ref_mv_idx < stack_size; ref_mv_idx++) {
1686       int_mv this_mv;
1687 
1688       if (ref_idx == 0)
1689         this_mv = mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv;
1690       else
1691         this_mv = mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_idx].comp_mv;
1692 
1693       if (this_mv.as_int == mbmi_ext->global_mvs[ref_frame[ref_idx]].as_int)
1694         return 1;
1695     }
1696   }
1697   return 0;
1698 }
1699 
get_this_mv(int_mv * this_mv,PREDICTION_MODE this_mode,int ref_idx,int ref_mv_idx,int skip_repeated_ref_mv,const MV_REFERENCE_FRAME * ref_frame,const MB_MODE_INFO_EXT * mbmi_ext)1700 static INLINE int get_this_mv(int_mv *this_mv, PREDICTION_MODE this_mode,
1701                               int ref_idx, int ref_mv_idx,
1702                               int skip_repeated_ref_mv,
1703                               const MV_REFERENCE_FRAME *ref_frame,
1704                               const MB_MODE_INFO_EXT *mbmi_ext) {
1705   const PREDICTION_MODE single_mode = get_single_mode(this_mode, ref_idx);
1706   assert(is_inter_singleref_mode(single_mode));
1707   if (single_mode == NEWMV) {
1708     this_mv->as_int = INVALID_MV;
1709   } else if (single_mode == GLOBALMV) {
1710     if (skip_repeated_ref_mv &&
1711         check_repeat_ref_mv(mbmi_ext, ref_idx, ref_frame, single_mode))
1712       return 0;
1713     *this_mv = mbmi_ext->global_mvs[ref_frame[ref_idx]];
1714   } else {
1715     assert(single_mode == NEARMV || single_mode == NEARESTMV);
1716     const uint8_t ref_frame_type = av1_ref_frame_type(ref_frame);
1717     const int ref_mv_offset = single_mode == NEARESTMV ? 0 : ref_mv_idx + 1;
1718     if (ref_mv_offset < mbmi_ext->ref_mv_count[ref_frame_type]) {
1719       assert(ref_mv_offset >= 0);
1720       if (ref_idx == 0) {
1721         *this_mv =
1722             mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_offset].this_mv;
1723       } else {
1724         *this_mv =
1725             mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_offset].comp_mv;
1726       }
1727     } else {
1728       if (skip_repeated_ref_mv &&
1729           check_repeat_ref_mv(mbmi_ext, ref_idx, ref_frame, single_mode))
1730         return 0;
1731       *this_mv = mbmi_ext->global_mvs[ref_frame[ref_idx]];
1732     }
1733   }
1734   return 1;
1735 }
1736 
1737 // This function update the non-new mv for the current prediction mode
build_cur_mv(int_mv * cur_mv,PREDICTION_MODE this_mode,const AV1_COMMON * cm,const MACROBLOCK * x,int skip_repeated_ref_mv)1738 static INLINE int build_cur_mv(int_mv *cur_mv, PREDICTION_MODE this_mode,
1739                                const AV1_COMMON *cm, const MACROBLOCK *x,
1740                                int skip_repeated_ref_mv) {
1741   const MACROBLOCKD *xd = &x->e_mbd;
1742   const MB_MODE_INFO *mbmi = xd->mi[0];
1743   const int is_comp_pred = has_second_ref(mbmi);
1744 
1745   int ret = 1;
1746   for (int i = 0; i < is_comp_pred + 1; ++i) {
1747     int_mv this_mv;
1748     this_mv.as_int = INVALID_MV;
1749     ret = get_this_mv(&this_mv, this_mode, i, mbmi->ref_mv_idx,
1750                       skip_repeated_ref_mv, mbmi->ref_frame, x->mbmi_ext);
1751     if (!ret) return 0;
1752     const PREDICTION_MODE single_mode = get_single_mode(this_mode, i);
1753     if (single_mode == NEWMV) {
1754       const uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
1755       cur_mv[i] =
1756           (i == 0) ? x->mbmi_ext->ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx]
1757                          .this_mv
1758                    : x->mbmi_ext->ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx]
1759                          .comp_mv;
1760     } else {
1761       ret &= clamp_and_check_mv(cur_mv + i, this_mv, cm, x);
1762     }
1763   }
1764   return ret;
1765 }
1766 
get_drl_cost(const MB_MODE_INFO * mbmi,const MB_MODE_INFO_EXT * mbmi_ext,const int (* const drl_mode_cost0)[2],int8_t ref_frame_type)1767 static INLINE int get_drl_cost(const MB_MODE_INFO *mbmi,
1768                                const MB_MODE_INFO_EXT *mbmi_ext,
1769                                const int (*const drl_mode_cost0)[2],
1770                                int8_t ref_frame_type) {
1771   int cost = 0;
1772   if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV) {
1773     for (int idx = 0; idx < 2; ++idx) {
1774       if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
1775         uint8_t drl_ctx = av1_drl_ctx(mbmi_ext->weight[ref_frame_type], idx);
1776         cost += drl_mode_cost0[drl_ctx][mbmi->ref_mv_idx != idx];
1777         if (mbmi->ref_mv_idx == idx) return cost;
1778       }
1779     }
1780     return cost;
1781   }
1782 
1783   if (have_nearmv_in_inter_mode(mbmi->mode)) {
1784     for (int idx = 1; idx < 3; ++idx) {
1785       if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
1786         uint8_t drl_ctx = av1_drl_ctx(mbmi_ext->weight[ref_frame_type], idx);
1787         cost += drl_mode_cost0[drl_ctx][mbmi->ref_mv_idx != (idx - 1)];
1788         if (mbmi->ref_mv_idx == (idx - 1)) return cost;
1789       }
1790     }
1791     return cost;
1792   }
1793   return cost;
1794 }
1795 
is_single_newmv_valid(const HandleInterModeArgs * const args,const MB_MODE_INFO * const mbmi,PREDICTION_MODE this_mode)1796 static INLINE int is_single_newmv_valid(const HandleInterModeArgs *const args,
1797                                         const MB_MODE_INFO *const mbmi,
1798                                         PREDICTION_MODE this_mode) {
1799   for (int ref_idx = 0; ref_idx < 2; ++ref_idx) {
1800     const PREDICTION_MODE single_mode = get_single_mode(this_mode, ref_idx);
1801     const MV_REFERENCE_FRAME ref = mbmi->ref_frame[ref_idx];
1802     if (single_mode == NEWMV &&
1803         args->single_newmv_valid[mbmi->ref_mv_idx][ref] == 0) {
1804       return 0;
1805     }
1806   }
1807   return 1;
1808 }
1809 
// Returns how many DRL reference-MV candidates should be searched for
// |mode| with the given reference-frame pair. Always at least 1; more only
// when the mode actually codes a DRL index and enough stack entries exist.
static int get_drl_refmv_count(const MACROBLOCK *const x,
                               const MV_REFERENCE_FRAME *ref_frame,
                               PREDICTION_MODE mode) {
  const int8_t ref_frame_type = av1_ref_frame_type(ref_frame);
  const int ref_mv_count = x->mbmi_ext->ref_mv_count[ref_frame_type];
  const int has_nearmv = have_nearmv_in_inter_mode(mode) ? 1 : 0;
  const int only_newmv = (mode == NEWMV || mode == NEW_NEWMV);
  // A DRL index is only coded when the stack holds enough candidates.
  if ((has_nearmv && ref_mv_count > 2) || (only_newmv && ref_mv_count > 1)) {
    return AOMMIN(MAX_REF_MV_SEARCH, ref_mv_count - has_nearmv);
  }
  return 1;
}
1825 
// Whether this reference motion vector can be skipped, based on initial
// heuristics. Returns true when the candidate at |ref_mv_idx| should not be
// searched. Side effect: sets mbmi->ref_mv_idx to |ref_mv_idx| when the
// candidate survives the weight/speed-feature checks.
static bool ref_mv_idx_early_breakout(const AV1_COMP *const cpi, MACROBLOCK *x,
                                      const HandleInterModeArgs *const args,
                                      int64_t ref_best_rd, int ref_mv_idx) {
  const SPEED_FEATURES *const sf = &cpi->sf;
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = xd->mi[0];
  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  const int8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
  const int is_comp_pred = has_second_ref(mbmi);
  if (sf->inter_sf.reduce_inter_modes && ref_mv_idx > 0) {
    // For the less common LAST2/LAST3 references, drop extra ref-MV
    // candidates whose MV-stack weight is below the category threshold.
    if (mbmi->ref_frame[0] == LAST2_FRAME ||
        mbmi->ref_frame[0] == LAST3_FRAME ||
        mbmi->ref_frame[1] == LAST2_FRAME ||
        mbmi->ref_frame[1] == LAST3_FRAME) {
      const int has_nearmv = have_nearmv_in_inter_mode(mbmi->mode) ? 1 : 0;
      if (mbmi_ext->weight[ref_frame_type][ref_mv_idx + has_nearmv] <
          REF_CAT_LEVEL) {
        return true;
      }
    }
    // TODO(any): Experiment with reduce_inter_modes for compound prediction
    // At the more aggressive setting, apply the same weight-based pruning to
    // single-reference NEWMV modes on references other than the nearest
    // past/future ones.
    if (sf->inter_sf.reduce_inter_modes >= 2 && !is_comp_pred &&
        have_newmv_in_inter_mode(mbmi->mode)) {
      if (mbmi->ref_frame[0] != cpi->nearest_past_ref &&
          mbmi->ref_frame[0] != cpi->nearest_future_ref) {
        const int has_nearmv = have_nearmv_in_inter_mode(mbmi->mode) ? 1 : 0;
        if (mbmi_ext->weight[ref_frame_type][ref_mv_idx + has_nearmv] <
            REF_CAT_LEVEL) {
          return true;
        }
      }
    }
  }
  // Skip indices that the single-reference first pass already found bad.
  if (sf->inter_sf.prune_single_motion_modes_by_simple_trans && !is_comp_pred &&
      args->single_ref_first_pass == 0) {
    if (args->simple_rd_state[ref_mv_idx].early_skipped) {
      return true;
    }
  }
  mbmi->ref_mv_idx = ref_mv_idx;
  // Compound modes need valid single-reference NEWMV results to reuse.
  if (is_comp_pred && (!is_single_newmv_valid(args, mbmi, mbmi->mode))) {
    return true;
  }
  // Estimate a lower bound on the mode's rate; break out early if that cost
  // alone already exceeds the best RD seen, except for the NEAREST modes
  // which are always kept as a baseline.
  size_t est_rd_rate = args->ref_frame_cost + args->single_comp_cost;
  const int drl_cost =
      get_drl_cost(mbmi, mbmi_ext, x->drl_mode_cost0, ref_frame_type);
  est_rd_rate += drl_cost;
  if (RDCOST(x->rdmult, est_rd_rate, 0) > ref_best_rd &&
      mbmi->mode != NEARESTMV && mbmi->mode != NEAREST_NEARESTMV) {
    return true;
  }
  return false;
}
1881 
1882 // Compute the estimated RD cost for the motion vector with simple translation.
simple_translation_pred_rd(AV1_COMP * const cpi,MACROBLOCK * x,RD_STATS * rd_stats,HandleInterModeArgs * args,int ref_mv_idx,inter_mode_info * mode_info,int64_t ref_best_rd,BLOCK_SIZE bsize)1883 static int64_t simple_translation_pred_rd(
1884     AV1_COMP *const cpi, MACROBLOCK *x, RD_STATS *rd_stats,
1885     HandleInterModeArgs *args, int ref_mv_idx, inter_mode_info *mode_info,
1886     int64_t ref_best_rd, BLOCK_SIZE bsize) {
1887   MACROBLOCKD *xd = &x->e_mbd;
1888   MB_MODE_INFO *mbmi = xd->mi[0];
1889   MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
1890   const int8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
1891   const AV1_COMMON *cm = &cpi->common;
1892   const int is_comp_pred = has_second_ref(mbmi);
1893 
1894   struct macroblockd_plane *p = xd->plane;
1895   const BUFFER_SET orig_dst = {
1896     { p[0].dst.buf, p[1].dst.buf, p[2].dst.buf },
1897     { p[0].dst.stride, p[1].dst.stride, p[2].dst.stride },
1898   };
1899   av1_init_rd_stats(rd_stats);
1900 
1901   mbmi->interinter_comp.type = COMPOUND_AVERAGE;
1902   mbmi->comp_group_idx = 0;
1903   mbmi->compound_idx = 1;
1904   if (mbmi->ref_frame[1] == INTRA_FRAME) {
1905     mbmi->ref_frame[1] = NONE_FRAME;
1906   }
1907   int16_t mode_ctx =
1908       av1_mode_context_analyzer(mbmi_ext->mode_context, mbmi->ref_frame);
1909 
1910   mbmi->num_proj_ref = 0;
1911   mbmi->motion_mode = SIMPLE_TRANSLATION;
1912   mbmi->ref_mv_idx = ref_mv_idx;
1913 
1914   rd_stats->rate += args->ref_frame_cost + args->single_comp_cost;
1915   const int drl_cost =
1916       get_drl_cost(mbmi, mbmi_ext, x->drl_mode_cost0, ref_frame_type);
1917   rd_stats->rate += drl_cost;
1918   mode_info[ref_mv_idx].drl_cost = drl_cost;
1919 
1920   int_mv cur_mv[2];
1921   if (!build_cur_mv(cur_mv, mbmi->mode, cm, x, 0)) {
1922     return INT64_MAX;
1923   }
1924   assert(have_nearmv_in_inter_mode(mbmi->mode));
1925   for (int i = 0; i < is_comp_pred + 1; ++i) {
1926     mbmi->mv[i].as_int = cur_mv[i].as_int;
1927   }
1928   const int ref_mv_cost = cost_mv_ref(x, mbmi->mode, mode_ctx);
1929   rd_stats->rate += ref_mv_cost;
1930 
1931   if (RDCOST(x->rdmult, rd_stats->rate, 0) > ref_best_rd) {
1932     return INT64_MAX;
1933   }
1934 
1935   mbmi->motion_mode = SIMPLE_TRANSLATION;
1936   mbmi->num_proj_ref = 0;
1937   if (is_comp_pred) {
1938     // Only compound_average
1939     mbmi->interinter_comp.type = COMPOUND_AVERAGE;
1940     mbmi->comp_group_idx = 0;
1941     mbmi->compound_idx = 1;
1942   }
1943   set_default_interp_filters(mbmi, cm->features.interp_filter);
1944 
1945   const int mi_row = xd->mi_row;
1946   const int mi_col = xd->mi_col;
1947   av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, &orig_dst, bsize,
1948                                 AOM_PLANE_Y, AOM_PLANE_Y);
1949   int est_rate;
1950   int64_t est_dist;
1951   model_rd_sb_fn[MODELRD_CURVFIT](cpi, bsize, x, xd, 0, 0, &est_rate, &est_dist,
1952                                   NULL, NULL, NULL, NULL, NULL);
1953   return RDCOST(x->rdmult, rd_stats->rate + est_rate, est_dist);
1954 }
1955 
1956 // Represents a set of integers, from 0 to sizeof(int) * 8, as bits in
1957 // an integer. 0 for the i-th bit means that integer is excluded, 1 means
1958 // it is included.
mask_set_bit(int * mask,int index)1959 static INLINE void mask_set_bit(int *mask, int index) { *mask |= (1 << index); }
1960 
mask_check_bit(int mask,int index)1961 static INLINE bool mask_check_bit(int mask, int index) {
1962   return (mask >> index) & 0x1;
1963 }
1964 
// Before performing the full MV search in handle_inter_mode, do a simple
// translation search and see if we can eliminate any motion vectors.
// Returns an integer where, if the i-th bit is set, it means that the i-th
// motion vector should be searched. This is only set for NEAR_MV.
static int ref_mv_idx_to_search(AV1_COMP *const cpi, MACROBLOCK *x,
                                RD_STATS *rd_stats,
                                HandleInterModeArgs *const args,
                                int64_t ref_best_rd, inter_mode_info *mode_info,
                                BLOCK_SIZE bsize, const int ref_set) {
  AV1_COMMON *const cm = &cpi->common;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const MB_MODE_INFO *const mbmi = xd->mi[0];
  const PREDICTION_MODE this_mode = mbmi->mode;

  // Only search indices if they have some chance of being good.
  int good_indices = 0;
  for (int i = 0; i < ref_set; ++i) {
    if (ref_mv_idx_early_breakout(cpi, x, args, ref_best_rd, i)) {
      continue;
    }
    mask_set_bit(&good_indices, i);
  }

  // Only prune in NEARMV mode, if the speed feature is set, and the block size
  // is large enough. If these conditions are not met, return all good indices
  // found so far.
  if (!cpi->sf.inter_sf.prune_mode_search_simple_translation)
    return good_indices;
  if (!have_nearmv_in_inter_mode(this_mode)) return good_indices;
  if (num_pels_log2_lookup[bsize] <= 6) return good_indices;
  // Do not prune when there is internal resizing. TODO(elliottk) fix this
  // so b/2384 can be resolved.
  if (av1_is_scaled(get_ref_scale_factors(cm, mbmi->ref_frame[0])) ||
      (mbmi->ref_frame[1] > 0 &&
       av1_is_scaled(get_ref_scale_factors(cm, mbmi->ref_frame[1])))) {
    return good_indices;
  }

  // Calculate the RD cost for the motion vectors using simple translation.
  // Entries for indices beyond ref_set (or pruned above) stay at INT64_MAX.
  int64_t idx_rdcost[] = { INT64_MAX, INT64_MAX, INT64_MAX };
  for (int ref_mv_idx = 0; ref_mv_idx < ref_set; ++ref_mv_idx) {
    // If this index is bad, ignore it.
    if (!mask_check_bit(good_indices, ref_mv_idx)) {
      continue;
    }
    idx_rdcost[ref_mv_idx] = simple_translation_pred_rd(
        cpi, x, rd_stats, args, ref_mv_idx, mode_info, ref_best_rd, bsize);
  }
  // Find the index with the best RD cost.
  int best_idx = 0;
  for (int i = 1; i < MAX_REF_MV_SEARCH; ++i) {
    if (idx_rdcost[i] < idx_rdcost[best_idx]) {
      best_idx = i;
    }
  }
  // Only include indices that are good and within a % of the best.
  // Compound prediction tolerates a wider gap from the best than single.
  const double dth = has_second_ref(mbmi) ? 1.05 : 1.001;
  // If the simple translation cost is not within this multiple of the
  // best RD, skip it. Note that the cutoff is derived experimentally.
  const double ref_dth = 5;
  int result = 0;
  for (int i = 0; i < ref_set; ++i) {
    if (mask_check_bit(good_indices, i) &&
        (1.0 * idx_rdcost[i]) / idx_rdcost[best_idx] < dth &&
        (1.0 * idx_rdcost[i]) / ref_best_rd < ref_dth) {
      mask_set_bit(&result, i);
    }
  }
  return result;
}
2035 
// One candidate from the motion-mode winner search: a snapshot of its mode
// info plus the rate/RD statistics tracked for it so it can be revisited
// without repeating the search.
typedef struct motion_mode_candidate {
  MB_MODE_INFO mbmi;     // Snapshot of the candidate's coding mode info.
  int rate_mv;           // Rate of the candidate's motion vector(s).
  int rate2_nocoeff;     // Mode rate excluding coefficient cost -- presumably;
                         // verify against where this struct is filled.
  int skip_motion_mode;  // Flag controlling motion-mode evaluation skip.
  int64_t rd_cost;       // RD cost recorded for the candidate.
} motion_mode_candidate;
2043 
// Collection of the best motion-mode candidates retained from the
// simple-translation stage of the search.
typedef struct motion_mode_best_st_candidate {
  motion_mode_candidate motion_mode_cand[MAX_WINNER_MOTION_MODES];
  int num_motion_mode_cand;  // Number of valid entries in motion_mode_cand.
} motion_mode_best_st_candidate;
2048 
2049 // Checks if the current reference frame matches with neighbouring block's
2050 // (top/left) reference frames
ref_match_found_in_nb_blocks(MB_MODE_INFO * cur_mbmi,MB_MODE_INFO * nb_mbmi)2051 static AOM_INLINE int ref_match_found_in_nb_blocks(MB_MODE_INFO *cur_mbmi,
2052                                                    MB_MODE_INFO *nb_mbmi) {
2053   MV_REFERENCE_FRAME nb_ref_frames[2] = { nb_mbmi->ref_frame[0],
2054                                           nb_mbmi->ref_frame[1] };
2055   MV_REFERENCE_FRAME cur_ref_frames[2] = { cur_mbmi->ref_frame[0],
2056                                            cur_mbmi->ref_frame[1] };
2057   const int is_cur_comp_pred = has_second_ref(cur_mbmi);
2058   int match_found = 0;
2059 
2060   for (int i = 0; i < (is_cur_comp_pred + 1); i++) {
2061     if ((cur_ref_frames[i] == nb_ref_frames[0]) ||
2062         (cur_ref_frames[i] == nb_ref_frames[1]))
2063       match_found = 1;
2064   }
2065   return match_found;
2066 }
2067 
find_ref_match_in_above_nbs(const int total_mi_cols,MACROBLOCKD * xd)2068 static AOM_INLINE int find_ref_match_in_above_nbs(const int total_mi_cols,
2069                                                   MACROBLOCKD *xd) {
2070   if (!xd->up_available) return 0;
2071   const int mi_col = xd->mi_col;
2072   MB_MODE_INFO **cur_mbmi = xd->mi;
2073   // prev_row_mi points into the mi array, starting at the beginning of the
2074   // previous row.
2075   MB_MODE_INFO **prev_row_mi = xd->mi - mi_col - 1 * xd->mi_stride;
2076   const int end_col = AOMMIN(mi_col + xd->width, total_mi_cols);
2077   uint8_t mi_step;
2078   for (int above_mi_col = mi_col; above_mi_col < end_col;
2079        above_mi_col += mi_step) {
2080     MB_MODE_INFO **above_mi = prev_row_mi + above_mi_col;
2081     mi_step = mi_size_wide[above_mi[0]->sb_type];
2082     int match_found = 0;
2083     if (is_inter_block(*above_mi))
2084       match_found = ref_match_found_in_nb_blocks(*cur_mbmi, *above_mi);
2085     if (match_found) return 1;
2086   }
2087   return 0;
2088 }
2089 
find_ref_match_in_left_nbs(const int total_mi_rows,MACROBLOCKD * xd)2090 static AOM_INLINE int find_ref_match_in_left_nbs(const int total_mi_rows,
2091                                                  MACROBLOCKD *xd) {
2092   if (!xd->left_available) return 0;
2093   const int mi_row = xd->mi_row;
2094   MB_MODE_INFO **cur_mbmi = xd->mi;
2095   // prev_col_mi points into the mi array, starting at the top of the
2096   // previous column
2097   MB_MODE_INFO **prev_col_mi = xd->mi - 1 - mi_row * xd->mi_stride;
2098   const int end_row = AOMMIN(mi_row + xd->height, total_mi_rows);
2099   uint8_t mi_step;
2100   for (int left_mi_row = mi_row; left_mi_row < end_row;
2101        left_mi_row += mi_step) {
2102     MB_MODE_INFO **left_mi = prev_col_mi + left_mi_row * xd->mi_stride;
2103     mi_step = mi_size_high[left_mi[0]->sb_type];
2104     int match_found = 0;
2105     if (is_inter_block(*left_mi))
2106       match_found = ref_match_found_in_nb_blocks(*cur_mbmi, *left_mi);
2107     if (match_found) return 1;
2108   }
2109   return 0;
2110 }
2111 
// Inter costs gathered from TPL (temporal dependency model) statistics,
// used to prune reference frames during mode search.
typedef struct {
  // Minimum of the nonzero per-reference costs over valid references
  // (see get_block_level_tpl_stats).
  int64_t best_inter_cost;
  // Accumulated TPL prediction error per reference frame; 0 for
  // references with no stats.
  int64_t ref_inter_cost[INTER_REFS_PER_FRAME];
} PruneInfoFromTpl;
2116 
2117 #if !CONFIG_REALTIME_ONLY
// TODO(Remya): Check if get_tpl_stats_b() can be reused
// Accumulates, for the block at (mi_row, mi_col) of size |bsize|, the TPL
// prediction error of every reference frame into
// |inter_cost_info_from_tpl->ref_inter_cost|, and records the minimum of
// those costs over references flagged in |valid_refs| as best_inter_cost.
// Does nothing when no valid TPL frame stats exist for the current GOP index.
static AOM_INLINE void get_block_level_tpl_stats(
    AV1_COMP *cpi, BLOCK_SIZE bsize, int mi_row, int mi_col, int *valid_refs,
    PruneInfoFromTpl *inter_cost_info_from_tpl) {
  const GF_GROUP *const gf_group = &cpi->gf_group;
  AV1_COMMON *const cm = &cpi->common;

  assert(IMPLIES(gf_group->size > 0, gf_group->index < gf_group->size));
  const int tpl_idx = gf_group->index;
  TplParams *const tpl_data = &cpi->tpl_data;
  const TplDepFrame *tpl_frame = &tpl_data->tpl_frame[tpl_idx];
  // Bail out when no valid TPL stats are available for this frame.
  if (tpl_idx >= MAX_LAG_BUFFERS || !tpl_frame->is_valid) {
    return;
  }

  const TplDepStats *tpl_stats = tpl_frame->tpl_stats_ptr;
  const int mi_wide = mi_size_wide[bsize];
  const int mi_high = mi_size_high[bsize];
  const int tpl_stride = tpl_frame->stride;
  // TPL stats are stored on a coarser grid; |step| is the stats-block size
  // in mi units.
  const int step = 1 << tpl_data->tpl_stats_block_mis_log2;
  // Column range is mapped to superres-scaled coordinates.
  const int mi_col_sr =
      coded_to_superres_mi(mi_col, cm->superres_scale_denominator);
  const int mi_col_end_sr =
      coded_to_superres_mi(mi_col + mi_wide, cm->superres_scale_denominator);
  const int mi_cols_sr = av1_pixels_to_mi(cm->superres_upscaled_width);

  for (int row = mi_row; row < AOMMIN(mi_row + mi_high, cm->mi_params.mi_rows);
       row += step) {
    for (int col = mi_col_sr; col < AOMMIN(mi_col_end_sr, mi_cols_sr);
         col += step) {
      const TplDepStats *this_stats = &tpl_stats[av1_tpl_ptr_pos(
          row, col, tpl_stride, tpl_data->tpl_stats_block_mis_log2)];

      // Sums up the inter cost of corresponding ref frames
      for (int ref_idx = 0; ref_idx < INTER_REFS_PER_FRAME; ref_idx++) {
        inter_cost_info_from_tpl->ref_inter_cost[ref_idx] +=
            this_stats->pred_error[ref_idx];
      }
    }
  }

  // Computes the best inter cost (minimum inter_cost)
  int64_t best_inter_cost = INT64_MAX;
  for (int ref_idx = 0; ref_idx < INTER_REFS_PER_FRAME; ref_idx++) {
    const int64_t cur_inter_cost =
        inter_cost_info_from_tpl->ref_inter_cost[ref_idx];
    // For invalid ref frames, cur_inter_cost = 0 and has to be handled while
    // calculating the minimum inter_cost
    if (cur_inter_cost != 0 && (cur_inter_cost < best_inter_cost) &&
        valid_refs[ref_idx])
      best_inter_cost = cur_inter_cost;
  }
  inter_cost_info_from_tpl->best_inter_cost = best_inter_cost;
}
2172 #endif
2173 
prune_modes_based_on_tpl_stats(PruneInfoFromTpl * inter_cost_info_from_tpl,const int * refs,int ref_mv_idx,const PREDICTION_MODE this_mode,int prune_mode_level)2174 static AOM_INLINE int prune_modes_based_on_tpl_stats(
2175     PruneInfoFromTpl *inter_cost_info_from_tpl, const int *refs, int ref_mv_idx,
2176     const PREDICTION_MODE this_mode, int prune_mode_level) {
2177   const int have_newmv = have_newmv_in_inter_mode(this_mode);
2178   if ((prune_mode_level < 3) && have_newmv) return 0;
2179 
2180   static const int prune_level_idx[3] = { 0, 1, 1 };
2181   const int prune_level = prune_level_idx[prune_mode_level - 1];
2182   int64_t cur_inter_cost;
2183 
2184   const int is_globalmv =
2185       (this_mode == GLOBALMV) || (this_mode == GLOBAL_GLOBALMV);
2186   const int prune_index = is_globalmv ? MAX_REF_MV_SEARCH : ref_mv_idx;
2187 
2188   // Thresholds used for pruning:
2189   // Lower value indicates aggressive pruning and higher value indicates
2190   // conservative pruning which is set based on ref_mv_idx and speed feature.
2191   // 'prune_index' 0, 1, 2 corresponds to ref_mv indices 0, 1 and 2. prune_index
2192   // 3 corresponds to GLOBALMV/GLOBAL_GLOBALMV
2193   static const int tpl_inter_mode_prune_mul_factor[2][MAX_REF_MV_SEARCH + 1] = {
2194     { 3, 3, 3, 2 }, { 3, 2, 2, 2 }
2195   };
2196 
2197   const int is_comp_pred = (refs[1] > INTRA_FRAME);
2198   if (!is_comp_pred) {
2199     cur_inter_cost = inter_cost_info_from_tpl->ref_inter_cost[refs[0] - 1];
2200   } else {
2201     const int64_t inter_cost_ref0 =
2202         inter_cost_info_from_tpl->ref_inter_cost[refs[0] - 1];
2203     const int64_t inter_cost_ref1 =
2204         inter_cost_info_from_tpl->ref_inter_cost[refs[1] - 1];
2205     // Choose maximum inter_cost among inter_cost_ref0 and inter_cost_ref1 for
2206     // more aggressive pruning
2207     cur_inter_cost = AOMMAX(inter_cost_ref0, inter_cost_ref1);
2208   }
2209 
2210   // Prune the mode if cur_inter_cost is greater than threshold times
2211   // best_inter_cost
2212   const int64_t best_inter_cost = inter_cost_info_from_tpl->best_inter_cost;
2213   if (cur_inter_cost >
2214       ((tpl_inter_mode_prune_mul_factor[prune_level][prune_index] *
2215         best_inter_cost) >>
2216        1))
2217     return 1;
2218   return 0;
2219 }
2220 
handle_inter_mode(AV1_COMP * const cpi,TileDataEnc * tile_data,MACROBLOCK * x,BLOCK_SIZE bsize,RD_STATS * rd_stats,RD_STATS * rd_stats_y,RD_STATS * rd_stats_uv,int * disable_skip,HandleInterModeArgs * args,int64_t ref_best_rd,uint8_t * const tmp_buf,const CompoundTypeRdBuffers * rd_buffers,int64_t * best_est_rd,const int do_tx_search,InterModesInfo * inter_modes_info,motion_mode_candidate * motion_mode_cand,int64_t * skip_rd,PruneInfoFromTpl * inter_cost_info_from_tpl)2221 static int64_t handle_inter_mode(
2222     AV1_COMP *const cpi, TileDataEnc *tile_data, MACROBLOCK *x,
2223     BLOCK_SIZE bsize, RD_STATS *rd_stats, RD_STATS *rd_stats_y,
2224     RD_STATS *rd_stats_uv, int *disable_skip, HandleInterModeArgs *args,
2225     int64_t ref_best_rd, uint8_t *const tmp_buf,
2226     const CompoundTypeRdBuffers *rd_buffers, int64_t *best_est_rd,
2227     const int do_tx_search, InterModesInfo *inter_modes_info,
2228     motion_mode_candidate *motion_mode_cand, int64_t *skip_rd,
2229     PruneInfoFromTpl *inter_cost_info_from_tpl) {
2230   const AV1_COMMON *cm = &cpi->common;
2231   const int num_planes = av1_num_planes(cm);
2232   MACROBLOCKD *xd = &x->e_mbd;
2233   MB_MODE_INFO *mbmi = xd->mi[0];
2234   MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
2235   const int is_comp_pred = has_second_ref(mbmi);
2236   const PREDICTION_MODE this_mode = mbmi->mode;
2237 
2238   const GF_GROUP *const gf_group = &cpi->gf_group;
2239   const int tpl_idx = gf_group->index;
2240   TplDepFrame *tpl_frame = &cpi->tpl_data.tpl_frame[tpl_idx];
2241   const int prune_modes_based_on_tpl =
2242       cpi->sf.inter_sf.prune_inter_modes_based_on_tpl &&
2243       tpl_idx >= MAX_LAG_BUFFERS && tpl_frame->is_valid;
2244   int i;
2245   const int refs[2] = { mbmi->ref_frame[0],
2246                         (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
2247   int rate_mv = 0;
2248   int64_t rd = INT64_MAX;
2249   // do first prediction into the destination buffer. Do the next
2250   // prediction into a temporary buffer. Then keep track of which one
2251   // of these currently holds the best predictor, and use the other
2252   // one for future predictions. In the end, copy from tmp_buf to
2253   // dst if necessary.
2254   struct macroblockd_plane *p = xd->plane;
2255   const BUFFER_SET orig_dst = {
2256     { p[0].dst.buf, p[1].dst.buf, p[2].dst.buf },
2257     { p[0].dst.stride, p[1].dst.stride, p[2].dst.stride },
2258   };
2259   const BUFFER_SET tmp_dst = { { tmp_buf, tmp_buf + 1 * MAX_SB_SQUARE,
2260                                  tmp_buf + 2 * MAX_SB_SQUARE },
2261                                { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE } };
2262 
2263   const int masked_compound_used = is_any_masked_compound_used(bsize) &&
2264                                    cm->seq_params.enable_masked_compound;
2265   int64_t ret_val = INT64_MAX;
2266   const int8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
2267   RD_STATS best_rd_stats, best_rd_stats_y, best_rd_stats_uv;
2268   int64_t best_rd = INT64_MAX;
2269   uint8_t best_blk_skip[MAX_MIB_SIZE * MAX_MIB_SIZE];
2270   uint8_t best_tx_type_map[MAX_MIB_SIZE * MAX_MIB_SIZE];
2271   MB_MODE_INFO best_mbmi = *mbmi;
2272   int best_disable_skip = 0;
2273   int best_xskip = 0;
2274   int64_t newmv_ret_val = INT64_MAX;
2275   inter_mode_info mode_info[MAX_REF_MV_SEARCH];
2276 
2277   int mode_search_mask = (1 << COMPOUND_AVERAGE) | (1 << COMPOUND_DISTWTD) |
2278                          (1 << COMPOUND_WEDGE) | (1 << COMPOUND_DIFFWTD);
2279 
2280   // Do not prune the mode based on inter cost from tpl if the current ref frame
2281   // is the winner ref in neighbouring blocks.
2282   int ref_match_found_in_above_nb = 0;
2283   int ref_match_found_in_left_nb = 0;
2284   if (prune_modes_based_on_tpl) {
2285     ref_match_found_in_above_nb =
2286         find_ref_match_in_above_nbs(cm->mi_params.mi_cols, xd);
2287     ref_match_found_in_left_nb =
2288         find_ref_match_in_left_nbs(cm->mi_params.mi_rows, xd);
2289   }
2290 
2291   // First, perform a simple translation search for each of the indices. If
2292   // an index performs well, it will be fully searched here.
2293   const int ref_set = get_drl_refmv_count(x, mbmi->ref_frame, this_mode);
2294   // Save MV results from first 2 ref_mv_idx.
2295   int_mv save_mv[MAX_REF_MV_SEARCH - 1][2] = { { { 0 } } };
2296   int best_ref_mv_idx = -1;
2297   const int idx_mask = ref_mv_idx_to_search(cpi, x, rd_stats, args, ref_best_rd,
2298                                             mode_info, bsize, ref_set);
2299   const int16_t mode_ctx =
2300       av1_mode_context_analyzer(mbmi_ext->mode_context, mbmi->ref_frame);
2301   const int ref_mv_cost = cost_mv_ref(x, this_mode, mode_ctx);
2302   const int base_rate =
2303       args->ref_frame_cost + args->single_comp_cost + ref_mv_cost;
2304   for (int ref_mv_idx = 0; ref_mv_idx < ref_set; ++ref_mv_idx) {
2305     mode_info[ref_mv_idx].full_search_mv.as_int = INVALID_MV;
2306     mode_info[ref_mv_idx].mv.as_int = INVALID_MV;
2307     mode_info[ref_mv_idx].rd = INT64_MAX;
2308 
2309     if (!mask_check_bit(idx_mask, ref_mv_idx)) {
2310       // MV did not perform well in simple translation search. Skip it.
2311       continue;
2312     }
2313     if (prune_modes_based_on_tpl && !ref_match_found_in_above_nb &&
2314         !ref_match_found_in_left_nb && (ref_best_rd != INT64_MAX)) {
2315       if (prune_modes_based_on_tpl_stats(
2316               inter_cost_info_from_tpl, refs, ref_mv_idx, this_mode,
2317               cpi->sf.inter_sf.prune_inter_modes_based_on_tpl))
2318         continue;
2319     }
2320     av1_init_rd_stats(rd_stats);
2321 
2322     mbmi->interinter_comp.type = COMPOUND_AVERAGE;
2323     mbmi->comp_group_idx = 0;
2324     mbmi->compound_idx = 1;
2325     if (mbmi->ref_frame[1] == INTRA_FRAME) mbmi->ref_frame[1] = NONE_FRAME;
2326 
2327     mbmi->num_proj_ref = 0;
2328     mbmi->motion_mode = SIMPLE_TRANSLATION;
2329     mbmi->ref_mv_idx = ref_mv_idx;
2330 
2331     rd_stats->rate = base_rate;
2332     const int drl_cost =
2333         get_drl_cost(mbmi, mbmi_ext, x->drl_mode_cost0, ref_frame_type);
2334     rd_stats->rate += drl_cost;
2335     mode_info[ref_mv_idx].drl_cost = drl_cost;
2336 
2337     int rs = 0;
2338     int compmode_interinter_cost = 0;
2339 
2340     int_mv cur_mv[2];
2341 
2342     // TODO(Cherma): Extend this speed feature to support compound mode
2343     int skip_repeated_ref_mv =
2344         is_comp_pred ? 0 : cpi->sf.inter_sf.skip_repeated_ref_mv;
2345     if (!build_cur_mv(cur_mv, this_mode, cm, x, skip_repeated_ref_mv)) {
2346       continue;
2347     }
2348 
2349     if (have_newmv_in_inter_mode(this_mode)) {
2350 #if CONFIG_COLLECT_COMPONENT_TIMING
2351       start_timing(cpi, handle_newmv_time);
2352 #endif
2353       if (cpi->sf.inter_sf.prune_single_motion_modes_by_simple_trans &&
2354           args->single_ref_first_pass == 0 && !is_comp_pred) {
2355         const int ref0 = mbmi->ref_frame[0];
2356         newmv_ret_val = args->single_newmv_valid[ref_mv_idx][ref0] ? 0 : 1;
2357         cur_mv[0] = args->single_newmv[ref_mv_idx][ref0];
2358         rate_mv = args->single_newmv_rate[ref_mv_idx][ref0];
2359       } else {
2360         newmv_ret_val =
2361             handle_newmv(cpi, x, bsize, cur_mv, &rate_mv, args, mode_info);
2362       }
2363 #if CONFIG_COLLECT_COMPONENT_TIMING
2364       end_timing(cpi, handle_newmv_time);
2365 #endif
2366 
2367       if (newmv_ret_val != 0) continue;
2368 
2369       rd_stats->rate += rate_mv;
2370 
2371       if (cpi->sf.inter_sf.skip_repeated_newmv) {
2372         if (!is_comp_pred && this_mode == NEWMV && ref_mv_idx > 0) {
2373           int skip = 0;
2374           int this_rate_mv = 0;
2375           for (i = 0; i < ref_mv_idx; ++i) {
2376             // Check if the motion search result same as previous results
2377             if (cur_mv[0].as_int == args->single_newmv[i][refs[0]].as_int &&
2378                 args->single_newmv_valid[i][refs[0]]) {
2379               // If the compared mode has no valid rd, it is unlikely this
2380               // mode will be the best mode
2381               if (mode_info[i].rd == INT64_MAX) {
2382                 skip = 1;
2383                 break;
2384               }
2385               // Compare the cost difference including drl cost and mv cost
2386               if (mode_info[i].mv.as_int != INVALID_MV) {
2387                 const int compare_cost =
2388                     mode_info[i].rate_mv + mode_info[i].drl_cost;
2389                 const int_mv ref_mv = av1_get_ref_mv(x, 0);
2390                 this_rate_mv = av1_mv_bit_cost(
2391                     &mode_info[i].mv.as_mv, &ref_mv.as_mv, x->nmv_vec_cost,
2392                     x->mv_cost_stack, MV_COST_WEIGHT);
2393                 const int this_cost = this_rate_mv + drl_cost;
2394 
2395                 if (compare_cost <= this_cost) {
2396                   skip = 1;
2397                   break;
2398                 } else {
2399                   // If the cost is less than current best result, make this
2400                   // the best and update corresponding variables unless the
2401                   // best_mv is the same as ref_mv. In this case we skip and
2402                   // rely on NEAR(EST)MV instead
2403                   if (best_mbmi.ref_mv_idx == i &&
2404                       mode_info[i].mv.as_int != ref_mv.as_int) {
2405                     assert(best_rd != INT64_MAX);
2406                     best_mbmi.ref_mv_idx = ref_mv_idx;
2407                     motion_mode_cand->rate_mv = this_rate_mv;
2408                     best_rd_stats.rate += this_cost - compare_cost;
2409                     best_rd = RDCOST(x->rdmult, best_rd_stats.rate,
2410                                      best_rd_stats.dist);
2411                     if (best_rd < ref_best_rd) ref_best_rd = best_rd;
2412                     break;
2413                   }
2414                 }
2415               }
2416             }
2417           }
2418           if (skip) {
2419             const THR_MODES mode_enum = get_prediction_mode_idx(
2420                 best_mbmi.mode, best_mbmi.ref_frame[0], best_mbmi.ref_frame[1]);
2421             // Collect mode stats for multiwinner mode processing
2422             store_winner_mode_stats(
2423                 &cpi->common, x, &best_mbmi, &best_rd_stats, &best_rd_stats_y,
2424                 &best_rd_stats_uv, mode_enum, NULL, bsize, best_rd,
2425                 cpi->sf.winner_mode_sf.enable_multiwinner_mode_process,
2426                 do_tx_search);
2427             args->modelled_rd[this_mode][ref_mv_idx][refs[0]] =
2428                 args->modelled_rd[this_mode][i][refs[0]];
2429             args->simple_rd[this_mode][ref_mv_idx][refs[0]] =
2430                 args->simple_rd[this_mode][i][refs[0]];
2431             mode_info[ref_mv_idx].rd = mode_info[i].rd;
2432             mode_info[ref_mv_idx].rate_mv = this_rate_mv;
2433             mode_info[ref_mv_idx].mv.as_int = mode_info[i].mv.as_int;
2434 
2435             restore_dst_buf(xd, orig_dst, num_planes);
2436             continue;
2437           }
2438         }
2439       }
2440     }
2441     for (i = 0; i < is_comp_pred + 1; ++i) {
2442       mbmi->mv[i].as_int = cur_mv[i].as_int;
2443     }
2444 
2445     if (RDCOST(x->rdmult, rd_stats->rate, 0) > ref_best_rd &&
2446         mbmi->mode != NEARESTMV && mbmi->mode != NEAREST_NEARESTMV) {
2447       continue;
2448     }
2449 
2450     if (cpi->sf.inter_sf.prune_ref_mv_idx_search && is_comp_pred) {
2451       // TODO(yunqing): Move this part to a separate function when it is done.
2452       // Store MV result.
2453       if (ref_mv_idx < MAX_REF_MV_SEARCH - 1) {
2454         for (i = 0; i < is_comp_pred + 1; ++i)
2455           save_mv[ref_mv_idx][i].as_int = mbmi->mv[i].as_int;
2456       }
2457       // Skip the evaluation if an MV match is found.
2458       if (ref_mv_idx > 0) {
2459         int match = 0;
2460         for (int idx = 0; idx < ref_mv_idx; ++idx) {
2461           int mv_diff = 0;
2462           for (i = 0; i < 1 + is_comp_pred; ++i) {
2463             mv_diff += abs(save_mv[idx][i].as_mv.row - mbmi->mv[i].as_mv.row) +
2464                        abs(save_mv[idx][i].as_mv.col - mbmi->mv[i].as_mv.col);
2465           }
2466 
2467           // If this mode is not the best one, and current MV is similar to
2468           // previous stored MV, terminate this ref_mv_idx evaluation.
2469           if (best_ref_mv_idx == -1 && mv_diff < 1) {
2470             match = 1;
2471             break;
2472           }
2473         }
2474         if (match == 1) continue;
2475       }
2476     }
2477 
2478 #if CONFIG_COLLECT_COMPONENT_TIMING
2479     start_timing(cpi, compound_type_rd_time);
2480 #endif
2481     int skip_build_pred = 0;
2482     const int mi_row = xd->mi_row;
2483     const int mi_col = xd->mi_col;
2484     if (is_comp_pred) {
2485       // Find matching interp filter or set to default interp filter
2486       const int need_search = av1_is_interp_needed(xd);
2487       const InterpFilter assign_filter = cm->features.interp_filter;
2488       int is_luma_interp_done = 0;
2489       av1_find_interp_filter_match(mbmi, cpi, assign_filter, need_search,
2490                                    args->interp_filter_stats,
2491                                    args->interp_filter_stats_idx);
2492 
2493       int64_t best_rd_compound;
2494       int64_t rd_thresh;
2495       const int comp_type_rd_shift = COMP_TYPE_RD_THRESH_SHIFT;
2496       const int comp_type_rd_scale = COMP_TYPE_RD_THRESH_SCALE;
2497       rd_thresh = get_rd_thresh_from_best_rd(
2498           ref_best_rd, (1 << comp_type_rd_shift), comp_type_rd_scale);
2499       compmode_interinter_cost = av1_compound_type_rd(
2500           cpi, x, bsize, cur_mv, mode_search_mask, masked_compound_used,
2501           &orig_dst, &tmp_dst, rd_buffers, &rate_mv, &best_rd_compound,
2502           rd_stats, ref_best_rd, skip_rd[1], &is_luma_interp_done, rd_thresh);
2503       if (ref_best_rd < INT64_MAX &&
2504           (best_rd_compound >> comp_type_rd_shift) * comp_type_rd_scale >
2505               ref_best_rd) {
2506         restore_dst_buf(xd, orig_dst, num_planes);
2507         continue;
2508       }
2509       // No need to call av1_enc_build_inter_predictor for luma if
2510       // COMPOUND_AVERAGE is selected because it is the first
2511       // candidate in av1_compound_type_rd, and the following
2512       // compound types searching uses tmp_dst buffer
2513 
2514       if (mbmi->interinter_comp.type == COMPOUND_AVERAGE &&
2515           is_luma_interp_done) {
2516         if (num_planes > 1) {
2517           av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, &orig_dst,
2518                                         bsize, AOM_PLANE_U, num_planes - 1);
2519         }
2520         skip_build_pred = 1;
2521       }
2522     }
2523 
2524 #if CONFIG_COLLECT_COMPONENT_TIMING
2525     end_timing(cpi, compound_type_rd_time);
2526 #endif
2527 
2528 #if CONFIG_COLLECT_COMPONENT_TIMING
2529     start_timing(cpi, interpolation_filter_search_time);
2530 #endif
2531     ret_val = av1_interpolation_filter_search(
2532         x, cpi, tile_data, bsize, &tmp_dst, &orig_dst, &rd, &rs,
2533         &skip_build_pred, args, ref_best_rd);
2534 #if CONFIG_COLLECT_COMPONENT_TIMING
2535     end_timing(cpi, interpolation_filter_search_time);
2536 #endif
2537     if (args->modelled_rd != NULL && !is_comp_pred) {
2538       args->modelled_rd[this_mode][ref_mv_idx][refs[0]] = rd;
2539     }
2540     if (ret_val != 0) {
2541       restore_dst_buf(xd, orig_dst, num_planes);
2542       continue;
2543     } else if (cpi->sf.inter_sf.model_based_post_interp_filter_breakout &&
2544                ref_best_rd != INT64_MAX && (rd >> 3) * 3 > ref_best_rd) {
2545       restore_dst_buf(xd, orig_dst, num_planes);
2546       continue;
2547     }
2548 
2549     if (args->modelled_rd != NULL) {
2550       if (is_comp_pred) {
2551         const int mode0 = compound_ref0_mode(this_mode);
2552         const int mode1 = compound_ref1_mode(this_mode);
2553         const int64_t mrd =
2554             AOMMIN(args->modelled_rd[mode0][ref_mv_idx][refs[0]],
2555                    args->modelled_rd[mode1][ref_mv_idx][refs[1]]);
2556         if ((rd >> 3) * 6 > mrd && ref_best_rd < INT64_MAX) {
2557           restore_dst_buf(xd, orig_dst, num_planes);
2558           continue;
2559         }
2560       }
2561     }
2562     rd_stats->rate += compmode_interinter_cost;
2563     if (skip_build_pred != 1) {
2564       av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, &orig_dst, bsize, 0,
2565                                     av1_num_planes(cm) - 1);
2566     }
2567 
2568 #if CONFIG_COLLECT_COMPONENT_TIMING
2569     start_timing(cpi, motion_mode_rd_time);
2570 #endif
2571     int rate2_nocoeff = rd_stats->rate;
2572     ret_val = motion_mode_rd(cpi, tile_data, x, bsize, rd_stats, rd_stats_y,
2573                              rd_stats_uv, disable_skip, args, ref_best_rd,
2574                              skip_rd, &rate_mv, &orig_dst, best_est_rd,
2575                              do_tx_search, inter_modes_info, 0);
2576 #if CONFIG_COLLECT_COMPONENT_TIMING
2577     end_timing(cpi, motion_mode_rd_time);
2578 #endif
2579 
2580     mode_info[ref_mv_idx].mv.as_int = mbmi->mv[0].as_int;
2581     mode_info[ref_mv_idx].rate_mv = rate_mv;
2582     if (ret_val != INT64_MAX) {
2583       int64_t tmp_rd = RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist);
2584       mode_info[ref_mv_idx].rd = tmp_rd;
2585       const THR_MODES mode_enum = get_prediction_mode_idx(
2586           mbmi->mode, mbmi->ref_frame[0], mbmi->ref_frame[1]);
2587       // Collect mode stats for multiwinner mode processing
2588       store_winner_mode_stats(
2589           &cpi->common, x, mbmi, rd_stats, rd_stats_y, rd_stats_uv, mode_enum,
2590           NULL, bsize, tmp_rd,
2591           cpi->sf.winner_mode_sf.enable_multiwinner_mode_process, do_tx_search);
2592       if (tmp_rd < best_rd) {
2593         best_rd_stats = *rd_stats;
2594         best_rd_stats_y = *rd_stats_y;
2595         best_rd_stats_uv = *rd_stats_uv;
2596         best_rd = tmp_rd;
2597         best_mbmi = *mbmi;
2598         best_disable_skip = *disable_skip;
2599         best_xskip = x->force_skip;
2600         memcpy(best_blk_skip, x->blk_skip,
2601                sizeof(best_blk_skip[0]) * xd->height * xd->width);
2602         av1_copy_array(best_tx_type_map, xd->tx_type_map,
2603                        xd->height * xd->width);
2604         motion_mode_cand->rate_mv = rate_mv;
2605         motion_mode_cand->rate2_nocoeff = rate2_nocoeff;
2606       }
2607 
2608       if (tmp_rd < ref_best_rd) {
2609         ref_best_rd = tmp_rd;
2610         best_ref_mv_idx = ref_mv_idx;
2611       }
2612     }
2613     restore_dst_buf(xd, orig_dst, num_planes);
2614   }
2615 
2616   if (best_rd == INT64_MAX) return INT64_MAX;
2617 
2618   // re-instate status of the best choice
2619   *rd_stats = best_rd_stats;
2620   *rd_stats_y = best_rd_stats_y;
2621   *rd_stats_uv = best_rd_stats_uv;
2622   *mbmi = best_mbmi;
2623   *disable_skip = best_disable_skip;
2624   x->force_skip = best_xskip;
2625   assert(IMPLIES(mbmi->comp_group_idx == 1,
2626                  mbmi->interinter_comp.type != COMPOUND_AVERAGE));
2627   memcpy(x->blk_skip, best_blk_skip,
2628          sizeof(best_blk_skip[0]) * xd->height * xd->width);
2629   av1_copy_array(xd->tx_type_map, best_tx_type_map, xd->height * xd->width);
2630 
2631   rd_stats->rdcost = RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist);
2632 
2633   return rd_stats->rdcost;
2634 }
2635 
// Evaluates the IntraBC (intra block copy) mode for the current block: finds
// a displacement vector (DV) via full-pel and hash search over the already
// coded above/left regions of the tile, runs the transform search, and keeps
// the best result in *mbmi / *rd_stats. Returns the best RD cost found (the
// incoming best_rd if IntraBC does not win, INT64_MAX if IntraBC is not
// allowed for this frame).
static int64_t rd_pick_intrabc_mode_sb(const AV1_COMP *cpi, MACROBLOCK *x,
                                       PICK_MODE_CONTEXT *ctx,
                                       RD_STATS *rd_stats, BLOCK_SIZE bsize,
                                       int64_t best_rd) {
  const AV1_COMMON *const cm = &cpi->common;
  if (!av1_allow_intrabc(cm) || !cpi->oxcf.enable_intrabc) return INT64_MAX;
  const int num_planes = av1_num_planes(cm);

  MACROBLOCKD *const xd = &x->e_mbd;
  const TileInfo *tile = &xd->tile;
  MB_MODE_INFO *mbmi = xd->mi[0];
  const int mi_row = xd->mi_row;
  const int mi_col = xd->mi_col;
  const int w = block_size_wide[bsize];
  const int h = block_size_high[bsize];
  const int sb_row = mi_row >> cm->seq_params.mib_size_log2;
  const int sb_col = mi_col >> cm->seq_params.mib_size_log2;

  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  MV_REFERENCE_FRAME ref_frame = INTRA_FRAME;
  av1_find_mv_refs(cm, xd, mbmi, ref_frame, mbmi_ext->ref_mv_count,
                   xd->ref_mv_stack, xd->weight, NULL, mbmi_ext->global_mvs,
                   mbmi_ext->mode_context);
  // TODO(Ravi): Populate mbmi_ext->ref_mv_stack[ref_frame][4] and
  // mbmi_ext->weight[ref_frame][4] inside av1_find_mv_refs.
  av1_copy_usable_ref_mv_stack_and_weight(xd, mbmi_ext, ref_frame);
  int_mv nearestmv, nearmv;
  av1_find_best_ref_mvs_from_stack(0, mbmi_ext, ref_frame, &nearestmv, &nearmv,
                                   0);

  // Fall back to a zero vector when no valid candidate was found.
  if (nearestmv.as_int == INVALID_MV) {
    nearestmv.as_int = 0;
  }
  if (nearmv.as_int == INVALID_MV) {
    nearmv.as_int = 0;
  }

  int_mv dv_ref = nearestmv.as_int == 0 ? nearmv : nearestmv;
  if (dv_ref.as_int == 0) {
    av1_find_ref_dv(&dv_ref, tile, cm->seq_params.mib_size, mi_row);
  }
  // Ref DV should not have sub-pel.
  assert((dv_ref.as_mv.col & 7) == 0);
  assert((dv_ref.as_mv.row & 7) == 0);
  mbmi_ext->ref_mv_stack[INTRA_FRAME][0].this_mv = dv_ref;

  // IntraBC predicts from the current (partially reconstructed) frame.
  struct buf_2d yv12_mb[MAX_MB_PLANE];
  av1_setup_pred_block(xd, yv12_mb, xd->cur_buf, NULL, NULL, num_planes);
  for (int i = 0; i < num_planes; ++i) {
    xd->plane[i].pre[0] = yv12_mb[i];
  }

  enum IntrabcMotionDirection {
    IBC_MOTION_ABOVE,
    IBC_MOTION_LEFT,
    IBC_MOTION_DIRECTIONS
  };

  MB_MODE_INFO best_mbmi = *mbmi;
  RD_STATS best_rdstats = *rd_stats;
  uint8_t best_blk_skip[MAX_MIB_SIZE * MAX_MIB_SIZE] = { 0 };
  uint8_t best_tx_type_map[MAX_MIB_SIZE * MAX_MIB_SIZE];
  av1_copy_array(best_tx_type_map, xd->tx_type_map, ctx->num_4x4_blk);

  FULLPEL_MOTION_SEARCH_PARAMS fullms_params;
  const search_site_config *lookahead_search_sites =
      &cpi->mv_search_params.ss_cfg[SS_CFG_LOOKAHEAD];
  av1_make_default_fullpel_ms_params(&fullms_params, cpi, x, bsize,
                                     &dv_ref.as_mv, lookahead_search_sites);
  fullms_params.is_intra_mode = 1;

  for (enum IntrabcMotionDirection dir = IBC_MOTION_ABOVE;
       dir < IBC_MOTION_DIRECTIONS; ++dir) {
    // Restrict the DV search to the region that is already reconstructed:
    // either everything above the current superblock row, or everything to
    // the left within the current superblock row.
    switch (dir) {
      case IBC_MOTION_ABOVE:
        fullms_params.mv_limits.col_min =
            (tile->mi_col_start - mi_col) * MI_SIZE;
        fullms_params.mv_limits.col_max =
            (tile->mi_col_end - mi_col) * MI_SIZE - w;
        fullms_params.mv_limits.row_min =
            (tile->mi_row_start - mi_row) * MI_SIZE;
        fullms_params.mv_limits.row_max =
            (sb_row * cm->seq_params.mib_size - mi_row) * MI_SIZE - h;
        break;
      case IBC_MOTION_LEFT:
        fullms_params.mv_limits.col_min =
            (tile->mi_col_start - mi_col) * MI_SIZE;
        fullms_params.mv_limits.col_max =
            (sb_col * cm->seq_params.mib_size - mi_col) * MI_SIZE - w;
        // TODO(aconverse@google.com): Minimize the overlap between above and
        // left areas.
        fullms_params.mv_limits.row_min =
            (tile->mi_row_start - mi_row) * MI_SIZE;
        int bottom_coded_mi_edge =
            AOMMIN((sb_row + 1) * cm->seq_params.mib_size, tile->mi_row_end);
        fullms_params.mv_limits.row_max =
            (bottom_coded_mi_edge - mi_row) * MI_SIZE - h;
        break;
      default: assert(0);
    }
    // Save the geometry-derived limits so we can verify that
    // av1_set_mv_search_range() only ever tightens them. (The previous
    // asserts compared each field against itself and were vacuous.)
    const FullMvLimits tmp_mv_limits = fullms_params.mv_limits;

    av1_set_mv_search_range(&fullms_params.mv_limits, &dv_ref.as_mv);

    assert(fullms_params.mv_limits.col_min >= tmp_mv_limits.col_min);
    assert(fullms_params.mv_limits.col_max <= tmp_mv_limits.col_max);
    assert(fullms_params.mv_limits.row_min >= tmp_mv_limits.row_min);
    assert(fullms_params.mv_limits.row_max <= tmp_mv_limits.row_max);
    (void)tmp_mv_limits;  // Avoid unused warning when NDEBUG elides asserts.

    if (fullms_params.mv_limits.col_max < fullms_params.mv_limits.col_min ||
        fullms_params.mv_limits.row_max < fullms_params.mv_limits.row_min) {
      continue;
    }

    const int step_param = cpi->mv_search_params.mv_step_param;
    const FULLPEL_MV start_mv = get_fullmv_from_mv(&dv_ref.as_mv);
    IntraBCHashInfo *intrabc_hash_info = &x->intrabc_hash_info;
    int_mv best_mv, best_hash_mv;

    // Regular full-pel search, then hash-based search; keep the better DV.
    int bestsme = av1_full_pixel_search(start_mv, &fullms_params, step_param,
                                        NULL, &best_mv.as_fullmv, NULL);
    const int hashsme = av1_intrabc_hash_search(
        cpi, xd, &fullms_params, intrabc_hash_info, &best_hash_mv.as_fullmv);
    if (hashsme < bestsme) {
      best_mv = best_hash_mv;
      bestsme = hashsme;
    }

    if (bestsme == INT_MAX) continue;
    const MV dv = get_mv_from_fullmv(&best_mv.as_fullmv);
    if (!av1_is_fullmv_in_range(&fullms_params.mv_limits,
                                get_fullmv_from_mv(&dv)))
      continue;
    if (!av1_is_dv_valid(dv, cm, xd, mi_row, mi_col, bsize,
                         cm->seq_params.mib_size_log2))
      continue;

    // DV should not have sub-pel.
    assert((dv.col & 7) == 0);
    assert((dv.row & 7) == 0);
    memset(&mbmi->palette_mode_info, 0, sizeof(mbmi->palette_mode_info));
    mbmi->filter_intra_mode_info.use_filter_intra = 0;
    mbmi->use_intrabc = 1;
    mbmi->mode = DC_PRED;
    mbmi->uv_mode = UV_DC_PRED;
    mbmi->motion_mode = SIMPLE_TRANSLATION;
    mbmi->mv[0].as_mv = dv;
    mbmi->interp_filters = av1_broadcast_interp_filter(BILINEAR);
    mbmi->skip = 0;
    av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, 0,
                                  av1_num_planes(cm) - 1);

    const IntraBCMVCosts *const dv_costs = &cpi->dv_costs;
    int *dvcost[2] = { (int *)&dv_costs->mv_component[0][MV_MAX],
                       (int *)&dv_costs->mv_component[1][MV_MAX] };
    // TODO(aconverse@google.com): The full motion field defining discount
    // in MV_COST_WEIGHT is too large. Explore other values.
    const int rate_mv = av1_mv_bit_cost(&dv, &dv_ref.as_mv, dv_costs->joint_mv,
                                        dvcost, MV_COST_WEIGHT_SUB);
    const int rate_mode = x->intrabc_cost[1];
    RD_STATS rd_stats_yuv, rd_stats_y, rd_stats_uv;
    if (!av1_txfm_search(cpi, x, bsize, &rd_stats_yuv, &rd_stats_y,
                         &rd_stats_uv, rate_mode + rate_mv, INT64_MAX))
      continue;
    rd_stats_yuv.rdcost =
        RDCOST(x->rdmult, rd_stats_yuv.rate, rd_stats_yuv.dist);
    if (rd_stats_yuv.rdcost < best_rd) {
      best_rd = rd_stats_yuv.rdcost;
      best_mbmi = *mbmi;
      best_rdstats = rd_stats_yuv;
      memcpy(best_blk_skip, x->blk_skip,
             sizeof(x->blk_skip[0]) * xd->height * xd->width);
      av1_copy_array(best_tx_type_map, xd->tx_type_map, xd->height * xd->width);
    }
  }
  // Re-instate the overall best configuration.
  *mbmi = best_mbmi;
  *rd_stats = best_rdstats;
  memcpy(x->blk_skip, best_blk_skip,
         sizeof(x->blk_skip[0]) * xd->height * xd->width);
  av1_copy_array(xd->tx_type_map, best_tx_type_map, ctx->num_4x4_blk);
#if CONFIG_RD_DEBUG
  mbmi->rd_stats = *rd_stats;
#endif
  return best_rd;
}
2819 
// Performs the intra-frame mode search for one block: picks the best luma
// intra mode, then (when chroma is present and this mi is a chroma reference)
// the best chroma intra mode, and finally evaluates intra block copy
// (IntraBC). The winning rate/distortion stats are written to *rd_cost and
// the winning mode info is cached in 'ctx' for the caller.
void av1_rd_pick_intra_mode_sb(const AV1_COMP *cpi, MACROBLOCK *x,
                               RD_STATS *rd_cost, BLOCK_SIZE bsize,
                               PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
  const AV1_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  const int num_planes = av1_num_planes(cm);
  int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
  int y_skip = 0, uv_skip = 0;
  int64_t dist_y = 0, dist_uv = 0;

  ctx->rd_stats.skip = 0;
  // Configure mbmi as a regular intra (non-IntraBC) block before the search.
  mbmi->ref_frame[0] = INTRA_FRAME;
  mbmi->ref_frame[1] = NONE_FRAME;
  mbmi->use_intrabc = 0;
  mbmi->mv[0].as_int = 0;
  mbmi->skip_mode = 0;

  // Luma intra mode search; returns the best luma-only rd cost.
  const int64_t intra_yrd =
      av1_rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, &dist_y,
                                 &y_skip, bsize, best_rd, ctx);

  // Initialize default mode evaluation params
  set_mode_eval_params(cpi, x, DEFAULT_EVAL);

  if (intra_yrd < best_rd) {
    // Only store reconstructed luma when there's chroma RDO. When there's no
    // chroma RDO, the reconstructed luma will be stored in encode_superblock().
    xd->cfl.store_y = store_cfl_required_rdo(cm, x);
    if (xd->cfl.store_y) {
      // Restore reconstructed luma values.
      memcpy(x->blk_skip, ctx->blk_skip,
             sizeof(x->blk_skip[0]) * ctx->num_4x4_blk);
      av1_copy_array(xd->tx_type_map, ctx->tx_type_map, ctx->num_4x4_blk);
      av1_encode_intra_block_plane(cpi, x, bsize, AOM_PLANE_Y, DRY_RUN_NORMAL,
                                   cpi->optimize_seg_arr[mbmi->segment_id]);
      av1_copy_array(ctx->tx_type_map, xd->tx_type_map, ctx->num_4x4_blk);
      xd->cfl.store_y = 0;
    }
    if (num_planes > 1) {
      // Chroma intra mode search (skipped when this mi carries no chroma).
      init_sbuv_mode(mbmi);
      if (xd->is_chroma_ref) {
        const TX_SIZE max_uv_tx_size = av1_get_tx_size(AOM_PLANE_U, xd);
        av1_rd_pick_intra_sbuv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly,
                                    &dist_uv, &uv_skip, bsize, max_uv_tx_size);
      }
    }

    // Intra block is always coded as non-skip
    rd_cost->rate =
        rate_y + rate_uv + x->skip_cost[av1_get_skip_context(xd)][0];
    rd_cost->dist = dist_y + dist_uv;
    rd_cost->rdcost = RDCOST(x->rdmult, rd_cost->rate, rd_cost->dist);
    rd_cost->skip = 0;
  } else {
    // Regular intra did not beat the incoming threshold; flag as not viable.
    rd_cost->rate = INT_MAX;
  }

  // Seed the IntraBC search with the best rd found so far; a win is detected
  // by the returned rd cost beating 'best_rd'.
  if (rd_cost->rate != INT_MAX && rd_cost->rdcost < best_rd)
    best_rd = rd_cost->rdcost;
  if (rd_pick_intrabc_mode_sb(cpi, x, ctx, rd_cost, bsize, best_rd) < best_rd) {
    ctx->rd_stats.skip = mbmi->skip;
    memcpy(ctx->blk_skip, x->blk_skip,
           sizeof(x->blk_skip[0]) * ctx->num_4x4_blk);
    assert(rd_cost->rate != INT_MAX);
  }
  if (rd_cost->rate == INT_MAX) return;

  // Cache the winning mode info and tx types for the caller.
  ctx->mic = *xd->mi[0];
  av1_copy_mbmi_ext_to_mbmi_ext_frame(&ctx->mbmi_ext_best, x->mbmi_ext,
                                      av1_ref_frame_type(xd->mi[0]->ref_frame));
  av1_copy_array(ctx->tx_type_map, xd->tx_type_map, ctx->num_4x4_blk);
}
2893 
2894 static AOM_INLINE void calc_target_weighted_pred(
2895     const AV1_COMMON *cm, const MACROBLOCK *x, const MACROBLOCKD *xd,
2896     const uint8_t *above, int above_stride, const uint8_t *left,
2897     int left_stride);
2898 
rd_pick_skip_mode(RD_STATS * rd_cost,InterModeSearchState * search_state,const AV1_COMP * const cpi,MACROBLOCK * const x,BLOCK_SIZE bsize,struct buf_2d yv12_mb[REF_FRAMES][MAX_MB_PLANE])2899 static AOM_INLINE void rd_pick_skip_mode(
2900     RD_STATS *rd_cost, InterModeSearchState *search_state,
2901     const AV1_COMP *const cpi, MACROBLOCK *const x, BLOCK_SIZE bsize,
2902     struct buf_2d yv12_mb[REF_FRAMES][MAX_MB_PLANE]) {
2903   const AV1_COMMON *const cm = &cpi->common;
2904   const SkipModeInfo *const skip_mode_info = &cm->current_frame.skip_mode_info;
2905   const int num_planes = av1_num_planes(cm);
2906   MACROBLOCKD *const xd = &x->e_mbd;
2907   MB_MODE_INFO *const mbmi = xd->mi[0];
2908 
2909   x->compound_idx = 1;  // COMPOUND_AVERAGE
2910   RD_STATS skip_mode_rd_stats;
2911   av1_invalid_rd_stats(&skip_mode_rd_stats);
2912 
2913   if (skip_mode_info->ref_frame_idx_0 == INVALID_IDX ||
2914       skip_mode_info->ref_frame_idx_1 == INVALID_IDX) {
2915     return;
2916   }
2917 
2918   const MV_REFERENCE_FRAME ref_frame =
2919       LAST_FRAME + skip_mode_info->ref_frame_idx_0;
2920   const MV_REFERENCE_FRAME second_ref_frame =
2921       LAST_FRAME + skip_mode_info->ref_frame_idx_1;
2922   const PREDICTION_MODE this_mode = NEAREST_NEARESTMV;
2923   const THR_MODES mode_index =
2924       get_prediction_mode_idx(this_mode, ref_frame, second_ref_frame);
2925 
2926   if (mode_index == THR_INVALID) {
2927     return;
2928   }
2929 
2930   if ((!cpi->oxcf.enable_onesided_comp ||
2931        cpi->sf.inter_sf.disable_onesided_comp) &&
2932       cpi->all_one_sided_refs) {
2933     return;
2934   }
2935 
2936   mbmi->mode = this_mode;
2937   mbmi->uv_mode = UV_DC_PRED;
2938   mbmi->ref_frame[0] = ref_frame;
2939   mbmi->ref_frame[1] = second_ref_frame;
2940   const uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
2941   if (x->mbmi_ext->ref_mv_count[ref_frame_type] == UINT8_MAX) {
2942     if (x->mbmi_ext->ref_mv_count[ref_frame] == UINT8_MAX ||
2943         x->mbmi_ext->ref_mv_count[second_ref_frame] == UINT8_MAX) {
2944       return;
2945     }
2946     MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
2947     av1_find_mv_refs(cm, xd, mbmi, ref_frame_type, mbmi_ext->ref_mv_count,
2948                      xd->ref_mv_stack, xd->weight, NULL, mbmi_ext->global_mvs,
2949                      mbmi_ext->mode_context);
2950     // TODO(Ravi): Populate mbmi_ext->ref_mv_stack[ref_frame][4] and
2951     // mbmi_ext->weight[ref_frame][4] inside av1_find_mv_refs.
2952     av1_copy_usable_ref_mv_stack_and_weight(xd, mbmi_ext, ref_frame_type);
2953   }
2954 
2955   assert(this_mode == NEAREST_NEARESTMV);
2956   if (!build_cur_mv(mbmi->mv, this_mode, cm, x, 0)) {
2957     return;
2958   }
2959 
2960   mbmi->filter_intra_mode_info.use_filter_intra = 0;
2961   mbmi->interintra_mode = (INTERINTRA_MODE)(II_DC_PRED - 1);
2962   mbmi->comp_group_idx = 0;
2963   mbmi->compound_idx = x->compound_idx;
2964   mbmi->interinter_comp.type = COMPOUND_AVERAGE;
2965   mbmi->motion_mode = SIMPLE_TRANSLATION;
2966   mbmi->ref_mv_idx = 0;
2967   mbmi->skip_mode = mbmi->skip = 1;
2968 
2969   set_default_interp_filters(mbmi, cm->features.interp_filter);
2970 
2971   set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
2972   for (int i = 0; i < num_planes; i++) {
2973     xd->plane[i].pre[0] = yv12_mb[mbmi->ref_frame[0]][i];
2974     xd->plane[i].pre[1] = yv12_mb[mbmi->ref_frame[1]][i];
2975   }
2976 
2977   BUFFER_SET orig_dst;
2978   for (int i = 0; i < num_planes; i++) {
2979     orig_dst.plane[i] = xd->plane[i].dst.buf;
2980     orig_dst.stride[i] = xd->plane[i].dst.stride;
2981   }
2982 
2983   // Obtain the rdcost for skip_mode.
2984   skip_mode_rd(&skip_mode_rd_stats, cpi, x, bsize, &orig_dst);
2985 
2986   // Compare the use of skip_mode with the best intra/inter mode obtained.
2987   const int skip_mode_ctx = av1_get_skip_mode_context(xd);
2988   int64_t best_intra_inter_mode_cost = INT64_MAX;
2989   if (rd_cost->dist < INT64_MAX && rd_cost->rate < INT32_MAX) {
2990     best_intra_inter_mode_cost =
2991         RDCOST(x->rdmult, rd_cost->rate + x->skip_mode_cost[skip_mode_ctx][0],
2992                rd_cost->dist);
2993     // Account for non-skip mode rate in total rd stats
2994     rd_cost->rate += x->skip_mode_cost[skip_mode_ctx][0];
2995     av1_rd_cost_update(x->rdmult, rd_cost);
2996   }
2997 
2998   if (skip_mode_rd_stats.rdcost <= best_intra_inter_mode_cost &&
2999       (!xd->lossless[mbmi->segment_id] || skip_mode_rd_stats.dist == 0)) {
3000     assert(mode_index != THR_INVALID);
3001     search_state->best_mbmode.skip_mode = 1;
3002     search_state->best_mbmode = *mbmi;
3003 
3004     search_state->best_mbmode.skip_mode = search_state->best_mbmode.skip = 1;
3005     search_state->best_mbmode.mode = NEAREST_NEARESTMV;
3006     search_state->best_mbmode.ref_frame[0] = mbmi->ref_frame[0];
3007     search_state->best_mbmode.ref_frame[1] = mbmi->ref_frame[1];
3008     search_state->best_mbmode.mv[0].as_int = mbmi->mv[0].as_int;
3009     search_state->best_mbmode.mv[1].as_int = mbmi->mv[1].as_int;
3010     search_state->best_mbmode.ref_mv_idx = 0;
3011 
3012     // Set up tx_size related variables for skip-specific loop filtering.
3013     search_state->best_mbmode.tx_size =
3014         block_signals_txsize(bsize)
3015             ? tx_size_from_tx_mode(bsize, x->tx_mode_search_type)
3016             : max_txsize_rect_lookup[bsize];
3017     memset(search_state->best_mbmode.inter_tx_size,
3018            search_state->best_mbmode.tx_size,
3019            sizeof(search_state->best_mbmode.inter_tx_size));
3020     set_txfm_ctxs(search_state->best_mbmode.tx_size, xd->width, xd->height,
3021                   search_state->best_mbmode.skip && is_inter_block(mbmi), xd);
3022 
3023     // Set up color-related variables for skip mode.
3024     search_state->best_mbmode.uv_mode = UV_DC_PRED;
3025     search_state->best_mbmode.palette_mode_info.palette_size[0] = 0;
3026     search_state->best_mbmode.palette_mode_info.palette_size[1] = 0;
3027 
3028     search_state->best_mbmode.comp_group_idx = 0;
3029     search_state->best_mbmode.compound_idx = x->compound_idx;
3030     search_state->best_mbmode.interinter_comp.type = COMPOUND_AVERAGE;
3031     search_state->best_mbmode.motion_mode = SIMPLE_TRANSLATION;
3032 
3033     search_state->best_mbmode.interintra_mode =
3034         (INTERINTRA_MODE)(II_DC_PRED - 1);
3035     search_state->best_mbmode.filter_intra_mode_info.use_filter_intra = 0;
3036 
3037     set_default_interp_filters(&search_state->best_mbmode,
3038                                cm->features.interp_filter);
3039 
3040     search_state->best_mode_index = mode_index;
3041 
3042     // Update rd_cost
3043     rd_cost->rate = skip_mode_rd_stats.rate;
3044     rd_cost->dist = rd_cost->sse = skip_mode_rd_stats.dist;
3045     rd_cost->rdcost = skip_mode_rd_stats.rdcost;
3046 
3047     search_state->best_rd = rd_cost->rdcost;
3048     search_state->best_skip2 = 1;
3049     search_state->best_mode_skippable = 1;
3050 
3051     x->force_skip = 1;
3052   }
3053 }
3054 
3055 // Get winner mode stats of given mode index
get_winner_mode_stats(MACROBLOCK * x,MB_MODE_INFO * best_mbmode,RD_STATS * best_rd_cost,int best_rate_y,int best_rate_uv,THR_MODES * best_mode_index,RD_STATS ** winner_rd_cost,int * winner_rate_y,int * winner_rate_uv,THR_MODES * winner_mode_index,int enable_multiwinner_mode_process,int mode_idx)3056 static AOM_INLINE MB_MODE_INFO *get_winner_mode_stats(
3057     MACROBLOCK *x, MB_MODE_INFO *best_mbmode, RD_STATS *best_rd_cost,
3058     int best_rate_y, int best_rate_uv, THR_MODES *best_mode_index,
3059     RD_STATS **winner_rd_cost, int *winner_rate_y, int *winner_rate_uv,
3060     THR_MODES *winner_mode_index, int enable_multiwinner_mode_process,
3061     int mode_idx) {
3062   MB_MODE_INFO *winner_mbmi;
3063   if (enable_multiwinner_mode_process) {
3064     assert(mode_idx >= 0 && mode_idx < x->winner_mode_count);
3065     WinnerModeStats *winner_mode_stat = &x->winner_mode_stats[mode_idx];
3066     winner_mbmi = &winner_mode_stat->mbmi;
3067 
3068     *winner_rd_cost = &winner_mode_stat->rd_cost;
3069     *winner_rate_y = winner_mode_stat->rate_y;
3070     *winner_rate_uv = winner_mode_stat->rate_uv;
3071     *winner_mode_index = winner_mode_stat->mode_index;
3072   } else {
3073     winner_mbmi = best_mbmode;
3074     *winner_rd_cost = best_rd_cost;
3075     *winner_rate_y = best_rate_y;
3076     *winner_rate_uv = best_rate_uv;
3077     *winner_mode_index = *best_mode_index;
3078   }
3079   return winner_mbmi;
3080 }
3081 
3082 // speed feature: fast intra/inter transform type search
3083 // Used for speed >= 2
3084 // When this speed feature is on, in rd mode search, only DCT is used.
3085 // After the mode is determined, this function is called, to select
3086 // transform types and get accurate rdcost.
// Re-evaluates the transform configuration of the winner mode(s) with the
// full (accurate) transform-type search, replacing the best mode's rd stats
// when the refined result is better. Used when the earlier rd search ran
// with a restricted transform set for speed.
static AOM_INLINE void refine_winner_mode_tx(
    const AV1_COMP *cpi, MACROBLOCK *x, RD_STATS *rd_cost, BLOCK_SIZE bsize,
    PICK_MODE_CONTEXT *ctx, THR_MODES *best_mode_index,
    MB_MODE_INFO *best_mbmode, struct buf_2d yv12_mb[REF_FRAMES][MAX_MB_PLANE],
    int best_rate_y, int best_rate_uv, int *best_skip2, int winner_mode_count) {
  const AV1_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  int64_t best_rd;
  const int num_planes = av1_num_planes(cm);

  if (!is_winner_mode_processing_enabled(cpi, best_mbmode, best_mbmode->mode))
    return;

  // Set params for winner mode evaluation
  set_mode_eval_params(cpi, x, WINNER_MODE_EVAL);

  // No best mode identified so far
  if (*best_mode_index == THR_INVALID) return;

  best_rd = RDCOST(x->rdmult, rd_cost->rate, rd_cost->dist);
  for (int mode_idx = 0; mode_idx < winner_mode_count; mode_idx++) {
    RD_STATS *winner_rd_stats = NULL;
    int winner_rate_y = 0, winner_rate_uv = 0;
    THR_MODES winner_mode_index = 0;

    // TODO(any): Combine best mode and multi-winner mode processing paths
    // Get winner mode stats for current mode index
    MB_MODE_INFO *winner_mbmi = get_winner_mode_stats(
        x, best_mbmode, rd_cost, best_rate_y, best_rate_uv, best_mode_index,
        &winner_rd_stats, &winner_rate_y, &winner_rate_uv, &winner_mode_index,
        cpi->sf.winner_mode_sf.enable_multiwinner_mode_process, mode_idx);

    // Skip lossless blocks (no tx-type choice there) and invalid candidates.
    if (xd->lossless[winner_mbmi->segment_id] == 0 &&
        winner_mode_index != THR_INVALID &&
        is_winner_mode_processing_enabled(cpi, winner_mbmi,
                                          winner_mbmi->mode)) {
      RD_STATS rd_stats = *winner_rd_stats;
      int skip_blk = 0;
      RD_STATS rd_stats_y, rd_stats_uv;
      const int skip_ctx = av1_get_skip_context(xd);

      // Install the candidate mode so prediction/transform code sees it.
      *mbmi = *winner_mbmi;

      set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);

      // Select prediction reference frames.
      for (int i = 0; i < num_planes; i++) {
        xd->plane[i].pre[0] = yv12_mb[mbmi->ref_frame[0]][i];
        if (has_second_ref(mbmi))
          xd->plane[i].pre[1] = yv12_mb[mbmi->ref_frame[1]][i];
      }

      if (is_inter_mode(mbmi->mode)) {
        // Rebuild the inter prediction, then run the luma transform search
        // (recursive partitioning when TX_MODE_SELECT, uniform otherwise).
        const int mi_row = xd->mi_row;
        const int mi_col = xd->mi_col;
        av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, 0,
                                      av1_num_planes(cm) - 1);
        if (mbmi->motion_mode == OBMC_CAUSAL)
          av1_build_obmc_inter_predictors_sb(cm, xd);

        av1_subtract_plane(x, bsize, 0);
        if (x->tx_mode_search_type == TX_MODE_SELECT &&
            !xd->lossless[mbmi->segment_id]) {
          av1_pick_recursive_tx_size_type_yrd(cpi, x, &rd_stats_y, bsize,
                                              INT64_MAX);
          assert(rd_stats_y.rate != INT_MAX);
        } else {
          av1_pick_uniform_tx_size_type_yrd(cpi, x, &rd_stats_y, bsize,
                                            INT64_MAX);
          memset(mbmi->inter_tx_size, mbmi->tx_size,
                 sizeof(mbmi->inter_tx_size));
          for (int i = 0; i < xd->height * xd->width; ++i)
            set_blk_skip(x, 0, i, rd_stats_y.skip);
        }
      } else {
        // Intra: uniform tx size search on the existing intra prediction.
        av1_pick_uniform_tx_size_type_yrd(cpi, x, &rd_stats_y, bsize,
                                          INT64_MAX);
      }

      if (num_planes > 1) {
        av1_txfm_uvrd(cpi, x, &rd_stats_uv, bsize, INT64_MAX);
      } else {
        av1_init_rd_stats(&rd_stats_uv);
      }

      // Decide whether coding the block as skip (sse as distortion, skip-flag
      // rate only) beats coding the residual.
      if (is_inter_mode(mbmi->mode) &&
          RDCOST(x->rdmult,
                 x->skip_cost[skip_ctx][0] + rd_stats_y.rate + rd_stats_uv.rate,
                 (rd_stats_y.dist + rd_stats_uv.dist)) >
              RDCOST(x->rdmult, x->skip_cost[skip_ctx][1],
                     (rd_stats_y.sse + rd_stats_uv.sse))) {
        skip_blk = 1;
        rd_stats_y.rate = x->skip_cost[skip_ctx][1];
        rd_stats_uv.rate = 0;
        rd_stats_y.dist = rd_stats_y.sse;
        rd_stats_uv.dist = rd_stats_uv.sse;
      } else {
        skip_blk = 0;
        rd_stats_y.rate += x->skip_cost[skip_ctx][0];
      }
      // Total rate = candidate's full rate with its old y/uv rates replaced
      // by the refined ones.
      int this_rate = rd_stats.rate + rd_stats_y.rate + rd_stats_uv.rate -
                      winner_rate_y - winner_rate_uv;
      int64_t this_rd =
          RDCOST(x->rdmult, this_rate, (rd_stats_y.dist + rd_stats_uv.dist));
      if (best_rd > this_rd) {
        // Refined candidate wins: commit mode, tx info and rd stats.
        *best_mbmode = *mbmi;
        *best_mode_index = winner_mode_index;
        av1_copy_array(ctx->blk_skip, x->blk_skip, ctx->num_4x4_blk);
        av1_copy_array(ctx->tx_type_map, xd->tx_type_map, ctx->num_4x4_blk);
        rd_cost->rate = this_rate;
        rd_cost->dist = rd_stats_y.dist + rd_stats_uv.dist;
        rd_cost->sse = rd_stats_y.sse + rd_stats_uv.sse;
        rd_cost->rdcost = this_rd;
        best_rd = this_rd;
        *best_skip2 = skip_blk;
      }
    }
  }
}
3207 
// Masks describing which prediction modes and reference-frame combinations
// should be excluded from the inter mode search.
typedef struct {
  // Mask for each reference frame, specifying which prediction modes to NOT try
  // during search.
  uint32_t pred_modes[REF_FRAMES];
  // If ref_combo[i][j + 1] is true, do NOT try prediction using combination of
  // reference frames (i, j).
  // Note: indexing with 'j + 1' is due to the fact that 2nd reference can be -1
  // (NONE_FRAME).
  bool ref_combo[REF_FRAMES][REF_FRAMES + 1];
} mode_skip_mask_t;
3218 
3219 // Update 'ref_combo' mask to disable given 'ref' in single and compound modes.
disable_reference(MV_REFERENCE_FRAME ref,bool ref_combo[REF_FRAMES][REF_FRAMES+1])3220 static AOM_INLINE void disable_reference(
3221     MV_REFERENCE_FRAME ref, bool ref_combo[REF_FRAMES][REF_FRAMES + 1]) {
3222   for (MV_REFERENCE_FRAME ref2 = NONE_FRAME; ref2 < REF_FRAMES; ++ref2) {
3223     ref_combo[ref][ref2 + 1] = true;
3224   }
3225 }
3226 
3227 // Update 'ref_combo' mask to disable all inter references except ALTREF.
disable_inter_references_except_altref(bool ref_combo[REF_FRAMES][REF_FRAMES+1])3228 static AOM_INLINE void disable_inter_references_except_altref(
3229     bool ref_combo[REF_FRAMES][REF_FRAMES + 1]) {
3230   disable_reference(LAST_FRAME, ref_combo);
3231   disable_reference(LAST2_FRAME, ref_combo);
3232   disable_reference(LAST3_FRAME, ref_combo);
3233   disable_reference(GOLDEN_FRAME, ref_combo);
3234   disable_reference(BWDREF_FRAME, ref_combo);
3235   disable_reference(ALTREF2_FRAME, ref_combo);
3236 }
3237 
// Single/compound reference pairs evaluated when the encoder runs with the
// reduced reference set (second entry NONE_FRAME means single reference).
static const MV_REFERENCE_FRAME reduced_ref_combos[][2] = {
  { LAST_FRAME, NONE_FRAME },     { ALTREF_FRAME, NONE_FRAME },
  { LAST_FRAME, ALTREF_FRAME },   { GOLDEN_FRAME, NONE_FRAME },
  { INTRA_FRAME, NONE_FRAME },    { GOLDEN_FRAME, ALTREF_FRAME },
  { LAST_FRAME, GOLDEN_FRAME },   { LAST_FRAME, INTRA_FRAME },
  { LAST_FRAME, BWDREF_FRAME },   { LAST_FRAME, LAST3_FRAME },
  { GOLDEN_FRAME, BWDREF_FRAME }, { GOLDEN_FRAME, INTRA_FRAME },
  { BWDREF_FRAME, NONE_FRAME },   { BWDREF_FRAME, ALTREF_FRAME },
  { ALTREF_FRAME, INTRA_FRAME },  { BWDREF_FRAME, INTRA_FRAME },
};
3248 
// Reference combinations evaluated in real-time mode: single references only
// (no compound prediction).
static const MV_REFERENCE_FRAME real_time_ref_combos[][2] = {
  { LAST_FRAME, NONE_FRAME },
  { ALTREF_FRAME, NONE_FRAME },
  { GOLDEN_FRAME, NONE_FRAME },
  { INTRA_FRAME, NONE_FRAME }
};
3255 
3256 typedef enum { REF_SET_FULL, REF_SET_REDUCED, REF_SET_REALTIME } REF_SET;
3257 
default_skip_mask(mode_skip_mask_t * mask,REF_SET ref_set)3258 static AOM_INLINE void default_skip_mask(mode_skip_mask_t *mask,
3259                                          REF_SET ref_set) {
3260   if (ref_set == REF_SET_FULL) {
3261     // Everything available by default.
3262     memset(mask, 0, sizeof(*mask));
3263   } else {
3264     // All modes available by default.
3265     memset(mask->pred_modes, 0, sizeof(mask->pred_modes));
3266     // All references disabled first.
3267     for (MV_REFERENCE_FRAME ref1 = INTRA_FRAME; ref1 < REF_FRAMES; ++ref1) {
3268       for (MV_REFERENCE_FRAME ref2 = NONE_FRAME; ref2 < REF_FRAMES; ++ref2) {
3269         mask->ref_combo[ref1][ref2 + 1] = true;
3270       }
3271     }
3272     const MV_REFERENCE_FRAME(*ref_set_combos)[2];
3273     int num_ref_combos;
3274 
3275     // Then enable reduced set of references explicitly.
3276     switch (ref_set) {
3277       case REF_SET_REDUCED:
3278         ref_set_combos = reduced_ref_combos;
3279         num_ref_combos =
3280             (int)sizeof(reduced_ref_combos) / sizeof(reduced_ref_combos[0]);
3281         break;
3282       case REF_SET_REALTIME:
3283         ref_set_combos = real_time_ref_combos;
3284         num_ref_combos =
3285             (int)sizeof(real_time_ref_combos) / sizeof(real_time_ref_combos[0]);
3286         break;
3287       default: assert(0); num_ref_combos = 0;
3288     }
3289 
3290     for (int i = 0; i < num_ref_combos; ++i) {
3291       const MV_REFERENCE_FRAME *const this_combo = ref_set_combos[i];
3292       mask->ref_combo[this_combo[0]][this_combo[1] + 1] = false;
3293     }
3294   }
3295 }
3296 
// Populates 'mask' with the modes and reference combinations to skip for the
// current block, based on encoder config, speed features, segmentation, and
// the per-reference prediction SAD statistics gathered so far.
static AOM_INLINE void init_mode_skip_mask(mode_skip_mask_t *mask,
                                           const AV1_COMP *cpi, MACROBLOCK *x,
                                           BLOCK_SIZE bsize) {
  const AV1_COMMON *const cm = &cpi->common;
  const struct segmentation *const seg = &cm->seg;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  unsigned char segment_id = mbmi->segment_id;
  const SPEED_FEATURES *const sf = &cpi->sf;
  REF_SET ref_set = REF_SET_FULL;

  if (sf->rt_sf.use_real_time_ref_set)
    ref_set = REF_SET_REALTIME;
  else if (cpi->oxcf.enable_reduced_reference_set)
    ref_set = REF_SET_REDUCED;

  default_skip_mask(mask, ref_set);

  // Minimum pred_mv_sad over the references the chosen set will actually use;
  // serves as the pruning baseline below.
  int min_pred_mv_sad = INT_MAX;
  MV_REFERENCE_FRAME ref_frame;
  if (ref_set == REF_SET_REALTIME) {
    // For real-time encoding, we only look at a subset of ref frames. So the
    // threshold for pruning should be computed from this subset as well.
    const int num_rt_refs =
        sizeof(real_time_ref_combos) / sizeof(*real_time_ref_combos);
    for (int r_idx = 0; r_idx < num_rt_refs; r_idx++) {
      const MV_REFERENCE_FRAME ref = real_time_ref_combos[r_idx][0];
      if (ref != INTRA_FRAME) {
        min_pred_mv_sad = AOMMIN(min_pred_mv_sad, x->pred_mv_sad[ref]);
      }
    }
  } else {
    for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame)
      min_pred_mv_sad = AOMMIN(min_pred_mv_sad, x->pred_mv_sad[ref_frame]);
  }

  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    if (!(cpi->ref_frame_flags & av1_ref_frame_flag_list[ref_frame])) {
      // Skip checking missing reference in both single and compound reference
      // modes.
      disable_reference(ref_frame, mask->ref_combo);
    } else {
      // Skip fixed mv modes for poor references
      if ((x->pred_mv_sad[ref_frame] >> 2) > min_pred_mv_sad) {
        mask->pred_modes[ref_frame] |= INTER_NEAREST_NEAR_ZERO;
      }
    }
    if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
        get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
      // Reference not used for the segment.
      disable_reference(ref_frame, mask->ref_combo);
    }
  }
  // Note: We use the following drop-out only if the SEG_LVL_REF_FRAME feature
  // is disabled for this segment. This is to prevent the possibility that we
  // end up unable to pick any mode.
  if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
    // Only consider GLOBALMV/ALTREF_FRAME for alt ref frame,
    // unless ARNR filtering is enabled in which case we want
    // an unfiltered alternative. We allow near/nearest as well
    // because they may result in zero-zero MVs but be cheaper.
    if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
      disable_inter_references_except_altref(mask->ref_combo);

      mask->pred_modes[ALTREF_FRAME] = ~INTER_NEAREST_NEAR_ZERO;
      const MV_REFERENCE_FRAME tmp_ref_frames[2] = { ALTREF_FRAME, NONE_FRAME };
      int_mv near_mv, nearest_mv, global_mv;
      get_this_mv(&nearest_mv, NEARESTMV, 0, 0, 0, tmp_ref_frames, x->mbmi_ext);
      get_this_mv(&near_mv, NEARMV, 0, 0, 0, tmp_ref_frames, x->mbmi_ext);
      get_this_mv(&global_mv, GLOBALMV, 0, 0, 0, tmp_ref_frames, x->mbmi_ext);

      // Re-disable NEAR/NEAREST when they differ from the global MV (only
      // zero-zero equivalents are worth keeping here).
      if (near_mv.as_int != global_mv.as_int)
        mask->pred_modes[ALTREF_FRAME] |= (1 << NEARMV);
      if (nearest_mv.as_int != global_mv.as_int)
        mask->pred_modes[ALTREF_FRAME] |= (1 << NEARESTMV);
    }
  }

  if (cpi->rc.is_src_frame_alt_ref) {
    if (sf->inter_sf.alt_ref_search_fp) {
      assert(cpi->ref_frame_flags & av1_ref_frame_flag_list[ALTREF_FRAME]);
      mask->pred_modes[ALTREF_FRAME] = 0;
      disable_inter_references_except_altref(mask->ref_combo);
      disable_reference(INTRA_FRAME, mask->ref_combo);
    }
  }

  if (sf->inter_sf.alt_ref_search_fp) {
    if (!cm->show_frame && x->best_pred_mv_sad < INT_MAX) {
      // Threshold: best SAD plus 12.5%.
      int sad_thresh = x->best_pred_mv_sad + (x->best_pred_mv_sad >> 3);
      // Conservatively skip the modes w.r.t. BWDREF, ALTREF2 and ALTREF, if
      // those are past frames
      for (ref_frame = BWDREF_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
        if (cpi->ref_relative_dist[ref_frame - LAST_FRAME] < 0)
          if (x->pred_mv_sad[ref_frame] > sad_thresh)
            mask->pred_modes[ref_frame] |= INTER_ALL;
      }
    }
  }

  if (sf->inter_sf.adaptive_mode_search) {
    // Prune GOLDEN when its prediction SAD is much worse than LAST's.
    if (cm->show_frame && !cpi->rc.is_src_frame_alt_ref &&
        cpi->rc.frames_since_golden >= 3)
      if ((x->pred_mv_sad[GOLDEN_FRAME] >> 1) > x->pred_mv_sad[LAST_FRAME])
        mask->pred_modes[GOLDEN_FRAME] |= INTER_ALL;
  }

  if (bsize > sf->part_sf.max_intra_bsize) {
    disable_reference(INTRA_FRAME, mask->ref_combo);
  }

  // Disable intra modes excluded by the speed feature's luma-mode mask.
  mask->pred_modes[INTRA_FRAME] |=
      ~(sf->intra_sf.intra_y_mode_mask[max_txsize_lookup[bsize]]);
}
3411 
init_pred_buf(const MACROBLOCK * const x,HandleInterModeArgs * const args)3412 static AOM_INLINE void init_pred_buf(const MACROBLOCK *const x,
3413                                      HandleInterModeArgs *const args) {
3414   const MACROBLOCKD *const xd = &x->e_mbd;
3415   if (is_cur_buf_hbd(xd)) {
3416     const int len = sizeof(uint16_t);
3417     args->above_pred_buf[0] = CONVERT_TO_BYTEPTR(x->above_pred_buf);
3418     args->above_pred_buf[1] =
3419         CONVERT_TO_BYTEPTR(x->above_pred_buf + (MAX_SB_SQUARE >> 1) * len);
3420     args->above_pred_buf[2] =
3421         CONVERT_TO_BYTEPTR(x->above_pred_buf + MAX_SB_SQUARE * len);
3422     args->left_pred_buf[0] = CONVERT_TO_BYTEPTR(x->left_pred_buf);
3423     args->left_pred_buf[1] =
3424         CONVERT_TO_BYTEPTR(x->left_pred_buf + (MAX_SB_SQUARE >> 1) * len);
3425     args->left_pred_buf[2] =
3426         CONVERT_TO_BYTEPTR(x->left_pred_buf + MAX_SB_SQUARE * len);
3427   } else {
3428     args->above_pred_buf[0] = x->above_pred_buf;
3429     args->above_pred_buf[1] = x->above_pred_buf + (MAX_SB_SQUARE >> 1);
3430     args->above_pred_buf[2] = x->above_pred_buf + MAX_SB_SQUARE;
3431     args->left_pred_buf[0] = x->left_pred_buf;
3432     args->left_pred_buf[1] = x->left_pred_buf + (MAX_SB_SQUARE >> 1);
3433     args->left_pred_buf[2] = x->left_pred_buf + MAX_SB_SQUARE;
3434   }
3435 }
3436 
3437 // Please add/modify parameter setting in this function, making it consistent
3438 // and easy to read and maintain.
// Prepares all per-block state needed before the inter-mode RD search:
// OBMC prediction buffers, reference-frame signalling costs, per-reference
// MV candidate lists, overlappable-neighbor predictions (for OBMC), the
// mode skip mask, and the mode-evaluation parameters.
static AOM_INLINE void set_params_rd_pick_inter_mode(
    const AV1_COMP *cpi, MACROBLOCK *x, HandleInterModeArgs *args,
    BLOCK_SIZE bsize, mode_skip_mask_t *mode_skip_mask, int skip_ref_frame_mask,
    unsigned int *ref_costs_single, unsigned int (*ref_costs_comp)[REF_FRAMES],
    struct buf_2d (*yv12_mb)[MAX_MB_PLANE]) {
  const AV1_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
  unsigned char segment_id = mbmi->segment_id;

  // Point args at the above/left OBMC prediction scratch buffers.
  init_pred_buf(x, args);
  // Reference-frame usage of spatial neighbors feeds the cost estimation.
  av1_collect_neighbors_ref_counts(xd);
  estimate_ref_frame_costs(cm, xd, x, segment_id, ref_costs_single,
                           ref_costs_comp);

  const int mi_row = xd->mi_row;
  const int mi_col = xd->mi_col;
  MV_REFERENCE_FRAME ref_frame;
  x->best_pred_mv_sad = INT_MAX;
  // Single references: reset per-reference state and, for allowed
  // references, set up the yv12 buffers and MV candidates.
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    x->pred_mv_sad[ref_frame] = INT_MAX;
    x->mbmi_ext->mode_context[ref_frame] = 0;
    mbmi_ext->ref_mv_count[ref_frame] = UINT8_MAX;
    if (cpi->ref_frame_flags & av1_ref_frame_flag_list[ref_frame]) {
      if (mbmi->partition != PARTITION_NONE &&
          mbmi->partition != PARTITION_SPLIT) {
        // A reference masked out for single prediction must still be set up
        // if some non-skipped compound reference pair contains it.
        if (skip_ref_frame_mask & (1 << ref_frame)) {
          int skip = 1;
          for (int r = ALTREF_FRAME + 1; r < MODE_CTX_REF_FRAMES; ++r) {
            if (!(skip_ref_frame_mask & (1 << r))) {
              const MV_REFERENCE_FRAME *rf = ref_frame_map[r - REF_FRAMES];
              if (rf[0] == ref_frame || rf[1] == ref_frame) {
                skip = 0;
                break;
              }
            }
          }
          if (skip) continue;
        }
      }
      assert(get_ref_frame_yv12_buf(cm, ref_frame) != NULL);
      setup_buffer_ref_mvs_inter(cpi, x, ref_frame, bsize, yv12_mb);
    }
    // Store the best pred_mv_sad across all past frames
    if (cpi->sf.inter_sf.alt_ref_search_fp &&
        cpi->ref_relative_dist[ref_frame - LAST_FRAME] < 0)
      x->best_pred_mv_sad =
          AOMMIN(x->best_pred_mv_sad, x->pred_mv_sad[ref_frame]);
  }
  // ref_frame = ALTREF_FRAME
  // Compound references: find MV candidates for each allowed pair.
  if (!cpi->sf.rt_sf.use_real_time_ref_set) {
    // No second reference on RT ref set, so no need to initialize
    for (; ref_frame < MODE_CTX_REF_FRAMES; ++ref_frame) {
      x->mbmi_ext->mode_context[ref_frame] = 0;
      mbmi_ext->ref_mv_count[ref_frame] = UINT8_MAX;
      const MV_REFERENCE_FRAME *rf = ref_frame_map[ref_frame - REF_FRAMES];
      // Both references of the pair must be enabled.
      if (!((cpi->ref_frame_flags & av1_ref_frame_flag_list[rf[0]]) &&
            (cpi->ref_frame_flags & av1_ref_frame_flag_list[rf[1]]))) {
        continue;
      }

      if (mbmi->partition != PARTITION_NONE &&
          mbmi->partition != PARTITION_SPLIT) {
        if (skip_ref_frame_mask & (1 << ref_frame)) {
          continue;
        }
      }
      av1_find_mv_refs(cm, xd, mbmi, ref_frame, mbmi_ext->ref_mv_count,
                       xd->ref_mv_stack, xd->weight, NULL, mbmi_ext->global_mvs,
                       mbmi_ext->mode_context);
      // TODO(Ravi): Populate mbmi_ext->ref_mv_stack[ref_frame][4] and
      // mbmi_ext->weight[ref_frame][4] inside av1_find_mv_refs.
      av1_copy_usable_ref_mv_stack_and_weight(xd, mbmi_ext, ref_frame);
    }
  }

  // OBMC setup: build the above/left neighbor predictions only when OBMC is
  // enabled and not pruned by the frame-level probability threshold.
  av1_count_overlappable_neighbors(cm, xd);
  const FRAME_UPDATE_TYPE update_type = get_frame_update_type(&cpi->gf_group);
  const int prune_obmc = cpi->frame_probs.obmc_probs[update_type][bsize] <
                         cpi->sf.inter_sf.prune_obmc_prob_thresh;
  if (cpi->oxcf.enable_obmc && !cpi->sf.inter_sf.disable_obmc && !prune_obmc) {
    if (check_num_overlappable_neighbors(mbmi) &&
        is_motion_variation_allowed_bsize(bsize)) {
      // Above predictions use full width, half height; left predictions use
      // half width, full height.
      int dst_width1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
      int dst_width2[MAX_MB_PLANE] = { MAX_SB_SIZE >> 1, MAX_SB_SIZE >> 1,
                                       MAX_SB_SIZE >> 1 };
      int dst_height1[MAX_MB_PLANE] = { MAX_SB_SIZE >> 1, MAX_SB_SIZE >> 1,
                                        MAX_SB_SIZE >> 1 };
      int dst_height2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
      av1_build_prediction_by_above_preds(cm, xd, args->above_pred_buf,
                                          dst_width1, dst_height1,
                                          args->above_pred_stride);
      av1_build_prediction_by_left_preds(cm, xd, args->left_pred_buf,
                                         dst_width2, dst_height2,
                                         args->left_pred_stride);
      const int num_planes = av1_num_planes(cm);
      av1_setup_dst_planes(xd->plane, bsize, &cm->cur_frame->buf, mi_row,
                           mi_col, 0, num_planes);
      calc_target_weighted_pred(
          cm, x, xd, args->above_pred_buf[0], args->above_pred_stride[0],
          args->left_pred_buf[0], args->left_pred_stride[0]);
    }
  }

  init_mode_skip_mask(mode_skip_mask, cpi, x, bsize);

  // Set params for mode evaluation
  set_mode_eval_params(cpi, x, MODE_EVAL);

  x->comp_rd_stats_idx = 0;
}
3551 
init_intra_mode_search_state(IntraModeSearchState * intra_search_state)3552 static AOM_INLINE void init_intra_mode_search_state(
3553     IntraModeSearchState *intra_search_state) {
3554   intra_search_state->skip_intra_modes = 0;
3555   intra_search_state->best_intra_mode = DC_PRED;
3556   intra_search_state->angle_stats_ready = 0;
3557   av1_zero(intra_search_state->directional_mode_skip_mask);
3558   intra_search_state->rate_uv_intra = INT_MAX;
3559   av1_zero(intra_search_state->pmi_uv);
3560   for (int i = 0; i < REFERENCE_MODES; ++i)
3561     intra_search_state->best_pred_rd[i] = INT64_MAX;
3562 }
3563 
init_inter_mode_search_state(InterModeSearchState * search_state,const AV1_COMP * cpi,const MACROBLOCK * x,BLOCK_SIZE bsize,int64_t best_rd_so_far)3564 static AOM_INLINE void init_inter_mode_search_state(
3565     InterModeSearchState *search_state, const AV1_COMP *cpi,
3566     const MACROBLOCK *x, BLOCK_SIZE bsize, int64_t best_rd_so_far) {
3567   init_intra_mode_search_state(&search_state->intra_search_state);
3568 
3569   search_state->best_rd = best_rd_so_far;
3570   search_state->best_skip_rd[0] = INT64_MAX;
3571   search_state->best_skip_rd[1] = INT64_MAX;
3572 
3573   av1_zero(search_state->best_mbmode);
3574 
3575   search_state->best_rate_y = INT_MAX;
3576 
3577   search_state->best_rate_uv = INT_MAX;
3578 
3579   search_state->best_mode_skippable = 0;
3580 
3581   search_state->best_skip2 = 0;
3582 
3583   search_state->best_mode_index = THR_INVALID;
3584 
3585   const MACROBLOCKD *const xd = &x->e_mbd;
3586   const MB_MODE_INFO *const mbmi = xd->mi[0];
3587   const unsigned char segment_id = mbmi->segment_id;
3588 
3589   search_state->num_available_refs = 0;
3590   memset(search_state->dist_refs, -1, sizeof(search_state->dist_refs));
3591   memset(search_state->dist_order_refs, -1,
3592          sizeof(search_state->dist_order_refs));
3593 
3594   for (int i = 0; i <= LAST_NEW_MV_INDEX; ++i)
3595     search_state->mode_threshold[i] = 0;
3596   const int *const rd_threshes = cpi->rd.threshes[segment_id][bsize];
3597   for (int i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i)
3598     search_state->mode_threshold[i] =
3599         ((int64_t)rd_threshes[i] * x->thresh_freq_fact[bsize][i]) >>
3600         RD_THRESH_FAC_FRAC_BITS;
3601 
3602   search_state->best_intra_rd = INT64_MAX;
3603 
3604   search_state->best_pred_sse = UINT_MAX;
3605 
3606   av1_zero(search_state->single_newmv);
3607   av1_zero(search_state->single_newmv_rate);
3608   av1_zero(search_state->single_newmv_valid);
3609   for (int i = 0; i < MB_MODE_COUNT; ++i) {
3610     for (int j = 0; j < MAX_REF_MV_SEARCH; ++j) {
3611       for (int ref_frame = 0; ref_frame < REF_FRAMES; ++ref_frame) {
3612         search_state->modelled_rd[i][j][ref_frame] = INT64_MAX;
3613         search_state->simple_rd[i][j][ref_frame] = INT64_MAX;
3614       }
3615     }
3616   }
3617 
3618   for (int dir = 0; dir < 2; ++dir) {
3619     for (int mode = 0; mode < SINGLE_INTER_MODE_NUM; ++mode) {
3620       for (int ref_frame = 0; ref_frame < FWD_REFS; ++ref_frame) {
3621         SingleInterModeState *state;
3622 
3623         state = &search_state->single_state[dir][mode][ref_frame];
3624         state->ref_frame = NONE_FRAME;
3625         state->rd = INT64_MAX;
3626 
3627         state = &search_state->single_state_modelled[dir][mode][ref_frame];
3628         state->ref_frame = NONE_FRAME;
3629         state->rd = INT64_MAX;
3630       }
3631     }
3632   }
3633   for (int dir = 0; dir < 2; ++dir) {
3634     for (int mode = 0; mode < SINGLE_INTER_MODE_NUM; ++mode) {
3635       for (int ref_frame = 0; ref_frame < FWD_REFS; ++ref_frame) {
3636         search_state->single_rd_order[dir][mode][ref_frame] = NONE_FRAME;
3637       }
3638     }
3639   }
3640   av1_zero(search_state->single_state_cnt);
3641   av1_zero(search_state->single_state_modelled_cnt);
3642 }
3643 
mask_says_skip(const mode_skip_mask_t * mode_skip_mask,const MV_REFERENCE_FRAME * ref_frame,const PREDICTION_MODE this_mode)3644 static bool mask_says_skip(const mode_skip_mask_t *mode_skip_mask,
3645                            const MV_REFERENCE_FRAME *ref_frame,
3646                            const PREDICTION_MODE this_mode) {
3647   if (mode_skip_mask->pred_modes[ref_frame[0]] & (1 << this_mode)) {
3648     return true;
3649   }
3650 
3651   return mode_skip_mask->ref_combo[ref_frame[0]][ref_frame[1] + 1];
3652 }
3653 
// Returns 1 when the (mode, reference pair) combination is not usable for
// this block/frame configuration, 0 otherwise.
static int inter_mode_compatible_skip(const AV1_COMP *cpi, const MACROBLOCK *x,
                                      BLOCK_SIZE bsize,
                                      PREDICTION_MODE curr_mode,
                                      const MV_REFERENCE_FRAME *ref_frames) {
  const int is_comp = ref_frames[1] > INTRA_FRAME;
  if (is_comp) {
    const AV1_COMMON *const cm = &cpi->common;
    // Compound prediction requires a legal block size, an enabled second
    // reference, an inter frame, and compound reference mode.
    if (!is_comp_ref_allowed(bsize) ||
        !(cpi->ref_frame_flags & av1_ref_frame_flag_list[ref_frames[1]]) ||
        frame_is_intra_only(cm) ||
        cm->current_frame.reference_mode == SINGLE_REFERENCE) {
      return 1;
    }
    // Do not allow compound prediction if the segment level reference frame
    // feature is in use as in this case there can only be one reference.
    const unsigned char segment_id = x->e_mbd.mi[0]->segment_id;
    if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) return 1;
  }

  // Inter-intra (second ref == INTRA_FRAME): both the block size and the
  // prediction mode must allow it.
  const int is_interintra =
      ref_frames[0] > INTRA_FRAME && ref_frames[1] == INTRA_FRAME;
  if (is_interintra && (!is_interintra_allowed_bsize(bsize) ||
                        !is_interintra_allowed_mode(curr_mode))) {
    return 1;
  }

  return 0;
}
3686 
// Unions the recorded reference-frame masks of every mi unit covered by this
// block within its superblock.
static int fetch_picked_ref_frames_mask(const MACROBLOCK *const x,
                                        BLOCK_SIZE bsize, int mib_size) {
  const MACROBLOCKD *const xd = &x->e_mbd;
  const int sb_mask = mib_size - 1;
  const int row0 = xd->mi_row & sb_mask;  // mi position inside the superblock
  const int col0 = xd->mi_col & sb_mask;
  const int rows = mi_size_high[bsize];
  const int cols = mi_size_wide[bsize];
  int mask = 0;
  for (int r = row0; r < row0 + rows; ++r) {
    for (int c = col0; c < col0 + cols; ++c) {
      // picked_ref_frames_mask uses a fixed row stride of 32 mi units.
      mask |= x->picked_ref_frames_mask[r * 32 + c];
    }
  }
  return mask;
}
3705 
3706 // Case 1: return 0, means don't skip this mode
3707 // Case 2: return 1, means skip this mode completely
3708 // Case 3: return 2, means skip compound only, but still try single motion modes
static int inter_mode_search_order_independent_skip(
    const AV1_COMP *cpi, const MACROBLOCK *x, mode_skip_mask_t *mode_skip_mask,
    InterModeSearchState *search_state, int skip_ref_frame_mask,
    PREDICTION_MODE mode, const MV_REFERENCE_FRAME *ref_frame) {
  // Precomputed per-block mask already rules this combination out.
  if (mask_says_skip(mode_skip_mask, ref_frame, mode)) {
    return 1;
  }

  // Frame-level pruning of this reference type.
  const int ref_type = av1_ref_frame_type(ref_frame);
  if ((cpi->prune_ref_frame_mask >> ref_type) & 1) return 1;

  // This is only used in motion vector unit test.
  if (cpi->oxcf.motion_vector_unit_test && ref_frame[0] == INTRA_FRAME)
    return 1;

  // Skip modes whose MV candidates duplicate already-searched ones.
  const AV1_COMMON *const cm = &cpi->common;
  if (skip_repeated_mv(cm, x, mode, ref_frame, search_state)) {
    return 1;
  }

  // One-sided compound prediction can be disabled when all references lie
  // on the same temporal side of the current frame.
  const int comp_pred = ref_frame[1] > INTRA_FRAME;
  if ((!cpi->oxcf.enable_onesided_comp ||
       cpi->sf.inter_sf.disable_onesided_comp) &&
      comp_pred && cpi->all_one_sided_refs) {
    return 1;
  }

  const MB_MODE_INFO *const mbmi = x->e_mbd.mi[0];
  // If no valid mode has been found so far in PARTITION_NONE when finding a
  // valid partition is required, do not skip mode.
  if (search_state->best_rd == INT64_MAX && mbmi->partition == PARTITION_NONE &&
      x->must_find_valid_partition)
    return 0;

  int skip_motion_mode = 0;
  if (mbmi->partition != PARTITION_NONE && mbmi->partition != PARTITION_SPLIT) {
    int skip_ref = skip_ref_frame_mask & (1 << ref_type);
    if (ref_type <= ALTREF_FRAME && skip_ref) {
      // Since the compound ref modes depend on the motion estimation result
      // of two single ref modes (best mv of single ref modes as the start
      // point). If current single ref mode is marked skip, we need to check
      // if it will be used in compound ref modes.
      for (int r = ALTREF_FRAME + 1; r < MODE_CTX_REF_FRAMES; ++r) {
        if (skip_ref_frame_mask & (1 << r)) continue;
        const MV_REFERENCE_FRAME *rf = ref_frame_map[r - REF_FRAMES];
        if (rf[0] == ref_type || rf[1] == ref_type) {
          // Found a not skipped compound ref mode which contains current
          // single ref. So this single ref can't be skipped completely.
          // Just skip its motion mode search, still try its simple
          // translation mode.
          skip_motion_mode = 1;
          skip_ref = 0;
          break;
        }
      }
    }
    if (skip_ref) return 1;
  }

  const SPEED_FEATURES *const sf = &cpi->sf;
  if (ref_frame[0] == INTRA_FRAME) {
    if (mode != DC_PRED) {
      // Disable intra modes other than DC_PRED for blocks with low variance
      // Threshold for intra skipping based on source variance
      // TODO(debargha): Specialize the threshold for super block sizes
      const unsigned int skip_intra_var_thresh = 64;
      if ((sf->rt_sf.mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) &&
          x->source_variance < skip_intra_var_thresh)
        return 1;
    }
  }

  // Speed-feature driven selective reference pruning.
  if (prune_ref_by_selective_ref_frame(cpi, x, ref_frame,
                                       cm->cur_frame->ref_display_order_hint))
    return 1;

  // Return value 2: keep the simple-translation search for this single
  // reference but skip its motion-mode search (see comment above).
  if (skip_motion_mode) return 2;

  return 0;
}
3789 
init_mbmi(MB_MODE_INFO * mbmi,PREDICTION_MODE curr_mode,const MV_REFERENCE_FRAME * ref_frames,const AV1_COMMON * cm)3790 static INLINE void init_mbmi(MB_MODE_INFO *mbmi, PREDICTION_MODE curr_mode,
3791                              const MV_REFERENCE_FRAME *ref_frames,
3792                              const AV1_COMMON *cm) {
3793   PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
3794   mbmi->ref_mv_idx = 0;
3795   mbmi->mode = curr_mode;
3796   mbmi->uv_mode = UV_DC_PRED;
3797   mbmi->ref_frame[0] = ref_frames[0];
3798   mbmi->ref_frame[1] = ref_frames[1];
3799   pmi->palette_size[0] = 0;
3800   pmi->palette_size[1] = 0;
3801   mbmi->filter_intra_mode_info.use_filter_intra = 0;
3802   mbmi->mv[0].as_int = mbmi->mv[1].as_int = 0;
3803   mbmi->motion_mode = SIMPLE_TRANSLATION;
3804   mbmi->interintra_mode = (INTERINTRA_MODE)(II_DC_PRED - 1);
3805   set_default_interp_filters(mbmi, cm->features.interp_filter);
3806 }
3807 
collect_single_states(MACROBLOCK * x,InterModeSearchState * search_state,const MB_MODE_INFO * const mbmi)3808 static AOM_INLINE void collect_single_states(MACROBLOCK *x,
3809                                              InterModeSearchState *search_state,
3810                                              const MB_MODE_INFO *const mbmi) {
3811   int i, j;
3812   const MV_REFERENCE_FRAME ref_frame = mbmi->ref_frame[0];
3813   const PREDICTION_MODE this_mode = mbmi->mode;
3814   const int dir = ref_frame <= GOLDEN_FRAME ? 0 : 1;
3815   const int mode_offset = INTER_OFFSET(this_mode);
3816   const int ref_set = get_drl_refmv_count(x, mbmi->ref_frame, this_mode);
3817 
3818   // Simple rd
3819   int64_t simple_rd = search_state->simple_rd[this_mode][0][ref_frame];
3820   for (int ref_mv_idx = 1; ref_mv_idx < ref_set; ++ref_mv_idx) {
3821     const int64_t rd =
3822         search_state->simple_rd[this_mode][ref_mv_idx][ref_frame];
3823     if (rd < simple_rd) simple_rd = rd;
3824   }
3825 
3826   // Insertion sort of single_state
3827   const SingleInterModeState this_state_s = { simple_rd, ref_frame, 1 };
3828   SingleInterModeState *state_s = search_state->single_state[dir][mode_offset];
3829   i = search_state->single_state_cnt[dir][mode_offset];
3830   for (j = i; j > 0 && state_s[j - 1].rd > this_state_s.rd; --j)
3831     state_s[j] = state_s[j - 1];
3832   state_s[j] = this_state_s;
3833   search_state->single_state_cnt[dir][mode_offset]++;
3834 
3835   // Modelled rd
3836   int64_t modelled_rd = search_state->modelled_rd[this_mode][0][ref_frame];
3837   for (int ref_mv_idx = 1; ref_mv_idx < ref_set; ++ref_mv_idx) {
3838     const int64_t rd =
3839         search_state->modelled_rd[this_mode][ref_mv_idx][ref_frame];
3840     if (rd < modelled_rd) modelled_rd = rd;
3841   }
3842 
3843   // Insertion sort of single_state_modelled
3844   const SingleInterModeState this_state_m = { modelled_rd, ref_frame, 1 };
3845   SingleInterModeState *state_m =
3846       search_state->single_state_modelled[dir][mode_offset];
3847   i = search_state->single_state_modelled_cnt[dir][mode_offset];
3848   for (j = i; j > 0 && state_m[j - 1].rd > this_state_m.rd; --j)
3849     state_m[j] = state_m[j - 1];
3850   state_m[j] = this_state_m;
3851   search_state->single_state_modelled_cnt[dir][mode_offset]++;
3852 }
3853 
// Post-processes the collected single-reference results: invalidates
// references whose rd is far worse than the best NEWMV/GLOBALMV rd, then
// builds a reference ordering per (direction, mode) — simple rd first,
// modelled rd as a tie-in fallback — for compound-mode pruning.
static AOM_INLINE void analyze_single_states(
    const AV1_COMP *cpi, InterModeSearchState *search_state) {
  const int prune_level = cpi->sf.inter_sf.prune_comp_search_by_single_result;
  assert(prune_level >= 1);
  int i, j, dir, mode;

  for (dir = 0; dir < 2; ++dir) {
    int64_t best_rd;
    SingleInterModeState(*state)[FWD_REFS];
    // Higher prune level => tighter (larger) prune factor.
    const int prune_factor = prune_level >= 2 ? 6 : 5;

    // Use the best rd of GLOBALMV or NEWMV to prune the unlikely
    // reference frames for all the modes (NEARESTMV and NEARMV may not
    // have same motion vectors). Always keep the best of each mode
    // because it might form the best possible combination with other mode.
    state = search_state->single_state[dir];
    best_rd = AOMMIN(state[INTER_OFFSET(NEWMV)][0].rd,
                     state[INTER_OFFSET(GLOBALMV)][0].rd);
    for (mode = 0; mode < SINGLE_INTER_MODE_NUM; ++mode) {
      // i starts at 1: the per-mode best entry (index 0) is always kept.
      for (i = 1; i < search_state->single_state_cnt[dir][mode]; ++i) {
        // Invalidate entries whose rd exceeds best_rd by more than
        // prune_factor/8 (>> 3 scales before multiplying to avoid overflow).
        if (state[mode][i].rd != INT64_MAX &&
            (state[mode][i].rd >> 3) * prune_factor > best_rd) {
          state[mode][i].valid = 0;
        }
      }
    }

    // Same pruning applied to the modelled-rd lists.
    state = search_state->single_state_modelled[dir];
    best_rd = AOMMIN(state[INTER_OFFSET(NEWMV)][0].rd,
                     state[INTER_OFFSET(GLOBALMV)][0].rd);
    for (mode = 0; mode < SINGLE_INTER_MODE_NUM; ++mode) {
      for (i = 1; i < search_state->single_state_modelled_cnt[dir][mode]; ++i) {
        if (state[mode][i].rd != INT64_MAX &&
            (state[mode][i].rd >> 3) * prune_factor > best_rd) {
          state[mode][i].valid = 0;
        }
      }
    }
  }

  // Ordering by simple rd first, then by modelled rd
  for (dir = 0; dir < 2; ++dir) {
    for (mode = 0; mode < SINGLE_INTER_MODE_NUM; ++mode) {
      const int state_cnt_s = search_state->single_state_cnt[dir][mode];
      const int state_cnt_m =
          search_state->single_state_modelled_cnt[dir][mode];
      SingleInterModeState *state_s = search_state->single_state[dir][mode];
      SingleInterModeState *state_m =
          search_state->single_state_modelled[dir][mode];
      int count = 0;
      const int max_candidates = AOMMAX(state_cnt_s, state_cnt_m);
      // Pass 1: take still-valid references in simple-rd order.
      for (i = 0; i < state_cnt_s; ++i) {
        if (state_s[i].rd == INT64_MAX) break;
        if (state_s[i].valid) {
          search_state->single_rd_order[dir][mode][count++] =
              state_s[i].ref_frame;
        }
      }
      if (count >= max_candidates) continue;

      // Pass 2: top up from the modelled-rd order, skipping references
      // already chosen or invalidated on the simple-rd side.
      for (i = 0; i < state_cnt_m && count < max_candidates; ++i) {
        if (state_m[i].rd == INT64_MAX) break;
        if (!state_m[i].valid) continue;
        const int ref_frame = state_m[i].ref_frame;
        int match = 0;
        // Check if existing already
        for (j = 0; j < count; ++j) {
          if (search_state->single_rd_order[dir][mode][j] == ref_frame) {
            match = 1;
            break;
          }
        }
        if (match) continue;
        // Check if this ref_frame is removed in simple rd
        int valid = 1;
        for (j = 0; j < state_cnt_s; ++j) {
          if (ref_frame == state_s[j].ref_frame) {
            valid = state_s[j].valid;
            break;
          }
        }
        if (valid) {
          search_state->single_rd_order[dir][mode][count++] = ref_frame;
        }
      }
    }
  }
}
3942 
compound_skip_get_candidates(const AV1_COMP * cpi,const InterModeSearchState * search_state,const int dir,const PREDICTION_MODE mode)3943 static int compound_skip_get_candidates(
3944     const AV1_COMP *cpi, const InterModeSearchState *search_state,
3945     const int dir, const PREDICTION_MODE mode) {
3946   const int mode_offset = INTER_OFFSET(mode);
3947   const SingleInterModeState *state =
3948       search_state->single_state[dir][mode_offset];
3949   const SingleInterModeState *state_modelled =
3950       search_state->single_state_modelled[dir][mode_offset];
3951 
3952   int max_candidates = 0;
3953   for (int i = 0; i < FWD_REFS; ++i) {
3954     if (search_state->single_rd_order[dir][mode_offset][i] == NONE_FRAME) break;
3955     max_candidates++;
3956   }
3957 
3958   int candidates = max_candidates;
3959   if (cpi->sf.inter_sf.prune_comp_search_by_single_result >= 2) {
3960     candidates = AOMMIN(2, max_candidates);
3961   }
3962   if (cpi->sf.inter_sf.prune_comp_search_by_single_result >= 3) {
3963     if (state[0].rd != INT64_MAX && state_modelled[0].rd != INT64_MAX &&
3964         state[0].ref_frame == state_modelled[0].ref_frame)
3965       candidates = 1;
3966     if (mode == NEARMV || mode == GLOBALMV) candidates = 1;
3967   }
3968 
3969   if (cpi->sf.inter_sf.prune_comp_search_by_single_result >= 4) {
3970     // Limit the number of candidates to 1 in each direction for compound
3971     // prediction
3972     candidates = AOMMIN(1, candidates);
3973   }
3974   return candidates;
3975 }
3976 
// Decides whether a compound mode can be skipped based on the results of its
// two constituent single-reference modes: if a constituent reference was
// searched, its MVs match the compound MVs, and it did not rank among the
// top single-reference candidates, the compound mode is pruned (returns 1).
static int compound_skip_by_single_states(
    const AV1_COMP *cpi, const InterModeSearchState *search_state,
    const PREDICTION_MODE this_mode, const MV_REFERENCE_FRAME ref_frame,
    const MV_REFERENCE_FRAME second_ref_frame, const MACROBLOCK *x) {
  const MV_REFERENCE_FRAME refs[2] = { ref_frame, second_ref_frame };
  // The single-reference modes this compound mode is composed of.
  const int mode[2] = { compound_ref0_mode(this_mode),
                        compound_ref1_mode(this_mode) };
  const int mode_offset[2] = { INTER_OFFSET(mode[0]), INTER_OFFSET(mode[1]) };
  // Direction buckets: refs up to GOLDEN_FRAME are "forward" (0).
  const int mode_dir[2] = { refs[0] <= GOLDEN_FRAME ? 0 : 1,
                            refs[1] <= GOLDEN_FRAME ? 0 : 1 };
  int ref_searched[2] = { 0, 0 };
  int ref_mv_match[2] = { 1, 1 };
  int i, j;

  // Was each constituent single-reference mode actually searched?
  for (i = 0; i < 2; ++i) {
    const SingleInterModeState *state =
        search_state->single_state[mode_dir[i]][mode_offset[i]];
    const int state_cnt =
        search_state->single_state_cnt[mode_dir[i]][mode_offset[i]];
    for (j = 0; j < state_cnt; ++j) {
      if (state[j].ref_frame == refs[i]) {
        ref_searched[i] = 1;
        break;
      }
    }
  }

  // For NEAREST/NEAR constituents, single-mode results only transfer when
  // every candidate MV matches the corresponding compound candidate MV.
  const int ref_set = get_drl_refmv_count(x, refs, this_mode);
  for (i = 0; i < 2; ++i) {
    if (!ref_searched[i] || (mode[i] != NEARESTMV && mode[i] != NEARMV)) {
      continue;
    }
    const MV_REFERENCE_FRAME single_refs[2] = { refs[i], NONE_FRAME };
    for (int ref_mv_idx = 0; ref_mv_idx < ref_set; ref_mv_idx++) {
      int_mv single_mv;
      int_mv comp_mv;
      get_this_mv(&single_mv, mode[i], 0, ref_mv_idx, 0, single_refs,
                  x->mbmi_ext);
      get_this_mv(&comp_mv, this_mode, i, ref_mv_idx, 0, refs, x->mbmi_ext);
      if (single_mv.as_int != comp_mv.as_int) {
        ref_mv_match[i] = 0;
        break;
      }
    }
  }

  // Prune if a transferable constituent reference is absent from the top
  // single-reference candidate ordering for its direction/mode.
  for (i = 0; i < 2; ++i) {
    if (!ref_searched[i] || !ref_mv_match[i]) continue;
    const int candidates =
        compound_skip_get_candidates(cpi, search_state, mode_dir[i], mode[i]);
    const MV_REFERENCE_FRAME *ref_order =
        search_state->single_rd_order[mode_dir[i]][mode_offset[i]];
    int match = 0;
    for (j = 0; j < candidates; ++j) {
      if (refs[i] == ref_order[j]) {
        match = 1;
        break;
      }
    }
    if (!match) return 1;
  }

  return 0;
}
4041 
4042 // Check if ref frames of current block matches with given block.
match_ref_frame(const MB_MODE_INFO * const mbmi,const MV_REFERENCE_FRAME * ref_frames,int * const is_ref_match)4043 static INLINE void match_ref_frame(const MB_MODE_INFO *const mbmi,
4044                                    const MV_REFERENCE_FRAME *ref_frames,
4045                                    int *const is_ref_match) {
4046   if (is_inter_block(mbmi)) {
4047     is_ref_match[0] |= ref_frames[0] == mbmi->ref_frame[0];
4048     is_ref_match[1] |= ref_frames[1] == mbmi->ref_frame[0];
4049     if (has_second_ref(mbmi)) {
4050       is_ref_match[0] |= ref_frames[0] == mbmi->ref_frame[1];
4051       is_ref_match[1] |= ref_frames[1] == mbmi->ref_frame[1];
4052     }
4053   }
4054 }
4055 
4056 // Prune compound mode using ref frames of neighbor blocks.
compound_skip_using_neighbor_refs(MACROBLOCKD * const xd,const PREDICTION_MODE this_mode,const MV_REFERENCE_FRAME * ref_frames,int prune_compound_using_neighbors)4057 static INLINE int compound_skip_using_neighbor_refs(
4058     MACROBLOCKD *const xd, const PREDICTION_MODE this_mode,
4059     const MV_REFERENCE_FRAME *ref_frames, int prune_compound_using_neighbors) {
4060   // Exclude non-extended compound modes from pruning
4061   if (this_mode == NEAREST_NEARESTMV || this_mode == NEAR_NEARMV ||
4062       this_mode == NEW_NEWMV || this_mode == GLOBAL_GLOBALMV)
4063     return 0;
4064 
4065   int is_ref_match[2] = { 0 };  // 0 - match for forward refs
4066                                 // 1 - match for backward refs
4067   // Check if ref frames of this block matches with left neighbor.
4068   if (xd->left_available)
4069     match_ref_frame(xd->left_mbmi, ref_frames, is_ref_match);
4070 
4071   // Check if ref frames of this block matches with above neighbor.
4072   if (xd->up_available)
4073     match_ref_frame(xd->above_mbmi, ref_frames, is_ref_match);
4074 
4075   // Combine ref frame match with neighbors in forward and backward refs.
4076   const int track_ref_match = is_ref_match[0] + is_ref_match[1];
4077 
4078   // Pruning based on ref frame match with neighbors.
4079   if (track_ref_match >= prune_compound_using_neighbors) return 0;
4080   return 1;
4081 }
4082 
// qsort comparator for int64_t values, ascending.
// Returns -1/0/1 for less-than/equal/greater-than. Avoids the naive
// subtraction idiom, which could overflow for int64_t operands.
static int compare_int64(const void *a, const void *b) {
  // Keep the const qualifier of qsort's arguments instead of casting it away.
  const int64_t a64 = *(const int64_t *)a;
  const int64_t b64 = *(const int64_t *)b;
  if (a64 < b64) {
    return -1;
  } else if (a64 == b64) {
    return 0;
  } else {
    return 1;
  }
}
4094 
// Records a new best mode: copies rd stats and mode info into search_state,
// and updates the context's block-skip and tx-type maps. Luma/chroma rates
// are only refreshed when a real transform search produced them.
static INLINE void update_search_state(
    InterModeSearchState *search_state, RD_STATS *best_rd_stats_dst,
    PICK_MODE_CONTEXT *ctx, const RD_STATS *new_best_rd_stats,
    const RD_STATS *new_best_rd_stats_y, const RD_STATS *new_best_rd_stats_uv,
    THR_MODES new_best_mode, const MACROBLOCK *x, int txfm_search_done) {
  const MACROBLOCKD *xd = &x->e_mbd;
  const MB_MODE_INFO *mbmi = xd->mi[0];
  const int skip_ctx = av1_get_skip_context(xd);
  const int mode_is_intra =
      (av1_mode_defs[new_best_mode].mode < INTRA_MODE_END);
  // The skip flag only applies to inter modes.
  const int skip = mbmi->skip && !mode_is_intra;

  search_state->best_rd = new_best_rd_stats->rdcost;
  search_state->best_mode_index = new_best_mode;
  *best_rd_stats_dst = *new_best_rd_stats;
  search_state->best_mbmode = *mbmi;
  search_state->best_skip2 = skip;
  search_state->best_mode_skippable = new_best_rd_stats->skip;
  // When !txfm_search_done, new_best_rd_stats won't provide correct rate_y and
  // rate_uv because av1_txfm_search process is replaced by rd estimation.
  // Therefore, we should avoid updating best_rate_y and best_rate_uv here.
  // These two values will be updated when av1_txfm_search is called.
  if (txfm_search_done) {
    search_state->best_rate_y =
        new_best_rd_stats_y->rate +
        x->skip_cost[skip_ctx][new_best_rd_stats->skip || skip];
    search_state->best_rate_uv = new_best_rd_stats_uv->rate;
  }
  memcpy(ctx->blk_skip, x->blk_skip, sizeof(x->blk_skip[0]) * ctx->num_4x4_blk);
  av1_copy_array(ctx->tx_type_map, xd->tx_type_map, ctx->num_4x4_blk);
}
4126 
4127 // Find the best RD for a reference frame (among single reference modes)
4128 // and store +10% of it in the 0-th element in ref_frame_rd.
find_top_ref(int64_t ref_frame_rd[REF_FRAMES])4129 static AOM_INLINE void find_top_ref(int64_t ref_frame_rd[REF_FRAMES]) {
4130   assert(ref_frame_rd[0] == INT64_MAX);
4131   int64_t ref_copy[REF_FRAMES - 1];
4132   memcpy(ref_copy, ref_frame_rd + 1,
4133          sizeof(ref_frame_rd[0]) * (REF_FRAMES - 1));
4134   qsort(ref_copy, REF_FRAMES - 1, sizeof(int64_t), compare_int64);
4135 
4136   int64_t cutoff = ref_copy[0];
4137   // The cut-off is within 10% of the best.
4138   if (cutoff != INT64_MAX) {
4139     assert(cutoff < INT64_MAX / 200);
4140     cutoff = (110 * cutoff) / 100;
4141   }
4142   ref_frame_rd[0] = cutoff;
4143 }
4144 
4145 // Check if either frame is within the cutoff.
in_single_ref_cutoff(int64_t ref_frame_rd[REF_FRAMES],MV_REFERENCE_FRAME frame1,MV_REFERENCE_FRAME frame2)4146 static INLINE bool in_single_ref_cutoff(int64_t ref_frame_rd[REF_FRAMES],
4147                                         MV_REFERENCE_FRAME frame1,
4148                                         MV_REFERENCE_FRAME frame2) {
4149   assert(frame2 > 0);
4150   return ref_frame_rd[frame1] <= ref_frame_rd[0] ||
4151          ref_frame_rd[frame2] <= ref_frame_rd[0];
4152 }
4153 
// For each stored simple-translation winner candidate, re-run the motion
// mode search (motion_mode_rd) so non-translational motion modes get
// evaluated, and update the overall inter-mode search state whenever a
// candidate beats the current best RD. Only single-reference candidates are
// evaluated; compound ones are skipped.
static AOM_INLINE void evaluate_motion_mode_for_winner_candidates(
    const AV1_COMP *const cpi, MACROBLOCK *const x, RD_STATS *const rd_cost,
    HandleInterModeArgs *const args, TileDataEnc *const tile_data,
    PICK_MODE_CONTEXT *const ctx,
    struct buf_2d yv12_mb[REF_FRAMES][MAX_MB_PLANE],
    const motion_mode_best_st_candidate *const best_motion_mode_cands,
    int do_tx_search, const BLOCK_SIZE bsize, int64_t *const best_est_rd,
    InterModeSearchState *const search_state) {
  const AV1_COMMON *const cm = &cpi->common;
  const int num_planes = av1_num_planes(cm);
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  InterModesInfo *const inter_modes_info = x->inter_modes_info;
  const int num_best_cand = best_motion_mode_cands->num_motion_mode_cand;

  for (int cand = 0; cand < num_best_cand; cand++) {
    RD_STATS rd_stats;
    RD_STATS rd_stats_y;
    RD_STATS rd_stats_uv;
    av1_init_rd_stats(&rd_stats);
    av1_init_rd_stats(&rd_stats_y);
    av1_init_rd_stats(&rd_stats_uv);
    int disable_skip = 0, rate_mv;

    // Restore the candidate's state (rate, mode info, skip flag) that was
    // captured during the initial simple-translation search.
    rate_mv = best_motion_mode_cands->motion_mode_cand[cand].rate_mv;
    args->skip_motion_mode =
        best_motion_mode_cands->motion_mode_cand[cand].skip_motion_mode;
    *mbmi = best_motion_mode_cands->motion_mode_cand[cand].mbmi;
    rd_stats.rate =
        best_motion_mode_cands->motion_mode_cand[cand].rate2_nocoeff;

    // Continue if the best candidate is compound.
    if (!is_inter_singleref_mode(mbmi->mode)) continue;

    x->force_skip = 0;
    const int mode_index = get_prediction_mode_idx(
        mbmi->mode, mbmi->ref_frame[0], mbmi->ref_frame[1]);
    struct macroblockd_plane *p = xd->plane;
    // Remember the current destination buffers so motion_mode_rd can
    // predict into them.
    const BUFFER_SET orig_dst = {
      { p[0].dst.buf, p[1].dst.buf, p[2].dst.buf },
      { p[0].dst.stride, p[1].dst.stride, p[2].dst.stride },
    };

    set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
    args->simple_rd_state = x->simple_rd_state[mode_index];
    // Initialize motion mode to simple translation
    // Calculation of switchable rate depends on it.
    mbmi->motion_mode = 0;
    const int is_comp_pred = mbmi->ref_frame[1] > INTRA_FRAME;
    // Point the prediction planes at the candidate's reference frames.
    for (int i = 0; i < num_planes; i++) {
      xd->plane[i].pre[0] = yv12_mb[mbmi->ref_frame[0]][i];
      if (is_comp_pred) xd->plane[i].pre[1] = yv12_mb[mbmi->ref_frame[1]][i];
    }

    // Local copy so a failed candidate does not clobber the best skip RDs.
    int64_t skip_rd[2] = { search_state->best_skip_rd[0],
                           search_state->best_skip_rd[1] };
    int64_t ret_value = motion_mode_rd(
        cpi, tile_data, x, bsize, &rd_stats, &rd_stats_y, &rd_stats_uv,
        &disable_skip, args, search_state->best_rd, skip_rd, &rate_mv,
        &orig_dst, best_est_rd, do_tx_search, inter_modes_info, 1);

    if (ret_value != INT64_MAX) {
      rd_stats.rdcost = RDCOST(x->rdmult, rd_stats.rate, rd_stats.dist);
      const THR_MODES mode_enum = get_prediction_mode_idx(
          mbmi->mode, mbmi->ref_frame[0], mbmi->ref_frame[1]);
      // Collect mode stats for multiwinner mode processing
      store_winner_mode_stats(
          &cpi->common, x, mbmi, &rd_stats, &rd_stats_y, &rd_stats_uv,
          mode_enum, NULL, bsize, rd_stats.rdcost,
          cpi->sf.winner_mode_sf.enable_multiwinner_mode_process, do_tx_search);
      if (rd_stats.rdcost < search_state->best_rd) {
        update_search_state(search_state, rd_cost, ctx, &rd_stats, &rd_stats_y,
                            &rd_stats_uv, mode_enum, x, do_tx_search);
        // Skip RD of the luma plane is only meaningful when the full
        // transform search was performed.
        if (do_tx_search) search_state->best_skip_rd[0] = skip_rd[0];
      }
    }
  }
}
4232 
// Arguments for speed feature pruning of inter mode search
typedef struct {
  // Output flag: set when the motion-mode search should be skipped for the
  // current mode (derived from inter_mode_search_order_independent_skip()).
  int *skip_motion_mode;
  // Per-mode / per-ref-frame skip masks built by the speed features.
  mode_skip_mask_t *mode_skip_mask;
  // Overall inter mode search state (best RD so far, thresholds, stats).
  InterModeSearchState *search_state;
  // Mask of reference frames that were never selected by square blocks.
  int skip_ref_frame_mask;
  // Set to 1 once the first compound mode has been reached in the mode loop.
  int reach_first_comp_mode;
  // Multiplier (fixed point, MODE_THRESH_QBITS) applied to mode thresholds
  // when the current best mode is skippable.
  int mode_thresh_mul_fact;
  // List of intra modes encountered in the loop; evaluated separately later.
  int *intra_mode_idx_ls;
  // Number of entries currently stored in intra_mode_idx_ls.
  int *intra_mode_num;
  // Set to 1 once find_top_ref() has computed the single-ref RD cut-off.
  int prune_cpd_using_sr_stats_ready;
} InterModeSFArgs;
4245 
// Applies the speed-feature pruning checks for one iteration of the inter
// mode loop. Returns 1 when the mode at index 'midx' should be skipped and
// 0 otherwise. Side effects: may populate the single-ref RD cut-off
// (find_top_ref), collect single-reference statistics, record intra modes
// into args->intra_mode_idx_ls for later evaluation, and set
// *args->skip_motion_mode.
static int skip_inter_mode(AV1_COMP *cpi, MACROBLOCK *x, const BLOCK_SIZE bsize,
                           int64_t *ref_frame_rd, int midx,
                           InterModeSFArgs *args) {
  const SPEED_FEATURES *const sf = &cpi->sf;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  // Get the actual prediction mode we are trying in this iteration
  const THR_MODES mode_enum = av1_default_mode_order[midx];
  const MODE_DEFINITION *mode_def = &av1_mode_defs[mode_enum];
  const PREDICTION_MODE this_mode = mode_def->mode;
  const MV_REFERENCE_FRAME *ref_frames = mode_def->ref_frame;
  const MV_REFERENCE_FRAME ref_frame = ref_frames[0];
  const MV_REFERENCE_FRAME second_ref_frame = ref_frames[1];
  const int comp_pred = second_ref_frame > INTRA_FRAME;
  const int last_single_ref_mode_idx =
      find_last_single_ref_mode_idx(av1_default_mode_order);

  // After we done with single reference modes, find the 2nd best RD
  // for a reference frame. Only search compound modes that have a reference
  // frame at least as good as the 2nd best.
  if (sf->inter_sf.prune_compound_using_single_ref &&
      midx == last_single_ref_mode_idx + 1) {
    find_top_ref(ref_frame_rd);
    args->prune_cpd_using_sr_stats_ready = 1;
  }

  // Check if this mode should be skipped because it is incompatible with the
  // current frame
  if (inter_mode_compatible_skip(cpi, x, bsize, this_mode, ref_frames))
    return 1;
  // ret == 1: skip the mode entirely; ret == 2: keep the mode but skip its
  // motion mode search.
  const int ret = inter_mode_search_order_independent_skip(
      cpi, x, args->mode_skip_mask, args->search_state,
      args->skip_ref_frame_mask, this_mode, mode_def->ref_frame);
  if (ret == 1) return 1;
  *(args->skip_motion_mode) = (ret == 2);

  // We've reached the first compound prediction mode, get stats from the
  // single reference predictors to help with pruning
  if (sf->inter_sf.prune_comp_search_by_single_result > 0 && comp_pred &&
      args->reach_first_comp_mode == 0) {
    analyze_single_states(cpi, args->search_state);
    args->reach_first_comp_mode = 1;
  }

  // Prune aggressively when best mode is skippable.
  int mul_fact = args->search_state->best_mode_skippable
                     ? args->mode_thresh_mul_fact
                     : (1 << MODE_THRESH_QBITS);
  int64_t mode_threshold =
      (args->search_state->mode_threshold[mode_enum] * mul_fact) >>
      MODE_THRESH_QBITS;

  if (args->search_state->best_rd < mode_threshold) return 1;

  // Skip this compound mode based on the RD results from the single prediction
  // modes
  if (sf->inter_sf.prune_comp_search_by_single_result > 0 && comp_pred) {
    if (compound_skip_by_single_states(cpi, args->search_state, this_mode,
                                       ref_frame, second_ref_frame, x))
      return 1;
  }

  // Speed features to prune out INTRA frames
  if (ref_frame == INTRA_FRAME) {
    if ((!cpi->oxcf.enable_smooth_intra || sf->intra_sf.disable_smooth_intra) &&
        (mbmi->mode == SMOOTH_PRED || mbmi->mode == SMOOTH_H_PRED ||
         mbmi->mode == SMOOTH_V_PRED))
      return 1;
    if (!cpi->oxcf.enable_paeth_intra && mbmi->mode == PAETH_PRED) return 1;
    if (sf->inter_sf.adaptive_mode_search > 1)
      if ((x->source_variance << num_pels_log2_lookup[bsize]) >
          args->search_state->best_pred_sse)
        return 1;

    // Intra modes will be handled in another loop later.
    assert(*args->intra_mode_num < INTRA_MODES);
    args->intra_mode_idx_ls[(*args->intra_mode_num)++] = mode_enum;
    return 1;
  }

  // Prune compound modes whose references were never within the single-ref
  // RD cut-off computed by find_top_ref().
  if (sf->inter_sf.prune_compound_using_single_ref &&
      args->prune_cpd_using_sr_stats_ready && comp_pred &&
      !in_single_ref_cutoff(ref_frame_rd, ref_frame, second_ref_frame)) {
    return 1;
  }

  // Prune compound modes based on the reference frames of neighbor blocks.
  if (sf->inter_sf.prune_compound_using_neighbors && comp_pred) {
    if (compound_skip_using_neighbor_refs(
            xd, this_mode, ref_frames,
            sf->inter_sf.prune_compound_using_neighbors))
      return 1;
  }

  return 0;
}
4341 
// Keep track of the best RD seen for single-reference, compound-reference
// and reference-mode-select prediction. The "single" RD excludes the
// compound-mode signaling cost while the "hybrid" RD includes it; which of
// the two rd_stats->rate already contains depends on the frame's
// reference_mode.
static void record_best_compound(REFERENCE_MODE reference_mode,
                                 RD_STATS *rd_stats, int comp_pred, int rdmult,
                                 InterModeSearchState *search_state,
                                 int compmode_cost) {
  const int select_on = (reference_mode == REFERENCE_MODE_SELECT);
  // Rate without the compound-mode flag cost vs. rate with it.
  const int64_t rate_wo_flag =
      select_on ? rd_stats->rate - compmode_cost : rd_stats->rate;
  const int64_t rate_w_flag =
      select_on ? rd_stats->rate : rd_stats->rate + compmode_cost;

  const int64_t single_rd = RDCOST(rdmult, rate_wo_flag, rd_stats->dist);
  const int64_t hybrid_rd = RDCOST(rdmult, rate_w_flag, rd_stats->dist);

  int64_t *const best_pred_rd =
      search_state->intra_search_state.best_pred_rd;
  // Track the best flag-free RD under the matching reference category.
  const int ref_category = comp_pred ? COMPOUND_REFERENCE : SINGLE_REFERENCE;
  if (single_rd < best_pred_rd[ref_category])
    best_pred_rd[ref_category] = single_rd;
  // Track the best flag-inclusive RD for mode-select signaling.
  if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
    best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
}
4375 
// Indicates number of winner simple translation modes to be used, indexed by
// the motion_mode_for_winner_cand speed feature level (0, 1 or 2).
static const unsigned int num_winner_motion_modes[3] = { 0, 10, 3 };
4378 
// Adds a motion mode to the candidate list for motion_mode_for_winner_cand
// speed feature. This list consists of modes that have only searched
// SIMPLE_TRANSLATION. The final list will be used to search other motion
// modes after the initial RD search. The list is kept sorted by increasing
// rd_cost and capped at max_winner_motion_mode_cand entries.
static void handle_winner_cand(
    MB_MODE_INFO *const mbmi,
    motion_mode_best_st_candidate *best_motion_mode_cands,
    int max_winner_motion_mode_cand, int64_t this_rd,
    motion_mode_candidate *motion_mode_cand, int skip_motion_mode) {
  // Number of current motion mode candidates in list
  const int num_motion_mode_cand = best_motion_mode_cands->num_motion_mode_cand;
  // Default insert position is the end of the current list (appended only
  // if the list is not already full).
  int valid_motion_mode_cand_loc = num_motion_mode_cand;

  // find the best location to insert new motion mode candidate
  for (int j = 0; j < num_motion_mode_cand; j++) {
    if (this_rd < best_motion_mode_cands->motion_mode_cand[j].rd_cost) {
      valid_motion_mode_cand_loc = j;
      break;
    }
  }

  // Insert motion mode if location is found
  if (valid_motion_mode_cand_loc < max_winner_motion_mode_cand) {
    // Shift worse candidates down one slot to make room; when the list is
    // full the last entry falls off.
    if (num_motion_mode_cand > 0 &&
        valid_motion_mode_cand_loc < max_winner_motion_mode_cand - 1)
      memmove(
          &best_motion_mode_cands
               ->motion_mode_cand[valid_motion_mode_cand_loc + 1],
          &best_motion_mode_cands->motion_mode_cand[valid_motion_mode_cand_loc],
          (AOMMIN(num_motion_mode_cand, max_winner_motion_mode_cand - 1) -
           valid_motion_mode_cand_loc) *
              sizeof(best_motion_mode_cands->motion_mode_cand[0]));
    // Fill in the candidate before storing it into the chosen slot.
    motion_mode_cand->mbmi = *mbmi;
    motion_mode_cand->rd_cost = this_rd;
    motion_mode_cand->skip_motion_mode = skip_motion_mode;
    best_motion_mode_cands->motion_mode_cand[valid_motion_mode_cand_loc] =
        *motion_mode_cand;
    best_motion_mode_cands->num_motion_mode_cand =
        AOMMIN(max_winner_motion_mode_cand,
               best_motion_mode_cands->num_motion_mode_cand + 1);
  }
}
4421 
av1_rd_pick_inter_mode_sb(AV1_COMP * cpi,TileDataEnc * tile_data,MACROBLOCK * x,RD_STATS * rd_cost,const BLOCK_SIZE bsize,PICK_MODE_CONTEXT * ctx,int64_t best_rd_so_far)4422 void av1_rd_pick_inter_mode_sb(AV1_COMP *cpi, TileDataEnc *tile_data,
4423                                MACROBLOCK *x, RD_STATS *rd_cost,
4424                                const BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
4425                                int64_t best_rd_so_far) {
4426   AV1_COMMON *const cm = &cpi->common;
4427   const FeatureFlags *const features = &cm->features;
4428   const int num_planes = av1_num_planes(cm);
4429   const SPEED_FEATURES *const sf = &cpi->sf;
4430   MACROBLOCKD *const xd = &x->e_mbd;
4431   MB_MODE_INFO *const mbmi = xd->mi[0];
4432   int i;
4433   const int *comp_inter_cost =
4434       x->comp_inter_cost[av1_get_reference_mode_context(xd)];
4435 
4436   InterModeSearchState search_state;
4437   init_inter_mode_search_state(&search_state, cpi, x, bsize, best_rd_so_far);
4438   INTERINTRA_MODE interintra_modes[REF_FRAMES] = {
4439     INTERINTRA_MODES, INTERINTRA_MODES, INTERINTRA_MODES, INTERINTRA_MODES,
4440     INTERINTRA_MODES, INTERINTRA_MODES, INTERINTRA_MODES, INTERINTRA_MODES
4441   };
4442   HandleInterModeArgs args = { { NULL },
4443                                { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE },
4444                                { NULL },
4445                                { MAX_SB_SIZE >> 1, MAX_SB_SIZE >> 1,
4446                                  MAX_SB_SIZE >> 1 },
4447                                NULL,
4448                                NULL,
4449                                NULL,
4450                                search_state.modelled_rd,
4451                                INT_MAX,
4452                                INT_MAX,
4453                                search_state.simple_rd,
4454                                0,
4455                                interintra_modes,
4456                                1,
4457                                NULL,
4458                                { { { 0 }, { { 0 } }, { 0 }, 0, 0, 0, 0 } },
4459                                0 };
4460   // Indicates the appropriate number of simple translation winner modes for
4461   // exhaustive motion mode evaluation
4462   const int max_winner_motion_mode_cand =
4463       num_winner_motion_modes[cpi->sf.winner_mode_sf
4464                                   .motion_mode_for_winner_cand];
4465   assert(max_winner_motion_mode_cand <= MAX_WINNER_MOTION_MODES);
4466   motion_mode_candidate motion_mode_cand;
4467   motion_mode_best_st_candidate best_motion_mode_cands;
4468   // Initializing the number of motion mode candidates to zero.
4469   best_motion_mode_cands.num_motion_mode_cand = 0;
4470   for (i = 0; i < MAX_WINNER_MOTION_MODES; ++i)
4471     best_motion_mode_cands.motion_mode_cand[i].rd_cost = INT64_MAX;
4472 
4473   for (i = 0; i < REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
4474 
4475   av1_invalid_rd_stats(rd_cost);
4476 
4477   // Ref frames that are selected by square partition blocks.
4478   int picked_ref_frames_mask = 0;
4479   if (cpi->sf.inter_sf.prune_ref_frame_for_rect_partitions &&
4480       mbmi->partition != PARTITION_NONE && mbmi->partition != PARTITION_SPLIT) {
4481     // prune_ref_frame_for_rect_partitions = 1 implies prune only extended
4482     // partition blocks. prune_ref_frame_for_rect_partitions >=2
4483     // implies prune for vert, horiz and extended partition blocks.
4484     if ((mbmi->partition != PARTITION_VERT &&
4485          mbmi->partition != PARTITION_HORZ) ||
4486         cpi->sf.inter_sf.prune_ref_frame_for_rect_partitions >= 2) {
4487       picked_ref_frames_mask =
4488           fetch_picked_ref_frames_mask(x, bsize, cm->seq_params.mib_size);
4489     }
4490   }
4491 
4492   // Skip ref frames that never selected by square blocks.
4493   const int skip_ref_frame_mask =
4494       picked_ref_frames_mask ? ~picked_ref_frames_mask : 0;
4495   mode_skip_mask_t mode_skip_mask;
4496   unsigned int ref_costs_single[REF_FRAMES];
4497   unsigned int ref_costs_comp[REF_FRAMES][REF_FRAMES];
4498   struct buf_2d yv12_mb[REF_FRAMES][MAX_MB_PLANE];
4499   // init params, set frame modes, speed features
4500   set_params_rd_pick_inter_mode(cpi, x, &args, bsize, &mode_skip_mask,
4501                                 skip_ref_frame_mask, ref_costs_single,
4502                                 ref_costs_comp, yv12_mb);
4503 
4504   int64_t best_est_rd = INT64_MAX;
4505   const InterModeRdModel *md = &tile_data->inter_mode_rd_models[bsize];
4506   // If do_tx_search is 0, only estimated RD should be computed.
4507   // If do_tx_search is 1, all modes have TX search performed.
4508   const int do_tx_search =
4509       !((cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1 && md->ready) ||
4510         (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 2 &&
4511          num_pels_log2_lookup[bsize] > 8) ||
4512         cpi->sf.rt_sf.force_tx_search_off);
4513   InterModesInfo *inter_modes_info = x->inter_modes_info;
4514   inter_modes_info->num = 0;
4515 
4516   int intra_mode_num = 0;
4517   int intra_mode_idx_ls[INTRA_MODES];
4518 
4519   // Temporary buffers used by handle_inter_mode().
4520   uint8_t *const tmp_buf = get_buf_by_bd(xd, x->tmp_obmc_bufs[0]);
4521 
4522   // The best RD found for the reference frame, among single reference modes.
4523   // Note that the 0-th element will contain a cut-off that is later used
4524   // to determine if we should skip a compound mode.
4525   int64_t ref_frame_rd[REF_FRAMES] = { INT64_MAX, INT64_MAX, INT64_MAX,
4526                                        INT64_MAX, INT64_MAX, INT64_MAX,
4527                                        INT64_MAX, INT64_MAX };
4528   const int skip_ctx = av1_get_skip_context(xd);
4529 
4530   // Prepared stats used later to check if we could skip intra mode eval.
4531   int64_t inter_cost = -1;
4532   int64_t intra_cost = -1;
4533   // Need to tweak the threshold for hdres speed 0 & 1.
4534   const int mi_row = xd->mi_row;
4535   const int mi_col = xd->mi_col;
4536 
4537   // Obtain the relevant tpl stats for pruning inter modes
4538   PruneInfoFromTpl inter_cost_info_from_tpl;
4539 #if !CONFIG_REALTIME_ONLY
4540   if (cpi->sf.inter_sf.prune_inter_modes_based_on_tpl) {
4541     // x->search_ref_frame[id] = 1 => no pruning in
4542     // prune_ref_by_selective_ref_frame()
4543     // x->search_ref_frame[id] = 0  => ref frame can be pruned in
4544     // prune_ref_by_selective_ref_frame()
4545     // Populating valid_refs[idx] = 1 ensures that
4546     // 'inter_cost_info_from_tpl.best_inter_cost' does not correspond to a
4547     // pruned ref frame.
4548     int valid_refs[INTER_REFS_PER_FRAME];
4549     for (MV_REFERENCE_FRAME frame = LAST_FRAME; frame < REF_FRAMES; frame++) {
4550       const MV_REFERENCE_FRAME refs[2] = { frame, NONE_FRAME };
4551       valid_refs[frame - 1] =
4552           x->search_ref_frame[frame] ||
4553           !prune_ref_by_selective_ref_frame(
4554               cpi, x, refs, cm->cur_frame->ref_display_order_hint);
4555     }
4556     av1_zero(inter_cost_info_from_tpl);
4557     get_block_level_tpl_stats(cpi, bsize, mi_row, mi_col, valid_refs,
4558                               &inter_cost_info_from_tpl);
4559   }
4560 #endif
4561   const int do_pruning =
4562       (AOMMIN(cm->width, cm->height) > 480 && cpi->speed <= 1) ? 0 : 1;
4563   if (do_pruning && sf->intra_sf.skip_intra_in_interframe) {
4564     // Only consider full SB.
4565     int len = tpl_blocks_in_sb(cm->seq_params.sb_size);
4566     if (len == x->valid_cost_b) {
4567       const BLOCK_SIZE tpl_bsize = convert_length_to_bsize(MC_FLOW_BSIZE_1D);
4568       const int tplw = mi_size_wide[tpl_bsize];
4569       const int tplh = mi_size_high[tpl_bsize];
4570       const int nw = mi_size_wide[bsize] / tplw;
4571       const int nh = mi_size_high[bsize] / tplh;
4572       if (nw >= 1 && nh >= 1) {
4573         const int of_h = mi_row % mi_size_high[cm->seq_params.sb_size];
4574         const int of_w = mi_col % mi_size_wide[cm->seq_params.sb_size];
4575         const int start = of_h / tplh * x->cost_stride + of_w / tplw;
4576 
4577         for (int k = 0; k < nh; k++) {
4578           for (int l = 0; l < nw; l++) {
4579             inter_cost += x->inter_cost_b[start + k * x->cost_stride + l];
4580             intra_cost += x->intra_cost_b[start + k * x->cost_stride + l];
4581           }
4582         }
4583         inter_cost /= nw * nh;
4584         intra_cost /= nw * nh;
4585       }
4586     }
4587   }
4588 
4589   // Initialize best mode stats for winner mode processing
4590   av1_zero(x->winner_mode_stats);
4591   x->winner_mode_count = 0;
4592   store_winner_mode_stats(
4593       &cpi->common, x, mbmi, NULL, NULL, NULL, THR_INVALID, NULL, bsize,
4594       best_rd_so_far, cpi->sf.winner_mode_sf.enable_multiwinner_mode_process,
4595       0);
4596 
4597   int mode_thresh_mul_fact = (1 << MODE_THRESH_QBITS);
4598   if (sf->inter_sf.prune_inter_modes_if_skippable) {
4599     // Higher multiplication factor values for lower quantizers.
4600     mode_thresh_mul_fact = mode_threshold_mul_factor[x->qindex];
4601   }
4602 
4603   // Initialize arguments for mode loop speed features
4604   InterModeSFArgs sf_args = { &args.skip_motion_mode,
4605                               &mode_skip_mask,
4606                               &search_state,
4607                               skip_ref_frame_mask,
4608                               0,
4609                               mode_thresh_mul_fact,
4610                               intra_mode_idx_ls,
4611                               &intra_mode_num,
4612                               0 };
4613 
4614   // Here midx is just an iterator index that should not be used by itself
4615   // except to keep track of the number of modes searched. It should be used
4616   // with av1_default_mode_order to get the enum that defines the mode, which
4617   // can be used with av1_mode_defs to get the prediction mode and the ref
4618   // frames.
4619   for (THR_MODES midx = THR_MODE_START; midx < THR_MODE_END; ++midx) {
4620     // Get the actual prediction mode we are trying in this iteration
4621     const THR_MODES mode_enum = av1_default_mode_order[midx];
4622     const MODE_DEFINITION *mode_def = &av1_mode_defs[mode_enum];
4623     const PREDICTION_MODE this_mode = mode_def->mode;
4624     const MV_REFERENCE_FRAME *ref_frames = mode_def->ref_frame;
4625 
4626     const MV_REFERENCE_FRAME ref_frame = ref_frames[0];
4627     const MV_REFERENCE_FRAME second_ref_frame = ref_frames[1];
4628     const int is_single_pred =
4629         ref_frame > INTRA_FRAME && second_ref_frame == NONE_FRAME;
4630     const int comp_pred = second_ref_frame > INTRA_FRAME;
4631 
4632     init_mbmi(mbmi, this_mode, ref_frames, cm);
4633 
4634     x->force_skip = 0;
4635     set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
4636 
4637     // Apply speed features to decide if this inter mode can be skipped
4638     if (skip_inter_mode(cpi, x, bsize, ref_frame_rd, midx, &sf_args)) continue;
4639 
4640     // Select prediction reference frames.
4641     for (i = 0; i < num_planes; i++) {
4642       xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
4643       if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
4644     }
4645 
4646     mbmi->angle_delta[PLANE_TYPE_Y] = 0;
4647     mbmi->angle_delta[PLANE_TYPE_UV] = 0;
4648     mbmi->filter_intra_mode_info.use_filter_intra = 0;
4649     mbmi->ref_mv_idx = 0;
4650 
4651     const int64_t ref_best_rd = search_state.best_rd;
4652     int disable_skip = 0;
4653     RD_STATS rd_stats, rd_stats_y, rd_stats_uv;
4654     av1_init_rd_stats(&rd_stats);
4655 
4656     const int ref_frame_cost = comp_pred
4657                                    ? ref_costs_comp[ref_frame][second_ref_frame]
4658                                    : ref_costs_single[ref_frame];
4659     const int compmode_cost =
4660         is_comp_ref_allowed(mbmi->sb_type) ? comp_inter_cost[comp_pred] : 0;
4661     const int real_compmode_cost =
4662         cm->current_frame.reference_mode == REFERENCE_MODE_SELECT
4663             ? compmode_cost
4664             : 0;
4665     // Point to variables that are maintained between loop iterations
4666     args.single_newmv = search_state.single_newmv;
4667     args.single_newmv_rate = search_state.single_newmv_rate;
4668     args.single_newmv_valid = search_state.single_newmv_valid;
4669     args.single_comp_cost = real_compmode_cost;
4670     args.ref_frame_cost = ref_frame_cost;
4671     if (is_single_pred) {
4672       args.simple_rd_state = x->simple_rd_state[mode_enum];
4673     }
4674 
4675     int64_t skip_rd[2] = { search_state.best_skip_rd[0],
4676                            search_state.best_skip_rd[1] };
4677     int64_t this_rd = handle_inter_mode(
4678         cpi, tile_data, x, bsize, &rd_stats, &rd_stats_y, &rd_stats_uv,
4679         &disable_skip, &args, ref_best_rd, tmp_buf, &x->comp_rd_buffer,
4680         &best_est_rd, do_tx_search, inter_modes_info, &motion_mode_cand,
4681         skip_rd, &inter_cost_info_from_tpl);
4682 
4683     if (sf->inter_sf.prune_comp_search_by_single_result > 0 &&
4684         is_inter_singleref_mode(this_mode) && args.single_ref_first_pass) {
4685       collect_single_states(x, &search_state, mbmi);
4686     }
4687 
4688     if (this_rd == INT64_MAX) continue;
4689 
4690     if (mbmi->skip) {
4691       rd_stats_y.rate = 0;
4692       rd_stats_uv.rate = 0;
4693     }
4694 
4695     if (sf->inter_sf.prune_compound_using_single_ref && is_single_pred &&
4696         this_rd < ref_frame_rd[ref_frame]) {
4697       ref_frame_rd[ref_frame] = this_rd;
4698     }
4699 
4700     // Did this mode help, i.e., is it the new best mode
4701     if (this_rd < search_state.best_rd) {
4702       assert(IMPLIES(comp_pred,
4703                      cm->current_frame.reference_mode != SINGLE_REFERENCE));
4704       search_state.best_pred_sse = x->pred_sse[ref_frame];
4705       update_search_state(&search_state, rd_cost, ctx, &rd_stats, &rd_stats_y,
4706                           &rd_stats_uv, mode_enum, x, do_tx_search);
4707       if (do_tx_search) search_state.best_skip_rd[0] = skip_rd[0];
4708       search_state.best_skip_rd[1] = skip_rd[1];
4709     }
4710     if (cpi->sf.winner_mode_sf.motion_mode_for_winner_cand) {
4711       // Add this mode to motion mode candidate list for motion mode search
4712       // if using motion_mode_for_winner_cand speed feature
4713       handle_winner_cand(mbmi, &best_motion_mode_cands,
4714                          max_winner_motion_mode_cand, this_rd,
4715                          &motion_mode_cand, args.skip_motion_mode);
4716     }
4717 
4718     /* keep record of best compound/single-only prediction */
4719     if (!disable_skip) {
4720       record_best_compound(cm->current_frame.reference_mode, &rd_stats,
4721                            comp_pred, x->rdmult, &search_state, compmode_cost);
4722     }
4723   }
4724 
4725   if (cpi->sf.winner_mode_sf.motion_mode_for_winner_cand) {
4726     // For the single ref winner candidates, evaluate other motion modes (non
4727     // simple translation).
4728     evaluate_motion_mode_for_winner_candidates(
4729         cpi, x, rd_cost, &args, tile_data, ctx, yv12_mb,
4730         &best_motion_mode_cands, do_tx_search, bsize, &best_est_rd,
4731         &search_state);
4732   }
4733 
4734 #if CONFIG_COLLECT_COMPONENT_TIMING
4735   start_timing(cpi, do_tx_search_time);
4736 #endif
4737   if (do_tx_search != 1) {
4738     inter_modes_info_sort(inter_modes_info, inter_modes_info->rd_idx_pair_arr);
4739     search_state.best_rd = best_rd_so_far;
4740     search_state.best_mode_index = THR_INVALID;
4741     // Initialize best mode stats for winner mode processing
4742     x->winner_mode_count = 0;
4743     store_winner_mode_stats(
4744         &cpi->common, x, mbmi, NULL, NULL, NULL, THR_INVALID, NULL, bsize,
4745         best_rd_so_far, cpi->sf.winner_mode_sf.enable_multiwinner_mode_process,
4746         do_tx_search);
4747     inter_modes_info->num =
4748         inter_modes_info->num < cpi->sf.rt_sf.num_inter_modes_for_tx_search
4749             ? inter_modes_info->num
4750             : cpi->sf.rt_sf.num_inter_modes_for_tx_search;
4751     const int64_t top_est_rd =
4752         inter_modes_info->num > 0
4753             ? inter_modes_info
4754                   ->est_rd_arr[inter_modes_info->rd_idx_pair_arr[0].idx]
4755             : INT64_MAX;
4756     for (int j = 0; j < inter_modes_info->num; ++j) {
4757       const int data_idx = inter_modes_info->rd_idx_pair_arr[j].idx;
4758       *mbmi = inter_modes_info->mbmi_arr[data_idx];
4759       int64_t curr_est_rd = inter_modes_info->est_rd_arr[data_idx];
4760       if (curr_est_rd * 0.80 > top_est_rd) break;
4761 
4762       x->force_skip = 0;
4763       set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
4764 
4765       // Select prediction reference frames.
4766       const int is_comp_pred = mbmi->ref_frame[1] > INTRA_FRAME;
4767       for (i = 0; i < num_planes; i++) {
4768         xd->plane[i].pre[0] = yv12_mb[mbmi->ref_frame[0]][i];
4769         if (is_comp_pred) xd->plane[i].pre[1] = yv12_mb[mbmi->ref_frame[1]][i];
4770       }
4771 
4772       av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, 0,
4773                                     av1_num_planes(cm) - 1);
4774       if (mbmi->motion_mode == OBMC_CAUSAL) {
4775         av1_build_obmc_inter_predictors_sb(cm, xd);
4776       }
4777 
4778       RD_STATS rd_stats;
4779       RD_STATS rd_stats_y;
4780       RD_STATS rd_stats_uv;
4781       const int mode_rate = inter_modes_info->mode_rate_arr[data_idx];
4782       int64_t skip_rd = INT64_MAX;
4783       if (cpi->sf.inter_sf.txfm_rd_gate_level) {
4784         // Check if the mode is good enough based on skip RD
4785         int64_t curr_sse = inter_modes_info->sse_arr[data_idx];
4786         skip_rd = RDCOST(x->rdmult, mode_rate, curr_sse);
4787         int eval_txfm =
4788             check_txfm_eval(x, bsize, search_state.best_skip_rd[0], skip_rd,
4789                             cpi->sf.inter_sf.txfm_rd_gate_level, 0);
4790         if (!eval_txfm) continue;
4791       }
4792 
4793       if (!av1_txfm_search(cpi, x, bsize, &rd_stats, &rd_stats_y, &rd_stats_uv,
4794                            mode_rate, search_state.best_rd)) {
4795         continue;
4796       } else if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1) {
4797         inter_mode_data_push(tile_data, mbmi->sb_type, rd_stats.sse,
4798                              rd_stats.dist,
4799                              rd_stats_y.rate + rd_stats_uv.rate +
4800                                  x->skip_cost[skip_ctx][mbmi->skip]);
4801       }
4802       rd_stats.rdcost = RDCOST(x->rdmult, rd_stats.rate, rd_stats.dist);
4803 
4804       const THR_MODES mode_enum = get_prediction_mode_idx(
4805           mbmi->mode, mbmi->ref_frame[0], mbmi->ref_frame[1]);
4806 
4807       // Collect mode stats for multiwinner mode processing
4808       const int txfm_search_done = 1;
4809       store_winner_mode_stats(
4810           &cpi->common, x, mbmi, &rd_stats, &rd_stats_y, &rd_stats_uv,
4811           mode_enum, NULL, bsize, rd_stats.rdcost,
4812           cpi->sf.winner_mode_sf.enable_multiwinner_mode_process,
4813           txfm_search_done);
4814 
4815       if (rd_stats.rdcost < search_state.best_rd) {
4816         update_search_state(&search_state, rd_cost, ctx, &rd_stats, &rd_stats_y,
4817                             &rd_stats_uv, mode_enum, x, txfm_search_done);
4818         search_state.best_skip_rd[0] = skip_rd;
4819       }
4820     }
4821   }
4822 #if CONFIG_COLLECT_COMPONENT_TIMING
4823   end_timing(cpi, do_tx_search_time);
4824 #endif
4825 
4826 #if CONFIG_COLLECT_COMPONENT_TIMING
4827   start_timing(cpi, handle_intra_mode_time);
4828 #endif
4829 
4830   // Gate intra mode evaluation if best of inter is skip except when source
4831   // variance is extremely low
4832   if (sf->intra_sf.skip_intra_in_interframe &&
4833       (x->source_variance > sf->intra_sf.src_var_thresh_intra_skip)) {
4834     if (inter_cost >= 0 && intra_cost >= 0) {
4835       aom_clear_system_state();
4836       const NN_CONFIG *nn_config = (AOMMIN(cm->width, cm->height) <= 480)
4837                                        ? &av1_intrap_nn_config
4838                                        : &av1_intrap_hd_nn_config;
4839       float nn_features[6];
4840       float scores[2] = { 0.0f };
4841       float probs[2] = { 0.0f };
4842 
4843       nn_features[0] = (float)search_state.best_mbmode.skip;
4844       nn_features[1] = (float)mi_size_wide_log2[bsize];
4845       nn_features[2] = (float)mi_size_high_log2[bsize];
4846       nn_features[3] = (float)intra_cost;
4847       nn_features[4] = (float)inter_cost;
4848       const int ac_q = av1_ac_quant_QTX(x->qindex, 0, xd->bd);
4849       const int ac_q_max = av1_ac_quant_QTX(255, 0, xd->bd);
4850       nn_features[5] = (float)(ac_q_max / ac_q);
4851 
4852       av1_nn_predict(nn_features, nn_config, 1, scores);
4853       aom_clear_system_state();
4854       av1_nn_softmax(scores, probs, 2);
4855 
4856       if (probs[1] > 0.8) search_state.intra_search_state.skip_intra_modes = 1;
4857     } else if ((search_state.best_mbmode.skip) &&
4858                (sf->intra_sf.skip_intra_in_interframe >= 2)) {
4859       search_state.intra_search_state.skip_intra_modes = 1;
4860     }
4861   }
4862 
4863   const int intra_ref_frame_cost = ref_costs_single[INTRA_FRAME];
4864   for (int j = 0; j < intra_mode_num; ++j) {
4865     if (sf->intra_sf.skip_intra_in_interframe &&
4866         search_state.intra_search_state.skip_intra_modes)
4867       break;
4868     const THR_MODES mode_enum = intra_mode_idx_ls[j];
4869     const MODE_DEFINITION *mode_def = &av1_mode_defs[mode_enum];
4870     const PREDICTION_MODE this_mode = mode_def->mode;
4871 
4872     assert(av1_mode_defs[mode_enum].ref_frame[0] == INTRA_FRAME);
4873     assert(av1_mode_defs[mode_enum].ref_frame[1] == NONE_FRAME);
4874     init_mbmi(mbmi, this_mode, av1_mode_defs[mode_enum].ref_frame, cm);
4875     x->force_skip = 0;
4876 
4877     if (this_mode != DC_PRED) {
4878       // Only search the oblique modes if the best so far is
4879       // one of the neighboring directional modes
4880       if ((sf->rt_sf.mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
4881           (this_mode >= D45_PRED && this_mode <= PAETH_PRED)) {
4882         if (search_state.best_mode_index != THR_INVALID &&
4883             search_state.best_mbmode.ref_frame[0] > INTRA_FRAME)
4884           continue;
4885       }
4886       if (sf->rt_sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
4887         if (conditional_skipintra(
4888                 this_mode, search_state.intra_search_state.best_intra_mode))
4889           continue;
4890       }
4891     }
4892 
4893     RD_STATS intra_rd_stats, intra_rd_stats_y, intra_rd_stats_uv;
4894     intra_rd_stats.rdcost = av1_handle_intra_mode(
4895         &search_state.intra_search_state, cpi, x, bsize, intra_ref_frame_cost,
4896         ctx, 0, &intra_rd_stats, &intra_rd_stats_y, &intra_rd_stats_uv,
4897         search_state.best_rd, &search_state.best_intra_rd,
4898         search_state.best_mbmode.skip);
4899     // Collect mode stats for multiwinner mode processing
4900     const int txfm_search_done = 1;
4901     store_winner_mode_stats(
4902         &cpi->common, x, mbmi, &intra_rd_stats, &intra_rd_stats_y,
4903         &intra_rd_stats_uv, mode_enum, NULL, bsize, intra_rd_stats.rdcost,
4904         cpi->sf.winner_mode_sf.enable_multiwinner_mode_process,
4905         txfm_search_done);
4906     if (intra_rd_stats.rdcost < search_state.best_rd) {
4907       update_search_state(&search_state, rd_cost, ctx, &intra_rd_stats,
4908                           &intra_rd_stats_y, &intra_rd_stats_uv, mode_enum, x,
4909                           txfm_search_done);
4910     }
4911   }
4912 #if CONFIG_COLLECT_COMPONENT_TIMING
4913   end_timing(cpi, handle_intra_mode_time);
4914 #endif
4915 
4916   int winner_mode_count = cpi->sf.winner_mode_sf.enable_multiwinner_mode_process
4917                               ? x->winner_mode_count
4918                               : 1;
4919   // In effect only when fast tx search speed features are enabled.
4920   refine_winner_mode_tx(
4921       cpi, x, rd_cost, bsize, ctx, &search_state.best_mode_index,
4922       &search_state.best_mbmode, yv12_mb, search_state.best_rate_y,
4923       search_state.best_rate_uv, &search_state.best_skip2, winner_mode_count);
4924 
4925   // Initialize default mode evaluation params
4926   set_mode_eval_params(cpi, x, DEFAULT_EVAL);
4927 
4928   // Only try palette mode when the best mode so far is an intra mode.
4929   const int try_palette =
4930       cpi->oxcf.enable_palette &&
4931       av1_allow_palette(features->allow_screen_content_tools, mbmi->sb_type) &&
4932       !is_inter_mode(search_state.best_mbmode.mode);
4933   PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
4934   RD_STATS this_rd_cost;
4935   int this_skippable = 0;
4936   if (try_palette) {
4937     this_skippable = av1_search_palette_mode(
4938         cpi, x, &this_rd_cost, ctx, bsize, mbmi, pmi, ref_costs_single,
4939         &search_state.intra_search_state, search_state.best_rd);
4940     if (this_rd_cost.rdcost < search_state.best_rd) {
4941       search_state.best_mode_index = THR_DC;
4942       mbmi->mv[0].as_int = 0;
4943       rd_cost->rate = this_rd_cost.rate;
4944       rd_cost->dist = this_rd_cost.dist;
4945       rd_cost->rdcost = this_rd_cost.rdcost;
4946       search_state.best_rd = rd_cost->rdcost;
4947       search_state.best_mbmode = *mbmi;
4948       search_state.best_skip2 = 0;
4949       search_state.best_mode_skippable = this_skippable;
4950       memcpy(ctx->blk_skip, x->blk_skip,
4951              sizeof(x->blk_skip[0]) * ctx->num_4x4_blk);
4952       av1_copy_array(ctx->tx_type_map, xd->tx_type_map, ctx->num_4x4_blk);
4953     }
4954   }
4955 
4956   search_state.best_mbmode.skip_mode = 0;
4957   if (cm->current_frame.skip_mode_info.skip_mode_flag &&
4958       is_comp_ref_allowed(bsize)) {
4959     const struct segmentation *const seg = &cm->seg;
4960     unsigned char segment_id = mbmi->segment_id;
4961     if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
4962       rd_pick_skip_mode(rd_cost, &search_state, cpi, x, bsize, yv12_mb);
4963     }
4964   }
4965 
4966   // Make sure that the ref_mv_idx is only nonzero when we're
4967   // using a mode which can support ref_mv_idx
4968   if (search_state.best_mbmode.ref_mv_idx != 0 &&
4969       !(search_state.best_mbmode.mode == NEWMV ||
4970         search_state.best_mbmode.mode == NEW_NEWMV ||
4971         have_nearmv_in_inter_mode(search_state.best_mbmode.mode))) {
4972     search_state.best_mbmode.ref_mv_idx = 0;
4973   }
4974 
4975   if (search_state.best_mode_index == THR_INVALID ||
4976       search_state.best_rd >= best_rd_so_far) {
4977     rd_cost->rate = INT_MAX;
4978     rd_cost->rdcost = INT64_MAX;
4979     return;
4980   }
4981 
4982   const InterpFilter interp_filter = features->interp_filter;
4983   assert((interp_filter == SWITCHABLE) ||
4984          (interp_filter ==
4985           search_state.best_mbmode.interp_filters.as_filters.y_filter) ||
4986          !is_inter_block(&search_state.best_mbmode));
4987   assert((interp_filter == SWITCHABLE) ||
4988          (interp_filter ==
4989           search_state.best_mbmode.interp_filters.as_filters.x_filter) ||
4990          !is_inter_block(&search_state.best_mbmode));
4991 
4992   if (!cpi->rc.is_src_frame_alt_ref && cpi->sf.inter_sf.adaptive_rd_thresh) {
4993     av1_update_rd_thresh_fact(cm, x->thresh_freq_fact,
4994                               sf->inter_sf.adaptive_rd_thresh, bsize,
4995                               search_state.best_mode_index);
4996   }
4997 
4998   // macroblock modes
4999   *mbmi = search_state.best_mbmode;
5000   x->force_skip |= search_state.best_skip2;
5001 
5002   // Note: this section is needed since the mode may have been forced to
5003   // GLOBALMV by the all-zero mode handling of ref-mv.
5004   if (mbmi->mode == GLOBALMV || mbmi->mode == GLOBAL_GLOBALMV) {
5005     // Correct the interp filters for GLOBALMV
5006     if (is_nontrans_global_motion(xd, xd->mi[0])) {
5007       int_interpfilters filters =
5008           av1_broadcast_interp_filter(av1_unswitchable_filter(interp_filter));
5009       assert(mbmi->interp_filters.as_int == filters.as_int);
5010       (void)filters;
5011     }
5012   }
5013 
5014   for (i = 0; i < REFERENCE_MODES; ++i) {
5015     if (search_state.intra_search_state.best_pred_rd[i] == INT64_MAX) {
5016       search_state.best_pred_diff[i] = INT_MIN;
5017     } else {
5018       search_state.best_pred_diff[i] =
5019           search_state.best_rd -
5020           search_state.intra_search_state.best_pred_rd[i];
5021     }
5022   }
5023 
5024   x->force_skip |= search_state.best_mode_skippable;
5025 
5026   assert(search_state.best_mode_index != THR_INVALID);
5027 
5028 #if CONFIG_INTERNAL_STATS
5029   store_coding_context(x, ctx, search_state.best_mode_index,
5030                        search_state.best_pred_diff,
5031                        search_state.best_mode_skippable);
5032 #else
5033   store_coding_context(x, ctx, search_state.best_pred_diff,
5034                        search_state.best_mode_skippable);
5035 #endif  // CONFIG_INTERNAL_STATS
5036 
5037   if (pmi->palette_size[1] > 0) {
5038     assert(try_palette);
5039     av1_restore_uv_color_map(cpi, x);
5040   }
5041 }
5042 
// Mode decision for a block whose segment has SEG_LVL_SKIP active: no
// residual is coded, so the block is forced to GLOBALMV with a single
// reference frame, and the only real search performed is over the
// switchable interpolation filters.  Writes the chosen mode into xd->mi[0]
// and its RD cost into |rd_cost| (rate is set to INT_MAX when the result
// is not better than |best_rd_so_far|).
void av1_rd_pick_inter_mode_sb_seg_skip(const AV1_COMP *cpi,
                                        TileDataEnc *tile_data, MACROBLOCK *x,
                                        int mi_row, int mi_col,
                                        RD_STATS *rd_cost, BLOCK_SIZE bsize,
                                        PICK_MODE_CONTEXT *ctx,
                                        int64_t best_rd_so_far) {
  const AV1_COMMON *const cm = &cpi->common;
  const FeatureFlags *const features = &cm->features;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = xd->mi[0];
  unsigned char segment_id = mbmi->segment_id;
  const int comp_pred = 0;  // Compound prediction is never used on this path.
  int i;
  int64_t best_pred_diff[REFERENCE_MODES];
  unsigned int ref_costs_single[REF_FRAMES];
  unsigned int ref_costs_comp[REF_FRAMES][REF_FRAMES];
  int *comp_inter_cost = x->comp_inter_cost[av1_get_reference_mode_context(xd)];
  InterpFilter best_filter = SWITCHABLE;
  int64_t this_rd = INT64_MAX;
  int rate2 = 0;
  // No residual is transmitted for a skip segment, so the distortion term
  // of the RD cost is modelled as zero.
  const int64_t distortion2 = 0;
  // NOTE(review): mi_row/mi_col are actually used below in
  // gm_get_motion_vector(); these (void) casts look stale but are harmless.
  (void)mi_row;
  (void)mi_col;
  (void)tile_data;

  av1_collect_neighbors_ref_counts(xd);

  estimate_ref_frame_costs(cm, xd, x, segment_id, ref_costs_single,
                           ref_costs_comp);

  // Invalidate cached per-reference metrics from any previous search.
  for (i = 0; i < REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
  for (i = LAST_FRAME; i < REF_FRAMES; ++i) x->pred_mv_sad[i] = INT_MAX;

  rd_cost->rate = INT_MAX;

  assert(segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP));

  // Force the only mode combination allowed for a skip segment: global
  // motion from a single reference frame with DC chroma prediction.
  mbmi->palette_mode_info.palette_size[0] = 0;
  mbmi->palette_mode_info.palette_size[1] = 0;
  mbmi->filter_intra_mode_info.use_filter_intra = 0;
  mbmi->mode = GLOBALMV;
  mbmi->motion_mode = SIMPLE_TRANSLATION;
  mbmi->uv_mode = UV_DC_PRED;
  // The reference frame is dictated by the segment when SEG_LVL_REF_FRAME
  // is active; otherwise default to LAST_FRAME.
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME))
    mbmi->ref_frame[0] = get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
  else
    mbmi->ref_frame[0] = LAST_FRAME;
  mbmi->ref_frame[1] = NONE_FRAME;
  // Derive the motion vector from the global motion model of the chosen
  // reference.
  mbmi->mv[0].as_int =
      gm_get_motion_vector(&cm->global_motion[mbmi->ref_frame[0]],
                           features->allow_high_precision_mv, bsize, mi_col,
                           mi_row, features->cur_frame_force_integer_mv)
          .as_int;
  mbmi->tx_size = max_txsize_lookup[bsize];
  x->force_skip = 1;

  mbmi->ref_mv_idx = 0;

  mbmi->motion_mode = SIMPLE_TRANSLATION;
  av1_count_overlappable_neighbors(cm, xd);
  // Collect warped-motion projection samples so that later stages see a
  // consistent num_proj_ref, even though the mode itself stays GLOBALMV.
  if (is_motion_variation_allowed_bsize(bsize) && !has_second_ref(mbmi)) {
    int pts[SAMPLES_ARRAY_SIZE], pts_inref[SAMPLES_ARRAY_SIZE];
    mbmi->num_proj_ref = av1_findSamples(cm, xd, pts, pts_inref);
    // Select the samples according to motion vector difference
    if (mbmi->num_proj_ref > 1)
      mbmi->num_proj_ref = av1_selectSamples(&mbmi->mv[0].as_mv, pts, pts_inref,
                                             mbmi->num_proj_ref, bsize);
  }

  const InterpFilter interp_filter = features->interp_filter;
  set_default_interp_filters(mbmi, interp_filter);

  if (interp_filter != SWITCHABLE) {
    best_filter = interp_filter;
  } else {
    best_filter = EIGHTTAP_REGULAR;
    // Only spend time on a filter search when interpolation is actually
    // needed and the block has enough variance for the choice to matter.
    if (av1_is_interp_needed(xd) &&
        x->source_variance >=
            cpi->sf.interp_sf.disable_filter_search_var_thresh) {
      int rs;
      int best_rs = INT_MAX;
      // With zero distortion, the best filter is simply the cheapest one to
      // signal.
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        mbmi->interp_filters = av1_broadcast_interp_filter(i);
        rs = av1_get_switchable_rate(x, xd, interp_filter);
        if (rs < best_rs) {
          best_rs = rs;
          best_filter = mbmi->interp_filters.as_filters.y_filter;
        }
      }
    }
  }
  // Set the appropriate filter
  mbmi->interp_filters = av1_broadcast_interp_filter(best_filter);
  rate2 += av1_get_switchable_rate(x, xd, interp_filter);

  if (cm->current_frame.reference_mode == REFERENCE_MODE_SELECT)
    rate2 += comp_inter_cost[comp_pred];

  // Estimate the reference frame signaling cost and add it
  // to the rolling cost variable.
  // NOTE(review): this always charges LAST_FRAME's cost even when the
  // segment selected a different reference above — confirm intent.
  rate2 += ref_costs_single[LAST_FRAME];
  this_rd = RDCOST(x->rdmult, rate2, distortion2);

  rd_cost->rate = rate2;
  rd_cost->dist = distortion2;
  rd_cost->rdcost = this_rd;

  // Bail out (signalled via INT_MAX rate) if this is not an improvement.
  if (this_rd >= best_rd_so_far) {
    rd_cost->rate = INT_MAX;
    rd_cost->rdcost = INT64_MAX;
    return;
  }

  assert((interp_filter == SWITCHABLE) ||
         (interp_filter == mbmi->interp_filters.as_filters.y_filter));

  if (cpi->sf.inter_sf.adaptive_rd_thresh) {
    av1_update_rd_thresh_fact(cm, x->thresh_freq_fact,
                              cpi->sf.inter_sf.adaptive_rd_thresh, bsize,
                              THR_GLOBALMV);
  }

  av1_zero(best_pred_diff);

#if CONFIG_INTERNAL_STATS
  store_coding_context(x, ctx, THR_GLOBALMV, best_pred_diff, 0);
#else
  store_coding_context(x, ctx, best_pred_diff, 0);
#endif  // CONFIG_INTERNAL_STATS
}
5173 
// Context threaded through the foreach_overlappable_nb_* callbacks while
// building the OBMC weighted-source and mask buffers.
struct calc_target_weighted_pred_ctxt {
  const MACROBLOCK *x;  // Block being encoded; provides wsrc_buf/mask_buf.
  const uint8_t *tmp;   // Neighbour's prediction (above row or left column).
  int tmp_stride;       // Stride of |tmp| in samples.
  int overlap;          // OBMC overlap extent in pixels.
};
5180 
calc_target_weighted_pred_above(MACROBLOCKD * xd,int rel_mi_row,int rel_mi_col,uint8_t op_mi_size,int dir,MB_MODE_INFO * nb_mi,void * fun_ctxt,const int num_planes)5181 static INLINE void calc_target_weighted_pred_above(
5182     MACROBLOCKD *xd, int rel_mi_row, int rel_mi_col, uint8_t op_mi_size,
5183     int dir, MB_MODE_INFO *nb_mi, void *fun_ctxt, const int num_planes) {
5184   (void)nb_mi;
5185   (void)num_planes;
5186   (void)rel_mi_row;
5187   (void)dir;
5188 
5189   struct calc_target_weighted_pred_ctxt *ctxt =
5190       (struct calc_target_weighted_pred_ctxt *)fun_ctxt;
5191 
5192   const int bw = xd->width << MI_SIZE_LOG2;
5193   const uint8_t *const mask1d = av1_get_obmc_mask(ctxt->overlap);
5194 
5195   int32_t *wsrc = ctxt->x->wsrc_buf + (rel_mi_col * MI_SIZE);
5196   int32_t *mask = ctxt->x->mask_buf + (rel_mi_col * MI_SIZE);
5197   const uint8_t *tmp = ctxt->tmp + rel_mi_col * MI_SIZE;
5198   const int is_hbd = is_cur_buf_hbd(xd);
5199 
5200   if (!is_hbd) {
5201     for (int row = 0; row < ctxt->overlap; ++row) {
5202       const uint8_t m0 = mask1d[row];
5203       const uint8_t m1 = AOM_BLEND_A64_MAX_ALPHA - m0;
5204       for (int col = 0; col < op_mi_size * MI_SIZE; ++col) {
5205         wsrc[col] = m1 * tmp[col];
5206         mask[col] = m0;
5207       }
5208       wsrc += bw;
5209       mask += bw;
5210       tmp += ctxt->tmp_stride;
5211     }
5212   } else {
5213     const uint16_t *tmp16 = CONVERT_TO_SHORTPTR(tmp);
5214 
5215     for (int row = 0; row < ctxt->overlap; ++row) {
5216       const uint8_t m0 = mask1d[row];
5217       const uint8_t m1 = AOM_BLEND_A64_MAX_ALPHA - m0;
5218       for (int col = 0; col < op_mi_size * MI_SIZE; ++col) {
5219         wsrc[col] = m1 * tmp16[col];
5220         mask[col] = m0;
5221       }
5222       wsrc += bw;
5223       mask += bw;
5224       tmp16 += ctxt->tmp_stride;
5225     }
5226   }
5227 }
5228 
calc_target_weighted_pred_left(MACROBLOCKD * xd,int rel_mi_row,int rel_mi_col,uint8_t op_mi_size,int dir,MB_MODE_INFO * nb_mi,void * fun_ctxt,const int num_planes)5229 static INLINE void calc_target_weighted_pred_left(
5230     MACROBLOCKD *xd, int rel_mi_row, int rel_mi_col, uint8_t op_mi_size,
5231     int dir, MB_MODE_INFO *nb_mi, void *fun_ctxt, const int num_planes) {
5232   (void)nb_mi;
5233   (void)num_planes;
5234   (void)rel_mi_col;
5235   (void)dir;
5236 
5237   struct calc_target_weighted_pred_ctxt *ctxt =
5238       (struct calc_target_weighted_pred_ctxt *)fun_ctxt;
5239 
5240   const int bw = xd->width << MI_SIZE_LOG2;
5241   const uint8_t *const mask1d = av1_get_obmc_mask(ctxt->overlap);
5242 
5243   int32_t *wsrc = ctxt->x->wsrc_buf + (rel_mi_row * MI_SIZE * bw);
5244   int32_t *mask = ctxt->x->mask_buf + (rel_mi_row * MI_SIZE * bw);
5245   const uint8_t *tmp = ctxt->tmp + (rel_mi_row * MI_SIZE * ctxt->tmp_stride);
5246   const int is_hbd = is_cur_buf_hbd(xd);
5247 
5248   if (!is_hbd) {
5249     for (int row = 0; row < op_mi_size * MI_SIZE; ++row) {
5250       for (int col = 0; col < ctxt->overlap; ++col) {
5251         const uint8_t m0 = mask1d[col];
5252         const uint8_t m1 = AOM_BLEND_A64_MAX_ALPHA - m0;
5253         wsrc[col] = (wsrc[col] >> AOM_BLEND_A64_ROUND_BITS) * m0 +
5254                     (tmp[col] << AOM_BLEND_A64_ROUND_BITS) * m1;
5255         mask[col] = (mask[col] >> AOM_BLEND_A64_ROUND_BITS) * m0;
5256       }
5257       wsrc += bw;
5258       mask += bw;
5259       tmp += ctxt->tmp_stride;
5260     }
5261   } else {
5262     const uint16_t *tmp16 = CONVERT_TO_SHORTPTR(tmp);
5263 
5264     for (int row = 0; row < op_mi_size * MI_SIZE; ++row) {
5265       for (int col = 0; col < ctxt->overlap; ++col) {
5266         const uint8_t m0 = mask1d[col];
5267         const uint8_t m1 = AOM_BLEND_A64_MAX_ALPHA - m0;
5268         wsrc[col] = (wsrc[col] >> AOM_BLEND_A64_ROUND_BITS) * m0 +
5269                     (tmp16[col] << AOM_BLEND_A64_ROUND_BITS) * m1;
5270         mask[col] = (mask[col] >> AOM_BLEND_A64_ROUND_BITS) * m0;
5271       }
5272       wsrc += bw;
5273       mask += bw;
5274       tmp16 += ctxt->tmp_stride;
5275     }
5276   }
5277 }
5278 
5279 // This function has a structure similar to av1_build_obmc_inter_prediction
5280 //
5281 // The OBMC predictor is computed as:
5282 //
5283 //  PObmc(x,y) =
5284 //    AOM_BLEND_A64(Mh(x),
5285 //                  AOM_BLEND_A64(Mv(y), P(x,y), PAbove(x,y)),
5286 //                  PLeft(x, y))
5287 //
5288 // Scaling up by AOM_BLEND_A64_MAX_ALPHA ** 2 and omitting the intermediate
5289 // rounding, this can be written as:
5290 //
5291 //  AOM_BLEND_A64_MAX_ALPHA * AOM_BLEND_A64_MAX_ALPHA * Pobmc(x,y) =
5292 //    Mh(x) * Mv(y) * P(x,y) +
5293 //      Mh(x) * Cv(y) * Pabove(x,y) +
5294 //      AOM_BLEND_A64_MAX_ALPHA * Ch(x) * PLeft(x, y)
5295 //
5296 // Where :
5297 //
//  Cv(y) = AOM_BLEND_A64_MAX_ALPHA - Mv(y)
//  Ch(x) = AOM_BLEND_A64_MAX_ALPHA - Mh(x)
5300 //
5301 // This function computes 'wsrc' and 'mask' as:
5302 //
//  wsrc(x, y) =
//    AOM_BLEND_A64_MAX_ALPHA * AOM_BLEND_A64_MAX_ALPHA * src(x, y) -
//      Mh(x) * Cv(y) * Pabove(x,y) -
//      AOM_BLEND_A64_MAX_ALPHA * Ch(x) * PLeft(x, y)
5307 //
5308 //  mask(x, y) = Mh(x) * Mv(y)
5309 //
5310 // These can then be used to efficiently approximate the error for any
5311 // predictor P in the context of the provided neighbouring predictors by
5312 // computing:
5313 //
5314 //  error(x, y) =
5315 //    wsrc(x, y) - mask(x, y) * P(x, y) / (AOM_BLEND_A64_MAX_ALPHA ** 2)
5316 //
// Builds the 'wsrc' (weighted source) and 'mask' buffers used to estimate
// OBMC prediction error without re-blending for every candidate predictor;
// see the derivation in the comment above.  |above|/|left| are the
// neighbouring predictions with strides |above_stride|/|left_stride|.
static AOM_INLINE void calc_target_weighted_pred(
    const AV1_COMMON *cm, const MACROBLOCK *x, const MACROBLOCKD *xd,
    const uint8_t *above, int above_stride, const uint8_t *left,
    int left_stride) {
  const BLOCK_SIZE bsize = xd->mi[0]->sb_type;
  const int bw = xd->width << MI_SIZE_LOG2;
  const int bh = xd->height << MI_SIZE_LOG2;
  int32_t *mask_buf = x->mask_buf;
  int32_t *wsrc_buf = x->wsrc_buf;

  const int is_hbd = is_cur_buf_hbd(xd);
  const int src_scale = AOM_BLEND_A64_MAX_ALPHA * AOM_BLEND_A64_MAX_ALPHA;

  // plane 0 should not be sub-sampled
  assert(xd->plane[0].subsampling_x == 0);
  assert(xd->plane[0].subsampling_y == 0);

  // Start from "no neighbour" defaults: zero weighted source and a
  // full-weight mask.
  av1_zero_array(wsrc_buf, bw * bh);
  for (int i = 0; i < bw * bh; ++i) mask_buf[i] = AOM_BLEND_A64_MAX_ALPHA;

  // handle above row
  if (xd->up_available) {
    const int overlap =
        AOMMIN(block_size_high[bsize], block_size_high[BLOCK_64X64]) >> 1;
    struct calc_target_weighted_pred_ctxt ctxt = { x, above, above_stride,
                                                   overlap };
    foreach_overlappable_nb_above(cm, (MACROBLOCKD *)xd,
                                  max_neighbor_obmc[mi_size_wide_log2[bsize]],
                                  calc_target_weighted_pred_above, &ctxt);
  }

  // The above pass leaves both buffers scaled by a single factor of
  // AOM_BLEND_A64_MAX_ALPHA; bring them up to the final MAX_ALPHA^2 scale
  // before the left pass blends into them.
  for (int i = 0; i < bw * bh; ++i) {
    wsrc_buf[i] *= AOM_BLEND_A64_MAX_ALPHA;
    mask_buf[i] *= AOM_BLEND_A64_MAX_ALPHA;
  }

  // handle left column
  if (xd->left_available) {
    const int overlap =
        AOMMIN(block_size_wide[bsize], block_size_wide[BLOCK_64X64]) >> 1;
    struct calc_target_weighted_pred_ctxt ctxt = { x, left, left_stride,
                                                   overlap };
    foreach_overlappable_nb_left(cm, (MACROBLOCKD *)xd,
                                 max_neighbor_obmc[mi_size_high_log2[bsize]],
                                 calc_target_weighted_pred_left, &ctxt);
  }

  // Finally fold in the source: wsrc = src * MAX_ALPHA^2 - (neighbour
  // terms accumulated above).
  if (!is_hbd) {
    const uint8_t *src = x->plane[0].src.buf;

    for (int row = 0; row < bh; ++row) {
      for (int col = 0; col < bw; ++col) {
        wsrc_buf[col] = src[col] * src_scale - wsrc_buf[col];
      }
      wsrc_buf += bw;
      src += x->plane[0].src.stride;
    }
  } else {
    const uint16_t *src = CONVERT_TO_SHORTPTR(x->plane[0].src.buf);

    for (int row = 0; row < bh; ++row) {
      for (int col = 0; col < bw; ++col) {
        wsrc_buf[col] = src[col] * src_scale - wsrc_buf[col];
      }
      wsrc_buf += bw;
      src += x->plane[0].src.stride;
    }
  }
}
5386 
/* Use standard 3x3 Sobel matrix. Macro so it can be used for either high or
   low bit-depth arrays. */
/* Horizontal gradient at (i, j); convolves the 3x3 neighbourhood with
     [ +1 0 -1 ]
     [ +2 0 -2 ]
     [ +1 0 -1 ]  (positive left column, negative right column). */
#define SOBEL_X(src, stride, i, j)                       \
  ((src)[((i)-1) + (stride) * ((j)-1)] -                 \
   (src)[((i) + 1) + (stride) * ((j)-1)] +  /* NOLINT */ \
   2 * (src)[((i)-1) + (stride) * (j)] -    /* NOLINT */ \
   2 * (src)[((i) + 1) + (stride) * (j)] +  /* NOLINT */ \
   (src)[((i)-1) + (stride) * ((j) + 1)] -  /* NOLINT */ \
   (src)[((i) + 1) + (stride) * ((j) + 1)]) /* NOLINT */
/* Vertical gradient at (i, j); convolves the 3x3 neighbourhood with
     [ +1 +2 +1 ]
     [  0  0  0 ]
     [ -1 -2 -1 ]  (positive top row, negative bottom row). */
#define SOBEL_Y(src, stride, i, j)                       \
  ((src)[((i)-1) + (stride) * ((j)-1)] +                 \
   2 * (src)[(i) + (stride) * ((j)-1)] +    /* NOLINT */ \
   (src)[((i) + 1) + (stride) * ((j)-1)] -  /* NOLINT */ \
   (src)[((i)-1) + (stride) * ((j) + 1)] -  /* NOLINT */ \
   2 * (src)[(i) + (stride) * ((j) + 1)] -  /* NOLINT */ \
   (src)[((i) + 1) + (stride) * ((j) + 1)]) /* NOLINT */
5403 
av1_sobel(const uint8_t * input,int stride,int i,int j,bool high_bd)5404 sobel_xy av1_sobel(const uint8_t *input, int stride, int i, int j,
5405                    bool high_bd) {
5406   int16_t s_x;
5407   int16_t s_y;
5408   if (high_bd) {
5409     const uint16_t *src = CONVERT_TO_SHORTPTR(input);
5410     s_x = SOBEL_X(src, stride, i, j);
5411     s_y = SOBEL_Y(src, stride, i, j);
5412   } else {
5413     s_x = SOBEL_X(input, stride, i, j);
5414     s_y = SOBEL_Y(input, stride, i, j);
5415   }
5416   sobel_xy r = { .x = s_x, .y = s_y };
5417   return r;
5418 }
5419 
// 8-tap Gaussian convolution filter with sigma = 1.3, sums to 128,
// all co-efficients must be even.
// Used (for both passes) by av1_gaussian_blur(); aligned for SIMD loads.
DECLARE_ALIGNED(16, static const int16_t, gauss_filter[8]) = { 2,  12, 30, 40,
                                                               30, 12, 2,  0 };
5424 
// Applies a separable 8-tap Gaussian blur (gauss_filter above) to a w x h
// region of |src| and writes the result to |dst| with stride w.  |high_bd|
// selects the 16-bit sample path with bit depth |bd|; both buffers are
// CONVERT_TO_BYTEPTR'd uint16_t arrays in that case.
void av1_gaussian_blur(const uint8_t *src, int src_stride, int w, int h,
                       uint8_t *dst, bool high_bd, int bd) {
  ConvolveParams conv_params = get_conv_params(0, 0, bd);
  // Reuse the standard convolve machinery with the Gaussian taps in both
  // horizontal and vertical directions.
  InterpFilterParams filter = { .filter_ptr = gauss_filter,
                                .taps = 8,
                                .subpel_shifts = 0,
                                .interp_filter = EIGHTTAP_REGULAR };
  // Requirements from the vector-optimized implementations.
  assert(h % 4 == 0);
  assert(w % 8 == 0);
  // Because we use an eight tap filter, the stride should be at least 7 + w.
  assert(src_stride >= w + 7);
#if CONFIG_AV1_HIGHBITDEPTH
  if (high_bd) {
    av1_highbd_convolve_2d_sr(CONVERT_TO_SHORTPTR(src), src_stride,
                              CONVERT_TO_SHORTPTR(dst), w, w, h, &filter,
                              &filter, 0, 0, &conv_params, bd);
  } else {
    av1_convolve_2d_sr(src, src_stride, dst, w, w, h, &filter, &filter, 0, 0,
                       &conv_params);
  }
#else
  // High bit-depth support compiled out: only the 8-bit path is available.
  (void)high_bd;
  av1_convolve_2d_sr(src, src_stride, dst, w, w, h, &filter, &filter, 0, 0,
                     &conv_params);
#endif
}
5452 
edge_probability(const uint8_t * input,int w,int h,bool high_bd,int bd)5453 static EdgeInfo edge_probability(const uint8_t *input, int w, int h,
5454                                  bool high_bd, int bd) {
5455   // The probability of an edge in the whole image is the same as the highest
5456   // probability of an edge for any individual pixel. Use Sobel as the metric
5457   // for finding an edge.
5458   uint16_t highest = 0;
5459   uint16_t highest_x = 0;
5460   uint16_t highest_y = 0;
5461   // Ignore the 1 pixel border around the image for the computation.
5462   for (int j = 1; j < h - 1; ++j) {
5463     for (int i = 1; i < w - 1; ++i) {
5464       sobel_xy g = av1_sobel(input, w, i, j, high_bd);
5465       // Scale down to 8-bit to get same output regardless of bit depth.
5466       int16_t g_x = g.x >> (bd - 8);
5467       int16_t g_y = g.y >> (bd - 8);
5468       uint16_t magnitude = (uint16_t)sqrt(g_x * g_x + g_y * g_y);
5469       highest = AOMMAX(highest, magnitude);
5470       highest_x = AOMMAX(highest_x, g_x);
5471       highest_y = AOMMAX(highest_y, g_y);
5472     }
5473   }
5474   EdgeInfo ei = { .magnitude = highest, .x = highest_x, .y = highest_y };
5475   return ei;
5476 }
5477 
5478 /* Uses most of the Canny edge detection algorithm to find if there are any
5479  * edges in the image.
5480  */
av1_edge_exists(const uint8_t * src,int src_stride,int w,int h,bool high_bd,int bd)5481 EdgeInfo av1_edge_exists(const uint8_t *src, int src_stride, int w, int h,
5482                          bool high_bd, int bd) {
5483   if (w < 3 || h < 3) {
5484     EdgeInfo n = { .magnitude = 0, .x = 0, .y = 0 };
5485     return n;
5486   }
5487   uint8_t *blurred;
5488   if (high_bd) {
5489     blurred = CONVERT_TO_BYTEPTR(aom_memalign(32, sizeof(uint16_t) * w * h));
5490   } else {
5491     blurred = (uint8_t *)aom_memalign(32, sizeof(uint8_t) * w * h);
5492   }
5493   av1_gaussian_blur(src, src_stride, w, h, blurred, high_bd, bd);
5494   // Skip the non-maximum suppression step in Canny edge detection. We just
5495   // want a probability of an edge existing in the buffer, which is determined
5496   // by the strongest edge in it -- we don't need to eliminate the weaker
5497   // edges. Use Sobel for the edge detection.
5498   EdgeInfo prob = edge_probability(blurred, w, h, high_bd, bd);
5499   if (high_bd) {
5500     aom_free(CONVERT_TO_SHORTPTR(blurred));
5501   } else {
5502     aom_free(blurred);
5503   }
5504   return prob;
5505 }
5506