1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11
12 #include "vpx_config.h"
13 #include "./vpx_scale_rtcd.h"
14 #include "vp8/common/onyxc_int.h"
15 #include "vp8/common/blockd.h"
16 #include "onyx_int.h"
17 #include "vp8/common/systemdependent.h"
18 #include "quantize.h"
19 #include "vp8/common/alloccommon.h"
20 #include "mcomp.h"
21 #include "firstpass.h"
22 #include "vpx/internal/vpx_psnr.h"
23 #include "vpx_scale/vpx_scale.h"
24 #include "vp8/common/extend.h"
25 #include "ratectrl.h"
26 #include "vp8/common/quant_common.h"
27 #include "segmentation.h"
28 #if CONFIG_POSTPROC
29 #include "vp8/common/postproc.h"
30 #endif
31 #include "vpx_mem/vpx_mem.h"
32 #include "vp8/common/swapyv12buffer.h"
33 #include "vp8/common/threading.h"
34 #include "vpx_ports/vpx_timer.h"
35 #if ARCH_ARM
36 #include "vpx_ports/arm.h"
37 #endif
38 #if CONFIG_MULTI_RES_ENCODING
39 #include "mr_dissim.h"
40 #endif
41 #include "encodeframe.h"
42
43 #include <math.h>
44 #include <stdio.h>
45 #include <limits.h>
46
47 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
48 extern int vp8_update_coef_context(VP8_COMP *cpi);
49 extern void vp8_update_coef_probs(VP8_COMP *cpi);
50 #endif
51
52 extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
53 extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
54 extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
55
56 extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post, int filt_lvl, int low_var_thresh, int flag);
57 extern void print_parms(VP8_CONFIG *ocf, char *filenam);
58 extern unsigned int vp8_get_processor_freq();
59 extern void print_tree_update_probs();
60 extern int vp8cx_create_encoder_threads(VP8_COMP *cpi);
61 extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
62
63 int vp8_estimate_entropy_savings(VP8_COMP *cpi);
64
65 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
66
67 extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
68
69 static void set_default_lf_deltas(VP8_COMP *cpi);
70
71 extern const int vp8_gf_interval_table[101];
72
73 #if CONFIG_INTERNAL_STATS
74 #include "math.h"
75
76 extern double vp8_calc_ssim
77 (
78 YV12_BUFFER_CONFIG *source,
79 YV12_BUFFER_CONFIG *dest,
80 int lumamask,
81 double *weight
82 );
83
84
85 extern double vp8_calc_ssimg
86 (
87 YV12_BUFFER_CONFIG *source,
88 YV12_BUFFER_CONFIG *dest,
89 double *ssim_y,
90 double *ssim_u,
91 double *ssim_v
92 );
93
94
95 #endif
96
97
98 #ifdef OUTPUT_YUV_SRC
99 FILE *yuv_file;
100 #endif
101
102 #if 0
103 FILE *framepsnr;
104 FILE *kf_list;
105 FILE *keyfile;
106 #endif
107
108 #if 0
109 extern int skip_true_count;
110 extern int skip_false_count;
111 #endif
112
113
114 #ifdef VP8_ENTROPY_STATS
115 extern int intra_mode_stats[10][10][10];
116 #endif
117
118 #ifdef SPEEDSTATS
119 unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
120 unsigned int tot_pm = 0;
121 unsigned int cnt_pm = 0;
122 unsigned int tot_ef = 0;
123 unsigned int cnt_ef = 0;
124 #endif
125
126 #ifdef MODE_STATS
127 extern unsigned __int64 Sectionbits[50];
128 extern int y_modes[5] ;
129 extern int uv_modes[4] ;
130 extern int b_modes[10] ;
131
132 extern int inter_y_modes[10] ;
133 extern int inter_uv_modes[4] ;
134 extern unsigned int inter_b_modes[15];
135 #endif
136
137 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
138
139 extern const int qrounding_factors[129];
140 extern const int qzbin_factors[129];
141 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
142 extern const int vp8cx_base_skip_false_prob[128];
143
144 /* Tables relating active max Q to active min Q */
145 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] =
146 {
147 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
148 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
149 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
150 0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,
151 3,3,3,3,3,3,4,4,4,5,5,5,5,5,6,6,
152 6,6,7,7,8,8,8,8,9,9,10,10,10,10,11,11,
153 11,11,12,12,13,13,13,13,14,14,15,15,15,15,16,16,
154 16,16,17,17,18,18,18,18,19,20,20,21,21,22,23,23
155 };
156 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] =
157 {
158 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
159 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
160 1,1,1,1,1,1,1,1,2,2,2,2,3,3,3,3,
161 3,3,3,3,4,4,4,4,5,5,5,5,5,5,6,6,
162 6,6,7,7,8,8,8,8,9,9,10,10,10,10,11,11,
163 11,11,12,12,13,13,13,13,14,14,15,15,15,15,16,16,
164 16,16,17,17,18,18,18,18,19,19,20,20,20,20,21,21,
165 21,21,22,22,23,23,24,25,25,26,26,27,28,28,29,30
166 };
167 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] =
168 {
169 0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,
170 3,3,3,3,4,4,4,4,5,5,5,5,6,6,6,6,
171 7,7,7,7,8,8,8,8,9,9,9,9,10,10,10,10,
172 11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,
173 19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,
174 27,27,28,28,29,29,30,30,31,31,32,32,33,33,34,34,
175 35,35,36,36,37,37,38,38,39,39,40,40,41,41,42,42,
176 43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58
177 };
178 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] =
179 {
180 0,0,0,0,1,1,1,1,1,1,2,2,3,3,3,4,
181 4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9,
182 9,10,10,10,10,11,11,11,12,12,12,12,13,13,13,14,
183 14,14,15,15,16,16,17,17,18,18,19,19,20,20,21,21,
184 22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,
185 30,30,31,31,32,32,33,33,34,34,35,35,36,36,37,37,
186 38,39,39,40,40,41,41,42,42,43,43,44,45,46,47,48,
187 49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64
188 };
189 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] =
190 {
191 0,0,0,0,1,1,1,1,1,2,2,2,3,3,3,4,
192 4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9,
193 9,10,10,10,11,11,12,12,13,13,14,14,15,15,16,16,
194 17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,
195 25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32,
196 33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,40,
197 41,41,42,42,43,44,45,46,47,48,49,50,51,52,53,54,
198 55,56,57,58,59,60,62,64,66,68,70,72,74,76,78,80
199 };
200 static const unsigned char inter_minq[QINDEX_RANGE] =
201 {
202 0,0,1,1,2,3,3,4,4,5,6,6,7,8,8,9,
203 9,10,11,11,12,13,13,14,15,15,16,17,17,18,19,20,
204 20,21,22,22,23,24,24,25,26,27,27,28,29,30,30,31,
205 32,33,33,34,35,36,36,37,38,39,39,40,41,42,42,43,
206 44,45,46,46,47,48,49,50,50,51,52,53,54,55,55,56,
207 57,58,59,60,60,61,62,63,64,65,66,67,67,68,69,70,
208 71,72,73,74,75,75,76,77,78,79,80,81,82,83,84,85,
209 86,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100
210 };
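/* Note on usage (illustrative): per the "active max Q to active min Q"
 * description above, these tables appear to be indexed by the active
 * worst-case quantizer index to derive an active minimum. For example,
 * at index 60, kf_low_motion_minq[60] is 2 while gf_high_motion_minq[60]
 * is 23, so high-motion golden frames get a much higher quality floor
 * than low-motion key frames.
 */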
211
212 #ifdef PACKET_TESTING
213 extern FILE *vpxlogc;
214 #endif
215
216 static void save_layer_context(VP8_COMP *cpi)
217 {
218 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
219
220 /* Save layer dependent coding state */
221 lc->target_bandwidth = cpi->target_bandwidth;
222 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
223 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
224 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
225 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
226 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
227 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
228 lc->buffer_level = cpi->buffer_level;
229 lc->bits_off_target = cpi->bits_off_target;
230 lc->total_actual_bits = cpi->total_actual_bits;
231 lc->worst_quality = cpi->worst_quality;
232 lc->active_worst_quality = cpi->active_worst_quality;
233 lc->best_quality = cpi->best_quality;
234 lc->active_best_quality = cpi->active_best_quality;
235 lc->ni_av_qi = cpi->ni_av_qi;
236 lc->ni_tot_qi = cpi->ni_tot_qi;
237 lc->ni_frames = cpi->ni_frames;
238 lc->avg_frame_qindex = cpi->avg_frame_qindex;
239 lc->rate_correction_factor = cpi->rate_correction_factor;
240 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
241 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
242 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
243 lc->inter_frame_target = cpi->inter_frame_target;
244 lc->total_byte_count = cpi->total_byte_count;
245 lc->filter_level = cpi->common.filter_level;
246
247 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
248
249 memcpy (lc->count_mb_ref_frame_usage,
250 cpi->mb.count_mb_ref_frame_usage,
251 sizeof(cpi->mb.count_mb_ref_frame_usage));
252 }
253
254 static void restore_layer_context(VP8_COMP *cpi, const int layer)
255 {
256 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
257
258 /* Restore layer dependent coding state */
259 cpi->current_layer = layer;
260 cpi->target_bandwidth = lc->target_bandwidth;
261 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
262 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
263 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
264 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
265 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
266 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
267 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
268 cpi->buffer_level = lc->buffer_level;
269 cpi->bits_off_target = lc->bits_off_target;
270 cpi->total_actual_bits = lc->total_actual_bits;
271 cpi->active_worst_quality = lc->active_worst_quality;
272 cpi->active_best_quality = lc->active_best_quality;
273 cpi->ni_av_qi = lc->ni_av_qi;
274 cpi->ni_tot_qi = lc->ni_tot_qi;
275 cpi->ni_frames = lc->ni_frames;
276 cpi->avg_frame_qindex = lc->avg_frame_qindex;
277 cpi->rate_correction_factor = lc->rate_correction_factor;
278 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
279 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
280 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
281 cpi->inter_frame_target = lc->inter_frame_target;
282 cpi->total_byte_count = lc->total_byte_count;
283 cpi->common.filter_level = lc->filter_level;
284
285 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
286
287 memcpy (cpi->mb.count_mb_ref_frame_usage,
288 lc->count_mb_ref_frame_usage,
289 sizeof(cpi->mb.count_mb_ref_frame_usage));
290 }
291
292 static int rescale(int val, int num, int denom)
293 {
294 int64_t llnum = num;
295 int64_t llden = denom;
296 int64_t llval = val;
297
298 return (int)(llval * llnum / llden);
299 }
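/* Worked example (illustrative values): buffer levels configured in
 * milliseconds are converted to bits by scaling against the bandwidth,
 * e.g. rescale(500, 256000, 1000) = 500 * 256000 / 1000 = 128000 bits
 * for a 500 ms buffer at 256 kbit/s. The int64_t intermediates above
 * keep that product from overflowing.
 */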
300
301 static void init_temporal_layer_context(VP8_COMP *cpi,
302 VP8_CONFIG *oxcf,
303 const int layer,
304 double prev_layer_framerate)
305 {
306 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
307
308 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
309 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
310
311 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
312 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
313 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
314
315 lc->starting_buffer_level =
316 rescale((int)(oxcf->starting_buffer_level),
317 lc->target_bandwidth, 1000);
318
319 if (oxcf->optimal_buffer_level == 0)
320 lc->optimal_buffer_level = lc->target_bandwidth / 8;
321 else
322 lc->optimal_buffer_level =
323 rescale((int)(oxcf->optimal_buffer_level),
324 lc->target_bandwidth, 1000);
325
326 if (oxcf->maximum_buffer_size == 0)
327 lc->maximum_buffer_size = lc->target_bandwidth / 8;
328 else
329 lc->maximum_buffer_size =
330 rescale((int)(oxcf->maximum_buffer_size),
331 lc->target_bandwidth, 1000);
332
333 /* Work out the average size of a frame within this layer */
334 if (layer > 0)
335 lc->avg_frame_size_for_layer =
336 (int)((cpi->oxcf.target_bitrate[layer] -
337 cpi->oxcf.target_bitrate[layer-1]) * 1000 /
338 (lc->framerate - prev_layer_framerate));
339
340 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
341 lc->active_best_quality = cpi->oxcf.best_allowed_q;
342 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
343
344 lc->buffer_level = lc->starting_buffer_level;
345 lc->bits_off_target = lc->starting_buffer_level;
346
347 lc->total_actual_bits = 0;
348 lc->ni_av_qi = 0;
349 lc->ni_tot_qi = 0;
350 lc->ni_frames = 0;
351 lc->rate_correction_factor = 1.0;
352 lc->key_frame_rate_correction_factor = 1.0;
353 lc->gf_rate_correction_factor = 1.0;
354 lc->inter_frame_target = 0;
355 }
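/* Example (hypothetical 3-layer setup): with output_framerate = 30 and
 * rate_decimator = {4, 2, 1}, the per-layer framerates are 7.5, 15 and
 * 30 fps. With target_bitrate = {100, 200, 400} kbit/s, layer 1's
 * avg_frame_size_for_layer is (200 - 100) * 1000 / (15 - 7.5) ~= 13333
 * bits, i.e. the bits this layer adds divided by the frames it adds.
 */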
356
357 // Upon a run-time change in temporal layers, reset the layer context parameters
358 // for any "new" layers. For "existing" layers, let them inherit the parameters
359 // from the previous layer state (at the same layer #). In future we may want
360 // to better map the previous layer state(s) to the "new" ones.
361 static void reset_temporal_layer_change(VP8_COMP *cpi,
362 VP8_CONFIG *oxcf,
363 const int prev_num_layers)
364 {
365 int i;
366 double prev_layer_framerate = 0;
367 const int curr_num_layers = cpi->oxcf.number_of_layers;
368 // If the previous state was 1 layer, get current layer context from cpi.
369 // We need this to set the layer context for the new layers below.
370 if (prev_num_layers == 1)
371 {
372 cpi->current_layer = 0;
373 save_layer_context(cpi);
374 }
375 for (i = 0; i < curr_num_layers; i++)
376 {
377 LAYER_CONTEXT *lc = &cpi->layer_context[i];
378 if (i >= prev_num_layers)
379 {
380 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
381 }
382 // The initial buffer levels are set based on their starting levels.
383 // We could set the buffer levels based on the previous state (normalized
384 // properly by the layer bandwidths) but we would need to keep track of
385 // the previous set of layer bandwidths (i.e., target_bitrate[i])
386 // before the layer change. For now, reset to the starting levels.
387 lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms *
388 cpi->oxcf.target_bitrate[i];
389 lc->bits_off_target = lc->buffer_level;
390 // TODO(marpan): Should we set the rate_correction_factor and
391 // active_worst/best_quality to values derived from the previous layer
392 // state (to smooth-out quality dips/rate fluctuation at transition)?
393
394 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
395 // is not set for 1 layer, and the restore_layer_context/save_context()
396 // are not called in the encoding loop, so we need to call it here to
397 // pass the layer context state to |cpi|.
398 if (curr_num_layers == 1)
399 {
400 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
401 lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms *
402 lc->target_bandwidth / 1000;
403 lc->bits_off_target = lc->buffer_level;
404 restore_layer_context(cpi, 0);
405 }
406 prev_layer_framerate = cpi->output_framerate /
407 cpi->oxcf.rate_decimator[i];
408 }
409 }
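/* Note on units (illustrative): starting_buffer_level_in_ms * target_bitrate
 * gives bits directly because the bitrate is in kbit/s, e.g.
 * 1000 ms * 200 kbit/s = 200000 bits, which is why no /1000 appears in the
 * multi-layer buffer_level assignment above, while the 1-layer case divides
 * by 1000 because target_bandwidth is already in bit/s.
 */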
410
411 static void setup_features(VP8_COMP *cpi)
412 {
413 // If segmentation is enabled, set the update flags
414 if ( cpi->mb.e_mbd.segmentation_enabled )
415 {
416 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
417 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
418 }
419 else
420 {
421 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
422 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
423 }
424
425 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
426 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
427 vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
428 vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
429 vpx_memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
430 vpx_memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
431
432 set_default_lf_deltas(cpi);
433
434 }
435
436
437 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
438
439
440 static void dealloc_compressor_data(VP8_COMP *cpi)
441 {
442 vpx_free(cpi->tplist);
443 cpi->tplist = NULL;
444
445 /* Delete last frame MV storage buffers */
446 vpx_free(cpi->lfmv);
447 cpi->lfmv = 0;
448
449 vpx_free(cpi->lf_ref_frame_sign_bias);
450 cpi->lf_ref_frame_sign_bias = 0;
451
452 vpx_free(cpi->lf_ref_frame);
453 cpi->lf_ref_frame = 0;
454
455 /* Delete segmentation map */
456 vpx_free(cpi->segmentation_map);
457 cpi->segmentation_map = 0;
458
459 vpx_free(cpi->active_map);
460 cpi->active_map = 0;
461
462 vp8_de_alloc_frame_buffers(&cpi->common);
463
464 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
465 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
466 dealloc_raw_frame_buffers(cpi);
467
468 vpx_free(cpi->tok);
469 cpi->tok = 0;
470
471 /* Structure used to monitor GF usage */
472 vpx_free(cpi->gf_active_flags);
473 cpi->gf_active_flags = 0;
474
475 /* Activity mask based per mb zbin adjustments */
476 vpx_free(cpi->mb_activity_map);
477 cpi->mb_activity_map = 0;
478
479 vpx_free(cpi->mb.pip);
480 cpi->mb.pip = 0;
481
482 #if CONFIG_MULTITHREAD
483 vpx_free(cpi->mt_current_mb_col);
484 cpi->mt_current_mb_col = NULL;
485 #endif
486 }
487
488 static void enable_segmentation(VP8_COMP *cpi)
489 {
490 /* Set the appropriate feature bit */
491 cpi->mb.e_mbd.segmentation_enabled = 1;
492 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
493 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
494 }
495 static void disable_segmentation(VP8_COMP *cpi)
496 {
497 /* Clear the appropriate feature bit */
498 cpi->mb.e_mbd.segmentation_enabled = 0;
499 }
500
501 /* Valid values for a segment are 0 to 3
502 * Segmentation map is arranged as [Rows][Columns]
503 */
504 static void set_segmentation_map(VP8_COMP *cpi, unsigned char *segmentation_map)
505 {
506 /* Copy in the new segmentation map */
507 vpx_memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols));
508
509 /* Signal that the map should be updated. */
510 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
511 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
512 }
513
514 /* The values given for each segment can be either deltas (from the default
515 * value chosen for the frame) or absolute values.
516 *
517 * Valid range for abs values is:
518 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
519 * Valid range for delta values are:
520 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
521 *
522 * abs_delta = SEGMENT_DELTADATA (deltas)
523 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
524 *
525 */
526 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data, unsigned char abs_delta)
527 {
528 cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
529 vpx_memcpy(cpi->segment_feature_data, feature_data, sizeof(cpi->segment_feature_data));
530 }
531
532
533 static void segmentation_test_function(VP8_COMP *cpi)
534 {
535 unsigned char *seg_map;
536 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
537
538 // Create a temporary map for segmentation data.
539 CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
540
541 // Set the segmentation Map
542 set_segmentation_map(cpi, seg_map);
543
544 // Activate segmentation.
545 enable_segmentation(cpi);
546
547 // Set up the quant segment data
548 feature_data[MB_LVL_ALT_Q][0] = 0;
549 feature_data[MB_LVL_ALT_Q][1] = 4;
550 feature_data[MB_LVL_ALT_Q][2] = 0;
551 feature_data[MB_LVL_ALT_Q][3] = 0;
552 // Set up the loop segment data
553 feature_data[MB_LVL_ALT_LF][0] = 0;
554 feature_data[MB_LVL_ALT_LF][1] = 0;
555 feature_data[MB_LVL_ALT_LF][2] = 0;
556 feature_data[MB_LVL_ALT_LF][3] = 0;
557
558 // Initialise the feature data structure
559 // SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1
560 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
561
562 // Delete segmentation map
563 vpx_free(seg_map);
564
565 seg_map = 0;
566 }
567
568 /* A simple function to cyclically refresh the background at a lower Q */
569 static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
570 {
571 unsigned char *seg_map = cpi->segmentation_map;
572 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
573 int i;
574 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
575 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
576
577 cpi->cyclic_refresh_q = Q / 2;
578
579 // Set every macroblock to be eligible for update.
580 // For key frame this will reset seg map to 0.
581 vpx_memset(cpi->segmentation_map, 0, mbs_in_frame);
582
583 if (cpi->common.frame_type != KEY_FRAME)
584 {
585 /* Cycle through the macro_block rows */
586 /* MB loop to set local segmentation map */
587 i = cpi->cyclic_refresh_mode_index;
588 assert(i < mbs_in_frame);
589 do
590 {
591 /* If the MB is a candidate for clean up then mark it for
592 * possible boost/refresh (segment 1). The segment id may get
593 * reset to 0 later if the MB gets coded as anything other than
594 * last frame 0,0, as only (last frame 0,0) MBs are eligible for
595 * refresh: that is to say, MBs likely to be background blocks.
596 */
597 if (cpi->cyclic_refresh_map[i] == 0)
598 {
599 seg_map[i] = 1;
600 block_count --;
601 }
602 else if (cpi->cyclic_refresh_map[i] < 0)
603 cpi->cyclic_refresh_map[i]++;
604
605 i++;
606 if (i == mbs_in_frame)
607 i = 0;
608
609 }
610 while(block_count && i != cpi->cyclic_refresh_mode_index);
611
612 cpi->cyclic_refresh_mode_index = i;
613 }
614
615 /* Activate segmentation. */
616 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
617 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
618 enable_segmentation(cpi);
619
620 /* Set up the quant segment data */
621 feature_data[MB_LVL_ALT_Q][0] = 0;
622 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
623 feature_data[MB_LVL_ALT_Q][2] = 0;
624 feature_data[MB_LVL_ALT_Q][3] = 0;
625
626 /* Set up the loop segment data */
627 feature_data[MB_LVL_ALT_LF][0] = 0;
628 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
629 feature_data[MB_LVL_ALT_LF][2] = 0;
630 feature_data[MB_LVL_ALT_LF][3] = 0;
631
632 /* Initialise the feature data structure */
633 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
634
635 }
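/* Example (illustrative): called with Q = 40 and lf_adjustment = -3, the
 * refresh segment is coded at cyclic_refresh_q = 20, i.e. segment 1 gets
 * an MB_LVL_ALT_Q delta of 20 - 40 = -20 and a loop filter delta of -3,
 * while segments 0, 2 and 3 keep the frame's base values.
 */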
636
637 static void set_default_lf_deltas(VP8_COMP *cpi)
638 {
639 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
640 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
641
642 vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
643 vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
644
645 /* Test of ref frame deltas */
646 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
647 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
648 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
649 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
650
651 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
652
653 if(cpi->oxcf.Mode == MODE_REALTIME)
654 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
655 else
656 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
657
658 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
659 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
660 }
661
662 /* Convenience macros for mapping speed and mode into a continuous
663 * range
664 */
665 #define GOOD(x) (x+1)
666 #define RT(x) (x+7)
667
668 static int speed_map(int speed, const int *map)
669 {
670 int res;
671
672 do
673 {
674 res = *map++;
675 } while(speed >= *map++);
676 return res;
677 }
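/* The map arrays below are (value, next-speed-threshold) pairs terminated
 * by INT_MAX: speed_map() returns the first value whose accompanying
 * threshold exceeds the given speed. For example, with
 * thresh_mult_map_znn = {0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, ...},
 * good-quality speeds below 2 map to 0, good-quality speed 2 maps to 1500,
 * and the first real-time speeds map to 1000.
 */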
678
679 static const int thresh_mult_map_znn[] = {
680 /* map common to zero, nearest, and near */
681 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
682 };
683
684 static const int thresh_mult_map_vhpred[] = {
685 1000, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(1), 2000,
686 RT(7), INT_MAX, INT_MAX
687 };
688
689 static const int thresh_mult_map_bpred[] = {
690 2000, GOOD(0), 2500, GOOD(2), 5000, GOOD(3), 7500, RT(0), 2500, RT(1), 5000,
691 RT(6), INT_MAX, INT_MAX
692 };
693
694 static const int thresh_mult_map_tm[] = {
695 1000, GOOD(2), 1500, GOOD(3), 2000, RT(0), 0, RT(1), 1000, RT(2), 2000,
696 RT(7), INT_MAX, INT_MAX
697 };
698
699 static const int thresh_mult_map_new1[] = {
700 1000, GOOD(2), 2000, RT(0), 2000, INT_MAX
701 };
702
703 static const int thresh_mult_map_new2[] = {
704 1000, GOOD(2), 2000, GOOD(3), 2500, GOOD(5), 4000, RT(0), 2000, RT(2), 2500,
705 RT(5), 4000, INT_MAX
706 };
707
708 static const int thresh_mult_map_split1[] = {
709 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
710 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
711 };
712
713 static const int thresh_mult_map_split2[] = {
714 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
715 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
716 };
717
718 static const int mode_check_freq_map_zn2[] = {
719 /* {zero,nearest}{2,3} */
720 0, RT(10), 1<<1, RT(11), 1<<2, RT(12), 1<<3, INT_MAX
721 };
722
723 static const int mode_check_freq_map_vhbpred[] = {
724 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX
725 };
726
727 static const int mode_check_freq_map_near2[] = {
728 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(10), 1<<2, RT(11), 1<<3, RT(12), 1<<4,
729 INT_MAX
730 };
731
732 static const int mode_check_freq_map_new1[] = {
733 0, RT(10), 1<<1, RT(11), 1<<2, RT(12), 1<<3, INT_MAX
734 };
735
736 static const int mode_check_freq_map_new2[] = {
737 0, GOOD(5), 4, RT(0), 0, RT(3), 4, RT(10), 1<<3, RT(11), 1<<4, RT(12), 1<<5,
738 INT_MAX
739 };
740
741 static const int mode_check_freq_map_split1[] = {
742 0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX
743 };
744
745 static const int mode_check_freq_map_split2[] = {
746 0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX
747 };
748
749 void vp8_set_speed_features(VP8_COMP *cpi)
750 {
751 SPEED_FEATURES *sf = &cpi->sf;
752 int Mode = cpi->compressor_speed;
753 int Speed = cpi->Speed;
754 int i;
755 VP8_COMMON *cm = &cpi->common;
756 int last_improved_quant = sf->improved_quant;
757 int ref_frames;
758
759 /* Initialise default mode frequency sampling variables */
760 for (i = 0; i < MAX_MODES; i ++)
761 {
762 cpi->mode_check_freq[i] = 0;
763 }
764
765 cpi->mb.mbs_tested_so_far = 0;
766
767 /* best quality defaults */
768 sf->RD = 1;
769 sf->search_method = NSTEP;
770 sf->improved_quant = 1;
771 sf->improved_dct = 1;
772 sf->auto_filter = 1;
773 sf->recode_loop = 1;
774 sf->quarter_pixel_search = 1;
775 sf->half_pixel_search = 1;
776 sf->iterative_sub_pixel = 1;
777 sf->optimize_coefficients = 1;
778 sf->use_fastquant_for_pick = 0;
779 sf->no_skip_block4x4_search = 1;
780
781 sf->first_step = 0;
782 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
783 sf->improved_mv_pred = 1;
784
785 /* default thresholds to 0 */
786 for (i = 0; i < MAX_MODES; i++)
787 sf->thresh_mult[i] = 0;
788
789 /* Count enabled references */
790 ref_frames = 1;
791 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
792 ref_frames++;
793 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
794 ref_frames++;
795 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
796 ref_frames++;
797
798 /* Convert speed to continuous range, with clamping */
799 if (Mode == 0)
800 Speed = 0;
801 else if (Mode == 2)
802 Speed = RT(Speed);
803 else
804 {
805 if (Speed > 5)
806 Speed = 5;
807 Speed = GOOD(Speed);
808 }
809
810 sf->thresh_mult[THR_ZERO1] =
811 sf->thresh_mult[THR_NEAREST1] =
812 sf->thresh_mult[THR_NEAR1] =
813 sf->thresh_mult[THR_DC] = 0; /* always */
814
815 sf->thresh_mult[THR_ZERO2] =
816 sf->thresh_mult[THR_ZERO3] =
817 sf->thresh_mult[THR_NEAREST2] =
818 sf->thresh_mult[THR_NEAREST3] =
819 sf->thresh_mult[THR_NEAR2] =
820 sf->thresh_mult[THR_NEAR3] = speed_map(Speed, thresh_mult_map_znn);
821
822 sf->thresh_mult[THR_V_PRED] =
823 sf->thresh_mult[THR_H_PRED] = speed_map(Speed, thresh_mult_map_vhpred);
824 sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
825 sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
826 sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
827 sf->thresh_mult[THR_NEW2] =
828 sf->thresh_mult[THR_NEW3] = speed_map(Speed, thresh_mult_map_new2);
829 sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
830 sf->thresh_mult[THR_SPLIT2] =
831 sf->thresh_mult[THR_SPLIT3] = speed_map(Speed, thresh_mult_map_split2);
832
833 cpi->mode_check_freq[THR_ZERO1] =
834 cpi->mode_check_freq[THR_NEAREST1] =
835 cpi->mode_check_freq[THR_NEAR1] =
836 cpi->mode_check_freq[THR_TM] =
837 cpi->mode_check_freq[THR_DC] = 0; /* always */
838
839 cpi->mode_check_freq[THR_ZERO2] =
840 cpi->mode_check_freq[THR_ZERO3] =
841 cpi->mode_check_freq[THR_NEAREST2] =
842 cpi->mode_check_freq[THR_NEAREST3] = speed_map(Speed,
843 mode_check_freq_map_zn2);
844
845 cpi->mode_check_freq[THR_NEAR2] =
846 cpi->mode_check_freq[THR_NEAR3] = speed_map(Speed,
847 mode_check_freq_map_near2);
848
849 cpi->mode_check_freq[THR_V_PRED] =
850 cpi->mode_check_freq[THR_H_PRED] =
851 cpi->mode_check_freq[THR_B_PRED] = speed_map(Speed,
852 mode_check_freq_map_vhbpred);
853 cpi->mode_check_freq[THR_NEW1] = speed_map(Speed,
854 mode_check_freq_map_new1);
855 cpi->mode_check_freq[THR_NEW2] =
856 cpi->mode_check_freq[THR_NEW3] = speed_map(Speed,
857 mode_check_freq_map_new2);
858 cpi->mode_check_freq[THR_SPLIT1] = speed_map(Speed,
859 mode_check_freq_map_split1);
860 cpi->mode_check_freq[THR_SPLIT2] =
861 cpi->mode_check_freq[THR_SPLIT3] = speed_map(Speed,
862 mode_check_freq_map_split2);
863 Speed = cpi->Speed;
864 switch (Mode)
865 {
866 #if !(CONFIG_REALTIME_ONLY)
867 case 0: /* best quality mode */
868 sf->first_step = 0;
869 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
870 break;
871 case 1:
872 case 3:
873 if (Speed > 0)
874 {
875 /* Disable coefficient optimization above speed 0 */
876 sf->optimize_coefficients = 0;
877 sf->use_fastquant_for_pick = 1;
878 sf->no_skip_block4x4_search = 0;
879
880 sf->first_step = 1;
881 }
882
883 if (Speed > 2)
884 {
885 sf->improved_quant = 0;
886 sf->improved_dct = 0;
887
888 /* Only do recode loop on key frames, golden frames and
889 * alt ref frames
890 */
891 sf->recode_loop = 2;
892
893 }
894
895 if (Speed > 3)
896 {
897 sf->auto_filter = 1;
898 sf->recode_loop = 0; /* recode loop off */
899 sf->RD = 0; /* Turn rd off */
900
901 }
902
903 if (Speed > 4)
904 {
905 sf->auto_filter = 0; /* Faster selection of loop filter */
906 }
907
908 break;
909 #endif
910 case 2:
911 sf->optimize_coefficients = 0;
912 sf->recode_loop = 0;
913 sf->auto_filter = 1;
914 sf->iterative_sub_pixel = 1;
915 sf->search_method = NSTEP;
916
917 if (Speed > 0)
918 {
919 sf->improved_quant = 0;
920 sf->improved_dct = 0;
921
922 sf->use_fastquant_for_pick = 1;
923 sf->no_skip_block4x4_search = 0;
924 sf->first_step = 1;
925 }
926
927 if (Speed > 2)
928 sf->auto_filter = 0; /* Faster selection of loop filter */
929
930 if (Speed > 3)
931 {
932 sf->RD = 0;
933 sf->auto_filter = 1;
934 }
935
936 if (Speed > 4)
937 {
938 sf->auto_filter = 0; /* Faster selection of loop filter */
939 sf->search_method = HEX;
940 sf->iterative_sub_pixel = 0;
941 }
942
943 if (Speed > 6)
944 {
945 unsigned int sum = 0;
946 unsigned int total_mbs = cm->MBs;
947 int thresh;
948 unsigned int total_skip;
949
950 int min = 2000;
951
952 if (cpi->oxcf.encode_breakout > 2000)
953 min = cpi->oxcf.encode_breakout;
954
955 min >>= 7;
956
957 for (i = 0; i < min; i++)
958 {
959 sum += cpi->mb.error_bins[i];
960 }
961
962 total_skip = sum;
963 sum = 0;
964
965 /* i starts from 2 to make sure thresh started from 2048 */
966 for (; i < 1024; i++)
967 {
968 sum += cpi->mb.error_bins[i];
969
970 if (10 * sum >= (unsigned int)(cpi->Speed - 6)*(total_mbs - total_skip))
971 break;
972 }
973
974 i--;
975 thresh = (i << 7);
976
977 if (thresh < 2000)
978 thresh = 2000;
979
980 if (ref_frames > 1)
981 {
982 sf->thresh_mult[THR_NEW1 ] = thresh;
983 sf->thresh_mult[THR_NEAREST1 ] = thresh >> 1;
984 sf->thresh_mult[THR_NEAR1 ] = thresh >> 1;
985 }
986
987 if (ref_frames > 2)
988 {
989 sf->thresh_mult[THR_NEW2] = thresh << 1;
990 sf->thresh_mult[THR_NEAREST2 ] = thresh;
991 sf->thresh_mult[THR_NEAR2 ] = thresh;
992 }
993
994 if (ref_frames > 3)
995 {
996 sf->thresh_mult[THR_NEW3] = thresh << 1;
997 sf->thresh_mult[THR_NEAREST3 ] = thresh;
998 sf->thresh_mult[THR_NEAR3 ] = thresh;
999 }
1000
1001 sf->improved_mv_pred = 0;
1002 }
1003
1004 if (Speed > 8)
1005 sf->quarter_pixel_search = 0;
1006
1007 if(cm->version == 0)
1008 {
1009 cm->filter_type = NORMAL_LOOPFILTER;
1010
1011 if (Speed >= 14)
1012 cm->filter_type = SIMPLE_LOOPFILTER;
1013 }
1014 else
1015 {
1016 cm->filter_type = SIMPLE_LOOPFILTER;
1017 }
1018
1019 /* This has a big hit on quality. Last resort */
1020 if (Speed >= 15)
1021 sf->half_pixel_search = 0;
1022
1023 vpx_memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
1024
1025 }; /* switch */
1026
1027 /* Slow quant, dct and trellis not worthwhile for first pass
1028 * so make sure they are always turned off.
1029 */
1030 if ( cpi->pass == 1 )
1031 {
1032 sf->improved_quant = 0;
1033 sf->optimize_coefficients = 0;
1034 sf->improved_dct = 0;
1035 }
1036
1037 if (cpi->sf.search_method == NSTEP)
1038 {
1039 vp8_init3smotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
1040 }
1041 else if (cpi->sf.search_method == DIAMOND)
1042 {
1043 vp8_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
1044 }
1045
1046 if (cpi->sf.improved_dct)
1047 {
1048 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1049 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1050 }
1051 else
1052 {
1053 /* No fast FDCT defined for any platform at this time. */
1054 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1055 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1056 }
1057
1058 cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
1059
1060 if (cpi->sf.improved_quant)
1061 {
1062 cpi->mb.quantize_b = vp8_regular_quantize_b;
1063 cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair;
1064 }
1065 else
1066 {
1067 cpi->mb.quantize_b = vp8_fast_quantize_b;
1068 cpi->mb.quantize_b_pair = vp8_fast_quantize_b_pair;
1069 }
1070 if (cpi->sf.improved_quant != last_improved_quant)
1071 vp8cx_init_quantizer(cpi);
1072
1073 if (cpi->sf.iterative_sub_pixel == 1)
1074 {
1075 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
1076 }
1077 else if (cpi->sf.quarter_pixel_search)
1078 {
1079 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
1080 }
1081 else if (cpi->sf.half_pixel_search)
1082 {
1083 cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
1084 }
1085 else
1086 {
1087 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1088 }
1089
1090 if (cpi->sf.optimize_coefficients == 1 && cpi->pass!=1)
1091 cpi->mb.optimize = 1;
1092 else
1093 cpi->mb.optimize = 0;
1094
1095 if (cpi->common.full_pixel)
1096 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1097
1098 #ifdef SPEEDSTATS
1099 frames_at_speed[cpi->Speed]++;
1100 #endif
1101 }
1102 #undef GOOD
1103 #undef RT
1104
1105 static void alloc_raw_frame_buffers(VP8_COMP *cpi)
1106 {
1107 #if VP8_TEMPORAL_ALT_REF
1108 int width = (cpi->oxcf.Width + 15) & ~15;
1109 int height = (cpi->oxcf.Height + 15) & ~15;
1110 #endif
1111
1112 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1113 cpi->oxcf.lag_in_frames);
1114 if(!cpi->lookahead)
1115 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1116 "Failed to allocate lag buffers");
1117
1118 #if VP8_TEMPORAL_ALT_REF
1119
1120 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer,
1121 width, height, VP8BORDERINPIXELS))
1122 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1123 "Failed to allocate altref buffer");
1124
1125 #endif
1126 }
1127
1128
1129 static void dealloc_raw_frame_buffers(VP8_COMP *cpi)
1130 {
1131 #if VP8_TEMPORAL_ALT_REF
1132 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1133 #endif
1134 vp8_lookahead_destroy(cpi->lookahead);
1135 }
1136
1137
1138 static int vp8_alloc_partition_data(VP8_COMP *cpi)
1139 {
1140 vpx_free(cpi->mb.pip);
1141
1142 cpi->mb.pip = vpx_calloc((cpi->common.mb_cols + 1) *
1143 (cpi->common.mb_rows + 1),
1144 sizeof(PARTITION_INFO));
1145 if(!cpi->mb.pip)
1146 return 1;
1147
1148 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
1149
1150 return 0;
1151 }
1152
1153 void vp8_alloc_compressor_data(VP8_COMP *cpi)
1154 {
1155 VP8_COMMON *cm = & cpi->common;
1156
1157 int width = cm->Width;
1158 int height = cm->Height;
1159
1160 if (vp8_alloc_frame_buffers(cm, width, height))
1161 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1162 "Failed to allocate frame buffers");
1163
1164 if (vp8_alloc_partition_data(cpi))
1165 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1166 "Failed to allocate partition data");
1167
1168
1169 if ((width & 0xf) != 0)
1170 width += 16 - (width & 0xf);
1171
1172 if ((height & 0xf) != 0)
1173 height += 16 - (height & 0xf);
1174
1175
1176 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame,
1177 width, height, VP8BORDERINPIXELS))
1178 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1179 "Failed to allocate last frame buffer");
1180
1181 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source,
1182 width, height, VP8BORDERINPIXELS))
1183 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1184 "Failed to allocate scaled source buffer");
1185
1186 vpx_free(cpi->tok);
1187
1188 {
1189 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
1190 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
1191 #else
1192 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1193 #endif
1194 CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
1195 }
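/* Sizing example (illustrative): the non-realtime path reserves
 * 24 * 16 = 384 token entries per macroblock, so a 640x480 frame
 * (40 x 30 MBs) allocates 460800 entries here.
 */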
1196
1197 /* Data used for real time vc mode to see if gf needs refreshing */
1198 cpi->zeromv_count = 0;
1199
1200
1201 /* Structures used to monitor GF usage */
1202 vpx_free(cpi->gf_active_flags);
1203 CHECK_MEM_ERROR(cpi->gf_active_flags,
1204 vpx_calloc(sizeof(*cpi->gf_active_flags),
1205 cm->mb_rows * cm->mb_cols));
1206 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1207
1208 vpx_free(cpi->mb_activity_map);
1209 CHECK_MEM_ERROR(cpi->mb_activity_map,
1210 vpx_calloc(sizeof(*cpi->mb_activity_map),
1211 cm->mb_rows * cm->mb_cols));
1212
1213 /* allocate memory for storing last frame's MVs for MV prediction. */
1214 vpx_free(cpi->lfmv);
1215 CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2),
1216 sizeof(*cpi->lfmv)));
1217 vpx_free(cpi->lf_ref_frame_sign_bias);
1218 CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
1219 vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2),
1220 sizeof(*cpi->lf_ref_frame_sign_bias)));
1221 vpx_free(cpi->lf_ref_frame);
1222 CHECK_MEM_ERROR(cpi->lf_ref_frame,
1223 vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2),
1224 sizeof(*cpi->lf_ref_frame)));
1225
1226 /* Create the encoder segmentation map and set all entries to 0 */
1227 vpx_free(cpi->segmentation_map);
1228 CHECK_MEM_ERROR(cpi->segmentation_map,
1229 vpx_calloc(cm->mb_rows * cm->mb_cols,
1230 sizeof(*cpi->segmentation_map)));
1231 cpi->cyclic_refresh_mode_index = 0;
1232 vpx_free(cpi->active_map);
1233 CHECK_MEM_ERROR(cpi->active_map,
1234 vpx_calloc(cm->mb_rows * cm->mb_cols,
1235 sizeof(*cpi->active_map)));
1236 vpx_memset(cpi->active_map , 1, (cm->mb_rows * cm->mb_cols));
1237
1238 #if CONFIG_MULTITHREAD
1239 if (width < 640)
1240 cpi->mt_sync_range = 1;
1241 else if (width <= 1280)
1242 cpi->mt_sync_range = 4;
1243 else if (width <= 2560)
1244 cpi->mt_sync_range = 8;
1245 else
1246 cpi->mt_sync_range = 16;
1247
1248 if (cpi->oxcf.multi_threaded > 1)
1249 {
1250 vpx_free(cpi->mt_current_mb_col);
1251 CHECK_MEM_ERROR(cpi->mt_current_mb_col,
1252 vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
1253 }
1254
1255 #endif
1256
1257 vpx_free(cpi->tplist);
1258 CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1259 }
1260
1261
1262 /* Quant MOD */
1263 static const int q_trans[] =
1264 {
1265 0, 1, 2, 3, 4, 5, 7, 8,
1266 9, 10, 12, 13, 15, 17, 18, 19,
1267 20, 21, 23, 24, 25, 26, 27, 28,
1268 29, 30, 31, 33, 35, 37, 39, 41,
1269 43, 45, 47, 49, 51, 53, 55, 57,
1270 59, 61, 64, 67, 70, 73, 76, 79,
1271 82, 85, 88, 91, 94, 97, 100, 103,
1272 106, 109, 112, 115, 118, 121, 124, 127,
1273 };
1274
1275 int vp8_reverse_trans(int x)
1276 {
1277 int i;
1278
1279 for (i = 0; i < 64; i++)
1280 if (q_trans[i] >= x)
1281 return i;
1282
1283 return 63;
1284 }
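/* Example (illustrative): q_trans[] maps the user-visible 0-63 quantizer
 * scale onto the internal 0-127 index, and vp8_reverse_trans() inverts it
 * by returning the smallest entry not below the given index, e.g.
 * vp8_reverse_trans(50) == 36 because q_trans[36] == 51 is the first
 * value >= 50.
 */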
1285 void vp8_new_framerate(VP8_COMP *cpi, double framerate)
1286 {
1287 if(framerate < .1)
1288 framerate = 30;
1289
1290 cpi->framerate = framerate;
1291 cpi->output_framerate = framerate;
1292 cpi->per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth /
1293 cpi->output_framerate);
1294 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
1295 cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
1296 cpi->oxcf.two_pass_vbrmin_section / 100);
1297
1298 /* Set Maximum gf/arf interval */
1299 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1300
1301 if(cpi->max_gf_interval < 12)
1302 cpi->max_gf_interval = 12;
1303
1304 /* Extended interval for genuinely static scenes */
1305 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1306
1307 /* Special conditions when alt ref frame enabled in lagged compress mode */
1308 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames)
1309 {
1310 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1)
1311 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1312
1313 if (cpi->twopass.static_scene_max_gf_interval > cpi->oxcf.lag_in_frames - 1)
1314 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1315 }
1316
1317 if ( cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval )
1318 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
1319 }
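/* Example (illustrative): at 30 fps with a 256000 bit/s target,
 * per_frame_bandwidth is 256000 / 30 = 8533 bits, and max_gf_interval
 * starts at 30 / 2 + 2 = 17 frames before the lag and static-scene
 * clamps above are applied.
 */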
1320
1321
1322 static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
1323 {
1324 VP8_COMMON *cm = &cpi->common;
1325
1326 cpi->oxcf = *oxcf;
1327
1328 cpi->auto_gold = 1;
1329 cpi->auto_adjust_gold_quantizer = 1;
1330
1331 cm->version = oxcf->Version;
1332 vp8_setup_version(cm);
1333
1334 /* frame rate is not available on the first frame, as it's derived from
1335 * the observed timestamps. The actual value used here doesn't matter
1336 * too much, as it will adapt quickly. If the reciprocal of the timebase
1337 * seems like a reasonable framerate, then use that as a guess, otherwise
1338 * use 30.
1339 */
1340 cpi->framerate = (double)(oxcf->timebase.den) /
1341 (double)(oxcf->timebase.num);
1342
1343 if (cpi->framerate > 180)
1344 cpi->framerate = 30;
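/* e.g. a 1/30 timebase yields an initial guess of 30 fps, while a
 * 1/90000 media timebase would give 90000 and is clamped to 30 here. */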
1345
1346 cpi->ref_framerate = cpi->framerate;
1347
1348 /* change includes all joint functionality */
1349 vp8_change_config(cpi, oxcf);
1350
1351 /* Initialize active best and worst q and average q values. */
1352 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1353 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1354 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1355
1356 /* Initialise the starting buffer levels */
1357 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1358 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1359
1360 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1361 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1362 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1363 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1364
1365 cpi->total_actual_bits = 0;
1366 cpi->total_target_vs_actual = 0;
1367
1368 /* Temporal scalability */
1369 if (cpi->oxcf.number_of_layers > 1)
1370 {
1371 unsigned int i;
1372 double prev_layer_framerate=0;
1373
1374 for (i=0; i<cpi->oxcf.number_of_layers; i++)
1375 {
1376 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1377 prev_layer_framerate = cpi->output_framerate /
1378 cpi->oxcf.rate_decimator[i];
1379 }
1380 }
1381
1382 #if VP8_TEMPORAL_ALT_REF
1383 {
1384 int i;
1385
1386 cpi->fixed_divide[0] = 0;
1387
1388 for (i = 1; i < 512; i++)
1389 cpi->fixed_divide[i] = 0x80000 / i;
1390 }
1391 #endif
1392 }
1393
1394 static void update_layer_contexts (VP8_COMP *cpi)
1395 {
1396 VP8_CONFIG *oxcf = &cpi->oxcf;
1397
1398 /* Update snapshots of the layer contexts to reflect new parameters */
1399 if (oxcf->number_of_layers > 1)
1400 {
1401 unsigned int i;
1402 double prev_layer_framerate=0;
1403
1404 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1405 for (i=0; i<oxcf->number_of_layers; i++)
1406 {
1407 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1408
1409 lc->framerate =
1410 cpi->ref_framerate / oxcf->rate_decimator[i];
1411 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1412
1413 lc->starting_buffer_level = rescale(
1414 (int)oxcf->starting_buffer_level_in_ms,
1415 lc->target_bandwidth, 1000);
1416
1417 if (oxcf->optimal_buffer_level == 0)
1418 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1419 else
1420 lc->optimal_buffer_level = rescale(
1421 (int)oxcf->optimal_buffer_level_in_ms,
1422 lc->target_bandwidth, 1000);
1423
1424 if (oxcf->maximum_buffer_size == 0)
1425 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1426 else
1427 lc->maximum_buffer_size = rescale(
1428 (int)oxcf->maximum_buffer_size_in_ms,
1429 lc->target_bandwidth, 1000);
1430
1431 /* Work out the average size of a frame within this layer */
1432 if (i > 0)
1433 lc->avg_frame_size_for_layer =
1434 (int)((oxcf->target_bitrate[i] -
1435 oxcf->target_bitrate[i-1]) * 1000 /
1436 (lc->framerate - prev_layer_framerate));
1437
1438 prev_layer_framerate = lc->framerate;
1439 }
1440 }
1441 }
1442
1443 void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
1444 {
1445 VP8_COMMON *cm = &cpi->common;
1446 int last_w, last_h;
1447 uint32_t prev_number_of_layers;
1448
1449 if (!cpi)
1450 return;
1451
1452 if (!oxcf)
1453 return;
1454
1455 #if CONFIG_MULTITHREAD
1456 /* wait for the last picture loopfilter thread done */
1457 if (cpi->b_lpf_running)
1458 {
1459 sem_wait(&cpi->h_event_end_lpf);
1460 cpi->b_lpf_running = 0;
1461 }
1462 #endif
1463
1464 if (cm->version != oxcf->Version)
1465 {
1466 cm->version = oxcf->Version;
1467 vp8_setup_version(cm);
1468 }
1469
1470 last_w = cpi->oxcf.Width;
1471 last_h = cpi->oxcf.Height;
1472 prev_number_of_layers = cpi->oxcf.number_of_layers;
1473
1474 cpi->oxcf = *oxcf;
1475
1476 switch (cpi->oxcf.Mode)
1477 {
1478
1479 case MODE_REALTIME:
1480 cpi->pass = 0;
1481 cpi->compressor_speed = 2;
1482
1483 if (cpi->oxcf.cpu_used < -16)
1484 {
1485 cpi->oxcf.cpu_used = -16;
1486 }
1487
1488 if (cpi->oxcf.cpu_used > 16)
1489 cpi->oxcf.cpu_used = 16;
1490
1491 break;
1492
1493 case MODE_GOODQUALITY:
1494 cpi->pass = 0;
1495 cpi->compressor_speed = 1;
1496
1497 if (cpi->oxcf.cpu_used < -5)
1498 {
1499 cpi->oxcf.cpu_used = -5;
1500 }
1501
1502 if (cpi->oxcf.cpu_used > 5)
1503 cpi->oxcf.cpu_used = 5;
1504
1505 break;
1506
1507 case MODE_BESTQUALITY:
1508 cpi->pass = 0;
1509 cpi->compressor_speed = 0;
1510 break;
1511
1512 case MODE_FIRSTPASS:
1513 cpi->pass = 1;
1514 cpi->compressor_speed = 1;
1515 break;
1516 case MODE_SECONDPASS:
1517 cpi->pass = 2;
1518 cpi->compressor_speed = 1;
1519
1520 if (cpi->oxcf.cpu_used < -5)
1521 {
1522 cpi->oxcf.cpu_used = -5;
1523 }
1524
1525 if (cpi->oxcf.cpu_used > 5)
1526 cpi->oxcf.cpu_used = 5;
1527
1528 break;
1529 case MODE_SECONDPASS_BEST:
1530 cpi->pass = 2;
1531 cpi->compressor_speed = 0;
1532 break;
1533 }
1534
1535 if (cpi->pass == 0)
1536 cpi->auto_worst_q = 1;
1537
1538 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1539 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1540 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1541
1542 if (oxcf->fixed_q >= 0)
1543 {
1544 if (oxcf->worst_allowed_q < 0)
1545 cpi->oxcf.fixed_q = q_trans[0];
1546 else
1547 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1548
1549 if (oxcf->alt_q < 0)
1550 cpi->oxcf.alt_q = q_trans[0];
1551 else
1552 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1553
1554 if (oxcf->key_q < 0)
1555 cpi->oxcf.key_q = q_trans[0];
1556 else
1557 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1558
1559 if (oxcf->gold_q < 0)
1560 cpi->oxcf.gold_q = q_trans[0];
1561 else
1562 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1563
1564 }
1565
1566 cpi->baseline_gf_interval =
1567 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1568
1569 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1570
1571 cm->refresh_golden_frame = 0;
1572 cm->refresh_last_frame = 1;
1573 cm->refresh_entropy_probs = 1;
1574
1575 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
1576 cpi->oxcf.token_partitions = 3;
1577 #endif
1578
1579 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3)
1580 cm->multi_token_partition =
1581 (TOKEN_PARTITION) cpi->oxcf.token_partitions;
1582
1583 setup_features(cpi);
1584
1585 {
1586 int i;
1587
1588 for (i = 0; i < MAX_MB_SEGMENTS; i++)
1589 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1590 }
1591
1592 /* At the moment the first order values may not be > MAXQ */
1593 if (cpi->oxcf.fixed_q > MAXQ)
1594 cpi->oxcf.fixed_q = MAXQ;
1595
1596 /* local file playback mode == really big buffer */
1597 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK)
1598 {
1599 cpi->oxcf.starting_buffer_level = 60000;
1600 cpi->oxcf.optimal_buffer_level = 60000;
1601 cpi->oxcf.maximum_buffer_size = 240000;
1602 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1603 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1604 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1605 }
1606
1607 /* Convert target bandwidth from Kbit/s to Bit/s */
1608 cpi->oxcf.target_bandwidth *= 1000;
1609
1610 cpi->oxcf.starting_buffer_level =
1611 rescale((int)cpi->oxcf.starting_buffer_level,
1612 cpi->oxcf.target_bandwidth, 1000);
1613
1614 /* Set or reset optimal and maximum buffer levels. */
1615 if (cpi->oxcf.optimal_buffer_level == 0)
1616 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1617 else
1618 cpi->oxcf.optimal_buffer_level =
1619 rescale((int)cpi->oxcf.optimal_buffer_level,
1620 cpi->oxcf.target_bandwidth, 1000);
1621
1622 if (cpi->oxcf.maximum_buffer_size == 0)
1623 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1624 else
1625 cpi->oxcf.maximum_buffer_size =
1626 rescale((int)cpi->oxcf.maximum_buffer_size,
1627 cpi->oxcf.target_bandwidth, 1000);
1628 // Under a configuration change, where maximum_buffer_size may change,
1629 // keep buffer level clipped to the maximum allowed buffer size.
1630 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1631 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1632 cpi->buffer_level = cpi->bits_off_target;
1633 }
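/* Worked example (illustrative): an 800 kbit/s target becomes 800000 bit/s
 * above, so a 4000 ms starting_buffer_level is rescaled to
 * 4000 * 800000 / 1000 = 3200000 bits, and the optimal/maximum levels
 * default to target_bandwidth / 8 = 100000 bits when left at zero.
 */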
1634
1635 /* Set up frame rate and related parameters rate control values. */
1636 vp8_new_framerate(cpi, cpi->framerate);
1637
1638 /* Set absolute upper and lower quality limits */
1639 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1640 cpi->best_quality = cpi->oxcf.best_allowed_q;
1641
1642 /* active values should only be modified if out of new range */
1643 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q)
1644 {
1645 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1646 }
1647 /* less likely */
1648 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q)
1649 {
1650 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1651 }
1652 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q)
1653 {
1654 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1655 }
1656 /* less likely */
1657 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q)
1658 {
1659 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1660 }
1661
1662 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1663
1664 cpi->cq_target_quality = cpi->oxcf.cq_level;
1665
1666 /* Only allow dropped frames in buffered mode */
1667 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1668
1669 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1670
1671 // Check if the number of temporal layers has changed, and if so reset the
1672 // pattern counter and set/initialize the temporal layer context for the
1673 // new layer configuration.
1674 if (cpi->oxcf.number_of_layers != prev_number_of_layers)
1675 {
1676 // If the number of temporal layers are changed we must start at the
1677 // base of the pattern cycle, so reset temporal_pattern_counter.
1678 cpi->temporal_pattern_counter = 0;
1679 reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
1680 }
1681
1682 cm->Width = cpi->oxcf.Width;
1683 cm->Height = cpi->oxcf.Height;
1684
1685 /* TODO(jkoleszar): if an internal spatial resampling is active,
1686 * and we downsize the input image, maybe we should clear the
1687 * internal scale immediately rather than waiting for it to
1688 * correct.
1689 */
1690
1691 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1692 if (cpi->oxcf.Sharpness > 7)
1693 cpi->oxcf.Sharpness = 7;
1694
1695 cm->sharpness_level = cpi->oxcf.Sharpness;
1696
1697 if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL)
1698 {
1699 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
1700 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
1701
1702 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1703 Scale2Ratio(cm->vert_scale, &vr, &vs);
1704
1705 /* always go to the next whole number */
1706 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1707 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1708 }
1709
1710 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height)
1711 cpi->force_next_frame_intra = 1;
1712
1713 if (((cm->Width + 15) & 0xfffffff0) !=
1714 (uint32_t)(cm->yv12_fb[cm->lst_fb_idx].y_width) ||
1715 ((cm->Height + 15) & 0xfffffff0) !=
1716 (uint32_t)(cm->yv12_fb[cm->lst_fb_idx].y_height) ||
1717 cm->yv12_fb[cm->lst_fb_idx].y_width == 0)
1718 {
1719 dealloc_raw_frame_buffers(cpi);
1720 alloc_raw_frame_buffers(cpi);
1721 vp8_alloc_compressor_data(cpi);
1722 }
1723
1724 if (cpi->oxcf.fixed_q >= 0)
1725 {
1726 cpi->last_q[0] = cpi->oxcf.fixed_q;
1727 cpi->last_q[1] = cpi->oxcf.fixed_q;
1728 }
1729
1730 cpi->Speed = cpi->oxcf.cpu_used;
1731
1732 /* Force allow_lag to 0 if lag_in_frames is 0 */
1733 if (cpi->oxcf.lag_in_frames == 0)
1734 {
1735 cpi->oxcf.allow_lag = 0;
1736 }
1737 /* Limit on lag buffers as these are not currently dynamically allocated */
1738 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS)
1739 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1740
1741 /* YX Temp */
1742 cpi->alt_ref_source = NULL;
1743 cpi->is_src_frame_alt_ref = 0;
1744
1745 #if CONFIG_TEMPORAL_DENOISING
1746 if (cpi->oxcf.noise_sensitivity)
1747 {
1748 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc)
1749 {
1750 int width = (cpi->oxcf.Width + 15) & ~15;
1751 int height = (cpi->oxcf.Height + 15) & ~15;
1752 vp8_denoiser_allocate(&cpi->denoiser, width, height);
1753 }
1754 }
1755 #endif
1756
1757 #if 0
1758 /* Experimental RD Code */
1759 cpi->frame_distortion = 0;
1760 cpi->last_frame_distortion = 0;
1761 #endif
1762
1763 }
1764
1765 #define M_LOG2_E 0.693147180559945309417
1766 #define log2f(x) (log (x) / (float) M_LOG2_E)
1767 static void cal_mvsadcosts(int *mvsadcost[2])
1768 {
1769 int i = 1;
1770
1771 mvsadcost [0] [0] = 300;
1772 mvsadcost [1] [0] = 300;
1773
1774 do
1775 {
1776 double z = 256 * (2 * (log2f(8 * i) + .6));
1777 mvsadcost [0][i] = (int) z;
1778 mvsadcost [1][i] = (int) z;
1779 mvsadcost [0][-i] = (int) z;
1780 mvsadcost [1][-i] = (int) z;
1781 }
1782 while (++i <= mvfp_max);
1783 }
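/* Example (illustrative): for i = 1 the cost is
 * 256 * (2 * (log2f(8) + .6)) = 256 * 7.2 = 1843, rising to 2355 at i = 2,
 * so the SAD-based motion search pays a slowly growing, roughly
 * logarithmic penalty for larger MV components in either direction.
 */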
1784
1785 struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
1786 {
1787 int i;
1788
1789 VP8_COMP *cpi;
1790 VP8_COMMON *cm;
1791
1792 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1793 /* Check that the CPI instance is valid */
1794 if (!cpi)
1795 return 0;
1796
1797 cm = &cpi->common;
1798
1799 vpx_memset(cpi, 0, sizeof(VP8_COMP));
1800
1801 if (setjmp(cm->error.jmp))
1802 {
1803 cpi->common.error.setjmp = 0;
1804 vp8_remove_compressor(&cpi);
1805 return 0;
1806 }
1807
1808 cpi->common.error.setjmp = 1;
1809
1810 CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1));
1811
1812 vp8_create_common(&cpi->common);
1813
1814 init_config(cpi, oxcf);
1815
1816 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));
1817 cpi->common.current_video_frame = 0;
1818 cpi->temporal_pattern_counter = 0;
1819 cpi->kf_overspend_bits = 0;
1820 cpi->kf_bitrate_adjustment = 0;
1821 cpi->frames_till_gf_update_due = 0;
1822 cpi->gf_overspend_bits = 0;
1823 cpi->non_gf_bitrate_adjustment = 0;
1824 cpi->prob_last_coded = 128;
1825 cpi->prob_gf_coded = 128;
1826 cpi->prob_intra_coded = 63;
1827
1828 /* Prime the recent reference frame usage counters.
1829 * Hereafter they will be maintained as a sort of moving average
1830 */
1831 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1832 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1833 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1834 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1835
1836 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1837 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1838
1839 cpi->twopass.gf_decay_rate = 0;
1840 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1841
1842 cpi->gold_is_last = 0 ;
1843 cpi->alt_is_last = 0 ;
1844 cpi->gold_is_alt = 0 ;
1845
1846 cpi->active_map_enabled = 0;
1847
1848 #if 0
1849 /* Experimental code for lagged and one pass */
1850 /* Initialise one_pass GF frames stats */
1851 /* Update stats used for GF selection */
1852 if (cpi->pass == 0)
1853 {
1854 cpi->one_pass_frame_index = 0;
1855
1856 for (i = 0; i < MAX_LAG_BUFFERS; i++)
1857 {
1858 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1859 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1860 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1861 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1862 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1863 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1864 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1865 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1866 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1867 }
1868 }
1869 #endif
1870
1871 /* Should we use the cyclic refresh method?
1872 * Currently this is tied to error resilient mode.
1873 */
1874 cpi->cyclic_refresh_mode_enabled = cpi->oxcf.error_resilient_mode;
1875 cpi->cyclic_refresh_mode_max_mbs_perframe = (cpi->common.mb_rows * cpi->common.mb_cols) / 5;
1876 cpi->cyclic_refresh_mode_index = 0;
1877 cpi->cyclic_refresh_q = 32;
1878
1879 if (cpi->cyclic_refresh_mode_enabled)
1880 {
1881 CHECK_MEM_ERROR(cpi->cyclic_refresh_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1882 }
1883 else
1884 cpi->cyclic_refresh_map = (signed char *) NULL;
1885
1886 #ifdef VP8_ENTROPY_STATS
1887 init_context_counters();
1888 #endif
1889
1890 /* Initialize the feed-forward activity masking. */
1891 cpi->activity_avg = 90<<12;
1892
1893 /* Give a sensible default for the first frame. */
1894 cpi->frames_since_key = 8;
1895 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1896 cpi->this_key_frame_forced = 0;
1897 cpi->next_key_frame_forced = 0;
1898
1899 cpi->source_alt_ref_pending = 0;
1900 cpi->source_alt_ref_active = 0;
1901 cpi->common.refresh_alt_ref_frame = 0;
1902
1903 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1904 #if CONFIG_INTERNAL_STATS
1905 cpi->b_calculate_ssimg = 0;
1906
1907 cpi->count = 0;
1908 cpi->bytes = 0;
1909
1910 if (cpi->b_calculate_psnr)
1911 {
1912 cpi->total_sq_error = 0.0;
1913 cpi->total_sq_error2 = 0.0;
1914 cpi->total_y = 0.0;
1915 cpi->total_u = 0.0;
1916 cpi->total_v = 0.0;
1917 cpi->total = 0.0;
1918 cpi->totalp_y = 0.0;
1919 cpi->totalp_u = 0.0;
1920 cpi->totalp_v = 0.0;
1921 cpi->totalp = 0.0;
1922 cpi->tot_recode_hits = 0;
1923 cpi->summed_quality = 0;
1924 cpi->summed_weights = 0;
1925 }
1926
1927 if (cpi->b_calculate_ssimg)
1928 {
1929 cpi->total_ssimg_y = 0;
1930 cpi->total_ssimg_u = 0;
1931 cpi->total_ssimg_v = 0;
1932 cpi->total_ssimg_all = 0;
1933 }
1934
1935 #endif
1936
1937 cpi->first_time_stamp_ever = 0x7FFFFFFF;
1938
1939 cpi->frames_till_gf_update_due = 0;
1940 cpi->key_frame_count = 1;
1941
1942 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
1943 cpi->ni_tot_qi = 0;
1944 cpi->ni_frames = 0;
1945 cpi->total_byte_count = 0;
1946
1947 cpi->drop_frame = 0;
1948
1949 cpi->rate_correction_factor = 1.0;
1950 cpi->key_frame_rate_correction_factor = 1.0;
1951 cpi->gf_rate_correction_factor = 1.0;
1952 cpi->twopass.est_max_qcorrection_factor = 1.0;
1953
1954 for (i = 0; i < KEY_FRAME_CONTEXT; i++)
1955 {
1956 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
1957 }
1958
1959 #ifdef OUTPUT_YUV_SRC
1960 yuv_file = fopen("bd.yuv", "ab");
1961 #endif
1962
1963 #if 0
1964 framepsnr = fopen("framepsnr.stt", "a");
1965 kf_list = fopen("kf_list.stt", "w");
1966 #endif
1967
1968 cpi->output_pkt_list = oxcf->output_pkt_list;
1969
1970 #if !(CONFIG_REALTIME_ONLY)
1971
1972 if (cpi->pass == 1)
1973 {
1974 vp8_init_first_pass(cpi);
1975 }
1976 else if (cpi->pass == 2)
1977 {
1978 size_t packet_sz = sizeof(FIRSTPASS_STATS);
1979 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
1980
1981 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
1982 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
1983 cpi->twopass.stats_in_end = (void*)((char *)cpi->twopass.stats_in
1984 + (packets - 1) * packet_sz);
1985 vp8_init_second_pass(cpi);
1986 }
1987
1988 #endif
1989
1990 if (cpi->compressor_speed == 2)
1991 {
1992 cpi->avg_encode_time = 0;
1993 cpi->avg_pick_mode_time = 0;
1994 }
1995
1996 vp8_set_speed_features(cpi);
1997
1998 /* Set starting values of RD threshold multipliers (128 = *1) */
1999 for (i = 0; i < MAX_MODES; i++)
2000 {
2001 cpi->mb.rd_thresh_mult[i] = 128;
2002 }
2003
2004 #ifdef VP8_ENTROPY_STATS
2005 init_mv_ref_counts();
2006 #endif
2007
2008 #if CONFIG_MULTITHREAD
2009 if(vp8cx_create_encoder_threads(cpi))
2010 {
2011 vp8_remove_compressor(&cpi);
2012 return 0;
2013 }
2014 #endif
2015
2016 cpi->fn_ptr[BLOCK_16X16].sdf = vp8_sad16x16;
2017 cpi->fn_ptr[BLOCK_16X16].vf = vp8_variance16x16;
2018 cpi->fn_ptr[BLOCK_16X16].svf = vp8_sub_pixel_variance16x16;
2019 cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = vp8_variance_halfpixvar16x16_h;
2020 cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = vp8_variance_halfpixvar16x16_v;
2021 cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = vp8_variance_halfpixvar16x16_hv;
2022 cpi->fn_ptr[BLOCK_16X16].sdx3f = vp8_sad16x16x3;
2023 cpi->fn_ptr[BLOCK_16X16].sdx8f = vp8_sad16x16x8;
2024 cpi->fn_ptr[BLOCK_16X16].sdx4df = vp8_sad16x16x4d;
2025
2026 cpi->fn_ptr[BLOCK_16X8].sdf = vp8_sad16x8;
2027 cpi->fn_ptr[BLOCK_16X8].vf = vp8_variance16x8;
2028 cpi->fn_ptr[BLOCK_16X8].svf = vp8_sub_pixel_variance16x8;
2029 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL;
2030 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL;
2031 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
2032 cpi->fn_ptr[BLOCK_16X8].sdx3f = vp8_sad16x8x3;
2033 cpi->fn_ptr[BLOCK_16X8].sdx8f = vp8_sad16x8x8;
2034 cpi->fn_ptr[BLOCK_16X8].sdx4df = vp8_sad16x8x4d;
2035
2036 cpi->fn_ptr[BLOCK_8X16].sdf = vp8_sad8x16;
2037 cpi->fn_ptr[BLOCK_8X16].vf = vp8_variance8x16;
2038 cpi->fn_ptr[BLOCK_8X16].svf = vp8_sub_pixel_variance8x16;
2039 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL;
2040 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL;
2041 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
2042 cpi->fn_ptr[BLOCK_8X16].sdx3f = vp8_sad8x16x3;
2043 cpi->fn_ptr[BLOCK_8X16].sdx8f = vp8_sad8x16x8;
2044 cpi->fn_ptr[BLOCK_8X16].sdx4df = vp8_sad8x16x4d;
2045
2046 cpi->fn_ptr[BLOCK_8X8].sdf = vp8_sad8x8;
2047 cpi->fn_ptr[BLOCK_8X8].vf = vp8_variance8x8;
2048 cpi->fn_ptr[BLOCK_8X8].svf = vp8_sub_pixel_variance8x8;
2049 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL;
2050 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL;
2051 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
2052 cpi->fn_ptr[BLOCK_8X8].sdx3f = vp8_sad8x8x3;
2053 cpi->fn_ptr[BLOCK_8X8].sdx8f = vp8_sad8x8x8;
2054 cpi->fn_ptr[BLOCK_8X8].sdx4df = vp8_sad8x8x4d;
2055
2056 cpi->fn_ptr[BLOCK_4X4].sdf = vp8_sad4x4;
2057 cpi->fn_ptr[BLOCK_4X4].vf = vp8_variance4x4;
2058 cpi->fn_ptr[BLOCK_4X4].svf = vp8_sub_pixel_variance4x4;
2059 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL;
2060 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL;
2061 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
2062 cpi->fn_ptr[BLOCK_4X4].sdx3f = vp8_sad4x4x3;
2063 cpi->fn_ptr[BLOCK_4X4].sdx8f = vp8_sad4x4x8;
2064 cpi->fn_ptr[BLOCK_4X4].sdx4df = vp8_sad4x4x4d;
2065
2066 #if ARCH_X86_32 || ARCH_X86_64
2067 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2068 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2069 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2070 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2071 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2072 #endif
2073
2074 cpi->full_search_sad = vp8_full_search_sad;
2075 cpi->diamond_search_sad = vp8_diamond_search_sad;
2076 cpi->refining_search_sad = vp8_refining_search_sad;
2077
2078 /* make sure frame 1 is okay */
2079 cpi->mb.error_bins[0] = cpi->common.MBs;
2080
2081 /* vp8cx_init_quantizer() is first called here. Add check in
2082 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2083 * called later when needed. This will avoid unnecessary calls of
2084 * vp8cx_init_quantizer() for every frame.
2085 */
2086 vp8cx_init_quantizer(cpi);
2087
2088 vp8_loop_filter_init(cm);
2089
2090 cpi->common.error.setjmp = 0;
2091
2092 #if CONFIG_MULTI_RES_ENCODING
2093
2094 /* Calculate # of MBs in a row in lower-resolution level image. */
2095 if (cpi->oxcf.mr_encoder_id > 0)
2096 vp8_cal_low_res_mb_cols(cpi);
2097
2098 #endif
2099
2100 /* setup RD costs to MACROBLOCK struct */
2101
2102 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max+1];
2103 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max+1];
2104 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max+1];
2105 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max+1];
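/* The pointers above are offset by mv_max+1 / mvfp_max+1 so that the cost
 * tables can be indexed with signed MV components: mvcost[i][-mv_max..mv_max]
 * and mvsadcost[i][-mvfp_max..mvfp_max] all land inside the underlying
 * rd_costs arrays. cal_mvsadcosts() relies on this when it writes the
 * negative indices.
 */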
2106
2107 cal_mvsadcosts(cpi->mb.mvsadcost);
2108
2109 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2110 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2111 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2112 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2113 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2114
2115 /* setup block ptrs & offsets */
2116 vp8_setup_block_ptrs(&cpi->mb);
2117 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2118
2119 return cpi;
2120 }
2121
2122
2123 void vp8_remove_compressor(VP8_COMP **ptr)
2124 {
2125 VP8_COMP *cpi = *ptr;
2126
2127 if (!cpi)
2128 return;
2129
2130 if (cpi && (cpi->common.current_video_frame > 0))
2131 {
2132 #if !(CONFIG_REALTIME_ONLY)
2133
2134 if (cpi->pass == 2)
2135 {
2136 vp8_end_second_pass(cpi);
2137 }
2138
2139 #endif
2140
2141 #ifdef VP8_ENTROPY_STATS
2142 print_context_counters();
2143 print_tree_update_probs();
2144 print_mode_context();
2145 #endif
2146
2147 #if CONFIG_INTERNAL_STATS
2148
2149 if (cpi->pass != 1)
2150 {
2151 FILE *f = fopen("opsnr.stt", "a");
2152 double time_encoded = (cpi->last_end_time_stamp_seen
2153 - cpi->first_time_stamp_ever) / 10000000.000;
2154 double total_encode_time = (cpi->time_receive_data +
2155 cpi->time_compress_data) / 1000.000;
2156 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2157
2158 if (cpi->b_calculate_psnr)
2159 {
2160 YV12_BUFFER_CONFIG *lst_yv12 =
2161 &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
2162
2163 if (cpi->oxcf.number_of_layers > 1)
2164 {
2165 int i;
2166
2167 fprintf(f, "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2168 "GLPsnrP\tVPXSSIM\t\n");
2169 for (i=0; i<(int)cpi->oxcf.number_of_layers; i++)
2170 {
2171 double dr = (double)cpi->bytes_in_layer[i] *
2172 8.0 / 1000.0 / time_encoded;
2173 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2174 lst_yv12->y_width * lst_yv12->y_height;
2175 double total_psnr =
2176 vpx_sse_to_psnr(samples, 255.0,
2177 cpi->total_error2[i]);
2178 double total_psnr2 =
2179 vpx_sse_to_psnr(samples, 255.0,
2180 cpi->total_error2_p[i]);
2181 double total_ssim = 100 * pow(cpi->sum_ssim[i] /
2182 cpi->sum_weights[i], 8.0);
2183
2184 fprintf(f, "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2185 "%7.3f\t%7.3f\n",
2186 i, dr,
2187 cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2188 total_psnr,
2189 cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2190 total_psnr2, total_ssim);
2191 }
2192 }
2193 else
2194 {
2195 double samples = 3.0 / 2 * cpi->count *
2196 lst_yv12->y_width * lst_yv12->y_height;
2197 double total_psnr = vpx_sse_to_psnr(samples, 255.0,
2198 cpi->total_sq_error);
2199 double total_psnr2 = vpx_sse_to_psnr(samples, 255.0,
2200 cpi->total_sq_error2);
2201 double total_ssim = 100 * pow(cpi->summed_quality /
2202 cpi->summed_weights, 8.0);
2203
2204 fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2205 "GLPsnrP\tVPXSSIM\t Time(us)\n");
2206 fprintf(f, "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2207 "%7.3f\t%8.0f\n",
2208 dr, cpi->total / cpi->count, total_psnr,
2209 cpi->totalp / cpi->count, total_psnr2,
2210 total_ssim, total_encode_time);
2211 }
2212 }
2213
2214 if (cpi->b_calculate_ssimg)
2215 {
2216 if (cpi->oxcf.number_of_layers > 1)
2217 {
2218 int i;
2219
2220 fprintf(f, "Layer\tBitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t"
2221 "Time(us)\n");
2222 for (i=0; i<(int)cpi->oxcf.number_of_layers; i++)
2223 {
2224 double dr = (double)cpi->bytes_in_layer[i] *
2225 8.0 / 1000.0 / time_encoded;
2226 fprintf(f, "%5d\t%7.3f\t%6.4f\t"
2227 "%6.4f\t%6.4f\t%6.4f\t%8.0f\n",
2228 i, dr,
2229 cpi->total_ssimg_y_in_layer[i] /
2230 cpi->frames_in_layer[i],
2231 cpi->total_ssimg_u_in_layer[i] /
2232 cpi->frames_in_layer[i],
2233 cpi->total_ssimg_v_in_layer[i] /
2234 cpi->frames_in_layer[i],
2235 cpi->total_ssimg_all_in_layer[i] /
2236 cpi->frames_in_layer[i],
2237 total_encode_time);
2238 }
2239 }
2240 else
2241 {
2242 fprintf(f, "BitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t"
2243 "Time(us)\n");
2244 fprintf(f, "%7.3f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f\n", dr,
2245 cpi->total_ssimg_y / cpi->count,
2246 cpi->total_ssimg_u / cpi->count,
2247 cpi->total_ssimg_v / cpi->count,
2248 cpi->total_ssimg_all / cpi->count, total_encode_time);
2249 }
2250 }
2251
2252 fclose(f);
2253 #if 0
2254 f = fopen("qskip.stt", "a");
2255 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
2256 fclose(f);
2257 #endif
2258
2259 }
2260
2261 #endif
2262
2263
2264 #ifdef SPEEDSTATS
2265
2266 if (cpi->compressor_speed == 2)
2267 {
2268 int i;
2269 FILE *f = fopen("cxspeed.stt", "a");
2270 cnt_pm /= cpi->common.MBs;
2271
2272 for (i = 0; i < 16; i++)
2273 fprintf(f, "%5d", frames_at_speed[i]);
2274
2275 fprintf(f, "\n");
2276 fclose(f);
2277 }
2278
2279 #endif
2280
2281
2282 #ifdef MODE_STATS
2283 {
2284 extern int count_mb_seg[4];
2285 FILE *f = fopen("modes.stt", "a");
2286 double dr = (double)cpi->framerate * (double)bytes * (double)8 / (double)count / (double)1000 ;
2287 fprintf(f, "intra_mode in Intra Frames:\n");
2288 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1], y_modes[2], y_modes[3], y_modes[4]);
2289 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1], uv_modes[2], uv_modes[3]);
2290 fprintf(f, "B: ");
2291 {
2292 int i;
2293
2294 for (i = 0; i < 10; i++)
2295 fprintf(f, "%8d, ", b_modes[i]);
2296
2297 fprintf(f, "\n");
2298
2299 }
2300
2301 fprintf(f, "Modes in Inter Frames:\n");
2302 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2303 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2], inter_y_modes[3], inter_y_modes[4],
2304 inter_y_modes[5], inter_y_modes[6], inter_y_modes[7], inter_y_modes[8], inter_y_modes[9]);
2305 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0], inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2306 fprintf(f, "B: ");
2307 {
2308 int i;
2309
2310 for (i = 0; i < 15; i++)
2311 fprintf(f, "%8d, ", inter_b_modes[i]);
2312
2313 fprintf(f, "\n");
2314
2315 }
2316 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1], count_mb_seg[2], count_mb_seg[3]);
2317 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4], inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4], inter_b_modes[NEW4X4]);
2318
2319
2320
2321 fclose(f);
2322 }
2323 #endif
2324
2325 #ifdef VP8_ENTROPY_STATS
2326 {
2327 int i, j, k;
2328 FILE *fmode = fopen("modecontext.c", "w");
2329
2330 fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
2331 fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
2332 fprintf(fmode, "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
2333
2334 for (i = 0; i < 10; i++)
2335 {
2336
2337 fprintf(fmode, " { /* Above Mode : %d */\n", i);
2338
2339 for (j = 0; j < 10; j++)
2340 {
2341
2342 fprintf(fmode, " {");
2343
2344 for (k = 0; k < 10; k++)
2345 {
2346 if (!intra_mode_stats[i][j][k])
2347 fprintf(fmode, " %5d, ", 1);
2348 else
2349 fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
2350 }
2351
2352 fprintf(fmode, "}, /* left_mode %d */\n", j);
2353
2354 }
2355
2356 fprintf(fmode, " },\n");
2357
2358 }
2359
2360 fprintf(fmode, "};\n");
2361 fclose(fmode);
2362 }
2363 #endif
2364
2365
2366 #if defined(SECTIONBITS_OUTPUT)
2367
2368 if (0)
2369 {
2370 int i;
2371 FILE *f = fopen("tokenbits.stt", "a");
2372
2373 for (i = 0; i < 28; i++)
2374 fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
2375
2376 fprintf(f, "\n");
2377 fclose(f);
2378 }
2379
2380 #endif
2381
2382 #if 0
2383 {
2384 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2385 printf("\n_frames receive_data encode_mb_row compress_frame Total\n");
2386 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2387 }
2388 #endif
2389
2390 }
2391
2392 #if CONFIG_MULTITHREAD
2393 vp8cx_remove_encoder_threads(cpi);
2394 #endif
2395
2396 #if CONFIG_TEMPORAL_DENOISING
2397 vp8_denoiser_free(&cpi->denoiser);
2398 #endif
2399 dealloc_compressor_data(cpi);
2400 vpx_free(cpi->mb.ss);
2401 vpx_free(cpi->tok);
2402 vpx_free(cpi->cyclic_refresh_map);
2403
2404 vp8_remove_common(&cpi->common);
2405 vpx_free(cpi);
2406 *ptr = 0;
2407
2408 #ifdef OUTPUT_YUV_SRC
2409 fclose(yuv_file);
2410 #endif
2411
2412 #if 0
2413
2414 if (keyfile)
2415 fclose(keyfile);
2416
2417 if (framepsnr)
2418 fclose(framepsnr);
2419
2420 if (kf_list)
2421 fclose(kf_list);
2422
2423 #endif
2424
2425 }
2426
2427
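/* calc_plane_error() below accumulates the sum of squared differences over
 * the plane: full 16x16 tiles go through vp8_mse16x16(), and any leftover
 * right/bottom border pixels (when the width or height is not a multiple of
 * 16) are added with a plain per-pixel loop.
 */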
2428 static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
2429 unsigned char *recon, int recon_stride,
2430 unsigned int cols, unsigned int rows)
2431 {
2432 unsigned int row, col;
2433 uint64_t total_sse = 0;
2434 int diff;
2435
2436 for (row = 0; row + 16 <= rows; row += 16)
2437 {
2438 for (col = 0; col + 16 <= cols; col += 16)
2439 {
2440 unsigned int sse;
2441
2442 vp8_mse16x16(orig + col, orig_stride,
2443 recon + col, recon_stride,
2444 &sse);
2445 total_sse += sse;
2446 }
2447
2448 /* Handle odd-sized width */
2449 if (col < cols)
2450 {
2451 unsigned int border_row, border_col;
2452 unsigned char *border_orig = orig;
2453 unsigned char *border_recon = recon;
2454
2455 for (border_row = 0; border_row < 16; border_row++)
2456 {
2457 for (border_col = col; border_col < cols; border_col++)
2458 {
2459 diff = border_orig[border_col] - border_recon[border_col];
2460 total_sse += diff * diff;
2461 }
2462
2463 border_orig += orig_stride;
2464 border_recon += recon_stride;
2465 }
2466 }
2467
2468 orig += orig_stride * 16;
2469 recon += recon_stride * 16;
2470 }
2471
2472 /* Handle odd-sized height */
2473 for (; row < rows; row++)
2474 {
2475 for (col = 0; col < cols; col++)
2476 {
2477 diff = orig[col] - recon[col];
2478 total_sse += diff * diff;
2479 }
2480
2481 orig += orig_stride;
2482 recon += recon_stride;
2483 }
2484
2485 vp8_clear_system_state();
2486 return total_sse;
2487 }
2488
2489
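/* Layout of the PSNR packet built below: index 0 carries the combined
 * all-plane figures (sse[0] and samples[0] are accumulated across Y, U and V),
 * while indices 1, 2 and 3 record the Y, U and V planes individually before
 * each SSE is converted with vpx_sse_to_psnr().
 */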
2490 static void generate_psnr_packet(VP8_COMP *cpi)
2491 {
2492 YV12_BUFFER_CONFIG *orig = cpi->Source;
2493 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2494 struct vpx_codec_cx_pkt pkt;
2495 uint64_t sse;
2496 int i;
2497 unsigned int width = cpi->common.Width;
2498 unsigned int height = cpi->common.Height;
2499
2500 pkt.kind = VPX_CODEC_PSNR_PKT;
2501 sse = calc_plane_error(orig->y_buffer, orig->y_stride,
2502 recon->y_buffer, recon->y_stride,
2503 width, height);
2504 pkt.data.psnr.sse[0] = sse;
2505 pkt.data.psnr.sse[1] = sse;
2506 pkt.data.psnr.samples[0] = width * height;
2507 pkt.data.psnr.samples[1] = width * height;
2508
2509 width = (width + 1) / 2;
2510 height = (height + 1) / 2;
2511
2512 sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
2513 recon->u_buffer, recon->uv_stride,
2514 width, height);
2515 pkt.data.psnr.sse[0] += sse;
2516 pkt.data.psnr.sse[2] = sse;
2517 pkt.data.psnr.samples[0] += width * height;
2518 pkt.data.psnr.samples[2] = width * height;
2519
2520 sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
2521 recon->v_buffer, recon->uv_stride,
2522 width, height);
2523 pkt.data.psnr.sse[0] += sse;
2524 pkt.data.psnr.sse[3] = sse;
2525 pkt.data.psnr.samples[0] += width * height;
2526 pkt.data.psnr.samples[3] = width * height;
2527
2528 for (i = 0; i < 4; i++)
2529 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2530 (double)(pkt.data.psnr.sse[i]));
2531
2532 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2533 }
2534
2535
2536 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags)
2537 {
2538 if (ref_frame_flags > 7)
2539 return -1 ;
2540
2541 cpi->ref_frame_flags = ref_frame_flags;
2542 return 0;
2543 }
2544 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags)
2545 {
2546 if (ref_frame_flags > 7)
2547 return -1 ;
2548
2549 cpi->common.refresh_golden_frame = 0;
2550 cpi->common.refresh_alt_ref_frame = 0;
2551 cpi->common.refresh_last_frame = 0;
2552
2553 if (ref_frame_flags & VP8_LAST_FRAME)
2554 cpi->common.refresh_last_frame = 1;
2555
2556 if (ref_frame_flags & VP8_GOLD_FRAME)
2557 cpi->common.refresh_golden_frame = 1;
2558
2559 if (ref_frame_flags & VP8_ALTR_FRAME)
2560 cpi->common.refresh_alt_ref_frame = 1;
2561
2562 return 0;
2563 }
2564
2565 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd)
2566 {
2567 VP8_COMMON *cm = &cpi->common;
2568 int ref_fb_idx;
2569
2570 if (ref_frame_flag == VP8_LAST_FRAME)
2571 ref_fb_idx = cm->lst_fb_idx;
2572 else if (ref_frame_flag == VP8_GOLD_FRAME)
2573 ref_fb_idx = cm->gld_fb_idx;
2574 else if (ref_frame_flag == VP8_ALTR_FRAME)
2575 ref_fb_idx = cm->alt_fb_idx;
2576 else
2577 return -1;
2578
2579 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2580
2581 return 0;
2582 }
2583 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd)
2584 {
2585 VP8_COMMON *cm = &cpi->common;
2586
2587 int ref_fb_idx;
2588
2589 if (ref_frame_flag == VP8_LAST_FRAME)
2590 ref_fb_idx = cm->lst_fb_idx;
2591 else if (ref_frame_flag == VP8_GOLD_FRAME)
2592 ref_fb_idx = cm->gld_fb_idx;
2593 else if (ref_frame_flag == VP8_ALTR_FRAME)
2594 ref_fb_idx = cm->alt_fb_idx;
2595 else
2596 return -1;
2597
2598 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2599
2600 return 0;
2601 }
2602 int vp8_update_entropy(VP8_COMP *cpi, int update)
2603 {
2604 VP8_COMMON *cm = &cpi->common;
2605 cm->refresh_entropy_probs = update;
2606
2607 return 0;
2608 }
2609
2610
2611 #if OUTPUT_YUV_SRC
2612 void vp8_write_yuv_frame(const char *name, YV12_BUFFER_CONFIG *s)
2613 {
2614 FILE *yuv_file = fopen(name, "ab");
2615 unsigned char *src = s->y_buffer;
2616 int h = s->y_height;
2617
2618 do
2619 {
2620 fwrite(src, s->y_width, 1, yuv_file);
2621 src += s->y_stride;
2622 }
2623 while (--h);
2624
2625 src = s->u_buffer;
2626 h = s->uv_height;
2627
2628 do
2629 {
2630 fwrite(src, s->uv_width, 1, yuv_file);
2631 src += s->uv_stride;
2632 }
2633 while (--h);
2634
2635 src = s->v_buffer;
2636 h = s->uv_height;
2637
2638 do
2639 {
2640 fwrite(src, s->uv_width, 1, yuv_file);
2641 src += s->uv_stride;
2642 }
2643 while (--h);
2644
2645 fclose(yuv_file);
2646 }
2647 #endif
2648
2649
2650 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
2651 {
2652 VP8_COMMON *cm = &cpi->common;
2653
2654 /* are we resizing the image */
2655 if (cm->horiz_scale != 0 || cm->vert_scale != 0)
2656 {
2657 #if CONFIG_SPATIAL_RESAMPLING
2658 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
2659 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
2660 int tmp_height;
2661
2662 if (cm->vert_scale == 3)
2663 tmp_height = 9;
2664 else
2665 tmp_height = 11;
2666
2667 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2668 Scale2Ratio(cm->vert_scale, &vr, &vs);
2669
2670 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2671 tmp_height, hs, hr, vs, vr, 0);
2672
2673 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2674 cpi->Source = &cpi->scaled_source;
2675 #endif
2676 }
2677 else
2678 cpi->Source = sd;
2679 }
2680
2681
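/* Illustrative numbers (not defaults from this file) for the watermark test
 * in resize_key_frame() below: with optimal_buffer_level = 6000000 bits and
 * resample_down_water_mark = 20, a downscale step is considered once
 * buffer_level drops below 20 * 6000000 / 100 = 1200000 bits; the matching
 * resample_up_water_mark test works the same way in the other direction.
 */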
2682 static int resize_key_frame(VP8_COMP *cpi)
2683 {
2684 #if CONFIG_SPATIAL_RESAMPLING
2685 VP8_COMMON *cm = &cpi->common;
2686
2687 /* Do we need to apply resampling for one pass cbr.
2688 * In one pass this is more limited than in two pass cbr.
2689 * The test and any change is only made once per key frame sequence.
2690 */
2691 if (cpi->oxcf.allow_spatial_resampling && (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER))
2692 {
2693 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
2694 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
2695 int new_width, new_height;
2696
2697 /* If we are below the resample DOWN watermark then scale down a
2698 * notch.
2699 */
2700 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark * cpi->oxcf.optimal_buffer_level / 100))
2701 {
2702 cm->horiz_scale = (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
2703 cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
2704 }
2705 /* Should we now start scaling back up */
2706 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark * cpi->oxcf.optimal_buffer_level / 100))
2707 {
2708 cm->horiz_scale = (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
2709 cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
2710 }
2711
2712 /* Get the new height and width */
2713 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2714 Scale2Ratio(cm->vert_scale, &vr, &vs);
2715 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2716 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2717
2718 /* If the image size has changed we need to reallocate the buffers
2719 * and resample the source image
2720 */
2721 if ((cm->Width != new_width) || (cm->Height != new_height))
2722 {
2723 cm->Width = new_width;
2724 cm->Height = new_height;
2725 vp8_alloc_compressor_data(cpi);
2726 scale_and_extend_source(cpi->un_scaled_source, cpi);
2727 return 1;
2728 }
2729 }
2730
2731 #endif
2732 return 0;
2733 }
2734
2735
2736 static void update_alt_ref_frame_stats(VP8_COMP *cpi)
2737 {
2738 VP8_COMMON *cm = &cpi->common;
2739
2740 /* Select an interval before next GF or altref */
2741 if (!cpi->auto_gold)
2742 cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2743
2744 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due)
2745 {
2746 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2747
2748 /* Set the bits per frame that we should try and recover in
2749 * subsequent inter frames to account for the extra GF spend...
2750 * note that this does not apply for GF updates that occur
2751 * coincident with a key frame as the extra cost of key frames is
2752 * dealt with elsewhere.
2753 */
2754 cpi->gf_overspend_bits += cpi->projected_frame_size;
2755 cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2756 }
2757
2758 /* Update data structure that monitors level of reference to last GF */
2759 vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2760 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2761
2762 /* This frame is a refresh, so subsequent frames do not refresh unless explicitly requested by the user */
2763 cpi->frames_since_golden = 0;
2764
2765 /* Clear the alternate reference update pending flag. */
2766 cpi->source_alt_ref_pending = 0;
2767
2768 /* Set the alternate reference frame active flag */
2769 cpi->source_alt_ref_active = 1;
2770
2771
2772 }
2773 static void update_golden_frame_stats(VP8_COMP *cpi)
2774 {
2775 VP8_COMMON *cm = &cpi->common;
2776
2777 /* Update the Golden frame usage counts. */
2778 if (cm->refresh_golden_frame)
2779 {
2780 /* Select an interval before next GF */
2781 if (!cpi->auto_gold)
2782 cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2783
2784 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0))
2785 {
2786 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2787
2788 /* Set the bits per frame that we should try and recover in
2789 * subsequent inter frames to account for the extra GF spend...
2790 * note that this does not apply for GF updates that occur
2791 * coincident with a key frame as the extra cost of key frames
2792 * is dealt with elsewhere.
2793 */
2794 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active)
2795 {
2796 /* Calculate GF bits to be recovered:
2797 * projected size minus the average frame bits available for inter
2798 * frames for the clip as a whole
2799 */
2800 cpi->gf_overspend_bits += (cpi->projected_frame_size - cpi->inter_frame_target);
2801 }
2802
2803 cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2804
2805 }
2806
2807 /* Update data structure that monitors level of reference to last GF */
2808 vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2809 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2810
2811 /* This frame is a refresh, so subsequent frames do not refresh
2812 * unless explicitly requested by the user
2813 */
2814 cm->refresh_golden_frame = 0;
2815 cpi->frames_since_golden = 0;
2816
2817 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2818 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2819 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2820 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2821
2822 /* ******** Fixed Q test code only ************ */
2823 /* If we are going to use the ALT reference for the next group of
2824 * frames set a flag to say so.
2825 */
2826 if (cpi->oxcf.fixed_q >= 0 &&
2827 cpi->oxcf.play_alternate && !cpi->common.refresh_alt_ref_frame)
2828 {
2829 cpi->source_alt_ref_pending = 1;
2830 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2831 }
2832
2833 if (!cpi->source_alt_ref_pending)
2834 cpi->source_alt_ref_active = 0;
2835
2836 /* Decrement count down till next gf */
2837 if (cpi->frames_till_gf_update_due > 0)
2838 cpi->frames_till_gf_update_due--;
2839
2840 }
2841 else if (!cpi->common.refresh_alt_ref_frame)
2842 {
2843 /* Decrement count down till next gf */
2844 if (cpi->frames_till_gf_update_due > 0)
2845 cpi->frames_till_gf_update_due--;
2846
2847 if (cpi->frames_till_alt_ref_frame)
2848 cpi->frames_till_alt_ref_frame --;
2849
2850 cpi->frames_since_golden ++;
2851
2852 if (cpi->frames_since_golden > 1)
2853 {
2854 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2855 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2856 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2857 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2858 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2859 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2860 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2861 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2862 }
2863 }
2864 }
2865
2866 /* This function updates the reference frame probability estimates that
2867 * will be used during mode selection
2868 */
2869 static void update_rd_ref_frame_probs(VP8_COMP *cpi)
2870 {
2871 VP8_COMMON *cm = &cpi->common;
2872
2873 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2874 const int rf_intra = rfct[INTRA_FRAME];
2875 const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2876
2877 if (cm->frame_type == KEY_FRAME)
2878 {
2879 cpi->prob_intra_coded = 255;
2880 cpi->prob_last_coded = 128;
2881 cpi->prob_gf_coded = 128;
2882 }
2883 else if (!(rf_intra + rf_inter))
2884 {
2885 cpi->prob_intra_coded = 63;
2886 cpi->prob_last_coded = 128;
2887 cpi->prob_gf_coded = 128;
2888 }
2889
2890 /* update reference frame costs since we can do better than what we got
2891 * last frame.
2892 */
2893 if (cpi->oxcf.number_of_layers == 1)
2894 {
2895 if (cpi->common.refresh_alt_ref_frame)
2896 {
2897 cpi->prob_intra_coded += 40;
2898 if (cpi->prob_intra_coded > 255)
2899 cpi->prob_intra_coded = 255;
2900 cpi->prob_last_coded = 200;
2901 cpi->prob_gf_coded = 1;
2902 }
2903 else if (cpi->frames_since_golden == 0)
2904 {
2905 cpi->prob_last_coded = 214;
2906 }
2907 else if (cpi->frames_since_golden == 1)
2908 {
2909 cpi->prob_last_coded = 192;
2910 cpi->prob_gf_coded = 220;
2911 }
2912 else if (cpi->source_alt_ref_active)
2913 {
2914 cpi->prob_gf_coded -= 20;
2915
2916 if (cpi->prob_gf_coded < 10)
2917 cpi->prob_gf_coded = 10;
2918 }
2919 if (!cpi->source_alt_ref_active)
2920 cpi->prob_gf_coded = 255;
2921 }
2922 }
2923
2924
2925 /* 1 = key, 0 = inter */
2926 static int decide_key_frame(VP8_COMP *cpi)
2927 {
2928 VP8_COMMON *cm = &cpi->common;
2929
2930 int code_key_frame = 0;
2931
2932 cpi->kf_boost = 0;
2933
2934 if (cpi->Speed > 11)
2935 return 0;
2936
2937 /* Clear down mmx registers */
2938 vp8_clear_system_state();
2939
2940 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0))
2941 {
2942 double change = 1.0 * abs((int)(cpi->mb.intra_error -
2943 cpi->last_intra_error)) / (1 + cpi->last_intra_error);
2944 double change2 = 1.0 * abs((int)(cpi->mb.prediction_error -
2945 cpi->last_prediction_error)) / (1 + cpi->last_prediction_error);
2946 double minerror = cm->MBs * 256;
2947
2948 cpi->last_intra_error = cpi->mb.intra_error;
2949 cpi->last_prediction_error = cpi->mb.prediction_error;
2950
2951 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15
2952 && cpi->mb.prediction_error > minerror
2953 && (change > .25 || change2 > .25))
2954 {
2955 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra > cpi->last_frame_percent_intra + 3*/
2956 return 1;
2957 }
2958
2959 return 0;
2960
2961 }
2962
2963 /* If the following are true we might as well code a key frame */
2964 if (((cpi->this_frame_percent_intra == 100) &&
2965 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
2966 ((cpi->this_frame_percent_intra > 95) &&
2967 (cpi->this_frame_percent_intra >= (cpi->last_frame_percent_intra + 5))))
2968 {
2969 code_key_frame = 1;
2970 }
2971 /* In addition, if the following are true and this is not a golden frame
2972 * then code a key frame. Note that on golden frames there often seems
2973 * to be a pop in intra usage anyway, hence this restriction is
2974 * designed to prevent spurious key frames. The Intra pop needs to be
2975 * investigated.
2976 */
2977 else if (((cpi->this_frame_percent_intra > 60) &&
2978 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 2))) ||
2979 ((cpi->this_frame_percent_intra > 75) &&
2980 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 3 / 2))) ||
2981 ((cpi->this_frame_percent_intra > 90) &&
2982 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 10))))
2983 {
2984 if (!cm->refresh_golden_frame)
2985 code_key_frame = 1;
2986 }
2987
2988 return code_key_frame;
2989
2990 }
2991
2992 #if !(CONFIG_REALTIME_ONLY)
2993 static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags)
2994 {
2995 (void) size;
2996 (void) dest;
2997 (void) frame_flags;
2998 vp8_set_quantizer(cpi, 26);
2999
3000 vp8_first_pass(cpi);
3001 }
3002 #endif
3003
3004 #if 0
3005 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
3006 {
3007
3008 /* write the frame */
3009 FILE *yframe;
3010 int i;
3011 char filename[255];
3012
3013 sprintf(filename, "cx\\y%04d.raw", this_frame);
3014 yframe = fopen(filename, "wb");
3015
3016 for (i = 0; i < frame->y_height; i++)
3017 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
3018
3019 fclose(yframe);
3020 sprintf(filename, "cx\\u%04d.raw", this_frame);
3021 yframe = fopen(filename, "wb");
3022
3023 for (i = 0; i < frame->uv_height; i++)
3024 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
3025
3026 fclose(yframe);
3027 sprintf(filename, "cx\\v%04d.raw", this_frame);
3028 yframe = fopen(filename, "wb");
3029
3030 for (i = 0; i < frame->uv_height; i++)
3031 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
3032
3033 fclose(yframe);
3034 }
3035 #endif
3036 /* return of 0 means drop frame */
3037
3038 /* Function to test for conditions that indicate we should loop
3039 * back and recode a frame.
3040 */
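/* Illustrative numbers (not from the source) for the constrained quality
 * undershoot test below: with this_frame_target = 8000 bits, a recode is
 * forced when the projected size falls below (8000 * 7) >> 3 = 7000 bits
 * while q is still above the auto cq level.
 */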
3041 static int recode_loop_test( VP8_COMP *cpi,
3042 int high_limit, int low_limit,
3043 int q, int maxq, int minq )
3044 {
3045 int force_recode = 0;
3046 VP8_COMMON *cm = &cpi->common;
3047
3048 /* Is frame recode allowed at all?
3049 * Yes if either recode mode 1 is selected, or mode 2 is selected
3050 * and the frame is a key frame, golden frame or alt_ref_frame.
3051 */
3052 if ( (cpi->sf.recode_loop == 1) ||
3053 ( (cpi->sf.recode_loop == 2) &&
3054 ( (cm->frame_type == KEY_FRAME) ||
3055 cm->refresh_golden_frame ||
3056 cm->refresh_alt_ref_frame ) ) )
3057 {
3058 /* General over and under shoot tests */
3059 if ( ((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
3060 ((cpi->projected_frame_size < low_limit) && (q > minq)) )
3061 {
3062 force_recode = 1;
3063 }
3064 /* Special Constrained quality tests */
3065 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
3066 {
3067 /* Undershoot and below auto cq level */
3068 if ( (q > cpi->cq_target_quality) &&
3069 (cpi->projected_frame_size <
3070 ((cpi->this_frame_target * 7) >> 3)))
3071 {
3072 force_recode = 1;
3073 }
3074 /* Severe undershoot and between auto and user cq level */
3075 else if ( (q > cpi->oxcf.cq_level) &&
3076 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
3077 (cpi->active_best_quality > cpi->oxcf.cq_level))
3078 {
3079 force_recode = 1;
3080 cpi->active_best_quality = cpi->oxcf.cq_level;
3081 }
3082 }
3083 }
3084
3085 return force_recode;
3086 }
3087
3088 static void update_reference_frames(VP8_COMP *cpi)
3089 {
3090 VP8_COMMON *cm = &cpi->common;
3091 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
3092
3093 /* At this point the new frame has been encoded.
3094 * If any buffer copy / swapping is signaled it should be done here.
3095 */
3096
3097 if (cm->frame_type == KEY_FRAME)
3098 {
3099 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME ;
3100
3101 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
3102 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
3103
3104 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
3105
3106 #if CONFIG_MULTI_RES_ENCODING
3107 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
3108 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
3109 #endif
3110 }
3111 else /* For non key frames */
3112 {
3113 if (cm->refresh_alt_ref_frame)
3114 {
3115 assert(!cm->copy_buffer_to_arf);
3116
3117 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
3118 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
3119 cm->alt_fb_idx = cm->new_fb_idx;
3120
3121 #if CONFIG_MULTI_RES_ENCODING
3122 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
3123 #endif
3124 }
3125 else if (cm->copy_buffer_to_arf)
3126 {
3127 assert(!(cm->copy_buffer_to_arf & ~0x3));
3128
3129 if (cm->copy_buffer_to_arf == 1)
3130 {
3131 if(cm->alt_fb_idx != cm->lst_fb_idx)
3132 {
3133 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
3134 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
3135 cm->alt_fb_idx = cm->lst_fb_idx;
3136
3137 #if CONFIG_MULTI_RES_ENCODING
3138 cpi->current_ref_frames[ALTREF_FRAME] =
3139 cpi->current_ref_frames[LAST_FRAME];
3140 #endif
3141 }
3142 }
3143 else /* if (cm->copy_buffer_to_arf == 2) */
3144 {
3145 if(cm->alt_fb_idx != cm->gld_fb_idx)
3146 {
3147 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
3148 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
3149 cm->alt_fb_idx = cm->gld_fb_idx;
3150
3151 #if CONFIG_MULTI_RES_ENCODING
3152 cpi->current_ref_frames[ALTREF_FRAME] =
3153 cpi->current_ref_frames[GOLDEN_FRAME];
3154 #endif
3155 }
3156 }
3157 }
3158
3159 if (cm->refresh_golden_frame)
3160 {
3161 assert(!cm->copy_buffer_to_gf);
3162
3163 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
3164 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
3165 cm->gld_fb_idx = cm->new_fb_idx;
3166
3167 #if CONFIG_MULTI_RES_ENCODING
3168 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
3169 #endif
3170 }
3171 else if (cm->copy_buffer_to_gf)
3172 {
3173 assert(!(cm->copy_buffer_to_gf & ~0x3));
3174
3175 if (cm->copy_buffer_to_gf == 1)
3176 {
3177 if(cm->gld_fb_idx != cm->lst_fb_idx)
3178 {
3179 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
3180 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
3181 cm->gld_fb_idx = cm->lst_fb_idx;
3182
3183 #if CONFIG_MULTI_RES_ENCODING
3184 cpi->current_ref_frames[GOLDEN_FRAME] =
3185 cpi->current_ref_frames[LAST_FRAME];
3186 #endif
3187 }
3188 }
3189 else /* if (cm->copy_buffer_to_gf == 2) */
3190 {
3191 if(cm->alt_fb_idx != cm->gld_fb_idx)
3192 {
3193 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
3194 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
3195 cm->gld_fb_idx = cm->alt_fb_idx;
3196
3197 #if CONFIG_MULTI_RES_ENCODING
3198 cpi->current_ref_frames[GOLDEN_FRAME] =
3199 cpi->current_ref_frames[ALTREF_FRAME];
3200 #endif
3201 }
3202 }
3203 }
3204 }
3205
3206 if (cm->refresh_last_frame)
3207 {
3208 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
3209 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
3210 cm->lst_fb_idx = cm->new_fb_idx;
3211
3212 #if CONFIG_MULTI_RES_ENCODING
3213 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
3214 #endif
3215 }
3216
3217 #if CONFIG_TEMPORAL_DENOISING
3218 if (cpi->oxcf.noise_sensitivity)
3219 {
3220 /* We shouldn't have to keep multiple copies as we know in advance which
3221 * buffer we should start from - for now, to get something up and running,
3222 * I've chosen to copy the buffers
3223 */
3224 if (cm->frame_type == KEY_FRAME)
3225 {
3226 int i;
3227 vp8_yv12_copy_frame(
3228 cpi->Source,
3229 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
3230
3231 vp8_yv12_extend_frame_borders(
3232 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
3233
3234 for (i = 2; i < MAX_REF_FRAMES - 1; i++)
3235 vp8_yv12_copy_frame(
3236 &cpi->denoiser.yv12_running_avg[LAST_FRAME],
3237 &cpi->denoiser.yv12_running_avg[i]);
3238 }
3239 else /* For non key frames */
3240 {
3241 vp8_yv12_extend_frame_borders(
3242 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
3243
3244 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf)
3245 {
3246 vp8_yv12_copy_frame(
3247 &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3248 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
3249 }
3250 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf)
3251 {
3252 vp8_yv12_copy_frame(
3253 &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3254 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
3255 }
3256 if(cm->refresh_last_frame)
3257 {
3258 vp8_yv12_copy_frame(
3259 &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3260 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
3261 }
3262 }
3263
3264 }
3265 #endif
3266
3267 }
3268
3269 void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm)
3270 {
3271 const FRAME_TYPE frame_type = cm->frame_type;
3272
3273 if (cm->no_lpf)
3274 {
3275 cm->filter_level = 0;
3276 }
3277 else
3278 {
3279 struct vpx_usec_timer timer;
3280
3281 vp8_clear_system_state();
3282
3283 vpx_usec_timer_start(&timer);
3284 if (cpi->sf.auto_filter == 0)
3285 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3286
3287 else
3288 vp8cx_pick_filter_level(cpi->Source, cpi);
3289
3290 if (cm->filter_level > 0)
3291 {
3292 vp8cx_set_alt_lf_level(cpi, cm->filter_level);
3293 }
3294
3295 vpx_usec_timer_mark(&timer);
3296 cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
3297 }
3298
3299 #if CONFIG_MULTITHREAD
3300 if (cpi->b_multi_threaded)
3301 sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
3302 #endif
3303
3304 if (cm->filter_level > 0)
3305 {
3306 vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
3307 }
3308
3309 vp8_yv12_extend_frame_borders(cm->frame_to_show);
3310
3311 }
3312
3313 static void encode_frame_to_data_rate
3314 (
3315 VP8_COMP *cpi,
3316 unsigned long *size,
3317 unsigned char *dest,
3318 unsigned char* dest_end,
3319 unsigned int *frame_flags
3320 )
3321 {
3322 int Q;
3323 int frame_over_shoot_limit;
3324 int frame_under_shoot_limit;
3325
3326 int Loop = 0;
3327 int loop_count;
3328
3329 VP8_COMMON *cm = &cpi->common;
3330 int active_worst_qchanged = 0;
3331
3332 #if !(CONFIG_REALTIME_ONLY)
3333 int q_low;
3334 int q_high;
3335 int zbin_oq_high;
3336 int zbin_oq_low = 0;
3337 int top_index;
3338 int bottom_index;
3339 int overshoot_seen = 0;
3340 int undershoot_seen = 0;
3341 #endif
3342
3343 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3344 cpi->oxcf.optimal_buffer_level / 100);
3345 int drop_mark75 = drop_mark * 2 / 3;
3346 int drop_mark50 = drop_mark / 4;
3347 int drop_mark25 = drop_mark / 8;
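/* Illustrative numbers (not defaults from this file): with
 * drop_frames_water_mark = 70 and optimal_buffer_level = 100000 bits,
 * drop_mark = 70000, drop_mark75 = 46666, drop_mark50 = 17500 and
 * drop_mark25 = 8750. Despite their names, the lower marks are fixed
 * fractions (2/3, 1/4, 1/8) of drop_mark rather than percentages of it.
 */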
3348
3349
3350 /* Clear down mmx registers to allow floating point in what follows */
3351 vp8_clear_system_state();
3352
3353 #if CONFIG_MULTITHREAD
3354 /* wait for the loopfilter thread on the last picture to finish */
3355 if (cpi->b_lpf_running)
3356 {
3357 sem_wait(&cpi->h_event_end_lpf);
3358 cpi->b_lpf_running = 0;
3359 }
3360 #endif
3361
3362 if(cpi->force_next_frame_intra)
3363 {
3364 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3365 cpi->force_next_frame_intra = 0;
3366 }
3367
3368 /* For an alt ref frame in 2 pass we skip the call to the second pass
3369 * function that sets the target bandwidth
3370 */
3371 #if !(CONFIG_REALTIME_ONLY)
3372
3373 if (cpi->pass == 2)
3374 {
3375 if (cpi->common.refresh_alt_ref_frame)
3376 {
3377 /* Per frame bit target for the alt ref frame */
3378 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3379 /* per second target bitrate */
3380 cpi->target_bandwidth = (int)(cpi->twopass.gf_bits *
3381 cpi->output_framerate);
3382 }
3383 }
3384 else
3385 #endif
3386 cpi->per_frame_bandwidth = (int)(cpi->target_bandwidth / cpi->output_framerate);
3387
3388 /* Default turn off buffer to buffer copying */
3389 cm->copy_buffer_to_gf = 0;
3390 cm->copy_buffer_to_arf = 0;
3391
3392 /* Clear zbin over-quant value and mode boost values. */
3393 cpi->mb.zbin_over_quant = 0;
3394 cpi->mb.zbin_mode_boost = 0;
3395
3396 /* Enable or disable mode based tweaking of the zbin
3397 * For 2 Pass Only used where GF/ARF prediction quality
3398 * is above a threshold
3399 */
3400 cpi->mb.zbin_mode_boost_enabled = 1;
3401 if (cpi->pass == 2)
3402 {
3403 if ( cpi->gfu_boost <= 400 )
3404 {
3405 cpi->mb.zbin_mode_boost_enabled = 0;
3406 }
3407 }
3408
3409 /* Current default encoder behaviour for the altref sign bias */
3410 if (cpi->source_alt_ref_active)
3411 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3412 else
3413 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3414
3415 /* Check to see if a key frame is signaled
3416 * For two pass with auto key frame enabled cm->frame_type may already
3417 * be set, but not for one pass.
3418 */
3419 if ((cm->current_video_frame == 0) ||
3420 (cm->frame_flags & FRAMEFLAGS_KEY) ||
3421 (cpi->oxcf.auto_key && (cpi->frames_since_key % cpi->key_frame_frequency == 0)))
3422 {
3423 /* Key frame from VFW/auto-keyframe/first frame */
3424 cm->frame_type = KEY_FRAME;
3425 }
3426
3427 #if CONFIG_MULTI_RES_ENCODING
3428 /* In multi-resolution encoding, frame_type is decided by the lowest-resolution
3429 * encoder. The same frame_type is adopted when encoding at the other resolutions.
3430 */
3431 if (cpi->oxcf.mr_encoder_id)
3432 {
3433 LOWER_RES_FRAME_INFO* low_res_frame_info
3434 = (LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info;
3435
3436 cm->frame_type = low_res_frame_info->frame_type;
3437
3438 if(cm->frame_type != KEY_FRAME)
3439 {
3440 cpi->mr_low_res_mv_avail = 1;
3441 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3442
3443 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3444 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[LAST_FRAME]
3445 == low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3446
3447 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3448 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[GOLDEN_FRAME]
3449 == low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3450
3451 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3452 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3453 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3454 }
3455 }
3456 #endif
3457
3458 /* Set various flags etc to special state if it is a key frame */
3459 if (cm->frame_type == KEY_FRAME)
3460 {
3461 int i;
3462
3463 // Set the loop filter deltas and segmentation map update
3464 setup_features(cpi);
3465
3466 /* The alternate reference frame cannot be active for a key frame */
3467 cpi->source_alt_ref_active = 0;
3468
3469 /* Reset the RD threshold multipliers to default of * 1 (128) */
3470 for (i = 0; i < MAX_MODES; i++)
3471 {
3472 cpi->mb.rd_thresh_mult[i] = 128;
3473 }
3474 }
3475
3476 #if 0
3477 /* Experimental code for lagged compress and one pass
3478 * Initialise one_pass GF frames stats
3479 * Update stats used for GF selection
3480 */
3481 {
3482 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3483
3484 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3485 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3486 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3487 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3488 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3489 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3490 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3491 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3492 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3493 }
3494 #endif
3495
3496 update_rd_ref_frame_probs(cpi);
3497
3498 if (cpi->drop_frames_allowed)
3499 {
3500 /* The reset to decimation 0 is only done here for one pass.
3501 * Once it is set two pass leaves decimation on till the next kf.
3502 */
3503 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0))
3504 cpi->decimation_factor --;
3505
3506 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0)
3507 cpi->decimation_factor = 1;
3508
3509 else if (cpi->buffer_level < drop_mark25 && (cpi->decimation_factor == 2 || cpi->decimation_factor == 3))
3510 {
3511 cpi->decimation_factor = 3;
3512 }
3513 else if (cpi->buffer_level < drop_mark50 && (cpi->decimation_factor == 1 || cpi->decimation_factor == 2))
3514 {
3515 cpi->decimation_factor = 2;
3516 }
3517 else if (cpi->buffer_level < drop_mark75 && (cpi->decimation_factor == 0 || cpi->decimation_factor == 1))
3518 {
3519 cpi->decimation_factor = 1;
3520 }
3521 }
3522
3523 /* The following decimates the frame rate according to a regular
3524 * pattern (i.e. to 1/2 or 2/3 of the frame rate). This can be used to help
3525 * prevent buffer under-run in CBR mode. Alternatively it might be
3526 * desirable in some situations to drop frame rate but throw more bits
3527 * at each frame.
3528 *
3529 * Note that dropping a key frame can be problematic if spatial
3530 * resampling is also active
3531 */
3532 if (cpi->decimation_factor > 0)
3533 {
3534 switch (cpi->decimation_factor)
3535 {
3536 case 1:
3537 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3538 break;
3539 case 2:
3540 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3541 break;
3542 case 3:
3543 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3544 break;
3545 }
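/* Because decimated frames return before encoding (below), the frames that
 * are actually coded receive a larger per-frame budget: 3/2 of nominal for
 * decimation factor 1 and 5/4 for factors 2 and 3, as set in the switch above.
 */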
3546
3547 /* Note that we should not throw out a key frame (especially when
3548 * spatial resampling is enabled).
3549 */
3550 if (cm->frame_type == KEY_FRAME)
3551 {
3552 cpi->decimation_count = cpi->decimation_factor;
3553 }
3554 else if (cpi->decimation_count > 0)
3555 {
3556 cpi->decimation_count --;
3557
3558 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3559 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
3560 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3561
3562 #if CONFIG_MULTI_RES_ENCODING
3563 vp8_store_drop_frame_info(cpi);
3564 #endif
3565
3566 cm->current_video_frame++;
3567 cpi->frames_since_key++;
3568 // We advance the temporal pattern for dropped frames.
3569 cpi->temporal_pattern_counter++;
3570
3571 #if CONFIG_INTERNAL_STATS
3572 cpi->count ++;
3573 #endif
3574
3575 cpi->buffer_level = cpi->bits_off_target;
3576
3577 if (cpi->oxcf.number_of_layers > 1)
3578 {
3579 unsigned int i;
3580
3581 /* Propagate bits saved by dropping the frame to higher
3582 * layers
3583 */
3584 for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
3585 {
3586 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3587 lc->bits_off_target += (int)(lc->target_bandwidth /
3588 lc->framerate);
3589 if (lc->bits_off_target > lc->maximum_buffer_size)
3590 lc->bits_off_target = lc->maximum_buffer_size;
3591 lc->buffer_level = lc->bits_off_target;
3592 }
3593 }
3594
3595 return;
3596 }
3597 else
3598 cpi->decimation_count = cpi->decimation_factor;
3599 }
3600 else
3601 cpi->decimation_count = 0;
3602
3603 /* Decide how big to make the frame */
3604 if (!vp8_pick_frame_size(cpi))
3605 {
3606 /* TODO: the two drop-frame-and-return code paths could be combined. */
3607 #if CONFIG_MULTI_RES_ENCODING
3608 vp8_store_drop_frame_info(cpi);
3609 #endif
3610 cm->current_video_frame++;
3611 cpi->frames_since_key++;
3612 // We advance the temporal pattern for dropped frames.
3613 cpi->temporal_pattern_counter++;
3614 return;
3615 }
3616
3617 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3618 * This has a knock-on effect on active best quality as well.
3619 * For CBR if the buffer reaches its maximum level then we can no longer
3620 * save up bits for later frames so we might as well use them up
3621 * on the current frame.
3622 */
3623 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3624 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) && cpi->buffered_mode)
3625 {
3626 /* Max adjustment is 1/4 */
3627 int Adjustment = cpi->active_worst_quality / 4;
3628
3629 if (Adjustment)
3630 {
3631 int buff_lvl_step;
3632
3633 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size)
3634 {
3635 buff_lvl_step = (int)
3636 ((cpi->oxcf.maximum_buffer_size -
3637 cpi->oxcf.optimal_buffer_level) /
3638 Adjustment);
3639
3640 if (buff_lvl_step)
3641 Adjustment = (int)
3642 ((cpi->buffer_level -
3643 cpi->oxcf.optimal_buffer_level) /
3644 buff_lvl_step);
3645 else
3646 Adjustment = 0;
3647 }
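/* Net effect of the block above: Adjustment scales roughly linearly with how
 * far buffer_level sits above optimal_buffer_level, from 0 at the optimal
 * level up to the active_worst_quality / 4 cap as the buffer approaches its
 * maximum size.
 */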
3648
3649 cpi->active_worst_quality -= Adjustment;
3650
3651 if(cpi->active_worst_quality < cpi->active_best_quality)
3652 cpi->active_worst_quality = cpi->active_best_quality;
3653 }
3654 }
3655
3656 /* Set an active best quality and if necessary active worst quality
3657 * There is some odd behavior for one pass here that needs attention.
3658 */
3659 if ( (cpi->pass == 2) || (cpi->ni_frames > 150))
3660 {
3661 vp8_clear_system_state();
3662
3663 Q = cpi->active_worst_quality;
3664
3665 if ( cm->frame_type == KEY_FRAME )
3666 {
3667 if ( cpi->pass == 2 )
3668 {
3669 if (cpi->gfu_boost > 600)
3670 cpi->active_best_quality = kf_low_motion_minq[Q];
3671 else
3672 cpi->active_best_quality = kf_high_motion_minq[Q];
3673
3674 /* Special case for key frames forced because we have reached
3675 * the maximum key frame interval. Here force the Q to a range
3676 * based on the ambient Q to reduce the risk of popping
3677 */
3678 if ( cpi->this_key_frame_forced )
3679 {
3680 if ( cpi->active_best_quality > cpi->avg_frame_qindex * 7/8)
3681 cpi->active_best_quality = cpi->avg_frame_qindex * 7/8;
3682 else if ( cpi->active_best_quality < cpi->avg_frame_qindex >> 2 )
3683 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3684 }
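                /* The clamp above keeps the forced key frame's active best
                 * quality within [avg_frame_qindex / 4, avg_frame_qindex * 7 / 8],
                 * i.e. close to the ambient Q, to reduce the risk of popping.
                 */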
3685 }
3686 /* One pass more conservative */
3687 else
3688 cpi->active_best_quality = kf_high_motion_minq[Q];
3689 }
3690
3691 else if (cpi->oxcf.number_of_layers==1 &&
3692 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame))
3693 {
3694 /* Use the lower of cpi->active_worst_quality and recent
3695 * average Q as basis for GF/ARF Q limit unless last frame was
3696 * a key frame.
3697 */
3698 if ( (cpi->frames_since_key > 1) &&
3699 (cpi->avg_frame_qindex < cpi->active_worst_quality) )
3700 {
3701 Q = cpi->avg_frame_qindex;
3702 }
3703
3704             /* For constrained quality, don't allow Q less than the cq level */
3705 if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3706 (Q < cpi->cq_target_quality) )
3707 {
3708 Q = cpi->cq_target_quality;
3709 }
3710
3711 if ( cpi->pass == 2 )
3712 {
3713 if ( cpi->gfu_boost > 1000 )
3714 cpi->active_best_quality = gf_low_motion_minq[Q];
3715 else if ( cpi->gfu_boost < 400 )
3716 cpi->active_best_quality = gf_high_motion_minq[Q];
3717 else
3718 cpi->active_best_quality = gf_mid_motion_minq[Q];
3719
3720                 /* Constrained quality uses a slightly lower active best. */
3721 if ( cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY )
3722 {
3723 cpi->active_best_quality =
3724 cpi->active_best_quality * 15/16;
3725 }
3726 }
3727 /* One pass more conservative */
3728 else
3729 cpi->active_best_quality = gf_high_motion_minq[Q];
3730 }
3731 else
3732 {
3733 cpi->active_best_quality = inter_minq[Q];
3734
3735             /* For the constant/constrained quality mode we don't want
3736              * q to fall below the cq level.
3737              */
3738 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3739 (cpi->active_best_quality < cpi->cq_target_quality) )
3740 {
3741                 /* If we are strongly undershooting the target rate in the last
3742                  * few frames then use the cq value passed in by the user, not
3743                  * the auto cq value.
3744                  */
3745 if ( cpi->rolling_actual_bits < cpi->min_frame_bandwidth )
3746 cpi->active_best_quality = cpi->oxcf.cq_level;
3747 else
3748 cpi->active_best_quality = cpi->cq_target_quality;
3749 }
3750 }
3751
3752         /* If CBR and the buffer is full then it is reasonable to allow
3753          * higher quality on the frames to prevent bits just going to waste.
3754          */
3755 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
3756 {
3757             /* Note that the use of >= here eliminates the risk of a divide
3758              * by 0 error in the else if clause
3759              */
3760 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size)
3761 cpi->active_best_quality = cpi->best_quality;
3762
3763 else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level)
3764 {
3765 int Fraction = (int)
3766 (((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128)
3767 / (cpi->oxcf.maximum_buffer_size -
3768 cpi->oxcf.optimal_buffer_level));
3769 int min_qadjustment = ((cpi->active_best_quality -
3770 cpi->best_quality) * Fraction) / 128;
3771
3772 cpi->active_best_quality -= min_qadjustment;
3773 }
3774 }
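        /* In the branch above, Fraction maps the buffer's position between the
         * optimal and maximum levels onto a 0..128 scale, so a buffer half way
         * between the two moves active_best_quality half of the way down
         * towards best_quality.
         */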
3775 }
3776 /* Make sure constrained quality mode limits are adhered to for the first
3777 * few frames of one pass encodes
3778 */
3779 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
3780 {
3781 if ( (cm->frame_type == KEY_FRAME) ||
3782 cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame )
3783 {
3784 cpi->active_best_quality = cpi->best_quality;
3785 }
3786 else if (cpi->active_best_quality < cpi->cq_target_quality)
3787 {
3788 cpi->active_best_quality = cpi->cq_target_quality;
3789 }
3790 }
3791
3792 /* Clip the active best and worst quality values to limits */
3793 if (cpi->active_worst_quality > cpi->worst_quality)
3794 cpi->active_worst_quality = cpi->worst_quality;
3795
3796 if (cpi->active_best_quality < cpi->best_quality)
3797 cpi->active_best_quality = cpi->best_quality;
3798
3799 if ( cpi->active_worst_quality < cpi->active_best_quality )
3800 cpi->active_worst_quality = cpi->active_best_quality;
3801
3802 /* Determine initial Q to try */
3803 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3804
3805 #if !(CONFIG_REALTIME_ONLY)
3806
3807 /* Set highest allowed value for Zbin over quant */
3808 if (cm->frame_type == KEY_FRAME)
3809 zbin_oq_high = 0;
3810 else if ((cpi->oxcf.number_of_layers == 1) && ((cm->refresh_alt_ref_frame ||
3811 (cm->refresh_golden_frame && !cpi->source_alt_ref_active))))
3812 {
3813 zbin_oq_high = 16;
3814 }
3815 else
3816 zbin_oq_high = ZBIN_OQ_MAX;
3817 #endif
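    /* Note on zbin over quant: widening the quantizer zero bin discards more
     * small coefficients and so shrinks the frame further once Q has already
     * reached its maximum in the recode loop below. Key frames never use it,
     * golden / alt-ref updates allow only a small range (16), and ordinary
     * inter frames allow the full ZBIN_OQ_MAX range.
     */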
3818
3819 /* Setup background Q adjustment for error resilient mode.
3820 * For multi-layer encodes only enable this for the base layer.
3821 */
3822 if (cpi->cyclic_refresh_mode_enabled)
3823 {
3824 if (cpi->current_layer==0)
3825 cyclic_background_refresh(cpi, Q, 0);
3826 else
3827 disable_segmentation(cpi);
3828 }
3829
3830 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);
3831
3832 #if !(CONFIG_REALTIME_ONLY)
3833 /* Limit Q range for the adaptive loop. */
3834 bottom_index = cpi->active_best_quality;
3835 top_index = cpi->active_worst_quality;
3836 q_low = cpi->active_best_quality;
3837 q_high = cpi->active_worst_quality;
3838 #endif
3839
3840 vp8_save_coding_context(cpi);
3841
3842 loop_count = 0;
3843
3844 scale_and_extend_source(cpi->un_scaled_source, cpi);
3845
3846 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3847
3848 if (cpi->oxcf.noise_sensitivity > 0)
3849 {
3850 unsigned char *src;
3851 int l = 0;
3852
3853 switch (cpi->oxcf.noise_sensitivity)
3854 {
3855 case 1:
3856 l = 20;
3857 break;
3858 case 2:
3859 l = 40;
3860 break;
3861 case 3:
3862 l = 60;
3863 break;
3864 case 4:
3865 l = 80;
3866 break;
3867 case 5:
3868 l = 100;
3869 break;
3870 case 6:
3871 l = 150;
3872 break;
3873 }
3874
3875
3876 if (cm->frame_type == KEY_FRAME)
3877 {
3878 vp8_de_noise(cm, cpi->Source, cpi->Source, l , 1, 0);
3879 }
3880 else
3881 {
3882 vp8_de_noise(cm, cpi->Source, cpi->Source, l , 1, 0);
3883
3884 src = cpi->Source->y_buffer;
3885
3886 if (cpi->Source->y_stride < 0)
3887 {
3888 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3889 }
3890 }
3891 }
3892
3893 #endif
3894
3895 #ifdef OUTPUT_YUV_SRC
3896 vp8_write_yuv_frame(cpi->Source);
3897 #endif
3898
3899 do
3900 {
3901 vp8_clear_system_state();
3902
3903 vp8_set_quantizer(cpi, Q);
3904
3905 /* setup skip prob for costing in mode/mv decision */
3906 if (cpi->common.mb_no_coeff_skip)
3907 {
3908 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3909
3910 if (cm->frame_type != KEY_FRAME)
3911 {
3912 if (cpi->common.refresh_alt_ref_frame)
3913 {
3914 if (cpi->last_skip_false_probs[2] != 0)
3915 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3916
3917 /*
3918 if(cpi->last_skip_false_probs[2]!=0 && abs(Q- cpi->last_skip_probs_q[2])<=16 )
3919 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3920 else if (cpi->last_skip_false_probs[2]!=0)
3921 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] + cpi->prob_skip_false ) / 2;
3922 */
3923 }
3924 else if (cpi->common.refresh_golden_frame)
3925 {
3926 if (cpi->last_skip_false_probs[1] != 0)
3927 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3928
3929 /*
3930 if(cpi->last_skip_false_probs[1]!=0 && abs(Q- cpi->last_skip_probs_q[1])<=16 )
3931 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3932 else if (cpi->last_skip_false_probs[1]!=0)
3933 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] + cpi->prob_skip_false ) / 2;
3934 */
3935 }
3936 else
3937 {
3938 if (cpi->last_skip_false_probs[0] != 0)
3939 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3940
3941 /*
3942 if(cpi->last_skip_false_probs[0]!=0 && abs(Q- cpi->last_skip_probs_q[0])<=16 )
3943 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3944 else if(cpi->last_skip_false_probs[0]!=0)
3945 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] + cpi->prob_skip_false ) / 2;
3946 */
3947 }
3948
3949                 /* As this is only a cost estimate, make sure it does not
3950                  * go to either extreme.
3951                  */
3952 if (cpi->prob_skip_false < 5)
3953 cpi->prob_skip_false = 5;
3954
3955 if (cpi->prob_skip_false > 250)
3956 cpi->prob_skip_false = 250;
3957
3958 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref)
3959 cpi->prob_skip_false = 1;
3960 }
3961
3962 #if 0
3963
3964 if (cpi->pass != 1)
3965 {
3966 FILE *f = fopen("skip.stt", "a");
3967 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
3968 fclose(f);
3969 }
3970
3971 #endif
3972
3973 }
3974
3975 if (cm->frame_type == KEY_FRAME)
3976 {
3977 if(resize_key_frame(cpi))
3978 {
3979 /* If the frame size has changed, need to reset Q, quantizer,
3980 * and background refresh.
3981 */
3982 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3983 if (cpi->cyclic_refresh_mode_enabled)
3984 {
3985 if (cpi->current_layer==0)
3986 cyclic_background_refresh(cpi, Q, 0);
3987 else
3988 disable_segmentation(cpi);
3989 }
3990 vp8_set_quantizer(cpi, Q);
3991 }
3992
3993 vp8_setup_key_frame(cpi);
3994 }
3995
3996
3997
3998 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
3999 {
4000 if(cpi->oxcf.error_resilient_mode)
4001 cm->refresh_entropy_probs = 0;
4002
4003 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS)
4004 {
4005 if (cm->frame_type == KEY_FRAME)
4006 cm->refresh_entropy_probs = 1;
4007 }
4008
4009 if (cm->refresh_entropy_probs == 0)
4010 {
4011 /* save a copy for later refresh */
4012 vpx_memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
4013 }
4014
4015 vp8_update_coef_context(cpi);
4016
4017 vp8_update_coef_probs(cpi);
4018
4019             /* transform / motion compensation: build the reconstruction
4020              * frame and pack the coef partitions
4021              */
4022 vp8_encode_frame(cpi);
4023
4024 /* cpi->projected_frame_size is not needed for RT mode */
4025 }
4026 #else
4027         /* transform / motion compensation: build the reconstruction frame */
4028 vp8_encode_frame(cpi);
4029
4030 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
4031 cpi->projected_frame_size = (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
4032 #endif
4033 vp8_clear_system_state();
4034
4035 /* Test to see if the stats generated for this frame indicate that
4036 * we should have coded a key frame (assuming that we didn't)!
4037 */
4038
4039 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME
4040 && cpi->compressor_speed != 2)
4041 {
4042 #if !(CONFIG_REALTIME_ONLY)
4043 if (decide_key_frame(cpi))
4044 {
4045 /* Reset all our sizing numbers and recode */
4046 cm->frame_type = KEY_FRAME;
4047
4048 vp8_pick_frame_size(cpi);
4049
4050 /* Clear the Alt reference frame active flag when we have
4051 * a key frame
4052 */
4053 cpi->source_alt_ref_active = 0;
4054
4055 // Set the loop filter deltas and segmentation map update
4056 setup_features(cpi);
4057
4058 vp8_restore_coding_context(cpi);
4059
4060 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4061
4062 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);
4063
4064 /* Limit Q range for the adaptive loop. */
4065 bottom_index = cpi->active_best_quality;
4066 top_index = cpi->active_worst_quality;
4067 q_low = cpi->active_best_quality;
4068 q_high = cpi->active_worst_quality;
4069
4070 loop_count++;
4071 Loop = 1;
4072
4073 continue;
4074 }
4075 #endif
4076 }
4077
4078 vp8_clear_system_state();
4079
4080 if (frame_over_shoot_limit == 0)
4081 frame_over_shoot_limit = 1;
4082
4083         /* Are we overshooting and up against the limit of active max Q? */
4084 if (((cpi->pass != 2) || (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
4085 (Q == cpi->active_worst_quality) &&
4086 (cpi->active_worst_quality < cpi->worst_quality) &&
4087 (cpi->projected_frame_size > frame_over_shoot_limit))
4088 {
4089 int over_size_percent = ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) / frame_over_shoot_limit;
4090
4091             /* If so, is there any scope for relaxing it? */
4092 while ((cpi->active_worst_quality < cpi->worst_quality) && (over_size_percent > 0))
4093 {
4094 cpi->active_worst_quality++;
4095 /* Assume 1 qstep = about 4% on frame size. */
4096 over_size_percent = (int)(over_size_percent * 0.96);
4097 }
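            /* Each extra Q step above is assumed to shave roughly 4% off the
             * frame, so over_size_percent decays as 0.96^n until it reaches
             * zero or active_worst_quality hits worst_quality.
             */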
4098 #if !(CONFIG_REALTIME_ONLY)
4099 top_index = cpi->active_worst_quality;
4100 #endif
4101 /* If we have updated the active max Q do not call
4102 * vp8_update_rate_correction_factors() this loop.
4103 */
4104 active_worst_qchanged = 1;
4105 }
4106 else
4107 active_worst_qchanged = 0;
4108
4109 #if !(CONFIG_REALTIME_ONLY)
4110 /* Special case handling for forced key frames */
4111 if ( (cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced )
4112 {
4113 int last_q = Q;
4114 int kf_err = vp8_calc_ss_err(cpi->Source,
4115 &cm->yv12_fb[cm->new_fb_idx]);
4116
4117 /* The key frame is not good enough */
4118 if ( kf_err > ((cpi->ambient_err * 7) >> 3) )
4119 {
4120 /* Lower q_high */
4121 q_high = (Q > q_low) ? (Q - 1) : q_low;
4122
4123 /* Adjust Q */
4124 Q = (q_high + q_low) >> 1;
4125 }
4126 /* The key frame is much better than the previous frame */
4127 else if ( kf_err < (cpi->ambient_err >> 1) )
4128 {
4129 /* Raise q_low */
4130 q_low = (Q < q_high) ? (Q + 1) : q_high;
4131
4132 /* Adjust Q */
4133 Q = (q_high + q_low + 1) >> 1;
4134 }
4135
4136 /* Clamp Q to upper and lower limits: */
4137 if (Q > q_high)
4138 Q = q_high;
4139 else if (Q < q_low)
4140 Q = q_low;
4141
4142 Loop = Q != last_q;
4143 }
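        /* The forced key frame handling above is effectively a binary search
         * over [q_low, q_high]: Q is raised while the reconstruction error is
         * well below the ambient error, lowered while it is well above it, and
         * the search stops once Q no longer changes.
         */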
4144
4145         /* Is the projected frame size out of range and are we allowed
4146          * to attempt to recode?
4147          */
4148 else if ( recode_loop_test( cpi,
4149 frame_over_shoot_limit, frame_under_shoot_limit,
4150 Q, top_index, bottom_index ) )
4151 {
4152 int last_q = Q;
4153 int Retries = 0;
4154
4155 /* Frame size out of permitted range. Update correction factor
4156 * & compute new Q to try...
4157 */
4158
4159 /* Frame is too large */
4160 if (cpi->projected_frame_size > cpi->this_frame_target)
4161 {
4162                 /* Raise q_low to at least the current value */
4163 q_low = (Q < q_high) ? (Q + 1) : q_high;
4164
4165 /* If we are using over quant do the same for zbin_oq_low */
4166 if (cpi->mb.zbin_over_quant > 0)
4167 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ?
4168 (cpi->mb.zbin_over_quant + 1) : zbin_oq_high;
4169
4170 if (undershoot_seen)
4171 {
4172 /* Update rate_correction_factor unless
4173 * cpi->active_worst_quality has changed.
4174 */
4175 if (!active_worst_qchanged)
4176 vp8_update_rate_correction_factors(cpi, 1);
4177
4178 Q = (q_high + q_low + 1) / 2;
4179
4180 /* Adjust cpi->zbin_over_quant (only allowed when Q
4181 * is max)
4182 */
4183 if (Q < MAXQ)
4184 cpi->mb.zbin_over_quant = 0;
4185 else
4186 {
4187 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ?
4188 (cpi->mb.zbin_over_quant + 1) : zbin_oq_high;
4189 cpi->mb.zbin_over_quant =
4190 (zbin_oq_high + zbin_oq_low) / 2;
4191 }
4192 }
4193 else
4194 {
4195 /* Update rate_correction_factor unless
4196 * cpi->active_worst_quality has changed.
4197 */
4198 if (!active_worst_qchanged)
4199 vp8_update_rate_correction_factors(cpi, 0);
4200
4201 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4202
4203 while (((Q < q_low) ||
4204 (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4205 (Retries < 10))
4206 {
4207 vp8_update_rate_correction_factors(cpi, 0);
4208 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4209 Retries ++;
4210 }
4211 }
4212
4213 overshoot_seen = 1;
4214 }
4215 /* Frame is too small */
4216 else
4217 {
4218 if (cpi->mb.zbin_over_quant == 0)
4219 /* Lower q_high if not using over quant */
4220 q_high = (Q > q_low) ? (Q - 1) : q_low;
4221 else
4222 /* else lower zbin_oq_high */
4223 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low) ?
4224 (cpi->mb.zbin_over_quant - 1) : zbin_oq_low;
4225
4226 if (overshoot_seen)
4227 {
4228 /* Update rate_correction_factor unless
4229 * cpi->active_worst_quality has changed.
4230 */
4231 if (!active_worst_qchanged)
4232 vp8_update_rate_correction_factors(cpi, 1);
4233
4234 Q = (q_high + q_low) / 2;
4235
4236 /* Adjust cpi->zbin_over_quant (only allowed when Q
4237 * is max)
4238 */
4239 if (Q < MAXQ)
4240 cpi->mb.zbin_over_quant = 0;
4241 else
4242 cpi->mb.zbin_over_quant =
4243 (zbin_oq_high + zbin_oq_low) / 2;
4244 }
4245 else
4246 {
4247 /* Update rate_correction_factor unless
4248 * cpi->active_worst_quality has changed.
4249 */
4250 if (!active_worst_qchanged)
4251 vp8_update_rate_correction_factors(cpi, 0);
4252
4253 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4254
4255                     /* Special case reset for q_low for constrained quality.
4256                      * This should only trigger where there is very substantial
4257                      * undershoot on a frame and the auto cq level is above
4258                      * the value passed in by the user.
4259                      */
4260 if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4261 (Q < q_low) )
4262 {
4263 q_low = Q;
4264 }
4265
4266 while (((Q > q_high) ||
4267 (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4268 (Retries < 10))
4269 {
4270 vp8_update_rate_correction_factors(cpi, 0);
4271 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4272 Retries ++;
4273 }
4274 }
4275
4276 undershoot_seen = 1;
4277 }
4278
4279 /* Clamp Q to upper and lower limits: */
4280 if (Q > q_high)
4281 Q = q_high;
4282 else if (Q < q_low)
4283 Q = q_low;
4284
4285 /* Clamp cpi->zbin_over_quant */
4286 cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low) ?
4287 zbin_oq_low : (cpi->mb.zbin_over_quant > zbin_oq_high) ?
4288 zbin_oq_high : cpi->mb.zbin_over_quant;
4289
4290 Loop = Q != last_q;
4291 }
4292 else
4293 #endif
4294 Loop = 0;
4295
4296 if (cpi->is_src_frame_alt_ref)
4297 Loop = 0;
4298
4299 if (Loop == 1)
4300 {
4301 vp8_restore_coding_context(cpi);
4302 loop_count++;
4303 #if CONFIG_INTERNAL_STATS
4304 cpi->tot_recode_hits++;
4305 #endif
4306 }
4307 }
4308 while (Loop == 1);
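    /* End of the recode loop. A frame is re-quantized and re-encoded until its
     * projected size falls within the under/over shoot limits, or Q (and
     * zbin_over_quant) can move no further; recoding is skipped entirely when
     * the source frame is the alt-ref frame.
     */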
4309
4310 #if 0
4311 /* Experimental code for lagged and one pass
4312 * Update stats used for one pass GF selection
4313 */
4314 {
4315 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4316 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4317 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4318 }
4319 #endif
4320
4321     /* Special case code to reduce pulsing when key frames are forced at a
4322      * fixed interval. Note the reconstruction error if it is the frame before
4323      * the forced key frame.
4324      */
4325 if ( cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0) )
4326 {
4327 cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
4328 &cm->yv12_fb[cm->new_fb_idx]);
4329 }
4330
4331     /* This frame's MVs are saved and will be used in the next frame's MV predictor.
4332      * The last frame has one more line (added to the bottom) and one more column
4333      * (added to the right) than cm->mip. The edge elements are initialized to 0.
4334      */
4335 #if CONFIG_MULTI_RES_ENCODING
4336 if(!cpi->oxcf.mr_encoder_id && cm->show_frame)
4337 #else
4338 if(cm->show_frame) /* do not save for altref frame */
4339 #endif
4340 {
4341 int mb_row;
4342 int mb_col;
4343 /* Point to beginning of allocated MODE_INFO arrays. */
4344 MODE_INFO *tmp = cm->mip;
4345
4346 if(cm->frame_type != KEY_FRAME)
4347 {
4348 for (mb_row = 0; mb_row < cm->mb_rows+1; mb_row ++)
4349 {
4350 for (mb_col = 0; mb_col < cm->mb_cols+1; mb_col ++)
4351 {
4352 if(tmp->mbmi.ref_frame != INTRA_FRAME)
4353 cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride+1)].as_int = tmp->mbmi.mv.as_int;
4354
4355 cpi->lf_ref_frame_sign_bias[mb_col + mb_row*(cm->mode_info_stride+1)] = cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4356 cpi->lf_ref_frame[mb_col + mb_row*(cm->mode_info_stride+1)] = tmp->mbmi.ref_frame;
4357 tmp++;
4358 }
4359 }
4360 }
4361 }
4362
4363 /* Count last ref frame 0,0 usage on current encoded frame. */
4364 {
4365 int mb_row;
4366 int mb_col;
4367 /* Point to beginning of MODE_INFO arrays. */
4368 MODE_INFO *tmp = cm->mi;
4369
4370 cpi->zeromv_count = 0;
4371
4372 if(cm->frame_type != KEY_FRAME)
4373 {
4374 for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
4375 {
4376 for (mb_col = 0; mb_col < cm->mb_cols; mb_col ++)
4377 {
4378 if(tmp->mbmi.mode == ZEROMV)
4379 cpi->zeromv_count++;
4380 tmp++;
4381 }
4382 tmp++;
4383 }
4384 }
4385 }
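    /* In the loop above, the extra tmp++ at the end of each row skips the
     * padding element, since the MODE_INFO array is laid out with a stride of
     * mb_cols + 1.
     */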
4386
4387 #if CONFIG_MULTI_RES_ENCODING
4388 vp8_cal_dissimilarity(cpi);
4389 #endif
4390
4391     /* Update the GF usage maps.
4392      * This is done after completing the compression of a frame, when all
4393      * modes etc. are finalized but before the loop filter.
4394      */
4395 if (cpi->oxcf.number_of_layers == 1)
4396 vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
4397
4398 if (cm->frame_type == KEY_FRAME)
4399 cm->refresh_last_frame = 1;
4400
4401 #if 0
4402 {
4403 FILE *f = fopen("gfactive.stt", "a");
4404 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4405 fclose(f);
4406 }
4407 #endif
4408
4409 /* For inter frames the current default behavior is that when
4410 * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer
4411 * This is purely an encoder decision at present.
4412 */
4413 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame)
4414 cm->copy_buffer_to_arf = 2;
4415 else
4416 cm->copy_buffer_to_arf = 0;
4417
4418 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4419
4420 #if CONFIG_MULTITHREAD
4421 if (cpi->b_multi_threaded)
4422 {
4423 /* start loopfilter in separate thread */
4424 sem_post(&cpi->h_event_start_lpf);
4425 cpi->b_lpf_running = 1;
4426 }
4427 else
4428 #endif
4429 {
4430 vp8_loopfilter_frame(cpi, cm);
4431 }
4432
4433 update_reference_frames(cpi);
4434
4435 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4436 if (cpi->oxcf.error_resilient_mode)
4437 {
4438 cm->refresh_entropy_probs = 0;
4439 }
4440 #endif
4441
4442 #if CONFIG_MULTITHREAD
4443     /* wait until filter_level is picked so that we can continue with stream packing */
4444 if (cpi->b_multi_threaded)
4445 sem_wait(&cpi->h_event_end_lpf);
4446 #endif
4447
4448 /* build the bitstream */
4449 vp8_pack_bitstream(cpi, dest, dest_end, size);
4450
4451 #if CONFIG_MULTITHREAD
4452 /* if PSNR packets are generated we have to wait for the lpf */
4453 if (cpi->b_lpf_running && cpi->b_calculate_psnr)
4454 {
4455 sem_wait(&cpi->h_event_end_lpf);
4456 cpi->b_lpf_running = 0;
4457 }
4458 #endif
4459
4460 /* Move storing frame_type out of the above loop since it is also
4461 * needed in motion search besides loopfilter */
4462 cm->last_frame_type = cm->frame_type;
4463
4464 /* Update rate control heuristics */
4465 cpi->total_byte_count += (*size);
4466 cpi->projected_frame_size = (*size) << 3;
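    /* (*size) is in bytes, so the shift by 3 above converts it to bits. */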
4467
4468 if (cpi->oxcf.number_of_layers > 1)
4469 {
4470 unsigned int i;
4471 for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
4472 cpi->layer_context[i].total_byte_count += (*size);
4473 }
4474
4475 if (!active_worst_qchanged)
4476 vp8_update_rate_correction_factors(cpi, 2);
4477
4478 cpi->last_q[cm->frame_type] = cm->base_qindex;
4479
4480 if (cm->frame_type == KEY_FRAME)
4481 {
4482 vp8_adjust_key_frame_context(cpi);
4483 }
4484
4485 /* Keep a record of ambient average Q. */
4486 if (cm->frame_type != KEY_FRAME)
4487 cpi->avg_frame_qindex = (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
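    /* The ambient average above is an exponential moving average keeping 3/4
     * of the previous value and blending in 1/4 of the new base_qindex; the
     * +2 provides rounding.
     */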
4488
4489 /* Keep a record from which we can calculate the average Q excluding
4490 * GF updates and key frames
4491 */
4492 if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) ||
4493 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame)))
4494 {
4495 cpi->ni_frames++;
4496
4497 /* Calculate the average Q for normal inter frames (not key or GFU
4498 * frames).
4499 */
4500 if ( cpi->pass == 2 )
4501 {
4502 cpi->ni_tot_qi += Q;
4503 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4504 }
4505 else
4506 {
4507 /* Damp value for first few frames */
4508 if (cpi->ni_frames > 150 )
4509 {
4510 cpi->ni_tot_qi += Q;
4511 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4512 }
4513             /* For one pass, early in the clip ... average the current frame Q
4514              * value with the worst_quality entered by the user as a damping measure
4515              */
4516 else
4517 {
4518 cpi->ni_tot_qi += Q;
4519 cpi->ni_av_qi = ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4520 }
4521
4522             /* If the average Q is higher than what was used in the last
4523              * frame (after going through the recode loop to keep the frame
4524              * size within range) then use the last frame value - 1. The -1
4525              * is designed to stop Q, and hence the data rate, from
4526              * progressively falling away during difficult sections, but at
4527              * the same time reduce the number of iterations around the
4528              * recode loop.
4529              */
4530 if (Q > cpi->ni_av_qi)
4531 cpi->ni_av_qi = Q - 1;
4532 }
4533 }
4534
4535 /* Update the buffer level variable. */
4536 /* Non-viewable frames are a special case and are treated as pure overhead. */
4537 if ( !cm->show_frame )
4538 cpi->bits_off_target -= cpi->projected_frame_size;
4539 else
4540 cpi->bits_off_target += cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
4541
4542 /* Clip the buffer level to the maximum specified buffer size */
4543 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
4544 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4545
4546     /* Rolling monitors of whether we are over- or under-spending, used to
4547      * help regulate min and max Q in two pass.
4548      */
4549 cpi->rolling_target_bits = ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
4550 cpi->rolling_actual_bits = ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
4551 cpi->long_rolling_target_bits = ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
4552 cpi->long_rolling_actual_bits = ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) / 32;
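    /* The short rolling monitors above keep 3/4 of their history per frame and
     * the long rolling monitors keep 31/32, giving two smoothing windows of
     * different lengths over the target versus actual bit counts.
     */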
4553
4554 /* Actual bits spent */
4555 cpi->total_actual_bits += cpi->projected_frame_size;
4556
4557 /* Debug stats */
4558 cpi->total_target_vs_actual += (cpi->this_frame_target - cpi->projected_frame_size);
4559
4560 cpi->buffer_level = cpi->bits_off_target;
4561
4562 /* Propagate values to higher temporal layers */
4563 if (cpi->oxcf.number_of_layers > 1)
4564 {
4565 unsigned int i;
4566
4567 for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
4568 {
4569 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4570 int bits_off_for_this_layer =
4571 (int)(lc->target_bandwidth / lc->framerate -
4572 cpi->projected_frame_size);
4573
4574 lc->bits_off_target += bits_off_for_this_layer;
4575
4576 /* Clip buffer level to maximum buffer size for the layer */
4577 if (lc->bits_off_target > lc->maximum_buffer_size)
4578 lc->bits_off_target = lc->maximum_buffer_size;
4579
4580 lc->total_actual_bits += cpi->projected_frame_size;
4581 lc->total_target_vs_actual += bits_off_for_this_layer;
4582 lc->buffer_level = lc->bits_off_target;
4583 }
4584 }
4585
4586 /* Update bits left to the kf and gf groups to account for overshoot
4587 * or undershoot on these frames
4588 */
4589 if (cm->frame_type == KEY_FRAME)
4590 {
4591 cpi->twopass.kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
4592
4593 if (cpi->twopass.kf_group_bits < 0)
4594 cpi->twopass.kf_group_bits = 0 ;
4595 }
4596 else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame)
4597 {
4598 cpi->twopass.gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
4599
4600 if (cpi->twopass.gf_group_bits < 0)
4601 cpi->twopass.gf_group_bits = 0 ;
4602 }
4603
4604 if (cm->frame_type != KEY_FRAME)
4605 {
4606 if (cpi->common.refresh_alt_ref_frame)
4607 {
4608 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4609 cpi->last_skip_probs_q[2] = cm->base_qindex;
4610 }
4611 else if (cpi->common.refresh_golden_frame)
4612 {
4613 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4614 cpi->last_skip_probs_q[1] = cm->base_qindex;
4615 }
4616 else
4617 {
4618 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4619 cpi->last_skip_probs_q[0] = cm->base_qindex;
4620
4621 /* update the baseline */
4622 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4623
4624 }
4625 }
4626
4627 #if 0 && CONFIG_INTERNAL_STATS
4628 {
4629 FILE *f = fopen("tmp.stt", "a");
4630
4631 vp8_clear_system_state();
4632
4633 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4634 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4635 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4636 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4637 cpi->common.current_video_frame, cpi->this_frame_target,
4638 cpi->projected_frame_size,
4639 (cpi->projected_frame_size - cpi->this_frame_target),
4640 cpi->total_target_vs_actual,
4641 cpi->buffer_level,
4642 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4643 cpi->total_actual_bits, cm->base_qindex,
4644 cpi->active_best_quality, cpi->active_worst_quality,
4645 cpi->ni_av_qi, cpi->cq_target_quality,
4646 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4647 cm->frame_type, cpi->gfu_boost,
4648 cpi->twopass.est_max_qcorrection_factor,
4649 cpi->twopass.bits_left,
4650 cpi->twopass.total_left_stats.coded_error,
4651 (double)cpi->twopass.bits_left /
4652 cpi->twopass.total_left_stats.coded_error,
4653 cpi->tot_recode_hits);
4654 else
4655 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4656 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4657 "%8.2lf %"PRId64" %10.3lf %8d\n",
4658 cpi->common.current_video_frame, cpi->this_frame_target,
4659 cpi->projected_frame_size,
4660 (cpi->projected_frame_size - cpi->this_frame_target),
4661 cpi->total_target_vs_actual,
4662 cpi->buffer_level,
4663 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4664 cpi->total_actual_bits, cm->base_qindex,
4665 cpi->active_best_quality, cpi->active_worst_quality,
4666 cpi->ni_av_qi, cpi->cq_target_quality,
4667 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4668 cm->frame_type, cpi->gfu_boost,
4669 cpi->twopass.est_max_qcorrection_factor,
4670 cpi->twopass.bits_left,
4671 cpi->twopass.total_left_stats.coded_error,
4672 cpi->tot_recode_hits);
4673
4674 fclose(f);
4675
4676 {
4677 FILE *fmodes = fopen("Modes.stt", "a");
4678
4679 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4680 cpi->common.current_video_frame,
4681 cm->frame_type, cm->refresh_golden_frame,
4682 cm->refresh_alt_ref_frame);
4683
4684 fprintf(fmodes, "\n");
4685
4686 fclose(fmodes);
4687 }
4688 }
4689
4690 #endif
4691
4692 if (cm->refresh_golden_frame == 1)
4693 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4694 else
4695 cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_GOLDEN;
4696
4697 if (cm->refresh_alt_ref_frame == 1)
4698 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4699 else
4700 cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_ALTREF;
4701
4702
4703 if (cm->refresh_last_frame & cm->refresh_golden_frame)
4704 /* both refreshed */
4705 cpi->gold_is_last = 1;
4706 else if (cm->refresh_last_frame ^ cm->refresh_golden_frame)
4707 /* 1 refreshed but not the other */
4708 cpi->gold_is_last = 0;
4709
4710 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame)
4711 /* both refreshed */
4712 cpi->alt_is_last = 1;
4713 else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame)
4714 /* 1 refreshed but not the other */
4715 cpi->alt_is_last = 0;
4716
4717 if (cm->refresh_alt_ref_frame & cm->refresh_golden_frame)
4718 /* both refreshed */
4719 cpi->gold_is_alt = 1;
4720 else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame)
4721 /* 1 refreshed but not the other */
4722 cpi->gold_is_alt = 0;
4723
4724 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4725
4726 if (cpi->gold_is_last)
4727 cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4728
4729 if (cpi->alt_is_last)
4730 cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4731
4732 if (cpi->gold_is_alt)
4733 cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4734
4735
4736 if (!cpi->oxcf.error_resilient_mode)
4737 {
4738 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame && (cm->frame_type != KEY_FRAME))
4739 /* Update the alternate reference frame stats as appropriate. */
4740 update_alt_ref_frame_stats(cpi);
4741 else
4742 /* Update the Golden frame stats as appropriate. */
4743 update_golden_frame_stats(cpi);
4744 }
4745
4746 if (cm->frame_type == KEY_FRAME)
4747 {
4748 /* Tell the caller that the frame was coded as a key frame */
4749 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4750
4751 /* As this frame is a key frame the next defaults to an inter frame. */
4752 cm->frame_type = INTER_FRAME;
4753
4754 cpi->last_frame_percent_intra = 100;
4755 }
4756 else
4757 {
4758 *frame_flags = cm->frame_flags&~FRAMEFLAGS_KEY;
4759
4760 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4761 }
4762
4763 /* Clear the one shot update flags for segmentation map and mode/ref
4764 * loop filter deltas.
4765 */
4766 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4767 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4768 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4769
4770
4771     /* Don't increment frame counters if this was an altref buffer update,
4772      * not a real frame.
4773      */
4774 if (cm->show_frame)
4775 {
4776 cm->current_video_frame++;
4777 cpi->frames_since_key++;
4778 cpi->temporal_pattern_counter++;
4779 }
4780
4781 /* reset to normal state now that we are done. */
4782
4783
4784
4785 #if 0
4786 {
4787 char filename[512];
4788 FILE *recon_file;
4789 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4790 recon_file = fopen(filename, "wb");
4791 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4792 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4793 fclose(recon_file);
4794 }
4795 #endif
4796
4797 /* DEBUG */
4798 /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4799
4800
4801 }
4802 #if !(CONFIG_REALTIME_ONLY)
4803 static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned char * dest_end, unsigned int *frame_flags)
4804 {
4805
4806 if (!cpi->common.refresh_alt_ref_frame)
4807 vp8_second_pass(cpi);
4808
4809 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
4810 cpi->twopass.bits_left -= 8 * *size;
4811
4812 if (!cpi->common.refresh_alt_ref_frame)
4813 {
4814 double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
4815 *cpi->oxcf.two_pass_vbrmin_section / 100);
4816 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
4817 }
4818 }
4819 #endif
4820
4821 /* For ARM NEON, d8-d15 are callee-saved registers, and need to be saved. */
4822 #if HAVE_NEON
4823 extern void vp8_push_neon(int64_t *store);
4824 extern void vp8_pop_neon(int64_t *store);
4825 #endif
4826
4827
4828 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time)
4829 {
4830 #if HAVE_NEON
4831 int64_t store_reg[8];
4832 #if CONFIG_RUNTIME_CPU_DETECT
4833 VP8_COMMON *cm = &cpi->common;
4834 #endif
4835 #endif
4836 struct vpx_usec_timer timer;
4837 int res = 0;
4838
4839 #if HAVE_NEON
4840 #if CONFIG_RUNTIME_CPU_DETECT
4841 if (cm->cpu_caps & HAS_NEON)
4842 #endif
4843 {
4844 vp8_push_neon(store_reg);
4845 }
4846 #endif
4847
4848 vpx_usec_timer_start(&timer);
4849
4850 /* Reinit the lookahead buffer if the frame size changes */
4851 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height)
4852 {
4853 assert(cpi->oxcf.lag_in_frames < 2);
4854 dealloc_raw_frame_buffers(cpi);
4855 alloc_raw_frame_buffers(cpi);
4856 }
4857
4858 if(vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
4859 frame_flags, cpi->active_map_enabled ? cpi->active_map : NULL))
4860 res = -1;
4861 vpx_usec_timer_mark(&timer);
4862 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4863
4864 #if HAVE_NEON
4865 #if CONFIG_RUNTIME_CPU_DETECT
4866 if (cm->cpu_caps & HAS_NEON)
4867 #endif
4868 {
4869 vp8_pop_neon(store_reg);
4870 }
4871 #endif
4872
4873 return res;
4874 }
4875
4876
4877 static int frame_is_reference(const VP8_COMP *cpi)
4878 {
4879 const VP8_COMMON *cm = &cpi->common;
4880 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4881
4882 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame
4883 || cm->refresh_golden_frame || cm->refresh_alt_ref_frame
4884 || cm->copy_buffer_to_gf || cm->copy_buffer_to_arf
4885 || cm->refresh_entropy_probs
4886 || xd->mode_ref_lf_delta_update
4887 || xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
4888 }
4889
4890
4891 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, unsigned char *dest_end, int64_t *time_stamp, int64_t *time_end, int flush)
4892 {
4893 #if HAVE_NEON
4894 int64_t store_reg[8];
4895 #endif
4896 VP8_COMMON *cm;
4897 struct vpx_usec_timer tsctimer;
4898 struct vpx_usec_timer ticktimer;
4899 struct vpx_usec_timer cmptimer;
4900 YV12_BUFFER_CONFIG *force_src_buffer;
4901
4902 if (!cpi)
4903 return -1;
4904
4905 cm = &cpi->common;
4906
4907 if (setjmp(cpi->common.error.jmp))
4908 {
4909 cpi->common.error.setjmp = 0;
4910 return VPX_CODEC_CORRUPT_FRAME;
4911 }
4912
4913 force_src_buffer = NULL;
4914 cpi->common.error.setjmp = 1;
4915
4916 #if HAVE_NEON
4917 #if CONFIG_RUNTIME_CPU_DETECT
4918 if (cm->cpu_caps & HAS_NEON)
4919 #endif
4920 {
4921 vp8_push_neon(store_reg);
4922 }
4923 #endif
4924
4925 vpx_usec_timer_start(&cmptimer);
4926
4927 cpi->source = NULL;
4928
4929 #if !(CONFIG_REALTIME_ONLY)
4930 /* Should we code an alternate reference frame */
4931 if (cpi->oxcf.error_resilient_mode == 0 &&
4932 cpi->oxcf.play_alternate &&
4933 cpi->source_alt_ref_pending)
4934 {
4935 if ((cpi->source = vp8_lookahead_peek(cpi->lookahead,
4936 cpi->frames_till_gf_update_due,
4937 PEEK_FORWARD)))
4938 {
4939 cpi->alt_ref_source = cpi->source;
4940 if (cpi->oxcf.arnr_max_frames > 0)
4941 {
4942 vp8_temporal_filter_prepare_c(cpi,
4943 cpi->frames_till_gf_update_due);
4944 force_src_buffer = &cpi->alt_ref_buffer;
4945 }
4946 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4947 cm->refresh_alt_ref_frame = 1;
4948 cm->refresh_golden_frame = 0;
4949 cm->refresh_last_frame = 0;
4950 cm->show_frame = 0;
4951 /* Clear Pending alt Ref flag. */
4952 cpi->source_alt_ref_pending = 0;
4953 cpi->is_src_frame_alt_ref = 0;
4954 }
4955 }
4956 #endif
4957
4958 if (!cpi->source)
4959 {
4960         /* Read the last frame's source if we are encoding the first pass. */
4961 if (cpi->pass == 1 && cm->current_video_frame > 0)
4962 {
4963 if((cpi->last_source = vp8_lookahead_peek(cpi->lookahead, 1,
4964 PEEK_BACKWARD)) == NULL)
4965 return -1;
4966 }
4967
4968
4969 if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush)))
4970 {
4971 cm->show_frame = 1;
4972
4973 cpi->is_src_frame_alt_ref = cpi->alt_ref_source
4974 && (cpi->source == cpi->alt_ref_source);
4975
4976 if(cpi->is_src_frame_alt_ref)
4977 cpi->alt_ref_source = NULL;
4978 }
4979 }
4980
4981 if (cpi->source)
4982 {
4983 cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
4984 cpi->un_scaled_source = cpi->Source;
4985 *time_stamp = cpi->source->ts_start;
4986 *time_end = cpi->source->ts_end;
4987 *frame_flags = cpi->source->flags;
4988
4989 if (cpi->pass == 1 && cm->current_video_frame > 0)
4990 {
4991 cpi->last_frame_unscaled_source = &cpi->last_source->img;
4992 }
4993 }
4994 else
4995 {
4996 *size = 0;
4997 #if !(CONFIG_REALTIME_ONLY)
4998
4999 if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done)
5000 {
5001 vp8_end_first_pass(cpi); /* get last stats packet */
5002 cpi->twopass.first_pass_done = 1;
5003 }
5004
5005 #endif
5006
5007 #if HAVE_NEON
5008 #if CONFIG_RUNTIME_CPU_DETECT
5009 if (cm->cpu_caps & HAS_NEON)
5010 #endif
5011 {
5012 vp8_pop_neon(store_reg);
5013 }
5014 #endif
5015 return -1;
5016 }
5017
5018 if (cpi->source->ts_start < cpi->first_time_stamp_ever)
5019 {
5020 cpi->first_time_stamp_ever = cpi->source->ts_start;
5021 cpi->last_end_time_stamp_seen = cpi->source->ts_start;
5022 }
5023
5024 /* adjust frame rates based on timestamps given */
5025 if (cm->show_frame)
5026 {
5027 int64_t this_duration;
5028 int step = 0;
5029
5030 if (cpi->source->ts_start == cpi->first_time_stamp_ever)
5031 {
5032 this_duration = cpi->source->ts_end - cpi->source->ts_start;
5033 step = 1;
5034 }
5035 else
5036 {
5037 int64_t last_duration;
5038
5039 this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
5040 last_duration = cpi->last_end_time_stamp_seen
5041 - cpi->last_time_stamp_seen;
5042 /* do a step update if the duration changes by 10% */
5043 if (last_duration)
5044 step = (int)(((this_duration - last_duration) *
5045 10 / last_duration));
5046 }
5047
5048 if (this_duration)
5049 {
5050 if (step)
5051 cpi->ref_framerate = 10000000.0 / this_duration;
5052 else
5053 {
5054 double avg_duration, interval;
5055
5056 /* Average this frame's rate into the last second's average
5057 * frame rate. If we haven't seen 1 second yet, then average
5058 * over the whole interval seen.
5059 */
5060 interval = (double)(cpi->source->ts_end -
5061 cpi->first_time_stamp_ever);
5062 if(interval > 10000000.0)
5063 interval = 10000000;
5064
5065 avg_duration = 10000000.0 / cpi->ref_framerate;
5066 avg_duration *= (interval - avg_duration + this_duration);
5067 avg_duration /= interval;
5068
5069 cpi->ref_framerate = 10000000.0 / avg_duration;
5070 }
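            /* The averaging above folds this frame's duration into the mean
             * duration seen over roughly the last second (interval is capped
             * at 10000000 timestamp ticks, i.e. one second), weighting the new
             * sample by avg_duration / interval.
             */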
5071
5072 if (cpi->oxcf.number_of_layers > 1)
5073 {
5074 unsigned int i;
5075
5076 /* Update frame rates for each layer */
5077 assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
5078 for (i=0; i<cpi->oxcf.number_of_layers; i++)
5079 {
5080 LAYER_CONTEXT *lc = &cpi->layer_context[i];
5081 lc->framerate = cpi->ref_framerate /
5082 cpi->oxcf.rate_decimator[i];
5083 }
5084 }
5085 else
5086 vp8_new_framerate(cpi, cpi->ref_framerate);
5087 }
5088
5089 cpi->last_time_stamp_seen = cpi->source->ts_start;
5090 cpi->last_end_time_stamp_seen = cpi->source->ts_end;
5091 }
5092
5093 if (cpi->oxcf.number_of_layers > 1)
5094 {
5095 int layer;
5096
5097 update_layer_contexts (cpi);
5098
5099 /* Restore layer specific context & set frame rate */
5100 layer = cpi->oxcf.layer_id[
5101 cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
5102 restore_layer_context (cpi, layer);
5103 vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
5104 }
5105
5106 if (cpi->compressor_speed == 2)
5107 {
5108 vpx_usec_timer_start(&tsctimer);
5109 vpx_usec_timer_start(&ticktimer);
5110 }
5111
5112 cpi->lf_zeromv_pct = (cpi->zeromv_count * 100)/cm->MBs;
5113
5114 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
5115 {
5116 int i;
5117 const int num_part = (1 << cm->multi_token_partition);
5118 /* the available bytes in dest */
5119 const unsigned long dest_size = dest_end - dest;
5120 const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);
5121
5122 unsigned char *dp = dest;
5123
5124 cpi->partition_d[0] = dp;
5125 dp += dest_size/10; /* reserve 1/10 for control partition */
5126 cpi->partition_d_end[0] = dp;
5127
5128 for(i = 0; i < num_part; i++)
5129 {
5130 cpi->partition_d[i + 1] = dp;
5131 dp += tok_part_buff_size;
5132 cpi->partition_d_end[i + 1] = dp;
5133 }
5134 }
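    /* The layout above reserves the first 1/10 of dest for the control
     * partition and splits the remaining 9/10 evenly across the token
     * partitions.
     */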
5135 #endif
5136
5137 /* start with a 0 size frame */
5138 *size = 0;
5139
5140 /* Clear down mmx registers */
5141 vp8_clear_system_state();
5142
5143 cm->frame_type = INTER_FRAME;
5144 cm->frame_flags = *frame_flags;
5145
5146 #if 0
5147
5148 if (cm->refresh_alt_ref_frame)
5149 {
5150 cm->refresh_golden_frame = 0;
5151 cm->refresh_last_frame = 0;
5152 }
5153 else
5154 {
5155 cm->refresh_golden_frame = 0;
5156 cm->refresh_last_frame = 1;
5157 }
5158
5159 #endif
5160 /* find a free buffer for the new frame */
5161 {
5162 int i = 0;
5163 for(; i < NUM_YV12_BUFFERS; i++)
5164 {
5165 if(!cm->yv12_fb[i].flags)
5166 {
5167 cm->new_fb_idx = i;
5168 break;
5169 }
5170 }
5171
5172 assert(i < NUM_YV12_BUFFERS );
5173 }
5174 #if !(CONFIG_REALTIME_ONLY)
5175
5176 if (cpi->pass == 1)
5177 {
5178 Pass1Encode(cpi, size, dest, frame_flags);
5179 }
5180 else if (cpi->pass == 2)
5181 {
5182 Pass2Encode(cpi, size, dest, dest_end, frame_flags);
5183 }
5184 else
5185 #endif
5186 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
5187
5188 if (cpi->compressor_speed == 2)
5189 {
5190 unsigned int duration, duration2;
5191 vpx_usec_timer_mark(&tsctimer);
5192 vpx_usec_timer_mark(&ticktimer);
5193
5194 duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
5195 duration2 = (unsigned int)((double)duration / 2);
5196
5197 if (cm->frame_type != KEY_FRAME)
5198 {
5199 if (cpi->avg_encode_time == 0)
5200 cpi->avg_encode_time = duration;
5201 else
5202 cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
5203 }
5204
5205 if (duration2)
5206 {
5207 {
5208
5209 if (cpi->avg_pick_mode_time == 0)
5210 cpi->avg_pick_mode_time = duration2;
5211 else
5212 cpi->avg_pick_mode_time = (7 * cpi->avg_pick_mode_time + duration2) >> 3;
5213 }
5214 }
5215
5216 }
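    /* Both running averages above use the same smoothing: 7/8 of the old value
     * plus 1/8 of the new measurement, with duration2 taken as half of the
     * total tick time.
     */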
5217
5218 if (cm->refresh_entropy_probs == 0)
5219 {
5220 vpx_memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
5221 }
5222
5223 /* Save the contexts separately for alt ref, gold and last. */
5224 /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
5225 if(cm->refresh_alt_ref_frame)
5226 vpx_memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
5227
5228 if(cm->refresh_golden_frame)
5229 vpx_memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
5230
5231 if(cm->refresh_last_frame)
5232 vpx_memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
5233
5234     /* if it's a dropped frame, honor the requests on subsequent frames */
5235 if (*size > 0)
5236 {
5237 cpi->droppable = !frame_is_reference(cpi);
5238
5239 /* return to normal state */
5240 cm->refresh_entropy_probs = 1;
5241 cm->refresh_alt_ref_frame = 0;
5242 cm->refresh_golden_frame = 0;
5243 cm->refresh_last_frame = 1;
5244 cm->frame_type = INTER_FRAME;
5245
5246 }
5247
5248 /* Save layer specific state */
5249 if (cpi->oxcf.number_of_layers > 1)
5250 save_layer_context (cpi);
5251
5252 vpx_usec_timer_mark(&cmptimer);
5253 cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
5254
5255 if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame)
5256 {
5257 generate_psnr_packet(cpi);
5258 }
5259
5260 #if CONFIG_INTERNAL_STATS
5261
5262 if (cpi->pass != 1)
5263 {
5264 cpi->bytes += *size;
5265
5266 if (cm->show_frame)
5267 {
5268 cpi->common.show_frame_mi = cpi->common.mi;
5269 cpi->count ++;
5270
5271 if (cpi->b_calculate_psnr)
5272 {
5273 uint64_t ye,ue,ve;
5274 double frame_psnr;
5275 YV12_BUFFER_CONFIG *orig = cpi->Source;
5276 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
5277 int y_samples = orig->y_height * orig->y_width ;
5278 int uv_samples = orig->uv_height * orig->uv_width ;
5279 int t_samples = y_samples + 2 * uv_samples;
5280 double sq_error, sq_error2;
5281
5282 ye = calc_plane_error(orig->y_buffer, orig->y_stride,
5283 recon->y_buffer, recon->y_stride, orig->y_width, orig->y_height);
5284
5285 ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
5286 recon->u_buffer, recon->uv_stride, orig->uv_width, orig->uv_height);
5287
5288 ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
5289 recon->v_buffer, recon->uv_stride, orig->uv_width, orig->uv_height);
5290
5291 sq_error = (double)(ye + ue + ve);
5292
5293 frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);
5294
5295 cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5296 cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5297 cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5298 cpi->total_sq_error += sq_error;
5299 cpi->total += frame_psnr;
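                /* vpx_sse_to_psnr() converts a summed squared error into a
                 * PSNR value, computed (as assumed here) as
                 * 10 * log10(samples * peak^2 / sse) with peak = 255 for
                 * 8-bit data.
                 */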
5300 #if CONFIG_POSTPROC
5301 {
5302 YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
5303 double frame_psnr2, frame_ssim2 = 0;
5304 double weight = 0;
5305
5306 vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0);
5307 vp8_clear_system_state();
5308
5309 ye = calc_plane_error(orig->y_buffer, orig->y_stride,
5310 pp->y_buffer, pp->y_stride, orig->y_width, orig->y_height);
5311
5312 ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
5313 pp->u_buffer, pp->uv_stride, orig->uv_width, orig->uv_height);
5314
5315 ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
5316 pp->v_buffer, pp->uv_stride, orig->uv_width, orig->uv_height);
5317
5318 sq_error2 = (double)(ye + ue + ve);
5319
5320 frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);
5321
5322 cpi->totalp_y += vpx_sse_to_psnr(y_samples,
5323 255.0, (double)ye);
5324 cpi->totalp_u += vpx_sse_to_psnr(uv_samples,
5325 255.0, (double)ue);
5326 cpi->totalp_v += vpx_sse_to_psnr(uv_samples,
5327 255.0, (double)ve);
5328 cpi->total_sq_error2 += sq_error2;
5329 cpi->totalp += frame_psnr2;
5330
5331 frame_ssim2 = vp8_calc_ssim(cpi->Source,
5332 &cm->post_proc_buffer, 1, &weight);
5333
5334 cpi->summed_quality += frame_ssim2 * weight;
5335 cpi->summed_weights += weight;
5336
5337 if (cpi->oxcf.number_of_layers > 1)
5338 {
5339 unsigned int i;
5340
5341 for (i=cpi->current_layer;
5342 i<cpi->oxcf.number_of_layers; i++)
5343 {
5344 cpi->frames_in_layer[i]++;
5345
5346 cpi->bytes_in_layer[i] += *size;
5347 cpi->sum_psnr[i] += frame_psnr;
5348 cpi->sum_psnr_p[i] += frame_psnr2;
5349 cpi->total_error2[i] += sq_error;
5350 cpi->total_error2_p[i] += sq_error2;
5351 cpi->sum_ssim[i] += frame_ssim2 * weight;
5352 cpi->sum_weights[i] += weight;
5353 }
5354 }
5355 }
5356 #endif
5357 }
5358
5359 if (cpi->b_calculate_ssimg)
5360 {
5361 double y, u, v, frame_all;
5362 frame_all = vp8_calc_ssimg(cpi->Source, cm->frame_to_show,
5363 &y, &u, &v);
5364
5365 if (cpi->oxcf.number_of_layers > 1)
5366 {
5367 unsigned int i;
5368
5369 for (i=cpi->current_layer;
5370 i<cpi->oxcf.number_of_layers; i++)
5371 {
5372 if (!cpi->b_calculate_psnr)
5373 cpi->frames_in_layer[i]++;
5374
5375 cpi->total_ssimg_y_in_layer[i] += y;
5376 cpi->total_ssimg_u_in_layer[i] += u;
5377 cpi->total_ssimg_v_in_layer[i] += v;
5378 cpi->total_ssimg_all_in_layer[i] += frame_all;
5379 }
5380 }
5381 else
5382 {
5383 cpi->total_ssimg_y += y;
5384 cpi->total_ssimg_u += u;
5385 cpi->total_ssimg_v += v;
5386 cpi->total_ssimg_all += frame_all;
5387 }
5388 }
5389
5390 }
5391 }
5392
5393 #if 0
5394
5395 if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
5396 {
5397 skiptruecount += cpi->skip_true_count;
5398 skipfalsecount += cpi->skip_false_count;
5399 }
5400
5401 #endif
5402 #if 0
5403
5404 if (cpi->pass != 1)
5405 {
5406 FILE *f = fopen("skip.stt", "a");
5407 fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);
5408
5409 if (cpi->is_src_frame_alt_ref == 1)
5410 fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);
5411
5412 fclose(f);
5413 }
5414
5415 #endif
5416 #endif
5417
5418 #if HAVE_NEON
5419 #if CONFIG_RUNTIME_CPU_DETECT
5420 if (cm->cpu_caps & HAS_NEON)
5421 #endif
5422 {
5423 vp8_pop_neon(store_reg);
5424 }
5425 #endif
5426
5427 cpi->common.error.setjmp = 0;
5428
5429 return 0;
5430 }
5431
5432 int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags)
5433 {
5434 if (cpi->common.refresh_alt_ref_frame)
5435 return -1;
5436 else
5437 {
5438 int ret;
5439
5440 #if CONFIG_MULTITHREAD
5441 if(cpi->b_lpf_running)
5442 {
5443 sem_wait(&cpi->h_event_end_lpf);
5444 cpi->b_lpf_running = 0;
5445 }
5446 #endif
5447
5448 #if CONFIG_POSTPROC
5449 cpi->common.show_frame_mi = cpi->common.mi;
5450 ret = vp8_post_proc_frame(&cpi->common, dest, flags);
5451 #else
5452
5453 if (cpi->common.frame_to_show)
5454 {
5455 *dest = *cpi->common.frame_to_show;
5456 dest->y_width = cpi->common.Width;
5457 dest->y_height = cpi->common.Height;
5458 dest->uv_height = cpi->common.Height / 2;
5459 ret = 0;
5460 }
5461 else
5462 {
5463 ret = -1;
5464 }
5465
5466 #endif
5467 vp8_clear_system_state();
5468 return ret;
5469 }
5470 }
5471
5472 int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4])
5473 {
5474 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
5475 int internal_delta_q[MAX_MB_SEGMENTS];
5476 const int range = 63;
5477 int i;
5478
5479 // This method is currently incompatible with the cyclic refresh method
5480 if ( cpi->cyclic_refresh_mode_enabled )
5481 return -1;
5482
5483 // Check number of rows and columns match
5484 if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols)
5485 return -1;
5486
5487 // Range check the delta Q values and convert the external Q range values
5488 // to internal ones.
5489 if ( (abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) ||
5490 (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range) )
5491 return -1;
5492
5493 // Range check the delta lf values
5494 if ( (abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) ||
5495 (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range) )
5496 return -1;
5497
5498 if (!map)
5499 {
5500 disable_segmentation(cpi);
5501 return 0;
5502 }
5503
5504 // Translate the external delta q values to internal values.
5505 for ( i = 0; i < MAX_MB_SEGMENTS; i++ )
5506 internal_delta_q[i] =
5507 ( delta_q[i] >= 0 ) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
5508
5509 /* Set the segmentation Map */
5510 set_segmentation_map(cpi, map);
5511
5512 /* Activate segmentation. */
5513 enable_segmentation(cpi);
5514
5515 /* Set up the quant segment data */
5516 feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
5517 feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
5518 feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
5519 feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];
5520
5521     /* Set up the loop filter segment data */
5522 feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
5523 feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
5524 feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
5525 feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];
5526
5527 cpi->segment_encode_breakout[0] = threshold[0];
5528 cpi->segment_encode_breakout[1] = threshold[1];
5529 cpi->segment_encode_breakout[2] = threshold[2];
5530 cpi->segment_encode_breakout[3] = threshold[3];
5531
5532 /* Initialise the feature data structure */
5533 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
5534
5535 return 0;
5536 }
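
/* Illustrative (hypothetical) usage of vp8_set_roimap(): with one segment id
 * (0..3) per macroblock, a caller could boost quality in segment 0 like so:
 *
 *     unsigned char map[rows * cols];        // one segment id per MB, caller-filled
 *     int dq[4]  = { -10, 0, 0, 0 };         // external delta Q values, range +/-63
 *     int dlf[4] = { 0, 0, 0, 0 };           // loop filter deltas, range +/-63
 *     unsigned int thr[4] = { 0, 0, 0, 0 };  // per-segment encode breakout thresholds
 *     vp8_set_roimap(cpi, map, rows, cols, dq, dlf, thr);
 *
 * where rows and cols must match the encoder's macroblock dimensions.
 */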
5537
5538 int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows, unsigned int cols)
5539 {
5540 if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols)
5541 {
5542 if (map)
5543 {
5544 vpx_memcpy(cpi->active_map, map, rows * cols);
5545 cpi->active_map_enabled = 1;
5546 }
5547 else
5548 cpi->active_map_enabled = 0;
5549
5550 return 0;
5551 }
5552 else
5553 {
5554 return -1 ;
5555 }
5556 }
5557
5558 int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode, VPX_SCALING vert_mode)
5559 {
5560 if (horiz_mode <= ONETWO)
5561 cpi->common.horiz_scale = horiz_mode;
5562 else
5563 return -1;
5564
5565 if (vert_mode <= ONETWO)
5566 cpi->common.vert_scale = vert_mode;
5567 else
5568 return -1;
5569
5570 return 0;
5571 }
5572
5573
5574
5575 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest)
5576 {
5577 int i, j;
5578 int Total = 0;
5579
5580 unsigned char *src = source->y_buffer;
5581 unsigned char *dst = dest->y_buffer;
5582
5583     /* Loop through the Y plane of the raw and reconstructed data, summing
5584      * the squared differences.
5585      */
5586 for (i = 0; i < source->y_height; i += 16)
5587 {
5588 for (j = 0; j < source->y_width; j += 16)
5589 {
5590 unsigned int sse;
5591 Total += vp8_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
5592 }
5593
5594 src += 16 * source->y_stride;
5595 dst += 16 * dest->y_stride;
5596 }
5597
5598 return Total;
5599 }
5600
5601
5602 int vp8_get_quantizer(VP8_COMP *cpi)
5603 {
5604 return cpi->common.base_qindex;
5605 }
5606