1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11
12 #include "vpx_config.h"
13 #include "./vpx_scale_rtcd.h"
14 #include "vp8/common/onyxc_int.h"
15 #include "vp8/common/blockd.h"
16 #include "onyx_int.h"
17 #include "vp8/common/systemdependent.h"
18 #include "quantize.h"
19 #include "vp8/common/alloccommon.h"
20 #include "mcomp.h"
21 #include "firstpass.h"
22 #include "vpx/internal/vpx_psnr.h"
23 #include "vpx_scale/vpx_scale.h"
24 #include "vp8/common/extend.h"
25 #include "ratectrl.h"
26 #include "vp8/common/quant_common.h"
27 #include "segmentation.h"
28 #if CONFIG_POSTPROC
29 #include "vp8/common/postproc.h"
30 #endif
31 #include "vpx_mem/vpx_mem.h"
32 #include "vp8/common/swapyv12buffer.h"
33 #include "vp8/common/threading.h"
34 #include "vpx_ports/vpx_timer.h"
35 #if ARCH_ARM
36 #include "vpx_ports/arm.h"
37 #endif
38 #if CONFIG_MULTI_RES_ENCODING
39 #include "mr_dissim.h"
40 #endif
41 #include "encodeframe.h"
42
43 #include <math.h>
44 #include <stdio.h>
45 #include <limits.h>
46
47 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
48 extern int vp8_update_coef_context(VP8_COMP *cpi);
49 extern void vp8_update_coef_probs(VP8_COMP *cpi);
50 #endif
51
52 extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
53 extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val);
54 extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi);
55
56 extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post, int filt_lvl, int low_var_thresh, int flag);
57 extern void print_parms(VP8_CONFIG *ocf, char *filenam);
58 extern unsigned int vp8_get_processor_freq();
59 extern void print_tree_update_probs();
60 extern int vp8cx_create_encoder_threads(VP8_COMP *cpi);
61 extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi);
62
63 int vp8_estimate_entropy_savings(VP8_COMP *cpi);
64
65 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
66
67 extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
68
69 static void set_default_lf_deltas(VP8_COMP *cpi);
70
71 extern const int vp8_gf_interval_table[101];
72
73 #if CONFIG_INTERNAL_STATS
74 #include "math.h"
75
76 extern double vp8_calc_ssim
77 (
78 YV12_BUFFER_CONFIG *source,
79 YV12_BUFFER_CONFIG *dest,
80 int lumamask,
81 double *weight
82 );
83
84
85 extern double vp8_calc_ssimg
86 (
87 YV12_BUFFER_CONFIG *source,
88 YV12_BUFFER_CONFIG *dest,
89 double *ssim_y,
90 double *ssim_u,
91 double *ssim_v
92 );
93
94
95 #endif
96
97
98 #ifdef OUTPUT_YUV_SRC
99 FILE *yuv_file;
100 #endif
101 #ifdef OUTPUT_YUV_DENOISED
102 FILE *yuv_denoised_file;
103 #endif
104
105 #if 0
106 FILE *framepsnr;
107 FILE *kf_list;
108 FILE *keyfile;
109 #endif
110
111 #if 0
112 extern int skip_true_count;
113 extern int skip_false_count;
114 #endif
115
116
117 #ifdef VP8_ENTROPY_STATS
118 extern int intra_mode_stats[10][10][10];
119 #endif
120
121 #ifdef SPEEDSTATS
122 unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
123 unsigned int tot_pm = 0;
124 unsigned int cnt_pm = 0;
125 unsigned int tot_ef = 0;
126 unsigned int cnt_ef = 0;
127 #endif
128
129 #ifdef MODE_STATS
130 extern unsigned __int64 Sectionbits[50];
131 extern int y_modes[5] ;
132 extern int uv_modes[4] ;
133 extern int b_modes[10] ;
134
135 extern int inter_y_modes[10] ;
136 extern int inter_uv_modes[4] ;
137 extern unsigned int inter_b_modes[15];
138 #endif
139
140 extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
141
142 extern const int qrounding_factors[129];
143 extern const int qzbin_factors[129];
144 extern void vp8cx_init_quantizer(VP8_COMP *cpi);
145 extern const int vp8cx_base_skip_false_prob[128];
146
147 /* Tables relating active max Q to active min Q */
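/* (Illustrative note: these tables appear to be indexed by the active worst
 * ("max") quality for the frame, and the entry gives a floor for the active
 * best ("min") quality, with separate mappings for key frames, golden/alt-ref
 * frames and ordinary inter frames depending on the amount of motion.) */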
148 static const unsigned char kf_low_motion_minq[QINDEX_RANGE] =
149 {
150 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
151 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
152 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
153 0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,
154 3,3,3,3,3,3,4,4,4,5,5,5,5,5,6,6,
155 6,6,7,7,8,8,8,8,9,9,10,10,10,10,11,11,
156 11,11,12,12,13,13,13,13,14,14,15,15,15,15,16,16,
157 16,16,17,17,18,18,18,18,19,20,20,21,21,22,23,23
158 };
159 static const unsigned char kf_high_motion_minq[QINDEX_RANGE] =
160 {
161 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
162 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
163 1,1,1,1,1,1,1,1,2,2,2,2,3,3,3,3,
164 3,3,3,3,4,4,4,4,5,5,5,5,5,5,6,6,
165 6,6,7,7,8,8,8,8,9,9,10,10,10,10,11,11,
166 11,11,12,12,13,13,13,13,14,14,15,15,15,15,16,16,
167 16,16,17,17,18,18,18,18,19,19,20,20,20,20,21,21,
168 21,21,22,22,23,23,24,25,25,26,26,27,28,28,29,30
169 };
170 static const unsigned char gf_low_motion_minq[QINDEX_RANGE] =
171 {
172 0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,
173 3,3,3,3,4,4,4,4,5,5,5,5,6,6,6,6,
174 7,7,7,7,8,8,8,8,9,9,9,9,10,10,10,10,
175 11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18,
176 19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,
177 27,27,28,28,29,29,30,30,31,31,32,32,33,33,34,34,
178 35,35,36,36,37,37,38,38,39,39,40,40,41,41,42,42,
179 43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58
180 };
181 static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] =
182 {
183 0,0,0,0,1,1,1,1,1,1,2,2,3,3,3,4,
184 4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9,
185 9,10,10,10,10,11,11,11,12,12,12,12,13,13,13,14,
186 14,14,15,15,16,16,17,17,18,18,19,19,20,20,21,21,
187 22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,
188 30,30,31,31,32,32,33,33,34,34,35,35,36,36,37,37,
189 38,39,39,40,40,41,41,42,42,43,43,44,45,46,47,48,
190 49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64
191 };
192 static const unsigned char gf_high_motion_minq[QINDEX_RANGE] =
193 {
194 0,0,0,0,1,1,1,1,1,2,2,2,3,3,3,4,
195 4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9,
196 9,10,10,10,11,11,12,12,13,13,14,14,15,15,16,16,
197 17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,
198 25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32,
199 33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,40,
200 41,41,42,42,43,44,45,46,47,48,49,50,51,52,53,54,
201 55,56,57,58,59,60,62,64,66,68,70,72,74,76,78,80
202 };
203 static const unsigned char inter_minq[QINDEX_RANGE] =
204 {
205 0,0,1,1,2,3,3,4,4,5,6,6,7,8,8,9,
206 9,10,11,11,12,13,13,14,15,15,16,17,17,18,19,20,
207 20,21,22,22,23,24,24,25,26,27,27,28,29,30,30,31,
208 32,33,33,34,35,36,36,37,38,39,39,40,41,42,42,43,
209 44,45,46,46,47,48,49,50,50,51,52,53,54,55,55,56,
210 57,58,59,60,60,61,62,63,64,65,66,67,67,68,69,70,
211 71,72,73,74,75,75,76,77,78,79,80,81,82,83,84,85,
212 86,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100
213 };
214
215 #ifdef PACKET_TESTING
216 extern FILE *vpxlogc;
217 #endif
218
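/* Temporal scalability: one LAYER_CONTEXT per layer holds that layer's
 * rate-control state, which is swapped in and out of the VP8_COMP as the
 * target layer changes from frame to frame. */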
219 static void save_layer_context(VP8_COMP *cpi)
220 {
221 LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer];
222
223 /* Save layer dependent coding state */
224 lc->target_bandwidth = cpi->target_bandwidth;
225 lc->starting_buffer_level = cpi->oxcf.starting_buffer_level;
226 lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level;
227 lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size;
228 lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms;
229 lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms;
230 lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms;
231 lc->buffer_level = cpi->buffer_level;
232 lc->bits_off_target = cpi->bits_off_target;
233 lc->total_actual_bits = cpi->total_actual_bits;
234 lc->worst_quality = cpi->worst_quality;
235 lc->active_worst_quality = cpi->active_worst_quality;
236 lc->best_quality = cpi->best_quality;
237 lc->active_best_quality = cpi->active_best_quality;
238 lc->ni_av_qi = cpi->ni_av_qi;
239 lc->ni_tot_qi = cpi->ni_tot_qi;
240 lc->ni_frames = cpi->ni_frames;
241 lc->avg_frame_qindex = cpi->avg_frame_qindex;
242 lc->rate_correction_factor = cpi->rate_correction_factor;
243 lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor;
244 lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor;
245 lc->zbin_over_quant = cpi->mb.zbin_over_quant;
246 lc->inter_frame_target = cpi->inter_frame_target;
247 lc->total_byte_count = cpi->total_byte_count;
248 lc->filter_level = cpi->common.filter_level;
249
250 lc->last_frame_percent_intra = cpi->last_frame_percent_intra;
251
252 memcpy (lc->count_mb_ref_frame_usage,
253 cpi->mb.count_mb_ref_frame_usage,
254 sizeof(cpi->mb.count_mb_ref_frame_usage));
255 }
256
257 static void restore_layer_context(VP8_COMP *cpi, const int layer)
258 {
259 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
260
261 /* Restore layer dependent coding state */
262 cpi->current_layer = layer;
263 cpi->target_bandwidth = lc->target_bandwidth;
264 cpi->oxcf.target_bandwidth = lc->target_bandwidth;
265 cpi->oxcf.starting_buffer_level = lc->starting_buffer_level;
266 cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level;
267 cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size;
268 cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms;
269 cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms;
270 cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms;
271 cpi->buffer_level = lc->buffer_level;
272 cpi->bits_off_target = lc->bits_off_target;
273 cpi->total_actual_bits = lc->total_actual_bits;
274 cpi->active_worst_quality = lc->active_worst_quality;
275 cpi->active_best_quality = lc->active_best_quality;
276 cpi->ni_av_qi = lc->ni_av_qi;
277 cpi->ni_tot_qi = lc->ni_tot_qi;
278 cpi->ni_frames = lc->ni_frames;
279 cpi->avg_frame_qindex = lc->avg_frame_qindex;
280 cpi->rate_correction_factor = lc->rate_correction_factor;
281 cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor;
282 cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor;
283 cpi->mb.zbin_over_quant = lc->zbin_over_quant;
284 cpi->inter_frame_target = lc->inter_frame_target;
285 cpi->total_byte_count = lc->total_byte_count;
286 cpi->common.filter_level = lc->filter_level;
287
288 cpi->last_frame_percent_intra = lc->last_frame_percent_intra;
289
290 memcpy (cpi->mb.count_mb_ref_frame_usage,
291 lc->count_mb_ref_frame_usage,
292 sizeof(cpi->mb.count_mb_ref_frame_usage));
293 }
294
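/* Scale val by num/denom using 64-bit intermediates to avoid overflow.
 * Illustrative example: rescale(500, 800000, 1000) == 400000, i.e. a
 * 500 ms buffer level at 800 kbit/s corresponds to 400000 bits. */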
295 static int rescale(int val, int num, int denom)
296 {
297 int64_t llnum = num;
298 int64_t llden = denom;
299 int64_t llval = val;
300
301 return (int)(llval * llnum / llden);
302 }
303
304 static void init_temporal_layer_context(VP8_COMP *cpi,
305 VP8_CONFIG *oxcf,
306 const int layer,
307 double prev_layer_framerate)
308 {
309 LAYER_CONTEXT *lc = &cpi->layer_context[layer];
310
311 lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer];
312 lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000;
313
314 lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level;
315 lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level;
316 lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size;
317
318 lc->starting_buffer_level =
319 rescale((int)(oxcf->starting_buffer_level),
320 lc->target_bandwidth, 1000);
321
322 if (oxcf->optimal_buffer_level == 0)
323 lc->optimal_buffer_level = lc->target_bandwidth / 8;
324 else
325 lc->optimal_buffer_level =
326 rescale((int)(oxcf->optimal_buffer_level),
327 lc->target_bandwidth, 1000);
328
329 if (oxcf->maximum_buffer_size == 0)
330 lc->maximum_buffer_size = lc->target_bandwidth / 8;
331 else
332 lc->maximum_buffer_size =
333 rescale((int)(oxcf->maximum_buffer_size),
334 lc->target_bandwidth, 1000);
335
336 /* Work out the average size of a frame within this layer */
337 if (layer > 0)
338 lc->avg_frame_size_for_layer =
339 (int)((cpi->oxcf.target_bitrate[layer] -
340 cpi->oxcf.target_bitrate[layer-1]) * 1000 /
341 (lc->framerate - prev_layer_framerate));
342
343 lc->active_worst_quality = cpi->oxcf.worst_allowed_q;
344 lc->active_best_quality = cpi->oxcf.best_allowed_q;
345 lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
346
347 lc->buffer_level = lc->starting_buffer_level;
348 lc->bits_off_target = lc->starting_buffer_level;
349
350 lc->total_actual_bits = 0;
351 lc->ni_av_qi = 0;
352 lc->ni_tot_qi = 0;
353 lc->ni_frames = 0;
354 lc->rate_correction_factor = 1.0;
355 lc->key_frame_rate_correction_factor = 1.0;
356 lc->gf_rate_correction_factor = 1.0;
357 lc->inter_frame_target = 0;
358 }
359
360 // Upon a run-time change in temporal layers, reset the layer context parameters
361 // for any "new" layers. For "existing" layers, let them inherit the parameters
362 // from the previous layer state (at the same layer #). In future we may want
363 // to better map the previous layer state(s) to the "new" ones.
364 static void reset_temporal_layer_change(VP8_COMP *cpi,
365 VP8_CONFIG *oxcf,
366 const int prev_num_layers)
367 {
368 int i;
369 double prev_layer_framerate = 0;
370 const int curr_num_layers = cpi->oxcf.number_of_layers;
371 // If the previous state was 1 layer, get current layer context from cpi.
372 // We need this to set the layer context for the new layers below.
373 if (prev_num_layers == 1)
374 {
375 cpi->current_layer = 0;
376 save_layer_context(cpi);
377 }
378 for (i = 0; i < curr_num_layers; i++)
379 {
380 LAYER_CONTEXT *lc = &cpi->layer_context[i];
381 if (i >= prev_num_layers)
382 {
383 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
384 }
385 // The initial buffer levels are set based on their starting levels.
386 // We could set the buffer levels based on the previous state (normalized
387 // properly by the layer bandwidths) but we would need to keep track of
388 // the previous set of layer bandwidths (i.e., target_bitrate[i])
389 // before the layer change. For now, reset to the starting levels.
390 lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms *
391 cpi->oxcf.target_bitrate[i];
392 lc->bits_off_target = lc->buffer_level;
393         // TODO(marpan): Should we set the rate_correction_factor and
394 // active_worst/best_quality to values derived from the previous layer
395 // state (to smooth-out quality dips/rate fluctuation at transition)?
396
397 // We need to treat the 1 layer case separately: oxcf.target_bitrate[i]
398         // is not set for 1 layer, and restore_layer_context()/save_layer_context()
399 // are not called in the encoding loop, so we need to call it here to
400 // pass the layer context state to |cpi|.
401 if (curr_num_layers == 1)
402 {
403 lc->target_bandwidth = cpi->oxcf.target_bandwidth;
404 lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms *
405 lc->target_bandwidth / 1000;
406 lc->bits_off_target = lc->buffer_level;
407 restore_layer_context(cpi, 0);
408 }
409 prev_layer_framerate = cpi->output_framerate /
410 cpi->oxcf.rate_decimator[i];
411 }
412 }
413
414 static void setup_features(VP8_COMP *cpi)
415 {
416 // If segmentation enabled set the update flags
417 if ( cpi->mb.e_mbd.segmentation_enabled )
418 {
419 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
420 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
421 }
422 else
423 {
424 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
425 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
426 }
427
428 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0;
429 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
430 vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
431 vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
432 vpx_memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
433 vpx_memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
434
435 set_default_lf_deltas(cpi);
436
437 }
438
439
440 static void dealloc_raw_frame_buffers(VP8_COMP *cpi);
441
442
443 static void dealloc_compressor_data(VP8_COMP *cpi)
444 {
445 vpx_free(cpi->tplist);
446 cpi->tplist = NULL;
447
448 /* Delete last frame MV storage buffers */
449 vpx_free(cpi->lfmv);
450 cpi->lfmv = 0;
451
452 vpx_free(cpi->lf_ref_frame_sign_bias);
453 cpi->lf_ref_frame_sign_bias = 0;
454
455 vpx_free(cpi->lf_ref_frame);
456 cpi->lf_ref_frame = 0;
457
458     /* Delete segmentation map */
459 vpx_free(cpi->segmentation_map);
460 cpi->segmentation_map = 0;
461
462 vpx_free(cpi->active_map);
463 cpi->active_map = 0;
464
465 vp8_de_alloc_frame_buffers(&cpi->common);
466
467 vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame);
468 vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source);
469 dealloc_raw_frame_buffers(cpi);
470
471 vpx_free(cpi->tok);
472 cpi->tok = 0;
473
474 /* Structure used to monitor GF usage */
475 vpx_free(cpi->gf_active_flags);
476 cpi->gf_active_flags = 0;
477
478 /* Activity mask based per mb zbin adjustments */
479 vpx_free(cpi->mb_activity_map);
480 cpi->mb_activity_map = 0;
481
482 vpx_free(cpi->mb.pip);
483 cpi->mb.pip = 0;
484
485 #if CONFIG_MULTITHREAD
486 vpx_free(cpi->mt_current_mb_col);
487 cpi->mt_current_mb_col = NULL;
488 #endif
489 }
490
491 static void enable_segmentation(VP8_COMP *cpi)
492 {
493 /* Set the appropriate feature bit */
494 cpi->mb.e_mbd.segmentation_enabled = 1;
495 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
496 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
497 }
498 static void disable_segmentation(VP8_COMP *cpi)
499 {
500 /* Clear the appropriate feature bit */
501 cpi->mb.e_mbd.segmentation_enabled = 0;
502 }
503
504 /* Valid values for a segment are 0 to 3
505  * Segmentation map is arranged as [Rows][Columns]
506 */
507 static void set_segmentation_map(VP8_COMP *cpi, unsigned char *segmentation_map)
508 {
509 /* Copy in the new segmentation map */
510 vpx_memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols));
511
512 /* Signal that the map should be updated. */
513 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
514 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
515 }
516
517 /* The values given for each segment can be either deltas (from the default
518 * value chosen for the frame) or absolute values.
519 *
520 * Valid range for abs values is:
521 * (0-127 for MB_LVL_ALT_Q), (0-63 for SEGMENT_ALT_LF)
522  * Valid range for delta values is:
523 * (+/-127 for MB_LVL_ALT_Q), (+/-63 for SEGMENT_ALT_LF)
524 *
525 * abs_delta = SEGMENT_DELTADATA (deltas)
526 * abs_delta = SEGMENT_ABSDATA (use the absolute values given).
527 *
528 */
529 static void set_segment_data(VP8_COMP *cpi, signed char *feature_data, unsigned char abs_delta)
530 {
531 cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
532 vpx_memcpy(cpi->segment_feature_data, feature_data, sizeof(cpi->segment_feature_data));
533 }
534
535
536 static void segmentation_test_function(VP8_COMP *cpi)
537 {
538 unsigned char *seg_map;
539 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
540
541 // Create a temporary map for segmentation data.
542 CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
543
544 // Set the segmentation Map
545 set_segmentation_map(cpi, seg_map);
546
547 // Activate segmentation.
548 enable_segmentation(cpi);
549
550 // Set up the quant segment data
551 feature_data[MB_LVL_ALT_Q][0] = 0;
552 feature_data[MB_LVL_ALT_Q][1] = 4;
553 feature_data[MB_LVL_ALT_Q][2] = 0;
554 feature_data[MB_LVL_ALT_Q][3] = 0;
555 // Set up the loop segment data
556 feature_data[MB_LVL_ALT_LF][0] = 0;
557 feature_data[MB_LVL_ALT_LF][1] = 0;
558 feature_data[MB_LVL_ALT_LF][2] = 0;
559 feature_data[MB_LVL_ALT_LF][3] = 0;
560
561 // Initialise the feature data structure
562 // SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1
563 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
564
565     // Delete segmentation map
566 vpx_free(seg_map);
567
568 seg_map = 0;
569 }
570
571 /* A simple function to cyclically refresh the background at a lower Q */
572 static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
573 {
574 unsigned char *seg_map = cpi->segmentation_map;
575 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
576 int i;
577 int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
578 int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
579
580 cpi->cyclic_refresh_q = Q / 2;
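    /* Blocks picked for refresh are coded at roughly half the frame Q (set
     * above); the difference is applied below as a negative quantizer delta
     * on segment 1. */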
581
582 // Set every macroblock to be eligible for update.
583 // For key frame this will reset seg map to 0.
584 vpx_memset(cpi->segmentation_map, 0, mbs_in_frame);
585
586 if (cpi->common.frame_type != KEY_FRAME)
587 {
588 /* Cycle through the macro_block rows */
589 /* MB loop to set local segmentation map */
590 i = cpi->cyclic_refresh_mode_index;
591 assert(i < mbs_in_frame);
592 do
593 {
594             /* If the MB is a candidate for clean up then mark it for
595              * possible boost/refresh (segment 1). The segment id may get
596              * reset to 0 later if the MB gets coded as anything other than
597              * last frame 0,0, as only (last frame 0,0) MBs are eligible for
598              * refresh: that is to say, MBs likely to be background blocks.
599 */
600 if (cpi->cyclic_refresh_map[i] == 0)
601 {
602 seg_map[i] = 1;
603 block_count --;
604 }
605 else if (cpi->cyclic_refresh_map[i] < 0)
606 cpi->cyclic_refresh_map[i]++;
607
608 i++;
609 if (i == mbs_in_frame)
610 i = 0;
611
612 }
613 while(block_count && i != cpi->cyclic_refresh_mode_index);
614
615 cpi->cyclic_refresh_mode_index = i;
616
617 #if CONFIG_TEMPORAL_DENOISING
618 if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive &&
619 Q < (int)cpi->denoiser.denoise_pars.qp_thresh) {
620 // Under aggressive denoising mode, use segmentation to turn off loop
621 // filter below some qp thresh. The loop filter is turned off for all
622 // blocks that have been encoded as ZEROMV LAST x frames in a row,
623 // where x is set by cpi->denoiser.denoise_pars.consec_zerolast.
624 // This is to avoid "dot" artifacts that can occur from repeated
625 // loop filtering on noisy input source.
626 cpi->cyclic_refresh_q = Q;
627 lf_adjustment = -MAX_LOOP_FILTER;
628 for (i = 0; i < mbs_in_frame; ++i) {
629 seg_map[i] = (cpi->consec_zero_last[i] >
630 cpi->denoiser.denoise_pars.consec_zerolast) ? 1 : 0;
631 }
632 }
633 #endif
634 }
635
636 /* Activate segmentation. */
637 cpi->mb.e_mbd.update_mb_segmentation_map = 1;
638 cpi->mb.e_mbd.update_mb_segmentation_data = 1;
639 enable_segmentation(cpi);
640
641 /* Set up the quant segment data */
642 feature_data[MB_LVL_ALT_Q][0] = 0;
643 feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q);
644 feature_data[MB_LVL_ALT_Q][2] = 0;
645 feature_data[MB_LVL_ALT_Q][3] = 0;
646
647 /* Set up the loop segment data */
648 feature_data[MB_LVL_ALT_LF][0] = 0;
649 feature_data[MB_LVL_ALT_LF][1] = lf_adjustment;
650 feature_data[MB_LVL_ALT_LF][2] = 0;
651 feature_data[MB_LVL_ALT_LF][3] = 0;
652
653 /* Initialise the feature data structure */
654 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
655
656 }
657
658 static void set_default_lf_deltas(VP8_COMP *cpi)
659 {
660 cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1;
661 cpi->mb.e_mbd.mode_ref_lf_delta_update = 1;
662
663 vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas));
664 vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas));
665
666 /* Test of ref frame deltas */
667 cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2;
668 cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0;
669 cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2;
670 cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2;
671
672 cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */
673
674 if(cpi->oxcf.Mode == MODE_REALTIME)
675 cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */
676 else
677 cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */
678
679 cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */
680 cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */
681 }
682
683 /* Convenience macros for mapping speed and mode into a continuous
684 * range
685 */
686 #define GOOD(x) (x+1)
687 #define RT(x) (x+7)
688
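/* speed_map() reads its table as alternating { value, breakpoint } pairs and
 * returns the value from the first pair whose breakpoint exceeds the given
 * speed. The GOOD()/RT() macros place good-quality speeds 0-5 at 1-6 and
 * realtime speeds at 7 and up on this single scale. Worked example: with
 * thresh_mult_map_znn below and Speed == GOOD(4), the result is 2000, since
 * GOOD(4) passes the GOOD(2) and GOOD(3) breakpoints but not RT(0). */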
689 static int speed_map(int speed, const int *map)
690 {
691 int res;
692
693 do
694 {
695 res = *map++;
696 } while(speed >= *map++);
697 return res;
698 }
699
700 static const int thresh_mult_map_znn[] = {
701 /* map common to zero, nearest, and near */
702 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX
703 };
704
705 static const int thresh_mult_map_vhpred[] = {
706 1000, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(1), 2000,
707 RT(7), INT_MAX, INT_MAX
708 };
709
710 static const int thresh_mult_map_bpred[] = {
711 2000, GOOD(0), 2500, GOOD(2), 5000, GOOD(3), 7500, RT(0), 2500, RT(1), 5000,
712 RT(6), INT_MAX, INT_MAX
713 };
714
715 static const int thresh_mult_map_tm[] = {
716 1000, GOOD(2), 1500, GOOD(3), 2000, RT(0), 0, RT(1), 1000, RT(2), 2000,
717 RT(7), INT_MAX, INT_MAX
718 };
719
720 static const int thresh_mult_map_new1[] = {
721 1000, GOOD(2), 2000, RT(0), 2000, INT_MAX
722 };
723
724 static const int thresh_mult_map_new2[] = {
725 1000, GOOD(2), 2000, GOOD(3), 2500, GOOD(5), 4000, RT(0), 2000, RT(2), 2500,
726 RT(5), 4000, INT_MAX
727 };
728
729 static const int thresh_mult_map_split1[] = {
730 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX,
731 RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX
732 };
733
734 static const int thresh_mult_map_split2[] = {
735 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX,
736 RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX
737 };
738
739 static const int mode_check_freq_map_zn2[] = {
740 /* {zero,nearest}{2,3} */
741 0, RT(10), 1<<1, RT(11), 1<<2, RT(12), 1<<3, INT_MAX
742 };
743
744 static const int mode_check_freq_map_vhbpred[] = {
745 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX
746 };
747
748 static const int mode_check_freq_map_near2[] = {
749 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(10), 1<<2, RT(11), 1<<3, RT(12), 1<<4,
750 INT_MAX
751 };
752
753 static const int mode_check_freq_map_new1[] = {
754 0, RT(10), 1<<1, RT(11), 1<<2, RT(12), 1<<3, INT_MAX
755 };
756
757 static const int mode_check_freq_map_new2[] = {
758 0, GOOD(5), 4, RT(0), 0, RT(3), 4, RT(10), 1<<3, RT(11), 1<<4, RT(12), 1<<5,
759 INT_MAX
760 };
761
762 static const int mode_check_freq_map_split1[] = {
763 0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX
764 };
765
766 static const int mode_check_freq_map_split2[] = {
767 0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX
768 };
769
770 void vp8_set_speed_features(VP8_COMP *cpi)
771 {
772 SPEED_FEATURES *sf = &cpi->sf;
773 int Mode = cpi->compressor_speed;
774 int Speed = cpi->Speed;
775 int i;
776 VP8_COMMON *cm = &cpi->common;
777 int last_improved_quant = sf->improved_quant;
778 int ref_frames;
779
780 /* Initialise default mode frequency sampling variables */
781 for (i = 0; i < MAX_MODES; i ++)
782 {
783 cpi->mode_check_freq[i] = 0;
784 }
785
786 cpi->mb.mbs_tested_so_far = 0;
787
788 /* best quality defaults */
789 sf->RD = 1;
790 sf->search_method = NSTEP;
791 sf->improved_quant = 1;
792 sf->improved_dct = 1;
793 sf->auto_filter = 1;
794 sf->recode_loop = 1;
795 sf->quarter_pixel_search = 1;
796 sf->half_pixel_search = 1;
797 sf->iterative_sub_pixel = 1;
798 sf->optimize_coefficients = 1;
799 sf->use_fastquant_for_pick = 0;
800 sf->no_skip_block4x4_search = 1;
801
802 sf->first_step = 0;
803 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
804 sf->improved_mv_pred = 1;
805
806 /* default thresholds to 0 */
807 for (i = 0; i < MAX_MODES; i++)
808 sf->thresh_mult[i] = 0;
809
810 /* Count enabled references */
811 ref_frames = 1;
812 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
813 ref_frames++;
814 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
815 ref_frames++;
816 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
817 ref_frames++;
818
819 /* Convert speed to continuous range, with clamping */
820 if (Mode == 0)
821 Speed = 0;
822 else if (Mode == 2)
823 Speed = RT(Speed);
824 else
825 {
826 if (Speed > 5)
827 Speed = 5;
828 Speed = GOOD(Speed);
829 }
830
831 sf->thresh_mult[THR_ZERO1] =
832 sf->thresh_mult[THR_NEAREST1] =
833 sf->thresh_mult[THR_NEAR1] =
834 sf->thresh_mult[THR_DC] = 0; /* always */
835
836 sf->thresh_mult[THR_ZERO2] =
837 sf->thresh_mult[THR_ZERO3] =
838 sf->thresh_mult[THR_NEAREST2] =
839 sf->thresh_mult[THR_NEAREST3] =
840 sf->thresh_mult[THR_NEAR2] =
841 sf->thresh_mult[THR_NEAR3] = speed_map(Speed, thresh_mult_map_znn);
842
843 sf->thresh_mult[THR_V_PRED] =
844 sf->thresh_mult[THR_H_PRED] = speed_map(Speed, thresh_mult_map_vhpred);
845 sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred);
846 sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm);
847 sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1);
848 sf->thresh_mult[THR_NEW2] =
849 sf->thresh_mult[THR_NEW3] = speed_map(Speed, thresh_mult_map_new2);
850 sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1);
851 sf->thresh_mult[THR_SPLIT2] =
852 sf->thresh_mult[THR_SPLIT3] = speed_map(Speed, thresh_mult_map_split2);
853
854 cpi->mode_check_freq[THR_ZERO1] =
855 cpi->mode_check_freq[THR_NEAREST1] =
856 cpi->mode_check_freq[THR_NEAR1] =
857 cpi->mode_check_freq[THR_TM] =
858 cpi->mode_check_freq[THR_DC] = 0; /* always */
859
860 cpi->mode_check_freq[THR_ZERO2] =
861 cpi->mode_check_freq[THR_ZERO3] =
862 cpi->mode_check_freq[THR_NEAREST2] =
863 cpi->mode_check_freq[THR_NEAREST3] = speed_map(Speed,
864 mode_check_freq_map_zn2);
865
866 cpi->mode_check_freq[THR_NEAR2] =
867 cpi->mode_check_freq[THR_NEAR3] = speed_map(Speed,
868 mode_check_freq_map_near2);
869
870 cpi->mode_check_freq[THR_V_PRED] =
871 cpi->mode_check_freq[THR_H_PRED] =
872 cpi->mode_check_freq[THR_B_PRED] = speed_map(Speed,
873 mode_check_freq_map_vhbpred);
874 cpi->mode_check_freq[THR_NEW1] = speed_map(Speed,
875 mode_check_freq_map_new1);
876 cpi->mode_check_freq[THR_NEW2] =
877 cpi->mode_check_freq[THR_NEW3] = speed_map(Speed,
878 mode_check_freq_map_new2);
879 cpi->mode_check_freq[THR_SPLIT1] = speed_map(Speed,
880 mode_check_freq_map_split1);
881 cpi->mode_check_freq[THR_SPLIT2] =
882 cpi->mode_check_freq[THR_SPLIT3] = speed_map(Speed,
883 mode_check_freq_map_split2);
884 Speed = cpi->Speed;
885 switch (Mode)
886 {
887 #if !(CONFIG_REALTIME_ONLY)
888 case 0: /* best quality mode */
889 sf->first_step = 0;
890 sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
891 break;
892 case 1:
893 case 3:
894 if (Speed > 0)
895 {
896 /* Disable coefficient optimization above speed 0 */
897 sf->optimize_coefficients = 0;
898 sf->use_fastquant_for_pick = 1;
899 sf->no_skip_block4x4_search = 0;
900
901 sf->first_step = 1;
902 }
903
904 if (Speed > 2)
905 {
906 sf->improved_quant = 0;
907 sf->improved_dct = 0;
908
909 /* Only do recode loop on key frames, golden frames and
910 * alt ref frames
911 */
912 sf->recode_loop = 2;
913
914 }
915
916 if (Speed > 3)
917 {
918 sf->auto_filter = 1;
919 sf->recode_loop = 0; /* recode loop off */
920 sf->RD = 0; /* Turn rd off */
921
922 }
923
924 if (Speed > 4)
925 {
926 sf->auto_filter = 0; /* Faster selection of loop filter */
927 }
928
929 break;
930 #endif
931 case 2:
932 sf->optimize_coefficients = 0;
933 sf->recode_loop = 0;
934 sf->auto_filter = 1;
935 sf->iterative_sub_pixel = 1;
936 sf->search_method = NSTEP;
937
938 if (Speed > 0)
939 {
940 sf->improved_quant = 0;
941 sf->improved_dct = 0;
942
943 sf->use_fastquant_for_pick = 1;
944 sf->no_skip_block4x4_search = 0;
945 sf->first_step = 1;
946 }
947
948 if (Speed > 2)
949 sf->auto_filter = 0; /* Faster selection of loop filter */
950
951 if (Speed > 3)
952 {
953 sf->RD = 0;
954 sf->auto_filter = 1;
955 }
956
957 if (Speed > 4)
958 {
959 sf->auto_filter = 0; /* Faster selection of loop filter */
960 sf->search_method = HEX;
961 sf->iterative_sub_pixel = 0;
962 }
963
964 if (Speed > 6)
965 {
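                /* Derive an encode-breakout style threshold from the
                 * histogram of per-MB prediction errors (error_bins): skip
                 * the bins below the breakout level, then accumulate bins
                 * (each bin covers 128 units of error) until a
                 * speed-dependent fraction of the remaining macroblocks is
                 * covered, and use that point for the NEW/NEAREST/NEAR
                 * mode threshold multipliers below. */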
966 unsigned int sum = 0;
967 unsigned int total_mbs = cm->MBs;
968 int thresh;
969 unsigned int total_skip;
970
971 int min = 2000;
972
973 if (cpi->oxcf.encode_breakout > 2000)
974 min = cpi->oxcf.encode_breakout;
975
976 min >>= 7;
977
978 for (i = 0; i < min; i++)
979 {
980 sum += cpi->mb.error_bins[i];
981 }
982
983 total_skip = sum;
984 sum = 0;
985
986 /* i starts from 2 to make sure thresh started from 2048 */
987 for (; i < 1024; i++)
988 {
989 sum += cpi->mb.error_bins[i];
990
991 if (10 * sum >= (unsigned int)(cpi->Speed - 6)*(total_mbs - total_skip))
992 break;
993 }
994
995 i--;
996 thresh = (i << 7);
997
998 if (thresh < 2000)
999 thresh = 2000;
1000
1001 if (ref_frames > 1)
1002 {
1003 sf->thresh_mult[THR_NEW1 ] = thresh;
1004 sf->thresh_mult[THR_NEAREST1 ] = thresh >> 1;
1005 sf->thresh_mult[THR_NEAR1 ] = thresh >> 1;
1006 }
1007
1008 if (ref_frames > 2)
1009 {
1010 sf->thresh_mult[THR_NEW2] = thresh << 1;
1011 sf->thresh_mult[THR_NEAREST2 ] = thresh;
1012 sf->thresh_mult[THR_NEAR2 ] = thresh;
1013 }
1014
1015 if (ref_frames > 3)
1016 {
1017 sf->thresh_mult[THR_NEW3] = thresh << 1;
1018 sf->thresh_mult[THR_NEAREST3 ] = thresh;
1019 sf->thresh_mult[THR_NEAR3 ] = thresh;
1020 }
1021
1022 sf->improved_mv_pred = 0;
1023 }
1024
1025 if (Speed > 8)
1026 sf->quarter_pixel_search = 0;
1027
1028 if(cm->version == 0)
1029 {
1030 cm->filter_type = NORMAL_LOOPFILTER;
1031
1032 if (Speed >= 14)
1033 cm->filter_type = SIMPLE_LOOPFILTER;
1034 }
1035 else
1036 {
1037 cm->filter_type = SIMPLE_LOOPFILTER;
1038 }
1039
1040 /* This has a big hit on quality. Last resort */
1041 if (Speed >= 15)
1042 sf->half_pixel_search = 0;
1043
1044 vpx_memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins));
1045
1046 }; /* switch */
1047
1048 /* Slow quant, dct and trellis not worthwhile for first pass
1049 * so make sure they are always turned off.
1050 */
1051 if ( cpi->pass == 1 )
1052 {
1053 sf->improved_quant = 0;
1054 sf->optimize_coefficients = 0;
1055 sf->improved_dct = 0;
1056 }
1057
1058 if (cpi->sf.search_method == NSTEP)
1059 {
1060 vp8_init3smotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
1061 }
1062 else if (cpi->sf.search_method == DIAMOND)
1063 {
1064 vp8_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride);
1065 }
1066
1067 if (cpi->sf.improved_dct)
1068 {
1069 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1070 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1071 }
1072 else
1073 {
1074 /* No fast FDCT defined for any platform at this time. */
1075 cpi->mb.short_fdct8x4 = vp8_short_fdct8x4;
1076 cpi->mb.short_fdct4x4 = vp8_short_fdct4x4;
1077 }
1078
1079 cpi->mb.short_walsh4x4 = vp8_short_walsh4x4;
1080
1081 if (cpi->sf.improved_quant)
1082 {
1083 cpi->mb.quantize_b = vp8_regular_quantize_b;
1084 cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair;
1085 }
1086 else
1087 {
1088 cpi->mb.quantize_b = vp8_fast_quantize_b;
1089 cpi->mb.quantize_b_pair = vp8_fast_quantize_b_pair;
1090 }
1091 if (cpi->sf.improved_quant != last_improved_quant)
1092 vp8cx_init_quantizer(cpi);
1093
1094 if (cpi->sf.iterative_sub_pixel == 1)
1095 {
1096 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively;
1097 }
1098 else if (cpi->sf.quarter_pixel_search)
1099 {
1100 cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step;
1101 }
1102 else if (cpi->sf.half_pixel_search)
1103 {
1104 cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step;
1105 }
1106 else
1107 {
1108 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1109 }
1110
1111 if (cpi->sf.optimize_coefficients == 1 && cpi->pass!=1)
1112 cpi->mb.optimize = 1;
1113 else
1114 cpi->mb.optimize = 0;
1115
1116 if (cpi->common.full_pixel)
1117 cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step;
1118
1119 #ifdef SPEEDSTATS
1120 frames_at_speed[cpi->Speed]++;
1121 #endif
1122 }
1123 #undef GOOD
1124 #undef RT
1125
1126 static void alloc_raw_frame_buffers(VP8_COMP *cpi)
1127 {
1128 #if VP8_TEMPORAL_ALT_REF
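    /* Round the altref buffer dimensions up to whole macroblocks
     * (multiples of 16). */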
1129 int width = (cpi->oxcf.Width + 15) & ~15;
1130 int height = (cpi->oxcf.Height + 15) & ~15;
1131 #endif
1132
1133 cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height,
1134 cpi->oxcf.lag_in_frames);
1135 if(!cpi->lookahead)
1136 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1137 "Failed to allocate lag buffers");
1138
1139 #if VP8_TEMPORAL_ALT_REF
1140
1141 if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer,
1142 width, height, VP8BORDERINPIXELS))
1143 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1144 "Failed to allocate altref buffer");
1145
1146 #endif
1147 }
1148
1149
1150 static void dealloc_raw_frame_buffers(VP8_COMP *cpi)
1151 {
1152 #if VP8_TEMPORAL_ALT_REF
1153 vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer);
1154 #endif
1155 vp8_lookahead_destroy(cpi->lookahead);
1156 }
1157
1158
1159 static int vp8_alloc_partition_data(VP8_COMP *cpi)
1160 {
1161 vpx_free(cpi->mb.pip);
1162
1163 cpi->mb.pip = vpx_calloc((cpi->common.mb_cols + 1) *
1164 (cpi->common.mb_rows + 1),
1165 sizeof(PARTITION_INFO));
1166 if(!cpi->mb.pip)
1167 return 1;
1168
1169 cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1;
1170
1171 return 0;
1172 }
1173
1174 void vp8_alloc_compressor_data(VP8_COMP *cpi)
1175 {
1176 VP8_COMMON *cm = & cpi->common;
1177
1178 int width = cm->Width;
1179 int height = cm->Height;
1180
1181 if (vp8_alloc_frame_buffers(cm, width, height))
1182 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1183 "Failed to allocate frame buffers");
1184
1185 if (vp8_alloc_partition_data(cpi))
1186 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1187 "Failed to allocate partition data");
1188
1189
1190 if ((width & 0xf) != 0)
1191 width += 16 - (width & 0xf);
1192
1193 if ((height & 0xf) != 0)
1194 height += 16 - (height & 0xf);
1195
1196
1197 if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame,
1198 width, height, VP8BORDERINPIXELS))
1199 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1200 "Failed to allocate last frame buffer");
1201
1202 if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source,
1203 width, height, VP8BORDERINPIXELS))
1204 vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,
1205 "Failed to allocate scaled source buffer");
1206
1207 vpx_free(cpi->tok);
1208
1209 {
1210 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
1211 unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */
1212 #else
1213 unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16;
1214 #endif
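        /* Worst-case sizing: roughly one token per coefficient
         * (24 4x4 blocks x 16 coefficients per macroblock). */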
1215 CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
1216 }
1217
1218 /* Data used for real time vc mode to see if gf needs refreshing */
1219 cpi->zeromv_count = 0;
1220
1221
1222 /* Structures used to monitor GF usage */
1223 vpx_free(cpi->gf_active_flags);
1224 CHECK_MEM_ERROR(cpi->gf_active_flags,
1225 vpx_calloc(sizeof(*cpi->gf_active_flags),
1226 cm->mb_rows * cm->mb_cols));
1227 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
1228
1229 vpx_free(cpi->mb_activity_map);
1230 CHECK_MEM_ERROR(cpi->mb_activity_map,
1231 vpx_calloc(sizeof(*cpi->mb_activity_map),
1232 cm->mb_rows * cm->mb_cols));
1233
1234 /* allocate memory for storing last frame's MVs for MV prediction. */
1235 vpx_free(cpi->lfmv);
1236 CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2),
1237 sizeof(*cpi->lfmv)));
1238 vpx_free(cpi->lf_ref_frame_sign_bias);
1239 CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias,
1240 vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2),
1241 sizeof(*cpi->lf_ref_frame_sign_bias)));
1242 vpx_free(cpi->lf_ref_frame);
1243 CHECK_MEM_ERROR(cpi->lf_ref_frame,
1244 vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2),
1245 sizeof(*cpi->lf_ref_frame)));
1246
1247 /* Create the encoder segmentation map and set all entries to 0 */
1248 vpx_free(cpi->segmentation_map);
1249 CHECK_MEM_ERROR(cpi->segmentation_map,
1250 vpx_calloc(cm->mb_rows * cm->mb_cols,
1251 sizeof(*cpi->segmentation_map)));
1252 cpi->cyclic_refresh_mode_index = 0;
1253 vpx_free(cpi->active_map);
1254 CHECK_MEM_ERROR(cpi->active_map,
1255 vpx_calloc(cm->mb_rows * cm->mb_cols,
1256 sizeof(*cpi->active_map)));
1257 vpx_memset(cpi->active_map , 1, (cm->mb_rows * cm->mb_cols));
1258
1259 #if CONFIG_MULTITHREAD
1260 if (width < 640)
1261 cpi->mt_sync_range = 1;
1262 else if (width <= 1280)
1263 cpi->mt_sync_range = 4;
1264 else if (width <= 2560)
1265 cpi->mt_sync_range = 8;
1266 else
1267 cpi->mt_sync_range = 16;
1268
1269 if (cpi->oxcf.multi_threaded > 1)
1270 {
1271 vpx_free(cpi->mt_current_mb_col);
1272 CHECK_MEM_ERROR(cpi->mt_current_mb_col,
1273 vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows));
1274 }
1275
1276 #endif
1277
1278 vpx_free(cpi->tplist);
1279 CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows));
1280
1281 #if CONFIG_TEMPORAL_DENOISING
1282 if (cpi->oxcf.noise_sensitivity > 0) {
1283 vp8_denoiser_free(&cpi->denoiser);
1284 vp8_denoiser_allocate(&cpi->denoiser, width, height,
1285 cm->mb_rows, cm->mb_cols,
1286 cpi->oxcf.noise_sensitivity);
1287 }
1288 #endif
1289 }
1290
1291
1292 /* Quant MOD */
1293 static const int q_trans[] =
1294 {
1295 0, 1, 2, 3, 4, 5, 7, 8,
1296 9, 10, 12, 13, 15, 17, 18, 19,
1297 20, 21, 23, 24, 25, 26, 27, 28,
1298 29, 30, 31, 33, 35, 37, 39, 41,
1299 43, 45, 47, 49, 51, 53, 55, 57,
1300 59, 61, 64, 67, 70, 73, 76, 79,
1301 82, 85, 88, 91, 94, 97, 100, 103,
1302 106, 109, 112, 115, 118, 121, 124, 127,
1303 };
1304
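/* Map an internal quantizer index (0-127) back to the user-level 0-63 scale:
 * return the smallest user q whose q_trans[] entry is >= x. */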
1305 int vp8_reverse_trans(int x)
1306 {
1307 int i;
1308
1309 for (i = 0; i < 64; i++)
1310 if (q_trans[i] >= x)
1311 return i;
1312
1313 return 63;
1314 }
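/* Recompute everything that depends on the frame rate. Illustrative example:
 * at a target bandwidth of 800000 bit/s and 30 fps, per_frame_bandwidth
 * becomes 800000 / 30 = 26666 bits. */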
1315 void vp8_new_framerate(VP8_COMP *cpi, double framerate)
1316 {
1317 if(framerate < .1)
1318 framerate = 30;
1319
1320 cpi->framerate = framerate;
1321 cpi->output_framerate = framerate;
1322 cpi->per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth /
1323 cpi->output_framerate);
1324 cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth;
1325 cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth *
1326 cpi->oxcf.two_pass_vbrmin_section / 100);
1327
1328 /* Set Maximum gf/arf interval */
1329 cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2);
1330
1331 if(cpi->max_gf_interval < 12)
1332 cpi->max_gf_interval = 12;
1333
1334 /* Extended interval for genuinely static scenes */
1335 cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1;
1336
1337     /* Special conditions when alt ref frame is enabled in lagged compress mode */
1338 if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames)
1339 {
1340 if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1)
1341 cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1342
1343 if (cpi->twopass.static_scene_max_gf_interval > cpi->oxcf.lag_in_frames - 1)
1344 cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1;
1345 }
1346
1347 if ( cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval )
1348 cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval;
1349 }
1350
1351
1352 static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
1353 {
1354 VP8_COMMON *cm = &cpi->common;
1355
1356 cpi->oxcf = *oxcf;
1357
1358 cpi->auto_gold = 1;
1359 cpi->auto_adjust_gold_quantizer = 1;
1360
1361 cm->version = oxcf->Version;
1362 vp8_setup_version(cm);
1363
1364 /* frame rate is not available on the first frame, as it's derived from
1365 * the observed timestamps. The actual value used here doesn't matter
1366 * too much, as it will adapt quickly. If the reciprocal of the timebase
1367 * seems like a reasonable framerate, then use that as a guess, otherwise
1368 * use 30.
1369 */
1370 cpi->framerate = (double)(oxcf->timebase.den) /
1371 (double)(oxcf->timebase.num);
1372
1373 if (cpi->framerate > 180)
1374 cpi->framerate = 30;
1375
1376 cpi->ref_framerate = cpi->framerate;
1377
1378 /* change includes all joint functionality */
1379 vp8_change_config(cpi, oxcf);
1380
1381 /* Initialize active best and worst q and average q values. */
1382 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1383 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1384 cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q;
1385
1386 /* Initialise the starting buffer levels */
1387 cpi->buffer_level = cpi->oxcf.starting_buffer_level;
1388 cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
1389
1390 cpi->rolling_target_bits = cpi->av_per_frame_bandwidth;
1391 cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth;
1392 cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth;
1393 cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth;
1394
1395 cpi->total_actual_bits = 0;
1396 cpi->total_target_vs_actual = 0;
1397
1398     /* Temporal scalability */
1399 if (cpi->oxcf.number_of_layers > 1)
1400 {
1401 unsigned int i;
1402 double prev_layer_framerate=0;
1403
1404 for (i=0; i<cpi->oxcf.number_of_layers; i++)
1405 {
1406 init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
1407 prev_layer_framerate = cpi->output_framerate /
1408 cpi->oxcf.rate_decimator[i];
1409 }
1410 }
1411
1412 #if VP8_TEMPORAL_ALT_REF
1413 {
1414 int i;
1415
1416 cpi->fixed_divide[0] = 0;
1417
1418 for (i = 1; i < 512; i++)
1419 cpi->fixed_divide[i] = 0x80000 / i;
1420 }
1421 #endif
1422 }
1423
1424 static void update_layer_contexts (VP8_COMP *cpi)
1425 {
1426 VP8_CONFIG *oxcf = &cpi->oxcf;
1427
1428 /* Update snapshots of the layer contexts to reflect new parameters */
1429 if (oxcf->number_of_layers > 1)
1430 {
1431 unsigned int i;
1432 double prev_layer_framerate=0;
1433
1434 assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS);
1435 for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i)
1436 {
1437 LAYER_CONTEXT *lc = &cpi->layer_context[i];
1438
1439 lc->framerate =
1440 cpi->ref_framerate / oxcf->rate_decimator[i];
1441 lc->target_bandwidth = oxcf->target_bitrate[i] * 1000;
1442
1443 lc->starting_buffer_level = rescale(
1444 (int)oxcf->starting_buffer_level_in_ms,
1445 lc->target_bandwidth, 1000);
1446
1447 if (oxcf->optimal_buffer_level == 0)
1448 lc->optimal_buffer_level = lc->target_bandwidth / 8;
1449 else
1450 lc->optimal_buffer_level = rescale(
1451 (int)oxcf->optimal_buffer_level_in_ms,
1452 lc->target_bandwidth, 1000);
1453
1454 if (oxcf->maximum_buffer_size == 0)
1455 lc->maximum_buffer_size = lc->target_bandwidth / 8;
1456 else
1457 lc->maximum_buffer_size = rescale(
1458 (int)oxcf->maximum_buffer_size_in_ms,
1459 lc->target_bandwidth, 1000);
1460
1461 /* Work out the average size of a frame within this layer */
1462 if (i > 0)
1463 lc->avg_frame_size_for_layer =
1464 (int)((oxcf->target_bitrate[i] -
1465 oxcf->target_bitrate[i-1]) * 1000 /
1466 (lc->framerate - prev_layer_framerate));
1467
1468 prev_layer_framerate = lc->framerate;
1469 }
1470 }
1471 }
1472
1473 void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf)
1474 {
1475 VP8_COMMON *cm = &cpi->common;
1476 int last_w, last_h, prev_number_of_layers;
1477
1478 if (!cpi)
1479 return;
1480
1481 if (!oxcf)
1482 return;
1483
1484 #if CONFIG_MULTITHREAD
1485 /* wait for the last picture loopfilter thread done */
1486 if (cpi->b_lpf_running)
1487 {
1488 sem_wait(&cpi->h_event_end_lpf);
1489 cpi->b_lpf_running = 0;
1490 }
1491 #endif
1492
1493 if (cm->version != oxcf->Version)
1494 {
1495 cm->version = oxcf->Version;
1496 vp8_setup_version(cm);
1497 }
1498
1499 last_w = cpi->oxcf.Width;
1500 last_h = cpi->oxcf.Height;
1501 prev_number_of_layers = cpi->oxcf.number_of_layers;
1502
1503 cpi->oxcf = *oxcf;
1504
1505 switch (cpi->oxcf.Mode)
1506 {
1507
1508 case MODE_REALTIME:
1509 cpi->pass = 0;
1510 cpi->compressor_speed = 2;
1511
1512 if (cpi->oxcf.cpu_used < -16)
1513 {
1514 cpi->oxcf.cpu_used = -16;
1515 }
1516
1517 if (cpi->oxcf.cpu_used > 16)
1518 cpi->oxcf.cpu_used = 16;
1519
1520 break;
1521
1522 case MODE_GOODQUALITY:
1523 cpi->pass = 0;
1524 cpi->compressor_speed = 1;
1525
1526 if (cpi->oxcf.cpu_used < -5)
1527 {
1528 cpi->oxcf.cpu_used = -5;
1529 }
1530
1531 if (cpi->oxcf.cpu_used > 5)
1532 cpi->oxcf.cpu_used = 5;
1533
1534 break;
1535
1536 case MODE_BESTQUALITY:
1537 cpi->pass = 0;
1538 cpi->compressor_speed = 0;
1539 break;
1540
1541 case MODE_FIRSTPASS:
1542 cpi->pass = 1;
1543 cpi->compressor_speed = 1;
1544 break;
1545 case MODE_SECONDPASS:
1546 cpi->pass = 2;
1547 cpi->compressor_speed = 1;
1548
1549 if (cpi->oxcf.cpu_used < -5)
1550 {
1551 cpi->oxcf.cpu_used = -5;
1552 }
1553
1554 if (cpi->oxcf.cpu_used > 5)
1555 cpi->oxcf.cpu_used = 5;
1556
1557 break;
1558 case MODE_SECONDPASS_BEST:
1559 cpi->pass = 2;
1560 cpi->compressor_speed = 0;
1561 break;
1562 }
1563
1564 if (cpi->pass == 0)
1565 cpi->auto_worst_q = 1;
1566
1567 cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q];
1568 cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q];
1569 cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level];
1570
1571 if (oxcf->fixed_q >= 0)
1572 {
1573 if (oxcf->worst_allowed_q < 0)
1574 cpi->oxcf.fixed_q = q_trans[0];
1575 else
1576 cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q];
1577
1578 if (oxcf->alt_q < 0)
1579 cpi->oxcf.alt_q = q_trans[0];
1580 else
1581 cpi->oxcf.alt_q = q_trans[oxcf->alt_q];
1582
1583 if (oxcf->key_q < 0)
1584 cpi->oxcf.key_q = q_trans[0];
1585 else
1586 cpi->oxcf.key_q = q_trans[oxcf->key_q];
1587
1588 if (oxcf->gold_q < 0)
1589 cpi->oxcf.gold_q = q_trans[0];
1590 else
1591 cpi->oxcf.gold_q = q_trans[oxcf->gold_q];
1592
1593 }
1594
1595 cpi->baseline_gf_interval =
1596 cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL;
1597
1598 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
1599
1600 cm->refresh_golden_frame = 0;
1601 cm->refresh_last_frame = 1;
1602 cm->refresh_entropy_probs = 1;
1603
1604 #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
1605 cpi->oxcf.token_partitions = 3;
1606 #endif
1607
1608 if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3)
1609 cm->multi_token_partition =
1610 (TOKEN_PARTITION) cpi->oxcf.token_partitions;
1611
1612 setup_features(cpi);
1613
1614 {
1615 int i;
1616
1617 for (i = 0; i < MAX_MB_SEGMENTS; i++)
1618 cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
1619 }
1620
1621 /* At the moment the first order values may not be > MAXQ */
1622 if (cpi->oxcf.fixed_q > MAXQ)
1623 cpi->oxcf.fixed_q = MAXQ;
1624
1625 /* local file playback mode == really big buffer */
1626 if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK)
1627 {
1628 cpi->oxcf.starting_buffer_level = 60000;
1629 cpi->oxcf.optimal_buffer_level = 60000;
1630 cpi->oxcf.maximum_buffer_size = 240000;
1631 cpi->oxcf.starting_buffer_level_in_ms = 60000;
1632 cpi->oxcf.optimal_buffer_level_in_ms = 60000;
1633 cpi->oxcf.maximum_buffer_size_in_ms = 240000;
1634 }
1635
1636 /* Convert target bandwidth from Kbit/s to Bit/s */
1637 cpi->oxcf.target_bandwidth *= 1000;
1638
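    /* The buffer levels supplied in oxcf are in milliseconds; convert them
     * into bit budgets at the (now bit/s) target bandwidth. */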
1639 cpi->oxcf.starting_buffer_level =
1640 rescale((int)cpi->oxcf.starting_buffer_level,
1641 cpi->oxcf.target_bandwidth, 1000);
1642
1643 /* Set or reset optimal and maximum buffer levels. */
1644 if (cpi->oxcf.optimal_buffer_level == 0)
1645 cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8;
1646 else
1647 cpi->oxcf.optimal_buffer_level =
1648 rescale((int)cpi->oxcf.optimal_buffer_level,
1649 cpi->oxcf.target_bandwidth, 1000);
1650
1651 if (cpi->oxcf.maximum_buffer_size == 0)
1652 cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8;
1653 else
1654 cpi->oxcf.maximum_buffer_size =
1655 rescale((int)cpi->oxcf.maximum_buffer_size,
1656 cpi->oxcf.target_bandwidth, 1000);
1657 // Under a configuration change, where maximum_buffer_size may change,
1658 // keep buffer level clipped to the maximum allowed buffer size.
1659 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
1660 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
1661 cpi->buffer_level = cpi->bits_off_target;
1662 }
1663
1664 /* Set up frame rate and related parameters rate control values. */
1665 vp8_new_framerate(cpi, cpi->framerate);
1666
1667 /* Set absolute upper and lower quality limits */
1668 cpi->worst_quality = cpi->oxcf.worst_allowed_q;
1669 cpi->best_quality = cpi->oxcf.best_allowed_q;
1670
1671 /* active values should only be modified if out of new range */
1672 if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q)
1673 {
1674 cpi->active_worst_quality = cpi->oxcf.worst_allowed_q;
1675 }
1676 /* less likely */
1677 else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q)
1678 {
1679 cpi->active_worst_quality = cpi->oxcf.best_allowed_q;
1680 }
1681 if (cpi->active_best_quality < cpi->oxcf.best_allowed_q)
1682 {
1683 cpi->active_best_quality = cpi->oxcf.best_allowed_q;
1684 }
1685 /* less likely */
1686 else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q)
1687 {
1688 cpi->active_best_quality = cpi->oxcf.worst_allowed_q;
1689 }
1690
1691 cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0;
1692
1693 cpi->cq_target_quality = cpi->oxcf.cq_level;
1694
1695 /* Only allow dropped frames in buffered mode */
1696 cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode;
1697
1698 cpi->target_bandwidth = cpi->oxcf.target_bandwidth;
1699
1700 // Check if the number of temporal layers has changed, and if so reset the
1701 // pattern counter and set/initialize the temporal layer context for the
1702 // new layer configuration.
1703 if (cpi->oxcf.number_of_layers != prev_number_of_layers)
1704 {
1705 // If the number of temporal layers are changed we must start at the
1706 // base of the pattern cycle, so reset temporal_pattern_counter.
1707 cpi->temporal_pattern_counter = 0;
1708 reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
1709 }
1710
1711 cm->Width = cpi->oxcf.Width;
1712 cm->Height = cpi->oxcf.Height;
1713
1714 /* TODO(jkoleszar): if an internal spatial resampling is active,
1715 * and we downsize the input image, maybe we should clear the
1716 * internal scale immediately rather than waiting for it to
1717 * correct.
1718 */
1719
1720 /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */
1721 if (cpi->oxcf.Sharpness > 7)
1722 cpi->oxcf.Sharpness = 7;
1723
1724 cm->sharpness_level = cpi->oxcf.Sharpness;
1725
1726 if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL)
1727 {
1728 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
1729 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
1730
1731 Scale2Ratio(cm->horiz_scale, &hr, &hs);
1732 Scale2Ratio(cm->vert_scale, &vr, &vs);
1733
1734 /* always go to the next whole number */
1735 cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs;
1736 cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs;
1737 }
1738
1739 if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height)
1740 cpi->force_next_frame_intra = 1;
1741
1742 if (((cm->Width + 15) & 0xfffffff0) !=
1743 cm->yv12_fb[cm->lst_fb_idx].y_width ||
1744 ((cm->Height + 15) & 0xfffffff0) !=
1745 cm->yv12_fb[cm->lst_fb_idx].y_height ||
1746 cm->yv12_fb[cm->lst_fb_idx].y_width == 0)
1747 {
1748 dealloc_raw_frame_buffers(cpi);
1749 alloc_raw_frame_buffers(cpi);
1750 vp8_alloc_compressor_data(cpi);
1751 }
1752
1753 if (cpi->oxcf.fixed_q >= 0)
1754 {
1755 cpi->last_q[0] = cpi->oxcf.fixed_q;
1756 cpi->last_q[1] = cpi->oxcf.fixed_q;
1757 }
1758
1759 cpi->Speed = cpi->oxcf.cpu_used;
1760
1761 /* force to allowlag to 0 if lag_in_frames is 0; */
1762 if (cpi->oxcf.lag_in_frames == 0)
1763 {
1764 cpi->oxcf.allow_lag = 0;
1765 }
1766 /* Limit on lag buffers as these are not currently dynamically allocated */
1767 else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS)
1768 cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS;
1769
1770 /* YX Temp */
1771 cpi->alt_ref_source = NULL;
1772 cpi->is_src_frame_alt_ref = 0;
1773
1774 #if CONFIG_TEMPORAL_DENOISING
1775 if (cpi->oxcf.noise_sensitivity)
1776 {
1777 if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc)
1778 {
1779 int width = (cpi->oxcf.Width + 15) & ~15;
1780 int height = (cpi->oxcf.Height + 15) & ~15;
1781 vp8_denoiser_allocate(&cpi->denoiser, width, height,
1782 cm->mb_rows, cm->mb_cols,
1783 cpi->oxcf.noise_sensitivity);
1784 }
1785 }
1786 #endif
1787
1788 #if 0
1789 /* Experimental RD Code */
1790 cpi->frame_distortion = 0;
1791 cpi->last_frame_distortion = 0;
1792 #endif
1793
1794 }
1795
1796 #ifndef M_LOG2_E
1797 #define M_LOG2_E 0.693147180559945309417
1798 #endif
1799 #define log2f(x) (log (x) / (float) M_LOG2_E)
1800
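/* Despite its name, the M_LOG2_E constant above is the value of ln(2), so
 * log2f(x) evaluates log(x) / ln(2) == log2(x) as intended.
 *
 * cal_mvsadcosts fills a sign-symmetric SAD cost table for motion vector
 * components in the range [-mvfp_max, mvfp_max]: entry 0 gets a fixed cost
 * of 300 and the cost of +/-i grows logarithmically with magnitude.
 */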
1801 static void cal_mvsadcosts(int *mvsadcost[2])
1802 {
1803 int i = 1;
1804
1805 mvsadcost [0] [0] = 300;
1806 mvsadcost [1] [0] = 300;
1807
1808 do
1809 {
1810 double z = 256 * (2 * (log2f(8 * i) + .6));
1811 mvsadcost [0][i] = (int) z;
1812 mvsadcost [1][i] = (int) z;
1813 mvsadcost [0][-i] = (int) z;
1814 mvsadcost [1][-i] = (int) z;
1815 }
1816 while (++i <= mvfp_max);
1817 }
1818
1819 struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
1820 {
1821 int i;
1822
1823 VP8_COMP *cpi;
1824 VP8_COMMON *cm;
1825
1826 cpi = vpx_memalign(32, sizeof(VP8_COMP));
1827 /* Check that the CPI instance is valid */
1828 if (!cpi)
1829 return 0;
1830
1831 cm = &cpi->common;
1832
1833 vpx_memset(cpi, 0, sizeof(VP8_COMP));
1834
1835 if (setjmp(cm->error.jmp))
1836 {
1837 cpi->common.error.setjmp = 0;
1838 vp8_remove_compressor(&cpi);
1839 return 0;
1840 }
1841
1842 cpi->common.error.setjmp = 1;
1843
1844 CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1));
1845
1846 vp8_create_common(&cpi->common);
1847
1848 init_config(cpi, oxcf);
1849
1850 memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));
1851 cpi->common.current_video_frame = 0;
1852 cpi->temporal_pattern_counter = 0;
1853 cpi->kf_overspend_bits = 0;
1854 cpi->kf_bitrate_adjustment = 0;
1855 cpi->frames_till_gf_update_due = 0;
1856 cpi->gf_overspend_bits = 0;
1857 cpi->non_gf_bitrate_adjustment = 0;
1858 cpi->prob_last_coded = 128;
1859 cpi->prob_gf_coded = 128;
1860 cpi->prob_intra_coded = 63;
1861
1862 /* Prime the recent reference frame usage counters.
1863 * Hereafter they will be maintained as a sort of moving average
1864 */
1865 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
1866 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
1867 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
1868 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
1869
1870 /* Set reference frame sign bias for ALTREF frame to 1 (for now) */
1871 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
1872
1873 cpi->twopass.gf_decay_rate = 0;
1874 cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL;
1875
1876 cpi->gold_is_last = 0 ;
1877 cpi->alt_is_last = 0 ;
1878 cpi->gold_is_alt = 0 ;
1879
1880 cpi->active_map_enabled = 0;
1881
1882 #if 0
1883 /* Experimental code for lagged and one pass */
1884 /* Initialise one_pass GF frames stats */
1885 /* Update stats used for GF selection */
1886 if (cpi->pass == 0)
1887 {
1888 cpi->one_pass_frame_index = 0;
1889
1890 for (i = 0; i < MAX_LAG_BUFFERS; i++)
1891 {
1892 cpi->one_pass_frame_stats[i].frames_so_far = 0;
1893 cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
1894 cpi->one_pass_frame_stats[i].frame_coded_error = 0.0;
1895 cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0;
1896 cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0;
1897 cpi->one_pass_frame_stats[i].frame_mvr = 0.0;
1898 cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0;
1899 cpi->one_pass_frame_stats[i].frame_mvc = 0.0;
1900 cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0;
1901 }
1902 }
1903 #endif
1904
1905     /* Should we use the cyclic refresh method?
1906      * Currently this is tied to error resilient mode.
1907 */
1908 cpi->cyclic_refresh_mode_enabled = cpi->oxcf.error_resilient_mode;
1909 cpi->cyclic_refresh_mode_max_mbs_perframe = (cpi->common.mb_rows * cpi->common.mb_cols) / 5;
1910 if (cpi->oxcf.number_of_layers == 1) {
1911 cpi->cyclic_refresh_mode_max_mbs_perframe =
1912 (cpi->common.mb_rows * cpi->common.mb_cols) / 20;
1913 } else if (cpi->oxcf.number_of_layers == 2) {
1914 cpi->cyclic_refresh_mode_max_mbs_perframe =
1915 (cpi->common.mb_rows * cpi->common.mb_cols) / 10;
1916 }
1917 cpi->cyclic_refresh_mode_index = 0;
1918 cpi->cyclic_refresh_q = 32;
1919
1920 if (cpi->cyclic_refresh_mode_enabled)
1921 {
1922 CHECK_MEM_ERROR(cpi->cyclic_refresh_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
1923 }
1924 else
1925 cpi->cyclic_refresh_map = (signed char *) NULL;
1926
1927 CHECK_MEM_ERROR(cpi->consec_zero_last,
1928 vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
1929
1930 #ifdef VP8_ENTROPY_STATS
1931 init_context_counters();
1932 #endif
1933
1934     /* Initialize the feed-forward activity masking. */
1935 cpi->activity_avg = 90<<12;
1936
1937 /* Give a sensible default for the first frame. */
1938 cpi->frames_since_key = 8;
1939 cpi->key_frame_frequency = cpi->oxcf.key_freq;
1940 cpi->this_key_frame_forced = 0;
1941 cpi->next_key_frame_forced = 0;
1942
1943 cpi->source_alt_ref_pending = 0;
1944 cpi->source_alt_ref_active = 0;
1945 cpi->common.refresh_alt_ref_frame = 0;
1946
1947 cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
1948 #if CONFIG_INTERNAL_STATS
1949 cpi->b_calculate_ssimg = 0;
1950
1951 cpi->count = 0;
1952 cpi->bytes = 0;
1953
1954 if (cpi->b_calculate_psnr)
1955 {
1956 cpi->total_sq_error = 0.0;
1957 cpi->total_sq_error2 = 0.0;
1958 cpi->total_y = 0.0;
1959 cpi->total_u = 0.0;
1960 cpi->total_v = 0.0;
1961 cpi->total = 0.0;
1962 cpi->totalp_y = 0.0;
1963 cpi->totalp_u = 0.0;
1964 cpi->totalp_v = 0.0;
1965 cpi->totalp = 0.0;
1966 cpi->tot_recode_hits = 0;
1967 cpi->summed_quality = 0;
1968 cpi->summed_weights = 0;
1969 }
1970
1971 if (cpi->b_calculate_ssimg)
1972 {
1973 cpi->total_ssimg_y = 0;
1974 cpi->total_ssimg_u = 0;
1975 cpi->total_ssimg_v = 0;
1976 cpi->total_ssimg_all = 0;
1977 }
1978
1979 #endif
1980
1981 cpi->first_time_stamp_ever = 0x7FFFFFFF;
1982
1983 cpi->frames_till_gf_update_due = 0;
1984 cpi->key_frame_count = 1;
1985
1986 cpi->ni_av_qi = cpi->oxcf.worst_allowed_q;
1987 cpi->ni_tot_qi = 0;
1988 cpi->ni_frames = 0;
1989 cpi->total_byte_count = 0;
1990
1991 cpi->drop_frame = 0;
1992
1993 cpi->rate_correction_factor = 1.0;
1994 cpi->key_frame_rate_correction_factor = 1.0;
1995 cpi->gf_rate_correction_factor = 1.0;
1996 cpi->twopass.est_max_qcorrection_factor = 1.0;
1997
1998 for (i = 0; i < KEY_FRAME_CONTEXT; i++)
1999 {
2000 cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
2001 }
2002
2003 #ifdef OUTPUT_YUV_SRC
2004 yuv_file = fopen("bd.yuv", "ab");
2005 #endif
2006 #ifdef OUTPUT_YUV_DENOISED
2007 yuv_denoised_file = fopen("denoised.yuv", "ab");
2008 #endif
2009
2010 #if 0
2011 framepsnr = fopen("framepsnr.stt", "a");
2012 kf_list = fopen("kf_list.stt", "w");
2013 #endif
2014
2015 cpi->output_pkt_list = oxcf->output_pkt_list;
2016
2017 #if !(CONFIG_REALTIME_ONLY)
2018
2019 if (cpi->pass == 1)
2020 {
2021 vp8_init_first_pass(cpi);
2022 }
2023 else if (cpi->pass == 2)
2024 {
2025 size_t packet_sz = sizeof(FIRSTPASS_STATS);
2026 int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
2027
2028 cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
2029 cpi->twopass.stats_in = cpi->twopass.stats_in_start;
2030 cpi->twopass.stats_in_end = (void*)((char *)cpi->twopass.stats_in
2031 + (packets - 1) * packet_sz);
2032 vp8_init_second_pass(cpi);
2033 }
2034
2035 #endif
2036
2037 if (cpi->compressor_speed == 2)
2038 {
2039 cpi->avg_encode_time = 0;
2040 cpi->avg_pick_mode_time = 0;
2041 }
2042
2043 vp8_set_speed_features(cpi);
2044
2045 /* Set starting values of RD threshold multipliers (128 = *1) */
2046 for (i = 0; i < MAX_MODES; i++)
2047 {
2048 cpi->mb.rd_thresh_mult[i] = 128;
2049 }
2050
2051 #ifdef VP8_ENTROPY_STATS
2052 init_mv_ref_counts();
2053 #endif
2054
2055 #if CONFIG_MULTITHREAD
2056 if(vp8cx_create_encoder_threads(cpi))
2057 {
2058 vp8_remove_compressor(&cpi);
2059 return 0;
2060 }
2061 #endif
2062
2063 cpi->fn_ptr[BLOCK_16X16].sdf = vp8_sad16x16;
2064 cpi->fn_ptr[BLOCK_16X16].vf = vp8_variance16x16;
2065 cpi->fn_ptr[BLOCK_16X16].svf = vp8_sub_pixel_variance16x16;
2066 cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = vp8_variance_halfpixvar16x16_h;
2067 cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = vp8_variance_halfpixvar16x16_v;
2068 cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = vp8_variance_halfpixvar16x16_hv;
2069 cpi->fn_ptr[BLOCK_16X16].sdx3f = vp8_sad16x16x3;
2070 cpi->fn_ptr[BLOCK_16X16].sdx8f = vp8_sad16x16x8;
2071 cpi->fn_ptr[BLOCK_16X16].sdx4df = vp8_sad16x16x4d;
2072
2073 cpi->fn_ptr[BLOCK_16X8].sdf = vp8_sad16x8;
2074 cpi->fn_ptr[BLOCK_16X8].vf = vp8_variance16x8;
2075 cpi->fn_ptr[BLOCK_16X8].svf = vp8_sub_pixel_variance16x8;
2076 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL;
2077 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL;
2078 cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
2079 cpi->fn_ptr[BLOCK_16X8].sdx3f = vp8_sad16x8x3;
2080 cpi->fn_ptr[BLOCK_16X8].sdx8f = vp8_sad16x8x8;
2081 cpi->fn_ptr[BLOCK_16X8].sdx4df = vp8_sad16x8x4d;
2082
2083 cpi->fn_ptr[BLOCK_8X16].sdf = vp8_sad8x16;
2084 cpi->fn_ptr[BLOCK_8X16].vf = vp8_variance8x16;
2085 cpi->fn_ptr[BLOCK_8X16].svf = vp8_sub_pixel_variance8x16;
2086 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL;
2087 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL;
2088 cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
2089 cpi->fn_ptr[BLOCK_8X16].sdx3f = vp8_sad8x16x3;
2090 cpi->fn_ptr[BLOCK_8X16].sdx8f = vp8_sad8x16x8;
2091 cpi->fn_ptr[BLOCK_8X16].sdx4df = vp8_sad8x16x4d;
2092
2093 cpi->fn_ptr[BLOCK_8X8].sdf = vp8_sad8x8;
2094 cpi->fn_ptr[BLOCK_8X8].vf = vp8_variance8x8;
2095 cpi->fn_ptr[BLOCK_8X8].svf = vp8_sub_pixel_variance8x8;
2096 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL;
2097 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL;
2098 cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
2099 cpi->fn_ptr[BLOCK_8X8].sdx3f = vp8_sad8x8x3;
2100 cpi->fn_ptr[BLOCK_8X8].sdx8f = vp8_sad8x8x8;
2101 cpi->fn_ptr[BLOCK_8X8].sdx4df = vp8_sad8x8x4d;
2102
2103 cpi->fn_ptr[BLOCK_4X4].sdf = vp8_sad4x4;
2104 cpi->fn_ptr[BLOCK_4X4].vf = vp8_variance4x4;
2105 cpi->fn_ptr[BLOCK_4X4].svf = vp8_sub_pixel_variance4x4;
2106 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL;
2107 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL;
2108 cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
2109 cpi->fn_ptr[BLOCK_4X4].sdx3f = vp8_sad4x4x3;
2110 cpi->fn_ptr[BLOCK_4X4].sdx8f = vp8_sad4x4x8;
2111 cpi->fn_ptr[BLOCK_4X4].sdx4df = vp8_sad4x4x4d;
2112
2113 #if ARCH_X86 || ARCH_X86_64
2114 cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
2115 cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
2116 cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
2117 cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
2118 cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
2119 #endif
2120
2121 cpi->full_search_sad = vp8_full_search_sad;
2122 cpi->diamond_search_sad = vp8_diamond_search_sad;
2123 cpi->refining_search_sad = vp8_refining_search_sad;
2124
2125 /* make sure frame 1 is okay */
2126 cpi->mb.error_bins[0] = cpi->common.MBs;
2127
2128 /* vp8cx_init_quantizer() is first called here. Add check in
2129 * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only
2130 * called later when needed. This will avoid unnecessary calls of
2131 * vp8cx_init_quantizer() for every frame.
2132 */
2133 vp8cx_init_quantizer(cpi);
2134
2135 vp8_loop_filter_init(cm);
2136
2137 cpi->common.error.setjmp = 0;
2138
2139 #if CONFIG_MULTI_RES_ENCODING
2140
2141 /* Calculate # of MBs in a row in lower-resolution level image. */
2142 if (cpi->oxcf.mr_encoder_id > 0)
2143 vp8_cal_low_res_mb_cols(cpi);
2144
2145 #endif
2146
2147 /* setup RD costs to MACROBLOCK struct */
2148
2149 cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max+1];
2150 cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max+1];
2151 cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max+1];
2152 cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max+1];
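    /* The mvcost / mvsadcost tables are indexed by a signed MV component, so
     * the pointers above are aimed at the middle of the underlying rd_costs
     * arrays (offset by mv_max+1 / mvfp_max+1); negative indices are then
     * valid, as used by cal_mvsadcosts below.
     */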
2153
2154 cal_mvsadcosts(cpi->mb.mvsadcost);
2155
2156 cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost;
2157 cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost;
2158 cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs;
2159 cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs;
2160 cpi->mb.token_costs = cpi->rd_costs.token_costs;
2161
2162 /* setup block ptrs & offsets */
2163 vp8_setup_block_ptrs(&cpi->mb);
2164 vp8_setup_block_dptrs(&cpi->mb.e_mbd);
2165
2166 return cpi;
2167 }
2168
2169
2170 void vp8_remove_compressor(VP8_COMP **ptr)
2171 {
2172 VP8_COMP *cpi = *ptr;
2173
2174 if (!cpi)
2175 return;
2176
2177 if (cpi && (cpi->common.current_video_frame > 0))
2178 {
2179 #if !(CONFIG_REALTIME_ONLY)
2180
2181 if (cpi->pass == 2)
2182 {
2183 vp8_end_second_pass(cpi);
2184 }
2185
2186 #endif
2187
2188 #ifdef VP8_ENTROPY_STATS
2189 print_context_counters();
2190 print_tree_update_probs();
2191 print_mode_context();
2192 #endif
2193
2194 #if CONFIG_INTERNAL_STATS
2195
2196 if (cpi->pass != 1)
2197 {
2198 FILE *f = fopen("opsnr.stt", "a");
2199 double time_encoded = (cpi->last_end_time_stamp_seen
2200 - cpi->first_time_stamp_ever) / 10000000.000;
2201 double total_encode_time = (cpi->time_receive_data +
2202 cpi->time_compress_data) / 1000.000;
2203 double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded;
2204
2205 if (cpi->b_calculate_psnr)
2206 {
2207 YV12_BUFFER_CONFIG *lst_yv12 =
2208 &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
2209
2210 if (cpi->oxcf.number_of_layers > 1)
2211 {
2212 int i;
2213
2214 fprintf(f, "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2215 "GLPsnrP\tVPXSSIM\t\n");
2216 for (i=0; i<(int)cpi->oxcf.number_of_layers; i++)
2217 {
2218 double dr = (double)cpi->bytes_in_layer[i] *
2219 8.0 / 1000.0 / time_encoded;
2220 double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
2221 lst_yv12->y_width * lst_yv12->y_height;
2222 double total_psnr =
2223 vpx_sse_to_psnr(samples, 255.0,
2224 cpi->total_error2[i]);
2225 double total_psnr2 =
2226 vpx_sse_to_psnr(samples, 255.0,
2227 cpi->total_error2_p[i]);
2228 double total_ssim = 100 * pow(cpi->sum_ssim[i] /
2229 cpi->sum_weights[i], 8.0);
2230
2231 fprintf(f, "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2232 "%7.3f\t%7.3f\n",
2233 i, dr,
2234 cpi->sum_psnr[i] / cpi->frames_in_layer[i],
2235 total_psnr,
2236 cpi->sum_psnr_p[i] / cpi->frames_in_layer[i],
2237 total_psnr2, total_ssim);
2238 }
2239 }
2240 else
2241 {
2242 double samples = 3.0 / 2 * cpi->count *
2243 lst_yv12->y_width * lst_yv12->y_height;
2244 double total_psnr = vpx_sse_to_psnr(samples, 255.0,
2245 cpi->total_sq_error);
2246 double total_psnr2 = vpx_sse_to_psnr(samples, 255.0,
2247 cpi->total_sq_error2);
2248 double total_ssim = 100 * pow(cpi->summed_quality /
2249 cpi->summed_weights, 8.0);
2250
2251 fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
2252 "GLPsnrP\tVPXSSIM\t Time(us)\n");
2253 fprintf(f, "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
2254 "%7.3f\t%8.0f\n",
2255 dr, cpi->total / cpi->count, total_psnr,
2256 cpi->totalp / cpi->count, total_psnr2,
2257 total_ssim, total_encode_time);
2258 }
2259 }
2260
2261 if (cpi->b_calculate_ssimg)
2262 {
2263 if (cpi->oxcf.number_of_layers > 1)
2264 {
2265 int i;
2266
2267 fprintf(f, "Layer\tBitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t"
2268 "Time(us)\n");
2269 for (i=0; i<(int)cpi->oxcf.number_of_layers; i++)
2270 {
2271 double dr = (double)cpi->bytes_in_layer[i] *
2272 8.0 / 1000.0 / time_encoded;
2273 fprintf(f, "%5d\t%7.3f\t%6.4f\t"
2274 "%6.4f\t%6.4f\t%6.4f\t%8.0f\n",
2275 i, dr,
2276 cpi->total_ssimg_y_in_layer[i] /
2277 cpi->frames_in_layer[i],
2278 cpi->total_ssimg_u_in_layer[i] /
2279 cpi->frames_in_layer[i],
2280 cpi->total_ssimg_v_in_layer[i] /
2281 cpi->frames_in_layer[i],
2282 cpi->total_ssimg_all_in_layer[i] /
2283 cpi->frames_in_layer[i],
2284 total_encode_time);
2285 }
2286 }
2287 else
2288 {
2289 fprintf(f, "BitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t"
2290 "Time(us)\n");
2291 fprintf(f, "%7.3f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f\n", dr,
2292 cpi->total_ssimg_y / cpi->count,
2293 cpi->total_ssimg_u / cpi->count,
2294 cpi->total_ssimg_v / cpi->count,
2295 cpi->total_ssimg_all / cpi->count, total_encode_time);
2296 }
2297 }
2298
2299 fclose(f);
2300 #if 0
2301 f = fopen("qskip.stt", "a");
2302 fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount);
2303 fclose(f);
2304 #endif
2305
2306 }
2307
2308 #endif
2309
2310
2311 #ifdef SPEEDSTATS
2312
2313 if (cpi->compressor_speed == 2)
2314 {
2315 int i;
2316 FILE *f = fopen("cxspeed.stt", "a");
2317 cnt_pm /= cpi->common.MBs;
2318
2319 for (i = 0; i < 16; i++)
2320 fprintf(f, "%5d", frames_at_speed[i]);
2321
2322 fprintf(f, "\n");
2323 fclose(f);
2324 }
2325
2326 #endif
2327
2328
2329 #ifdef MODE_STATS
2330 {
2331 extern int count_mb_seg[4];
2332 FILE *f = fopen("modes.stt", "a");
2333 double dr = (double)cpi->framerate * (double)bytes * (double)8 / (double)count / (double)1000 ;
2334 fprintf(f, "intra_mode in Intra Frames:\n");
2335 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1], y_modes[2], y_modes[3], y_modes[4]);
2336 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1], uv_modes[2], uv_modes[3]);
2337 fprintf(f, "B: ");
2338 {
2339 int i;
2340
2341 for (i = 0; i < 10; i++)
2342 fprintf(f, "%8d, ", b_modes[i]);
2343
2344 fprintf(f, "\n");
2345
2346 }
2347
2348 fprintf(f, "Modes in Inter Frames:\n");
2349 fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n",
2350 inter_y_modes[0], inter_y_modes[1], inter_y_modes[2], inter_y_modes[3], inter_y_modes[4],
2351 inter_y_modes[5], inter_y_modes[6], inter_y_modes[7], inter_y_modes[8], inter_y_modes[9]);
2352 fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0], inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]);
2353 fprintf(f, "B: ");
2354 {
2355 int i;
2356
2357 for (i = 0; i < 15; i++)
2358 fprintf(f, "%8d, ", inter_b_modes[i]);
2359
2360 fprintf(f, "\n");
2361
2362 }
2363 fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1], count_mb_seg[2], count_mb_seg[3]);
2364 fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4], inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4], inter_b_modes[NEW4X4]);
2365
2366
2367
2368 fclose(f);
2369 }
2370 #endif
2371
2372 #ifdef VP8_ENTROPY_STATS
2373 {
2374 int i, j, k;
2375 FILE *fmode = fopen("modecontext.c", "w");
2376
2377 fprintf(fmode, "\n#include \"entropymode.h\"\n\n");
2378 fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts ");
2379 fprintf(fmode, "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
2380
2381 for (i = 0; i < 10; i++)
2382 {
2383
2384 fprintf(fmode, " { /* Above Mode : %d */\n", i);
2385
2386 for (j = 0; j < 10; j++)
2387 {
2388
2389 fprintf(fmode, " {");
2390
2391 for (k = 0; k < 10; k++)
2392 {
2393 if (!intra_mode_stats[i][j][k])
2394 fprintf(fmode, " %5d, ", 1);
2395 else
2396 fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]);
2397 }
2398
2399 fprintf(fmode, "}, /* left_mode %d */\n", j);
2400
2401 }
2402
2403 fprintf(fmode, " },\n");
2404
2405 }
2406
2407 fprintf(fmode, "};\n");
2408 fclose(fmode);
2409 }
2410 #endif
2411
2412
2413 #if defined(SECTIONBITS_OUTPUT)
2414
2415 if (0)
2416 {
2417 int i;
2418 FILE *f = fopen("tokenbits.stt", "a");
2419
2420 for (i = 0; i < 28; i++)
2421 fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
2422
2423 fprintf(f, "\n");
2424 fclose(f);
2425 }
2426
2427 #endif
2428
2429 #if 0
2430 {
2431 printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
2432 printf("\n_frames recive_data encod_mb_row compress_frame Total\n");
2433 printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000);
2434 }
2435 #endif
2436
2437 }
2438
2439 #if CONFIG_MULTITHREAD
2440 vp8cx_remove_encoder_threads(cpi);
2441 #endif
2442
2443 #if CONFIG_TEMPORAL_DENOISING
2444 vp8_denoiser_free(&cpi->denoiser);
2445 #endif
2446 dealloc_compressor_data(cpi);
2447 vpx_free(cpi->mb.ss);
2448 vpx_free(cpi->tok);
2449 vpx_free(cpi->cyclic_refresh_map);
2450 vpx_free(cpi->consec_zero_last);
2451
2452 vp8_remove_common(&cpi->common);
2453 vpx_free(cpi);
2454 *ptr = 0;
2455
2456 #ifdef OUTPUT_YUV_SRC
2457 fclose(yuv_file);
2458 #endif
2459 #ifdef OUTPUT_YUV_DENOISED
2460 fclose(yuv_denoised_file);
2461 #endif
2462
2463 #if 0
2464
2465 if (keyfile)
2466 fclose(keyfile);
2467
2468 if (framepsnr)
2469 fclose(framepsnr);
2470
2471 if (kf_list)
2472 fclose(kf_list);
2473
2474 #endif
2475
2476 }
2477
2478
2479 static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
2480 unsigned char *recon, int recon_stride,
2481 unsigned int cols, unsigned int rows)
2482 {
2483 unsigned int row, col;
2484 uint64_t total_sse = 0;
2485 int diff;
2486
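    /* Accumulate SSE over whole 16x16 blocks with the optimised vp8_mse16x16,
     * then pick up any right/bottom edge pixels that do not fill a complete
     * block with the plain C loops below.
     */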
2487 for (row = 0; row + 16 <= rows; row += 16)
2488 {
2489 for (col = 0; col + 16 <= cols; col += 16)
2490 {
2491 unsigned int sse;
2492
2493 vp8_mse16x16(orig + col, orig_stride,
2494 recon + col, recon_stride,
2495 &sse);
2496 total_sse += sse;
2497 }
2498
2499 /* Handle odd-sized width */
2500 if (col < cols)
2501 {
2502 unsigned int border_row, border_col;
2503 unsigned char *border_orig = orig;
2504 unsigned char *border_recon = recon;
2505
2506 for (border_row = 0; border_row < 16; border_row++)
2507 {
2508 for (border_col = col; border_col < cols; border_col++)
2509 {
2510 diff = border_orig[border_col] - border_recon[border_col];
2511 total_sse += diff * diff;
2512 }
2513
2514 border_orig += orig_stride;
2515 border_recon += recon_stride;
2516 }
2517 }
2518
2519 orig += orig_stride * 16;
2520 recon += recon_stride * 16;
2521 }
2522
2523 /* Handle odd-sized height */
2524 for (; row < rows; row++)
2525 {
2526 for (col = 0; col < cols; col++)
2527 {
2528 diff = orig[col] - recon[col];
2529 total_sse += diff * diff;
2530 }
2531
2532 orig += orig_stride;
2533 recon += recon_stride;
2534 }
2535
2536 vp8_clear_system_state();
2537 return total_sse;
2538 }
2539
2540
2541 static void generate_psnr_packet(VP8_COMP *cpi)
2542 {
2543 YV12_BUFFER_CONFIG *orig = cpi->Source;
2544 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
2545 struct vpx_codec_cx_pkt pkt;
2546 uint64_t sse;
2547 int i;
2548 unsigned int width = cpi->common.Width;
2549 unsigned int height = cpi->common.Height;
2550
2551 pkt.kind = VPX_CODEC_PSNR_PKT;
2552 sse = calc_plane_error(orig->y_buffer, orig->y_stride,
2553 recon->y_buffer, recon->y_stride,
2554 width, height);
2555 pkt.data.psnr.sse[0] = sse;
2556 pkt.data.psnr.sse[1] = sse;
2557 pkt.data.psnr.samples[0] = width * height;
2558 pkt.data.psnr.samples[1] = width * height;
2559
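    /* sse[0] / samples[0] accumulate whole-frame totals across all planes;
     * indices 1..3 hold the per-plane Y, U and V values added below.
     */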
2560 width = (width + 1) / 2;
2561 height = (height + 1) / 2;
2562
2563 sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
2564 recon->u_buffer, recon->uv_stride,
2565 width, height);
2566 pkt.data.psnr.sse[0] += sse;
2567 pkt.data.psnr.sse[2] = sse;
2568 pkt.data.psnr.samples[0] += width * height;
2569 pkt.data.psnr.samples[2] = width * height;
2570
2571 sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
2572 recon->v_buffer, recon->uv_stride,
2573 width, height);
2574 pkt.data.psnr.sse[0] += sse;
2575 pkt.data.psnr.sse[3] = sse;
2576 pkt.data.psnr.samples[0] += width * height;
2577 pkt.data.psnr.samples[3] = width * height;
2578
2579 for (i = 0; i < 4; i++)
2580 pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
2581 (double)(pkt.data.psnr.sse[i]));
2582
2583 vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
2584 }
2585
2586
2587 int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags)
2588 {
2589 if (ref_frame_flags > 7)
2590 return -1 ;
2591
2592 cpi->ref_frame_flags = ref_frame_flags;
2593 return 0;
2594 }
2595 int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags)
2596 {
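    /* ref_frame_flags is a bitmask of VP8_LAST_FRAME, VP8_GOLD_FRAME and
     * VP8_ALTR_FRAME; anything above 7 sets bits outside that mask and is
     * rejected.
     */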
2597 if (ref_frame_flags > 7)
2598 return -1 ;
2599
2600 cpi->common.refresh_golden_frame = 0;
2601 cpi->common.refresh_alt_ref_frame = 0;
2602 cpi->common.refresh_last_frame = 0;
2603
2604 if (ref_frame_flags & VP8_LAST_FRAME)
2605 cpi->common.refresh_last_frame = 1;
2606
2607 if (ref_frame_flags & VP8_GOLD_FRAME)
2608 cpi->common.refresh_golden_frame = 1;
2609
2610 if (ref_frame_flags & VP8_ALTR_FRAME)
2611 cpi->common.refresh_alt_ref_frame = 1;
2612
2613 return 0;
2614 }
2615
2616 int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd)
2617 {
2618 VP8_COMMON *cm = &cpi->common;
2619 int ref_fb_idx;
2620
2621 if (ref_frame_flag == VP8_LAST_FRAME)
2622 ref_fb_idx = cm->lst_fb_idx;
2623 else if (ref_frame_flag == VP8_GOLD_FRAME)
2624 ref_fb_idx = cm->gld_fb_idx;
2625 else if (ref_frame_flag == VP8_ALTR_FRAME)
2626 ref_fb_idx = cm->alt_fb_idx;
2627 else
2628 return -1;
2629
2630 vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd);
2631
2632 return 0;
2633 }
2634 int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd)
2635 {
2636 VP8_COMMON *cm = &cpi->common;
2637
2638 int ref_fb_idx;
2639
2640 if (ref_frame_flag == VP8_LAST_FRAME)
2641 ref_fb_idx = cm->lst_fb_idx;
2642 else if (ref_frame_flag == VP8_GOLD_FRAME)
2643 ref_fb_idx = cm->gld_fb_idx;
2644 else if (ref_frame_flag == VP8_ALTR_FRAME)
2645 ref_fb_idx = cm->alt_fb_idx;
2646 else
2647 return -1;
2648
2649 vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]);
2650
2651 return 0;
2652 }
2653 int vp8_update_entropy(VP8_COMP *cpi, int update)
2654 {
2655 VP8_COMMON *cm = &cpi->common;
2656 cm->refresh_entropy_probs = update;
2657
2658 return 0;
2659 }
2660
2661
2662 #if defined(OUTPUT_YUV_SRC) || defined(OUTPUT_YUV_DENOISED)
2663 void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s)
2664 {
2665 unsigned char *src = s->y_buffer;
2666 int h = s->y_height;
2667
2668 do
2669 {
2670 fwrite(src, s->y_width, 1, yuv_file);
2671 src += s->y_stride;
2672 }
2673 while (--h);
2674
2675 src = s->u_buffer;
2676 h = s->uv_height;
2677
2678 do
2679 {
2680 fwrite(src, s->uv_width, 1, yuv_file);
2681 src += s->uv_stride;
2682 }
2683 while (--h);
2684
2685 src = s->v_buffer;
2686 h = s->uv_height;
2687
2688 do
2689 {
2690 fwrite(src, s->uv_width, 1, yuv_file);
2691 src += s->uv_stride;
2692 }
2693 while (--h);
2694 }
2695 #endif
2696
2697 static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
2698 {
2699 VP8_COMMON *cm = &cpi->common;
2700
2701 /* are we resizing the image */
2702 if (cm->horiz_scale != 0 || cm->vert_scale != 0)
2703 {
2704 #if CONFIG_SPATIAL_RESAMPLING
2705 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
2706 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
2707 int tmp_height;
2708
2709 if (cm->vert_scale == 3)
2710 tmp_height = 9;
2711 else
2712 tmp_height = 11;
2713
2714 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2715 Scale2Ratio(cm->vert_scale, &vr, &vs);
2716
2717 vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
2718 tmp_height, hs, hr, vs, vr, 0);
2719
2720 vp8_yv12_extend_frame_borders(&cpi->scaled_source);
2721 cpi->Source = &cpi->scaled_source;
2722 #endif
2723 }
2724 else
2725 cpi->Source = sd;
2726 }
2727
2728
2729 static int resize_key_frame(VP8_COMP *cpi)
2730 {
2731 #if CONFIG_SPATIAL_RESAMPLING
2732 VP8_COMMON *cm = &cpi->common;
2733
2734 /* Do we need to apply resampling for one pass cbr.
2735 * In one pass this is more limited than in two pass cbr.
2736 * The test and any change is only made once per key frame sequence.
2737 */
2738 if (cpi->oxcf.allow_spatial_resampling && (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER))
2739 {
2740 int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
2741 int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
2742 int new_width, new_height;
2743
2744 /* If we are below the resample DOWN watermark then scale down a
2745 * notch.
2746 */
2747 if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark * cpi->oxcf.optimal_buffer_level / 100))
2748 {
2749 cm->horiz_scale = (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
2750 cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
2751 }
2752 /* Should we now start scaling back up */
2753 else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark * cpi->oxcf.optimal_buffer_level / 100))
2754 {
2755 cm->horiz_scale = (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
2756 cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
2757 }
2758
2759 /* Get the new height and width */
2760 Scale2Ratio(cm->horiz_scale, &hr, &hs);
2761 Scale2Ratio(cm->vert_scale, &vr, &vs);
2762 new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
2763 new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;
2764
2765 /* If the image size has changed we need to reallocate the buffers
2766 * and resample the source image
2767 */
2768 if ((cm->Width != new_width) || (cm->Height != new_height))
2769 {
2770 cm->Width = new_width;
2771 cm->Height = new_height;
2772 vp8_alloc_compressor_data(cpi);
2773 scale_and_extend_source(cpi->un_scaled_source, cpi);
2774 return 1;
2775 }
2776 }
2777
2778 #endif
2779 return 0;
2780 }
2781
2782
2783 static void update_alt_ref_frame_stats(VP8_COMP *cpi)
2784 {
2785 VP8_COMMON *cm = &cpi->common;
2786
2787 /* Select an interval before next GF or altref */
2788 if (!cpi->auto_gold)
2789 cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2790
2791 if ((cpi->pass != 2) && cpi->frames_till_gf_update_due)
2792 {
2793 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2794
2795 /* Set the bits per frame that we should try and recover in
2796 * subsequent inter frames to account for the extra GF spend...
2797          * note that this does not apply for GF updates that occur
2798 * coincident with a key frame as the extra cost of key frames is
2799 * dealt with elsewhere.
2800 */
2801 cpi->gf_overspend_bits += cpi->projected_frame_size;
2802 cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2803 }
2804
2805 /* Update data structure that monitors level of reference to last GF */
2806 vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2807 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2808
2809     /* A refresh on this frame means later frames don't refresh unless the user requests it */
2810 cpi->frames_since_golden = 0;
2811
2812 /* Clear the alternate reference update pending flag. */
2813 cpi->source_alt_ref_pending = 0;
2814
2815 /* Set the alternate reference frame active flag */
2816 cpi->source_alt_ref_active = 1;
2817
2818
2819 }
2820 static void update_golden_frame_stats(VP8_COMP *cpi)
2821 {
2822 VP8_COMMON *cm = &cpi->common;
2823
2824 /* Update the Golden frame usage counts. */
2825 if (cm->refresh_golden_frame)
2826 {
2827 /* Select an interval before next GF */
2828 if (!cpi->auto_gold)
2829 cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
2830
2831 if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0))
2832 {
2833 cpi->current_gf_interval = cpi->frames_till_gf_update_due;
2834
2835 /* Set the bits per frame that we should try and recover in
2836 * subsequent inter frames to account for the extra GF spend...
2837              * note that this does not apply for GF updates that occur
2838 * coincident with a key frame as the extra cost of key frames
2839 * is dealt with elsewhere.
2840 */
2841 if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active)
2842 {
2843                 /* Calculate GF bits to be recovered
2844 * Projected size - av frame bits available for inter
2845 * frames for clip as a whole
2846 */
2847 cpi->gf_overspend_bits += (cpi->projected_frame_size - cpi->inter_frame_target);
2848 }
2849
2850 cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
2851
2852 }
2853
2854 /* Update data structure that monitors level of reference to last GF */
2855 vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
2856 cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
2857
2858         /* A refresh on this frame means later frames don't refresh unless
2859          * the user explicitly requests it
2860 */
2861 cm->refresh_golden_frame = 0;
2862 cpi->frames_since_golden = 0;
2863
2864 cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
2865 cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
2866 cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
2867 cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
2868
2869 /* ******** Fixed Q test code only ************ */
2870 /* If we are going to use the ALT reference for the next group of
2871 * frames set a flag to say so.
2872 */
2873 if (cpi->oxcf.fixed_q >= 0 &&
2874 cpi->oxcf.play_alternate && !cpi->common.refresh_alt_ref_frame)
2875 {
2876 cpi->source_alt_ref_pending = 1;
2877 cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
2878 }
2879
2880 if (!cpi->source_alt_ref_pending)
2881 cpi->source_alt_ref_active = 0;
2882
2883 /* Decrement count down till next gf */
2884 if (cpi->frames_till_gf_update_due > 0)
2885 cpi->frames_till_gf_update_due--;
2886
2887 }
2888 else if (!cpi->common.refresh_alt_ref_frame)
2889 {
2890 /* Decrement count down till next gf */
2891 if (cpi->frames_till_gf_update_due > 0)
2892 cpi->frames_till_gf_update_due--;
2893
2894 if (cpi->frames_till_alt_ref_frame)
2895 cpi->frames_till_alt_ref_frame --;
2896
2897 cpi->frames_since_golden ++;
2898
2899 if (cpi->frames_since_golden > 1)
2900 {
2901 cpi->recent_ref_frame_usage[INTRA_FRAME] +=
2902 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME];
2903 cpi->recent_ref_frame_usage[LAST_FRAME] +=
2904 cpi->mb.count_mb_ref_frame_usage[LAST_FRAME];
2905 cpi->recent_ref_frame_usage[GOLDEN_FRAME] +=
2906 cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME];
2907 cpi->recent_ref_frame_usage[ALTREF_FRAME] +=
2908 cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
2909 }
2910 }
2911 }
2912
2913 /* This function updates the reference frame probability estimates that
2914 * will be used during mode selection
2915 */
2916 static void update_rd_ref_frame_probs(VP8_COMP *cpi)
2917 {
2918 VP8_COMMON *cm = &cpi->common;
2919
2920 const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
2921 const int rf_intra = rfct[INTRA_FRAME];
2922 const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
2923
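    /* prob_intra_coded, prob_last_coded and prob_gf_coded are 8-bit
     * probabilities (out of 255) used when costing reference frame choices
     * during mode selection; they are reset for key frames and otherwise
     * biased according to which references the previous frame actually used.
     */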
2924 if (cm->frame_type == KEY_FRAME)
2925 {
2926 cpi->prob_intra_coded = 255;
2927 cpi->prob_last_coded = 128;
2928 cpi->prob_gf_coded = 128;
2929 }
2930 else if (!(rf_intra + rf_inter))
2931 {
2932 cpi->prob_intra_coded = 63;
2933 cpi->prob_last_coded = 128;
2934 cpi->prob_gf_coded = 128;
2935 }
2936
2937 /* update reference frame costs since we can do better than what we got
2938 * last frame.
2939 */
2940 if (cpi->oxcf.number_of_layers == 1)
2941 {
2942 if (cpi->common.refresh_alt_ref_frame)
2943 {
2944 cpi->prob_intra_coded += 40;
2945 if (cpi->prob_intra_coded > 255)
2946 cpi->prob_intra_coded = 255;
2947 cpi->prob_last_coded = 200;
2948 cpi->prob_gf_coded = 1;
2949 }
2950 else if (cpi->frames_since_golden == 0)
2951 {
2952 cpi->prob_last_coded = 214;
2953 }
2954 else if (cpi->frames_since_golden == 1)
2955 {
2956 cpi->prob_last_coded = 192;
2957 cpi->prob_gf_coded = 220;
2958 }
2959 else if (cpi->source_alt_ref_active)
2960 {
2961 cpi->prob_gf_coded -= 20;
2962
2963 if (cpi->prob_gf_coded < 10)
2964 cpi->prob_gf_coded = 10;
2965 }
2966 if (!cpi->source_alt_ref_active)
2967 cpi->prob_gf_coded = 255;
2968 }
2969 }
2970
2971
2972 /* 1 = key, 0 = inter */
2973 static int decide_key_frame(VP8_COMP *cpi)
2974 {
2975 VP8_COMMON *cm = &cpi->common;
2976
2977 int code_key_frame = 0;
2978
2979 cpi->kf_boost = 0;
2980
2981 if (cpi->Speed > 11)
2982 return 0;
2983
2984 /* Clear down mmx registers */
2985 vp8_clear_system_state();
2986
2987 if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0))
2988 {
2989 double change = 1.0 * abs((int)(cpi->mb.intra_error -
2990 cpi->last_intra_error)) / (1 + cpi->last_intra_error);
2991 double change2 = 1.0 * abs((int)(cpi->mb.prediction_error -
2992 cpi->last_prediction_error)) / (1 + cpi->last_prediction_error);
2993 double minerror = cm->MBs * 256;
2994
2995 cpi->last_intra_error = cpi->mb.intra_error;
2996 cpi->last_prediction_error = cpi->mb.prediction_error;
2997
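        /* change / change2 measure the relative swing in intra and prediction
         * error since the previous frame; a key frame is chosen when the intra
         * error is not much larger than the inter prediction error and either
         * error has moved sharply.
         */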
2998 if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15
2999 && cpi->mb.prediction_error > minerror
3000 && (change > .25 || change2 > .25))
3001 {
3002 /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra > cpi->last_frame_percent_intra + 3*/
3003 return 1;
3004 }
3005
3006 return 0;
3007
3008 }
3009
3010 /* If the following are true we might as well code a key frame */
3011 if (((cpi->this_frame_percent_intra == 100) &&
3012 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) ||
3013 ((cpi->this_frame_percent_intra > 95) &&
3014 (cpi->this_frame_percent_intra >= (cpi->last_frame_percent_intra + 5))))
3015 {
3016 code_key_frame = 1;
3017 }
3018     /* In addition, if the following are true and this is not a golden frame,
3019      * then code a key frame. Note that on golden frames there often seems
3020      * to be a pop in intra usage anyway, hence this restriction is
3021 * designed to prevent spurious key frames. The Intra pop needs to be
3022 * investigated.
3023 */
3024 else if (((cpi->this_frame_percent_intra > 60) &&
3025 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 2))) ||
3026 ((cpi->this_frame_percent_intra > 75) &&
3027 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 3 / 2))) ||
3028 ((cpi->this_frame_percent_intra > 90) &&
3029 (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 10))))
3030 {
3031 if (!cm->refresh_golden_frame)
3032 code_key_frame = 1;
3033 }
3034
3035 return code_key_frame;
3036
3037 }
3038
3039 #if !(CONFIG_REALTIME_ONLY)
3040 static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags)
3041 {
3042 (void) size;
3043 (void) dest;
3044 (void) frame_flags;
3045 vp8_set_quantizer(cpi, 26);
3046
3047 vp8_first_pass(cpi);
3048 }
3049 #endif
3050
3051 #if 0
3052 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
3053 {
3054
3055 /* write the frame */
3056 FILE *yframe;
3057 int i;
3058 char filename[255];
3059
3060 sprintf(filename, "cx\\y%04d.raw", this_frame);
3061 yframe = fopen(filename, "wb");
3062
3063 for (i = 0; i < frame->y_height; i++)
3064 fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
3065
3066 fclose(yframe);
3067 sprintf(filename, "cx\\u%04d.raw", this_frame);
3068 yframe = fopen(filename, "wb");
3069
3070 for (i = 0; i < frame->uv_height; i++)
3071 fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
3072
3073 fclose(yframe);
3074 sprintf(filename, "cx\\v%04d.raw", this_frame);
3075 yframe = fopen(filename, "wb");
3076
3077 for (i = 0; i < frame->uv_height; i++)
3078 fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
3079
3080 fclose(yframe);
3081 }
3082 #endif
3083 /* return of 0 means drop frame */
3084
3085 /* Function to test for conditions that indicate we should loop
3086 * back and recode a frame.
3087 */
3088 static int recode_loop_test( VP8_COMP *cpi,
3089 int high_limit, int low_limit,
3090 int q, int maxq, int minq )
3091 {
3092 int force_recode = 0;
3093 VP8_COMMON *cm = &cpi->common;
3094
3095 /* Is frame recode allowed at all
3096      * Yes if either recode mode 1 is selected, or mode 2 is selected
3097      * and the frame is a key frame, golden frame or alt_ref_frame
3098 */
3099 if ( (cpi->sf.recode_loop == 1) ||
3100 ( (cpi->sf.recode_loop == 2) &&
3101 ( (cm->frame_type == KEY_FRAME) ||
3102 cm->refresh_golden_frame ||
3103 cm->refresh_alt_ref_frame ) ) )
3104 {
3105 /* General over and under shoot tests */
3106 if ( ((cpi->projected_frame_size > high_limit) && (q < maxq)) ||
3107 ((cpi->projected_frame_size < low_limit) && (q > minq)) )
3108 {
3109 force_recode = 1;
3110 }
3111 /* Special Constrained quality tests */
3112 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
3113 {
3114 /* Undershoot and below auto cq level */
3115 if ( (q > cpi->cq_target_quality) &&
3116 (cpi->projected_frame_size <
3117 ((cpi->this_frame_target * 7) >> 3)))
3118 {
3119 force_recode = 1;
3120 }
3121 /* Severe undershoot and between auto and user cq level */
3122 else if ( (q > cpi->oxcf.cq_level) &&
3123 (cpi->projected_frame_size < cpi->min_frame_bandwidth) &&
3124 (cpi->active_best_quality > cpi->oxcf.cq_level))
3125 {
3126 force_recode = 1;
3127 cpi->active_best_quality = cpi->oxcf.cq_level;
3128 }
3129 }
3130 }
3131
3132 return force_recode;
3133 }
3134
3135 static void update_reference_frames(VP8_COMP *cpi)
3136 {
3137 VP8_COMMON *cm = &cpi->common;
3138 YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb;
3139
3140 /* At this point the new frame has been encoded.
3141 * If any buffer copy / swapping is signaled it should be done here.
3142 */
3143
3144 if (cm->frame_type == KEY_FRAME)
3145 {
3146 yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME ;
3147
3148 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
3149 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
3150
3151 cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx;
3152
3153 #if CONFIG_MULTI_RES_ENCODING
3154 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
3155 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
3156 #endif
3157 }
3158 else /* For non key frames */
3159 {
3160 if (cm->refresh_alt_ref_frame)
3161 {
3162 assert(!cm->copy_buffer_to_arf);
3163
3164 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME;
3165 cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
3166 cm->alt_fb_idx = cm->new_fb_idx;
3167
3168 #if CONFIG_MULTI_RES_ENCODING
3169 cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame;
3170 #endif
3171 }
3172 else if (cm->copy_buffer_to_arf)
3173 {
3174 assert(!(cm->copy_buffer_to_arf & ~0x3));
3175
3176 if (cm->copy_buffer_to_arf == 1)
3177 {
3178 if(cm->alt_fb_idx != cm->lst_fb_idx)
3179 {
3180 yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME;
3181 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
3182 cm->alt_fb_idx = cm->lst_fb_idx;
3183
3184 #if CONFIG_MULTI_RES_ENCODING
3185 cpi->current_ref_frames[ALTREF_FRAME] =
3186 cpi->current_ref_frames[LAST_FRAME];
3187 #endif
3188 }
3189 }
3190 else /* if (cm->copy_buffer_to_arf == 2) */
3191 {
3192 if(cm->alt_fb_idx != cm->gld_fb_idx)
3193 {
3194 yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME;
3195 yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME;
3196 cm->alt_fb_idx = cm->gld_fb_idx;
3197
3198 #if CONFIG_MULTI_RES_ENCODING
3199 cpi->current_ref_frames[ALTREF_FRAME] =
3200 cpi->current_ref_frames[GOLDEN_FRAME];
3201 #endif
3202 }
3203 }
3204 }
3205
3206 if (cm->refresh_golden_frame)
3207 {
3208 assert(!cm->copy_buffer_to_gf);
3209
3210 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME;
3211 cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
3212 cm->gld_fb_idx = cm->new_fb_idx;
3213
3214 #if CONFIG_MULTI_RES_ENCODING
3215 cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame;
3216 #endif
3217 }
3218 else if (cm->copy_buffer_to_gf)
3219 {
3220 assert(!(cm->copy_buffer_to_arf & ~0x3));
3221
3222 if (cm->copy_buffer_to_gf == 1)
3223 {
3224 if(cm->gld_fb_idx != cm->lst_fb_idx)
3225 {
3226 yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME;
3227 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
3228 cm->gld_fb_idx = cm->lst_fb_idx;
3229
3230 #if CONFIG_MULTI_RES_ENCODING
3231 cpi->current_ref_frames[GOLDEN_FRAME] =
3232 cpi->current_ref_frames[LAST_FRAME];
3233 #endif
3234 }
3235 }
3236 else /* if (cm->copy_buffer_to_gf == 2) */
3237 {
3238 if(cm->alt_fb_idx != cm->gld_fb_idx)
3239 {
3240 yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME;
3241 yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME;
3242 cm->gld_fb_idx = cm->alt_fb_idx;
3243
3244 #if CONFIG_MULTI_RES_ENCODING
3245 cpi->current_ref_frames[GOLDEN_FRAME] =
3246 cpi->current_ref_frames[ALTREF_FRAME];
3247 #endif
3248 }
3249 }
3250 }
3251 }
3252
3253 if (cm->refresh_last_frame)
3254 {
3255 cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME;
3256 cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME;
3257 cm->lst_fb_idx = cm->new_fb_idx;
3258
3259 #if CONFIG_MULTI_RES_ENCODING
3260 cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame;
3261 #endif
3262 }
3263
3264 #if CONFIG_TEMPORAL_DENOISING
3265 if (cpi->oxcf.noise_sensitivity)
3266 {
3267         /* We shouldn't have to keep multiple copies, as we know in advance which
3268          * buffer we should start from - but for now, to get something up and
3269          * running, the buffers are simply copied.
3270 */
3271 if (cm->frame_type == KEY_FRAME)
3272 {
3273 int i;
3274 for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
3275 vp8_yv12_copy_frame(cpi->Source,
3276 &cpi->denoiser.yv12_running_avg[i]);
3277 }
3278 else /* For non key frames */
3279 {
3280 vp8_yv12_extend_frame_borders(
3281 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
3282
3283 if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf)
3284 {
3285 vp8_yv12_copy_frame(
3286 &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3287 &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
3288 }
3289 if (cm->refresh_golden_frame || cm->copy_buffer_to_gf)
3290 {
3291 vp8_yv12_copy_frame(
3292 &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3293 &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
3294 }
3295 if(cm->refresh_last_frame)
3296 {
3297 vp8_yv12_copy_frame(
3298 &cpi->denoiser.yv12_running_avg[INTRA_FRAME],
3299 &cpi->denoiser.yv12_running_avg[LAST_FRAME]);
3300 }
3301 }
3302
3303 }
3304 #endif
3305
3306 }
3307
3308 void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm)
3309 {
3310 const FRAME_TYPE frame_type = cm->frame_type;
3311
3312 if (cm->no_lpf)
3313 {
3314 cm->filter_level = 0;
3315 }
3316 else
3317 {
3318 struct vpx_usec_timer timer;
3319
3320 vp8_clear_system_state();
3321
3322 vpx_usec_timer_start(&timer);
3323 if (cpi->sf.auto_filter == 0)
3324 vp8cx_pick_filter_level_fast(cpi->Source, cpi);
3325
3326 else
3327 vp8cx_pick_filter_level(cpi->Source, cpi);
3328
3329 if (cm->filter_level > 0)
3330 {
3331 vp8cx_set_alt_lf_level(cpi, cm->filter_level);
3332 }
3333
3334 vpx_usec_timer_mark(&timer);
3335 cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
3336 }
3337
3338 #if CONFIG_MULTITHREAD
3339 if (cpi->b_multi_threaded)
3340 sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */
3341 #endif
3342
3343 if (cm->filter_level > 0)
3344 {
3345 vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type);
3346 }
3347
3348 vp8_yv12_extend_frame_borders(cm->frame_to_show);
3349
3350 }
3351
3352 static void encode_frame_to_data_rate
3353 (
3354 VP8_COMP *cpi,
3355 unsigned long *size,
3356 unsigned char *dest,
3357 unsigned char* dest_end,
3358 unsigned int *frame_flags
3359 )
3360 {
3361 int Q;
3362 int frame_over_shoot_limit;
3363 int frame_under_shoot_limit;
3364
3365 int Loop = 0;
3366 int loop_count;
3367
3368 VP8_COMMON *cm = &cpi->common;
3369 int active_worst_qchanged = 0;
3370
3371 #if !(CONFIG_REALTIME_ONLY)
3372 int q_low;
3373 int q_high;
3374 int zbin_oq_high;
3375 int zbin_oq_low = 0;
3376 int top_index;
3377 int bottom_index;
3378 int overshoot_seen = 0;
3379 int undershoot_seen = 0;
3380 #endif
3381
3382 int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark *
3383 cpi->oxcf.optimal_buffer_level / 100);
3384 int drop_mark75 = drop_mark * 2 / 3;
3385 int drop_mark50 = drop_mark / 4;
3386 int drop_mark25 = drop_mark / 8;
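    /* Buffer-fullness thresholds derived from the configured frame-drop
     * watermark; they are used below to step decimation_factor up as the
     * buffer drains and back down as it refills.
     */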
3387
3388
3389 /* Clear down mmx registers to allow floating point in what follows */
3390 vp8_clear_system_state();
3391
3392 #if CONFIG_MULTITHREAD
3393 /* wait for the last picture loopfilter thread done */
3394 if (cpi->b_lpf_running)
3395 {
3396 sem_wait(&cpi->h_event_end_lpf);
3397 cpi->b_lpf_running = 0;
3398 }
3399 #endif
3400
3401 if(cpi->force_next_frame_intra)
3402 {
3403 cm->frame_type = KEY_FRAME; /* delayed intra frame */
3404 cpi->force_next_frame_intra = 0;
3405 }
3406
3407 /* For an alt ref frame in 2 pass we skip the call to the second pass
3408 * function that sets the target bandwidth
3409 */
3410 #if !(CONFIG_REALTIME_ONLY)
3411
3412 if (cpi->pass == 2)
3413 {
3414 if (cpi->common.refresh_alt_ref_frame)
3415 {
3416 /* Per frame bit target for the alt ref frame */
3417 cpi->per_frame_bandwidth = cpi->twopass.gf_bits;
3418 /* per second target bitrate */
3419 cpi->target_bandwidth = (int)(cpi->twopass.gf_bits *
3420 cpi->output_framerate);
3421 }
3422 }
3423 else
3424 #endif
3425 cpi->per_frame_bandwidth = (int)(cpi->target_bandwidth / cpi->output_framerate);
3426
3427 /* Default turn off buffer to buffer copying */
3428 cm->copy_buffer_to_gf = 0;
3429 cm->copy_buffer_to_arf = 0;
3430
3431 /* Clear zbin over-quant value and mode boost values. */
3432 cpi->mb.zbin_over_quant = 0;
3433 cpi->mb.zbin_mode_boost = 0;
3434
3435 /* Enable or disable mode based tweaking of the zbin
3436 * For 2 Pass Only used where GF/ARF prediction quality
3437 * is above a threshold
3438 */
3439 cpi->mb.zbin_mode_boost_enabled = 1;
3440 if (cpi->pass == 2)
3441 {
3442 if ( cpi->gfu_boost <= 400 )
3443 {
3444 cpi->mb.zbin_mode_boost_enabled = 0;
3445 }
3446 }
3447
3448 /* Current default encoder behaviour for the altref sign bias */
3449 if (cpi->source_alt_ref_active)
3450 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1;
3451 else
3452 cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0;
3453
3454 /* Check to see if a key frame is signaled
3455 * For two pass with auto key frame enabled cm->frame_type may already
3456 * be set, but not for one pass.
3457 */
3458 if ((cm->current_video_frame == 0) ||
3459 (cm->frame_flags & FRAMEFLAGS_KEY) ||
3460 (cpi->oxcf.auto_key && (cpi->frames_since_key % cpi->key_frame_frequency == 0)))
3461 {
3462 /* Key frame from VFW/auto-keyframe/first frame */
3463 cm->frame_type = KEY_FRAME;
3464 }
3465
3466 #if CONFIG_MULTI_RES_ENCODING
3467 /* In multi-resolution encoding, frame_type is decided by lowest-resolution
3468 * encoder. Same frame_type is adopted while encoding at other resolution.
3469 */
3470 if (cpi->oxcf.mr_encoder_id)
3471 {
3472 LOWER_RES_FRAME_INFO* low_res_frame_info
3473 = (LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info;
3474
3475 cm->frame_type = low_res_frame_info->frame_type;
3476
3477 if(cm->frame_type != KEY_FRAME)
3478 {
3479 cpi->mr_low_res_mv_avail = 1;
3480 cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped);
3481
3482 if (cpi->ref_frame_flags & VP8_LAST_FRAME)
3483 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[LAST_FRAME]
3484 == low_res_frame_info->low_res_ref_frames[LAST_FRAME]);
3485
3486 if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
3487 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[GOLDEN_FRAME]
3488 == low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]);
3489
3490 if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
3491 cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME]
3492 == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]);
3493 }
3494 }
3495 #endif
3496
3497 /* Set various flags etc to special state if it is a key frame */
3498 if (cm->frame_type == KEY_FRAME)
3499 {
3500 int i;
3501
3502 // Set the loop filter deltas and segmentation map update
3503 setup_features(cpi);
3504
3505 /* The alternate reference frame cannot be active for a key frame */
3506 cpi->source_alt_ref_active = 0;
3507
3508 /* Reset the RD threshold multipliers to default of * 1 (128) */
3509 for (i = 0; i < MAX_MODES; i++)
3510 {
3511 cpi->mb.rd_thresh_mult[i] = 128;
3512 }
3513
3514 // Reset the zero_last counter to 0 on key frame.
3515 vpx_memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
3516 }
3517
3518 #if 0
3519 /* Experimental code for lagged compress and one pass
3520 * Initialise one_pass GF frames stats
3521 * Update stats used for GF selection
3522 */
3523 {
3524 cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS;
3525
3526 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0;
3527 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0;
3528 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0;
3529 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0;
3530 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0;
3531 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0;
3532 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0;
3533 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0;
3534 cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0;
3535 }
3536 #endif
3537
3538 update_rd_ref_frame_probs(cpi);
3539
3540 if (cpi->drop_frames_allowed)
3541 {
3542 /* The reset to decimation 0 is only done here for one pass.
3543 * Once it is set two pass leaves decimation on till the next kf.
3544 */
3545 if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0))
3546 cpi->decimation_factor --;
3547
3548 if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0)
3549 cpi->decimation_factor = 1;
3550
3551 else if (cpi->buffer_level < drop_mark25 && (cpi->decimation_factor == 2 || cpi->decimation_factor == 3))
3552 {
3553 cpi->decimation_factor = 3;
3554 }
3555 else if (cpi->buffer_level < drop_mark50 && (cpi->decimation_factor == 1 || cpi->decimation_factor == 2))
3556 {
3557 cpi->decimation_factor = 2;
3558 }
3559 else if (cpi->buffer_level < drop_mark75 && (cpi->decimation_factor == 0 || cpi->decimation_factor == 1))
3560 {
3561 cpi->decimation_factor = 1;
3562 }
3563 }
3564
3565 /* The following decimates the frame rate according to a regular
3566 * pattern (i.e. to 1/2 or 2/3 frame rate) This can be used to help
3567 * prevent buffer under-run in CBR mode. Alternatively it might be
3568 * desirable in some situations to drop frame rate but throw more bits
3569 * at each frame.
3570 *
3571 * Note that dropping a key frame can be problematic if spatial
3572 * resampling is also active
3573 */
3574 if (cpi->decimation_factor > 0)
3575 {
3576 switch (cpi->decimation_factor)
3577 {
3578 case 1:
3579 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2;
3580 break;
3581 case 2:
3582 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3583 break;
3584 case 3:
3585 cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4;
3586 break;
3587 }
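        /* Fewer frames will actually be coded under the decimation pattern, so
         * the per-frame bit budget is scaled up to keep the overall data rate
         * roughly on target.
         */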
3588
3589 /* Note that we should not throw out a key frame (especially when
3590 * spatial resampling is enabled).
3591 */
3592 if (cm->frame_type == KEY_FRAME)
3593 {
3594 cpi->decimation_count = cpi->decimation_factor;
3595 }
3596 else if (cpi->decimation_count > 0)
3597 {
3598 cpi->decimation_count --;
3599
3600 cpi->bits_off_target += cpi->av_per_frame_bandwidth;
3601 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
3602 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
3603
3604 #if CONFIG_MULTI_RES_ENCODING
3605 vp8_store_drop_frame_info(cpi);
3606 #endif
3607
3608 cm->current_video_frame++;
3609 cpi->frames_since_key++;
3610 // We advance the temporal pattern for dropped frames.
3611 cpi->temporal_pattern_counter++;
3612
3613 #if CONFIG_INTERNAL_STATS
3614 cpi->count ++;
3615 #endif
3616
3617 cpi->buffer_level = cpi->bits_off_target;
3618
3619 if (cpi->oxcf.number_of_layers > 1)
3620 {
3621 unsigned int i;
3622
3623 /* Propagate bits saved by dropping the frame to higher
3624 * layers
3625 */
3626 for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
3627 {
3628 LAYER_CONTEXT *lc = &cpi->layer_context[i];
3629 lc->bits_off_target += (int)(lc->target_bandwidth /
3630 lc->framerate);
3631 if (lc->bits_off_target > lc->maximum_buffer_size)
3632 lc->bits_off_target = lc->maximum_buffer_size;
3633 lc->buffer_level = lc->bits_off_target;
3634 }
3635 }
3636
3637 return;
3638 }
3639 else
3640 cpi->decimation_count = cpi->decimation_factor;
3641 }
3642 else
3643 cpi->decimation_count = 0;
3644
3645 /* Decide how big to make the frame */
3646 if (!vp8_pick_frame_size(cpi))
3647 {
3648         /* TODO: the two drop-frame-and-return code paths could be combined. */
3649 #if CONFIG_MULTI_RES_ENCODING
3650 vp8_store_drop_frame_info(cpi);
3651 #endif
3652 cm->current_video_frame++;
3653 cpi->frames_since_key++;
3654 // We advance the temporal pattern for dropped frames.
3655 cpi->temporal_pattern_counter++;
3656 return;
3657 }
3658
3659 /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full.
3660      * This has a knock-on effect on active best quality as well.
3661 * For CBR if the buffer reaches its maximum level then we can no longer
3662 * save up bits for later frames so we might as well use them up
3663 * on the current frame.
3664 */
3665 if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
3666 (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) && cpi->buffered_mode)
3667 {
3668 /* Max adjustment is 1/4 */
3669 int Adjustment = cpi->active_worst_quality / 4;
3670
3671 if (Adjustment)
3672 {
3673 int buff_lvl_step;
3674
3675 if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size)
3676 {
3677 buff_lvl_step = (int)
3678 ((cpi->oxcf.maximum_buffer_size -
3679 cpi->oxcf.optimal_buffer_level) /
3680 Adjustment);
3681
3682 if (buff_lvl_step)
3683 Adjustment = (int)
3684 ((cpi->buffer_level -
3685 cpi->oxcf.optimal_buffer_level) /
3686 buff_lvl_step);
3687 else
3688 Adjustment = 0;
3689 }
3690
3691 cpi->active_worst_quality -= Adjustment;
3692
3693 if(cpi->active_worst_quality < cpi->active_best_quality)
3694 cpi->active_worst_quality = cpi->active_best_quality;
3695 }
3696 }
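    /* Worked example with made-up numbers: active_worst_quality of 40 gives
     * a maximum Adjustment of 10. If the buffer sits half way between the
     * optimal and maximum levels, buff_lvl_step spans 1/10 of that range, so
     * Adjustment becomes about 5 and active_worst_quality drops to about 35,
     * never falling below active_best_quality.
     */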
3697
3698     /* Set an active best quality and if necessary active worst quality.
3699      * There is some odd behavior for one pass here that needs attention.
3700 */
3701 if ( (cpi->pass == 2) || (cpi->ni_frames > 150))
3702 {
3703 vp8_clear_system_state();
3704
3705 Q = cpi->active_worst_quality;
3706
3707 if ( cm->frame_type == KEY_FRAME )
3708 {
3709 if ( cpi->pass == 2 )
3710 {
3711 if (cpi->gfu_boost > 600)
3712 cpi->active_best_quality = kf_low_motion_minq[Q];
3713 else
3714 cpi->active_best_quality = kf_high_motion_minq[Q];
3715
3716 /* Special case for key frames forced because we have reached
3717 * the maximum key frame interval. Here force the Q to a range
3718 * based on the ambient Q to reduce the risk of popping
3719 */
3720 if ( cpi->this_key_frame_forced )
3721 {
3722 if ( cpi->active_best_quality > cpi->avg_frame_qindex * 7/8)
3723 cpi->active_best_quality = cpi->avg_frame_qindex * 7/8;
3724 else if ( cpi->active_best_quality < cpi->avg_frame_qindex >> 2 )
3725 cpi->active_best_quality = cpi->avg_frame_qindex >> 2;
3726 }
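            /* In effect a forced key frame's active best quality is kept
             * within roughly [avg_frame_qindex / 4, avg_frame_qindex * 7/8],
             * so its Q cannot swing far from the surrounding frames.
             */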
3727 }
3728 /* One pass more conservative */
3729 else
3730 cpi->active_best_quality = kf_high_motion_minq[Q];
3731 }
3732
3733 else if (cpi->oxcf.number_of_layers==1 &&
3734 (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame))
3735 {
3736 /* Use the lower of cpi->active_worst_quality and recent
3737 * average Q as basis for GF/ARF Q limit unless last frame was
3738 * a key frame.
3739 */
3740 if ( (cpi->frames_since_key > 1) &&
3741 (cpi->avg_frame_qindex < cpi->active_worst_quality) )
3742 {
3743 Q = cpi->avg_frame_qindex;
3744 }
3745
3746             /* For constrained quality don't allow Q less than the cq level */
3747 if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3748 (Q < cpi->cq_target_quality) )
3749 {
3750 Q = cpi->cq_target_quality;
3751 }
3752
3753 if ( cpi->pass == 2 )
3754 {
3755 if ( cpi->gfu_boost > 1000 )
3756 cpi->active_best_quality = gf_low_motion_minq[Q];
3757 else if ( cpi->gfu_boost < 400 )
3758 cpi->active_best_quality = gf_high_motion_minq[Q];
3759 else
3760 cpi->active_best_quality = gf_mid_motion_minq[Q];
3761
3762                 /* Constrained quality uses a slightly lower active best. */
3763 if ( cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY )
3764 {
3765 cpi->active_best_quality =
3766 cpi->active_best_quality * 15/16;
3767 }
3768 }
3769 /* One pass more conservative */
3770 else
3771 cpi->active_best_quality = gf_high_motion_minq[Q];
3772 }
3773 else
3774 {
3775 cpi->active_best_quality = inter_minq[Q];
3776
3777             /* For the constant/constrained quality mode we don't want
3778 * q to fall below the cq level.
3779 */
3780 if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
3781 (cpi->active_best_quality < cpi->cq_target_quality) )
3782 {
3783                 /* If we are strongly undershooting the target rate in the last
3784                  * few frames then use the user passed in cq value, not the auto
3785                  * cq value.
3786 */
3787 if ( cpi->rolling_actual_bits < cpi->min_frame_bandwidth )
3788 cpi->active_best_quality = cpi->oxcf.cq_level;
3789 else
3790 cpi->active_best_quality = cpi->cq_target_quality;
3791 }
3792 }
3793
3794         /* If CBR and the buffer is getting full then it is reasonable to allow
3795 * higher quality on the frames to prevent bits just going to waste.
3796 */
3797 if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
3798 {
3799             /* Note that the use of >= here eliminates the risk of a divide
3800              * by 0 error in the else if clause.
3801 */
3802 if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size)
3803 cpi->active_best_quality = cpi->best_quality;
3804
3805 else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level)
3806 {
3807 int Fraction = (int)
3808 (((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128)
3809 / (cpi->oxcf.maximum_buffer_size -
3810 cpi->oxcf.optimal_buffer_level));
3811 int min_qadjustment = ((cpi->active_best_quality -
3812 cpi->best_quality) * Fraction) / 128;
3813
3814 cpi->active_best_quality -= min_qadjustment;
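                /* Illustration (made-up figures): a buffer 3/4 of the way
                 * from optimal to maximum gives Fraction = 96. With
                 * active_best_quality 32 and best_quality 4 the adjustment
                 * is (28 * 96) / 128 = 21, pulling active_best_quality down
                 * to 11 as the buffer approaches full.
                 */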
3815 }
3816 }
3817 }
3818 /* Make sure constrained quality mode limits are adhered to for the first
3819 * few frames of one pass encodes
3820 */
3821 else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY)
3822 {
3823 if ( (cm->frame_type == KEY_FRAME) ||
3824 cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame )
3825 {
3826 cpi->active_best_quality = cpi->best_quality;
3827 }
3828 else if (cpi->active_best_quality < cpi->cq_target_quality)
3829 {
3830 cpi->active_best_quality = cpi->cq_target_quality;
3831 }
3832 }
3833
3834 /* Clip the active best and worst quality values to limits */
3835 if (cpi->active_worst_quality > cpi->worst_quality)
3836 cpi->active_worst_quality = cpi->worst_quality;
3837
3838 if (cpi->active_best_quality < cpi->best_quality)
3839 cpi->active_best_quality = cpi->best_quality;
3840
3841 if ( cpi->active_worst_quality < cpi->active_best_quality )
3842 cpi->active_worst_quality = cpi->active_best_quality;
3843
3844 /* Determine initial Q to try */
3845 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
3846
3847 #if !(CONFIG_REALTIME_ONLY)
3848
3849 /* Set highest allowed value for Zbin over quant */
3850 if (cm->frame_type == KEY_FRAME)
3851 zbin_oq_high = 0;
3852 else if ((cpi->oxcf.number_of_layers == 1) && ((cm->refresh_alt_ref_frame ||
3853 (cm->refresh_golden_frame && !cpi->source_alt_ref_active))))
3854 {
3855 zbin_oq_high = 16;
3856 }
3857 else
3858 zbin_oq_high = ZBIN_OQ_MAX;
3859 #endif
3860
3861 /* Setup background Q adjustment for error resilient mode.
3862 * For multi-layer encodes only enable this for the base layer.
3863 */
3864 if (cpi->cyclic_refresh_mode_enabled)
3865 {
3866 if (cpi->current_layer==0)
3867 cyclic_background_refresh(cpi, Q, 0);
3868 else
3869 disable_segmentation(cpi);
3870 }
3871
3872 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);
3873
3874 #if !(CONFIG_REALTIME_ONLY)
3875 /* Limit Q range for the adaptive loop. */
3876 bottom_index = cpi->active_best_quality;
3877 top_index = cpi->active_worst_quality;
3878 q_low = cpi->active_best_quality;
3879 q_high = cpi->active_worst_quality;
3880 #endif
3881
3882 vp8_save_coding_context(cpi);
3883
3884 loop_count = 0;
3885
3886 scale_and_extend_source(cpi->un_scaled_source, cpi);
3887
3888 #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING)
3889
3890 if (cpi->oxcf.noise_sensitivity > 0)
3891 {
3892 unsigned char *src;
3893 int l = 0;
3894
3895 switch (cpi->oxcf.noise_sensitivity)
3896 {
3897 case 1:
3898 l = 20;
3899 break;
3900 case 2:
3901 l = 40;
3902 break;
3903 case 3:
3904 l = 60;
3905 break;
3906 case 4:
3907 l = 80;
3908 break;
3909 case 5:
3910 l = 100;
3911 break;
3912 case 6:
3913 l = 150;
3914 break;
3915 }
3916
3917
3918 if (cm->frame_type == KEY_FRAME)
3919 {
3920 vp8_de_noise(cm, cpi->Source, cpi->Source, l , 1, 0);
3921 }
3922 else
3923 {
3924 vp8_de_noise(cm, cpi->Source, cpi->Source, l , 1, 0);
3925
3926 src = cpi->Source->y_buffer;
3927
3928 if (cpi->Source->y_stride < 0)
3929 {
3930 src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
3931 }
3932 }
3933 }
3934
3935 #endif
3936
3937
3938 #ifdef OUTPUT_YUV_SRC
3939 vp8_write_yuv_frame(yuv_file, cpi->Source);
3940 #endif
3941
3942 do
3943 {
3944 vp8_clear_system_state();
3945
3946 vp8_set_quantizer(cpi, Q);
3947
3948 /* setup skip prob for costing in mode/mv decision */
3949 if (cpi->common.mb_no_coeff_skip)
3950 {
3951 cpi->prob_skip_false = cpi->base_skip_false_prob[Q];
3952
3953 if (cm->frame_type != KEY_FRAME)
3954 {
3955 if (cpi->common.refresh_alt_ref_frame)
3956 {
3957 if (cpi->last_skip_false_probs[2] != 0)
3958 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3959
3960 /*
3961 if(cpi->last_skip_false_probs[2]!=0 && abs(Q- cpi->last_skip_probs_q[2])<=16 )
3962 cpi->prob_skip_false = cpi->last_skip_false_probs[2];
3963 else if (cpi->last_skip_false_probs[2]!=0)
3964 cpi->prob_skip_false = (cpi->last_skip_false_probs[2] + cpi->prob_skip_false ) / 2;
3965 */
3966 }
3967 else if (cpi->common.refresh_golden_frame)
3968 {
3969 if (cpi->last_skip_false_probs[1] != 0)
3970 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3971
3972 /*
3973 if(cpi->last_skip_false_probs[1]!=0 && abs(Q- cpi->last_skip_probs_q[1])<=16 )
3974 cpi->prob_skip_false = cpi->last_skip_false_probs[1];
3975 else if (cpi->last_skip_false_probs[1]!=0)
3976 cpi->prob_skip_false = (cpi->last_skip_false_probs[1] + cpi->prob_skip_false ) / 2;
3977 */
3978 }
3979 else
3980 {
3981 if (cpi->last_skip_false_probs[0] != 0)
3982 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3983
3984 /*
3985 if(cpi->last_skip_false_probs[0]!=0 && abs(Q- cpi->last_skip_probs_q[0])<=16 )
3986 cpi->prob_skip_false = cpi->last_skip_false_probs[0];
3987 else if(cpi->last_skip_false_probs[0]!=0)
3988 cpi->prob_skip_false = (cpi->last_skip_false_probs[0] + cpi->prob_skip_false ) / 2;
3989 */
3990 }
3991
3992                 /* As this is only a cost estimate, make sure it does not
3993                  * go to extremes either way.
3994 */
3995 if (cpi->prob_skip_false < 5)
3996 cpi->prob_skip_false = 5;
3997
3998 if (cpi->prob_skip_false > 250)
3999 cpi->prob_skip_false = 250;
4000
4001 if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref)
4002 cpi->prob_skip_false = 1;
4003 }
4004
4005 #if 0
4006
4007 if (cpi->pass != 1)
4008 {
4009 FILE *f = fopen("skip.stt", "a");
4010 fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
4011 fclose(f);
4012 }
4013
4014 #endif
4015
4016 }
4017
4018 if (cm->frame_type == KEY_FRAME)
4019 {
4020 if(resize_key_frame(cpi))
4021 {
4022 /* If the frame size has changed, need to reset Q, quantizer,
4023 * and background refresh.
4024 */
4025 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4026 if (cpi->cyclic_refresh_mode_enabled)
4027 {
4028 if (cpi->current_layer==0)
4029 cyclic_background_refresh(cpi, Q, 0);
4030 else
4031 disable_segmentation(cpi);
4032 }
4033 // Reset the consec_zero_last counter on key frame.
4034 vpx_memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols);
4035 vp8_set_quantizer(cpi, Q);
4036 }
4037
4038 vp8_setup_key_frame(cpi);
4039 }
4040
4041
4042
4043 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
4044 {
4045 if(cpi->oxcf.error_resilient_mode)
4046 cm->refresh_entropy_probs = 0;
4047
4048 if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS)
4049 {
4050 if (cm->frame_type == KEY_FRAME)
4051 cm->refresh_entropy_probs = 1;
4052 }
4053
4054 if (cm->refresh_entropy_probs == 0)
4055 {
4056 /* save a copy for later refresh */
4057 vpx_memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc));
4058 }
4059
4060 vp8_update_coef_context(cpi);
4061
4062 vp8_update_coef_probs(cpi);
4063
4064         /* transform / motion compensation: build the reconstruction frame
4065          * and pack the coef partitions
4066 */
4067 vp8_encode_frame(cpi);
4068
4069 /* cpi->projected_frame_size is not needed for RT mode */
4070 }
4071 #else
4072         /* transform / motion compensation: build the reconstruction frame */
4073 vp8_encode_frame(cpi);
4074
4075 cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
4076 cpi->projected_frame_size = (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;
4077 #endif
4078 vp8_clear_system_state();
4079
4080 /* Test to see if the stats generated for this frame indicate that
4081 * we should have coded a key frame (assuming that we didn't)!
4082 */
4083
4084 if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME
4085 && cpi->compressor_speed != 2)
4086 {
4087 #if !(CONFIG_REALTIME_ONLY)
4088 if (decide_key_frame(cpi))
4089 {
4090 /* Reset all our sizing numbers and recode */
4091 cm->frame_type = KEY_FRAME;
4092
4093 vp8_pick_frame_size(cpi);
4094
4095 /* Clear the Alt reference frame active flag when we have
4096 * a key frame
4097 */
4098 cpi->source_alt_ref_active = 0;
4099
4100 // Set the loop filter deltas and segmentation map update
4101 setup_features(cpi);
4102
4103 vp8_restore_coding_context(cpi);
4104
4105 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4106
4107 vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);
4108
4109 /* Limit Q range for the adaptive loop. */
4110 bottom_index = cpi->active_best_quality;
4111 top_index = cpi->active_worst_quality;
4112 q_low = cpi->active_best_quality;
4113 q_high = cpi->active_worst_quality;
4114
4115 loop_count++;
4116 Loop = 1;
4117
4118 continue;
4119 }
4120 #endif
4121 }
4122
4123 vp8_clear_system_state();
4124
4125 if (frame_over_shoot_limit == 0)
4126 frame_over_shoot_limit = 1;
4127
4128         /* Are we overshooting and up against the limit of active max Q? */
4129 if (((cpi->pass != 2) || (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
4130 (Q == cpi->active_worst_quality) &&
4131 (cpi->active_worst_quality < cpi->worst_quality) &&
4132 (cpi->projected_frame_size > frame_over_shoot_limit))
4133 {
4134 int over_size_percent = ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) / frame_over_shoot_limit;
4135
4136             /* If so, is there any scope for relaxing it? */
4137 while ((cpi->active_worst_quality < cpi->worst_quality) && (over_size_percent > 0))
4138 {
4139 cpi->active_worst_quality++;
4140 /* Assume 1 qstep = about 4% on frame size. */
4141 over_size_percent = (int)(over_size_percent * 0.96);
4142 }
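            /* Under this model a 10% overshoot decays 10, 9, 8, ... 0 in ten
             * iterations, so it costs roughly ten q steps of active max Q
             * head room (fewer if worst_quality is reached first).
             */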
4143 #if !(CONFIG_REALTIME_ONLY)
4144 top_index = cpi->active_worst_quality;
4145 #endif
4146 /* If we have updated the active max Q do not call
4147              * vp8_update_rate_correction_factors() in this loop.
4148 */
4149 active_worst_qchanged = 1;
4150 }
4151 else
4152 active_worst_qchanged = 0;
4153
4154 #if !(CONFIG_REALTIME_ONLY)
4155 /* Special case handling for forced key frames */
4156 if ( (cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced )
4157 {
4158 int last_q = Q;
4159 int kf_err = vp8_calc_ss_err(cpi->Source,
4160 &cm->yv12_fb[cm->new_fb_idx]);
4161
4162 /* The key frame is not good enough */
4163 if ( kf_err > ((cpi->ambient_err * 7) >> 3) )
4164 {
4165 /* Lower q_high */
4166 q_high = (Q > q_low) ? (Q - 1) : q_low;
4167
4168 /* Adjust Q */
4169 Q = (q_high + q_low) >> 1;
4170 }
4171 /* The key frame is much better than the previous frame */
4172 else if ( kf_err < (cpi->ambient_err >> 1) )
4173 {
4174 /* Raise q_low */
4175 q_low = (Q < q_high) ? (Q + 1) : q_high;
4176
4177 /* Adjust Q */
4178 Q = (q_high + q_low + 1) >> 1;
4179 }
4180
4181 /* Clamp Q to upper and lower limits: */
4182 if (Q > q_high)
4183 Q = q_high;
4184 else if (Q < q_low)
4185 Q = q_low;
4186
4187 Loop = Q != last_q;
4188 }
4189
4190 /* Is the projected frame size out of range and are we allowed
4191          * to attempt to recode?
4192 */
4193 else if ( recode_loop_test( cpi,
4194 frame_over_shoot_limit, frame_under_shoot_limit,
4195 Q, top_index, bottom_index ) )
4196 {
4197 int last_q = Q;
4198 int Retries = 0;
4199
4200 /* Frame size out of permitted range. Update correction factor
4201 * & compute new Q to try...
4202 */
4203
4204 /* Frame is too large */
4205 if (cpi->projected_frame_size > cpi->this_frame_target)
4206 {
4207                 /* Raise q_low to at least the current value */
4208 q_low = (Q < q_high) ? (Q + 1) : q_high;
4209
4210 /* If we are using over quant do the same for zbin_oq_low */
4211 if (cpi->mb.zbin_over_quant > 0)
4212 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ?
4213 (cpi->mb.zbin_over_quant + 1) : zbin_oq_high;
4214
4215 if (undershoot_seen)
4216 {
4217 /* Update rate_correction_factor unless
4218 * cpi->active_worst_quality has changed.
4219 */
4220 if (!active_worst_qchanged)
4221 vp8_update_rate_correction_factors(cpi, 1);
4222
4223 Q = (q_high + q_low + 1) / 2;
4224
4225 /* Adjust cpi->zbin_over_quant (only allowed when Q
4226 * is max)
4227 */
4228 if (Q < MAXQ)
4229 cpi->mb.zbin_over_quant = 0;
4230 else
4231 {
4232 zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ?
4233 (cpi->mb.zbin_over_quant + 1) : zbin_oq_high;
4234 cpi->mb.zbin_over_quant =
4235 (zbin_oq_high + zbin_oq_low) / 2;
4236 }
4237 }
4238 else
4239 {
4240 /* Update rate_correction_factor unless
4241 * cpi->active_worst_quality has changed.
4242 */
4243 if (!active_worst_qchanged)
4244 vp8_update_rate_correction_factors(cpi, 0);
4245
4246 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4247
4248 while (((Q < q_low) ||
4249 (cpi->mb.zbin_over_quant < zbin_oq_low)) &&
4250 (Retries < 10))
4251 {
4252 vp8_update_rate_correction_factors(cpi, 0);
4253 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4254 Retries ++;
4255 }
4256 }
4257
4258 overshoot_seen = 1;
4259 }
4260 /* Frame is too small */
4261 else
4262 {
4263 if (cpi->mb.zbin_over_quant == 0)
4264 /* Lower q_high if not using over quant */
4265 q_high = (Q > q_low) ? (Q - 1) : q_low;
4266 else
4267 /* else lower zbin_oq_high */
4268 zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low) ?
4269 (cpi->mb.zbin_over_quant - 1) : zbin_oq_low;
4270
4271 if (overshoot_seen)
4272 {
4273 /* Update rate_correction_factor unless
4274 * cpi->active_worst_quality has changed.
4275 */
4276 if (!active_worst_qchanged)
4277 vp8_update_rate_correction_factors(cpi, 1);
4278
4279 Q = (q_high + q_low) / 2;
4280
4281 /* Adjust cpi->zbin_over_quant (only allowed when Q
4282 * is max)
4283 */
4284 if (Q < MAXQ)
4285 cpi->mb.zbin_over_quant = 0;
4286 else
4287 cpi->mb.zbin_over_quant =
4288 (zbin_oq_high + zbin_oq_low) / 2;
4289 }
4290 else
4291 {
4292 /* Update rate_correction_factor unless
4293 * cpi->active_worst_quality has changed.
4294 */
4295 if (!active_worst_qchanged)
4296 vp8_update_rate_correction_factors(cpi, 0);
4297
4298 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4299
4300 /* Special case reset for qlow for constrained quality.
4301 * This should only trigger where there is very substantial
4302 * undershoot on a frame and the auto cq level is above
4303                      * the user passed in value.
4304 */
4305 if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
4306 (Q < q_low) )
4307 {
4308 q_low = Q;
4309 }
4310
4311 while (((Q > q_high) ||
4312 (cpi->mb.zbin_over_quant > zbin_oq_high)) &&
4313 (Retries < 10))
4314 {
4315 vp8_update_rate_correction_factors(cpi, 0);
4316 Q = vp8_regulate_q(cpi, cpi->this_frame_target);
4317 Retries ++;
4318 }
4319 }
4320
4321 undershoot_seen = 1;
4322 }
4323
4324 /* Clamp Q to upper and lower limits: */
4325 if (Q > q_high)
4326 Q = q_high;
4327 else if (Q < q_low)
4328 Q = q_low;
4329
4330 /* Clamp cpi->zbin_over_quant */
4331 cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low) ?
4332 zbin_oq_low : (cpi->mb.zbin_over_quant > zbin_oq_high) ?
4333 zbin_oq_high : cpi->mb.zbin_over_quant;
4334
4335 Loop = Q != last_q;
4336 }
4337 else
4338 #endif
4339 Loop = 0;
4340
4341 if (cpi->is_src_frame_alt_ref)
4342 Loop = 0;
4343
4344 if (Loop == 1)
4345 {
4346 vp8_restore_coding_context(cpi);
4347 loop_count++;
4348 #if CONFIG_INTERNAL_STATS
4349 cpi->tot_recode_hits++;
4350 #endif
4351 }
4352 }
4353 while (Loop == 1);
4354
4355 #if 0
4356 /* Experimental code for lagged and one pass
4357 * Update stats used for one pass GF selection
4358 */
4359 {
4360 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
4361 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
4362 cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
4363 }
4364 #endif
4365
4366 /* Special case code to reduce pulsing when key frames are forced at a
4367 * fixed interval. Note the reconstruction error if it is the frame before
4368      * the forced key frame.
4369 */
4370 if ( cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0) )
4371 {
4372 cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
4373 &cm->yv12_fb[cm->new_fb_idx]);
4374 }
4375
4376     /* This frame's MVs are saved and will be used in the next frame's MV predictor.
4377      * Last frame has one more line (added to the bottom) and one more column (added
4378      * to the right) than cm->mip. The edge elements are initialized to 0.
4379 */
4380 #if CONFIG_MULTI_RES_ENCODING
4381 if(!cpi->oxcf.mr_encoder_id && cm->show_frame)
4382 #else
4383 if(cm->show_frame) /* do not save for altref frame */
4384 #endif
4385 {
4386 int mb_row;
4387 int mb_col;
4388 /* Point to beginning of allocated MODE_INFO arrays. */
4389 MODE_INFO *tmp = cm->mip;
4390
4391 if(cm->frame_type != KEY_FRAME)
4392 {
4393 for (mb_row = 0; mb_row < cm->mb_rows+1; mb_row ++)
4394 {
4395 for (mb_col = 0; mb_col < cm->mb_cols+1; mb_col ++)
4396 {
4397 if(tmp->mbmi.ref_frame != INTRA_FRAME)
4398 cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride+1)].as_int = tmp->mbmi.mv.as_int;
4399
4400 cpi->lf_ref_frame_sign_bias[mb_col + mb_row*(cm->mode_info_stride+1)] = cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
4401 cpi->lf_ref_frame[mb_col + mb_row*(cm->mode_info_stride+1)] = tmp->mbmi.ref_frame;
4402 tmp++;
4403 }
4404 }
4405 }
4406 }
4407
4408 /* Count last ref frame 0,0 usage on current encoded frame. */
4409 {
4410 int mb_row;
4411 int mb_col;
4412 /* Point to beginning of MODE_INFO arrays. */
4413 MODE_INFO *tmp = cm->mi;
4414
4415 cpi->zeromv_count = 0;
4416
4417 if(cm->frame_type != KEY_FRAME)
4418 {
4419 for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
4420 {
4421 for (mb_col = 0; mb_col < cm->mb_cols; mb_col ++)
4422 {
4423 if(tmp->mbmi.mode == ZEROMV)
4424 cpi->zeromv_count++;
4425 tmp++;
4426 }
4427 tmp++;
4428 }
4429 }
4430 }
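    /* zeromv_count is later converted to lf_zeromv_pct
     * ((zeromv_count * 100) / cm->MBs in vp8_get_compressed_data()), i.e. the
     * percentage of macroblocks on this frame coded with the ZEROMV mode.
     */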
4431
4432 #if CONFIG_MULTI_RES_ENCODING
4433 vp8_cal_dissimilarity(cpi);
4434 #endif
4435
4436     /* Update the GF usage maps.
4437      * This is done after completing the compression of a frame, when all
4438      * modes etc. are finalized but before the loop filter.
4439 */
4440 if (cpi->oxcf.number_of_layers == 1)
4441 vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
4442
4443 if (cm->frame_type == KEY_FRAME)
4444 cm->refresh_last_frame = 1;
4445
4446 #if 0
4447 {
4448 FILE *f = fopen("gfactive.stt", "a");
4449 fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
4450 fclose(f);
4451 }
4452 #endif
4453
4454 /* For inter frames the current default behavior is that when
4455      * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer.
4456 * This is purely an encoder decision at present.
4457 */
4458 if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame)
4459 cm->copy_buffer_to_arf = 2;
4460 else
4461 cm->copy_buffer_to_arf = 0;
4462
4463 cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
4464
4465 #if CONFIG_MULTITHREAD
4466 if (cpi->b_multi_threaded)
4467 {
4468 /* start loopfilter in separate thread */
4469 sem_post(&cpi->h_event_start_lpf);
4470 cpi->b_lpf_running = 1;
4471 }
4472 else
4473 #endif
4474 {
4475 vp8_loopfilter_frame(cpi, cm);
4476 }
4477
4478 update_reference_frames(cpi);
4479
4480 #ifdef OUTPUT_YUV_DENOISED
4481 vp8_write_yuv_frame(yuv_denoised_file,
4482 &cpi->denoiser.yv12_running_avg[INTRA_FRAME]);
4483 #endif
4484
4485 #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
4486 if (cpi->oxcf.error_resilient_mode)
4487 {
4488 cm->refresh_entropy_probs = 0;
4489 }
4490 #endif
4491
4492 #if CONFIG_MULTITHREAD
4493     /* wait until filter_level is picked so that we can continue with stream packing */
4494 if (cpi->b_multi_threaded)
4495 sem_wait(&cpi->h_event_end_lpf);
4496 #endif
4497
4498 /* build the bitstream */
4499 vp8_pack_bitstream(cpi, dest, dest_end, size);
4500
4501 #if CONFIG_MULTITHREAD
4502 /* if PSNR packets are generated we have to wait for the lpf */
4503 if (cpi->b_lpf_running && cpi->b_calculate_psnr)
4504 {
4505 sem_wait(&cpi->h_event_end_lpf);
4506 cpi->b_lpf_running = 0;
4507 }
4508 #endif
4509
4510 /* Move storing frame_type out of the above loop since it is also
4511 * needed in motion search besides loopfilter */
4512 cm->last_frame_type = cm->frame_type;
4513
4514 /* Update rate control heuristics */
4515 cpi->total_byte_count += (*size);
4516 cpi->projected_frame_size = (*size) << 3;
4517
4518 if (cpi->oxcf.number_of_layers > 1)
4519 {
4520 unsigned int i;
4521 for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
4522 cpi->layer_context[i].total_byte_count += (*size);
4523 }
4524
4525 if (!active_worst_qchanged)
4526 vp8_update_rate_correction_factors(cpi, 2);
4527
4528 cpi->last_q[cm->frame_type] = cm->base_qindex;
4529
4530 if (cm->frame_type == KEY_FRAME)
4531 {
4532 vp8_adjust_key_frame_context(cpi);
4533 }
4534
4535 /* Keep a record of ambient average Q. */
4536 if (cm->frame_type != KEY_FRAME)
4537 cpi->avg_frame_qindex = (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;
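    /* The update above is a simple running average with a 3:1 old/new
     * weighting (plus rounding): roughly 0.75 * previous average plus
     * 0.25 * base_qindex of the frame just coded.
     */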
4538
4539 /* Keep a record from which we can calculate the average Q excluding
4540 * GF updates and key frames
4541 */
4542 if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) ||
4543 (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame)))
4544 {
4545 cpi->ni_frames++;
4546
4547 /* Calculate the average Q for normal inter frames (not key or GFU
4548 * frames).
4549 */
4550 if ( cpi->pass == 2 )
4551 {
4552 cpi->ni_tot_qi += Q;
4553 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4554 }
4555 else
4556 {
4557 /* Damp value for first few frames */
4558 if (cpi->ni_frames > 150 )
4559 {
4560 cpi->ni_tot_qi += Q;
4561 cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
4562 }
4563 /* For one pass, early in the clip ... average the current frame Q
4564 * value with the worstq entered by the user as a dampening measure
4565 */
4566 else
4567 {
4568 cpi->ni_tot_qi += Q;
4569 cpi->ni_av_qi = ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
4570 }
4571
4572 /* If the average Q is higher than what was used in the last
4573 * frame (after going through the recode loop to keep the frame
4574 * size within range) then use the last frame value - 1. The -1
4575 * is designed to stop Q and hence the data rate, from
4576 * progressively falling away during difficult sections, but at
4577      * the same time reduce the number of iterations around the
4578 * recode loop.
4579 */
4580 if (Q > cpi->ni_av_qi)
4581 cpi->ni_av_qi = Q - 1;
4582 }
4583 }
4584
4585 /* Update the buffer level variable. */
4586 /* Non-viewable frames are a special case and are treated as pure overhead. */
4587 if ( !cm->show_frame )
4588 cpi->bits_off_target -= cpi->projected_frame_size;
4589 else
4590 cpi->bits_off_target += cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
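    /* Example with illustrative numbers: at 300 kbit/s and 30 fps the
     * per-frame allowance is 10000 bits, so a 12000 bit frame pulls
     * bits_off_target (and hence buffer_level) down by 2000 bits.
     */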
4591
4592 /* Clip the buffer level to the maximum specified buffer size */
4593 if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
4594 cpi->bits_off_target = cpi->oxcf.maximum_buffer_size;
4595
4596     /* Rolling monitors of whether we are over- or under-spending, used to
4597      * help regulate min and max Q in two pass.
4598 */
4599 cpi->rolling_target_bits = ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4;
4600 cpi->rolling_actual_bits = ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4;
4601 cpi->long_rolling_target_bits = ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32;
4602 cpi->long_rolling_actual_bits = ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) / 32;
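    /* Both pairs are simple IIR averages: the short term monitors weight the
     * new frame by 1/4 and settle within a handful of frames, while the long
     * term monitors weight it by 1/32 and track behaviour over roughly the
     * last thirty frames.
     */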
4603
4604 /* Actual bits spent */
4605 cpi->total_actual_bits += cpi->projected_frame_size;
4606
4607 /* Debug stats */
4608 cpi->total_target_vs_actual += (cpi->this_frame_target - cpi->projected_frame_size);
4609
4610 cpi->buffer_level = cpi->bits_off_target;
4611
4612 /* Propagate values to higher temporal layers */
4613 if (cpi->oxcf.number_of_layers > 1)
4614 {
4615 unsigned int i;
4616
4617 for (i=cpi->current_layer+1; i<cpi->oxcf.number_of_layers; i++)
4618 {
4619 LAYER_CONTEXT *lc = &cpi->layer_context[i];
4620 int bits_off_for_this_layer =
4621 (int)(lc->target_bandwidth / lc->framerate -
4622 cpi->projected_frame_size);
4623
4624 lc->bits_off_target += bits_off_for_this_layer;
4625
4626 /* Clip buffer level to maximum buffer size for the layer */
4627 if (lc->bits_off_target > lc->maximum_buffer_size)
4628 lc->bits_off_target = lc->maximum_buffer_size;
4629
4630 lc->total_actual_bits += cpi->projected_frame_size;
4631 lc->total_target_vs_actual += bits_off_for_this_layer;
4632 lc->buffer_level = lc->bits_off_target;
4633 }
4634 }
4635
4636 /* Update bits left to the kf and gf groups to account for overshoot
4637 * or undershoot on these frames
4638 */
4639 if (cm->frame_type == KEY_FRAME)
4640 {
4641 cpi->twopass.kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
4642
4643 if (cpi->twopass.kf_group_bits < 0)
4644 cpi->twopass.kf_group_bits = 0 ;
4645 }
4646 else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame)
4647 {
4648 cpi->twopass.gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size;
4649
4650 if (cpi->twopass.gf_group_bits < 0)
4651 cpi->twopass.gf_group_bits = 0 ;
4652 }
4653
4654 if (cm->frame_type != KEY_FRAME)
4655 {
4656 if (cpi->common.refresh_alt_ref_frame)
4657 {
4658 cpi->last_skip_false_probs[2] = cpi->prob_skip_false;
4659 cpi->last_skip_probs_q[2] = cm->base_qindex;
4660 }
4661 else if (cpi->common.refresh_golden_frame)
4662 {
4663 cpi->last_skip_false_probs[1] = cpi->prob_skip_false;
4664 cpi->last_skip_probs_q[1] = cm->base_qindex;
4665 }
4666 else
4667 {
4668 cpi->last_skip_false_probs[0] = cpi->prob_skip_false;
4669 cpi->last_skip_probs_q[0] = cm->base_qindex;
4670
4671 /* update the baseline */
4672 cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false;
4673
4674 }
4675 }
4676
4677 #if 0 && CONFIG_INTERNAL_STATS
4678 {
4679 FILE *f = fopen("tmp.stt", "a");
4680
4681 vp8_clear_system_state();
4682
4683 if (cpi->twopass.total_left_stats.coded_error != 0.0)
4684 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4685 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4686 "%8.2lf %"PRId64" %10.3lf %10"PRId64" %8d\n",
4687 cpi->common.current_video_frame, cpi->this_frame_target,
4688 cpi->projected_frame_size,
4689 (cpi->projected_frame_size - cpi->this_frame_target),
4690 cpi->total_target_vs_actual,
4691 cpi->buffer_level,
4692 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4693 cpi->total_actual_bits, cm->base_qindex,
4694 cpi->active_best_quality, cpi->active_worst_quality,
4695 cpi->ni_av_qi, cpi->cq_target_quality,
4696 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4697 cm->frame_type, cpi->gfu_boost,
4698 cpi->twopass.est_max_qcorrection_factor,
4699 cpi->twopass.bits_left,
4700 cpi->twopass.total_left_stats.coded_error,
4701 (double)cpi->twopass.bits_left /
4702 cpi->twopass.total_left_stats.coded_error,
4703 cpi->tot_recode_hits);
4704 else
4705 fprintf(f, "%10d %10d %10d %10d %10d %10"PRId64" %10"PRId64
4706 "%10"PRId64" %10d %6d %6d %6d %6d %5d %5d %5d %8d "
4707 "%8.2lf %"PRId64" %10.3lf %8d\n",
4708 cpi->common.current_video_frame, cpi->this_frame_target,
4709 cpi->projected_frame_size,
4710 (cpi->projected_frame_size - cpi->this_frame_target),
4711 cpi->total_target_vs_actual,
4712 cpi->buffer_level,
4713 (cpi->oxcf.starting_buffer_level-cpi->bits_off_target),
4714 cpi->total_actual_bits, cm->base_qindex,
4715 cpi->active_best_quality, cpi->active_worst_quality,
4716 cpi->ni_av_qi, cpi->cq_target_quality,
4717 cm->refresh_golden_frame, cm->refresh_alt_ref_frame,
4718 cm->frame_type, cpi->gfu_boost,
4719 cpi->twopass.est_max_qcorrection_factor,
4720 cpi->twopass.bits_left,
4721 cpi->twopass.total_left_stats.coded_error,
4722 cpi->tot_recode_hits);
4723
4724 fclose(f);
4725
4726 {
4727 FILE *fmodes = fopen("Modes.stt", "a");
4728
4729 fprintf(fmodes, "%6d:%1d:%1d:%1d ",
4730 cpi->common.current_video_frame,
4731 cm->frame_type, cm->refresh_golden_frame,
4732 cm->refresh_alt_ref_frame);
4733
4734 fprintf(fmodes, "\n");
4735
4736 fclose(fmodes);
4737 }
4738 }
4739
4740 #endif
4741
4742 if (cm->refresh_golden_frame == 1)
4743 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN;
4744 else
4745 cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_GOLDEN;
4746
4747 if (cm->refresh_alt_ref_frame == 1)
4748 cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF;
4749 else
4750 cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_ALTREF;
4751
4752
4753 if (cm->refresh_last_frame & cm->refresh_golden_frame)
4754 /* both refreshed */
4755 cpi->gold_is_last = 1;
4756 else if (cm->refresh_last_frame ^ cm->refresh_golden_frame)
4757 /* 1 refreshed but not the other */
4758 cpi->gold_is_last = 0;
4759
4760 if (cm->refresh_last_frame & cm->refresh_alt_ref_frame)
4761 /* both refreshed */
4762 cpi->alt_is_last = 1;
4763 else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame)
4764 /* 1 refreshed but not the other */
4765 cpi->alt_is_last = 0;
4766
4767 if (cm->refresh_alt_ref_frame & cm->refresh_golden_frame)
4768 /* both refreshed */
4769 cpi->gold_is_alt = 1;
4770 else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame)
4771 /* 1 refreshed but not the other */
4772 cpi->gold_is_alt = 0;
4773
4774 cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME;
4775
4776 if (cpi->gold_is_last)
4777 cpi->ref_frame_flags &= ~VP8_GOLD_FRAME;
4778
4779 if (cpi->alt_is_last)
4780 cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4781
4782 if (cpi->gold_is_alt)
4783 cpi->ref_frame_flags &= ~VP8_ALTR_FRAME;
4784
4785
4786 if (!cpi->oxcf.error_resilient_mode)
4787 {
4788 if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame && (cm->frame_type != KEY_FRAME))
4789 /* Update the alternate reference frame stats as appropriate. */
4790 update_alt_ref_frame_stats(cpi);
4791 else
4792 /* Update the Golden frame stats as appropriate. */
4793 update_golden_frame_stats(cpi);
4794 }
4795
4796 if (cm->frame_type == KEY_FRAME)
4797 {
4798 /* Tell the caller that the frame was coded as a key frame */
4799 *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY;
4800
4801 /* As this frame is a key frame the next defaults to an inter frame. */
4802 cm->frame_type = INTER_FRAME;
4803
4804 cpi->last_frame_percent_intra = 100;
4805 }
4806 else
4807 {
4808 *frame_flags = cm->frame_flags&~FRAMEFLAGS_KEY;
4809
4810 cpi->last_frame_percent_intra = cpi->this_frame_percent_intra;
4811 }
4812
4813 /* Clear the one shot update flags for segmentation map and mode/ref
4814 * loop filter deltas.
4815 */
4816 cpi->mb.e_mbd.update_mb_segmentation_map = 0;
4817 cpi->mb.e_mbd.update_mb_segmentation_data = 0;
4818 cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
4819
4820
4821     /* Don't increment frame counters if this was an altref buffer update,
4822      * not a real frame.
4823 */
4824 if (cm->show_frame)
4825 {
4826 cm->current_video_frame++;
4827 cpi->frames_since_key++;
4828 cpi->temporal_pattern_counter++;
4829 }
4830
4831 /* reset to normal state now that we are done. */
4832
4833
4834
4835 #if 0
4836 {
4837 char filename[512];
4838 FILE *recon_file;
4839 sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame);
4840 recon_file = fopen(filename, "wb");
4841 fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc,
4842 cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file);
4843 fclose(recon_file);
4844 }
4845 #endif
4846
4847 /* DEBUG */
4848 /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */
4849
4850
4851 }
4852 #if !(CONFIG_REALTIME_ONLY)
4853 static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned char *dest_end, unsigned int *frame_flags)
4854 {
4855
4856 if (!cpi->common.refresh_alt_ref_frame)
4857 vp8_second_pass(cpi);
4858
4859 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
4860 cpi->twopass.bits_left -= 8 * *size;
4861
4862 if (!cpi->common.refresh_alt_ref_frame)
4863 {
4864 double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth
4865 *cpi->oxcf.two_pass_vbrmin_section / 100);
4866 cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate);
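        /* Illustrative figures: with a 1 Mbit/s target, two_pass_vbrmin_section
         * of 5 and 25 fps, each non-alt-ref frame credits about 2000 bits back
         * to twopass.bits_left, so the guaranteed minimum VBR spend is
         * reflected in the remaining bits estimate.
         */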
4867 }
4868 }
4869 #endif
4870
4871 int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time)
4872 {
4873 struct vpx_usec_timer timer;
4874 int res = 0;
4875
4876 vpx_usec_timer_start(&timer);
4877
4878 /* Reinit the lookahead buffer if the frame size changes */
4879 if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height)
4880 {
4881 assert(cpi->oxcf.lag_in_frames < 2);
4882 dealloc_raw_frame_buffers(cpi);
4883 alloc_raw_frame_buffers(cpi);
4884 }
4885
4886 if(vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
4887 frame_flags, cpi->active_map_enabled ? cpi->active_map : NULL))
4888 res = -1;
4889 vpx_usec_timer_mark(&timer);
4890 cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
4891
4892 return res;
4893 }
4894
4895
4896 static int frame_is_reference(const VP8_COMP *cpi)
4897 {
4898 const VP8_COMMON *cm = &cpi->common;
4899 const MACROBLOCKD *xd = &cpi->mb.e_mbd;
4900
4901 return cm->frame_type == KEY_FRAME || cm->refresh_last_frame
4902 || cm->refresh_golden_frame || cm->refresh_alt_ref_frame
4903 || cm->copy_buffer_to_gf || cm->copy_buffer_to_arf
4904 || cm->refresh_entropy_probs
4905 || xd->mode_ref_lf_delta_update
4906 || xd->update_mb_segmentation_map || xd->update_mb_segmentation_data;
4907 }
4908
4909
4910 int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, unsigned char *dest_end, int64_t *time_stamp, int64_t *time_end, int flush)
4911 {
4912 VP8_COMMON *cm;
4913 struct vpx_usec_timer tsctimer;
4914 struct vpx_usec_timer ticktimer;
4915 struct vpx_usec_timer cmptimer;
4916 YV12_BUFFER_CONFIG *force_src_buffer = NULL;
4917
4918 if (!cpi)
4919 return -1;
4920
4921 cm = &cpi->common;
4922
4923 if (setjmp(cpi->common.error.jmp))
4924 {
4925 cpi->common.error.setjmp = 0;
4926 vp8_clear_system_state();
4927 return VPX_CODEC_CORRUPT_FRAME;
4928 }
4929
4930 cpi->common.error.setjmp = 1;
4931
4932 vpx_usec_timer_start(&cmptimer);
4933
4934 cpi->source = NULL;
4935
4936 #if !(CONFIG_REALTIME_ONLY)
4937 /* Should we code an alternate reference frame */
4938 if (cpi->oxcf.error_resilient_mode == 0 &&
4939 cpi->oxcf.play_alternate &&
4940 cpi->source_alt_ref_pending)
4941 {
4942 if ((cpi->source = vp8_lookahead_peek(cpi->lookahead,
4943 cpi->frames_till_gf_update_due,
4944 PEEK_FORWARD)))
4945 {
4946 cpi->alt_ref_source = cpi->source;
4947 if (cpi->oxcf.arnr_max_frames > 0)
4948 {
4949 vp8_temporal_filter_prepare_c(cpi,
4950 cpi->frames_till_gf_update_due);
4951 force_src_buffer = &cpi->alt_ref_buffer;
4952 }
4953 cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
4954 cm->refresh_alt_ref_frame = 1;
4955 cm->refresh_golden_frame = 0;
4956 cm->refresh_last_frame = 0;
4957 cm->show_frame = 0;
4958 /* Clear Pending alt Ref flag. */
4959 cpi->source_alt_ref_pending = 0;
4960 cpi->is_src_frame_alt_ref = 0;
4961 }
4962 }
4963 #endif
4964
4965 if (!cpi->source)
4966 {
4967 /* Read last frame source if we are encoding first pass. */
4968 if (cpi->pass == 1 && cm->current_video_frame > 0)
4969 {
4970 if((cpi->last_source = vp8_lookahead_peek(cpi->lookahead, 1,
4971 PEEK_BACKWARD)) == NULL)
4972 return -1;
4973 }
4974
4975
4976 if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush)))
4977 {
4978 cm->show_frame = 1;
4979
4980 cpi->is_src_frame_alt_ref = cpi->alt_ref_source
4981 && (cpi->source == cpi->alt_ref_source);
4982
4983 if(cpi->is_src_frame_alt_ref)
4984 cpi->alt_ref_source = NULL;
4985 }
4986 }
4987
4988 if (cpi->source)
4989 {
4990 cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img;
4991 cpi->un_scaled_source = cpi->Source;
4992 *time_stamp = cpi->source->ts_start;
4993 *time_end = cpi->source->ts_end;
4994 *frame_flags = cpi->source->flags;
4995
4996 if (cpi->pass == 1 && cm->current_video_frame > 0)
4997 {
4998 cpi->last_frame_unscaled_source = &cpi->last_source->img;
4999 }
5000 }
5001 else
5002 {
5003 *size = 0;
5004 #if !(CONFIG_REALTIME_ONLY)
5005
5006 if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done)
5007 {
5008 vp8_end_first_pass(cpi); /* get last stats packet */
5009 cpi->twopass.first_pass_done = 1;
5010 }
5011
5012 #endif
5013
5014 return -1;
5015 }
5016
5017 if (cpi->source->ts_start < cpi->first_time_stamp_ever)
5018 {
5019 cpi->first_time_stamp_ever = cpi->source->ts_start;
5020 cpi->last_end_time_stamp_seen = cpi->source->ts_start;
5021 }
5022
5023 /* adjust frame rates based on timestamps given */
5024 if (cm->show_frame)
5025 {
5026 int64_t this_duration;
5027 int step = 0;
5028
5029 if (cpi->source->ts_start == cpi->first_time_stamp_ever)
5030 {
5031 this_duration = cpi->source->ts_end - cpi->source->ts_start;
5032 step = 1;
5033 }
5034 else
5035 {
5036 int64_t last_duration;
5037
5038 this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen;
5039 last_duration = cpi->last_end_time_stamp_seen
5040 - cpi->last_time_stamp_seen;
5041 /* do a step update if the duration changes by 10% */
5042 if (last_duration)
5043 step = (int)(((this_duration - last_duration) *
5044 10 / last_duration));
5045 }
5046
5047 if (this_duration)
5048 {
5049 if (step)
5050 cpi->ref_framerate = 10000000.0 / this_duration;
5051 else
5052 {
5053 double avg_duration, interval;
5054
5055 /* Average this frame's rate into the last second's average
5056 * frame rate. If we haven't seen 1 second yet, then average
5057 * over the whole interval seen.
5058 */
5059 interval = (double)(cpi->source->ts_end -
5060 cpi->first_time_stamp_ever);
5061 if(interval > 10000000.0)
5062 interval = 10000000;
5063
5064 avg_duration = 10000000.0 / cpi->ref_framerate;
5065 avg_duration *= (interval - avg_duration + this_duration);
5066 avg_duration /= interval;
5067
5068 cpi->ref_framerate = 10000000.0 / avg_duration;
5069 }
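        /* Informal sketch of the averaging above: at a 30 fps estimate
         * avg_duration starts near 333333 us; a single 50000 us frame moves
         * it to roughly 323889 us, i.e. each sample nudges the estimate by
         * about avg_duration / interval (a few percent) of the gap, so the
         * frame rate adapts over roughly one second of input.
         */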
5070
5071 if (cpi->oxcf.number_of_layers > 1)
5072 {
5073 unsigned int i;
5074
5075 /* Update frame rates for each layer */
5076 assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS);
5077 for (i = 0; i < cpi->oxcf.number_of_layers &&
5078 i < VPX_TS_MAX_LAYERS; ++i)
5079 {
5080 LAYER_CONTEXT *lc = &cpi->layer_context[i];
5081 lc->framerate = cpi->ref_framerate /
5082 cpi->oxcf.rate_decimator[i];
5083 }
5084 }
5085 else
5086 vp8_new_framerate(cpi, cpi->ref_framerate);
5087 }
5088
5089 cpi->last_time_stamp_seen = cpi->source->ts_start;
5090 cpi->last_end_time_stamp_seen = cpi->source->ts_end;
5091 }
5092
5093 if (cpi->oxcf.number_of_layers > 1)
5094 {
5095 int layer;
5096
5097 update_layer_contexts (cpi);
5098
5099 /* Restore layer specific context & set frame rate */
5100 layer = cpi->oxcf.layer_id[
5101 cpi->temporal_pattern_counter % cpi->oxcf.periodicity];
5102 restore_layer_context (cpi, layer);
5103 vp8_new_framerate(cpi, cpi->layer_context[layer].framerate);
5104 }
5105
5106 if (cpi->compressor_speed == 2)
5107 {
5108 vpx_usec_timer_start(&tsctimer);
5109 vpx_usec_timer_start(&ticktimer);
5110 }
5111
5112 cpi->lf_zeromv_pct = (cpi->zeromv_count * 100)/cm->MBs;
5113
5114 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
5115 {
5116 int i;
5117 const int num_part = (1 << cm->multi_token_partition);
5118 /* the available bytes in dest */
5119 const unsigned long dest_size = dest_end - dest;
5120 const int tok_part_buff_size = (dest_size * 9) / (10 * num_part);
5121
5122 unsigned char *dp = dest;
5123
5124 cpi->partition_d[0] = dp;
5125 dp += dest_size/10; /* reserve 1/10 for control partition */
5126 cpi->partition_d_end[0] = dp;
5127
5128 for(i = 0; i < num_part; i++)
5129 {
5130 cpi->partition_d[i + 1] = dp;
5131 dp += tok_part_buff_size;
5132 cpi->partition_d_end[i + 1] = dp;
5133 }
5134 }
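    /* Layout sketch with made-up sizes: an 8000 byte dest and 4 token
     * partitions give the control partition the first 800 bytes and each
     * token partition (8000 * 9) / 40 = 1800 bytes, so the reserved regions
     * exactly fill the output buffer.
     */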
5135 #endif
5136
5137 /* start with a 0 size frame */
5138 *size = 0;
5139
5140 /* Clear down mmx registers */
5141 vp8_clear_system_state();
5142
5143 cm->frame_type = INTER_FRAME;
5144 cm->frame_flags = *frame_flags;
5145
5146 #if 0
5147
5148 if (cm->refresh_alt_ref_frame)
5149 {
5150 cm->refresh_golden_frame = 0;
5151 cm->refresh_last_frame = 0;
5152 }
5153 else
5154 {
5155 cm->refresh_golden_frame = 0;
5156 cm->refresh_last_frame = 1;
5157 }
5158
5159 #endif
5160 /* find a free buffer for the new frame */
5161 {
5162 int i = 0;
5163 for(; i < NUM_YV12_BUFFERS; i++)
5164 {
5165 if(!cm->yv12_fb[i].flags)
5166 {
5167 cm->new_fb_idx = i;
5168 break;
5169 }
5170 }
5171
5172 assert(i < NUM_YV12_BUFFERS );
5173 }
5174 #if !(CONFIG_REALTIME_ONLY)
5175
5176 if (cpi->pass == 1)
5177 {
5178 Pass1Encode(cpi, size, dest, frame_flags);
5179 }
5180 else if (cpi->pass == 2)
5181 {
5182 Pass2Encode(cpi, size, dest, dest_end, frame_flags);
5183 }
5184 else
5185 #endif
5186 encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags);
5187
5188 if (cpi->compressor_speed == 2)
5189 {
5190 unsigned int duration, duration2;
5191 vpx_usec_timer_mark(&tsctimer);
5192 vpx_usec_timer_mark(&ticktimer);
5193
5194 duration = (int)(vpx_usec_timer_elapsed(&ticktimer));
5195 duration2 = (unsigned int)((double)duration / 2);
5196
5197 if (cm->frame_type != KEY_FRAME)
5198 {
5199 if (cpi->avg_encode_time == 0)
5200 cpi->avg_encode_time = duration;
5201 else
5202 cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3;
5203 }
5204
5205 if (duration2)
5206 {
5207 {
5208
5209 if (cpi->avg_pick_mode_time == 0)
5210 cpi->avg_pick_mode_time = duration2;
5211 else
5212 cpi->avg_pick_mode_time = (7 * cpi->avg_pick_mode_time + duration2) >> 3;
5213 }
5214 }
5215
5216 }
5217
5218 if (cm->refresh_entropy_probs == 0)
5219 {
5220 vpx_memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc));
5221 }
5222
5223 /* Save the contexts separately for alt ref, gold and last. */
5224 /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */
5225 if(cm->refresh_alt_ref_frame)
5226 vpx_memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc));
5227
5228 if(cm->refresh_golden_frame)
5229 vpx_memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc));
5230
5231 if(cm->refresh_last_frame)
5232 vpx_memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc));
5233
5234     /* if it's a dropped frame, honor the requests on subsequent frames */
5235 if (*size > 0)
5236 {
5237 cpi->droppable = !frame_is_reference(cpi);
5238
5239 /* return to normal state */
5240 cm->refresh_entropy_probs = 1;
5241 cm->refresh_alt_ref_frame = 0;
5242 cm->refresh_golden_frame = 0;
5243 cm->refresh_last_frame = 1;
5244 cm->frame_type = INTER_FRAME;
5245
5246 }
5247
5248 /* Save layer specific state */
5249 if (cpi->oxcf.number_of_layers > 1)
5250 save_layer_context (cpi);
5251
5252 vpx_usec_timer_mark(&cmptimer);
5253 cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
5254
5255 if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame)
5256 {
5257 generate_psnr_packet(cpi);
5258 }
5259
5260 #if CONFIG_INTERNAL_STATS
5261
5262 if (cpi->pass != 1)
5263 {
5264 cpi->bytes += *size;
5265
5266 if (cm->show_frame)
5267 {
5268 cpi->common.show_frame_mi = cpi->common.mi;
5269 cpi->count ++;
5270
5271 if (cpi->b_calculate_psnr)
5272 {
5273 uint64_t ye,ue,ve;
5274 double frame_psnr;
5275 YV12_BUFFER_CONFIG *orig = cpi->Source;
5276 YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
5277 int y_samples = orig->y_height * orig->y_width ;
5278 int uv_samples = orig->uv_height * orig->uv_width ;
5279 int t_samples = y_samples + 2 * uv_samples;
5280 double sq_error;
5281
5282 ye = calc_plane_error(orig->y_buffer, orig->y_stride,
5283 recon->y_buffer, recon->y_stride, orig->y_width, orig->y_height);
5284
5285 ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
5286 recon->u_buffer, recon->uv_stride, orig->uv_width, orig->uv_height);
5287
5288 ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
5289 recon->v_buffer, recon->uv_stride, orig->uv_width, orig->uv_height);
5290
5291 sq_error = (double)(ye + ue + ve);
5292
5293 frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error);
5294
5295 cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye);
5296 cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue);
5297 cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve);
5298 cpi->total_sq_error += sq_error;
5299 cpi->total += frame_psnr;
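                /* vpx_sse_to_psnr() maps the summed error to
                 * 10 * log10(samples * 255^2 / sse); e.g. an average squared
                 * error of 1 per sample works out to about 48.13 dB.
                 */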
5300 #if CONFIG_POSTPROC
5301 {
5302 YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer;
5303 double sq_error2;
5304 double frame_psnr2, frame_ssim2 = 0;
5305 double weight = 0;
5306
5307 vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0);
5308 vp8_clear_system_state();
5309
5310 ye = calc_plane_error(orig->y_buffer, orig->y_stride,
5311 pp->y_buffer, pp->y_stride, orig->y_width, orig->y_height);
5312
5313 ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
5314 pp->u_buffer, pp->uv_stride, orig->uv_width, orig->uv_height);
5315
5316 ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
5317 pp->v_buffer, pp->uv_stride, orig->uv_width, orig->uv_height);
5318
5319 sq_error2 = (double)(ye + ue + ve);
5320
5321 frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2);
5322
5323 cpi->totalp_y += vpx_sse_to_psnr(y_samples,
5324 255.0, (double)ye);
5325 cpi->totalp_u += vpx_sse_to_psnr(uv_samples,
5326 255.0, (double)ue);
5327 cpi->totalp_v += vpx_sse_to_psnr(uv_samples,
5328 255.0, (double)ve);
5329 cpi->total_sq_error2 += sq_error2;
5330 cpi->totalp += frame_psnr2;
5331
5332 frame_ssim2 = vp8_calc_ssim(cpi->Source,
5333 &cm->post_proc_buffer, 1, &weight);
5334
5335 cpi->summed_quality += frame_ssim2 * weight;
5336 cpi->summed_weights += weight;
5337
5338 if (cpi->oxcf.number_of_layers > 1)
5339 {
5340 unsigned int i;
5341
5342 for (i=cpi->current_layer;
5343 i<cpi->oxcf.number_of_layers; i++)
5344 {
5345 cpi->frames_in_layer[i]++;
5346
5347 cpi->bytes_in_layer[i] += *size;
5348 cpi->sum_psnr[i] += frame_psnr;
5349 cpi->sum_psnr_p[i] += frame_psnr2;
5350 cpi->total_error2[i] += sq_error;
5351 cpi->total_error2_p[i] += sq_error2;
5352 cpi->sum_ssim[i] += frame_ssim2 * weight;
5353 cpi->sum_weights[i] += weight;
5354 }
5355 }
5356 }
5357 #endif
5358 }
5359
5360 if (cpi->b_calculate_ssimg)
5361 {
5362 double y, u, v, frame_all;
5363 frame_all = vp8_calc_ssimg(cpi->Source, cm->frame_to_show,
5364 &y, &u, &v);
5365
5366 if (cpi->oxcf.number_of_layers > 1)
5367 {
5368 unsigned int i;
5369
5370 for (i=cpi->current_layer;
5371 i<cpi->oxcf.number_of_layers; i++)
5372 {
5373 if (!cpi->b_calculate_psnr)
5374 cpi->frames_in_layer[i]++;
5375
5376 cpi->total_ssimg_y_in_layer[i] += y;
5377 cpi->total_ssimg_u_in_layer[i] += u;
5378 cpi->total_ssimg_v_in_layer[i] += v;
5379 cpi->total_ssimg_all_in_layer[i] += frame_all;
5380 }
5381 }
5382 else
5383 {
5384 cpi->total_ssimg_y += y;
5385 cpi->total_ssimg_u += u;
5386 cpi->total_ssimg_v += v;
5387 cpi->total_ssimg_all += frame_all;
5388 }
5389 }
5390
5391 }
5392 }
5393
5394 #if 0
5395
5396 if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q)
5397 {
5398 skiptruecount += cpi->skip_true_count;
5399 skipfalsecount += cpi->skip_false_count;
5400 }
5401
5402 #endif
5403 #if 0
5404
5405 if (cpi->pass != 1)
5406 {
5407 FILE *f = fopen("skip.stt", "a");
5408 fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size);
5409
5410 if (cpi->is_src_frame_alt_ref == 1)
5411 fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size);
5412
5413 fclose(f);
5414 }
5415
5416 #endif
5417 #endif
5418
5419 cpi->common.error.setjmp = 0;
5420
5421 return 0;
5422 }
5423
5424 int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags)
5425 {
5426 if (cpi->common.refresh_alt_ref_frame)
5427 return -1;
5428 else
5429 {
5430 int ret;
5431
5432 #if CONFIG_MULTITHREAD
5433 if(cpi->b_lpf_running)
5434 {
5435 sem_wait(&cpi->h_event_end_lpf);
5436 cpi->b_lpf_running = 0;
5437 }
5438 #endif
5439
5440 #if CONFIG_POSTPROC
5441 cpi->common.show_frame_mi = cpi->common.mi;
5442 ret = vp8_post_proc_frame(&cpi->common, dest, flags);
5443 #else
5444
5445 if (cpi->common.frame_to_show)
5446 {
5447 *dest = *cpi->common.frame_to_show;
5448 dest->y_width = cpi->common.Width;
5449 dest->y_height = cpi->common.Height;
5450 dest->uv_height = cpi->common.Height / 2;
5451 ret = 0;
5452 }
5453 else
5454 {
5455 ret = -1;
5456 }
5457
5458 #endif
5459 vp8_clear_system_state();
5460 return ret;
5461 }
5462 }
5463
5464 int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4])
5465 {
5466 signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS];
5467 int internal_delta_q[MAX_MB_SEGMENTS];
5468 const int range = 63;
5469 int i;
5470
5471 // This method is currently incompatible with the cyclic refresh method
5472 if ( cpi->cyclic_refresh_mode_enabled )
5473 return -1;
5474
5475 // Check number of rows and columns match
5476 if (cpi->common.mb_rows != rows || cpi->common.mb_cols != cols)
5477 return -1;
5478
5479 // Range check the delta Q values and convert the external Q range values
5480 // to internal ones.
5481 if ( (abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) ||
5482 (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range) )
5483 return -1;
5484
5485 // Range check the delta lf values
5486 if ( (abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) ||
5487 (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range) )
5488 return -1;
5489
5490 if (!map)
5491 {
5492 disable_segmentation(cpi);
5493 return 0;
5494 }
5495
5496 // Translate the external delta q values to internal values.
5497 for ( i = 0; i < MAX_MB_SEGMENTS; i++ )
5498 internal_delta_q[i] =
5499 ( delta_q[i] >= 0 ) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];
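    /* The external deltas are on the 0..63 user Q scale; q_trans[] maps them
     * onto the 0..127 internal quantizer index scale, preserving sign (e.g. a
     * user delta of -10 becomes -q_trans[10]).
     */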
5500
5501 /* Set the segmentation Map */
5502 set_segmentation_map(cpi, map);
5503
5504 /* Activate segmentation. */
5505 enable_segmentation(cpi);
5506
5507 /* Set up the quant segment data */
5508 feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0];
5509 feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1];
5510 feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2];
5511 feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3];
5512
5513     /* Set up the loop filter segment data */
5514 feature_data[MB_LVL_ALT_LF][0] = delta_lf[0];
5515 feature_data[MB_LVL_ALT_LF][1] = delta_lf[1];
5516 feature_data[MB_LVL_ALT_LF][2] = delta_lf[2];
5517 feature_data[MB_LVL_ALT_LF][3] = delta_lf[3];
5518
5519 cpi->segment_encode_breakout[0] = threshold[0];
5520 cpi->segment_encode_breakout[1] = threshold[1];
5521 cpi->segment_encode_breakout[2] = threshold[2];
5522 cpi->segment_encode_breakout[3] = threshold[3];
5523
5524 /* Initialise the feature data structure */
5525 set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA);
5526
5527 return 0;
5528 }
5529
5530 int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows, unsigned int cols)
5531 {
5532 if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols)
5533 {
5534 if (map)
5535 {
5536 vpx_memcpy(cpi->active_map, map, rows * cols);
5537 cpi->active_map_enabled = 1;
5538 }
5539 else
5540 cpi->active_map_enabled = 0;
5541
5542 return 0;
5543 }
5544 else
5545 {
5546 return -1 ;
5547 }
5548 }
5549
5550 int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode, VPX_SCALING vert_mode)
5551 {
5552 if (horiz_mode <= ONETWO)
5553 cpi->common.horiz_scale = horiz_mode;
5554 else
5555 return -1;
5556
5557 if (vert_mode <= ONETWO)
5558 cpi->common.vert_scale = vert_mode;
5559 else
5560 return -1;
5561
5562 return 0;
5563 }
5564
5565
5566
5567 int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest)
5568 {
5569 int i, j;
5570 int Total = 0;
5571
5572 unsigned char *src = source->y_buffer;
5573 unsigned char *dst = dest->y_buffer;
5574
5575     /* Loop through the Y plane raw and reconstruction data summing
5576      * the squared differences.
5577 */
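    /* Frame dimensions are assumed to be multiples of 16 here: a 640x480
     * source is scanned as 40x30 blocks of 16x16 samples, and the sum of
     * squared differences returned by vp8_mse16x16() for each block is
     * accumulated into Total.
     */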
5578 for (i = 0; i < source->y_height; i += 16)
5579 {
5580 for (j = 0; j < source->y_width; j += 16)
5581 {
5582 unsigned int sse;
5583 Total += vp8_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
5584 }
5585
5586 src += 16 * source->y_stride;
5587 dst += 16 * dest->y_stride;
5588 }
5589
5590 return Total;
5591 }
5592
5593
5594 int vp8_get_quantizer(VP8_COMP *cpi)
5595 {
5596 return cpi->common.base_qindex;
5597 }
5598