/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>
#include <limits.h>

#include "vpx/vpx_encoder.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem_ops.h"

#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_subexp.h"
#include "vp9/encoder/vp9_tokenize.h"
#include "vp9/encoder/vp9_write_bit_buffer.h"

static struct vp9_token intra_mode_encodings[INTRA_MODES];
static struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS];
static struct vp9_token partition_encodings[PARTITION_TYPES];
static struct vp9_token inter_mode_encodings[INTER_MODES];

void vp9_entropy_mode_init() {
  vp9_tokens_from_tree(intra_mode_encodings, vp9_intra_mode_tree);
  vp9_tokens_from_tree(switchable_interp_encodings, vp9_switchable_interp_tree);
  vp9_tokens_from_tree(partition_encodings, vp9_partition_tree);
  vp9_tokens_from_tree(inter_mode_encodings, vp9_inter_mode_tree);
}

static void write_intra_mode(vp9_writer *w, PREDICTION_MODE mode,
                             const vp9_prob *probs) {
  vp9_write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}

static void write_inter_mode(vp9_writer *w, PREDICTION_MODE mode,
                             const vp9_prob *probs) {
  assert(is_inter_mode(mode));
  vp9_write_token(w, vp9_inter_mode_tree, probs,
                  &inter_mode_encodings[INTER_OFFSET(mode)]);
}

static void encode_unsigned_max(struct vp9_write_bit_buffer *wb,
                                int data, int max) {
  vp9_wb_write_literal(wb, data, get_unsigned_bits(max));
}

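// Derives branch counts for the given tree from the raw symbol counts and
// conditionally writes a differential update for each of the n - 1 node
// probabilities.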
static void prob_diff_update(const vp9_tree_index *tree,
                             vp9_prob probs[/*n - 1*/],
                             const unsigned int counts[/*n - 1*/],
                             int n, vp9_writer *w) {
  int i;
  unsigned int branch_ct[32][2];

  // Assuming max number of probabilities <= 32
  assert(n <= 32);

  vp9_tree_probs_from_distribution(tree, branch_ct, counts);
  for (i = 0; i < n - 1; ++i)
    vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}

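// Writes the selected transform size for a block as up to three binary
// decisions, stopping early once the maximum transform size allowed by the
// block size has been reached.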
static void write_selected_tx_size(const VP9_COMMON *cm,
                                   const MACROBLOCKD *xd,
                                   TX_SIZE tx_size, BLOCK_SIZE bsize,
                                   vp9_writer *w) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
                                                 &cm->fc.tx_probs);
  vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}

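// Writes the skip flag unless the segment's SEG_LVL_SKIP feature makes it
// implicit, and returns the skip value the decoder will use.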
static int write_skip(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                      int segment_id, const MODE_INFO *mi, vp9_writer *w) {
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip = mi->mbmi.skip;
    vp9_write(w, skip, vp9_get_skip_prob(cm, xd));
    return skip;
  }
}

static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w) {
  int k;

  for (k = 0; k < SKIP_CONTEXTS; ++k)
    vp9_cond_prob_diff_update(w, &cm->fc.skip_probs[k], cm->counts.skip[k]);
}

static void update_switchable_interp_probs(VP9_COMMON *cm, vp9_writer *w) {
  int j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    prob_diff_update(vp9_switchable_interp_tree,
                     cm->fc.switchable_interp_prob[j],
                     cm->counts.switchable_interp[j], SWITCHABLE_FILTERS, w);
}

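// Packs the coefficient tokens for one block, advancing *tp as tokens are
// consumed. Tokens in the constrained set are written as two tree writes:
// the unconstrained nodes first, then the constrained (Pareto) nodes.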
static void pack_mb_tokens(vp9_writer *w,
                           TOKENEXTRA **tp, const TOKENEXTRA *const stop) {
  TOKENEXTRA *p = *tp;

  while (p < stop && p->token != EOSB_TOKEN) {
    const int t = p->token;
    const struct vp9_token *const a = &vp9_coef_encodings[t];
    const vp9_extra_bit *const b = &vp9_extra_bits[t];
    int i = 0;
    int v = a->value;
    int n = a->len;

    /* skip one or two nodes */
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }

    // TODO(jbb): expanding this can lead to big gains. It allows
    // much better branch prediction and would enable us to avoid numerous
    // lookups and compares.

    // If we have a token that's in the constrained set, the coefficient tree
    // is split into two treed writes. The first treed write takes care of the
    // unconstrained nodes. The second treed write takes care of the
    // constrained nodes.
    if (t >= TWO_TOKEN && t < EOB_TOKEN) {
      int len = UNCONSTRAINED_NODES - p->skip_eob_node;
      int bits = v >> (n - len);
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, bits, len, i);
      vp9_write_tree(w, vp9_coef_con_tree,
                     vp9_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
                     v, n - len, 0);
    } else {
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, v, n, i);
    }

    if (b->base_val) {
      const int e = p->extra, l = b->len;

      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;
        int n = l;  /* number of bits in v, assumed nonzero */
        int i = 0;

        do {
          const int bb = (v >> --n) & 1;
          vp9_write(w, bb, pb[i >> 1]);
          i = b->tree[i + bb];
        } while (n);
      }

      vp9_write_bit(w, e & 1);
    }
    ++p;
  }

  *tp = p + (p->token == EOSB_TOKEN);
}

static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
}

// This function encodes the reference frame
static void write_ref_frames(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                             vp9_writer *w) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const int is_compound = has_second_ref(mbmi);
  const int segment_id = mbmi->segment_id;

  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    assert(!is_compound);
    assert(mbmi->ref_frame[0] ==
           vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
  } else {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      vp9_write(w, is_compound, vp9_get_reference_mode_prob(cm, xd));
    } else {
      assert(!is_compound == (cm->reference_mode == SINGLE_REFERENCE));
    }

    if (is_compound) {
      vp9_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME,
                vp9_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
      vp9_write(w, bit0, vp9_get_pred_prob_single_ref_p1(cm, xd));
      if (bit0) {
        const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
        vp9_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd));
      }
    }
  }
}

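// Writes the mode info for a block in an inter frame: segment id, skip flag,
// intra/inter flag and transform size, then either the intra modes or the
// reference frames, inter modes, interpolation filter and motion vectors.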
static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
                                vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc.nmvc;
  const MACROBLOCK *const x = &cpi->mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const struct segmentation *const seg = &cm->seg;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const PREDICTION_MODE mode = mbmi->mode;
  const int segment_id = mbmi->segment_id;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;
  const int is_inter = is_inter_block(mbmi);
  const int is_compound = has_second_ref(mbmi);
  int skip, ref;

  if (seg->update_map) {
    if (seg->temporal_update) {
      const int pred_flag = mbmi->seg_id_predicted;
      vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
      vp9_write(w, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(w, seg, segment_id);
    } else {
      write_segment_id(w, seg, segment_id);
    }
  }

  skip = write_skip(cm, xd, segment_id, mi, w);

  if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(is_inter &&
        (skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
    write_selected_tx_size(cm, xd, mbmi->tx_size, bsize, w);
  }

  if (!is_inter) {
    if (bsize >= BLOCK_8X8) {
      write_intra_mode(w, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
    } else {
      int idx, idy;
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(w, b_mode, cm->fc.y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]);
  } else {
    const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
    const vp9_prob *const inter_probs = cm->fc.inter_mode_probs[mode_ctx];
    write_ref_frames(cm, xd, w);

    // If segment skip is not enabled code the mode.
    if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_inter_mode(w, mode, inter_probs);
        ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(mode)];
      }
    }

    if (cm->interp_filter == SWITCHABLE) {
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      vp9_write_token(w, vp9_switchable_interp_tree,
                      cm->fc.switchable_interp_prob[ctx],
                      &switchable_interp_encodings[mbmi->interp_filter]);
    } else {
      assert(mbmi->interp_filter == cm->interp_filter);
    }

    if (bsize < BLOCK_8X8) {
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const int j = idy * 2 + idx;
          const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
          write_inter_mode(w, b_mode, inter_probs);
          ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
          if (b_mode == NEWMV) {
            for (ref = 0; ref < 1 + is_compound; ++ref)
              vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
                            &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                            nmvc, allow_hp);
          }
        }
      }
    } else {
      if (mode == NEWMV) {
        for (ref = 0; ref < 1 + is_compound; ++ref)
          vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
                        &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
                        allow_hp);
      }
    }
  }
}

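// Writes the mode info for a key-frame (or intra-only) block: segment id,
// skip flag, transform size and the intra modes, using the above/left
// neighbors to select the y mode probabilities.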
static void write_mb_modes_kf(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                              MODE_INFO **mi_8x8, vp9_writer *w) {
  const struct segmentation *const seg = &cm->seg;
  const MODE_INFO *const mi = mi_8x8[0];
  const MODE_INFO *const above_mi = mi_8x8[-xd->mi_stride];
  const MODE_INFO *const left_mi = xd->left_available ? mi_8x8[-1] : NULL;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;

  if (seg->update_map)
    write_segment_id(w, seg, mbmi->segment_id);

  write_skip(cm, xd, mbmi->segment_id, mi, w);

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
    write_selected_tx_size(cm, xd, mbmi->tx_size, bsize, w);

  if (bsize >= BLOCK_8X8) {
    write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
  } else {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int block = idy * 2 + idx;
        write_intra_mode(w, mi->bmi[block].as_mode,
                         get_y_mode_probs(mi, above_mi, left_mi, block));
      }
    }
  }

  write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]);
}

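// Writes the mode info and then the coefficient tokens for a single block.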
static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
                          vp9_writer *w, TOKENEXTRA **tok,
                          const TOKENEXTRA *const tok_end,
                          int mi_row, int mi_col) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MODE_INFO *m;

  xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
  m = xd->mi[0];

  set_mi_row_col(xd, tile,
                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                 cm->mi_rows, cm->mi_cols);
  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cm, xd, xd->mi, w);
  } else {
    pack_inter_mode_mvs(cpi, m, w);
  }

  assert(*tok < tok_end);
  pack_mb_tokens(w, tok, tok_end);
}

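// Writes the partition type for a block. When the block extends past the
// right or bottom frame edge only a subset of partitions is legal, so a
// single bit (or nothing) is written instead of the full token.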
static void write_partition(const VP9_COMMON *const cm,
                            const MACROBLOCKD *const xd,
                            int hbs, int mi_row, int mi_col,
                            PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) {
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (has_rows && has_cols) {
    vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
  } else if (!has_rows && has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vp9_write(w, p == PARTITION_SPLIT, probs[1]);
  } else if (has_rows && !has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vp9_write(w, p == PARTITION_SPLIT, probs[2]);
  } else {
    assert(p == PARTITION_SPLIT);
  }
}

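// Recursively writes a superblock: the partition type first, then either the
// leaf blocks or the four split quadrants.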
static void write_modes_sb(VP9_COMP *cpi,
                           const TileInfo *const tile, vp9_writer *w,
                           TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  const VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;

  const int bsl = b_width_log2(bsize);
  const int bs = (1 << bsl) / 4;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  const MODE_INFO *m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  partition = partition_lookup[bsl][m->mbmi.sb_type];
  write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

static void write_modes(VP9_COMP *cpi,
                        const TileInfo *const tile, vp9_writer *w,
                        TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) {
  int mi_row, mi_col;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(cpi->mb.e_mbd.left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
                     BLOCK_64X64);
  }
}

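// Converts the raw coefficient token counts for a transform size into branch
// counts and per-node model probabilities, folding the EOB branch counts into
// node 0.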
static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size,
                                    vp9_coeff_stats *coef_branch_ct,
                                    vp9_coeff_probs_model *coef_probs) {
  vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < PLANE_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
          vp9_tree_probs_from_distribution(vp9_coef_tree,
                                           coef_branch_ct[i][j][k][l],
                                           coef_counts[i][j][k][l]);
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][0][0];
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                coef_branch_ct[i][j][k][l][m][0],
                coef_branch_ct[i][j][k][l][m][1]);
        }
      }
    }
  }
}

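// Writes the coefficient probability updates for one transform size. The
// TWO_LOOP strategy first measures the total savings in a dry run and skips
// the update entirely if it does not pay; the ONE_LOOP variants write updates
// in a single pass, with ONE_LOOP_REDUCED restricting the bands and contexts
// that are searched.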
static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
                                     TX_SIZE tx_size,
                                     vp9_coeff_stats *frame_branch_ct,
                                     vp9_coeff_probs_model *new_coef_probs) {
  vp9_coeff_probs_model *old_coef_probs = cpi->common.fc.coef_probs[tx_size];
  const vp9_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  switch (cpi->sf.use_fast_coef_updates) {
    case TWO_LOOP: {
      /* dry run to see if there is any update at all needed */
      int savings = 0;
      int update[2] = {0, 0};
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                const vp9_prob oldp = old_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                if (u)
                  savings += s - (int)(vp9_cost_zero(upd));
                else
                  savings -= (int)(vp9_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Is coef updated at all */
      if (update[1] == 0 || savings < 0) {
        vp9_write_bit(bc, 0);
        return;
      }
      vp9_write_bit(bc, 1);
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_coef_probs[i][j][k][l] + t;
                const vp9_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vp9_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }

    case ONE_LOOP:
    case ONE_LOOP_REDUCED: {
      const int prev_coef_contexts_to_update =
          cpi->sf.use_fast_coef_updates == ONE_LOOP_REDUCED ?
              COEFF_CONTEXTS >> 1 : COEFF_CONTEXTS;
      const int coef_band_to_update =
          cpi->sf.use_fast_coef_updates == ONE_LOOP_REDUCED ?
              COEF_BANDS >> 1 : COEF_BANDS;
      int updates = 0;
      int noupdates_before_first = 0;
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;
                if (l >= prev_coef_contexts_to_update ||
                    k >= coef_band_to_update) {
                  u = 0;
                } else {
                  if (t == PIVOT_NODE)
                    s = vp9_prob_diff_update_savings_search_model(
                        frame_branch_ct[i][j][k][l][0],
                        old_coef_probs[i][j][k][l], &newp, upd);
                  else
                    s = vp9_prob_diff_update_savings_search(
                        frame_branch_ct[i][j][k][l][t],
                        *oldp, &newp, upd);
                  if (s > 0 && newp != *oldp)
                    u = 1;
                }
                updates += u;
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vp9_write_bit(bc, 1);
                  for (v = 0; v < noupdates_before_first; ++v)
                    vp9_write(bc, 0, upd);
                }
                vp9_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vp9_write_bit(bc, 0);  // no updates
      }
      return;
    }

    default:
      assert(0);
  }
}

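// Builds the per-transform-size coefficient statistics and writes the
// corresponding probability updates for every transform size allowed by the
// current tx_mode.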
static void update_coef_probs(VP9_COMP *cpi, vp9_writer* w) {
  const TX_MODE tx_mode = cpi->common.tx_mode;
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  vp9_coeff_stats frame_branch_ct[TX_SIZES][PLANE_TYPES];
  vp9_coeff_probs_model frame_coef_probs[TX_SIZES][PLANE_TYPES];

  vp9_clear_system_state();

  for (tx_size = TX_4X4; tx_size <= TX_32X32; ++tx_size)
    build_tree_distribution(cpi, tx_size, frame_branch_ct[tx_size],
                            frame_coef_probs[tx_size]);

  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
    update_coef_probs_common(w, cpi, tx_size, frame_branch_ct[tx_size],
                             frame_coef_probs[tx_size]);
}

static void encode_loopfilter(struct loopfilter *lf,
                              struct vp9_write_bit_buffer *wb) {
  int i;

  // Encode the loop filter level and type
  vp9_wb_write_literal(wb, lf->filter_level, 6);
  vp9_wb_write_literal(wb, lf->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled);

  if (lf->mode_ref_delta_enabled) {
    vp9_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
      for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
        const int delta = lf->ref_deltas[i];
        const int changed = delta != lf->last_ref_deltas[i];
        vp9_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_ref_deltas[i] = delta;
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        }
      }

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = lf->mode_deltas[i];
        const int changed = delta != lf->last_mode_deltas[i];
        vp9_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_mode_deltas[i] = delta;
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        }
      }
    }
  }
}

static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) {
  if (delta_q != 0) {
    vp9_wb_write_bit(wb, 1);
    vp9_wb_write_literal(wb, abs(delta_q), 4);
    vp9_wb_write_bit(wb, delta_q < 0);
  } else {
    vp9_wb_write_bit(wb, 0);
  }
}

static void encode_quantization(const VP9_COMMON *const cm,
                                struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
}

static void encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd,
                                struct vp9_write_bit_buffer *wb) {
  int i, j;

  const struct segmentation *seg = &cm->seg;

  vp9_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  vp9_wb_write_bit(wb, seg->update_map);
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp9_choose_segmap_coding_method(cm, xd);
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = seg->tree_probs[i];
      const int update = prob != MAX_PROB;
      vp9_wb_write_bit(wb, update);
      if (update)
        vp9_wb_write_literal(wb, prob, 8);
    }

    // Write out the chosen coding method.
    vp9_wb_write_bit(wb, seg->temporal_update);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = seg->pred_probs[i];
        const int update = prob != MAX_PROB;
        vp9_wb_write_bit(wb, update);
        if (update)
          vp9_wb_write_literal(wb, prob, 8);
      }
    }
  }

  // Segmentation data
  vp9_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vp9_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = vp9_segfeature_active(seg, i, j);
        vp9_wb_write_bit(wb, active);
        if (active) {
          const int data = vp9_get_segdata(seg, i, j);
          const int data_max = vp9_seg_feature_data_max(j);

          if (vp9_is_segfeature_signed(j)) {
            encode_unsigned_max(wb, abs(data), data_max);
            vp9_wb_write_bit(wb, data < 0);
          } else {
            encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}

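// Writes the transform mode and, when TX_MODE_SELECT is used, the conditional
// updates of the per-context transform size probabilities.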
static void encode_txfm_probs(VP9_COMMON *cm, vp9_writer *w) {
  // Mode
  vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j],
                                  ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j],
                                  ct_32x32p[j]);
    }
  }
}

static void write_interp_filter(INTERP_FILTER filter,
                                struct vp9_write_bit_buffer *wb) {
  const int filter_to_literal[] = { 1, 0, 2, 3 };

  vp9_wb_write_bit(wb, filter == SWITCHABLE);
  if (filter != SWITCHABLE)
    vp9_wb_write_literal(wb, filter_to_literal[filter], 2);
}

static void fix_interp_filter(VP9_COMMON *cm) {
  if (cm->interp_filter == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += cm->counts.switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used. So set the filter at frame level
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          cm->interp_filter = i;
          break;
        }
      }
    }
  }
}

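// Writes the tile configuration: the number of tile columns as a run of 1
// bits above the minimum (terminated by a 0 when below the maximum), and the
// number of tile rows as up to two bits.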
static void write_tile_info(const VP9_COMMON *const cm,
                            struct vp9_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--)
    vp9_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols)
    vp9_wb_write_bit(wb, 0);

  // rows
  vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
}

static int get_refresh_mask(VP9_COMP *cpi) {
  if (vp9_preserve_existing_gf(cpi)) {
    // We have decided to preserve the previously existing golden frame as our
    // new ARF frame. However, in the short term we leave it in the GF slot and,
    // if we're updating the GF with the current decoded frame, we save it
    // instead to the ARF slot.
    // Later, in the function vp9_encoder.c:vp9_update_reference_frames() we
    // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
    // there so that it can be done outside of the recode loop.
    // Note: This is highly specific to the use of ARF as a forward reference,
    // and this needs to be generalized as other uses are implemented
    // (like RTC/temporal scalability).
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
    if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
      arf_idx = gf_group->arf_update_idx[gf_group->index];
    }
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}

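// Encodes every tile into the output buffer. Each tile except the last is
// preceded by a 4-byte big-endian size so the decoder can locate tile
// boundaries. Returns the total number of bytes written.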
static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
  VP9_COMMON *const cm = &cpi->common;
  vp9_writer residual_bc;

  int tile_row, tile_col;
  TOKENEXTRA *tok[4][1 << 6], *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;

  vpx_memset(cm->above_seg_context, 0, sizeof(*cm->above_seg_context) *
             mi_cols_aligned_to_sb(cm->mi_cols));

  tok[0][0] = cpi->tok;
  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    if (tile_row)
      tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] +
                         cpi->tok_count[tile_row - 1][tile_cols - 1];

    for (tile_col = 1; tile_col < tile_cols; tile_col++)
      tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] +
                                cpi->tok_count[tile_row][tile_col - 1];
  }

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      TileInfo tile;

      vp9_tile_init(&tile, cm, tile_row, tile_col);
      tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];

      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vp9_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &tile, &residual_bc, &tok[tile_row][tile_col], tok_end);
      assert(tok[tile_row][tile_col] == tok_end);
      vp9_stop_encode(&residual_bc);
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        // size of this tile
        mem_put_be32(data_ptr + total_size, residual_bc.pos);
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }

  return total_size;
}

static void write_display_size(const VP9_COMMON *cm,
                               struct vp9_write_bit_buffer *wb) {
  const int scaling_active = cm->width != cm->display_width ||
                             cm->height != cm->display_height;
  vp9_wb_write_bit(wb, scaling_active);
  if (scaling_active) {
    vp9_wb_write_literal(wb, cm->display_width - 1, 16);
    vp9_wb_write_literal(wb, cm->display_height - 1, 16);
  }
}

static void write_frame_size(const VP9_COMMON *cm,
                             struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->width - 1, 16);
  vp9_wb_write_literal(wb, cm->height - 1, 16);

  write_display_size(cm, wb);
}

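// For each active reference frame, writes a bit telling whether the current
// frame size matches that reference's size; if no match is signaled, the size
// is coded explicitly. SVC forces the explicit path in the cases noted below.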
static void write_frame_size_with_refs(VP9_COMP *cpi,
                                       struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  int found = 0;

  MV_REFERENCE_FRAME ref_frame;
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);
    found = cm->width == cfg->y_crop_width &&
            cm->height == cfg->y_crop_height;

    // Set "found" to 0 for temporal svc and for spatial svc key frame
    if (cpi->use_svc &&
        (cpi->svc.number_spatial_layers == 1 ||
         cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame)) {
      found = 0;
    }
    vp9_wb_write_bit(wb, found);
    if (found) {
      break;
    }
  }

  if (!found) {
    vp9_wb_write_literal(wb, cm->width - 1, 16);
    vp9_wb_write_literal(wb, cm->height - 1, 16);
  }

  write_display_size(cm, wb);
}

static void write_sync_code(struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_0, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_1, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_2, 8);
}

static void write_profile(BITSTREAM_PROFILE profile,
                          struct vp9_write_bit_buffer *wb) {
  switch (profile) {
    case PROFILE_0:
      vp9_wb_write_literal(wb, 0, 2);
      break;
    case PROFILE_1:
      vp9_wb_write_literal(wb, 2, 2);
      break;
    case PROFILE_2:
      vp9_wb_write_literal(wb, 1, 2);
      break;
    case PROFILE_3:
      vp9_wb_write_literal(wb, 6, 3);
      break;
    default:
      assert(0);
  }
}

static void write_bitdepth_colorspace_sampling(
    VP9_COMMON *const cm, struct vp9_write_bit_buffer *wb) {
  if (cm->profile >= PROFILE_2) {
    assert(cm->bit_depth > BITS_8);
    vp9_wb_write_bit(wb, cm->bit_depth - BITS_10);
  }
  vp9_wb_write_literal(wb, cm->color_space, 3);
  if (cm->color_space != SRGB) {
    vp9_wb_write_bit(wb, 0);  // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
      vp9_wb_write_bit(wb, cm->subsampling_x);
      vp9_wb_write_bit(wb, cm->subsampling_y);
      vp9_wb_write_bit(wb, 0);  // unused
    } else {
      assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
    }
  } else {
    assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
    vp9_wb_write_bit(wb, 0);  // unused
  }
}

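// Writes the uncompressed frame header: frame marker, profile, frame type and
// flags, sync code and size information, reference frame setup, loop filter,
// quantizer, segmentation and tile parameters.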
static void write_uncompressed_header(VP9_COMP *cpi,
                                      struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;

  vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2);

  write_profile(cm->profile, wb);

  vp9_wb_write_bit(wb, 0);  // show_existing_frame
  vp9_wb_write_bit(wb, cm->frame_type);
  vp9_wb_write_bit(wb, cm->show_frame);
  vp9_wb_write_bit(wb, cm->error_resilient_mode);

  if (cm->frame_type == KEY_FRAME) {
    write_sync_code(wb);
    write_bitdepth_colorspace_sampling(cm, wb);
    write_frame_size(cm, wb);
  } else {
    if (!cm->show_frame)
      vp9_wb_write_bit(wb, cm->intra_only);

    if (!cm->error_resilient_mode)
      vp9_wb_write_literal(wb, cm->reset_frame_context, 2);

    if (cm->intra_only) {
      write_sync_code(wb);

      // Note for profile 0, 420 8bpp is assumed.
      if (cm->profile > PROFILE_0) {
        write_bitdepth_colorspace_sampling(cm, wb);
      }

      vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      write_frame_size(cm, wb);
    } else {
      MV_REFERENCE_FRAME ref_frame;
      vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
        vp9_wb_write_literal(wb, get_ref_frame_idx(cpi, ref_frame),
                             REF_FRAMES_LOG2);
        vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
      }

      write_frame_size_with_refs(cpi, wb);

      vp9_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_interp_filter(cm);
      write_interp_filter(cm->interp_filter, wb);
    }
  }

  if (!cm->error_resilient_mode) {
    vp9_wb_write_bit(wb, cm->refresh_frame_context);
    vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
  }

  vp9_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);

  encode_loopfilter(&cm->lf, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cm, &cpi->mb.e_mbd, wb);

  write_tile_info(cm, wb);
}

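// Writes the compressed header (the first partition): transform mode,
// coefficient, skip, mode, reference and motion vector probability updates.
// Returns its size in bytes.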
static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  FRAME_CONTEXT *const fc = &cm->fc;
  vp9_writer header_bc;

  vp9_start_encode(&header_bc, data);

  if (xd->lossless)
    cm->tx_mode = ONLY_4X4;
  else
    encode_txfm_probs(cm, &header_bc);

  update_coef_probs(cpi, &header_bc);
  update_skip_probs(cm, &header_bc);

  if (!frame_is_intra_only(cm)) {
    int i;

    for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
      prob_diff_update(vp9_inter_mode_tree, cm->fc.inter_mode_probs[i],
                       cm->counts.inter_mode[i], INTER_MODES, &header_bc);

    vp9_zero(cm->counts.inter_mode);

    if (cm->interp_filter == SWITCHABLE)
      update_switchable_interp_probs(cm, &header_bc);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                cm->counts.intra_inter[i]);

    if (cm->allow_comp_inter_inter) {
      const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;

      vp9_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vp9_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      cm->counts.comp_inter[i]);
      }
    }

    if (cm->reference_mode != COMPOUND_REFERENCE) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                  cm->counts.single_ref[i][0]);
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                  cm->counts.single_ref[i][1]);
      }
    }

    if (cm->reference_mode != SINGLE_REFERENCE)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                  cm->counts.comp_ref[i]);

    for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
      prob_diff_update(vp9_intra_mode_tree, cm->fc.y_mode_prob[i],
                       cm->counts.y_mode[i], INTRA_MODES, &header_bc);

    for (i = 0; i < PARTITION_CONTEXTS; ++i)
      prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
                       cm->counts.partition[i], PARTITION_TYPES, &header_bc);

    vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc);
  }

  vp9_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);

  return header_bc.pos;
}

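// Assembles the final bitstream: the uncompressed header, a 16-bit first
// partition size (patched in once the compressed header has been written),
// the compressed header and then the tile data.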
void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) {
  uint8_t *data = dest;
  size_t first_part_size, uncompressed_hdr_size;
  struct vp9_write_bit_buffer wb = {data, 0};
  struct vp9_write_bit_buffer saved_wb;

  write_uncompressed_header(cpi, &wb);
  saved_wb = wb;
  vp9_wb_write_literal(&wb, 0, 16);  // don't know the first partition size yet

  uncompressed_hdr_size = vp9_wb_bytes_written(&wb);
  data += uncompressed_hdr_size;

  vp9_clear_system_state();

  first_part_size = write_compressed_header(cpi, data);
  data += first_part_size;
  // TODO(jbb): Figure out what to do if first_part_size > 16 bits.
  vp9_wb_write_literal(&saved_wb, (int)first_part_size, 16);

  data += encode_tiles(cpi, data);

  *size = data - dest;
}