1 /*
2 * Copyright (c) 2011 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10 #include <assert.h>
11 #include <stdlib.h>
12
13 #include "./vpx_config.h"
14
15 #include "vp9/common/vp9_common.h"
16
17 #include "vp9/encoder/vp9_extend.h"
18 #include "vp9/encoder/vp9_lookahead.h"
19 #include "vp9/encoder/vp9_onyx_int.h"
20
// Circular buffer of source frames awaiting encoding. Frames are written at
// write_idx and consumed from read_idx; sz tracks current occupancy.
struct lookahead_ctx {
  unsigned int max_sz;         /* Absolute size of the queue */
  unsigned int sz;             /* Number of buffers currently in the queue */
  unsigned int read_idx;       /* Read index */
  unsigned int write_idx;      /* Write index */
  struct lookahead_entry *buf; /* Buffer list */
};
28
29
30 /* Return the buffer at the given absolute index and increment the index */
pop(struct lookahead_ctx * ctx,unsigned int * idx)31 static struct lookahead_entry *pop(struct lookahead_ctx *ctx,
32 unsigned int *idx) {
33 unsigned int index = *idx;
34 struct lookahead_entry *buf = ctx->buf + index;
35
36 assert(index < ctx->max_sz);
37 if (++index >= ctx->max_sz)
38 index -= ctx->max_sz;
39 *idx = index;
40 return buf;
41 }
42
43
vp9_lookahead_destroy(struct lookahead_ctx * ctx)44 void vp9_lookahead_destroy(struct lookahead_ctx *ctx) {
45 if (ctx) {
46 if (ctx->buf) {
47 unsigned int i;
48
49 for (i = 0; i < ctx->max_sz; i++)
50 vp9_free_frame_buffer(&ctx->buf[i].img);
51 free(ctx->buf);
52 }
53 free(ctx);
54 }
55 }
56
57
vp9_lookahead_init(unsigned int width,unsigned int height,unsigned int subsampling_x,unsigned int subsampling_y,unsigned int depth)58 struct lookahead_ctx *vp9_lookahead_init(unsigned int width,
59 unsigned int height,
60 unsigned int subsampling_x,
61 unsigned int subsampling_y,
62 unsigned int depth) {
63 struct lookahead_ctx *ctx = NULL;
64
65 // Clamp the lookahead queue depth
66 depth = clamp(depth, 1, MAX_LAG_BUFFERS);
67
68 // Allocate memory to keep previous source frames available.
69 depth += MAX_PRE_FRAMES;
70
71 // Allocate the lookahead structures
72 ctx = calloc(1, sizeof(*ctx));
73 if (ctx) {
74 unsigned int i;
75 ctx->max_sz = depth;
76 ctx->buf = calloc(depth, sizeof(*ctx->buf));
77 if (!ctx->buf)
78 goto bail;
79 for (i = 0; i < depth; i++)
80 if (vp9_alloc_frame_buffer(&ctx->buf[i].img,
81 width, height, subsampling_x, subsampling_y,
82 VP9_ENC_BORDER_IN_PIXELS))
83 goto bail;
84 }
85 return ctx;
86 bail:
87 vp9_lookahead_destroy(ctx);
88 return NULL;
89 }
90
#define USE_PARTIAL_COPY 0

// Copy |src| into the next write slot of the lookahead queue, tagging it
// with its presentation interval [ts_start, ts_end) and encoder |flags|.
// Returns 1 if the queue cannot accept another frame, 0 on success.
int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
                       int64_t ts_start, int64_t ts_end, unsigned int flags) {
  struct lookahead_entry *buf;
#if USE_PARTIAL_COPY
  // NOTE(review): this disabled path references |active_map|, which is not
  // declared anywhere in this scope -- enabling USE_PARTIAL_COPY would fail
  // to compile without reintroducing that parameter.
  int row, col, active_end;
  int mb_rows = (src->y_height + 15) >> 4;
  int mb_cols = (src->y_width + 15) >> 4;
#endif

  // Reject the push when accepting this frame would consume the slots
  // reserved for previously shown source frames (MAX_PRE_FRAMES).
  if (ctx->sz + 1 + MAX_PRE_FRAMES > ctx->max_sz)
    return 1;
  ctx->sz++;
  // pop() here yields the current write slot and advances write_idx.
  buf = pop(ctx, &ctx->write_idx);

#if USE_PARTIAL_COPY
  // TODO(jkoleszar): This is disabled for now, as
  // vp9_copy_and_extend_frame_with_rect is not subsampling/alpha aware.

  // Only do this partial copy if the following conditions are all met:
  // 1. Lookahead queue has size of 1.
  // 2. Active map is provided.
  // 3. This is not a key frame, golden nor altref frame.
  if (ctx->max_sz == 1 && active_map && !flags) {
    for (row = 0; row < mb_rows; ++row) {
      col = 0;

      while (1) {
        // Find the first active macroblock in this row.
        for (; col < mb_cols; ++col) {
          if (active_map[col])
            break;
        }

        // No more active macroblock in this row.
        if (col == mb_cols)
          break;

        // Find the end of active region in this row.
        active_end = col;

        for (; active_end < mb_cols; ++active_end) {
          if (!active_map[active_end])
            break;
        }

        // Only copy this active region.
        vp9_copy_and_extend_frame_with_rect(src, &buf->img,
                                            row << 4,
                                            col << 4, 16,
                                            (active_end - col) << 4);

        // Start again from the end of this active region.
        col = active_end;
      }

      active_map += mb_cols;
    }
  } else {
    vp9_copy_and_extend_frame(src, &buf->img);
  }
#else
  // Partial copy not implemented yet
  vp9_copy_and_extend_frame(src, &buf->img);
#endif

  buf->ts_start = ts_start;
  buf->ts_end = ts_end;
  buf->flags = flags;
  return 0;
}
163
164
vp9_lookahead_pop(struct lookahead_ctx * ctx,int drain)165 struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx,
166 int drain) {
167 struct lookahead_entry *buf = NULL;
168
169 if (ctx->sz && (drain || ctx->sz == ctx->max_sz - MAX_PRE_FRAMES)) {
170 buf = pop(ctx, &ctx->read_idx);
171 ctx->sz--;
172 }
173 return buf;
174 }
175
176
vp9_lookahead_peek(struct lookahead_ctx * ctx,int index)177 struct lookahead_entry *vp9_lookahead_peek(struct lookahead_ctx *ctx,
178 int index) {
179 struct lookahead_entry *buf = NULL;
180
181 if (index >= 0) {
182 // Forward peek
183 if (index < (int)ctx->sz) {
184 index += ctx->read_idx;
185 if (index >= (int)ctx->max_sz)
186 index -= ctx->max_sz;
187 buf = ctx->buf + index;
188 }
189 } else if (index < 0) {
190 // Backward peek
191 if (-index <= MAX_PRE_FRAMES) {
192 index += ctx->read_idx;
193 if (index < 0)
194 index += ctx->max_sz;
195 buf = ctx->buf + index;
196 }
197 }
198
199 return buf;
200 }
201
vp9_lookahead_depth(struct lookahead_ctx * ctx)202 unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx) {
203 return ctx->sz;
204 }
205