/**************************************************************************
 *
 * Copyright 2009 Younes Manton.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <math.h>
#include <assert.h>

#include "util/u_memory.h"
#include "util/u_rect.h"
#include "util/u_sampler.h"
#include "util/u_video.h"

#include "vl_mpeg12_decoder.h"
#include "vl_defines.h"

#define SCALE_FACTOR_SNORM (32768.0f / 256.0f)
#define SCALE_FACTOR_SSCALED (1.0f / 256.0f)

struct format_config {
   enum pipe_format zscan_source_format;
   enum pipe_format idct_source_format;
   enum pipe_format mc_source_format;

   float idct_scale;
   float mc_scale;
};

static const struct format_config bitstream_format_config[] = {
//   { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
//   { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
};

static const unsigned num_bitstream_format_configs =
   sizeof(bitstream_format_config) / sizeof(struct format_config);

static const struct format_config idct_format_config[] = {
//   { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
//   { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SNORM },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, PIPE_FORMAT_R16G16B16A16_SNORM, 1.0f, SCALE_FACTOR_SNORM }
};

static const unsigned num_idct_format_configs =
   sizeof(idct_format_config) / sizeof(struct format_config);

static const struct format_config mc_format_config[] = {
   //{ PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SSCALED, 0.0f, SCALE_FACTOR_SSCALED },
   { PIPE_FORMAT_R16_SNORM, PIPE_FORMAT_NONE, PIPE_FORMAT_R16_SNORM, 0.0f, SCALE_FACTOR_SNORM }
};

static const unsigned num_mc_format_configs =
   sizeof(mc_format_config) / sizeof(struct format_config);

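/* coded_block_pattern bits for 4:2:0 content, indexed as [component][y][x];
   component 0 covers the four luma blocks, 1 and 2 the Cb and Cr blocks */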
static const unsigned const_empty_block_mask_420[3][2][2] = {
   { { 0x20, 0x10 },  { 0x08, 0x04 } },
   { { 0x02, 0x02 },  { 0x02, 0x02 } },
   { { 0x01, 0x01 },  { 0x01, 0x01 } }
};

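/* create the texture that receives the zig-zag ordered coefficients and
   hook it up as source for the per component zscan stages */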
static bool
init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
{
   struct pipe_resource *res, res_tmpl;
   struct pipe_sampler_view sv_tmpl;
   struct pipe_surface **destination;

   unsigned i;

   assert(dec && buffer);

   memset(&res_tmpl, 0, sizeof(res_tmpl));
   res_tmpl.target = PIPE_TEXTURE_2D;
   res_tmpl.format = dec->zscan_source_format;
   res_tmpl.width0 = dec->blocks_per_line * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
   res_tmpl.height0 = align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line;
   res_tmpl.depth0 = 1;
   res_tmpl.array_size = 1;
   res_tmpl.usage = PIPE_USAGE_STREAM;
   res_tmpl.bind = PIPE_BIND_SAMPLER_VIEW;

   res = dec->base.context->screen->resource_create(dec->base.context->screen, &res_tmpl);
   if (!res)
      goto error_source;

   memset(&sv_tmpl, 0, sizeof(sv_tmpl));
   u_sampler_view_default_template(&sv_tmpl, res, res->format);
   sv_tmpl.swizzle_r = sv_tmpl.swizzle_g = sv_tmpl.swizzle_b = sv_tmpl.swizzle_a = PIPE_SWIZZLE_RED;
   buffer->zscan_source = dec->base.context->create_sampler_view(dec->base.context, res, &sv_tmpl);
   pipe_resource_reference(&res, NULL);
   if (!buffer->zscan_source)
      goto error_sampler;

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
      destination = dec->idct_source->get_surfaces(dec->idct_source);
   else
      destination = dec->mc_source->get_surfaces(dec->mc_source);

   if (!destination)
      goto error_surface;

   for (i = 0; i < VL_NUM_COMPONENTS; ++i)
      if (!vl_zscan_init_buffer(i == 0 ? &dec->zscan_y : &dec->zscan_c,
                                &buffer->zscan[i], buffer->zscan_source, destination[i]))
         goto error_plane;

   return true;

error_plane:
   for (; i > 0; --i)
      vl_zscan_cleanup_buffer(&buffer->zscan[i - 1]);

error_surface:
error_sampler:
   pipe_sampler_view_reference(&buffer->zscan_source, NULL);

error_source:
   return false;
}

static void
cleanup_zscan_buffer(struct vl_mpeg12_buffer *buffer)
{
   unsigned i;

   assert(buffer);

   for (i = 0; i < VL_NUM_COMPONENTS; ++i)
      vl_zscan_cleanup_buffer(&buffer->zscan[i]);

   pipe_sampler_view_reference(&buffer->zscan_source, NULL);
}

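/* create the per plane IDCT buffers, reading from the zscan output in
   idct_source and writing into the mc_source buffer */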
static bool
init_idct_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
{
   struct pipe_sampler_view **idct_source_sv, **mc_source_sv;

   unsigned i;

   assert(dec && buffer);

   idct_source_sv = dec->idct_source->get_sampler_view_planes(dec->idct_source);
   if (!idct_source_sv)
      goto error_source_sv;

   mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
   if (!mc_source_sv)
      goto error_mc_source_sv;

   for (i = 0; i < 3; ++i)
      if (!vl_idct_init_buffer(i == 0 ? &dec->idct_y : &dec->idct_c,
                               &buffer->idct[i], idct_source_sv[i],
                               mc_source_sv[i]))
         goto error_plane;

   return true;

error_plane:
   for (; i > 0; --i)
      vl_idct_cleanup_buffer(&buffer->idct[i - 1]);

error_mc_source_sv:
error_source_sv:
   return false;
}

static void
cleanup_idct_buffer(struct vl_mpeg12_buffer *buf)
{
   unsigned i;

   assert(buf);

   for (i = 0; i < 3; ++i)
      vl_idct_cleanup_buffer(&buf->idct[i]);
}

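/* create one MC buffer for luma and one for each chroma plane */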
static bool
init_mc_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buf)
{
   assert(dec && buf);

   if (!vl_mc_init_buffer(&dec->mc_y, &buf->mc[0]))
      goto error_mc_y;

   if (!vl_mc_init_buffer(&dec->mc_c, &buf->mc[1]))
      goto error_mc_cb;

   if (!vl_mc_init_buffer(&dec->mc_c, &buf->mc[2]))
      goto error_mc_cr;

   return true;

error_mc_cr:
   vl_mc_cleanup_buffer(&buf->mc[1]);

error_mc_cb:
   vl_mc_cleanup_buffer(&buf->mc[0]);

error_mc_y:
   return false;
}

static void
cleanup_mc_buffer(struct vl_mpeg12_buffer *buf)
{
   unsigned i;

   assert(buf);

   for (i = 0; i < VL_NUM_COMPONENTS; ++i)
      vl_mc_cleanup_buffer(&buf->mc[i]);
}

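/* translate the forward/backward motion flags of a macroblock into the
   blend weights used for the two reference frames */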
static INLINE void
MacroBlockTypeToPipeWeights(const struct pipe_mpeg12_macroblock *mb, unsigned weights[2])
{
   assert(mb);

   switch (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
   case PIPE_MPEG12_MB_TYPE_MOTION_FORWARD:
      weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
      weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
      break;

   case (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD):
      weights[0] = PIPE_VIDEO_MV_WEIGHT_HALF;
      weights[1] = PIPE_VIDEO_MV_WEIGHT_HALF;
      break;

   case PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD:
      weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
      weights[1] = PIPE_VIDEO_MV_WEIGHT_MAX;
      break;

   default:
      if (mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA) {
         weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
         weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
      } else {
         /* no motion vector, but also not an intra mb ->
            just copy the old frame content */
         weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
         weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
      }
      break;
   }
}

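/* convert one of the macroblock's PMVs into the top/bottom field motion
   vectors consumed by the MC stage */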
static INLINE struct vl_motionvector
MotionVectorToPipe(const struct pipe_mpeg12_macroblock *mb, unsigned vector,
                   unsigned field_select_mask, unsigned weight)
{
   struct vl_motionvector mv;

   assert(mb);

   if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
      switch (mb->macroblock_modes.bits.frame_motion_type) {
      case PIPE_MPEG12_MO_TYPE_FRAME:
         mv.top.x = mb->PMV[0][vector][0];
         mv.top.y = mb->PMV[0][vector][1];
         mv.top.field_select = PIPE_VIDEO_FRAME;
         mv.top.weight = weight;

         mv.bottom.x = mb->PMV[0][vector][0];
         mv.bottom.y = mb->PMV[0][vector][1];
         mv.bottom.weight = weight;
         mv.bottom.field_select = PIPE_VIDEO_FRAME;
         break;

      case PIPE_MPEG12_MO_TYPE_FIELD:
         mv.top.x = mb->PMV[0][vector][0];
         mv.top.y = mb->PMV[0][vector][1];
         mv.top.field_select = (mb->motion_vertical_field_select & field_select_mask) ?
            PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
         mv.top.weight = weight;

         mv.bottom.x = mb->PMV[1][vector][0];
         mv.bottom.y = mb->PMV[1][vector][1];
         mv.bottom.field_select = (mb->motion_vertical_field_select & (field_select_mask << 2)) ?
            PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
         mv.bottom.weight = weight;
         break;

      default: // TODO: Support DUALPRIME and 16x8
         break;
      }
   } else {
      mv.top.x = mv.top.y = 0;
      mv.top.field_select = PIPE_VIDEO_FRAME;
      mv.top.weight = weight;

      mv.bottom.x = mv.bottom.y = 0;
      mv.bottom.field_select = PIPE_VIDEO_FRAME;
      mv.bottom.weight = weight;
   }
   return mv;
}

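/* append the coded 8x8 blocks of a macroblock to the ycbcr streams and
   copy their coefficients into the mapped zscan source texture */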
static INLINE void
UploadYcbcrBlocks(struct vl_mpeg12_decoder *dec,
                  struct vl_mpeg12_buffer *buf,
                  const struct pipe_mpeg12_macroblock *mb)
{
   unsigned intra;
   unsigned tb, x, y, num_blocks = 0;

   assert(dec && buf);
   assert(mb);

   if (!mb->coded_block_pattern)
      return;

   intra = mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA ? 1 : 0;

   for (y = 0; y < 2; ++y) {
      for (x = 0; x < 2; ++x) {
         if (mb->coded_block_pattern & const_empty_block_mask_420[0][y][x]) {

            struct vl_ycbcr_block *stream = buf->ycbcr_stream[0];
            stream->x = mb->x * 2 + x;
            stream->y = mb->y * 2 + y;
            stream->intra = intra;
            stream->coding = mb->macroblock_modes.bits.dct_type;
            stream->block_num = buf->block_num++;

            buf->num_ycbcr_blocks[0]++;
            buf->ycbcr_stream[0]++;

            num_blocks++;
         }
      }
   }

   /* TODO: Implement 422, 444 */
   //assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);

   for (tb = 1; tb < 3; ++tb) {
      if (mb->coded_block_pattern & const_empty_block_mask_420[tb][0][0]) {

         struct vl_ycbcr_block *stream = buf->ycbcr_stream[tb];
         stream->x = mb->x;
         stream->y = mb->y;
         stream->intra = intra;
         stream->coding = 0;
         stream->block_num = buf->block_num++;

         buf->num_ycbcr_blocks[tb]++;
         buf->ycbcr_stream[tb]++;

         num_blocks++;
      }
   }

   memcpy(buf->texels, mb->blocks, 64 * sizeof(short) * num_blocks);
   buf->texels += 64 * num_blocks;
}

static void
vl_mpeg12_destroy_buffer(void *buffer)
{
   struct vl_mpeg12_buffer *buf = buffer;

   assert(buf);

   cleanup_zscan_buffer(buf);
   cleanup_idct_buffer(buf);
   cleanup_mc_buffer(buf);
   vl_vb_cleanup(&buf->vertex_stream);

   FREE(buf);
}

static void
vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
   unsigned i;

   assert(decoder);

   /* Asserted in softpipe_delete_fs_state() for some reason */
   dec->base.context->bind_vs_state(dec->base.context, NULL);
   dec->base.context->bind_fs_state(dec->base.context, NULL);

   dec->base.context->delete_depth_stencil_alpha_state(dec->base.context, dec->dsa);
   dec->base.context->delete_sampler_state(dec->base.context, dec->sampler_ycbcr);

   vl_mc_cleanup(&dec->mc_y);
   vl_mc_cleanup(&dec->mc_c);
   dec->mc_source->destroy(dec->mc_source);

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      vl_idct_cleanup(&dec->idct_y);
      vl_idct_cleanup(&dec->idct_c);
      dec->idct_source->destroy(dec->idct_source);
   }

   vl_zscan_cleanup(&dec->zscan_y);
   vl_zscan_cleanup(&dec->zscan_c);

   dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
   dec->base.context->delete_vertex_elements_state(dec->base.context, dec->ves_mv);

   pipe_resource_reference(&dec->quads.buffer, NULL);
   pipe_resource_reference(&dec->pos.buffer, NULL);

   pipe_sampler_view_reference(&dec->zscan_linear, NULL);
   pipe_sampler_view_reference(&dec->zscan_normal, NULL);
   pipe_sampler_view_reference(&dec->zscan_alternate, NULL);

   for (i = 0; i < 4; ++i)
      if (dec->dec_buffers[i])
         vl_mpeg12_destroy_buffer(dec->dec_buffers[i]);

   FREE(dec);
}

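/* return the decode buffer for this target: either the one already
   associated with it, the recycled one for the current slot, or a
   freshly allocated one */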
static struct vl_mpeg12_buffer *
vl_mpeg12_get_decode_buffer(struct vl_mpeg12_decoder *dec, struct pipe_video_buffer *target)
{
   struct vl_mpeg12_buffer *buffer;

   assert(dec);

   buffer = vl_video_buffer_get_associated_data(target, &dec->base);
   if (buffer)
      return buffer;

   buffer = dec->dec_buffers[dec->current_buffer];
   if (buffer)
      return buffer;

   buffer = CALLOC_STRUCT(vl_mpeg12_buffer);
   if (buffer == NULL)
      return NULL;

   if (!vl_vb_init(&buffer->vertex_stream, dec->base.context,
                   dec->base.width / VL_MACROBLOCK_WIDTH,
                   dec->base.height / VL_MACROBLOCK_HEIGHT))
      goto error_vertex_buffer;

   if (!init_mc_buffer(dec, buffer))
      goto error_mc;

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
      if (!init_idct_buffer(dec, buffer))
         goto error_idct;

   if (!init_zscan_buffer(dec, buffer))
      goto error_zscan;

   if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
      vl_mpg12_bs_init(&buffer->bs, &dec->base);

   if (dec->expect_chunked_decode)
      vl_video_buffer_set_associated_data(target, &dec->base,
                                          buffer, vl_mpeg12_destroy_buffer);
   else
      dec->dec_buffers[dec->current_buffer] = buffer;

   return buffer;

error_zscan:
   cleanup_idct_buffer(buffer);

error_idct:
   cleanup_mc_buffer(buffer);

error_mc:
   vl_vb_cleanup(&buffer->vertex_stream);

error_vertex_buffer:
   FREE(buffer);
   return NULL;
}

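/* upload the quantization matrices and map the vertex stream and the
   zscan source texture so the decode stage can fill them */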
static void
vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder,
                      struct pipe_video_buffer *target,
                      struct pipe_picture_desc *picture)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
   struct vl_mpeg12_buffer *buf;

   struct pipe_resource *tex;
   struct pipe_box rect = { 0, 0, 0, 1, 1, 1 };

   uint8_t intra_matrix[64];
   uint8_t non_intra_matrix[64];

   unsigned i;

   assert(dec && target && picture);

   buf = vl_mpeg12_get_decode_buffer(dec, target);
   assert(buf);

   if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
      memcpy(intra_matrix, desc->intra_matrix, sizeof(intra_matrix));
      memcpy(non_intra_matrix, desc->non_intra_matrix, sizeof(non_intra_matrix));
      intra_matrix[0] = 1 << (7 - desc->intra_dc_precision);
   } else {
      memset(intra_matrix, 0x10, sizeof(intra_matrix));
      memset(non_intra_matrix, 0x10, sizeof(non_intra_matrix));
   }

   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
      struct vl_zscan *zscan = i == 0 ? &dec->zscan_y : &dec->zscan_c;
      vl_zscan_upload_quant(zscan, &buf->zscan[i], intra_matrix, true);
      vl_zscan_upload_quant(zscan, &buf->zscan[i], non_intra_matrix, false);
   }

   vl_vb_map(&buf->vertex_stream, dec->base.context);

   tex = buf->zscan_source->texture;
   rect.width = tex->width0;
   rect.height = tex->height0;

   buf->tex_transfer = dec->base.context->get_transfer
   (
      dec->base.context, tex,
      0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
      &rect
   );

   buf->block_num = 0;
   buf->texels = dec->base.context->transfer_map(dec->base.context, buf->tex_transfer);

   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
      buf->ycbcr_stream[i] = vl_vb_get_ycbcr_stream(&buf->vertex_stream, i);
      buf->num_ycbcr_blocks[i] = 0;
   }

   for (i = 0; i < VL_MAX_REF_FRAMES; ++i)
      buf->mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i);

   if (dec->base.entrypoint >= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      for (i = 0; i < VL_NUM_COMPONENTS; ++i)
         vl_zscan_set_layout(&buf->zscan[i], dec->zscan_linear);
   }
}

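/* turn the application supplied macroblocks into ycbcr block and motion
   vector streams; skipped macroblocks are expanded as described in
   section 7.6.6 of the spec */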
static void
vl_mpeg12_decode_macroblock(struct pipe_video_decoder *decoder,
                            struct pipe_video_buffer *target,
                            struct pipe_picture_desc *picture,
                            const struct pipe_macroblock *macroblocks,
                            unsigned num_macroblocks)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks;
   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
   struct vl_mpeg12_buffer *buf;

   unsigned i, j, mv_weights[2];

   assert(dec && target && picture);
   assert(macroblocks && macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);

   buf = vl_mpeg12_get_decode_buffer(dec, target);
   assert(buf);

   for (; num_macroblocks > 0; --num_macroblocks) {
      unsigned mb_addr = mb->y * dec->width_in_macroblocks + mb->x;

      if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_PATTERN | PIPE_MPEG12_MB_TYPE_INTRA))
         UploadYcbcrBlocks(dec, buf, mb);

      MacroBlockTypeToPipeWeights(mb, mv_weights);

      for (i = 0; i < 2; ++i) {
         if (!desc->ref[i]) continue;

         buf->mv_stream[i][mb_addr] = MotionVectorToPipe
         (
            mb, i,
            i ? PIPE_MPEG12_FS_FIRST_BACKWARD : PIPE_MPEG12_FS_FIRST_FORWARD,
            mv_weights[i]
         );
      }

      /* see section 7.6.6 of the spec */
      if (mb->num_skipped_macroblocks > 0) {
         struct vl_motionvector skipped_mv[2];

         if (desc->ref[0] && !desc->ref[1]) {
            skipped_mv[0].top.x = skipped_mv[0].top.y = 0;
            skipped_mv[0].top.weight = PIPE_VIDEO_MV_WEIGHT_MAX;
         } else {
            skipped_mv[0] = buf->mv_stream[0][mb_addr];
            skipped_mv[1] = buf->mv_stream[1][mb_addr];
         }
         skipped_mv[0].top.field_select = PIPE_VIDEO_FRAME;
         skipped_mv[1].top.field_select = PIPE_VIDEO_FRAME;

         skipped_mv[0].bottom = skipped_mv[0].top;
         skipped_mv[1].bottom = skipped_mv[1].top;

         ++mb_addr;
         for (i = 0; i < mb->num_skipped_macroblocks; ++i, ++mb_addr) {
            for (j = 0; j < 2; ++j) {
               if (!desc->ref[j]) continue;
               buf->mv_stream[j][mb_addr] = skipped_mv[j];
            }
         }
      }

      ++mb;
   }
}

static void
vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder,
                           struct pipe_video_buffer *target,
                           struct pipe_picture_desc *picture,
                           unsigned num_buffers,
                           const void * const *buffers,
                           const unsigned *sizes)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
   struct vl_mpeg12_buffer *buf;

   unsigned i;

   assert(dec && target && picture);

   buf = vl_mpeg12_get_decode_buffer(dec, target);
   assert(buf);

   for (i = 0; i < VL_NUM_COMPONENTS; ++i)
      vl_zscan_set_layout(&buf->zscan[i], desc->alternate_scan ?
                          dec->zscan_alternate : dec->zscan_normal);

   vl_mpg12_bs_decode(&buf->bs, target, desc, num_buffers, buffers, sizes);
}

static void
vl_mpeg12_end_frame(struct pipe_video_decoder *decoder,
                    struct pipe_video_buffer *target,
                    struct pipe_picture_desc *picture)
{
   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
   struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc *)picture;
   struct pipe_sampler_view **ref_frames[2];
   struct pipe_sampler_view **mc_source_sv;
   struct pipe_surface **target_surfaces;
   struct pipe_vertex_buffer vb[3];
   struct vl_mpeg12_buffer *buf;

   const unsigned *plane_order;
   unsigned i, j, component;
   unsigned nr_components;

   assert(dec && target && picture);
   assert(!target->interlaced);

   buf = vl_mpeg12_get_decode_buffer(dec, target);

   vl_vb_unmap(&buf->vertex_stream, dec->base.context);

   dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer);
   dec->base.context->transfer_destroy(dec->base.context, buf->tex_transfer);

   vb[0] = dec->quads;
   vb[1] = dec->pos;

   target_surfaces = target->get_surfaces(target);

   for (i = 0; i < VL_MAX_REF_FRAMES; ++i) {
      if (desc->ref[i])
         ref_frames[i] = desc->ref[i]->get_sampler_view_planes(desc->ref[i]);
      else
         ref_frames[i] = NULL;
   }

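   /* first pass: render the motion compensated references into the target surfaces */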
   dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv);
   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
      if (!target_surfaces[i]) continue;

      vl_mc_set_surface(&buf->mc[i], target_surfaces[i]);

      for (j = 0; j < VL_MAX_REF_FRAMES; ++j) {
         if (!ref_frames[j] || !ref_frames[j][i]) continue;

         vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);
         dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);

         vl_mc_render_ref(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], ref_frames[j][i]);
      }
   }

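   /* second pass: inverse zig-zag scan and, when enabled, the first IDCT stage */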
   dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
      if (!buf->num_ycbcr_blocks[i]) continue;

      vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
      dec->base.context->set_vertex_buffers(dec->base.context, 2, vb);

      vl_zscan_render(i ? &dec->zscan_c : &dec->zscan_y, &buf->zscan[i], buf->num_ycbcr_blocks[i]);

      if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
         vl_idct_flush(i ? &dec->idct_c : &dec->idct_y, &buf->idct[i], buf->num_ycbcr_blocks[i]);
   }

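   /* final pass: add the block data on top of the motion compensated
      prediction, following the plane order of the target format */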
   plane_order = vl_video_buffer_plane_order(target->buffer_format);
   mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
   for (i = 0, component = 0; component < VL_NUM_COMPONENTS; ++i) {
      if (!target_surfaces[i]) continue;

      nr_components = util_format_get_nr_components(target_surfaces[i]->texture->format);
      for (j = 0; j < nr_components; ++j, ++component) {
         unsigned plane = plane_order[component];
         if (!buf->num_ycbcr_blocks[plane]) continue;

         vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, plane);
         dec->base.context->set_vertex_buffers(dec->base.context, 2, vb);

         if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
            vl_idct_prepare_stage2(i ? &dec->idct_c : &dec->idct_y, &buf->idct[plane]);
         else {
            dec->base.context->set_fragment_sampler_views(dec->base.context, 1, &mc_source_sv[plane]);
            dec->base.context->bind_fragment_sampler_states(dec->base.context, 1, &dec->sampler_ycbcr);
         }
         vl_mc_render_ycbcr(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], j, buf->num_ycbcr_blocks[plane]);
      }
   }
   ++dec->current_buffer;
   dec->current_buffer %= 4;
}

static void
vl_mpeg12_flush(struct pipe_video_decoder *decoder)
{
   assert(decoder);

   // Noop, for shaders it is much faster to flush everything in end_frame
}

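/* create the shared pipe state: a depth/stencil/alpha state with everything
   disabled and a nearest filtered sampler for the ycbcr source */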
static bool
init_pipe_state(struct vl_mpeg12_decoder *dec)
{
   struct pipe_depth_stencil_alpha_state dsa;
   struct pipe_sampler_state sampler;
   unsigned i;

   assert(dec);

   memset(&dsa, 0, sizeof dsa);
   dsa.depth.enabled = 0;
   dsa.depth.writemask = 0;
   dsa.depth.func = PIPE_FUNC_ALWAYS;
   for (i = 0; i < 2; ++i) {
      dsa.stencil[i].enabled = 0;
      dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
      dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].valuemask = 0;
      dsa.stencil[i].writemask = 0;
   }
   dsa.alpha.enabled = 0;
   dsa.alpha.func = PIPE_FUNC_ALWAYS;
   dsa.alpha.ref_value = 0;
   dec->dsa = dec->base.context->create_depth_stencil_alpha_state(dec->base.context, &dsa);
   dec->base.context->bind_depth_stencil_alpha_state(dec->base.context, dec->dsa);

   memset(&sampler, 0, sizeof(sampler));
   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
   sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
   sampler.compare_func = PIPE_FUNC_ALWAYS;
   sampler.normalized_coords = 1;
   dec->sampler_ycbcr = dec->base.context->create_sampler_state(dec->base.context, &sampler);
   if (!dec->sampler_ycbcr)
      return false;

   return true;
}

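/* pick the first configuration whose formats the screen supports with the
   required sampler view and render target bindings */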
static const struct format_config*
find_format_config(struct vl_mpeg12_decoder *dec, const struct format_config configs[], unsigned num_configs)
{
   struct pipe_screen *screen;
   unsigned i;

   assert(dec);

   screen = dec->base.context->screen;

   for (i = 0; i < num_configs; ++i) {
      if (!screen->is_format_supported(screen, configs[i].zscan_source_format, PIPE_TEXTURE_2D,
                                       1, PIPE_BIND_SAMPLER_VIEW))
         continue;

      if (configs[i].idct_source_format != PIPE_FORMAT_NONE) {
         if (!screen->is_format_supported(screen, configs[i].idct_source_format, PIPE_TEXTURE_2D,
                                          1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
            continue;

         if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_3D,
                                          1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
            continue;
      } else {
         if (!screen->is_format_supported(screen, configs[i].mc_source_format, PIPE_TEXTURE_2D,
                                          1, PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET))
            continue;
      }
      return &configs[i];
   }

   return NULL;
}

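/* create the zig-zag scan layouts and the luma/chroma scan components */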
static bool
init_zscan(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
{
   unsigned num_channels;

   assert(dec);

   dec->zscan_source_format = format_config->zscan_source_format;
   dec->zscan_linear = vl_zscan_layout(dec->base.context, vl_zscan_linear, dec->blocks_per_line);
   dec->zscan_normal = vl_zscan_layout(dec->base.context, vl_zscan_normal, dec->blocks_per_line);
   dec->zscan_alternate = vl_zscan_layout(dec->base.context, vl_zscan_alternate, dec->blocks_per_line);

   num_channels = dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT ? 4 : 1;

   if (!vl_zscan_init(&dec->zscan_y, dec->base.context, dec->base.width, dec->base.height,
                      dec->blocks_per_line, dec->num_blocks, num_channels))
      return false;

   if (!vl_zscan_init(&dec->zscan_c, dec->base.context, dec->chroma_width, dec->chroma_height,
                      dec->blocks_per_line, dec->num_blocks, num_channels))
      return false;

   return true;
}

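/* set up the two stage IDCT: a buffer for the scanned coefficients, an
   intermediate buffer shared with MC and the transformation matrix */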
static bool
init_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
{
   unsigned nr_of_idct_render_targets, max_inst;
   enum pipe_format formats[3];
   struct pipe_video_buffer templat;

   struct pipe_sampler_view *matrix = NULL;

   nr_of_idct_render_targets = dec->base.context->screen->get_param
   (
      dec->base.context->screen, PIPE_CAP_MAX_RENDER_TARGETS
   );

   max_inst = dec->base.context->screen->get_shader_param
   (
      dec->base.context->screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_MAX_INSTRUCTIONS
   );

   // Just assume we need 32 instructions per render target; not 100% accurate, but it should work in most cases
   if (nr_of_idct_render_targets >= 4 && max_inst >= 32*4)
      // more than 4 render targets usually doesn't make any sense
      nr_of_idct_render_targets = 4;
   else
      nr_of_idct_render_targets = 1;

   formats[0] = formats[1] = formats[2] = format_config->idct_source_format;
   memset(&templat, 0, sizeof(templat));
   templat.width = dec->base.width / 4;
   templat.height = dec->base.height;
   templat.chroma_format = dec->base.chroma_format;
   dec->idct_source = vl_video_buffer_create_ex
   (
      dec->base.context, &templat,
      formats, 1, PIPE_USAGE_STATIC
   );

   if (!dec->idct_source)
      goto error_idct_source;

   formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
   memset(&templat, 0, sizeof(templat));
   templat.width = dec->base.width / nr_of_idct_render_targets;
   templat.height = dec->base.height / 4;
   templat.chroma_format = dec->base.chroma_format;
   dec->mc_source = vl_video_buffer_create_ex
   (
      dec->base.context, &templat,
      formats, nr_of_idct_render_targets, PIPE_USAGE_STATIC
   );

   if (!dec->mc_source)
      goto error_mc_source;

   if (!(matrix = vl_idct_upload_matrix(dec->base.context, format_config->idct_scale)))
      goto error_matrix;

   if (!vl_idct_init(&dec->idct_y, dec->base.context, dec->base.width, dec->base.height,
                     nr_of_idct_render_targets, matrix, matrix))
      goto error_y;

   if (!vl_idct_init(&dec->idct_c, dec->base.context, dec->chroma_width, dec->chroma_height,
                     nr_of_idct_render_targets, matrix, matrix))
      goto error_c;

   pipe_sampler_view_reference(&matrix, NULL);

   return true;

error_c:
   vl_idct_cleanup(&dec->idct_y);

error_y:
   pipe_sampler_view_reference(&matrix, NULL);

error_matrix:
   dec->mc_source->destroy(dec->mc_source);

error_mc_source:
   dec->idct_source->destroy(dec->idct_source);

error_idct_source:
   return false;
}

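/* without IDCT the zscan output goes straight into the MC source buffer,
   so a single full resolution buffer is enough */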
static bool
init_mc_source_without_idct(struct vl_mpeg12_decoder *dec, const struct format_config* format_config)
{
   enum pipe_format formats[3];
   struct pipe_video_buffer templat;

   formats[0] = formats[1] = formats[2] = format_config->mc_source_format;
   memset(&templat, 0, sizeof(templat));
   templat.width = dec->base.width;
   templat.height = dec->base.height;
   templat.chroma_format = dec->base.chroma_format;
   dec->mc_source = vl_video_buffer_create_ex
   (
      dec->base.context, &templat,
      formats, 1, PIPE_USAGE_STATIC
   );

   return dec->mc_source != NULL;
}

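/* shader callbacks for the MC stage: with IDCT enabled they emit the second
   IDCT stage, otherwise they simply fetch the block data from mc_source */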
static void
mc_vert_shader_callback(void *priv, struct vl_mc *mc,
                        struct ureg_program *shader,
                        unsigned first_output,
                        struct ureg_dst tex)
{
   struct vl_mpeg12_decoder *dec = priv;
   struct ureg_dst o_vtex;

   assert(priv && mc);
   assert(shader);

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
      vl_idct_stage2_vert_shader(idct, shader, first_output, tex);
   } else {
      o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output);
      ureg_MOV(shader, ureg_writemask(o_vtex, TGSI_WRITEMASK_XY), ureg_src(tex));
   }
}

static void
mc_frag_shader_callback(void *priv, struct vl_mc *mc,
                        struct ureg_program *shader,
                        unsigned first_input,
                        struct ureg_dst dst)
{
   struct vl_mpeg12_decoder *dec = priv;
   struct ureg_src src, sampler;

   assert(priv && mc);
   assert(shader);

   if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      struct vl_idct *idct = mc == &dec->mc_y ? &dec->idct_y : &dec->idct_c;
      vl_idct_stage2_frag_shader(idct, shader, first_input, dst);
   } else {
      src = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, first_input, TGSI_INTERPOLATE_LINEAR);
      sampler = ureg_DECL_sampler(shader, 0);
      ureg_TEX(shader, dst, TGSI_TEXTURE_2D, src, sampler);
   }
}

struct pipe_video_decoder *
vl_create_mpeg12_decoder(struct pipe_context *context,
                         enum pipe_video_profile profile,
                         enum pipe_video_entrypoint entrypoint,
                         enum pipe_video_chroma_format chroma_format,
                         unsigned width, unsigned height, unsigned max_references,
                         bool expect_chunked_decode)
{
   const unsigned block_size_pixels = VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
   const struct format_config *format_config;
   struct vl_mpeg12_decoder *dec;

   assert(u_reduce_video_profile(profile) == PIPE_VIDEO_CODEC_MPEG12);

   dec = CALLOC_STRUCT(vl_mpeg12_decoder);

   if (!dec)
      return NULL;

   dec->base.context = context;
   dec->base.profile = profile;
   dec->base.entrypoint = entrypoint;
   dec->base.chroma_format = chroma_format;
   dec->base.width = width;
   dec->base.height = height;
   dec->base.max_references = max_references;

   dec->base.destroy = vl_mpeg12_destroy;
   dec->base.begin_frame = vl_mpeg12_begin_frame;
   dec->base.decode_macroblock = vl_mpeg12_decode_macroblock;
   dec->base.decode_bitstream = vl_mpeg12_decode_bitstream;
   dec->base.end_frame = vl_mpeg12_end_frame;
   dec->base.flush = vl_mpeg12_flush;

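   /* number of 8x8 blocks packed into one line of the zscan source texture */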
   dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
   dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
   dec->width_in_macroblocks = align(dec->base.width, VL_MACROBLOCK_WIDTH) / VL_MACROBLOCK_WIDTH;
   dec->expect_chunked_decode = expect_chunked_decode;

   /* TODO: Implement 422, 444 */
   assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);

   if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
      dec->chroma_width = dec->base.width / 2;
      dec->chroma_height = dec->base.height / 2;
      dec->num_blocks = dec->num_blocks * 2;
   } else if (dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
      dec->chroma_width = dec->base.width;
      dec->chroma_height = dec->base.height / 2;
      dec->num_blocks = dec->num_blocks * 2 + dec->num_blocks;
   } else {
      dec->chroma_width = dec->base.width;
      dec->chroma_height = dec->base.height;
      dec->num_blocks = dec->num_blocks * 3;
   }

   dec->quads = vl_vb_upload_quads(dec->base.context);
   dec->pos = vl_vb_upload_pos(
      dec->base.context,
      dec->base.width / VL_MACROBLOCK_WIDTH,
      dec->base.height / VL_MACROBLOCK_HEIGHT
   );

   dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->base.context);
   dec->ves_mv = vl_vb_get_ves_mv(dec->base.context);

   switch (entrypoint) {
   case PIPE_VIDEO_ENTRYPOINT_BITSTREAM:
      format_config = find_format_config(dec, bitstream_format_config, num_bitstream_format_configs);
      break;

   case PIPE_VIDEO_ENTRYPOINT_IDCT:
      format_config = find_format_config(dec, idct_format_config, num_idct_format_configs);
      break;

   case PIPE_VIDEO_ENTRYPOINT_MC:
      format_config = find_format_config(dec, mc_format_config, num_mc_format_configs);
      break;

   default:
      assert(0);
      FREE(dec);
      return NULL;
   }

   if (!format_config) {
      FREE(dec);
      return NULL;
   }

   if (!init_zscan(dec, format_config))
      goto error_zscan;

   if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      if (!init_idct(dec, format_config))
         goto error_sources;
   } else {
      if (!init_mc_source_without_idct(dec, format_config))
         goto error_sources;
   }

   if (!vl_mc_init(&dec->mc_y, dec->base.context, dec->base.width, dec->base.height,
                   VL_MACROBLOCK_HEIGHT, format_config->mc_scale,
                   mc_vert_shader_callback, mc_frag_shader_callback, dec))
      goto error_mc_y;

   // TODO
   if (!vl_mc_init(&dec->mc_c, dec->base.context, dec->base.width, dec->base.height,
                   VL_BLOCK_HEIGHT, format_config->mc_scale,
                   mc_vert_shader_callback, mc_frag_shader_callback, dec))
      goto error_mc_c;

   if (!init_pipe_state(dec))
      goto error_pipe_state;

   return &dec->base;

error_pipe_state:
   vl_mc_cleanup(&dec->mc_c);

error_mc_c:
   vl_mc_cleanup(&dec->mc_y);

error_mc_y:
   if (entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) {
      vl_idct_cleanup(&dec->idct_y);
      vl_idct_cleanup(&dec->idct_c);
      dec->idct_source->destroy(dec->idct_source);
   }
   dec->mc_source->destroy(dec->mc_source);

error_sources:
   vl_zscan_cleanup(&dec->zscan_y);
   vl_zscan_cleanup(&dec->zscan_c);

error_zscan:
   FREE(dec);
   return NULL;
}