/*
 * This file is auto-generated. Modifications will be lost.
 *
 * See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
 * for more information.
 */
#ifndef __LINUX_VIDEODEV2_EXYNOS_MEDIA_H
#define __LINUX_VIDEODEV2_EXYNOS_MEDIA_H
#include <linux/videodev2.h>
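/* Exynos vendor controls, offset from the V4L2 user control class: buffer
 * cacheability, colorspace conversion (CSC) equation mode/selection/range,
 * and content protection. */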
#define V4L2_CID_EXYNOS_BASE (V4L2_CTRL_CLASS_USER | 0x2000)
#define V4L2_CID_CACHEABLE (V4L2_CID_EXYNOS_BASE + 10)
#define V4L2_CID_CSC_EQ_MODE (V4L2_CID_EXYNOS_BASE + 100)
#define V4L2_CID_CSC_EQ (V4L2_CID_EXYNOS_BASE + 101)
#define V4L2_CID_CSC_RANGE (V4L2_CID_EXYNOS_BASE + 102)
#define V4L2_CID_CONTENT_PROTECTION (V4L2_CID_EXYNOS_BASE + 201)
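/* Vendor pixel formats. By the usual Exynos naming, 'N' variants keep all
 * planes in one buffer, 'M' variants use one buffer per plane, 'NT' is the
 * tiled NV12 layout, S10B formats carry 8-bit data plus a packed 2-bit plane,
 * and P010/P210 store 10-bit samples in 16-bit words. */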
#define V4L2_PIX_FMT_NV12N v4l2_fourcc('N', 'N', '1', '2')
#define V4L2_PIX_FMT_NV12NT v4l2_fourcc('T', 'N', '1', '2')
#define V4L2_PIX_FMT_YUV420N v4l2_fourcc('Y', 'N', '1', '2')
#define V4L2_PIX_FMT_NV12N_10B v4l2_fourcc('B', 'N', '1', '2')
#define V4L2_PIX_FMT_NV12M_S10B v4l2_fourcc('B', 'M', '1', '2')
#define V4L2_PIX_FMT_NV21M_S10B v4l2_fourcc('B', 'M', '2', '1')
#define V4L2_PIX_FMT_NV16M_S10B v4l2_fourcc('B', 'M', '1', '6')
#define V4L2_PIX_FMT_NV61M_S10B v4l2_fourcc('B', 'M', '6', '1')
#define V4L2_PIX_FMT_NV12M_P010 v4l2_fourcc('P', 'M', '1', '2')
#define V4L2_PIX_FMT_NV21M_P010 v4l2_fourcc('P', 'M', '2', '1')
#define V4L2_PIX_FMT_NV16M_P210 v4l2_fourcc('P', 'M', '1', '6')
#define V4L2_PIX_FMT_NV61M_P210 v4l2_fourcc('P', 'M', '6', '1')
#define V4L2_PIX_FMT_NV12N_P010 v4l2_fourcc('N', 'P', '1', '2')
#define V4L2_PIX_FMT_NV12_P010 v4l2_fourcc('P', 'N', '1', '2')
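/* Compressed formats: SBWC (Samsung band-width compression, lossless), SBWCL
 * (its lossy variant) and AFBC (Arm Frame Buffer Compression). */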
#define V4L2_PIX_FMT_NV12M_SBWC_8B v4l2_fourcc('M', '1', 'S', '8')
#define V4L2_PIX_FMT_NV12M_SBWC_10B v4l2_fourcc('M', '1', 'S', '1')
#define V4L2_PIX_FMT_NV21M_SBWC_8B v4l2_fourcc('M', '2', 'S', '8')
#define V4L2_PIX_FMT_NV21M_SBWC_10B v4l2_fourcc('M', '2', 'S', '1')
#define V4L2_PIX_FMT_NV12N_SBWC_8B v4l2_fourcc('N', '1', 'S', '8')
#define V4L2_PIX_FMT_NV12N_SBWC_10B v4l2_fourcc('N', '1', 'S', '1')
#define V4L2_PIX_FMT_NV12M_SBWCL_8B v4l2_fourcc('M', '1', 'L', '8')
#define V4L2_PIX_FMT_NV12M_SBWCL_10B v4l2_fourcc('M', '1', 'L', '1')
#define V4L2_PIX_FMT_NV12N_SBWCL_8B v4l2_fourcc('N', '1', 'L', '8')
#define V4L2_PIX_FMT_NV12N_SBWCL_10B v4l2_fourcc('N', '1', 'L', '1')
#define V4L2_PIX_FMT_NV12M_AFBC_8B v4l2_fourcc('M', '1', 'A', '8')
#define V4L2_PIX_FMT_NV12M_AFBC_10B v4l2_fourcc('M', '1', 'A', '1')
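/* Round x up to the next multiple of a; a must be a power of two. */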
#ifndef __ALIGN_UP
#define __ALIGN_UP(x,a) (((x) + ((a) - 1)) & ~((a) - 1))
#endif
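/* Single-buffer NV12N layout: 64-byte-aligned luma stride, height rounded up
 * to 16 lines, with the CbCr plane placed right after the luma plane. For a
 * 1920x1080 frame this gives a 1920-byte stride, a 1920 * 1088 = 2088960-byte
 * luma plane and a 1044480-byte CbCr plane (illustrative example). */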
#define NV12N_STRIDE(w) (__ALIGN_UP((w), 64))
#define NV12N_Y_SIZE(w,h) (NV12N_STRIDE(w) * __ALIGN_UP((h), 16))
#define NV12N_CBCR_SIZE(w,h) (NV12N_STRIDE(w) * __ALIGN_UP((h), 16) / 2)
#define NV12N_CBCR_BASE(base,w,h) ((base) + NV12N_Y_SIZE((w), (h)))
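/* 8+2-bit NV12N: each plane is split into an 8-bit part and a packed 2-bit
 * part (one byte per four pixels, hence w / 4), each with a small guard area;
 * the CbCr data starts after both luma parts. */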
#define NV12N_10B_Y_8B_SIZE(w,h) (__ALIGN_UP((w), 64) * __ALIGN_UP((h), 16) + 256)
#define NV12N_10B_Y_2B_SIZE(w,h) ((__ALIGN_UP((w) / 4, 16) * __ALIGN_UP((h), 16) + 64))
#define NV12N_10B_CBCR_8B_SIZE(w,h) \
(__ALIGN_UP((__ALIGN_UP((w), 64) * (__ALIGN_UP((h), 16) / 2) + 256), 16))
#define NV12N_10B_CBCR_2B_SIZE(w,h) ((__ALIGN_UP((w) / 4, 16) * (__ALIGN_UP((h), 16) / 2) + 64))
#define NV12N_10B_CBCR_BASE(base,w,h) ((base) + NV12N_10B_Y_8B_SIZE((w), (h)) + NV12N_10B_Y_2B_SIZE((w), (h)))
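/* Three-plane YUV 4:2:0 in a single buffer: Y, then Cb, then Cr, each padded
 * and aligned to 16 bytes. */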
#define YUV420N_Y_SIZE(w,h) (__ALIGN_UP((w), 16) * __ALIGN_UP((h), 16) + 256)
#define YUV420N_CB_SIZE(w,h) \
(__ALIGN_UP((__ALIGN_UP((w) / 2, 16) * (__ALIGN_UP((h), 16) / 2) + 256), 16))
#define YUV420N_CR_SIZE(w,h) \
(__ALIGN_UP((__ALIGN_UP((w) / 2, 16) * (__ALIGN_UP((h), 16) / 2) + 256), 16))
#define YUV420N_CB_BASE(base,w,h) ((base) + YUV420N_Y_SIZE((w), (h)))
#define YUV420N_CR_BASE(base,w,h) (YUV420N_CB_BASE((base), (w), (h)) + YUV420N_CB_SIZE((w), (h)))
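/* Plane sizes for the multi-buffer (M) variants: NV12M is 4:2:0, NV16M is
 * 4:2:2 (full-height chroma); the *_2B_SIZE macros cover the extra 2-bit
 * planes of the S10B formats. */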
#define NV12M_Y_SIZE(w,h) (__ALIGN_UP((w), 64) * __ALIGN_UP((h), 16) + 256)
#define NV12M_CBCR_SIZE(w,h) ((__ALIGN_UP((w), 64) * __ALIGN_UP((h), 16) / 2) + 256)
#define NV12M_Y_2B_SIZE(w,h) (__ALIGN_UP((w / 4), 16) * __ALIGN_UP((h), 16) + 256)
#define NV12M_CBCR_2B_SIZE(w,h) ((__ALIGN_UP((w / 4), 16) * __ALIGN_UP((h), 16) / 2) + 256)
#define NV16M_Y_SIZE(w,h) (__ALIGN_UP((w), 64) * __ALIGN_UP((h), 16) + 256)
#define NV16M_CBCR_SIZE(w,h) (__ALIGN_UP((w), 64) * __ALIGN_UP((h), 16) + 256)
#define NV16M_Y_2B_SIZE(w,h) (__ALIGN_UP((w / 4), 16) * __ALIGN_UP((h), 16) + 256)
#define NV16M_CBCR_2B_SIZE(w,h) (__ALIGN_UP((w / 4), 16) * __ALIGN_UP((h), 16) + 256)
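/* Strides for the 8+2-bit (S10B) formats: 64-byte aligned for the 8-bit data,
 * 16-byte aligned for the packed 2-bit data (one byte per four pixels). */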
#define S10B_8B_STRIDE(w) (__ALIGN_UP((w), 64))
#define S10B_2B_STRIDE(w) (__ALIGN_UP(((w + 3) / 4), 16))
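/* SBWC works on 32x4-pixel blocks: __COUNT_BLOCKS(x, a) is the number of
 * a-sized blocks needed to cover x, payload rows are 64-byte aligned and
 * header rows 16-byte aligned. */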
#define __COUNT_BLOCKS(x,a) (((x) + ((a) - 1)) / (a))
#define SBWC_HEADER_STRIDE_ALIGN 16
#define SBWC_PAYLOAD_STRIDE_ALIGN 64
#define SBWC_BLOCK_WIDTH 32
#define SBWC_BLOCK_HEIGHT 4
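/* Per row of blocks, the payload stride reserves the full uncompressed size of
 * the 32x4 blocks (128 bytes per 8-bit block, 192 per 10-bit block after
 * 64-byte alignment) and the header stride holds one byte per two horizontal
 * blocks. Plane sizes multiply these by the number of 4-line block rows in the
 * aligned plane height (16 lines for 8-bit, 8 lines for 10-bit data), plus
 * small guard areas. */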
#define SBWC_ALIGNED_H(h,a) __ALIGN_UP((h), a)
#define SBWC_H_BLOCKS(w) __COUNT_BLOCKS((w), SBWC_BLOCK_WIDTH)
#define SBWC_8B_STRIDE(w) (__ALIGN_UP((8 / 2) * SBWC_BLOCK_WIDTH, SBWC_PAYLOAD_STRIDE_ALIGN) * SBWC_H_BLOCKS(w))
#define SBWC_10B_STRIDE(w) (__ALIGN_UP((10 / 2) * SBWC_BLOCK_WIDTH, SBWC_PAYLOAD_STRIDE_ALIGN) * SBWC_H_BLOCKS(w))
#define SBWC_HEADER_STRIDE(w) (__ALIGN_UP(__COUNT_BLOCKS(w, SBWC_BLOCK_WIDTH * 2), SBWC_HEADER_STRIDE_ALIGN))
#define SBWC_Y_VSTRIDE_BLOCKS(h,a) __COUNT_BLOCKS(SBWC_ALIGNED_H(h, a), SBWC_BLOCK_HEIGHT)
#define SBWC_CBCR_VSTRIDE_BLOCKS(h,a) __COUNT_BLOCKS(SBWC_ALIGNED_H(h, a) / 2, SBWC_BLOCK_HEIGHT)
#define SBWC_8B_Y_SIZE(w,h) ((SBWC_8B_STRIDE(w) * SBWC_Y_VSTRIDE_BLOCKS(h, 16)) + 64)
#define SBWC_8B_CBCR_SIZE(w,h) ((SBWC_8B_STRIDE(w) * SBWC_CBCR_VSTRIDE_BLOCKS(h, 16)) + 64)
#define SBWC_8B_Y_HEADER_SIZE(w,h) ((SBWC_HEADER_STRIDE(w) * SBWC_Y_VSTRIDE_BLOCKS(h, 16)) + 256)
#define SBWC_8B_CBCR_HEADER_SIZE(w,h) ((SBWC_HEADER_STRIDE(w) * SBWC_CBCR_VSTRIDE_BLOCKS(h, 16)) + 128)
#define SBWC_10B_Y_SIZE(w,h) ((SBWC_10B_STRIDE(w) * SBWC_Y_VSTRIDE_BLOCKS(h, 8)) + 64)
#define SBWC_10B_CBCR_SIZE(w,h) ((SBWC_10B_STRIDE(w) * SBWC_CBCR_VSTRIDE_BLOCKS(h, 8)) + 64)
#define SBWC_10B_Y_HEADER_SIZE(w,h) ((SBWC_HEADER_STRIDE(w) * SBWC_Y_VSTRIDE_BLOCKS(h, 8)) + 256)
#define SBWC_10B_CBCR_HEADER_SIZE(w,h) ((SBWC_HEADER_STRIDE(w) * SBWC_CBCR_VSTRIDE_BLOCKS(h, 8)) + 128)
#define SBWC_8B_CBCR_BASE(base,w,h) ((base) + SBWC_8B_Y_SIZE(w, h) + SBWC_8B_Y_HEADER_SIZE(w, h))
#define SBWC_10B_CBCR_BASE(base,w,h) ((base) + SBWC_10B_Y_SIZE(w, h) + SBWC_10B_Y_HEADER_SIZE(w, h))
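/* Lossy SBWC: r is the target compression ratio expressed as a percentage, so
 * the payload stride is r% of 128 (8-bit) or 160 (10-bit) bytes per 32-pixel
 * block; heights are again counted in 4-line blocks. */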
#define SBWCL_8B_STRIDE(w,r) (((128 * (r)) / 100) * (((w) + 31) / 32))
#define SBWCL_10B_STRIDE(w,r) (((160 * (r)) / 100) * (((w) + 31) / 32))
#define SBWCL_8B_Y_SIZE(w,h,r) ((SBWCL_8B_STRIDE(w, r) * ((__ALIGN_UP((h), 16) + 3) / 4)) + 64)
#define SBWCL_8B_CBCR_SIZE(w,h,r) ((SBWCL_8B_STRIDE(w, r) * (((__ALIGN_UP((h), 16) / 2) + 3) / 4)) + 64)
#define SBWCL_10B_Y_SIZE(w,h,r) ((SBWCL_10B_STRIDE(w, r) * ((__ALIGN_UP((h), 16) + 3) / 4)) + 64)
#define SBWCL_10B_CBCR_SIZE(w,h,r) ((SBWCL_10B_STRIDE(w, r) * (((__ALIGN_UP((h), 16) / 2) + 3) / 4)) + 64)
#define SBWCL_8B_CBCR_BASE(base,w,h,r) ((base) + SBWCL_8B_Y_SIZE(w, h, r))
#define SBWCL_10B_CBCR_BASE(base,w,h,r) ((base) + SBWCL_10B_Y_SIZE(w, h, r))
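/* AFBC buffers: a table of 16-byte headers, one per 32x8 block, padded to a
 * 128-byte boundary, followed by 384 (8-bit) or 512 (10-bit) bytes of payload
 * per block. */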
#define AFBC_8B_STRIDE(w) __ALIGN_UP(w, 16)
#define AFBC_10B_STRIDE(w) __ALIGN_UP(w * 2, 16)
#define AFBC_8B_Y_SIZE(w,h) ((((((w) + 31) / 32) * (((h) + 7) / 8) * 16 + 127) / 128) * 128 + (((w) + 31) / 32) * (((h) + 7) / 8) * 384)
#define AFBC_10B_Y_SIZE(w,h) ((((((w) + 31) / 32) * (((h) + 7) / 8) * 16 + 127) / 128) * 128 + (((w) + 31) / 32) * (((h) + 7) / 8) * 512)
#endif