1 #ifndef _UAPI_MSM_KGSL_H
2 #define _UAPI_MSM_KGSL_H
3 
4 /*
5  * The KGSL version has proven not to be very useful in userspace if features
6  * are cherry picked into other trees out of order so it is frozen as of 3.14.
 * It is left here for backwards compatibility and as a reminder that
8  * software releases are never linear. Also, I like pie.
9  */
10 
11 #define KGSL_VERSION_MAJOR        3
12 #define KGSL_VERSION_MINOR        14
13 
14 /*
15  * We have traditionally mixed context and issueibcmds / command batch flags
16  * together into a big flag stew. This worked fine until we started adding a
17  * lot more command batch flags and we started running out of bits. Turns out
18  * we have a bit of room in the context type / priority mask that we could use
19  * for command batches, but that means we need to split out the flags into two
20  * coherent sets.
21  *
22  * If any future definitions are for both context and cmdbatch add both defines
23  * and link the cmdbatch to the context define as we do below. Otherwise feel
24  * free to add exclusive bits to either set.
25  */
26 
/* --- context flags --- */
#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
#define KGSL_CONTEXT_PREAMBLE		0x00000010
#define KGSL_CONTEXT_TRASH_STATE	0x00000020
#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SYNC               0x00000400
#define KGSL_CONTEXT_PWR_CONSTRAINT     0x00000800

/*
 * Context priority is a 4-bit field at bits [12:15] of the context flags.
 * 0 (KGSL_CONTEXT_PRIORITY_UNDEF) means the caller expressed no preference.
 */
#define KGSL_CONTEXT_PRIORITY_MASK      0x0000F000
#define KGSL_CONTEXT_PRIORITY_SHIFT     12
#define KGSL_CONTEXT_PRIORITY_UNDEF     0

#define KGSL_CONTEXT_IFH_NOP            0x00010000
#define KGSL_CONTEXT_SECURE             0x00020000

/*
 * Context type is a 5-bit field at bits [20:24]; values below name the
 * client API the context belongs to (hint only).
 */
#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
#define KGSL_CONTEXT_TYPE_SHIFT         20
#define KGSL_CONTEXT_TYPE_ANY		0
#define KGSL_CONTEXT_TYPE_GL		1
#define KGSL_CONTEXT_TYPE_CL		2
#define KGSL_CONTEXT_TYPE_C2D		3
#define KGSL_CONTEXT_TYPE_RS		4
#define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E

/* Sentinel for "no context" / invalid context id */
#define KGSL_CONTEXT_INVALID 0xffffffff
61 
/*
 * --- command batch flags ---
 * The bits that are linked to a KGSL_CONTEXT equivalent are either legacy
 * definitions or bits that are valid for both contexts and cmdbatches.  To be
 * safe the other 8 bits that are still available in the context field should be
 * omitted here in case we need to share - the other bits are available for
 * cmdbatch only flags as needed
 */
#define KGSL_CMDBATCH_MEMLIST		0x00000001
#define KGSL_CMDBATCH_MARKER		0x00000002
#define KGSL_CMDBATCH_SUBMIT_IB_LIST	KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
#define KGSL_CMDBATCH_CTX_SWITCH	KGSL_CONTEXT_CTX_SWITCH     /* 0x008 */
#define KGSL_CMDBATCH_PROFILING		0x00000010
#define KGSL_CMDBATCH_END_OF_FRAME	KGSL_CONTEXT_END_OF_FRAME   /* 0x100 */
#define KGSL_CMDBATCH_SYNC		KGSL_CONTEXT_SYNC           /* 0x400 */
#define KGSL_CMDBATCH_PWR_CONSTRAINT	KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */

/*
 * Reserve bits [16:19] and bits [28:31] for possible bits shared between
 * contexts and command batches.  Update this comment as new flags are added.
 */
83 
/* --- Memory allocation flags --- */

/* General allocation hints */
#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000

/* Memory is secure */
#define KGSL_MEMFLAGS_SECURE      0x00000008

/* Memory caching hints: 2-bit field at bits [26:27] of the alloc flags */
#define KGSL_CACHEMODE_MASK 0x0C000000
#define KGSL_CACHEMODE_SHIFT 26

#define KGSL_CACHEMODE_WRITECOMBINE 0
#define KGSL_CACHEMODE_UNCACHED 1
#define KGSL_CACHEMODE_WRITETHROUGH 2
#define KGSL_CACHEMODE_WRITEBACK 3

/*
 * Memory types for which allocations are made: 8-bit usage-hint field at
 * bits [8:15], used as a debugging aid (see KGSL_MEMTYPE_* below).
 */
#define KGSL_MEMTYPE_MASK		0x0000FF00
#define KGSL_MEMTYPE_SHIFT		8

#define KGSL_MEMTYPE_OBJECTANY			0
#define KGSL_MEMTYPE_FRAMEBUFFER		1
#define KGSL_MEMTYPE_RENDERBUFFER		2
#define KGSL_MEMTYPE_ARRAYBUFFER		3
#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
#define KGSL_MEMTYPE_TEXTURE			6
#define KGSL_MEMTYPE_SURFACE			7
#define KGSL_MEMTYPE_EGL_SURFACE		8
#define KGSL_MEMTYPE_GL				9
#define KGSL_MEMTYPE_CL				10
#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
#define KGSL_MEMTYPE_COMMAND			16
#define KGSL_MEMTYPE_2D				17
#define KGSL_MEMTYPE_EGL_IMAGE			18
#define KGSL_MEMTYPE_EGL_SHADOW			19
#define KGSL_MEMTYPE_MULTISAMPLE		20
#define KGSL_MEMTYPE_KERNEL			255

/*
 * Alignment hint, passed as the power of 2 exponent.
 * i.e 4k (2^12) would be 12, 64k (2^16) would be 16.
 */
#define KGSL_MEMALIGN_MASK		0x00FF0000
#define KGSL_MEMALIGN_SHIFT		16
135 
/* Origin of user memory imported into the GPU address space */
enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
	KGSL_USER_MEM_TYPE_MAX		= 0x00000007,
};
/* 3-bit usermem-type field at bits [5:7] of the memory flags */
#define KGSL_MEMFLAGS_USERMEM_MASK 0x000000e0
#define KGSL_MEMFLAGS_USERMEM_SHIFT 5

/*
 * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
 * leave a good value for allocated memory. In the flags we use
 * 0 to indicate allocated memory and thus need to add 1 to the enum
 * values.
 */
#define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)

#define KGSL_MEMFLAGS_NOT_USERMEM 0
#define KGSL_MEMFLAGS_USERMEM_PMEM KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
#define KGSL_MEMFLAGS_USERMEM_ASHMEM \
		KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
#define KGSL_MEMFLAGS_USERMEM_ADDR KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
#define KGSL_MEMFLAGS_USERMEM_ION KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)
160 
/* --- generic KGSL flag values --- */

/* Device/driver state flags (see kgsl_shadowprop.flags) */
#define KGSL_FLAGS_NORMALMODE  0x00000000
#define KGSL_FLAGS_SAFEMODE    0x00000001
#define KGSL_FLAGS_INITIALIZED0 0x00000002
#define KGSL_FLAGS_INITIALIZED 0x00000004
#define KGSL_FLAGS_STARTED     0x00000008
#define KGSL_FLAGS_ACTIVE      0x00000010
#define KGSL_FLAGS_RESERVED0   0x00000020
#define KGSL_FLAGS_RESERVED1   0x00000040
#define KGSL_FLAGS_RESERVED2   0x00000080
#define KGSL_FLAGS_SOFT_RESET  0x00000100
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200

/* Clock flags to show which clocks should be controlled by a given platform */
#define KGSL_CLK_SRC	0x00000001
#define KGSL_CLK_CORE	0x00000002
#define KGSL_CLK_IFACE	0x00000004
#define KGSL_CLK_MEM	0x00000008
#define KGSL_CLK_MEM_IFACE 0x00000010
#define KGSL_CLK_AXI	0x00000020

/* Server Side Sync Timeout in milliseconds */
#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000
185 
/*
 * Reset status values for context
 * (presumably reported through KGSL_PROP_GPU_RESET_STAT — confirm in the
 * driver's getproperty handler)
 */
enum kgsl_ctx_reset_stat {
	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
};
195 
/*
 * Convert a bandwidth value given in MB/s into bytes/s.
 * The argument is parenthesized so that expressions such as
 * KGSL_CONVERT_TO_MBPS(a + b) expand correctly (without the
 * parentheses only `b` was multiplied); the 1000U factor keeps
 * the arithmetic unsigned.
 */
#define KGSL_CONVERT_TO_MBPS(val) \
	((val)*1000*1000U)
198 
/* device id */
enum kgsl_deviceid {
	KGSL_DEVICE_3D0		= 0x00000000,
	KGSL_DEVICE_2D0		= 0x00000001,
	KGSL_DEVICE_2D1		= 0x00000002,
	KGSL_DEVICE_MAX		= 0x00000003
};

/* Hardware information returned for KGSL_PROP_DEVICE_INFO */
struct kgsl_devinfo {

	/* one of enum kgsl_deviceid */
	unsigned int device_id;
	/* chip revision id
	* coreid:8 majorrev:8 minorrev:8 patch:8
	*/
	unsigned int chip_id;
	/* nonzero if the GPU MMU is enabled */
	unsigned int mmu_enabled;
	/* GPU base address of on-chip GMEM */
	unsigned long gmem_gpubaseaddr;
	/*
	* This field contains the adreno revision
	* number 200, 205, 220, etc...
	*/
	unsigned int gpu_id;
	/* size of on-chip GMEM in bytes */
	size_t gmem_sizebytes;
};
223 
/* this structure defines the region of memory that can be mmap()ed from this
   driver. The timestamp fields are volatile because they are written by the
   GPU
*/
struct kgsl_devmemstore {
	volatile unsigned int soptimestamp; /* start-of-pipeline timestamp */
	unsigned int sbz;                   /* padding - should be zero */
	volatile unsigned int eoptimestamp; /* end-of-pipeline timestamp */
	unsigned int sbz2;
	volatile unsigned int ts_cmp_enable;
	unsigned int sbz3;
	volatile unsigned int ref_wait_ts;
	unsigned int sbz4;
	unsigned int current_context;
	unsigned int sbz5;
};

/*
 * Byte offset of `field` in the per-context memstore slot for `ctxt_id`;
 * the memstore region is an array of kgsl_devmemstore indexed by context id.
 */
#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
	 offsetof(struct kgsl_devmemstore, field))

/* timestamp id*/
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp*/
	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
};
251 
/* property types - used with kgsl_device_getproperty */
enum kgsl_property_type {
	KGSL_PROP_DEVICE_INFO     = 0x00000001,
	KGSL_PROP_DEVICE_SHADOW   = 0x00000002,
	KGSL_PROP_DEVICE_POWER    = 0x00000003,
	KGSL_PROP_SHMEM           = 0x00000004,
	KGSL_PROP_SHMEM_APERTURES = 0x00000005,
	KGSL_PROP_MMU_ENABLE 	  = 0x00000006,
	KGSL_PROP_INTERRUPT_WAITS = 0x00000007,
	KGSL_PROP_VERSION         = 0x00000008,
	KGSL_PROP_GPU_RESET_STAT  = 0x00000009,
	KGSL_PROP_PWRCTRL         = 0x0000000E,
	KGSL_PROP_PWR_CONSTRAINT  = 0x00000012,
};

/* Returned for KGSL_PROP_DEVICE_SHADOW: mmap() location of the memstore */
struct kgsl_shadowprop {
	unsigned long gpuaddr; /* mmap() offset of the shadow region */
	size_t size;
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};

/* Returned for KGSL_PROP_VERSION: driver and device version pairs */
struct kgsl_version {
	unsigned int drv_major;
	unsigned int drv_minor;
	unsigned int dev_major;
	unsigned int dev_minor;
};
279 
/* Performance counter groups */

/*
 * Hardware block ids used as `groupid` in the IOCTL_KGSL_PERFCOUNTER_*
 * ioctls below.
 */
#define KGSL_PERFCOUNTER_GROUP_CP 0x0
#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
#define KGSL_PERFCOUNTER_GROUP_PC 0x2
#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
#define KGSL_PERFCOUNTER_GROUP_TP 0x9
#define KGSL_PERFCOUNTER_GROUP_SP 0xA
#define KGSL_PERFCOUNTER_GROUP_RB 0xB
#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
#define KGSL_PERFCOUNTER_GROUP_MH 0xF
#define KGSL_PERFCOUNTER_GROUP_PA_SU 0x10
#define KGSL_PERFCOUNTER_GROUP_SQ 0x11
#define KGSL_PERFCOUNTER_GROUP_SX 0x12
#define KGSL_PERFCOUNTER_GROUP_TCF 0x13
#define KGSL_PERFCOUNTER_GROUP_TCM 0x14
#define KGSL_PERFCOUNTER_GROUP_TCR 0x15
#define KGSL_PERFCOUNTER_GROUP_L2 0x16
#define KGSL_PERFCOUNTER_GROUP_VSC 0x17
#define KGSL_PERFCOUNTER_GROUP_CCU 0x18
#define KGSL_PERFCOUNTER_GROUP_MAX 0x19

/* Sentinel countable values */
#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
#define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE
311 
/* structure holds list of ibs */
struct kgsl_ibdesc {
	unsigned long gpuaddr;  /* GPU address of the indirect buffer */
	unsigned long __pad;    /* reserved */
	size_t sizedwords;      /* size of the IB in dwords */
	unsigned int ctrl;      /* IB control flags */
};

/**
 * struct kgsl_cmdbatch_profiling_buffer
 * @wall_clock_s: Wall clock at ringbuffer submission time (seconds)
 * @wall_clock_ns: Wall clock at ringbuffer submission time (nanoseconds)
 * @gpu_ticks_queued: GPU ticks at ringbuffer submission
 * @gpu_ticks_submitted: GPU ticks when starting cmdbatch execution
 * @gpu_ticks_retired: GPU ticks when finishing cmdbatch execution
 *
 * This structure defines the profiling buffer used to measure cmdbatch
 * execution time
 */
struct kgsl_cmdbatch_profiling_buffer {
	uint64_t wall_clock_s;
	uint64_t wall_clock_ns;
	uint64_t gpu_ticks_queued;
	uint64_t gpu_ticks_submitted;
	uint64_t gpu_ticks_retired;
};
338 
/* ioctls */
#define KGSL_IOC_TYPE 0x09

/* get misc info about the GPU
   type should be a value from enum kgsl_property_type
   value points to a structure that varies based on type
   sizebytes is sizeof() that structure
   for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
   this structure contains hardware versioning info.
   for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
   this is used to find mmap() offset and sizes for mapping
   struct kgsl_memstore into userspace.
*/
struct kgsl_device_getproperty {
	unsigned int type;
	void __user *value;
	size_t sizebytes;
};

#define IOCTL_KGSL_DEVICE_GETPROPERTY \
	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
360 
/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
 */

/* block until the GPU has executed past a given timestamp
 * timeout is in milliseconds.
 */
struct kgsl_device_waittimestamp {
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)

/* Per-context variant of the wait above */
struct kgsl_device_waittimestamp_ctxtid {
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int timeout; /* milliseconds */
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
383 
/* DEPRECATED: issue indirect commands to the GPU.
 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
 * ibaddr and sizedwords must specify a subset of a buffer created
 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
 * flags may be a mask of KGSL_CONTEXT_ values
 * timestamp is a returned counter value which can be passed to
 * other ioctls to determine when the commands have been executed by
 * the GPU.
 *
 * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
 * instead
 */
struct kgsl_ringbuffer_issueibcmds {
	unsigned int drawctxt_id;
	unsigned long ibdesc_addr; /* user pointer to kgsl_ibdesc array */
	unsigned int numibs;
	unsigned int timestamp; /*output param */
	unsigned int flags;
};

#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
406 
/* read the most recently executed timestamp value
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_readtimestamp {
	unsigned int type;
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

/* free memory when the GPU reaches a given timestamp.
 * gpuaddr specify a memory region created by a
 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_freememontimestamp {
	unsigned long gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/* Previous versions of this header had incorrectly defined
   IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
   of a write only ioctl.  To ensure binary compatibility, the following
   #define will be used to intercept the incorrect ioctl
*/

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
443 
/* create a draw context, which is used to preserve GPU state.
 * The flags field may contain a mask KGSL_CONTEXT_*  values
 */
struct kgsl_drawctxt_create {
	unsigned int flags;
	unsigned int drawctxt_id; /*output param */
};

#define IOCTL_KGSL_DRAWCTXT_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)

/* destroy a draw context */
struct kgsl_drawctxt_destroy {
	unsigned int drawctxt_id;
};

#define IOCTL_KGSL_DRAWCTXT_DESTROY \
	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
462 
/* add a block of pmem, fb, ashmem or user allocated address
 * into the GPU address space */
struct kgsl_map_user_mem {
	int fd;                  /* fd of the buffer (pmem/ashmem/ion) */
	unsigned long gpuaddr;   /*output param */
	size_t len;
	size_t offset;
	unsigned long hostptr;   /*input param */
	enum kgsl_user_mem_type memtype;
	unsigned int flags;      /* mask of KGSL_MEMFLAGS_* values */
};

#define IOCTL_KGSL_MAP_USER_MEM \
	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)

/* per-context variant of IOCTL_KGSL_CMDSTREAM_READTIMESTAMP */
struct kgsl_cmdstream_readtimestamp_ctxtid {
	unsigned int context_id;
	unsigned int type;       /* enum kgsl_timestamp_type */
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)

/* per-context variant of IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP */
struct kgsl_cmdstream_freememontimestamp_ctxtid {
	unsigned int context_id;
	unsigned long gpuaddr;
	unsigned int type;       /* enum kgsl_timestamp_type */
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x17, \
	struct kgsl_cmdstream_freememontimestamp_ctxtid)
497 
/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
	int pmem_fd;
	unsigned long gpuaddr;	/*output param */
	unsigned int len;
	unsigned int offset;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)

/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
	unsigned long gpuaddr;
};

#define IOCTL_KGSL_SHAREDMEM_FREE \
	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)

/* inject a user event into the CFF (common file format) capture stream */
struct kgsl_cff_user_event {
	unsigned char cff_opcode;
	unsigned int op1;
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
	unsigned int __pad[2]; /* reserved for future use */
};

#define IOCTL_KGSL_CFF_USER_EVENT \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)
529 
/* describes a rectangular region of GMEM */
struct kgsl_gmem_desc {
	unsigned int x;
	unsigned int y;
	unsigned int width;
	unsigned int height;
	unsigned int pitch;
};

/* describes a shadow buffer backing a GMEM region */
struct kgsl_buffer_desc {
	void 			*hostptr;
	unsigned long	gpuaddr;
	int				size;
	unsigned int	format;
	unsigned int  	pitch;
	unsigned int  	enabled;
};

/* bind a shadow buffer to a GMEM region of a draw context */
struct kgsl_bind_gmem_shadow {
	unsigned int drawctxt_id;
	struct kgsl_gmem_desc gmem_desc;
	unsigned int shadow_x;
	unsigned int shadow_y;
	struct kgsl_buffer_desc shadow_buffer;
	unsigned int buffer_id;
};

#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
    _IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
558 
/* add a block of memory into the GPU address space */

/*
 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
 * use IOCTL_KGSL_GPUMEM_ALLOC instead
 */

struct kgsl_sharedmem_from_vmalloc {
	unsigned long gpuaddr;	/*output param */
	unsigned int hostptr;
	unsigned int flags;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)

/*
 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
 * supports both directions (flush and invalidate). This code will still
 * work, but by definition it will do a flush of the cache which might not be
 * what you want to have happen on a buffer following a GPU operation.  It is
 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
 */

#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)

/* set the bin base offset for a draw context */
struct kgsl_drawctxt_set_bin_base_offset {
	unsigned int drawctxt_id;
	unsigned int offset;
};

#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
593 
/* target windows for IOCTL_KGSL_CMDWINDOW_WRITE */
enum kgsl_cmdwindow_type {
	KGSL_CMDWINDOW_MIN     = 0x00000000,
	KGSL_CMDWINDOW_2D      = 0x00000000,
	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
	KGSL_CMDWINDOW_MMU     = 0x00000002,
	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
	KGSL_CMDWINDOW_MAX     = 0x000000FF,
};

/* write to the command window */
struct kgsl_cmdwindow_write {
	enum kgsl_cmdwindow_type target;
	unsigned int addr;
	unsigned int data;
};

#define IOCTL_KGSL_CMDWINDOW_WRITE \
	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)

/* allocate GPU-accessible memory; flags is a mask of KGSL_MEMFLAGS_* etc. */
struct kgsl_gpumem_alloc {
	unsigned long gpuaddr; /* output param */
	size_t size;
	unsigned int flags;
};

#define IOCTL_KGSL_GPUMEM_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)

/* sync a memory range into the CFF capture stream */
struct kgsl_cff_syncmem {
	unsigned long gpuaddr;
	size_t len;
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_CFF_SYNCMEM \
	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
630 
/*
 * A timestamp event allows the user space to register an action following an
 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
 * _IOWR to support fences which need to return a fd for the priv parameter.
 */

struct kgsl_timestamp_event {
	int type;                /* Type of event (see list below) */
	unsigned int timestamp;  /* Timestamp to trigger event on */
	unsigned int context_id; /* Context for the timestamp */
	void __user *priv;	 /* Pointer to the event specific blob */
	size_t len;              /* Size of the event specific blob */
};

/*
 * NOTE(review): nr 0x31 is also used by IOCTL_KGSL_CFF_USER_EVENT above;
 * the encoded struct size differs, so the full ioctl numbers stay distinct.
 */
#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)

/* A genlock timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_GENLOCK 1

struct kgsl_timestamp_event_genlock {
	int handle; /* Handle of the genlock lock to release */
};

/* A fence timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_FENCE 2

struct kgsl_timestamp_event_fence {
	int fence_fd; /* Fence to signal */
};

/*
 * Set a property within the kernel.  Uses the same structure as
 * IOCTL_KGSL_GETPROPERTY
 */

#define IOCTL_KGSL_SETPROPERTY \
	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)

#define IOCTL_KGSL_TIMESTAMP_EVENT \
	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
674 
/**
 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
 * @id: returned id value for this allocation.
 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
 * @size: requested size of the allocation and actual size on return.
 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
 * @gpuaddr: returned GPU address for the allocation
 *
 * Allocate memory for access by the GPU. The flags and size fields are echoed
 * back by the kernel, so that the caller can know if the request was
 * adjusted.
 *
 * Supported flags:
 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
 * KGSL_MEMTYPE*: usage hint for debugging aid
 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
 * address will be 0. Calling mmap() will set the GPU address.
 */
struct kgsl_gpumem_alloc_id {
	unsigned int id;
	unsigned int flags;
	size_t size;
	size_t mmapsize;
	unsigned long gpuaddr;
/* private: reserved for future use*/
	unsigned long __pad[2];
};

#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
706 
/**
 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
 * @id: GPU allocation id to free
 *
 * Free an allocation by id, in case a GPU address has not been assigned or
 * is unknown. Freeing an allocation by id with this ioctl or by GPU address
 * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
 */
struct kgsl_gpumem_free_id {
	unsigned int id;
/* private: reserved for future use*/
	unsigned int __pad;
};

#define IOCTL_KGSL_GPUMEM_FREE_ID \
	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
723 
/**
 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
 * @gpuaddr: GPU address to query. Also set on return.
 * @id: GPU allocation id to query. Also set on return.
 * @flags: returned mask of KGSL_MEM* values.
 * @size: returned size of the allocation.
 * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
 * @useraddr: returned address of the userspace mapping for this buffer
 *
 * This ioctl allows querying of all user visible attributes of an existing
 * allocation, by either the GPU address or the id returned by a previous
 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
 * return all attributes so this ioctl can be used to look them up if needed.
 *
 */
struct kgsl_gpumem_get_info {
	unsigned long gpuaddr;
	unsigned int id;
	unsigned int flags;
	size_t size;
	size_t mmapsize;
	unsigned long useraddr;
/* private: reserved for future use*/
	unsigned long __pad[4];
};

#define IOCTL_KGSL_GPUMEM_GET_INFO\
	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
752 
/**
 * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
 * @gpuaddr: GPU address of the buffer to sync.
 * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 * @offset: offset into the buffer
 * @length: number of bytes starting from offset to perform
 * the cache operation on
 *
 * Sync the L2 cache for memory headed to and from the GPU - this replaces
 * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
 * directions
 *
 */
struct kgsl_gpumem_sync_cache {
	unsigned long gpuaddr;
	unsigned int id;
	unsigned int op;
	size_t offset;
	size_t length;
};
774 
/* Cache operation bits for kgsl_gpumem_sync_cache::op */
#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN

#define KGSL_GPUMEM_CACHE_INV (1 << 1)
#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV

#define KGSL_GPUMEM_CACHE_FLUSH \
	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)

/* Flag to ensure backwards compatibility of kgsl_gpumem_sync_cache struct */
/*
 * Use an unsigned 1: left-shifting a signed int into the sign bit
 * ((1 << 31U)) is undefined behavior in C; (1U << 31) yields the same
 * bit pattern with defined semantics.
 */
#define KGSL_GPUMEM_CACHE_RANGE (1U << 31)
786 
/* Sync CPU caches for a single buffer identified by gpuaddr or id */
#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
789 
/**
 * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
 * @groupid: Performance counter group ID
 * @countable: Countable to select within the group
 * @offset: Return offset of the reserved LO counter
 * @offset_hi: Return offset of the reserved HI counter
 *
 * Get an available performance counter from a specified groupid.  The offset
 * of the performance counter will be returned after successfully assigning
 * the countable to the counter for the specified group.  An error will be
 * returned and an offset of 0 if the groupid is invalid or there are no
 * more counters left.  After successfully getting a perfcounter, the user
 * must call kgsl_perfcounter_put(groupid, contable) when finished with
 * the perfcounter to clear up perfcounter resources.
 *
 */
struct kgsl_perfcounter_get {
	unsigned int groupid;
	unsigned int countable;
	unsigned int offset;
	unsigned int offset_hi;
/* private: reserved for future use */
	unsigned int __pad; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_GET \
	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
817 
/**
 * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
 * @groupid: Performance counter group ID
 * @countable: Countable to release within the group
 *
 * Put an allocated performance counter to allow others to have access to the
 * resource that was previously taken.  This is only to be called after
 * successfully getting a performance counter from kgsl_perfcounter_get().
 *
 */
struct kgsl_perfcounter_put {
	unsigned int groupid;
	unsigned int countable;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_PUT \
	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
837 
/**
 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
 * @groupid: Performance counter group ID
 * @countables: Return active countables array
 * @count: Size of the countables array
 * @max_counters: Return total number counters for the group ID
 *
 * Query the available performance counters given a groupid.  The array
 * *countables is used to return the current active countables in counters.
 * The size of the array is passed in so the kernel will only write at most
 * size or counter->size for the group id.  The total number of available
 * counters for the group ID is returned in max_counters.
 * If the array or size passed in are invalid, then only the maximum number
 * of counters will be returned, no data will be written to *countables.
 * If the groupid is invalid an error code will be returned.
 *
 */
struct kgsl_perfcounter_query {
	unsigned int groupid;
	/* Array to return the current countable for up to size counters */
	unsigned int __user *countables;
	unsigned int count;
	unsigned int max_counters;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_QUERY \
	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
867 
/**
 * struct kgsl_perfcounter_read - argument to IOCTL_KGSL_PERFCOUNTER_READ
 * @reads: Array of group ID / countable pairs; values returned in place
 * @count: Number of entries in the reads array
 *
 * Read in the current value of a performance counter given by the groupid
 * and countable.
 *
 */

/* one groupid/countable pair with its returned counter value */
struct kgsl_perfcounter_read_group {
	unsigned int groupid;
	unsigned int countable;
	unsigned long long value; /* output param */
};

struct kgsl_perfcounter_read {
	struct kgsl_perfcounter_read_group __user *reads;
	unsigned int count;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_READ \
	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
895 /*
896  * struct kgsl_gpumem_sync_cache_bulk - argument to
897  * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
898  * @id_list: list of GPU buffer ids of the buffers to sync
899  * @count: number of GPU buffer ids in id_list
900  * @op: a mask of KGSL_GPUMEM_CACHE_* values
901  *
902  * Sync the cache for memory headed to and from the GPU. Certain
903  * optimizations can be made on the cache operation based on the total
904  * size of the working set of memory to be managed.
905  */
struct kgsl_gpumem_sync_cache_bulk {
	/* List of GPU buffer ids of the buffers to sync */
	unsigned int __user *id_list;
	/* Number of GPU buffer ids in id_list */
	unsigned int count;
	/* Mask of KGSL_GPUMEM_CACHE_* values */
	unsigned int op;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
916 
917 /*
918  * struct kgsl_cmd_syncpoint_timestamp
919  * @context_id: ID of a KGSL context
920  * @timestamp: GPU timestamp
921  *
922  * This structure defines a syncpoint comprising a context/timestamp pair. A
923  * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
924  * dependencies that must be met before the command can be submitted to the
925  * hardware
926  */
struct kgsl_cmd_syncpoint_timestamp {
	/* ID of a KGSL context */
	unsigned int context_id;
	/* GPU timestamp on that context that must be reached */
	unsigned int timestamp;
};

/* Syncpoint type for a context/timestamp pair (kgsl_cmd_syncpoint_timestamp) */
#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0

struct kgsl_cmd_syncpoint_fence {
	/* Sync fence file descriptor the command batch depends on */
	int fd;
};

/* Syncpoint type for a fence fd (kgsl_cmd_syncpoint_fence) */
#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
939 
940 /**
941  * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
942  * @type: type of sync point defined here
943  * @priv: Pointer to the type specific buffer
944  * @size: Size of the type specific buffer
945  *
946  * This structure contains pointers defining a specific command sync point.
947  * The pointer and size should point to a type appropriate structure.
948  */
struct kgsl_cmd_syncpoint {
	/* One of KGSL_CMD_SYNCPOINT_TYPE_* */
	int type;
	/* Pointer to the type-specific syncpoint structure */
	void __user *priv;
	/* Size of the type-specific structure pointed to by priv */
	size_t size;
};

/* Flag to indicate that the cmdlist may contain memlists */
#define KGSL_IBDESC_MEMLIST 0x1

/* Flag to point out the cmdbatch profiling buffer in the memlist */
#define KGSL_IBDESC_PROFILING_BUFFER 0x2
960 
961 /**
962  * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
963  * @context_id: KGSL context ID that owns the commands
964  * @flags: Flags for the command batch (cmdbatch-capable KGSL_CONTEXT_* flags)
965  * @cmdlist: User pointer to a list of kgsl_ibdesc structures
966  * @numcmds: Number of commands listed in cmdlist
967  * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
968  * @numsyncs: Number of sync points listed in synclist
969  * @timestamp: On entry a user defined timestamp, on exit the timestamp
970  * assigned to the command batch
971  *
972  * This structure specifies a command to send to the GPU hardware.  This is
973  * similar to kgsl_issueibcmds except that it doesn't support the legacy way to
974  * submit IB lists and it adds sync points to block the IB until the
975  * dependencies are satisfied.  This entry point is the new and preferred way
976  * to submit commands to the GPU. The memory list can be used to specify all
977  * memory that is referenced in the current set of commands.
978  */
979 
struct kgsl_submit_commands {
	/* KGSL context ID that owns the commands */
	unsigned int context_id;
	/* Command batch flags (e.g. KGSL_IBDESC_* usage is signalled here) */
	unsigned int flags;
	/* User pointer to a list of kgsl_ibdesc structures */
	struct kgsl_ibdesc __user *cmdlist;
	/* Number of commands listed in cmdlist */
	unsigned int numcmds;
	/* User pointer to a list of kgsl_cmd_syncpoint structures */
	struct kgsl_cmd_syncpoint __user *synclist;
	/* Number of sync points listed in synclist */
	unsigned int numsyncs;
	/* In: user defined timestamp; out: timestamp assigned to the batch */
	unsigned int timestamp;
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SUBMIT_COMMANDS \
	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
994 
995 /**
996  * struct kgsl_device_constraint - device constraint argument
997  * @context_id: KGSL context ID
998  * @type: type of constraint i.e pwrlevel/none
999  * @data: constraint data
1000  * @size: size of the constraint data
1001  */
struct kgsl_device_constraint {
	/* Type of constraint, one of KGSL_CONSTRAINT_* below */
	unsigned int type;
	/* KGSL context ID the constraint applies to */
	unsigned int context_id;
	/* Pointer to the type-specific constraint data */
	void __user *data;
	/* Size of the constraint data pointed to by data */
	size_t size;
};

/* Constraint type */
#define KGSL_CONSTRAINT_NONE 0
#define KGSL_CONSTRAINT_PWRLEVEL 1

/* PWRLEVEL constraint level */
/* set to min frequency */
#define KGSL_CONSTRAINT_PWR_MIN    0
/* set to max frequency */
#define KGSL_CONSTRAINT_PWR_MAX    1

/* Payload for KGSL_CONSTRAINT_PWRLEVEL (passed via kgsl_device_constraint.data) */
struct kgsl_device_constraint_pwrlevel {
	/* Requested power level: KGSL_CONSTRAINT_PWR_MIN or _MAX */
	unsigned int level;
};
1022 
1023 /**
1024  * struct kgsl_syncsource_create - Argument to IOCTL_KGSL_SYNCSOURCE_CREATE
1025  * @id: returned id for the syncsource that was created.
1026  *
1027  * This ioctl creates a userspace sync timeline.
1028  */
1029 
struct kgsl_syncsource_create {
	/* Output: id of the syncsource (userspace sync timeline) created */
	unsigned int id;
/* private: reserved for future use */
	unsigned int __pad[3];
};

#define IOCTL_KGSL_SYNCSOURCE_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)
1038 
1039 /**
1040  * struct kgsl_syncsource_destroy - Argument to IOCTL_KGSL_SYNCSOURCE_DESTROY
1041  * @id: syncsource id to destroy
1042  *
1043  * This ioctl destroys a userspace sync timeline.
1044  */
1045 
struct kgsl_syncsource_destroy {
	/* Id of the syncsource to destroy */
	unsigned int id;
/* private: reserved for future use */
	unsigned int __pad[3];
};

#define IOCTL_KGSL_SYNCSOURCE_DESTROY \
	_IOWR(KGSL_IOC_TYPE, 0x41, struct kgsl_syncsource_destroy)
1054 
1055 /**
1056  * struct kgsl_syncsource_create_fence - Argument to
1057  *     IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1058  * @id: syncsource id
1059  * @fence_fd: returned sync_fence fd
1060  *
1061  * Create a fence that may be signaled by userspace by calling
1062  * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE. There are no order dependencies between
1063  * these fences.
1064  */
struct kgsl_syncsource_create_fence {
	/* Syncsource id to create the fence on */
	unsigned int id;
	/* Output: fd of the sync_fence that was created */
	int fence_fd;
/* private: reserved for future use */
	unsigned int __pad[4];
};
1071 
1072 /**
1073  * struct kgsl_syncsource_signal_fence - Argument to
1074  *     IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
1075  * @id: syncsource id
1076  * @fence_fd: sync_fence fd to signal
1077  *
1078  * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1079  * call using the same syncsource id. This allows a fence to be shared
1080  * to other processes but only signaled by the process owning the fd
1081  * used to create the fence.
1082  */
/*
 * NOTE: this define pairs with struct kgsl_syncsource_create_fence above;
 * it sits between the signal-fence kernel-doc and its struct.
 */
#define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
	_IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)

struct kgsl_syncsource_signal_fence {
	/* Syncsource id the fence was created on */
	unsigned int id;
	/* Fd of the sync_fence to signal */
	int fence_fd;
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE \
	_IOWR(KGSL_IOC_TYPE, 0x43, struct kgsl_syncsource_signal_fence)
1095 
1096 #endif /* _UAPI_MSM_KGSL_H */
1097