1 #ifndef _UAPI_MSM_KGSL_H
2 #define _UAPI_MSM_KGSL_H
3 
4 /*
5  * The KGSL version has proven not to be very useful in userspace if features
6  * are cherry picked into other trees out of order so it is frozen as of 3.14.
7  * It is left here for backwards compatabilty and as a reminder that
8  * software releases are never linear. Also, I like pie.
9  */
10 
11 #define KGSL_VERSION_MAJOR        3
12 #define KGSL_VERSION_MINOR        14
13 
14 /*
15  * We have traditionally mixed context and issueibcmds / command batch flags
16  * together into a big flag stew. This worked fine until we started adding a
17  * lot more command batch flags and we started running out of bits. Turns out
18  * we have a bit of room in the context type / priority mask that we could use
19  * for command batches, but that means we need to split out the flags into two
20  * coherent sets.
21  *
22  * If any future definitions are for both context and cmdbatch add both defines
23  * and link the cmdbatch to the context define as we do below. Otherwise feel
24  * free to add exclusive bits to either set.
25  */
26 
/* --- context flags --- */
#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
#define KGSL_CONTEXT_PREAMBLE		0x00000010
#define KGSL_CONTEXT_TRASH_STATE	0x00000020
#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SYNC               0x00000400
#define KGSL_CONTEXT_PWR_CONSTRAINT     0x00000800

/* Bits [12:15] of the context flags carry the context priority */
#define KGSL_CONTEXT_PRIORITY_MASK      0x0000F000
#define KGSL_CONTEXT_PRIORITY_SHIFT     12
#define KGSL_CONTEXT_PRIORITY_UNDEF     0

#define KGSL_CONTEXT_IFH_NOP            0x00010000
#define KGSL_CONTEXT_SECURE             0x00020000

/* Bits [20:24] of the context flags carry one of the type values below */
#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
#define KGSL_CONTEXT_TYPE_SHIFT         20
#define KGSL_CONTEXT_TYPE_ANY		0
#define KGSL_CONTEXT_TYPE_GL		1
#define KGSL_CONTEXT_TYPE_CL		2
#define KGSL_CONTEXT_TYPE_C2D		3
#define KGSL_CONTEXT_TYPE_RS		4
#define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E

/* Sentinel value used to mark an invalid context id */
#define KGSL_CONTEXT_INVALID 0xffffffff
61 
62 /*
63  * --- command batch flags ---
64  * The bits that are linked to a KGSL_CONTEXT equivalent are either legacy
65  * definitions or bits that are valid for both contexts and cmdbatches.  To be
66  * safe the other 8 bits that are still available in the context field should be
67  * omitted here in case we need to share - the other bits are available for
68  * cmdbatch only flags as needed
69  */
70 #define KGSL_CMDBATCH_MEMLIST		0x00000001
71 #define KGSL_CMDBATCH_MARKER		0x00000002
72 #define KGSL_CMDBATCH_SUBMIT_IB_LIST	KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
73 #define KGSL_CMDBATCH_CTX_SWITCH	KGSL_CONTEXT_CTX_SWITCH     /* 0x008 */
74 #define KGSL_CMDBATCH_PROFILING		0x00000010
75 #define KGSL_CMDBATCH_END_OF_FRAME	KGSL_CONTEXT_END_OF_FRAME   /* 0x100 */
76 #define KGSL_CMDBATCH_SYNC		KGSL_CONTEXT_SYNC           /* 0x400 */
77 #define KGSL_CMDBATCH_PWR_CONSTRAINT	KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
78 
79 /*
80  * Reserve bits [16:19] and bits [28:31] for possible bits shared between
81  * contexts and command batches.  Update this comment as new flags are added.
82  */
83 
/* --- Memory allocation flags --- */

/* General allocation hints */
#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000

/* Memory is secure */
#define KGSL_MEMFLAGS_SECURE      0x00000008

/* Memory caching hints: bits [26:27] select one of the cachemode values */
#define KGSL_CACHEMODE_MASK 0x0C000000
#define KGSL_CACHEMODE_SHIFT 26

#define KGSL_CACHEMODE_WRITECOMBINE 0
#define KGSL_CACHEMODE_UNCACHED 1
#define KGSL_CACHEMODE_WRITETHROUGH 2
#define KGSL_CACHEMODE_WRITEBACK 3

/* Memory types for which allocations are made: bits [8:15] (debugging hint) */
#define KGSL_MEMTYPE_MASK		0x0000FF00
#define KGSL_MEMTYPE_SHIFT		8

#define KGSL_MEMTYPE_OBJECTANY			0
#define KGSL_MEMTYPE_FRAMEBUFFER		1
#define KGSL_MEMTYPE_RENDERBUFFER		2
#define KGSL_MEMTYPE_ARRAYBUFFER		3
#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
#define KGSL_MEMTYPE_TEXTURE			6
#define KGSL_MEMTYPE_SURFACE			7
#define KGSL_MEMTYPE_EGL_SURFACE		8
#define KGSL_MEMTYPE_GL				9
#define KGSL_MEMTYPE_CL				10
#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
#define KGSL_MEMTYPE_COMMAND			16
#define KGSL_MEMTYPE_2D				17
#define KGSL_MEMTYPE_EGL_IMAGE			18
#define KGSL_MEMTYPE_EGL_SHADOW			19
#define KGSL_MEMTYPE_MULTISAMPLE		20
#define KGSL_MEMTYPE_KERNEL			255

/*
 * Alignment hint, passed as the power of 2 exponent.
 * i.e 4k (2^12) would be 12, 64k (2^16)would be 16.
 */
#define KGSL_MEMALIGN_MASK		0x00FF0000
#define KGSL_MEMALIGN_SHIFT		16
135 
/* Origin of user memory that is mapped into the GPU address space */
enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
	/* largest value representable in the 3-bit USERMEM flag field below */
	KGSL_USER_MEM_TYPE_MAX		= 0x00000007,
};
#define KGSL_MEMFLAGS_USERMEM_MASK 0x000000e0
#define KGSL_MEMFLAGS_USERMEM_SHIFT 5

/*
 * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
 * leave a good value for allocated memory. In the flags we use
 * 0 to indicate allocated memory and thus need to add 1 to the enum
 * values.
 */
#define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)

#define KGSL_MEMFLAGS_NOT_USERMEM 0
#define KGSL_MEMFLAGS_USERMEM_PMEM KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
#define KGSL_MEMFLAGS_USERMEM_ASHMEM \
		KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
#define KGSL_MEMFLAGS_USERMEM_ADDR KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
#define KGSL_MEMFLAGS_USERMEM_ION KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)
160 
/* --- generic KGSL flag values --- */

#define KGSL_FLAGS_NORMALMODE  0x00000000
#define KGSL_FLAGS_SAFEMODE    0x00000001
#define KGSL_FLAGS_INITIALIZED0 0x00000002
#define KGSL_FLAGS_INITIALIZED 0x00000004
#define KGSL_FLAGS_STARTED     0x00000008
#define KGSL_FLAGS_ACTIVE      0x00000010
#define KGSL_FLAGS_RESERVED0   0x00000020
#define KGSL_FLAGS_RESERVED1   0x00000040
#define KGSL_FLAGS_RESERVED2   0x00000080
#define KGSL_FLAGS_SOFT_RESET  0x00000100
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200

/* Clock flags to show which clocks should be controled by a given platform */
#define KGSL_CLK_SRC	0x00000001
#define KGSL_CLK_CORE	0x00000002
#define KGSL_CLK_IFACE	0x00000004
#define KGSL_CLK_MEM	0x00000008
#define KGSL_CLK_MEM_IFACE 0x00000010
#define KGSL_CLK_AXI	0x00000020

/* Server Side Sync Timeout in milliseconds */
#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000
185 
186 /*
187  * Reset status values for context
188  */
189 enum kgsl_ctx_reset_stat {
190 	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
191 	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
192 	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
193 	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
194 };
195 
/*
 * Convert a bandwidth value given in MBps into bytes per second.
 * The argument is parenthesized so that passing an expression
 * (e.g. KGSL_CONVERT_TO_MBPS(a + b)) is not broken by operator
 * precedence, which the previous definition mishandled.
 */
#define KGSL_CONVERT_TO_MBPS(val) \
	((val)*1000*1000U)
198 
/* device id - identifies which GPU core an ioctl targets */
enum kgsl_deviceid {
	KGSL_DEVICE_3D0		= 0x00000000,
	KGSL_DEVICE_2D0		= 0x00000001,
	KGSL_DEVICE_2D1		= 0x00000002,
	KGSL_DEVICE_MAX		= 0x00000003
};
206 
/* Hardware description returned for KGSL_PROP_DEVICE_INFO */
struct kgsl_devinfo {

	unsigned int device_id;
	/* chip revision id
	* coreid:8 majorrev:8 minorrev:8 patch:8
	*/
	unsigned int chip_id;
	unsigned int mmu_enabled;
	unsigned long gmem_gpubaseaddr;
	/*
	* This field contains the adreno revision
	* number 200, 205, 220, etc...
	*/
	unsigned int gpu_id;
	size_t gmem_sizebytes;
};
223 
/* this structure defines the region of memory that can be mmap()ed from this
   driver. The timestamp fields are volatile because they are written by the
   GPU
*/
struct kgsl_devmemstore {
	volatile unsigned int soptimestamp;
	unsigned int sbz;		/* padding (presumably "should be zero") */
	volatile unsigned int eoptimestamp;
	unsigned int sbz2;
	volatile unsigned int ts_cmp_enable;
	unsigned int sbz3;
	volatile unsigned int ref_wait_ts;
	unsigned int sbz4;
	unsigned int current_context;
	unsigned int sbz5;
};

/* Byte offset of "field" in the memstore slot belonging to context ctxt_id */
#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
	 offsetof(struct kgsl_devmemstore, field))
244 
/* timestamp id*/
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp*/
	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
};

/* property types - used with kgsl_device_getproperty */
enum kgsl_property_type {
	KGSL_PROP_DEVICE_INFO     = 0x00000001,
	KGSL_PROP_DEVICE_SHADOW   = 0x00000002,
	KGSL_PROP_DEVICE_POWER    = 0x00000003,
	KGSL_PROP_SHMEM           = 0x00000004,
	KGSL_PROP_SHMEM_APERTURES = 0x00000005,
	KGSL_PROP_MMU_ENABLE 	  = 0x00000006,
	KGSL_PROP_INTERRUPT_WAITS = 0x00000007,
	KGSL_PROP_VERSION         = 0x00000008,
	KGSL_PROP_GPU_RESET_STAT  = 0x00000009,
	KGSL_PROP_PWRCTRL         = 0x0000000E,
	KGSL_PROP_PWR_CONSTRAINT  = 0x00000012,
};
266 
/* Returned for KGSL_PROP_DEVICE_SHADOW; describes the mmap()able memstore */
struct kgsl_shadowprop {
	unsigned long gpuaddr;
	size_t size;
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};

/* Returned for KGSL_PROP_VERSION: driver and device version pairs */
struct kgsl_version {
	unsigned int drv_major;
	unsigned int drv_minor;
	unsigned int dev_major;
	unsigned int dev_minor;
};
279 
/* Performance counter groups (hardware blocks that expose counters) */

#define KGSL_PERFCOUNTER_GROUP_CP 0x0
#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
#define KGSL_PERFCOUNTER_GROUP_PC 0x2
#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
#define KGSL_PERFCOUNTER_GROUP_TP 0x9
#define KGSL_PERFCOUNTER_GROUP_SP 0xA
#define KGSL_PERFCOUNTER_GROUP_RB 0xB
#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
#define KGSL_PERFCOUNTER_GROUP_MH 0xF
#define KGSL_PERFCOUNTER_GROUP_PA_SU 0x10
#define KGSL_PERFCOUNTER_GROUP_SQ 0x11
#define KGSL_PERFCOUNTER_GROUP_SX 0x12
#define KGSL_PERFCOUNTER_GROUP_TCF 0x13
#define KGSL_PERFCOUNTER_GROUP_TCM 0x14
#define KGSL_PERFCOUNTER_GROUP_TCR 0x15
#define KGSL_PERFCOUNTER_GROUP_L2 0x16
#define KGSL_PERFCOUNTER_GROUP_VSC 0x17
#define KGSL_PERFCOUNTER_GROUP_CCU 0x18
#define KGSL_PERFCOUNTER_GROUP_ALWAYSON 0x1B
#define KGSL_PERFCOUNTER_GROUP_MAX 0x1C

/* Sentinel countable values: counter slot unassigned / unusable */
#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
#define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE
312 
/* structure holds list of ibs */
struct kgsl_ibdesc {
	unsigned long gpuaddr;
	unsigned long __pad;	/* unused; keeps layout stable */
	size_t sizedwords;
	unsigned int ctrl;
};

/**
 * struct kgsl_cmdbatch_profiling_buffer
 * @wall_clock_s: Wall clock at ringbuffer submission time (seconds)
 * @wall_clock_ns: Wall clock at ringbuffer submission time (nanoseconds)
 * @gpu_ticks_queued: GPU ticks at ringbuffer submission
 * @gpu_ticks_submitted: GPU ticks when starting cmdbatch execution
 * @gpu_ticks_retired: GPU ticks when finishing cmdbatch execution
 *
 * This structure defines the profiling buffer used to measure cmdbatch
 * execution time
 */
struct kgsl_cmdbatch_profiling_buffer {
	uint64_t wall_clock_s;
	uint64_t wall_clock_ns;
	uint64_t gpu_ticks_queued;
	uint64_t gpu_ticks_submitted;
	uint64_t gpu_ticks_retired;
};
339 
/* ioctls */
#define KGSL_IOC_TYPE 0x09

/* get misc info about the GPU
   type should be a value from enum kgsl_property_type
   value points to a structure that varies based on type
   sizebytes is sizeof() that structure
   for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
   this structure contains hardware versioning info.
   for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
   this is used to find mmap() offset and sizes for mapping
   struct kgsl_memstore into userspace.
*/
struct kgsl_device_getproperty {
	unsigned int type;
	void __user *value;
	size_t sizebytes;
};

#define IOCTL_KGSL_DEVICE_GETPROPERTY \
	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
361 
/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
 */

/* block until the GPU has executed past a given timestamp
 * timeout is in milliseconds.
 */
struct kgsl_device_waittimestamp {
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)

/* Per-context variant of the wait above */
struct kgsl_device_waittimestamp_ctxtid {
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
384 
/* DEPRECATED: issue indirect commands to the GPU.
 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
 * ibaddr and sizedwords must specify a subset of a buffer created
 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
 * flags may be a mask of KGSL_CONTEXT_ values
 * timestamp is a returned counter value which can be passed to
 * other ioctls to determine when the commands have been executed by
 * the GPU.
 *
 * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
 * instead
 */
struct kgsl_ringbuffer_issueibcmds {
	unsigned int drawctxt_id;
	unsigned long ibdesc_addr;	/* user pointer to an array of kgsl_ibdesc */
	unsigned int numibs;
	unsigned int timestamp; /*output param */
	unsigned int flags;
};

#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
407 
/* read the most recently executed timestamp value
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_readtimestamp {
	unsigned int type;
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

/* free memory when the GPU reaches a given timestamp.
 * gpuaddr specify a memory region created by a
 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_freememontimestamp {
	unsigned long gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/* Previous versions of this header had incorrectly defined
   IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
   of a write only ioctl.  To ensure binary compatibility, the following
   #define will be used to intercept the incorrect ioctl
*/

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
444 
/* create a draw context, which is used to preserve GPU state.
 * The flags field may contain a mask KGSL_CONTEXT_*  values
 */
struct kgsl_drawctxt_create {
	unsigned int flags;
	unsigned int drawctxt_id; /*output param */
};

#define IOCTL_KGSL_DRAWCTXT_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)

/* destroy a draw context */
struct kgsl_drawctxt_destroy {
	unsigned int drawctxt_id;
};

#define IOCTL_KGSL_DRAWCTXT_DESTROY \
	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
463 
/* add a block of pmem, fb, ashmem or user allocated address
 * into the GPU address space */
struct kgsl_map_user_mem {
	int fd;
	unsigned long gpuaddr;   /*output param */
	size_t len;
	size_t offset;
	unsigned long hostptr;   /*input param */
	enum kgsl_user_mem_type memtype;
	unsigned int flags;
};

#define IOCTL_KGSL_MAP_USER_MEM \
	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)

/* Per-context variant of IOCTL_KGSL_CMDSTREAM_READTIMESTAMP */
struct kgsl_cmdstream_readtimestamp_ctxtid {
	unsigned int context_id;
	unsigned int type;
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)

/* Per-context variant of IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP */
struct kgsl_cmdstream_freememontimestamp_ctxtid {
	unsigned int context_id;
	unsigned long gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x17, \
	struct kgsl_cmdstream_freememontimestamp_ctxtid)
498 
/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
	int pmem_fd;
	unsigned long gpuaddr;	/*output param */
	unsigned int len;
	unsigned int offset;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)

/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
	unsigned long gpuaddr;
};

#define IOCTL_KGSL_SHAREDMEM_FREE \
	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)

/* Inject a user event into the CFF capture stream */
struct kgsl_cff_user_event {
	unsigned char cff_opcode;
	unsigned int op1;
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
	unsigned int __pad[2];
};

/*
 * NOTE(review): ioctl nr 0x31 is also used by
 * IOCTL_KGSL_TIMESTAMP_EVENT_OLD below - confirm which one the driver
 * actually dispatches before relying on this number.
 */
#define IOCTL_KGSL_CFF_USER_EVENT \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)
530 
/* Describes a rectangular region of GMEM */
struct kgsl_gmem_desc {
	unsigned int x;
	unsigned int y;
	unsigned int width;
	unsigned int height;
	unsigned int pitch;
};

/* Describes a userspace buffer used as a GMEM shadow target */
struct kgsl_buffer_desc {
	void 			*hostptr;
	unsigned long	gpuaddr;
	int				size;
	unsigned int	format;
	unsigned int  	pitch;
	unsigned int  	enabled;
};

/* Bind a shadow buffer to a region of GMEM for a draw context */
struct kgsl_bind_gmem_shadow {
	unsigned int drawctxt_id;
	struct kgsl_gmem_desc gmem_desc;
	unsigned int shadow_x;
	unsigned int shadow_y;
	struct kgsl_buffer_desc shadow_buffer;
	unsigned int buffer_id;
};

#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
    _IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
559 
/* add a block of memory into the GPU address space */

/*
 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
 * use IOCTL_KGSL_GPUMEM_ALLOC instead
 */

struct kgsl_sharedmem_from_vmalloc {
	unsigned long gpuaddr;	/*output param */
	unsigned int hostptr;
	unsigned int flags;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)

/*
 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
 * supports both directions (flush and invalidate). This code will still
 * work, but by definition it will do a flush of the cache which might not be
 * what you want to have happen on a buffer following a GPU operation.  It is
 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
 */

#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)

/* Set the bin base offset for a draw context */
struct kgsl_drawctxt_set_bin_base_offset {
	unsigned int drawctxt_id;
	unsigned int offset;
};

#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
594 
/* Target windows for IOCTL_KGSL_CMDWINDOW_WRITE */
enum kgsl_cmdwindow_type {
	KGSL_CMDWINDOW_MIN     = 0x00000000,
	KGSL_CMDWINDOW_2D      = 0x00000000,
	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
	KGSL_CMDWINDOW_MMU     = 0x00000002,
	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
	KGSL_CMDWINDOW_MAX     = 0x000000FF,
};

/* write to the command window */
struct kgsl_cmdwindow_write {
	enum kgsl_cmdwindow_type target;
	unsigned int addr;
	unsigned int data;
};

#define IOCTL_KGSL_CMDWINDOW_WRITE \
	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)

/* Allocate GPU-accessible memory; flags take KGSL_MEMFLAGS_/KGSL_CACHEMODE_
 * and related values defined above */
struct kgsl_gpumem_alloc {
	unsigned long gpuaddr; /* output param */
	size_t size;
	unsigned int flags;
};

#define IOCTL_KGSL_GPUMEM_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)

/* Sync a memory range into the CFF capture stream */
struct kgsl_cff_syncmem {
	unsigned long gpuaddr;
	size_t len;
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_CFF_SYNCMEM \
	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
631 
632 /*
633  * A timestamp event allows the user space to register an action following an
634  * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
635  * _IOWR to support fences which need to return a fd for the priv parameter.
636  */
637 
638 struct kgsl_timestamp_event {
639 	int type;                /* Type of event (see list below) */
640 	unsigned int timestamp;  /* Timestamp to trigger event on */
641 	unsigned int context_id; /* Context for the timestamp */
642 	void __user *priv;	 /* Pointer to the event specific blob */
643 	size_t len;              /* Size of the event specific blob */
644 };
645 
646 #define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
647 	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)
648 
649 /* A genlock timestamp event releases an existing lock on timestamp expire */
650 
651 #define KGSL_TIMESTAMP_EVENT_GENLOCK 1
652 
653 struct kgsl_timestamp_event_genlock {
654 	int handle; /* Handle of the genlock lock to release */
655 };
656 
657 /* A fence timestamp event releases an existing lock on timestamp expire */
658 
659 #define KGSL_TIMESTAMP_EVENT_FENCE 2
660 
661 struct kgsl_timestamp_event_fence {
662 	int fence_fd; /* Fence to signal */
663 };
664 
665 /*
666  * Set a property within the kernel.  Uses the same structure as
667  * IOCTL_KGSL_GETPROPERTY
668  */
669 
670 #define IOCTL_KGSL_SETPROPERTY \
671 	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)
672 
673 #define IOCTL_KGSL_TIMESTAMP_EVENT \
674 	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
675 
676 /**
677  * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
678  * @id: returned id value for this allocation.
679  * @flags: mask of KGSL_MEM* values requested and actual flags on return.
680  * @size: requested size of the allocation and actual size on return.
681  * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
682  * @gpuaddr: returned GPU address for the allocation
683  *
684  * Allocate memory for access by the GPU. The flags and size fields are echoed
685  * back by the kernel, so that the caller can know if the request was
686  * adjusted.
687  *
688  * Supported flags:
689  * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
690  * KGSL_MEMTYPE*: usage hint for debugging aid
691  * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
692  * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
693  * address will be 0. Calling mmap() will set the GPU address.
694  */
695 struct kgsl_gpumem_alloc_id {
696 	unsigned int id;
697 	unsigned int flags;
698 	size_t size;
699 	size_t mmapsize;
700 	unsigned long gpuaddr;
701 /* private: reserved for future use*/
702 	unsigned long __pad[2];
703 };
704 
705 #define IOCTL_KGSL_GPUMEM_ALLOC_ID \
706 	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
707 
708 /**
709  * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
710  * @id: GPU allocation id to free
711  *
712  * Free an allocation by id, in case a GPU address has not been assigned or
713  * is unknown. Freeing an allocation by id with this ioctl or by GPU address
714  * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
715  */
716 struct kgsl_gpumem_free_id {
717 	unsigned int id;
718 /* private: reserved for future use*/
719 	unsigned int __pad;
720 };
721 
722 #define IOCTL_KGSL_GPUMEM_FREE_ID \
723 	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
724 
725 /**
726  * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
727  * @gpuaddr: GPU address to query. Also set on return.
728  * @id: GPU allocation id to query. Also set on return.
729  * @flags: returned mask of KGSL_MEM* values.
730  * @size: returned size of the allocation.
731  * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
732  * @useraddr: returned address of the userspace mapping for this buffer
733  *
734  * This ioctl allows querying of all user visible attributes of an existing
735  * allocation, by either the GPU address or the id returned by a previous
736  * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
737  * return all attributes so this ioctl can be used to look them up if needed.
738  *
739  */
740 struct kgsl_gpumem_get_info {
741 	unsigned long gpuaddr;
742 	unsigned int id;
743 	unsigned int flags;
744 	size_t size;
745 	size_t mmapsize;
746 	unsigned long useraddr;
747 /* private: reserved for future use*/
748 	unsigned long __pad[4];
749 };
750 
751 #define IOCTL_KGSL_GPUMEM_GET_INFO\
752 	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
753 
754 /**
755  * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
756  * @gpuaddr: GPU address of the buffer to sync.
757  * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
758  * @op: a mask of KGSL_GPUMEM_CACHE_* values
759  * @offset: offset into the buffer
760  * @length: number of bytes starting from offset to perform
761  * the cache operation on
762  *
763  * Sync the L2 cache for memory headed to and from the GPU - this replaces
764  * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
765  * directions
766  *
767  */
768 struct kgsl_gpumem_sync_cache {
769 	unsigned long gpuaddr;
770 	unsigned int id;
771 	unsigned int op;
772 	size_t offset;
773 	size_t length;
774 };
775 
776 #define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
777 #define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN
778 
779 #define KGSL_GPUMEM_CACHE_INV (1 << 1)
780 #define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV
781 
782 #define KGSL_GPUMEM_CACHE_FLUSH \
783 	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)
784 
/* Flag to ensure backwards compatibility of kgsl_gpumem_sync_cache struct */
/*
 * Shift an unsigned literal: the previous (1 << 31U) left-shifted a signed
 * int into its sign bit, which is undefined behavior in C (C11 6.5.7).
 * 1U << 31 produces the identical bit pattern (0x80000000) without UB,
 * so the ABI value is unchanged.
 */
#define KGSL_GPUMEM_CACHE_RANGE (1U << 31)
787 
/* Perform a cache maintenance operation on a single GPU buffer */
#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
790 
791 /**
792  * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
793  * @groupid: Performance counter group ID
794  * @countable: Countable to select within the group
795  * @offset: Return offset of the reserved LO counter
796  * @offset_hi: Return offset of the reserved HI counter
797  *
798  * Get an available performance counter from a specified groupid.  The offset
799  * of the performance counter will be returned after successfully assigning
800  * the countable to the counter for the specified group.  An error will be
801  * returned and an offset of 0 if the groupid is invalid or there are no
802  * more counters left.  After successfully getting a perfcounter, the user
803  * must call kgsl_perfcounter_put(groupid, contable) when finished with
804  * the perfcounter to clear up perfcounter resources.
805  *
806  */
807 struct kgsl_perfcounter_get {
808 	unsigned int groupid;
809 	unsigned int countable;
810 	unsigned int offset;
811 	unsigned int offset_hi;
812 /* private: reserved for future use */
813 	unsigned int __pad; /* For future binary compatibility */
814 };
815 
816 #define IOCTL_KGSL_PERFCOUNTER_GET \
817 	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
818 
819 /**
820  * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
821  * @groupid: Performance counter group ID
822  * @countable: Countable to release within the group
823  *
824  * Put an allocated performance counter to allow others to have access to the
825  * resource that was previously taken.  This is only to be called after
826  * successfully getting a performance counter from kgsl_perfcounter_get().
827  *
828  */
829 struct kgsl_perfcounter_put {
830 	unsigned int groupid;
831 	unsigned int countable;
832 /* private: reserved for future use */
833 	unsigned int __pad[2]; /* For future binary compatibility */
834 };
835 
836 #define IOCTL_KGSL_PERFCOUNTER_PUT \
837 	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
838 
839 /**
840  * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
841  * @groupid: Performance counter group ID
842  * @countable: Return active countables array
843  * @size: Size of active countables array
844  * @max_counters: Return total number counters for the group ID
845  *
846  * Query the available performance counters given a groupid.  The array
847  * *countables is used to return the current active countables in counters.
848  * The size of the array is passed in so the kernel will only write at most
849  * size or counter->size for the group id.  The total number of available
850  * counters for the group ID is returned in max_counters.
851  * If the array or size passed in are invalid, then only the maximum number
852  * of counters will be returned, no data will be written to *countables.
853  * If the groupid is invalid an error code will be returned.
854  *
855  */
856 struct kgsl_perfcounter_query {
857 	unsigned int groupid;
858 	/* Array to return the current countable for up to size counters */
859 	unsigned int __user *countables;
860 	unsigned int count;
861 	unsigned int max_counters;
862 /* private: reserved for future use */
863 	unsigned int __pad[2]; /* For future binary compatibility */
864 };
865 
866 #define IOCTL_KGSL_PERFCOUNTER_QUERY \
867 	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
868 
869 /**
870  * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
871  * @groupid: Performance counter group IDs
872  * @countable: Performance counter countable IDs
873  * @value: Return performance counter reads
874  * @size: Size of all arrays (groupid/countable pair and return value)
875  *
876  * Read in the current value of a performance counter given by the groupid
877  * and countable.
878  *
879  */
880 
881 struct kgsl_perfcounter_read_group {
882 	unsigned int groupid;
883 	unsigned int countable;
884 	unsigned long long value;
885 };
886 
887 struct kgsl_perfcounter_read {
888 	struct kgsl_perfcounter_read_group __user *reads;
889 	unsigned int count;
890 /* private: reserved for future use */
891 	unsigned int __pad[2]; /* For future binary compatibility */
892 };
893 
894 #define IOCTL_KGSL_PERFCOUNTER_READ \
895 	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
896 /*
897  * struct kgsl_gpumem_sync_cache_bulk - argument to
898  * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
899  * @id_list: list of GPU buffer ids of the buffers to sync
900  * @count: number of GPU buffer ids in id_list
901  * @op: a mask of KGSL_GPUMEM_CACHE_* values
902  *
903  * Sync the cache for memory headed to and from the GPU. Certain
904  * optimizations can be made on the cache operation based on the total
905  * size of the working set of memory to be managed.
906  */
struct kgsl_gpumem_sync_cache_bulk {
	unsigned int __user *id_list;	/* in: GPU buffer ids to sync */
	unsigned int count;		/* in: number of ids in id_list */
	unsigned int op;		/* in: mask of KGSL_GPUMEM_CACHE_* values */
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
917 
918 /*
919  * struct kgsl_cmd_syncpoint_timestamp
920  * @context_id: ID of a KGSL context
921  * @timestamp: GPU timestamp
922  *
923  * This structure defines a syncpoint comprising a context/timestamp pair. A
924  * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
925  * dependencies that must be met before the command can be submitted to the
926  * hardware
927  */
/* Payload for a KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP sync point */
struct kgsl_cmd_syncpoint_timestamp {
	unsigned int context_id;	/* ID of a KGSL context */
	unsigned int timestamp;		/* GPU timestamp to wait for */
};

#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
934 
/* Payload for a KGSL_CMD_SYNCPOINT_TYPE_FENCE sync point */
struct kgsl_cmd_syncpoint_fence {
	int fd;		/* sync fence file descriptor to wait on */
};

#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
940 
941 /**
942  * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
943  * @type: type of sync point defined here
944  * @priv: Pointer to the type specific buffer
945  * @size: Size of the type specific buffer
946  *
947  * This structure contains pointers defining a specific command sync point.
948  * The pointer and size should point to a type appropriate structure.
949  */
struct kgsl_cmd_syncpoint {
	int type;		/* one of KGSL_CMD_SYNCPOINT_TYPE_* */
	void __user *priv;	/* pointer to the type-specific struct */
	size_t size;		/* size of the type-specific struct */
};
955 
/* Flags describing the contents of a cmdlist (see IOCTL_KGSL_SUBMIT_COMMANDS) */

/* Flag to indicate that the cmdlist may contain memlists */
#define KGSL_IBDESC_MEMLIST 0x1

/* Flag to point out the cmdbatch profiling buffer in the memlist */
#define KGSL_IBDESC_PROFILING_BUFFER 0x2
961 
962 /**
963  * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
964  * @context_id: KGSL context ID that owns the commands
 * @flags: KGSL_CONTEXT_* flags controlling how the command batch is handled
 * @cmdlist: User pointer to a list of kgsl_ibdesc structures
 * @numcmds: Number of commands listed in cmdlist
 * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
 * @numsyncs: Number of sync points listed in synclist
 * @timestamp: On entry, a user defined timestamp; on exit, the timestamp
 * assigned to the command batch
 *
 * This structure specifies a command to send to the GPU hardware.  This is
 * similar to kgsl_issueibcmds except that it doesn't support the legacy way to
 * submit IB lists and it adds sync points to block the IB until the
 * dependencies are satisfied.  This entry point is the new and preferred way
 * to submit commands to the GPU. The memory list can be used to specify all
 * memory that is referenced in the current set of commands.
979  */
980 
struct kgsl_submit_commands {
	unsigned int context_id;	/* KGSL context that owns the commands */
	unsigned int flags;
	struct kgsl_ibdesc __user *cmdlist;	/* in: list of kgsl_ibdesc */
	unsigned int numcmds;		/* in: entries in cmdlist */
	struct kgsl_cmd_syncpoint __user *synclist; /* in: sync points to wait on */
	unsigned int numsyncs;		/* in: entries in synclist */
	/* in: user defined timestamp; out: timestamp assigned to the batch */
	unsigned int timestamp;
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SUBMIT_COMMANDS \
	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
995 
/**
 * struct kgsl_device_constraint - device constraint argument
 * @type: type of constraint, i.e. pwrlevel/none
 * @context_id: KGSL context ID
 * @data: constraint data
 * @size: size of the constraint data
 */
struct kgsl_device_constraint {
	unsigned int type;		/* one of KGSL_CONSTRAINT_* */
	unsigned int context_id;	/* KGSL context the constraint applies to */
	void __user *data;		/* type-specific constraint data */
	size_t size;			/* size of the data buffer */
};

/* Constraint Type*/
#define KGSL_CONSTRAINT_NONE 0
#define KGSL_CONSTRAINT_PWRLEVEL 1

/* PWRLEVEL constraint level*/
/* set to min frequency */
#define KGSL_CONSTRAINT_PWR_MIN    0
/* set to max frequency */
#define KGSL_CONSTRAINT_PWR_MAX    1

/* Data payload for a KGSL_CONSTRAINT_PWRLEVEL constraint */
struct kgsl_device_constraint_pwrlevel {
	unsigned int level;	/* KGSL_CONSTRAINT_PWR_MIN or KGSL_CONSTRAINT_PWR_MAX */
};
1023 
1024 /**
1025  * struct kgsl_syncsource_create - Argument to IOCTL_KGSL_SYNCSOURCE_CREATE
1026  * @id: returned id for the syncsource that was created.
1027  *
1028  * This ioctl creates a userspace sync timeline.
1029  */
1030 
struct kgsl_syncsource_create {
	unsigned int id;	/* out: id of the newly created syncsource */
/* private: reserved for future use */
	unsigned int __pad[3];
};

#define IOCTL_KGSL_SYNCSOURCE_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)
1039 
1040 /**
1041  * struct kgsl_syncsource_destroy - Argument to IOCTL_KGSL_SYNCSOURCE_DESTROY
1042  * @id: syncsource id to destroy
1043  *
 * This ioctl destroys a previously created userspace sync timeline.
1045  */
1046 
struct kgsl_syncsource_destroy {
	unsigned int id;	/* in: id of the syncsource to destroy */
/* private: reserved for future use */
	unsigned int __pad[3];
};

#define IOCTL_KGSL_SYNCSOURCE_DESTROY \
	_IOWR(KGSL_IOC_TYPE, 0x41, struct kgsl_syncsource_destroy)
1055 
1056 /**
1057  * struct kgsl_syncsource_create_fence - Argument to
1058  *     IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1059  * @id: syncsource id
1060  * @fence_fd: returned sync_fence fd
1061  *
1062  * Create a fence that may be signaled by userspace by calling
1063  * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE. There are no order dependencies between
1064  * these fences.
1065  */
struct kgsl_syncsource_create_fence {
	unsigned int id;	/* in: syncsource id to create the fence on */
	int fence_fd;		/* out: returned sync_fence fd */
/* private: reserved for future use */
	unsigned int __pad[4];
};
1072 
1073 /**
1074  * struct kgsl_syncsource_signal_fence - Argument to
1075  *     IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
1076  * @id: syncsource id
1077  * @fence_fd: sync_fence fd to signal
1078  *
1079  * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1080  * call using the same syncsource id. This allows a fence to be shared
1081  * to other processes but only signaled by the process owning the fd
1082  * used to create the fence.
1083  */
#define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
	_IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)

struct kgsl_syncsource_signal_fence {
	unsigned int id;	/* in: syncsource id that owns the fence */
	int fence_fd;		/* in: sync_fence fd to signal */
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE \
	_IOWR(KGSL_IOC_TYPE, 0x43, struct kgsl_syncsource_signal_fence)
1096 
1097 #endif /* _UAPI_MSM_KGSL_H */
1098