1 /* 2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the 7 * "Software"), to deal in the Software without restriction, including 8 * without limitation the rights to use, copy, modify, merge, publish, 9 * distribute, sub license, and/or sell copies of the Software, and to 10 * permit persons to whom the Software is furnished to do so, subject to 11 * the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the 14 * next paragraph) shall be included in all copies or substantial portions 15 * of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 * 25 */ 26 27 #ifndef _I915_DRM_H_ 28 #define _I915_DRM_H_ 29 30 #include "drm.h" 31 32 #if defined(__cplusplus) 33 extern "C" { 34 #endif 35 36 /* Please note that modifications to all structs defined here are 37 * subject to backwards-compatibility constraints. 38 */ 39 40 /** 41 * DOC: uevents generated by i915 on it's device node 42 * 43 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch 44 * event from the gpu l3 cache. Additional information supplied is ROW, 45 * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep 46 * track of these events and if a specific cache-line seems to have a 47 * persistent error remap it with the l3 remapping tool supplied in 48 * intel-gpu-tools. The value supplied with the event is always 1. 49 * 50 * I915_ERROR_UEVENT - Generated upon error detection, currently only via 51 * hangcheck. The error detection event is a good indicator of when things 52 * began to go badly. The value supplied with the event is a 1 upon error 53 * detection, and a 0 upon reset completion, signifying no more error 54 * exists. NOTE: Disabling hangcheck or reset via module parameter will 55 * cause the related events to not be seen. 56 * 57 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the 58 * the GPU. The value supplied with the event is always 1. NOTE: Disable 59 * reset via module parameter will cause this event to not be seen. 60 */ 61 #define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR" 62 #define I915_ERROR_UEVENT "ERROR" 63 #define I915_RESET_UEVENT "RESET" 64 65 /* 66 * MOCS indexes used for GPU surfaces, defining the cacheability of the 67 * surface data and the coherency for this data wrt. CPU vs. GPU accesses. 68 */ 69 enum i915_mocs_table_index { 70 /* 71 * Not cached anywhere, coherency between CPU and GPU accesses is 72 * guaranteed. 73 */ 74 I915_MOCS_UNCACHED, 75 /* 76 * Cacheability and coherency controlled by the kernel automatically 77 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current 78 * usage of the surface (used for display scanout or not). 79 */ 80 I915_MOCS_PTE, 81 /* 82 * Cached in all GPU caches available on the platform. 83 * Coherency between CPU and GPU accesses to the surface is not 84 * guaranteed without extra synchronization. 85 */ 86 I915_MOCS_CACHED, 87 }; 88 89 /* 90 * Different engines serve different roles, and there may be more than one 91 * engine serving each role. enum drm_i915_gem_engine_class provides a 92 * classification of the role of the engine, which may be used when requesting 93 * operations to be performed on a certain subset of engines, or for providing 94 * information about that group. 95 */ 96 enum drm_i915_gem_engine_class { 97 I915_ENGINE_CLASS_RENDER = 0, 98 I915_ENGINE_CLASS_COPY = 1, 99 I915_ENGINE_CLASS_VIDEO = 2, 100 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3, 101 102 I915_ENGINE_CLASS_INVALID = -1 103 }; 104 105 /* Each region is a minimum of 16k, and there are at most 255 of them. 106 */ 107 #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 108 * of chars for next/prev indices */ 109 #define I915_LOG_MIN_TEX_REGION_SIZE 14 110 111 typedef struct _drm_i915_init { 112 enum { 113 I915_INIT_DMA = 0x01, 114 I915_CLEANUP_DMA = 0x02, 115 I915_RESUME_DMA = 0x03 116 } func; 117 unsigned int mmio_offset; 118 int sarea_priv_offset; 119 unsigned int ring_start; 120 unsigned int ring_end; 121 unsigned int ring_size; 122 unsigned int front_offset; 123 unsigned int back_offset; 124 unsigned int depth_offset; 125 unsigned int w; 126 unsigned int h; 127 unsigned int pitch; 128 unsigned int pitch_bits; 129 unsigned int back_pitch; 130 unsigned int depth_pitch; 131 unsigned int cpp; 132 unsigned int chipset; 133 } drm_i915_init_t; 134 135 typedef struct _drm_i915_sarea { 136 struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; 137 int last_upload; /* last time texture was uploaded */ 138 int last_enqueue; /* last time a buffer was enqueued */ 139 int last_dispatch; /* age of the most recently dispatched buffer */ 140 int ctxOwner; /* last context to upload state */ 141 int texAge; 142 int pf_enabled; /* is pageflipping allowed? */ 143 int pf_active; 144 int pf_current_page; /* which buffer is being displayed? */ 145 int perf_boxes; /* performance boxes to be displayed */ 146 int width, height; /* screen size in pixels */ 147 148 drm_handle_t front_handle; 149 int front_offset; 150 int front_size; 151 152 drm_handle_t back_handle; 153 int back_offset; 154 int back_size; 155 156 drm_handle_t depth_handle; 157 int depth_offset; 158 int depth_size; 159 160 drm_handle_t tex_handle; 161 int tex_offset; 162 int tex_size; 163 int log_tex_granularity; 164 int pitch; 165 int rotation; /* 0, 90, 180 or 270 */ 166 int rotated_offset; 167 int rotated_size; 168 int rotated_pitch; 169 int virtualX, virtualY; 170 171 unsigned int front_tiled; 172 unsigned int back_tiled; 173 unsigned int depth_tiled; 174 unsigned int rotated_tiled; 175 unsigned int rotated2_tiled; 176 177 int pipeA_x; 178 int pipeA_y; 179 int pipeA_w; 180 int pipeA_h; 181 int pipeB_x; 182 int pipeB_y; 183 int pipeB_w; 184 int pipeB_h; 185 186 /* fill out some space for old userspace triple buffer */ 187 drm_handle_t unused_handle; 188 __u32 unused1, unused2, unused3; 189 190 /* buffer object handles for static buffers. May change 191 * over the lifetime of the client. 192 */ 193 __u32 front_bo_handle; 194 __u32 back_bo_handle; 195 __u32 unused_bo_handle; 196 __u32 depth_bo_handle; 197 198 } drm_i915_sarea_t; 199 200 /* due to userspace building against these headers we need some compat here */ 201 #define planeA_x pipeA_x 202 #define planeA_y pipeA_y 203 #define planeA_w pipeA_w 204 #define planeA_h pipeA_h 205 #define planeB_x pipeB_x 206 #define planeB_y pipeB_y 207 #define planeB_w pipeB_w 208 #define planeB_h pipeB_h 209 210 /* Flags for perf_boxes 211 */ 212 #define I915_BOX_RING_EMPTY 0x1 213 #define I915_BOX_FLIP 0x2 214 #define I915_BOX_WAIT 0x4 215 #define I915_BOX_TEXTURE_LOAD 0x8 216 #define I915_BOX_LOST_CONTEXT 0x10 217 218 /* 219 * i915 specific ioctls. 220 * 221 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie 222 * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset 223 * against DRM_COMMAND_BASE and should be between [0x0, 0x60). 224 */ 225 #define DRM_I915_INIT 0x00 226 #define DRM_I915_FLUSH 0x01 227 #define DRM_I915_FLIP 0x02 228 #define DRM_I915_BATCHBUFFER 0x03 229 #define DRM_I915_IRQ_EMIT 0x04 230 #define DRM_I915_IRQ_WAIT 0x05 231 #define DRM_I915_GETPARAM 0x06 232 #define DRM_I915_SETPARAM 0x07 233 #define DRM_I915_ALLOC 0x08 234 #define DRM_I915_FREE 0x09 235 #define DRM_I915_INIT_HEAP 0x0a 236 #define DRM_I915_CMDBUFFER 0x0b 237 #define DRM_I915_DESTROY_HEAP 0x0c 238 #define DRM_I915_SET_VBLANK_PIPE 0x0d 239 #define DRM_I915_GET_VBLANK_PIPE 0x0e 240 #define DRM_I915_VBLANK_SWAP 0x0f 241 #define DRM_I915_HWS_ADDR 0x11 242 #define DRM_I915_GEM_INIT 0x13 243 #define DRM_I915_GEM_EXECBUFFER 0x14 244 #define DRM_I915_GEM_PIN 0x15 245 #define DRM_I915_GEM_UNPIN 0x16 246 #define DRM_I915_GEM_BUSY 0x17 247 #define DRM_I915_GEM_THROTTLE 0x18 248 #define DRM_I915_GEM_ENTERVT 0x19 249 #define DRM_I915_GEM_LEAVEVT 0x1a 250 #define DRM_I915_GEM_CREATE 0x1b 251 #define DRM_I915_GEM_PREAD 0x1c 252 #define DRM_I915_GEM_PWRITE 0x1d 253 #define DRM_I915_GEM_MMAP 0x1e 254 #define DRM_I915_GEM_SET_DOMAIN 0x1f 255 #define DRM_I915_GEM_SW_FINISH 0x20 256 #define DRM_I915_GEM_SET_TILING 0x21 257 #define DRM_I915_GEM_GET_TILING 0x22 258 #define DRM_I915_GEM_GET_APERTURE 0x23 259 #define DRM_I915_GEM_MMAP_GTT 0x24 260 #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 261 #define DRM_I915_GEM_MADVISE 0x26 262 #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 263 #define DRM_I915_OVERLAY_ATTRS 0x28 264 #define DRM_I915_GEM_EXECBUFFER2 0x29 265 #define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2 266 #define DRM_I915_GET_SPRITE_COLORKEY 0x2a 267 #define DRM_I915_SET_SPRITE_COLORKEY 0x2b 268 #define DRM_I915_GEM_WAIT 0x2c 269 #define DRM_I915_GEM_CONTEXT_CREATE 0x2d 270 #define DRM_I915_GEM_CONTEXT_DESTROY 0x2e 271 #define DRM_I915_GEM_SET_CACHING 0x2f 272 #define DRM_I915_GEM_GET_CACHING 0x30 273 #define DRM_I915_REG_READ 0x31 274 #define DRM_I915_GET_RESET_STATS 0x32 275 #define DRM_I915_GEM_USERPTR 0x33 276 #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 277 #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 278 #define DRM_I915_PERF_OPEN 0x36 279 #define DRM_I915_PERF_ADD_CONFIG 0x37 280 #define DRM_I915_PERF_REMOVE_CONFIG 0x38 281 282 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 283 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 284 #define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) 285 #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) 286 #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) 287 #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) 288 #define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) 289 #define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) 290 #define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) 291 #define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) 292 #define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) 293 #define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) 294 #define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) 295 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 296 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 297 #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 298 #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) 299 #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 300 #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 301 #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) 302 #define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2) 303 #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 304 #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 305 #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 306 #define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching) 307 #define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching) 308 #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) 309 #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) 310 #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) 311 #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) 312 #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) 313 #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) 314 #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) 315 #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) 316 #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) 317 #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) 318 #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) 319 #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 320 #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) 321 #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) 322 #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) 323 #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) 324 #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) 325 #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 326 #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 327 #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) 328 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) 329 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) 330 #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) 331 #define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) 332 #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) 333 #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) 334 #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) 335 #define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) 336 #define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config) 337 #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64) 338 339 /* Allow drivers to submit batchbuffers directly to hardware, relying 340 * on the security mechanisms provided by hardware. 341 */ 342 typedef struct drm_i915_batchbuffer { 343 int start; /* agp offset */ 344 int used; /* nr bytes in use */ 345 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 346 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 347 int num_cliprects; /* mulitpass with multiple cliprects? */ 348 struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */ 349 } drm_i915_batchbuffer_t; 350 351 /* As above, but pass a pointer to userspace buffer which can be 352 * validated by the kernel prior to sending to hardware. 353 */ 354 typedef struct _drm_i915_cmdbuffer { 355 char *buf; /* pointer to userspace command buffer */ 356 int sz; /* nr bytes in buf */ 357 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 358 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 359 int num_cliprects; /* mulitpass with multiple cliprects? */ 360 struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */ 361 } drm_i915_cmdbuffer_t; 362 363 /* Userspace can request & wait on irq's: 364 */ 365 typedef struct drm_i915_irq_emit { 366 int *irq_seq; 367 } drm_i915_irq_emit_t; 368 369 typedef struct drm_i915_irq_wait { 370 int irq_seq; 371 } drm_i915_irq_wait_t; 372 373 /* Ioctl to query kernel params: 374 */ 375 #define I915_PARAM_IRQ_ACTIVE 1 376 #define I915_PARAM_ALLOW_BATCHBUFFER 2 377 #define I915_PARAM_LAST_DISPATCH 3 378 #define I915_PARAM_CHIPSET_ID 4 379 #define I915_PARAM_HAS_GEM 5 380 #define I915_PARAM_NUM_FENCES_AVAIL 6 381 #define I915_PARAM_HAS_OVERLAY 7 382 #define I915_PARAM_HAS_PAGEFLIPPING 8 383 #define I915_PARAM_HAS_EXECBUF2 9 384 #define I915_PARAM_HAS_BSD 10 385 #define I915_PARAM_HAS_BLT 11 386 #define I915_PARAM_HAS_RELAXED_FENCING 12 387 #define I915_PARAM_HAS_COHERENT_RINGS 13 388 #define I915_PARAM_HAS_EXEC_CONSTANTS 14 389 #define I915_PARAM_HAS_RELAXED_DELTA 15 390 #define I915_PARAM_HAS_GEN7_SOL_RESET 16 391 #define I915_PARAM_HAS_LLC 17 392 #define I915_PARAM_HAS_ALIASING_PPGTT 18 393 #define I915_PARAM_HAS_WAIT_TIMEOUT 19 394 #define I915_PARAM_HAS_SEMAPHORES 20 395 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 396 #define I915_PARAM_HAS_VEBOX 22 397 #define I915_PARAM_HAS_SECURE_BATCHES 23 398 #define I915_PARAM_HAS_PINNED_BATCHES 24 399 #define I915_PARAM_HAS_EXEC_NO_RELOC 25 400 #define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 401 #define I915_PARAM_HAS_WT 27 402 #define I915_PARAM_CMD_PARSER_VERSION 28 403 #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29 404 #define I915_PARAM_MMAP_VERSION 30 405 #define I915_PARAM_HAS_BSD2 31 406 #define I915_PARAM_REVISION 32 407 #define I915_PARAM_SUBSLICE_TOTAL 33 408 #define I915_PARAM_EU_TOTAL 34 409 #define I915_PARAM_HAS_GPU_RESET 35 410 #define I915_PARAM_HAS_RESOURCE_STREAMER 36 411 #define I915_PARAM_HAS_EXEC_SOFTPIN 37 412 #define I915_PARAM_HAS_POOLED_EU 38 413 #define I915_PARAM_MIN_EU_IN_POOL 39 414 #define I915_PARAM_MMAP_GTT_VERSION 40 415 416 /* 417 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution 418 * priorities and the driver will attempt to execute batches in priority order. 419 * The param returns a capability bitmask, nonzero implies that the scheduler 420 * is enabled, with different features present according to the mask. 421 * 422 * The initial priority for each batch is supplied by the context and is 423 * controlled via I915_CONTEXT_PARAM_PRIORITY. 424 */ 425 #define I915_PARAM_HAS_SCHEDULER 41 426 #define I915_SCHEDULER_CAP_ENABLED (1ul << 0) 427 #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) 428 #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) 429 430 #define I915_PARAM_HUC_STATUS 42 431 432 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of 433 * synchronisation with implicit fencing on individual objects. 434 * See EXEC_OBJECT_ASYNC. 435 */ 436 #define I915_PARAM_HAS_EXEC_ASYNC 43 437 438 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support - 439 * both being able to pass in a sync_file fd to wait upon before executing, 440 * and being able to return a new sync_file fd that is signaled when the 441 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT. 442 */ 443 #define I915_PARAM_HAS_EXEC_FENCE 44 444 445 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture 446 * user specified bufffers for post-mortem debugging of GPU hangs. See 447 * EXEC_OBJECT_CAPTURE. 448 */ 449 #define I915_PARAM_HAS_EXEC_CAPTURE 45 450 451 #define I915_PARAM_SLICE_MASK 46 452 453 /* Assuming it's uniform for each slice, this queries the mask of subslices 454 * per-slice for this system. 455 */ 456 #define I915_PARAM_SUBSLICE_MASK 47 457 458 /* 459 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer 460 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST. 461 */ 462 #define I915_PARAM_HAS_EXEC_BATCH_FIRST 48 463 464 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of 465 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY. 466 */ 467 #define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49 468 469 /* 470 * Query whether every context (both per-file default and user created) is 471 * isolated (insofar as HW supports). If this parameter is not true, then 472 * freshly created contexts may inherit values from an existing context, 473 * rather than default HW values. If true, it also ensures (insofar as HW 474 * supports) that all state set by this context will not leak to any other 475 * context. 476 * 477 * As not every engine across every gen support contexts, the returned 478 * value reports the support of context isolation for individual engines by 479 * returning a bitmask of each engine class set to true if that class supports 480 * isolation. 481 */ 482 #define I915_PARAM_HAS_CONTEXT_ISOLATION 50 483 484 /* Frequency of the command streamer timestamps given by the *_TIMESTAMP 485 * registers. This used to be fixed per platform but from CNL onwards, this 486 * might vary depending on the parts. 487 */ 488 #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51 489 490 typedef struct drm_i915_getparam { 491 __s32 param; 492 /* 493 * WARNING: Using pointers instead of fixed-size u64 means we need to write 494 * compat32 code. Don't repeat this mistake. 495 */ 496 int *value; 497 } drm_i915_getparam_t; 498 499 /* Ioctl to set kernel params: 500 */ 501 #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 502 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 503 #define I915_SETPARAM_ALLOW_BATCHBUFFER 3 504 #define I915_SETPARAM_NUM_USED_FENCES 4 505 506 typedef struct drm_i915_setparam { 507 int param; 508 int value; 509 } drm_i915_setparam_t; 510 511 /* A memory manager for regions of shared memory: 512 */ 513 #define I915_MEM_REGION_AGP 1 514 515 typedef struct drm_i915_mem_alloc { 516 int region; 517 int alignment; 518 int size; 519 int *region_offset; /* offset from start of fb or agp */ 520 } drm_i915_mem_alloc_t; 521 522 typedef struct drm_i915_mem_free { 523 int region; 524 int region_offset; 525 } drm_i915_mem_free_t; 526 527 typedef struct drm_i915_mem_init_heap { 528 int region; 529 int size; 530 int start; 531 } drm_i915_mem_init_heap_t; 532 533 /* Allow memory manager to be torn down and re-initialized (eg on 534 * rotate): 535 */ 536 typedef struct drm_i915_mem_destroy_heap { 537 int region; 538 } drm_i915_mem_destroy_heap_t; 539 540 /* Allow X server to configure which pipes to monitor for vblank signals 541 */ 542 #define DRM_I915_VBLANK_PIPE_A 1 543 #define DRM_I915_VBLANK_PIPE_B 2 544 545 typedef struct drm_i915_vblank_pipe { 546 int pipe; 547 } drm_i915_vblank_pipe_t; 548 549 /* Schedule buffer swap at given vertical blank: 550 */ 551 typedef struct drm_i915_vblank_swap { 552 drm_drawable_t drawable; 553 enum drm_vblank_seq_type seqtype; 554 unsigned int sequence; 555 } drm_i915_vblank_swap_t; 556 557 typedef struct drm_i915_hws_addr { 558 __u64 addr; 559 } drm_i915_hws_addr_t; 560 561 struct drm_i915_gem_init { 562 /** 563 * Beginning offset in the GTT to be managed by the DRM memory 564 * manager. 565 */ 566 __u64 gtt_start; 567 /** 568 * Ending offset in the GTT to be managed by the DRM memory 569 * manager. 570 */ 571 __u64 gtt_end; 572 }; 573 574 struct drm_i915_gem_create { 575 /** 576 * Requested size for the object. 577 * 578 * The (page-aligned) allocated size for the object will be returned. 579 */ 580 __u64 size; 581 /** 582 * Returned handle for the object. 583 * 584 * Object handles are nonzero. 585 */ 586 __u32 handle; 587 __u32 pad; 588 }; 589 590 struct drm_i915_gem_pread { 591 /** Handle for the object being read. */ 592 __u32 handle; 593 __u32 pad; 594 /** Offset into the object to read from */ 595 __u64 offset; 596 /** Length of data to read */ 597 __u64 size; 598 /** 599 * Pointer to write the data into. 600 * 601 * This is a fixed-size type for 32/64 compatibility. 602 */ 603 __u64 data_ptr; 604 }; 605 606 struct drm_i915_gem_pwrite { 607 /** Handle for the object being written to. */ 608 __u32 handle; 609 __u32 pad; 610 /** Offset into the object to write to */ 611 __u64 offset; 612 /** Length of data to write */ 613 __u64 size; 614 /** 615 * Pointer to read the data from. 616 * 617 * This is a fixed-size type for 32/64 compatibility. 618 */ 619 __u64 data_ptr; 620 }; 621 622 struct drm_i915_gem_mmap { 623 /** Handle for the object being mapped. */ 624 __u32 handle; 625 __u32 pad; 626 /** Offset in the object to map. */ 627 __u64 offset; 628 /** 629 * Length of data to map. 630 * 631 * The value will be page-aligned. 632 */ 633 __u64 size; 634 /** 635 * Returned pointer the data was mapped at. 636 * 637 * This is a fixed-size type for 32/64 compatibility. 638 */ 639 __u64 addr_ptr; 640 641 /** 642 * Flags for extended behaviour. 643 * 644 * Added in version 2. 645 */ 646 __u64 flags; 647 #define I915_MMAP_WC 0x1 648 }; 649 650 struct drm_i915_gem_mmap_gtt { 651 /** Handle for the object being mapped. */ 652 __u32 handle; 653 __u32 pad; 654 /** 655 * Fake offset to use for subsequent mmap call 656 * 657 * This is a fixed-size type for 32/64 compatibility. 658 */ 659 __u64 offset; 660 }; 661 662 struct drm_i915_gem_set_domain { 663 /** Handle for the object */ 664 __u32 handle; 665 666 /** New read domains */ 667 __u32 read_domains; 668 669 /** New write domain */ 670 __u32 write_domain; 671 }; 672 673 struct drm_i915_gem_sw_finish { 674 /** Handle for the object */ 675 __u32 handle; 676 }; 677 678 struct drm_i915_gem_relocation_entry { 679 /** 680 * Handle of the buffer being pointed to by this relocation entry. 681 * 682 * It's appealing to make this be an index into the mm_validate_entry 683 * list to refer to the buffer, but this allows the driver to create 684 * a relocation list for state buffers and not re-write it per 685 * exec using the buffer. 686 */ 687 __u32 target_handle; 688 689 /** 690 * Value to be added to the offset of the target buffer to make up 691 * the relocation entry. 692 */ 693 __u32 delta; 694 695 /** Offset in the buffer the relocation entry will be written into */ 696 __u64 offset; 697 698 /** 699 * Offset value of the target buffer that the relocation entry was last 700 * written as. 701 * 702 * If the buffer has the same offset as last time, we can skip syncing 703 * and writing the relocation. This value is written back out by 704 * the execbuffer ioctl when the relocation is written. 705 */ 706 __u64 presumed_offset; 707 708 /** 709 * Target memory domains read by this operation. 710 */ 711 __u32 read_domains; 712 713 /** 714 * Target memory domains written by this operation. 715 * 716 * Note that only one domain may be written by the whole 717 * execbuffer operation, so that where there are conflicts, 718 * the application will get -EINVAL back. 719 */ 720 __u32 write_domain; 721 }; 722 723 /** @{ 724 * Intel memory domains 725 * 726 * Most of these just align with the various caches in 727 * the system and are used to flush and invalidate as 728 * objects end up cached in different domains. 729 */ 730 /** CPU cache */ 731 #define I915_GEM_DOMAIN_CPU 0x00000001 732 /** Render cache, used by 2D and 3D drawing */ 733 #define I915_GEM_DOMAIN_RENDER 0x00000002 734 /** Sampler cache, used by texture engine */ 735 #define I915_GEM_DOMAIN_SAMPLER 0x00000004 736 /** Command queue, used to load batch buffers */ 737 #define I915_GEM_DOMAIN_COMMAND 0x00000008 738 /** Instruction cache, used by shader programs */ 739 #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 740 /** Vertex address cache */ 741 #define I915_GEM_DOMAIN_VERTEX 0x00000020 742 /** GTT domain - aperture and scanout */ 743 #define I915_GEM_DOMAIN_GTT 0x00000040 744 /** WC domain - uncached access */ 745 #define I915_GEM_DOMAIN_WC 0x00000080 746 /** @} */ 747 748 struct drm_i915_gem_exec_object { 749 /** 750 * User's handle for a buffer to be bound into the GTT for this 751 * operation. 752 */ 753 __u32 handle; 754 755 /** Number of relocations to be performed on this buffer */ 756 __u32 relocation_count; 757 /** 758 * Pointer to array of struct drm_i915_gem_relocation_entry containing 759 * the relocations to be performed in this buffer. 760 */ 761 __u64 relocs_ptr; 762 763 /** Required alignment in graphics aperture */ 764 __u64 alignment; 765 766 /** 767 * Returned value of the updated offset of the object, for future 768 * presumed_offset writes. 769 */ 770 __u64 offset; 771 }; 772 773 struct drm_i915_gem_execbuffer { 774 /** 775 * List of buffers to be validated with their relocations to be 776 * performend on them. 777 * 778 * This is a pointer to an array of struct drm_i915_gem_validate_entry. 779 * 780 * These buffers must be listed in an order such that all relocations 781 * a buffer is performing refer to buffers that have already appeared 782 * in the validate list. 783 */ 784 __u64 buffers_ptr; 785 __u32 buffer_count; 786 787 /** Offset in the batchbuffer to start execution from. */ 788 __u32 batch_start_offset; 789 /** Bytes used in batchbuffer from batch_start_offset */ 790 __u32 batch_len; 791 __u32 DR1; 792 __u32 DR4; 793 __u32 num_cliprects; 794 /** This is a struct drm_clip_rect *cliprects */ 795 __u64 cliprects_ptr; 796 }; 797 798 struct drm_i915_gem_exec_object2 { 799 /** 800 * User's handle for a buffer to be bound into the GTT for this 801 * operation. 802 */ 803 __u32 handle; 804 805 /** Number of relocations to be performed on this buffer */ 806 __u32 relocation_count; 807 /** 808 * Pointer to array of struct drm_i915_gem_relocation_entry containing 809 * the relocations to be performed in this buffer. 810 */ 811 __u64 relocs_ptr; 812 813 /** Required alignment in graphics aperture */ 814 __u64 alignment; 815 816 /** 817 * When the EXEC_OBJECT_PINNED flag is specified this is populated by 818 * the user with the GTT offset at which this object will be pinned. 819 * When the I915_EXEC_NO_RELOC flag is specified this must contain the 820 * presumed_offset of the object. 821 * During execbuffer2 the kernel populates it with the value of the 822 * current GTT offset of the object, for future presumed_offset writes. 823 */ 824 __u64 offset; 825 826 #define EXEC_OBJECT_NEEDS_FENCE (1<<0) 827 #define EXEC_OBJECT_NEEDS_GTT (1<<1) 828 #define EXEC_OBJECT_WRITE (1<<2) 829 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3) 830 #define EXEC_OBJECT_PINNED (1<<4) 831 #define EXEC_OBJECT_PAD_TO_SIZE (1<<5) 832 /* The kernel implicitly tracks GPU activity on all GEM objects, and 833 * synchronises operations with outstanding rendering. This includes 834 * rendering on other devices if exported via dma-buf. However, sometimes 835 * this tracking is too coarse and the user knows better. For example, 836 * if the object is split into non-overlapping ranges shared between different 837 * clients or engines (i.e. suballocating objects), the implicit tracking 838 * by kernel assumes that each operation affects the whole object rather 839 * than an individual range, causing needless synchronisation between clients. 840 * The kernel will also forgo any CPU cache flushes prior to rendering from 841 * the object as the client is expected to be also handling such domain 842 * tracking. 843 * 844 * The kernel maintains the implicit tracking in order to manage resources 845 * used by the GPU - this flag only disables the synchronisation prior to 846 * rendering with this object in this execbuf. 847 * 848 * Opting out of implicit synhronisation requires the user to do its own 849 * explicit tracking to avoid rendering corruption. See, for example, 850 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously. 851 */ 852 #define EXEC_OBJECT_ASYNC (1<<6) 853 /* Request that the contents of this execobject be copied into the error 854 * state upon a GPU hang involving this batch for post-mortem debugging. 855 * These buffers are recorded in no particular order as "user" in 856 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see 857 * if the kernel supports this flag. 858 */ 859 #define EXEC_OBJECT_CAPTURE (1<<7) 860 /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */ 861 #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1) 862 __u64 flags; 863 864 union { 865 __u64 rsvd1; 866 __u64 pad_to_size; 867 }; 868 __u64 rsvd2; 869 }; 870 871 struct drm_i915_gem_exec_fence { 872 /** 873 * User's handle for a drm_syncobj to wait on or signal. 874 */ 875 __u32 handle; 876 877 #define I915_EXEC_FENCE_WAIT (1<<0) 878 #define I915_EXEC_FENCE_SIGNAL (1<<1) 879 #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1)) 880 __u32 flags; 881 }; 882 883 struct drm_i915_gem_execbuffer2 { 884 /** 885 * List of gem_exec_object2 structs 886 */ 887 __u64 buffers_ptr; 888 __u32 buffer_count; 889 890 /** Offset in the batchbuffer to start execution from. */ 891 __u32 batch_start_offset; 892 /** Bytes used in batchbuffer from batch_start_offset */ 893 __u32 batch_len; 894 __u32 DR1; 895 __u32 DR4; 896 __u32 num_cliprects; 897 /** 898 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY 899 * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a 900 * struct drm_i915_gem_exec_fence *fences. 901 */ 902 __u64 cliprects_ptr; 903 #define I915_EXEC_RING_MASK (7<<0) 904 #define I915_EXEC_DEFAULT (0<<0) 905 #define I915_EXEC_RENDER (1<<0) 906 #define I915_EXEC_BSD (2<<0) 907 #define I915_EXEC_BLT (3<<0) 908 #define I915_EXEC_VEBOX (4<<0) 909 910 /* Used for switching the constants addressing mode on gen4+ RENDER ring. 911 * Gen6+ only supports relative addressing to dynamic state (default) and 912 * absolute addressing. 913 * 914 * These flags are ignored for the BSD and BLT rings. 915 */ 916 #define I915_EXEC_CONSTANTS_MASK (3<<6) 917 #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ 918 #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) 919 #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ 920 __u64 flags; 921 __u64 rsvd1; /* now used for context info */ 922 __u64 rsvd2; 923 }; 924 925 /** Resets the SO write offset registers for transform feedback on gen7. */ 926 #define I915_EXEC_GEN7_SOL_RESET (1<<8) 927 928 /** Request a privileged ("secure") batch buffer. Note only available for 929 * DRM_ROOT_ONLY | DRM_MASTER processes. 930 */ 931 #define I915_EXEC_SECURE (1<<9) 932 933 /** Inform the kernel that the batch is and will always be pinned. This 934 * negates the requirement for a workaround to be performed to avoid 935 * an incoherent CS (such as can be found on 830/845). If this flag is 936 * not passed, the kernel will endeavour to make sure the batch is 937 * coherent with the CS before execution. If this flag is passed, 938 * userspace assumes the responsibility for ensuring the same. 939 */ 940 #define I915_EXEC_IS_PINNED (1<<10) 941 942 /** Provide a hint to the kernel that the command stream and auxiliary 943 * state buffers already holds the correct presumed addresses and so the 944 * relocation process may be skipped if no buffers need to be moved in 945 * preparation for the execbuffer. 946 */ 947 #define I915_EXEC_NO_RELOC (1<<11) 948 949 /** Use the reloc.handle as an index into the exec object array rather 950 * than as the per-file handle. 951 */ 952 #define I915_EXEC_HANDLE_LUT (1<<12) 953 954 /** Used for switching BSD rings on the platforms with two BSD rings */ 955 #define I915_EXEC_BSD_SHIFT (13) 956 #define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT) 957 /* default ping-pong mode */ 958 #define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT) 959 #define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT) 960 #define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT) 961 962 /** Tell the kernel that the batchbuffer is processed by 963 * the resource streamer. 964 */ 965 #define I915_EXEC_RESOURCE_STREAMER (1<<15) 966 967 /* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent 968 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing 969 * the batch. 970 * 971 * Returns -EINVAL if the sync_file fd cannot be found. 972 */ 973 #define I915_EXEC_FENCE_IN (1<<16) 974 975 /* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd 976 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given 977 * to the caller, and it should be close() after use. (The fd is a regular 978 * file descriptor and will be cleaned up on process termination. It holds 979 * a reference to the request, but nothing else.) 980 * 981 * The sync_file fd can be combined with other sync_file and passed either 982 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip 983 * will only occur after this request completes), or to other devices. 984 * 985 * Using I915_EXEC_FENCE_OUT requires use of 986 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written 987 * back to userspace. Failure to do so will cause the out-fence to always 988 * be reported as zero, and the real fence fd to be leaked. 989 */ 990 #define I915_EXEC_FENCE_OUT (1<<17) 991 992 /* 993 * Traditionally the execbuf ioctl has only considered the final element in 994 * the execobject[] to be the executable batch. Often though, the client 995 * will known the batch object prior to construction and being able to place 996 * it into the execobject[] array first can simplify the relocation tracking. 997 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the 998 * execobject[] as the * batch instead (the default is to use the last 999 * element). 1000 */ 1001 #define I915_EXEC_BATCH_FIRST (1<<18) 1002 1003 /* Setting I915_FENCE_ARRAY implies that num_cliprects and cliprects_ptr 1004 * define an array of i915_gem_exec_fence structures which specify a set of 1005 * dma fences to wait upon or signal. 1006 */ 1007 #define I915_EXEC_FENCE_ARRAY (1<<19) 1008 1009 #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1)) 1010 1011 #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 1012 #define i915_execbuffer2_set_context_id(eb2, context) \ 1013 (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK 1014 #define i915_execbuffer2_get_context_id(eb2) \ 1015 ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) 1016 1017 struct drm_i915_gem_pin { 1018 /** Handle of the buffer to be pinned. */ 1019 __u32 handle; 1020 __u32 pad; 1021 1022 /** alignment required within the aperture */ 1023 __u64 alignment; 1024 1025 /** Returned GTT offset of the buffer. */ 1026 __u64 offset; 1027 }; 1028 1029 struct drm_i915_gem_unpin { 1030 /** Handle of the buffer to be unpinned. */ 1031 __u32 handle; 1032 __u32 pad; 1033 }; 1034 1035 struct drm_i915_gem_busy { 1036 /** Handle of the buffer to check for busy */ 1037 __u32 handle; 1038 1039 /** Return busy status 1040 * 1041 * A return of 0 implies that the object is idle (after 1042 * having flushed any pending activity), and a non-zero return that 1043 * the object is still in-flight on the GPU. (The GPU has not yet 1044 * signaled completion for all pending requests that reference the 1045 * object.) An object is guaranteed to become idle eventually (so 1046 * long as no new GPU commands are executed upon it). Due to the 1047 * asynchronous nature of the hardware, an object reported 1048 * as busy may become idle before the ioctl is completed. 1049 * 1050 * Furthermore, if the object is busy, which engine is busy is only 1051 * provided as a guide. There are race conditions which prevent the 1052 * report of which engines are busy from being always accurate. 1053 * However, the converse is not true. If the object is idle, the 1054 * result of the ioctl, that all engines are idle, is accurate. 1055 * 1056 * The returned dword is split into two fields to indicate both 1057 * the engines on which the object is being read, and the 1058 * engine on which it is currently being written (if any). 1059 * 1060 * The low word (bits 0:15) indicate if the object is being written 1061 * to by any engine (there can only be one, as the GEM implicit 1062 * synchronisation rules force writes to be serialised). Only the 1063 * engine for the last write is reported. 1064 * 1065 * The high word (bits 16:31) are a bitmask of which engines are 1066 * currently reading from the object. Multiple engines may be 1067 * reading from the object simultaneously. 1068 * 1069 * The value of each engine is the same as specified in the 1070 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc. 1071 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to 1072 * the I915_EXEC_RENDER engine for execution, and so it is never 1073 * reported as active itself. Some hardware may have parallel 1074 * execution engines, e.g. multiple media engines, which are 1075 * mapped to the same identifier in the EXECBUFFER2 ioctl and 1076 * so are not separately reported for busyness. 1077 * 1078 * Caveat emptor: 1079 * Only the boolean result of this query is reliable; that is whether 1080 * the object is idle or busy. The report of which engines are busy 1081 * should be only used as a heuristic. 1082 */ 1083 __u32 busy; 1084 }; 1085 1086 /** 1087 * I915_CACHING_NONE 1088 * 1089 * GPU access is not coherent with cpu caches. Default for machines without an 1090 * LLC. 1091 */ 1092 #define I915_CACHING_NONE 0 1093 /** 1094 * I915_CACHING_CACHED 1095 * 1096 * GPU access is coherent with cpu caches and furthermore the data is cached in 1097 * last-level caches shared between cpu cores and the gpu GT. Default on 1098 * machines with HAS_LLC. 1099 */ 1100 #define I915_CACHING_CACHED 1 1101 /** 1102 * I915_CACHING_DISPLAY 1103 * 1104 * Special GPU caching mode which is coherent with the scanout engines. 1105 * Transparently falls back to I915_CACHING_NONE on platforms where no special 1106 * cache mode (like write-through or gfdt flushing) is available. The kernel 1107 * automatically sets this mode when using a buffer as a scanout target. 1108 * Userspace can manually set this mode to avoid a costly stall and clflush in 1109 * the hotpath of drawing the first frame. 1110 */ 1111 #define I915_CACHING_DISPLAY 2 1112 1113 struct drm_i915_gem_caching { 1114 /** 1115 * Handle of the buffer to set/get the caching level of. */ 1116 __u32 handle; 1117 1118 /** 1119 * Cacheing level to apply or return value 1120 * 1121 * bits0-15 are for generic caching control (i.e. the above defined 1122 * values). bits16-31 are reserved for platform-specific variations 1123 * (e.g. l3$ caching on gen7). */ 1124 __u32 caching; 1125 }; 1126 1127 #define I915_TILING_NONE 0 1128 #define I915_TILING_X 1 1129 #define I915_TILING_Y 2 1130 #define I915_TILING_LAST I915_TILING_Y 1131 1132 #define I915_BIT_6_SWIZZLE_NONE 0 1133 #define I915_BIT_6_SWIZZLE_9 1 1134 #define I915_BIT_6_SWIZZLE_9_10 2 1135 #define I915_BIT_6_SWIZZLE_9_11 3 1136 #define I915_BIT_6_SWIZZLE_9_10_11 4 1137 /* Not seen by userland */ 1138 #define I915_BIT_6_SWIZZLE_UNKNOWN 5 1139 /* Seen by userland. */ 1140 #define I915_BIT_6_SWIZZLE_9_17 6 1141 #define I915_BIT_6_SWIZZLE_9_10_17 7 1142 1143 struct drm_i915_gem_set_tiling { 1144 /** Handle of the buffer to have its tiling state updated */ 1145 __u32 handle; 1146 1147 /** 1148 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 1149 * I915_TILING_Y). 1150 * 1151 * This value is to be set on request, and will be updated by the 1152 * kernel on successful return with the actual chosen tiling layout. 1153 * 1154 * The tiling mode may be demoted to I915_TILING_NONE when the system 1155 * has bit 6 swizzling that can't be managed correctly by GEM. 1156 * 1157 * Buffer contents become undefined when changing tiling_mode. 1158 */ 1159 __u32 tiling_mode; 1160 1161 /** 1162 * Stride in bytes for the object when in I915_TILING_X or 1163 * I915_TILING_Y. 1164 */ 1165 __u32 stride; 1166 1167 /** 1168 * Returned address bit 6 swizzling required for CPU access through 1169 * mmap mapping. 1170 */ 1171 __u32 swizzle_mode; 1172 }; 1173 1174 struct drm_i915_gem_get_tiling { 1175 /** Handle of the buffer to get tiling state for. */ 1176 __u32 handle; 1177 1178 /** 1179 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 1180 * I915_TILING_Y). 1181 */ 1182 __u32 tiling_mode; 1183 1184 /** 1185 * Returned address bit 6 swizzling required for CPU access through 1186 * mmap mapping. 1187 */ 1188 __u32 swizzle_mode; 1189 1190 /** 1191 * Returned address bit 6 swizzling required for CPU access through 1192 * mmap mapping whilst bound. 1193 */ 1194 __u32 phys_swizzle_mode; 1195 }; 1196 1197 struct drm_i915_gem_get_aperture { 1198 /** Total size of the aperture used by i915_gem_execbuffer, in bytes */ 1199 __u64 aper_size; 1200 1201 /** 1202 * Available space in the aperture used by i915_gem_execbuffer, in 1203 * bytes 1204 */ 1205 __u64 aper_available_size; 1206 }; 1207 1208 struct drm_i915_get_pipe_from_crtc_id { 1209 /** ID of CRTC being requested **/ 1210 __u32 crtc_id; 1211 1212 /** pipe of requested CRTC **/ 1213 __u32 pipe; 1214 }; 1215 1216 #define I915_MADV_WILLNEED 0 1217 #define I915_MADV_DONTNEED 1 1218 #define __I915_MADV_PURGED 2 /* internal state */ 1219 1220 struct drm_i915_gem_madvise { 1221 /** Handle of the buffer to change the backing store advice */ 1222 __u32 handle; 1223 1224 /* Advice: either the buffer will be needed again in the near future, 1225 * or wont be and could be discarded under memory pressure. 1226 */ 1227 __u32 madv; 1228 1229 /** Whether the backing store still exists. */ 1230 __u32 retained; 1231 }; 1232 1233 /* flags */ 1234 #define I915_OVERLAY_TYPE_MASK 0xff 1235 #define I915_OVERLAY_YUV_PLANAR 0x01 1236 #define I915_OVERLAY_YUV_PACKED 0x02 1237 #define I915_OVERLAY_RGB 0x03 1238 1239 #define I915_OVERLAY_DEPTH_MASK 0xff00 1240 #define I915_OVERLAY_RGB24 0x1000 1241 #define I915_OVERLAY_RGB16 0x2000 1242 #define I915_OVERLAY_RGB15 0x3000 1243 #define I915_OVERLAY_YUV422 0x0100 1244 #define I915_OVERLAY_YUV411 0x0200 1245 #define I915_OVERLAY_YUV420 0x0300 1246 #define I915_OVERLAY_YUV410 0x0400 1247 1248 #define I915_OVERLAY_SWAP_MASK 0xff0000 1249 #define I915_OVERLAY_NO_SWAP 0x000000 1250 #define I915_OVERLAY_UV_SWAP 0x010000 1251 #define I915_OVERLAY_Y_SWAP 0x020000 1252 #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 1253 1254 #define I915_OVERLAY_FLAGS_MASK 0xff000000 1255 #define I915_OVERLAY_ENABLE 0x01000000 1256 1257 struct drm_intel_overlay_put_image { 1258 /* various flags and src format description */ 1259 __u32 flags; 1260 /* source picture description */ 1261 __u32 bo_handle; 1262 /* stride values and offsets are in bytes, buffer relative */ 1263 __u16 stride_Y; /* stride for packed formats */ 1264 __u16 stride_UV; 1265 __u32 offset_Y; /* offset for packet formats */ 1266 __u32 offset_U; 1267 __u32 offset_V; 1268 /* in pixels */ 1269 __u16 src_width; 1270 __u16 src_height; 1271 /* to compensate the scaling factors for partially covered surfaces */ 1272 __u16 src_scan_width; 1273 __u16 src_scan_height; 1274 /* output crtc description */ 1275 __u32 crtc_id; 1276 __u16 dst_x; 1277 __u16 dst_y; 1278 __u16 dst_width; 1279 __u16 dst_height; 1280 }; 1281 1282 /* flags */ 1283 #define I915_OVERLAY_UPDATE_ATTRS (1<<0) 1284 #define I915_OVERLAY_UPDATE_GAMMA (1<<1) 1285 #define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2) 1286 struct drm_intel_overlay_attrs { 1287 __u32 flags; 1288 __u32 color_key; 1289 __s32 brightness; 1290 __u32 contrast; 1291 __u32 saturation; 1292 __u32 gamma0; 1293 __u32 gamma1; 1294 __u32 gamma2; 1295 __u32 gamma3; 1296 __u32 gamma4; 1297 __u32 gamma5; 1298 }; 1299 1300 /* 1301 * Intel sprite handling 1302 * 1303 * Color keying works with a min/mask/max tuple. Both source and destination 1304 * color keying is allowed. 1305 * 1306 * Source keying: 1307 * Sprite pixels within the min & max values, masked against the color channels 1308 * specified in the mask field, will be transparent. All other pixels will 1309 * be displayed on top of the primary plane. For RGB surfaces, only the min 1310 * and mask fields will be used; ranged compares are not allowed. 1311 * 1312 * Destination keying: 1313 * Primary plane pixels that match the min value, masked against the color 1314 * channels specified in the mask field, will be replaced by corresponding 1315 * pixels from the sprite plane. 1316 * 1317 * Note that source & destination keying are exclusive; only one can be 1318 * active on a given plane. 1319 */ 1320 1321 #define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ 1322 #define I915_SET_COLORKEY_DESTINATION (1<<1) 1323 #define I915_SET_COLORKEY_SOURCE (1<<2) 1324 struct drm_intel_sprite_colorkey { 1325 __u32 plane_id; 1326 __u32 min_value; 1327 __u32 channel_mask; 1328 __u32 max_value; 1329 __u32 flags; 1330 }; 1331 1332 struct drm_i915_gem_wait { 1333 /** Handle of BO we shall wait on */ 1334 __u32 bo_handle; 1335 __u32 flags; 1336 /** Number of nanoseconds to wait, Returns time remaining. */ 1337 __s64 timeout_ns; 1338 }; 1339 1340 struct drm_i915_gem_context_create { 1341 /* output: id of new context*/ 1342 __u32 ctx_id; 1343 __u32 pad; 1344 }; 1345 1346 struct drm_i915_gem_context_destroy { 1347 __u32 ctx_id; 1348 __u32 pad; 1349 }; 1350 1351 struct drm_i915_reg_read { 1352 /* 1353 * Register offset. 1354 * For 64bit wide registers where the upper 32bits don't immediately 1355 * follow the lower 32bits, the offset of the lower 32bits must 1356 * be specified 1357 */ 1358 __u64 offset; 1359 #define I915_REG_READ_8B_WA (1ul << 0) 1360 1361 __u64 val; /* Return value */ 1362 }; 1363 /* Known registers: 1364 * 1365 * Render engine timestamp - 0x2358 + 64bit - gen7+ 1366 * - Note this register returns an invalid value if using the default 1367 * single instruction 8byte read, in order to workaround that pass 1368 * flag I915_REG_READ_8B_WA in offset field. 1369 * 1370 */ 1371 1372 struct drm_i915_reset_stats { 1373 __u32 ctx_id; 1374 __u32 flags; 1375 1376 /* All resets since boot/module reload, for all contexts */ 1377 __u32 reset_count; 1378 1379 /* Number of batches lost when active in GPU, for this context */ 1380 __u32 batch_active; 1381 1382 /* Number of batches lost pending for execution, for this context */ 1383 __u32 batch_pending; 1384 1385 __u32 pad; 1386 }; 1387 1388 struct drm_i915_gem_userptr { 1389 __u64 user_ptr; 1390 __u64 user_size; 1391 __u32 flags; 1392 #define I915_USERPTR_READ_ONLY 0x1 1393 #define I915_USERPTR_UNSYNCHRONIZED 0x80000000 1394 /** 1395 * Returned handle for the object. 1396 * 1397 * Object handles are nonzero. 1398 */ 1399 __u32 handle; 1400 }; 1401 1402 struct drm_i915_gem_context_param { 1403 __u32 ctx_id; 1404 __u32 size; 1405 __u64 param; 1406 #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 1407 #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 1408 #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1409 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 1410 #define I915_CONTEXT_PARAM_BANNABLE 0x5 1411 #define I915_CONTEXT_PARAM_PRIORITY 0x6 1412 #define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */ 1413 #define I915_CONTEXT_DEFAULT_PRIORITY 0 1414 #define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */ 1415 __u64 value; 1416 }; 1417 1418 enum drm_i915_oa_format { 1419 I915_OA_FORMAT_A13 = 1, /* HSW only */ 1420 I915_OA_FORMAT_A29, /* HSW only */ 1421 I915_OA_FORMAT_A13_B8_C8, /* HSW only */ 1422 I915_OA_FORMAT_B4_C8, /* HSW only */ 1423 I915_OA_FORMAT_A45_B8_C8, /* HSW only */ 1424 I915_OA_FORMAT_B4_C8_A16, /* HSW only */ 1425 I915_OA_FORMAT_C4_B8, /* HSW+ */ 1426 1427 /* Gen8+ */ 1428 I915_OA_FORMAT_A12, 1429 I915_OA_FORMAT_A12_B8_C8, 1430 I915_OA_FORMAT_A32u40_A4u32_B8_C8, 1431 1432 I915_OA_FORMAT_MAX /* non-ABI */ 1433 }; 1434 1435 enum drm_i915_perf_property_id { 1436 /** 1437 * Open the stream for a specific context handle (as used with 1438 * execbuffer2). A stream opened for a specific context this way 1439 * won't typically require root privileges. 1440 */ 1441 DRM_I915_PERF_PROP_CTX_HANDLE = 1, 1442 1443 /** 1444 * A value of 1 requests the inclusion of raw OA unit reports as 1445 * part of stream samples. 1446 */ 1447 DRM_I915_PERF_PROP_SAMPLE_OA, 1448 1449 /** 1450 * The value specifies which set of OA unit metrics should be 1451 * be configured, defining the contents of any OA unit reports. 1452 */ 1453 DRM_I915_PERF_PROP_OA_METRICS_SET, 1454 1455 /** 1456 * The value specifies the size and layout of OA unit reports. 1457 */ 1458 DRM_I915_PERF_PROP_OA_FORMAT, 1459 1460 /** 1461 * Specifying this property implicitly requests periodic OA unit 1462 * sampling and (at least on Haswell) the sampling frequency is derived 1463 * from this exponent as follows: 1464 * 1465 * 80ns * 2^(period_exponent + 1) 1466 */ 1467 DRM_I915_PERF_PROP_OA_EXPONENT, 1468 1469 DRM_I915_PERF_PROP_MAX /* non-ABI */ 1470 }; 1471 1472 struct drm_i915_perf_open_param { 1473 __u32 flags; 1474 #define I915_PERF_FLAG_FD_CLOEXEC (1<<0) 1475 #define I915_PERF_FLAG_FD_NONBLOCK (1<<1) 1476 #define I915_PERF_FLAG_DISABLED (1<<2) 1477 1478 /** The number of u64 (id, value) pairs */ 1479 __u32 num_properties; 1480 1481 /** 1482 * Pointer to array of u64 (id, value) pairs configuring the stream 1483 * to open. 1484 */ 1485 __u64 properties_ptr; 1486 }; 1487 1488 /** 1489 * Enable data capture for a stream that was either opened in a disabled state 1490 * via I915_PERF_FLAG_DISABLED or was later disabled via 1491 * I915_PERF_IOCTL_DISABLE. 1492 * 1493 * It is intended to be cheaper to disable and enable a stream than it may be 1494 * to close and re-open a stream with the same configuration. 1495 * 1496 * It's undefined whether any pending data for the stream will be lost. 1497 */ 1498 #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) 1499 1500 /** 1501 * Disable data capture for a stream. 1502 * 1503 * It is an error to try and read a stream that is disabled. 1504 */ 1505 #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) 1506 1507 /** 1508 * Common to all i915 perf records 1509 */ 1510 struct drm_i915_perf_record_header { 1511 __u32 type; 1512 __u16 pad; 1513 __u16 size; 1514 }; 1515 1516 enum drm_i915_perf_record_type { 1517 1518 /** 1519 * Samples are the work horse record type whose contents are extensible 1520 * and defined when opening an i915 perf stream based on the given 1521 * properties. 1522 * 1523 * Boolean properties following the naming convention 1524 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in 1525 * every sample. 1526 * 1527 * The order of these sample properties given by userspace has no 1528 * affect on the ordering of data within a sample. The order is 1529 * documented here. 1530 * 1531 * struct { 1532 * struct drm_i915_perf_record_header header; 1533 * 1534 * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA 1535 * }; 1536 */ 1537 DRM_I915_PERF_RECORD_SAMPLE = 1, 1538 1539 /* 1540 * Indicates that one or more OA reports were not written by the 1541 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT 1542 * command collides with periodic sampling - which would be more likely 1543 * at higher sampling frequencies. 1544 */ 1545 DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2, 1546 1547 /** 1548 * An error occurred that resulted in all pending OA reports being lost. 1549 */ 1550 DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3, 1551 1552 DRM_I915_PERF_RECORD_MAX /* non-ABI */ 1553 }; 1554 1555 /** 1556 * Structure to upload perf dynamic configuration into the kernel. 1557 */ 1558 struct drm_i915_perf_oa_config { 1559 /** String formatted like "%08x-%04x-%04x-%04x-%012x" */ 1560 char uuid[36]; 1561 1562 __u32 n_mux_regs; 1563 __u32 n_boolean_regs; 1564 __u32 n_flex_regs; 1565 1566 /* 1567 * These fields are pointers to tuples of u32 values (register 1568 * address, value). For example the expected length of the buffer 1569 * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs). 1570 */ 1571 __u64 mux_regs_ptr; 1572 __u64 boolean_regs_ptr; 1573 __u64 flex_regs_ptr; 1574 }; 1575 1576 #if defined(__cplusplus) 1577 } 1578 #endif 1579 1580 #endif /* _I915_DRM_H_ */ 1581