/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * \file amdgpu.h
 *
 * Declare public libdrm_amdgpu API
 *
 * This file defines the API exposed by the libdrm_amdgpu library.
 * Users who want to use libdrm_amdgpu functionality must include
 * this file.
 *
 */
#ifndef _AMDGPU_H_
#define _AMDGPU_H_

#include <stdint.h>
#include <stdbool.h>

struct drm_amdgpu_info_hw_ip;

/*--------------------------------------------------------------------------*/
/* --------------------------- Defines ------------------------------------ */
/*--------------------------------------------------------------------------*/

/**
 * Define the maximum number of Command Buffers (IBs) which can be sent to a
 * single hardware IP, to accommodate CE/DE requirements
 *
 * \sa amdgpu_cs_ib_info
*/
#define AMDGPU_CS_MAX_IBS_PER_SUBMIT		4

/**
 * Special timeout value meaning that the timeout is infinite.
 */
#define AMDGPU_TIMEOUT_INFINITE			0xffffffffffffffffull

/**
 * Used in amdgpu_cs_query_fence_status(), meaning that the given timeout
 * is absolute.
 */
#define AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE     (1 << 0)

/*--------------------------------------------------------------------------*/
/* ----------------------------- Enums ------------------------------------ */
/*--------------------------------------------------------------------------*/

/**
 * Enum describing possible handle types
 *
 * \sa amdgpu_bo_import, amdgpu_bo_export
 *
*/
enum amdgpu_bo_handle_type {
	/** GEM flink name (needs DRM authentication, used by DRI2) */
	amdgpu_bo_handle_type_gem_flink_name = 0,

	/** KMS handle which is used by all driver ioctls */
	amdgpu_bo_handle_type_kms = 1,

	/** DMA-buf fd handle */
	amdgpu_bo_handle_type_dma_buf_fd = 2
};

/** Define known types of GPU VM VA ranges */
enum amdgpu_gpu_va_range
{
	/** Allocate from "normal"/general range */
	amdgpu_gpu_va_range_general = 0
};

/*--------------------------------------------------------------------------*/
/* -------------------------- Datatypes ----------------------------------- */
/*--------------------------------------------------------------------------*/

/**
 * Define opaque pointer to context associated with fd.
 * This context will be returned as the result of
 * the "initialize" function and should be passed as the first
 * parameter to any API call
 */
typedef struct amdgpu_device *amdgpu_device_handle;

/**
 * Define GPU Context type as pointer to opaque structure
 * Example of GPU Context is the "rendering" context associated
 * with OpenGL context (glCreateContext)
 */
typedef struct amdgpu_context *amdgpu_context_handle;

/**
 * Define handle for amdgpu resources: buffer, GDS, etc.
 */
typedef struct amdgpu_bo *amdgpu_bo_handle;

/**
 * Define handle for list of BOs
 */
typedef struct amdgpu_bo_list *amdgpu_bo_list_handle;

/**
 * Define handle to be used to work with VA allocated ranges
 */
typedef struct amdgpu_va *amdgpu_va_handle;

/*--------------------------------------------------------------------------*/
/* -------------------------- Structures ---------------------------------- */
/*--------------------------------------------------------------------------*/

/**
 * Structure describing memory allocation request
 *
 * \sa amdgpu_bo_alloc()
 *
*/
struct amdgpu_bo_alloc_request {
	/** Requested allocation size. It must be correctly aligned. */
	uint64_t alloc_size;

	/**
	 * It may be required to have some specific alignment requirements
	 * for physical backing storage (e.g. for displayable surfaces).
	 * If 0, there is no special alignment requirement.
	 */
	uint64_t phys_alignment;

	/**
	 * UMD should specify where to allocate memory and how it
	 * will be accessed by the CPU.
	 */
	uint32_t preferred_heap;

	/** Additional flags passed on allocation */
	uint64_t flags;
};

/**
 * Special UMD-specific information associated with a buffer.
 *
 * It may be needed to pass some buffer characteristics as part
 * of buffer sharing. Such information is defined by the UMD and is
 * opaque to libdrm_amdgpu as well as to the kernel driver.
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info,
 *     amdgpu_bo_import(), amdgpu_bo_export
 *
*/
struct amdgpu_bo_metadata {
	/** Special flag associated with surface */
	uint64_t flags;

	/**
	 * ASIC-specific tiling information (also used by DCE).
	 * The encoding is defined by the AMDGPU_TILING_* definitions.
	 */
	uint64_t tiling_info;

	/** Size of metadata associated with the buffer, in bytes. */
	uint32_t size_metadata;

	/** UMD specific metadata. Opaque for kernel */
	uint32_t umd_metadata[64];
};

/**
 * Structure describing an allocated buffer. A client may need
 * to query such information as part of the buffer-sharing mechanism
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info(),
 *     amdgpu_bo_import(), amdgpu_bo_export()
*/
struct amdgpu_bo_info {
	/** Allocated memory size */
	uint64_t alloc_size;

	/**
	 * It may be required to have some specific alignment requirements
	 * for physical backing storage.
	 */
	uint64_t phys_alignment;

	/** Heap where to allocate memory. */
	uint32_t preferred_heap;

	/** Additional allocation flags. */
	uint64_t alloc_flags;

	/** Metadata associated with buffer if any. */
	struct amdgpu_bo_metadata metadata;
};

/**
 * Structure with information about "imported" buffer
 *
 * \sa amdgpu_bo_import()
 *
 */
struct amdgpu_bo_import_result {
	/** Handle of memory/buffer to use */
	amdgpu_bo_handle buf_handle;

	/** Buffer size */
	uint64_t alloc_size;
};

/**
 *
 * Structure to describe GDS partitioning information.
 * \note OA and GWS resources are associated with a GDS partition
 *
 * \sa amdgpu_gpu_resource_query_gds_info
 *
*/
struct amdgpu_gds_resource_info {
	uint32_t gds_gfx_partition_size;
	uint32_t compute_partition_size;
	uint32_t gds_total_size;
	uint32_t gws_per_gfx_partition;
	uint32_t gws_per_compute_partition;
	uint32_t oa_per_gfx_partition;
	uint32_t oa_per_compute_partition;
};

/**
 * Structure describing a CS fence
 *
 * \sa amdgpu_cs_query_fence_status(), amdgpu_cs_request, amdgpu_cs_submit()
 *
*/
struct amdgpu_cs_fence {

	/** In which context the IB was sent to execution */
	amdgpu_context_handle context;

	/** To which HW IP type the fence belongs */
	uint32_t ip_type;

	/** IP instance index if there are several IPs of the same type. */
	uint32_t ip_instance;

	/** Ring index of the HW IP */
	uint32_t ring;

	/** Specify fence for which we need to check submission status. */
	uint64_t fence;
};

/**
 * Structure describing an IB
 *
 * \sa amdgpu_cs_request, amdgpu_cs_submit()
 *
*/
struct amdgpu_cs_ib_info {
	/** Special flags */
	uint64_t flags;

	/** Virtual MC address of the command buffer */
	uint64_t ib_mc_address;

	/**
	 * Size of Command Buffer to be submitted.
	 *   - The size is in units of dwords (4 bytes).
	 *   - Could be 0
	 */
	uint32_t size;
};

/**
 * Structure describing fence information
 *
 * \sa amdgpu_cs_request, amdgpu_cs_query_fence,
 *     amdgpu_cs_submit(), amdgpu_cs_query_fence_status()
 */
struct amdgpu_cs_fence_info {
	/** Buffer object for the fence */
	amdgpu_bo_handle handle;

	/** Fence offset in units of sizeof(uint64_t) */
	uint64_t offset;
};

/**
 * Structure describing a submission request
 *
 * \note We could have several IBs as a packet, e.g. CE, CE, DE in the gfx case
 *
 * \sa amdgpu_cs_submit()
*/
struct amdgpu_cs_request {
	/** Specify flags with additional information */
	uint64_t flags;

	/** Specify HW IP block type to which to send the IB. */
	unsigned ip_type;

	/** IP instance index if there are several IPs of the same type. */
	unsigned ip_instance;

	/**
	 * Specify ring index of the IP. We could have several rings
	 * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
	 */
	uint32_t ring;

	/**
	 * List handle with resources used by this request.
	 */
	amdgpu_bo_list_handle resources;

	/**
	 * Number of dependencies this command submission needs to
	 * wait for before starting execution.
	 */
	uint32_t number_of_dependencies;

	/**
	 * Array of dependencies which need to be met before
	 * execution can start.
	 */
	struct amdgpu_cs_fence *dependencies;

	/** Number of IBs to submit in the field ibs. */
	uint32_t number_of_ibs;

	/**
	 * IBs to submit. These IBs will be submitted together as a single entity
	 */
	struct amdgpu_cs_ib_info *ibs;

	/**
	 * The returned sequence number for the command submission
	 */
	uint64_t seq_no;

	/**
	 * The fence information
	 */
	struct amdgpu_cs_fence_info fence_info;
};

/**
 * Structure which provides information about GPU VM MC address space
 * alignment requirements
 *
 * \sa amdgpu_query_buffer_size_alignment
 */
struct amdgpu_buffer_size_alignments {
	/** Size alignment requirement for allocation in
	 * local memory */
	uint64_t size_local;

	/**
	 * Size alignment requirement for allocation in remote memory
	 */
	uint64_t size_remote;
};

/**
 * Structure which provides information about a heap
 *
 * \sa amdgpu_query_heap_info()
 *
 */
struct amdgpu_heap_info {
	/** Theoretical max. available memory in the given heap */
	uint64_t heap_size;

	/**
	 * Number of bytes allocated in the heap. This includes all processes
	 * and private allocations in the kernel. It changes when new buffers
	 * are allocated, freed, and moved. It cannot be larger than
	 * heap_size.
	 */
	uint64_t heap_usage;

	/**
	 * Theoretical possible max. size of buffer which
	 * could be allocated in the given heap
	 */
	uint64_t max_allocation;
};

/**
 * Describes GPU h/w info needed for correct UMD initialization
 *
 * \sa amdgpu_query_gpu_info()
*/
struct amdgpu_gpu_info {
	/** ASIC id */
	uint32_t asic_id;
	/** Chip revision */
	uint32_t chip_rev;
	/** Chip external revision */
	uint32_t chip_external_rev;
	/** Family ID */
	uint32_t family_id;
	/** Special flags */
	uint64_t ids_flags;
	/** Max engine clock */
	uint64_t max_engine_clk;
	/** Max memory clock */
	uint64_t max_memory_clk;
	/** Number of shader engines */
	uint32_t num_shader_engines;
	/** Number of shader arrays per engine */
	uint32_t num_shader_arrays_per_engine;
	/** Number of available good shader pipes */
	uint32_t avail_quad_shader_pipes;
	/** Max. number of shader pipes (including good and bad pipes) */
	uint32_t max_quad_shader_pipes;
	/** Number of parameter cache entries per shader quad pipe */
	uint32_t cache_entries_per_quad_pipe;
	/** Number of available graphics contexts */
	uint32_t num_hw_gfx_contexts;
	/** Number of render backend pipes */
	uint32_t rb_pipes;
	/** Enabled render backend pipe mask */
	uint32_t enabled_rb_pipes_mask;
	/** Frequency of GPU counter */
	uint32_t gpu_counter_freq;
	/** CC_RB_BACKEND_DISABLE.BACKEND_DISABLE per SE */
	uint32_t backend_disable[4];
	/** Value of MC_ARB_RAMCFG register */
	uint32_t mc_arb_ramcfg;
	/** Value of GB_ADDR_CONFIG */
	uint32_t gb_addr_cfg;
	/** Values of the GB_TILE_MODE0..31 registers */
	uint32_t gb_tile_mode[32];
	/** Values of GB_MACROTILE_MODE0..15 registers */
	uint32_t gb_macro_tile_mode[16];
	/** Value of PA_SC_RASTER_CONFIG register per SE */
	uint32_t pa_sc_raster_cfg[4];
	/** Value of PA_SC_RASTER_CONFIG_1 register per SE */
	uint32_t pa_sc_raster_cfg1[4];
	/** CU info */
	uint32_t cu_active_number;
	uint32_t cu_ao_mask;
	uint32_t cu_bitmap[4][4];
	/** Video memory type info */
	uint32_t vram_type;
	/** Video memory bit width */
	uint32_t vram_bit_width;
	/** Constant engine RAM size */
	uint32_t ce_ram_size;
	/** VCE harvesting instance */
	uint32_t vce_harvest_config;
	/** PCI revision ID */
	uint32_t pci_rev_id;
};


/*--------------------------------------------------------------------------*/
/*------------------------- Functions --------------------------------------*/
/*--------------------------------------------------------------------------*/

/*
 * Initialization / Cleanup
 *
*/

/**
 * Initialize the library and create a device context for the given fd
 *
 * \param   fd            - \c [in]  File descriptor for AMD GPU device
 *                                   received previously as the result of
 *                                   e.g. drmOpen() call.
 *                                   For legacy fd type, the DRI2/DRI3
 *                                   authentication should be done before
 *                                   calling this function.
 * \param   major_version - \c [out] Major version of library. It is assumed
 *                                   that adding new functionality will cause
 *                                   an increase in the major version
 * \param   minor_version - \c [out] Minor version of library
 * \param   device_handle - \c [out] Pointer to opaque context which should
 *                                   be passed as the first parameter on each
 *                                   API call
 *
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 *
 * \sa amdgpu_device_deinitialize()
*/
int amdgpu_device_initialize(int fd,
			     uint32_t *major_version,
			     uint32_t *minor_version,
			     amdgpu_device_handle *device_handle);

/**
 *
 * When access to the library is no longer needed, this function must be
 * called, giving an opportunity to clean up any remaining resources.
 *
 * \param   device_handle - \c [in]  Context associated with file
 *                                   descriptor for AMD GPU device
 *                                   received previously as the
 *                                   result of e.g. drmOpen() call.
 *
 * \return  0 on success\n
 *         <0 - Negative POSIX Error code
 *
 * \sa amdgpu_device_initialize()
 *
*/
int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
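
/*
 * Illustrative usage sketch (not part of the API): a minimal
 * initialize/deinitialize sequence. The fd is assumed to come from
 * drmOpen() or an equivalent way of opening the DRM device node;
 * error handling is abbreviated.
 *
 *	uint32_t major, minor;
 *	amdgpu_device_handle dev;
 *	int fd = drmOpen("amdgpu", NULL);
 *
 *	if (amdgpu_device_initialize(fd, &major, &minor, &dev) == 0)
 *		amdgpu_device_deinitialize(dev);
 */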

/*
 * Memory Management
 *
*/

/**
 * Allocate memory to be used by UMD for GPU related operations
 *
 * \param   dev		 - \c [in] Device handle.
 *				   See #amdgpu_device_initialize()
 * \param   alloc_buffer - \c [in] Pointer to the structure describing an
 *				   allocation request
 * \param   buf_handle	- \c [out] Allocated buffer handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_free()
*/
int amdgpu_bo_alloc(amdgpu_device_handle dev,
		    struct amdgpu_bo_alloc_request *alloc_buffer,
		    amdgpu_bo_handle *buf_handle);
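
/*
 * Illustrative sketch (not part of the API): allocating a 1 MiB buffer.
 * "dev" is assumed to come from amdgpu_device_initialize(), and the
 * AMDGPU_GEM_DOMAIN_GTT heap value comes from amdgpu_drm.h; adjust the
 * heap, alignment and flags to the actual use case.
 *
 *	struct amdgpu_bo_alloc_request req = {
 *		.alloc_size = 1024 * 1024,
 *		.phys_alignment = 4096,
 *		.preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
 *		.flags = 0,
 *	};
 *	amdgpu_bo_handle bo;
 *	int r = amdgpu_bo_alloc(dev, &req, &bo);
 *
 * The buffer is released later with amdgpu_bo_free(bo).
 */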

/**
 * Associate opaque data with a buffer to be queried by another UMD
 *
 * \param   dev	       - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   buf_handle - \c [in] Buffer handle
 * \param   info       - \c [in] Metadata to associate with the buffer
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
*/
int amdgpu_bo_set_metadata(amdgpu_bo_handle buf_handle,
			   struct amdgpu_bo_metadata *info);

/**
 * Query buffer information, including metadata previously associated with
 * the buffer.
 *
 * \param   dev	       - \c [in] Device handle.
 *				 See #amdgpu_device_initialize()
 * \param   buf_handle - \c [in]   Buffer handle
 * \param   info       - \c [out]  Structure describing buffer
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
*/
int amdgpu_bo_query_info(amdgpu_bo_handle buf_handle,
			 struct amdgpu_bo_info *info);

/**
 * Allow others to get access to buffer
 *
 * \param   dev		  - \c [in] Device handle.
 *				    See #amdgpu_device_initialize()
 * \param   buf_handle    - \c [in] Buffer handle
 * \param   type          - \c [in] Type of handle requested
 * \param   shared_handle - \c [out] Special "shared" handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_import()
 *
*/
int amdgpu_bo_export(amdgpu_bo_handle buf_handle,
		     enum amdgpu_bo_handle_type type,
		     uint32_t *shared_handle);

/**
 * Request access to a "shared" buffer
 *
 * \param   dev		  - \c [in] Device handle.
 *				    See #amdgpu_device_initialize()
 * \param   type	  - \c [in] Type of handle requested
 * \param   shared_handle - \c [in] Shared handle received as the result of
 *				     the "export" operation
 * \param   output        - \c [out] Pointer to structure with information
 *				     about imported buffer
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \note  Buffer must be "imported" only using a new "fd" (different from
 *	  the one used by the "exporter").
 *
 * \sa amdgpu_bo_export()
 *
*/
int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output);
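
/*
 * Illustrative sketch (not part of the API): sharing a buffer via a DMA-buf
 * fd. "dev_b" is assumed to be a second, separately initialized device
 * handle; across processes the exported fd would instead be passed over a
 * UNIX socket. The importer then uses res.buf_handle and res.alloc_size.
 *
 *	uint32_t dmabuf_fd;
 *	struct amdgpu_bo_import_result res;
 *
 *	amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &dmabuf_fd);
 *	amdgpu_bo_import(dev_b, amdgpu_bo_handle_type_dma_buf_fd,
 *			 dmabuf_fd, &res);
 */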

/**
 * Request GPU access to user allocated memory, e.g. via "malloc"
 *
 * \param dev - [in] Device handle. See #amdgpu_device_initialize()
 * \param cpu - [in] CPU address of user allocated memory which we
 * want to map to GPU address space (make GPU accessible)
 * (This address must be correctly aligned).
 * \param size - [in] Size of allocation (must be correctly aligned)
 * \param buf_handle - [out] Buffer handle for the userptr memory
 * resource, to be referenced on submission and used in other operations.
 *
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \note
 * This call doesn't guarantee that such memory will be persistently
 * "locked" / made non-pageable. The purpose of this call is to provide an
 * opportunity for the GPU to get access to this resource during submission.
 *
 * The maximum amount of memory which can be mapped in this call depends on
 * whether overcommit is disabled or not. If overcommit is disabled, then the
 * max. amount of memory to be pinned is limited by the remaining "free" part
 * of the total amount of memory which can be locked simultaneously (the
 * "GART" size).
 *
 * The supported (theoretical) max. size of a mapping is restricted only by
 * the "GART" size.
 *
 * It is the responsibility of the caller to correctly specify access rights
 * on VA assignment.
*/
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
				    void *cpu, uint64_t size,
				    amdgpu_bo_handle *buf_handle);
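
/*
 * Illustrative sketch (not part of the API): making a page-aligned,
 * page-sized user allocation GPU-accessible. posix_memalign() and the
 * 64 KiB size are illustrative choices only.
 *
 *	void *ptr;
 *	amdgpu_bo_handle user_bo;
 *
 *	posix_memalign(&ptr, 4096, 65536);
 *	amdgpu_create_bo_from_user_mem(dev, ptr, 65536, &user_bo);
 */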

/**
 * Free previously allocated memory
 *
 * \param   dev	       - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   buf_handle - \c [in]  Buffer handle to free
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \note In the case of memory shared between different applications all
 *	 resources will be "physically" freed only once all such applications
 *	 have terminated
 * \note It is the UMD's responsibility to 'free' the buffer only when there
 *	 is no more GPU access
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
 *
*/
int amdgpu_bo_free(amdgpu_bo_handle buf_handle);

/**
 * Request CPU access to GPU accessible memory
 *
 * \param   buf_handle - \c [in] Buffer handle
 * \param   cpu        - \c [out] CPU address to be used for access
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_cpu_unmap()
 *
*/
int amdgpu_bo_cpu_map(amdgpu_bo_handle buf_handle, void **cpu);

/**
 * Release CPU access to GPU memory
 *
 * \param   buf_handle  - \c [in] Buffer handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_cpu_map()
 *
*/
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);
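
/*
 * Illustrative sketch (not part of the API): clearing a buffer from the CPU.
 * The buffer "bo" is assumed to live in a CPU-visible heap and to be 4 KiB
 * in size.
 *
 *	void *cpu;
 *
 *	if (amdgpu_bo_cpu_map(bo, &cpu) == 0) {
 *		memset(cpu, 0, 4096);
 *		amdgpu_bo_cpu_unmap(bo);
 *	}
 */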

/**
 * Wait until a buffer is not used by the device.
 *
 * \param   dev           - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   buf_handle    - \c [in] Buffer handle.
 * \param   timeout_ns    - Timeout in nanoseconds.
 * \param   buffer_busy   - 0 if buffer is idle, all GPU access was completed
 *                            and no GPU access is scheduled.
 *                          1 if GPU access is in flight or scheduled
 *
 * \return   0 - on success
 *          <0 - Negative POSIX Error code
 */
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
			    uint64_t timeout_ns,
			    bool *buffer_busy);

/**
 * Creates a BO list handle for command submission.
 *
 * \param   dev			- \c [in] Device handle.
 *				   See #amdgpu_device_initialize()
 * \param   number_of_resources	- \c [in] Number of BOs in the list
 * \param   resources		- \c [in] List of BO handles
 * \param   resource_prios	- \c [in] Optional priority for each handle
 * \param   result		- \c [out] Created BO list handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_list_destroy()
*/
int amdgpu_bo_list_create(amdgpu_device_handle dev,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios,
			  amdgpu_bo_list_handle *result);

/**
 * Destroys a BO list handle.
 *
 * \param   handle	- \c [in] BO list handle.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_list_create()
*/
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle handle);

/**
 * Update resources for an existing BO list
 *
 * \param   handle              - \c [in] BO list handle
 * \param   number_of_resources - \c [in] Number of BOs in the list
 * \param   resources           - \c [in] List of BO handles
 * \param   resource_prios      - \c [in] Optional priority for each handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_list_create()
*/
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios);
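
/*
 * Illustrative sketch (not part of the API): building a BO list from two
 * buffer handles with default (NULL) priorities. "ib_bo" and "data_bo" are
 * placeholder handles; the list is destroyed once no further submissions
 * reference it.
 *
 *	amdgpu_bo_handle bos[] = { ib_bo, data_bo };
 *	amdgpu_bo_list_handle bo_list;
 *
 *	amdgpu_bo_list_create(dev, 2, bos, NULL, &bo_list);
 *	amdgpu_bo_list_destroy(bo_list);
 */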

/*
 * GPU Execution context
 *
*/

/**
 * Create GPU execution Context
 *
 * For the purpose of GPU Scheduler and GPU Robustness extensions it is
 * necessary to have information to identify rendering/compute contexts.
 * It also may be needed to associate some specific requirements with such
 * contexts.  The kernel driver will guarantee that submissions from the same
 * context will always be executed in order (first come, first served).
 *
 *
 * \param   dev	    - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   context - \c [out] GPU Context handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_ctx_free()
 *
*/
int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
			 amdgpu_context_handle *context);

/**
 *
 * Destroy GPU execution context when not needed any more
 *
 * \param   context - \c [in] GPU Context handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_ctx_create()
 *
*/
int amdgpu_cs_ctx_free(amdgpu_context_handle context);
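
/*
 * Illustrative sketch (not part of the API): context lifetime. All
 * amdgpu_cs_submit() calls for a given client go through a context created
 * here and are released together with it.
 *
 *	amdgpu_context_handle ctx;
 *
 *	if (amdgpu_cs_ctx_create(dev, &ctx) == 0)
 *		amdgpu_cs_ctx_free(ctx);
 */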

/**
 * Query reset state for the specific GPU Context
 *
 * \param   context - \c [in]  GPU Context handle
 * \param   state   - \c [out] One of AMDGPU_CTX_*_RESET
 * \param   hangs   - \c [out] Number of hangs caused by the context.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_ctx_create()
 *
*/
int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
				uint32_t *state, uint32_t *hangs);

/*
 * Command Buffers Management
 *
*/

/**
 * Send a request to submit command buffers to hardware.
 *
 * The kernel driver could use the GPU Scheduler to decide when to physically
 * send this request to the hardware. Accordingly, this request could be put
 * in a queue and sent for execution later. The only guarantee is that requests
 * from the same GPU context to the same ip:ip_instance:ring will be executed
 * in order.
 *
 * The caller can specify the user fence buffer/location with the fence_info in
 * the cs_request. The sequence number is returned via the 'seq_no' parameter
 * in the ibs_request structure.
 *
 *
 * \param   dev		       - \c [in]  Device handle.
 *					  See #amdgpu_device_initialize()
 * \param   context            - \c [in]  GPU Context
 * \param   flags              - \c [in]  Global submission flags
 * \param   ibs_request        - \c [in/out] Pointer to submission requests.
 *					  We could submit to several
 *					  engines/rings simultaneously as an
 *					  'atomic' operation
 * \param   number_of_requests - \c [in]  Number of submission requests
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \note It is required to pass a correct resource list with buffer handles
 *	 which will be accessible by command buffers from the submission.
 *	 This will allow the kernel driver to correctly implement "paging".
 *	 Failure to do so will have unpredictable results.
 *
 * \sa amdgpu_command_buffer_alloc(), amdgpu_command_buffer_free(),
 *     amdgpu_cs_query_fence_status()
 *
*/
int amdgpu_cs_submit(amdgpu_context_handle context,
		     uint64_t flags,
		     struct amdgpu_cs_request *ibs_request,
		     uint32_t number_of_requests);

/**
 *  Query status of Command Buffer Submission
 *
 * \param   fence   - \c [in] Structure describing fence to query
 * \param   timeout_ns - \c [in] Timeout value to wait
 * \param   flags   - \c [in] Flags for the query
 * \param   expired - \c [out] If fence expired or not.\n
 *				0  - if fence is not expired\n
 *				!0 - otherwise
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \note If the UMD only wants to check the operation status and return
 *	 immediately, then a timeout value of 0 must be passed. In this case
 *	 success is returned if the submission was completed, or a timeout
 *	 error code otherwise.
 *
 * \sa amdgpu_cs_submit()
*/
int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
				 uint64_t timeout_ns,
				 uint64_t flags,
				 uint32_t *expired);
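
/*
 * Illustrative sketch (not part of the API): submitting a single IB on the
 * GFX ring and waiting for it. AMDGPU_HW_IP_GFX comes from amdgpu_drm.h;
 * "ib_mc_addr" and "ib_size_dw" are the GPU VA and dword size of an already
 * prepared command buffer, and "bo_list" was created with
 * amdgpu_bo_list_create(). Error handling is omitted.
 *
 *	struct amdgpu_cs_ib_info ib = {
 *		.ib_mc_address = ib_mc_addr,
 *		.size = ib_size_dw,
 *	};
 *	struct amdgpu_cs_request req = {
 *		.ip_type = AMDGPU_HW_IP_GFX,
 *		.ring = 0,
 *		.resources = bo_list,
 *		.number_of_ibs = 1,
 *		.ibs = &ib,
 *	};
 *	struct amdgpu_cs_fence fence = { 0 };
 *	uint32_t expired;
 *
 *	amdgpu_cs_submit(ctx, 0, &req, 1);
 *
 *	fence.context = ctx;
 *	fence.ip_type = AMDGPU_HW_IP_GFX;
 *	fence.ring = 0;
 *	fence.fence = req.seq_no;
 *	amdgpu_cs_query_fence_status(&fence, AMDGPU_TIMEOUT_INFINITE,
 *				     0, &expired);
 */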

/*
 * Query / Info API
 *
*/

/**
 * Query allocation size alignments
 *
 * UMD should query information about GPU VM MC size alignment requirements
 * to be able to correctly choose the required allocation size and implement
 * internal optimizations if needed.
 *
 * \param   dev  - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   info - \c [out] Pointer to structure to get size alignment
 *			  requirements
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				       struct amdgpu_buffer_size_alignments
						*info);

/**
 * Query firmware versions
 *
 * \param   dev	        - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   fw_type     - \c [in] AMDGPU_INFO_FW_*
 * \param   ip_instance - \c [in] Index of the IP block of the same type.
 * \param   index       - \c [in] Index of the engine (for SDMA and MEC).
 * \param   version     - \c [out] Pointer to the "version" return value
 * \param   feature     - \c [out] Pointer to the "feature" return value
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
				  unsigned ip_instance, unsigned index,
				  uint32_t *version, uint32_t *feature);

/**
 * Query the number of HW IP instances of a certain type.
 *
 * \param   dev      - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   type     - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
 * \param   count    - \c [out] Pointer to structure to get information
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
*/
int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
			     uint32_t *count);

/**
 * Query engine information
 *
 * This query allows UMD to query information about different engines and
 * their capabilities.
 *
 * \param   dev         - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   type        - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
 * \param   ip_instance - \c [in] Index of the IP block of the same type.
 * \param   info        - \c [out] Pointer to structure to get information
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
*/
int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
			    unsigned ip_instance,
			    struct drm_amdgpu_info_hw_ip *info);

/**
 * Query heap information
 *
 * This query allows UMD to query potentially available memory resources and
 * adjust their logic if necessary.
 *
 * \param   dev  - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   heap - \c [in] Heap type
 * \param   info - \c [out] Pointer to structure to get needed information
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_heap_info(amdgpu_device_handle dev, uint32_t heap,
			   uint32_t flags, struct amdgpu_heap_info *info);

/**
 * Get the CRTC ID from the mode object ID
 *
 * \param   dev    - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   id     - \c [in] Mode object ID
 * \param   result - \c [out] Pointer to the CRTC ID
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
			      int32_t *result);

/**
 * Query GPU H/w Info
 *
 * Query hardware specific information
 *
 * \param   dev  - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   info - \c [out] Pointer to structure to get needed information
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_gpu_info(amdgpu_device_handle dev,
			   struct amdgpu_gpu_info *info);
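
/*
 * Illustrative sketch (not part of the API): reading basic device
 * properties after initialization.
 *
 *	struct amdgpu_gpu_info gpu_info;
 *
 *	if (amdgpu_query_gpu_info(dev, &gpu_info) == 0)
 *		printf("family %u, %u shader engine(s)\n",
 *		       gpu_info.family_id, gpu_info.num_shader_engines);
 */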

/**
 * Query hardware or driver information.
 *
 * The return size is query-specific and depends on the "info_id" parameter.
 * No more than "size" bytes is returned.
 *
 * \param   dev     - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   info_id - \c [in] AMDGPU_INFO_*
 * \param   size    - \c [in] Size of the returned value.
 * \param   value   - \c [out] Pointer to the return value.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX error code
 *
*/
int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
		      unsigned size, void *value);

/**
 * Query information about GDS
 *
 * \param   dev	     - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   gds_info - \c [out] Pointer to structure to get GDS information
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_gds_info(amdgpu_device_handle dev,
			struct amdgpu_gds_resource_info *gds_info);

/**
 * Read a set of consecutive memory-mapped registers.
 * Not all registers are allowed to be read by userspace.
 *
 * \param   dev          - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   dword_offset - \c [in] Register offset in dwords
 * \param   count        - \c [in] The number of registers to read starting
 *                                 from the offset
 * \param   instance     - \c [in] GRBM_GFX_INDEX selector. It may have other
 *                                 uses. Set it to 0xffffffff if unsure.
 * \param   flags        - \c [in] Flags with additional information.
 * \param   values       - \c [out] The pointer to return values.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX error code
 *
*/
int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
			     unsigned count, uint32_t instance, uint32_t flags,
			     uint32_t *values);

/**
 * Flag to request a VA address range in the 32-bit address space
*/
#define AMDGPU_VA_RANGE_32_BIT		0x1

/**
 * Allocate virtual address range
 *
 * \param dev - [in] Device handle. See #amdgpu_device_initialize()
 * \param va_range_type - \c [in] Type of MC VA range from which to allocate
 * \param size - \c [in] Size of range. The size must be correctly aligned.
 * It is the client's responsibility to correctly align the size based on the
 * future usage of the allocated range.
 * \param va_base_alignment - \c [in] Overwrite base address alignment
 * requirement for GPU VM MC virtual
 * address assignment. Must be a multiple of the size alignments received as
 * 'amdgpu_buffer_size_alignments'.
 * If 0, use the default one.
 * \param va_base_required - \c [in] Specified required VA base address.
 * If 0, the library chooses an available one.
 * If a non-zero value is passed and that value is already "in use", then the
 * corresponding error status will be returned.
 * \param va_base_allocated - \c [out] On return: Allocated VA base to be used
 * by client.
 * \param va_range_handle - \c [out] On return: Handle assigned to allocation
 * \param flags - \c [in] Flags for special VA range
 *
 * \return 0 on success\n
 * >0 - AMD specific error code\n
 * <0 - Negative POSIX Error code
 *
 * \notes \n
 * It is the client's responsibility to correctly handle VA assignments and
 * usage. Neither the kernel driver nor libdrm_amdgpu is able to prevent or
 * detect a wrong VA assignment.
 *
 * It is the client's responsibility to correctly handle multi-GPU cases and
 * to pass the corresponding arrays of all device handles where the
 * corresponding VA will be used.
 *
*/
int amdgpu_va_range_alloc(amdgpu_device_handle dev,
			   enum amdgpu_gpu_va_range va_range_type,
			   uint64_t size,
			   uint64_t va_base_alignment,
			   uint64_t va_base_required,
			   uint64_t *va_base_allocated,
			   amdgpu_va_handle *va_range_handle,
			   uint64_t flags);

/**
 * Free previously allocated virtual address range
 *
 *
 * \param va_range_handle - \c [in] Handle assigned to VA allocation
 *
 * \return 0 on success\n
 * >0 - AMD specific error code\n
 * <0 - Negative POSIX Error code
 *
*/
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle);

/**
* Query virtual address range
*
* UMD can query the GPU VM range supported by each device
* to initialize its own VAM accordingly.
*
* \param   dev   - [in] Device handle. See #amdgpu_device_initialize()
* \param   type  - \c [in] Type of virtual address range
* \param   start - \c [out] Start of the virtual address range
* \param   end   - \c [out] End of the virtual address range
*
* \return   0 on success\n
*          <0 - Negative POSIX Error code
*
*/

int amdgpu_va_range_query(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range type,
			  uint64_t *start,
			  uint64_t *end);

/**
 *  VA mapping/unmapping for the buffer object
 *
 * \param  bo		- \c [in] BO handle
 * \param  offset	- \c [in] Start offset to map
 * \param  size		- \c [in] Size to map
 * \param  addr		- \c [in] Start virtual address.
 * \param  flags	- \c [in] Supported flags for mapping/unmapping
 * \param  ops		- \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/

int amdgpu_bo_va_op(amdgpu_bo_handle bo,
		    uint64_t offset,
		    uint64_t size,
		    uint64_t addr,
		    uint64_t flags,
		    uint32_t ops);
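
/*
 * Illustrative sketch (not part of the API): reserving a VA range and
 * mapping a whole buffer into it with default flags. AMDGPU_VA_OP_MAP
 * comes from amdgpu_drm.h; "bo_size" is the (aligned) size of the buffer
 * being mapped.
 *
 *	uint64_t va;
 *	amdgpu_va_handle va_handle;
 *
 *	amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *			      bo_size, 0, 0, &va, &va_handle, 0);
 *	amdgpu_bo_va_op(bo, 0, bo_size, va, 0, AMDGPU_VA_OP_MAP);
 */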

#endif /* _AMDGPU_H_ */