/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
24
25 #ifdef HAVE_CONFIG_H
26 #include "config.h"
27 #endif
28
29 #include <errno.h>
30 #include <string.h>
31
32 #include "amdgpu.h"
33 #include "amdgpu_drm.h"
34 #include "amdgpu_internal.h"
35 #include "xf86drm.h"
36
/**
 * Generic AMDGPU_INFO ioctl query.
 *
 * Asks the kernel to write up to \p size bytes for query \p info_id into
 * the caller-provided buffer \p value.
 *
 * \return 0 on success, negative errno from drmCommandWrite on failure.
 */
int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
		      unsigned size, void *value)
{
	struct drm_amdgpu_info req;

	memset(&req, 0, sizeof(req));
	req.query = info_id;
	req.return_pointer = (uintptr_t)value;
	req.return_size = size;

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &req, sizeof(req));
}
50
/**
 * Map a KMS CRTC object id to the hardware CRTC index.
 *
 * The kernel writes the resolved index into \p result.
 *
 * \return 0 on success, negative errno from drmCommandWrite on failure.
 */
int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
			      int32_t *result)
{
	struct drm_amdgpu_info req;

	memset(&req, 0, sizeof(req));
	req.query = AMDGPU_INFO_CRTC_FROM_ID;
	req.mode_crtc.id = id;
	req.return_pointer = (uintptr_t)result;
	req.return_size = sizeof(*result);

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &req, sizeof(req));
}
65
/**
 * Read \p count consecutive memory-mapped registers starting at
 * \p dword_offset (in dword units) into \p values.
 *
 * \p instance selects the SE/SH/engine instance (packed per the
 * AMDGPU_INFO_MMR_*_INDEX_SHIFT layout) and \p flags is passed through
 * to the kernel unmodified.
 *
 * \return 0 on success, negative errno from drmCommandWrite on failure.
 */
int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
			     unsigned count, uint32_t instance, uint32_t flags,
			     uint32_t *values)
{
	struct drm_amdgpu_info req;

	memset(&req, 0, sizeof(req));
	req.query = AMDGPU_INFO_READ_MMR_REG;
	req.read_mmr_reg.dword_offset = dword_offset;
	req.read_mmr_reg.count = count;
	req.read_mmr_reg.instance = instance;
	req.read_mmr_reg.flags = flags;
	req.return_pointer = (uintptr_t)values;
	req.return_size = count * sizeof(uint32_t);

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &req, sizeof(req));
}
84
/**
 * Query how many hardware IP instances of the given \p type exist.
 *
 * The kernel writes the instance count into \p count.
 *
 * \return 0 on success, negative errno from drmCommandWrite on failure.
 */
int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
			     uint32_t *count)
{
	struct drm_amdgpu_info req;

	memset(&req, 0, sizeof(req));
	req.query = AMDGPU_INFO_HW_IP_COUNT;
	req.query_hw_ip.type = type;
	req.return_pointer = (uintptr_t)count;
	req.return_size = sizeof(*count);

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &req, sizeof(req));
}
99
/**
 * Query detailed information for one hardware IP instance.
 *
 * Fills \p info for IP block \p type, instance \p ip_instance.
 *
 * \return 0 on success, negative errno from drmCommandWrite on failure.
 */
int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
			    unsigned ip_instance,
			    struct drm_amdgpu_info_hw_ip *info)
{
	struct drm_amdgpu_info req;

	memset(&req, 0, sizeof(req));
	req.query = AMDGPU_INFO_HW_IP_INFO;
	req.query_hw_ip.type = type;
	req.query_hw_ip.ip_instance = ip_instance;
	req.return_pointer = (uintptr_t)info;
	req.return_size = sizeof(*info);

	return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &req, sizeof(req));
}
116
/**
 * Query the loaded firmware version and feature word for one firmware
 * type/instance/index.
 *
 * \param dev         device handle
 * \param fw_type     AMDGPU_INFO_FW_* firmware type
 * \param ip_instance IP instance index
 * \param index       firmware index within the instance
 * \param version     out: firmware version (must not be NULL)
 * \param feature     out: firmware feature word (must not be NULL)
 *
 * \return 0 on success, -EINVAL for NULL output pointers, otherwise the
 *         negative errno from drmCommandWrite.
 */
int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
				  unsigned ip_instance, unsigned index,
				  uint32_t *version, uint32_t *feature)
{
	struct drm_amdgpu_info request;
	struct drm_amdgpu_info_firmware firmware = {};
	int r;

	/* Reject NULL output pointers up front instead of faulting after
	 * the ioctl has already succeeded; matches the argument checking
	 * style of the other query entry points in this file. */
	if (version == NULL || feature == NULL)
		return -EINVAL;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)&firmware;
	request.return_size = sizeof(firmware);
	request.query = AMDGPU_INFO_FW_VERSION;
	request.query_fw.fw_type = fw_type;
	request.query_fw.ip_instance = ip_instance;
	request.query_fw.index = index;

	r = drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
			    sizeof(struct drm_amdgpu_info));
	if (r)
		return r;

	*version = firmware.ver;
	*feature = firmware.feature;
	return 0;
}
142
/*
 * One-time population of dev->info from the kernel's device-info query
 * plus a series of raw register reads.  Called at device initialization;
 * amdgpu_query_gpu_info() later just copies the cached struct.
 *
 * Returns 0 on success or the first failing query's negative errno.
 */
drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
{
	int r, i;

	/* Base device info from the kernel; everything below derives
	 * from or supplements this. */
	r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(dev->dev_info),
			      &dev->dev_info);
	if (r)
		return r;

	/* Copy the kernel's dev_info fields into the public gpu_info
	 * layout exposed to libdrm_amdgpu users. */
	dev->info.asic_id = dev->dev_info.device_id;
	dev->info.chip_rev = dev->dev_info.chip_rev;
	dev->info.chip_external_rev = dev->dev_info.external_rev;
	dev->info.family_id = dev->dev_info.family;
	dev->info.max_engine_clk = dev->dev_info.max_engine_clock;
	dev->info.max_memory_clk = dev->dev_info.max_memory_clock;
	dev->info.gpu_counter_freq = dev->dev_info.gpu_counter_freq;
	dev->info.enabled_rb_pipes_mask = dev->dev_info.enabled_rb_pipes_mask;
	dev->info.rb_pipes = dev->dev_info.num_rb_pipes;
	dev->info.ids_flags = dev->dev_info.ids_flags;
	dev->info.num_hw_gfx_contexts = dev->dev_info.num_hw_gfx_contexts;
	dev->info.num_shader_engines = dev->dev_info.num_shader_engines;
	dev->info.num_shader_arrays_per_engine =
		dev->dev_info.num_shader_arrays_per_engine;
	dev->info.vram_type = dev->dev_info.vram_type;
	dev->info.vram_bit_width = dev->dev_info.vram_bit_width;
	dev->info.ce_ram_size = dev->dev_info.ce_ram_size;
	dev->info.vce_harvest_config = dev->dev_info.vce_harvest_config;
	dev->info.pci_rev_id = dev->dev_info.pci_rev;

	/* Per-shader-engine register reads: select one SE at a time but
	 * broadcast across all shader arrays (SH index mask set to all). */
	for (i = 0; i < (int)dev->info.num_shader_engines; i++) {
		unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
				(AMDGPU_INFO_MMR_SH_INDEX_MASK <<
				 AMDGPU_INFO_MMR_SH_INDEX_SHIFT);

		r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0,
					     &dev->info.backend_disable[i]);
		if (r)
			return r;
		/* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
		dev->info.backend_disable[i] =
			(dev->info.backend_disable[i] >> 16) & 0xff;

		/* 0xa0d4: raster configuration for this SE — presumably
		 * PA_SC_RASTER_CONFIG; TODO confirm against register spec. */
		r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
					     &dev->info.pa_sc_raster_cfg[i]);
		if (r)
			return r;

		/* The second raster-config register only exists from the
		 * CI family onward. */
		if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
			r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
						     &dev->info.pa_sc_raster_cfg1[i]);
			if (r)
				return r;
		}
	}

	/* Tiling mode tables; instance 0xffffffff means "don't care"
	 * (global, not per-SE/SH). */
	r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
				     dev->info.gb_tile_mode);
	if (r)
		return r;

	/* Macro tile modes exist only on CI and newer families. */
	if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
		r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
					     dev->info.gb_macro_tile_mode);
		if (r)
			return r;
	}

	r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
				     &dev->info.gb_addr_cfg);
	if (r)
		return r;

	r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0,
				     &dev->info.mc_arb_ramcfg);
	if (r)
		return r;

	dev->info.cu_active_number = dev->dev_info.cu_active_number;
	dev->info.cu_ao_mask = dev->dev_info.cu_ao_mask;
	memcpy(&dev->info.cu_bitmap[0][0], &dev->dev_info.cu_bitmap[0][0], sizeof(dev->info.cu_bitmap));

	/* TODO: info->max_quad_shader_pipes is not set */
	/* TODO: info->avail_quad_shader_pipes is not set */
	/* TODO: info->cache_entries_per_quad_pipe is not set */
	return 0;
}
229
/**
 * Copy the ASIC information cached at device-initialization time into
 * the caller's \p info structure.
 *
 * \return 0 on success, -EINVAL if either argument is NULL.
 */
int amdgpu_query_gpu_info(amdgpu_device_handle dev,
			  struct amdgpu_gpu_info *info)
{
	if (!dev || !info)
		return -EINVAL;

	/* No ioctl needed: amdgpu_query_gpu_info_init() already
	 * populated dev->info. */
	*info = dev->info;
	return 0;
}
240
/**
 * Query size, maximum allocation size and current usage of one memory
 * heap (VRAM or GTT).
 *
 * \param dev   device handle
 * \param heap  AMDGPU_GEM_DOMAIN_VRAM or AMDGPU_GEM_DOMAIN_GTT
 * \param flags for VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED restricts
 *              the query to the CPU-visible part of VRAM
 * \param info  out: heap information (must not be NULL)
 *
 * \return 0 on success, -EINVAL for a NULL \p info or unknown \p heap,
 *         otherwise the negative errno from the underlying query.
 */
int amdgpu_query_heap_info(amdgpu_device_handle dev,
			   uint32_t heap,
			   uint32_t flags,
			   struct amdgpu_heap_info *info)
{
	struct drm_amdgpu_info_vram_gtt vram_gtt_info = {};
	int r;

	/* NULL check for consistency with amdgpu_query_gds_info() and
	 * amdgpu_query_gpu_info(). */
	if (info == NULL)
		return -EINVAL;

	r = amdgpu_query_info(dev, AMDGPU_INFO_VRAM_GTT,
			      sizeof(vram_gtt_info), &vram_gtt_info);
	if (r)
		return r;

	/* Get heap information */
	switch (heap) {
	case AMDGPU_GEM_DOMAIN_VRAM: {
		/* CPU-access-required narrows both the reported size and
		 * the usage query to the CPU-visible VRAM window. */
		bool visible_only =
			!!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED);

		info->heap_size = visible_only ?
			vram_gtt_info.vram_cpu_accessible_size :
			vram_gtt_info.vram_size;
		info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;

		r = amdgpu_query_info(dev,
				      visible_only ?
				      AMDGPU_INFO_VIS_VRAM_USAGE :
				      AMDGPU_INFO_VRAM_USAGE,
				      sizeof(info->heap_usage),
				      &info->heap_usage);
		if (r)
			return r;
		break;
	}
	case AMDGPU_GEM_DOMAIN_GTT:
		info->heap_size = vram_gtt_info.gtt_size;
		info->max_allocation = vram_gtt_info.vram_cpu_accessible_size;

		r = amdgpu_query_info(dev, AMDGPU_INFO_GTT_USAGE,
				      sizeof(info->heap_usage),
				      &info->heap_usage);
		if (r)
			return r;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
292
/**
 * Query the Global Data Share (GDS) partitioning configuration.
 *
 * Fills \p gds_info with the GDS/GWS/OA partition sizes reported by the
 * kernel.
 *
 * \return 0 on success, -EINVAL for a NULL \p gds_info, otherwise the
 *         negative errno from the underlying query.
 */
int amdgpu_query_gds_info(amdgpu_device_handle dev,
			  struct amdgpu_gds_resource_info *gds_info)
{
	struct drm_amdgpu_info_gds config = {};
	int r;

	if (!gds_info)
		return -EINVAL;

	r = amdgpu_query_info(dev, AMDGPU_INFO_GDS_CONFIG,
			      sizeof(config), &config);
	if (r)
		return r;

	/* Translate the kernel layout into the public structure. */
	gds_info->gds_gfx_partition_size = config.gds_gfx_partition_size;
	gds_info->compute_partition_size = config.compute_partition_size;
	gds_info->gds_total_size = config.gds_total_size;
	gds_info->gws_per_gfx_partition = config.gws_per_gfx_partition;
	gds_info->gws_per_compute_partition = config.gws_per_compute_partition;
	gds_info->oa_per_gfx_partition = config.oa_per_gfx_partition;
	gds_info->oa_per_compute_partition = config.oa_per_compute_partition;

	return 0;
}
317