/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>

#include "CUnit/Basic.h"

#include "amdgpu_test.h"
#include "amdgpu_drm.h"

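/* Size and required alignment of the buffer shared by the whole suite
 * (a single 4 KiB page).
 */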
#define BUFFER_SIZE (4*1024)
#define BUFFER_ALIGN (4*1024)

static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;

static amdgpu_bo_handle buffer_handle;
static uint64_t virtual_mc_base_address;
static amdgpu_va_handle va_handle;

static void amdgpu_bo_export_import(void);
static void amdgpu_bo_metadata(void);
static void amdgpu_bo_map_unmap(void);
static void amdgpu_memory_alloc(void);
static void amdgpu_mem_fail_alloc(void);

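/* Tests registered with CUnit; each entry pairs a display name with the
 * function that implements the test.
 */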
CU_TestInfo bo_tests[] = {
	{ "Export/Import",  amdgpu_bo_export_import },
	{ "Metadata",  amdgpu_bo_metadata },
	{ "CPU map/unmap",  amdgpu_bo_map_unmap },
	{ "Memory alloc Test",  amdgpu_memory_alloc },
	{ "Memory fail alloc Test",  amdgpu_mem_fail_alloc },
	CU_TEST_INFO_NULL,
};

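/* Suite setup: open the first amdgpu device, allocate a page-sized GTT
 * buffer, and map it into the GPU virtual address space. The resulting
 * handles are shared by every test in the suite.
 */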
int suite_bo_tests_init(void)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	uint64_t va;
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				  &minor_version, &device_handle);
	if (r) {
		if ((r == -EACCES) && (errno == EACCES))
			printf("\n\nError: %s. "
				"Hint: try running this test program as root.",
				strerror(errno));

		return CUE_SINIT_FAILED;
	}

	req.alloc_size = BUFFER_SIZE;
	req.phys_alignment = BUFFER_ALIGN;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	if (r)
		return CUE_SINIT_FAILED;

	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  BUFFER_SIZE, BUFFER_ALIGN, 0,
				  &va, &va_handle, 0);
	if (r)
		goto error_va_alloc;

	r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, va, 0, AMDGPU_VA_OP_MAP);
	if (r)
		goto error_va_map;

	buffer_handle = buf_handle;
	virtual_mc_base_address = va;

	return CUE_SUCCESS;

error_va_map:
	amdgpu_va_range_free(va_handle);

error_va_alloc:
	amdgpu_bo_free(buf_handle);
	return CUE_SINIT_FAILED;
}

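/* Suite teardown: unmap the shared buffer, release its VA range, free the
 * BO, and close the device. Any failure is reported as a cleanup error.
 */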
int suite_bo_tests_clean(void)
{
	int r;

	r = amdgpu_bo_va_op(buffer_handle, 0, BUFFER_SIZE,
			    virtual_mc_base_address, 0,
			    AMDGPU_VA_OP_UNMAP);
	if (r)
		return CUE_SCLEAN_FAILED;

	r = amdgpu_va_range_free(va_handle);
	if (r)
		return CUE_SCLEAN_FAILED;

	r = amdgpu_bo_free(buffer_handle);
	if (r)
		return CUE_SCLEAN_FAILED;

	r = amdgpu_device_deinitialize(device_handle);
	if (r)
		return CUE_SCLEAN_FAILED;

	return CUE_SUCCESS;
}

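/* Export the shared buffer through the given handle type, import it back,
 * and check that the import resolves to the same BO with the same size.
 */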
static void amdgpu_bo_export_import_do_type(enum amdgpu_bo_handle_type type)
{
	struct amdgpu_bo_import_result res = {0};
	uint32_t shared_handle;
	int r;

	r = amdgpu_bo_export(buffer_handle, type, &shared_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_import(device_handle, type, shared_handle, &res);
	CU_ASSERT_EQUAL(r, 0);

	CU_ASSERT_EQUAL(res.buf_handle, buffer_handle);
	CU_ASSERT_EQUAL(res.alloc_size, BUFFER_SIZE);

	/* The import added a reference to the existing BO; drop it. */
	r = amdgpu_bo_free(res.buf_handle);
	CU_ASSERT_EQUAL(r, 0);
}

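/* Run the export/import round trip for both supported handle types:
 * GEM flink names and dma-buf file descriptors.
 */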
static void amdgpu_bo_export_import(void)
{
	if (open_render_node) {
		printf("(DRM render node is used; skipping export/import test) ");
		return;
	}

	amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_gem_flink_name);
	amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_dma_buf_fd);
}

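/* Attach a one-dword UMD metadata blob to the shared buffer and verify
 * that a subsequent query returns the same size and contents.
 */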
static void amdgpu_bo_metadata(void)
{
	struct amdgpu_bo_metadata meta = {0};
	struct amdgpu_bo_info info = {0};
	int r;

	meta.size_metadata = 1;
	meta.umd_metadata[0] = 0xdeadbeef;

	r = amdgpu_bo_set_metadata(buffer_handle, &meta);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_query_info(buffer_handle, &info);
	CU_ASSERT_EQUAL(r, 0);

	CU_ASSERT_EQUAL(info.metadata.size_metadata, 1);
	CU_ASSERT_EQUAL(info.metadata.umd_metadata[0], 0xdeadbeef);
}

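/* Map the shared buffer for CPU access, fill it with a test pattern one
 * dword at a time, then unmap it again.
 */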
static void amdgpu_bo_map_unmap(void)
{
	uint32_t *ptr;
	int i, r;

	r = amdgpu_bo_cpu_map(buffer_handle, (void **)&ptr);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_NOT_EQUAL(ptr, NULL);

	for (i = 0; i < (BUFFER_SIZE / 4); ++i)
		ptr[i] = 0xdeadbeef;

	r = amdgpu_bo_cpu_unmap(buffer_handle);
	CU_ASSERT_EQUAL(r, 0);
}

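/* Allocate and free one page from each domain/flag combination the driver
 * distinguishes: CPU-visible VRAM, CPU-invisible VRAM, cacheable GTT, and
 * write-combined (USWC) GTT.
 */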
static void amdgpu_memory_alloc(void)
{
	amdgpu_bo_handle bo;
	amdgpu_va_handle va_handle;
	uint64_t bo_mc;
	int r;

	/* Test visible VRAM */
	bo = gpu_mem_alloc(device_handle,
			4096, 4096,
			AMDGPU_GEM_DOMAIN_VRAM,
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			&bo_mc, &va_handle);

	r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
	CU_ASSERT_EQUAL(r, 0);

	/* Test invisible VRAM */
	bo = gpu_mem_alloc(device_handle,
			4096, 4096,
			AMDGPU_GEM_DOMAIN_VRAM,
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
			&bo_mc, &va_handle);

	r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
	CU_ASSERT_EQUAL(r, 0);

	/* Test GART Cacheable */
	bo = gpu_mem_alloc(device_handle,
			4096, 4096,
			AMDGPU_GEM_DOMAIN_GTT,
			0, &bo_mc, &va_handle);

	r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
	CU_ASSERT_EQUAL(r, 0);

	/* Test GART USWC */
	bo = gpu_mem_alloc(device_handle,
			4096, 4096,
			AMDGPU_GEM_DOMAIN_GTT,
			AMDGPU_GEM_CREATE_CPU_GTT_USWC,
			&bo_mc, &va_handle);

	r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
	CU_ASSERT_EQUAL(r, 0);
}

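/* Request an absurdly large (1 TB) VRAM allocation and expect -ENOMEM.
 * If the allocation unexpectedly succeeds, free the buffer so the suite
 * can still tear down cleanly.
 */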
static void amdgpu_mem_fail_alloc(void)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	int r;

	/* Test impossible mem allocation, 1TB */
	req.alloc_size = 0xE8D4A51000;
	req.phys_alignment = 4096;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
	req.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;

	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	CU_ASSERT_EQUAL(r, -ENOMEM);

	if (!r) {
		r = amdgpu_bo_free(buf_handle);
		CU_ASSERT_EQUAL(r, 0);
	}
}