/*
 * Copyright (C) 2014 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */

#include "os/os_mman.h"
#include "util/hash_table.h"

#include "etnaviv_priv.h"
#include "etnaviv_drmif.h"

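/* protects the per-device handle/name hash tables and the final
 * unreference of a bo (see etna_bo_del()):
 */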
pthread_mutex_t etna_drm_table_lock = PTHREAD_MUTEX_INITIALIZER;
void _etna_bo_del(struct etna_bo *bo);

/* set buffer name and add it to the name table, call w/ etna_drm_table_lock held: */
static void set_name(struct etna_bo *bo, uint32_t name)
{
	bo->name = name;
	/* add ourselves to the name table: */
	_mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
}

/* Called under etna_drm_table_lock */
void _etna_bo_del(struct etna_bo *bo)
{
	VG_BO_FREE(bo);

	if (bo->va)
		util_vma_heap_free(&bo->dev->address_space, bo->va, bo->size);

	if (bo->map)
		os_munmap(bo->map, bo->size);

	if (bo->handle) {
		struct drm_gem_close req = {
			.handle = bo->handle,
		};

		if (bo->name)
			_mesa_hash_table_remove_key(bo->dev->name_table, &bo->name);

		_mesa_hash_table_remove_key(bo->dev->handle_table, &bo->handle);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	free(bo);
}

/* lookup a buffer from its handle, call w/ etna_drm_table_lock held: */
static struct etna_bo *lookup_bo(void *tbl, uint32_t handle)
{
	struct etna_bo *bo = NULL;
	struct hash_entry *entry = _mesa_hash_table_search(tbl, &handle);

	if (entry) {
		/* found, incr refcnt and return: */
		bo = etna_bo_ref(entry->data);

		/* don't break the bucket if this bo was found in one */
		list_delinit(&bo->list);
	}

	return bo;
}

/* allocate a new buffer object, call w/ etna_drm_table_lock held */
static struct etna_bo *bo_from_handle(struct etna_device *dev,
		uint32_t size, uint32_t handle, uint32_t flags)
{
	struct etna_bo *bo = calloc(sizeof(*bo), 1);

	if (!bo) {
		struct drm_gem_close req = {
			.handle = handle,
		};

		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);

		return NULL;
	}

	bo->dev = etna_device_ref(dev);
	bo->size = size;
	bo->handle = handle;
	bo->flags = flags;
	p_atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourselves to the handle table: */
	_mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);

	if (dev->use_softpin)
		bo->va = util_vma_heap_alloc(&dev->address_space, bo->size, 4096);

	return bo;
}

/* allocate a new (un-tiled) buffer object */
struct etna_bo *etna_bo_new(struct etna_device *dev, uint32_t size,
		uint32_t flags)
{
	struct etna_bo *bo;
	int ret;
	struct drm_etnaviv_gem_new req = {
			.flags = flags,
	};

	bo = etna_bo_cache_alloc(&dev->bo_cache, &size, flags);
	if (bo)
		return bo;

	req.size = size;
	ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_GEM_NEW,
			&req, sizeof(req));
	if (ret)
		return NULL;

	pthread_mutex_lock(&etna_drm_table_lock);
	bo = bo_from_handle(dev, size, req.handle, flags);
	bo->reuse = 1;
	pthread_mutex_unlock(&etna_drm_table_lock);

	VG_BO_ALLOC(bo);

	return bo;
}

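/* take a reference on the buffer object: */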
struct etna_bo *etna_bo_ref(struct etna_bo *bo)
{
	p_atomic_inc(&bo->refcnt);

	return bo;
}

/* get buffer info */
static int get_buffer_info(struct etna_bo *bo)
{
	int ret;
	struct drm_etnaviv_gem_info req = {
		.handle = bo->handle,
	};

	ret = drmCommandWriteRead(bo->dev->fd, DRM_ETNAVIV_GEM_INFO,
			&req, sizeof(req));
	if (ret) {
		return ret;
	}

	/* really all we need for now is mmap offset */
	bo->offset = req.offset;

	return 0;
}

/* import a buffer object from DRI2 name */
struct etna_bo *etna_bo_from_name(struct etna_device *dev,
		uint32_t name)
{
	struct etna_bo *bo;
	struct drm_gem_open req = {
		.name = name,
	};

	pthread_mutex_lock(&etna_drm_table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle, 0);
	if (bo) {
		set_name(bo, name);
		VG_BO_ALLOC(bo);
	}

out_unlock:
	pthread_mutex_unlock(&etna_drm_table_lock);

	return bo;
}

/* import a buffer from a dmabuf fd; this does not take ownership of the
 * fd, so the caller should close() the fd when it is otherwise done
 * with it (even if it is still using the 'struct etna_bo *')
 */
struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
{
	struct etna_bo *bo;
	int ret, size;
	uint32_t handle;

	/* take the lock before calling drmPrimeFDToHandle to avoid
	 * racing against etna_bo_del, which might invalidate the
	 * returned handle.
	 */
	pthread_mutex_lock(&etna_drm_table_lock);

	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		pthread_mutex_unlock(&etna_drm_table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle, 0);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&etna_drm_table_lock);

	return bo;
}

/* destroy a buffer object */
void etna_bo_del(struct etna_bo *bo)
{
	if (!bo)
		return;

	struct etna_device *dev = bo->dev;

	pthread_mutex_lock(&etna_drm_table_lock);

	/* Must test under table lock to avoid racing with the from_dmabuf/name
	 * paths, which rely on the BO refcount to be stable over the lookup, so
	 * they can grab a reference when the BO is found in the hash.
	 */
	if (!p_atomic_dec_zero(&bo->refcnt))
		goto out;

	if (bo->reuse && (etna_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;

	_etna_bo_del(bo);
	etna_device_del_locked(dev);
out:
	pthread_mutex_unlock(&etna_drm_table_lock);
}

/* get the global flink/DRI2 buffer name */
int etna_bo_get_name(struct etna_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
			.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		pthread_mutex_lock(&etna_drm_table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&etna_drm_table_lock);
		bo->reuse = 0;
	}

	*name = bo->name;

	return 0;
}

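/* get the GEM handle of the buffer object: */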
uint32_t etna_bo_handle(struct etna_bo *bo)
{
	return bo->handle;
}

/* the caller owns the dmabuf fd that is returned and is responsible
 * for closing it (with close()) when done
 */
int etna_bo_dmabuf(struct etna_bo *bo)
{
	int ret, prime_fd;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				&prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
	}

	bo->reuse = 0;

	return prime_fd;
}

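/* get the size of the buffer object: */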
uint32_t etna_bo_size(struct etna_bo *bo)
{
	return bo->size;
}

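/* get the GPU virtual address of the buffer object (only non-zero when
 * the device uses softpin, see bo_from_handle()):
 */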
uint32_t etna_bo_gpu_va(struct etna_bo *bo)
{
	return bo->va;
}

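/* map the buffer object into CPU address space; the mapping is created
 * lazily on first use and cached in bo->map:
 */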
void *etna_bo_map(struct etna_bo *bo)
{
	if (!bo->map) {
		if (!bo->offset) {
			get_buffer_info(bo);
		}

		bo->map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				  MAP_SHARED, bo->dev->fd, bo->offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}

	return bo->map;
}

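/* wait (with a 5 second timeout) until the GPU is done with the bo, so
 * the CPU can safely access it:
 */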
int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op)
{
	struct drm_etnaviv_gem_cpu_prep req = {
		.handle = bo->handle,
		.op = op,
	};

	get_abs_timeout(&req.timeout, 5000000000);

	return drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_PREP,
			&req, sizeof(req));
}

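/* signal to the kernel that CPU access to the bo is finished: */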
void etna_bo_cpu_fini(struct etna_bo *bo)
{
	struct drm_etnaviv_gem_cpu_fini req = {
		.handle = bo->handle,
	};

	drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_FINI,
			&req, sizeof(req));
}
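
/*
 * Illustrative usage sketch (not a definitive pattern): a typical CPU access
 * sequence pairs etna_bo_cpu_prep() with etna_bo_cpu_fini() around the mapped
 * access.  The ETNA_BO_WC and DRM_ETNA_PREP_WRITE flags below are assumed to
 * come from etnaviv_drmif.h:
 *
 *	struct etna_bo *bo = etna_bo_new(dev, 4096, ETNA_BO_WC);
 *	uint32_t *ptr = etna_bo_map(bo);
 *
 *	if (ptr && !etna_bo_cpu_prep(bo, DRM_ETNA_PREP_WRITE)) {
 *		ptr[0] = 0xdeadbeef;
 *		etna_bo_cpu_fini(bo);
 *	}
 *
 *	etna_bo_del(bo);
 */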