/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>

#include "private.h"

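/*
 * Allocate a GPU channel for NV04-family hardware through the ABI16
 * DRM_NOUVEAU_CHANNEL_ALLOC ioctl.  The vram/gart handles from the
 * nv04_fifo description are passed as the channel's framebuffer and TT
 * context DMAs, and the returned channel id, pushbuf domains and
 * notifier handle are stored back into the fifo data.
 */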
int
abi16_chan_nv04(struct nouveau_object *obj)
{
	struct nouveau_device *dev = (struct nouveau_device *)obj->parent;
	struct nv04_fifo *nv04 = obj->data;
	struct drm_nouveau_channel_alloc req = {nv04->vram, nv04->gart};
	int ret;

	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nv04->base.channel = req.channel;
	nv04->base.pushbuf = req.pushbuf_domains;
	nv04->notify = req.notifier_handle;
	nv04->base.object->handle = req.channel;
	nv04->base.object->length = sizeof(*nv04);
	return 0;
}

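/*
 * Allocate a GPU channel for NVC0 (Fermi) hardware.  No context DMAs are
 * needed here, so the allocation request is left zero-initialized.
 */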
int
abi16_chan_nvc0(struct nouveau_object *obj)
{
	struct nouveau_device *dev = (struct nouveau_device *)obj->parent;
	struct drm_nouveau_channel_alloc req = {};
	struct nvc0_fifo *nvc0 = obj->data;
	int ret;

	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvc0->base.channel = req.channel;
	nvc0->base.pushbuf = req.pushbuf_domains;
	nvc0->notify = req.notifier_handle;
	nvc0->base.object->handle = req.channel;
	nvc0->base.object->length = sizeof(*nvc0);
	return 0;
}

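/*
 * Allocate a GPU channel for NVE0 (Kepler) and newer hardware.  If the
 * caller's nve0_fifo description is large enough to contain the engine
 * field, fb_ctxdma_handle is set to 0xffffffff to mark the request as an
 * engine selection and the chosen engine is passed in tt_ctxdma_handle.
 */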
int
abi16_chan_nve0(struct nouveau_object *obj)
{
	struct nouveau_device *dev = (struct nouveau_device *)obj->parent;
	struct drm_nouveau_channel_alloc req = {};
	struct nve0_fifo *nve0 = obj->data;
	int ret;

	if (obj->length > offsetof(struct nve0_fifo, engine)) {
		req.fb_ctxdma_handle = 0xffffffff;
		req.tt_ctxdma_handle = nve0->engine;
	}

	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nve0->base.channel = req.channel;
	nve0->base.pushbuf = req.pushbuf_domains;
	nve0->notify = req.notifier_handle;
	nve0->base.object->handle = req.channel;
	nve0->base.object->length = sizeof(*nve0);
	return 0;
}

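/*
 * Allocate an engine object on the parent channel via
 * DRM_NOUVEAU_GROBJ_ALLOC, using the object's handle and class.
 */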
int
abi16_engobj(struct nouveau_object *obj)
{
	struct drm_nouveau_grobj_alloc req = {
		obj->parent->handle, obj->handle, obj->oclass
	};
	struct nouveau_device *dev;
	int ret;

	dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
	ret = drmCommandWrite(dev->fd, DRM_NOUVEAU_GROBJ_ALLOC,
			      &req, sizeof(req));
	if (ret)
		return ret;

	obj->length = sizeof(struct nouveau_object *);
	return 0;
}

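/*
 * Allocate a notifier object on the parent channel via
 * DRM_NOUVEAU_NOTIFIEROBJ_ALLOC and record the offset of the notifier
 * within the channel's notifier block.
 */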
int
abi16_ntfy(struct nouveau_object *obj)
{
	struct nv04_notify *ntfy = obj->data;
	struct drm_nouveau_notifierobj_alloc req = {
		obj->parent->handle, ntfy->object->handle, ntfy->length
	};
	struct nouveau_device *dev;
	int ret;

	dev = nouveau_object_find(obj, NOUVEAU_DEVICE_CLASS);
	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_NOTIFIEROBJ_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	ntfy->offset = req.offset;
	ntfy->object->length = sizeof(*ntfy);
	return 0;
}

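/*
 * Translate the kernel's drm_nouveau_gem_info description of a buffer
 * object into the nouveau_bo representation: placement domains become
 * NOUVEAU_BO_* flags and the tiling words are unpacked into the
 * per-generation (nv04/nv50/nvc0) config union.
 */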
void
abi16_bo_info(struct nouveau_bo *bo, struct drm_nouveau_gem_info *info)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	nvbo->map_handle = info->map_handle;
	bo->handle = info->handle;
	bo->size = info->size;
	bo->offset = info->offset;

	bo->flags = 0;
	if (info->domain & NOUVEAU_GEM_DOMAIN_VRAM)
		bo->flags |= NOUVEAU_BO_VRAM;
	if (info->domain & NOUVEAU_GEM_DOMAIN_GART)
		bo->flags |= NOUVEAU_BO_GART;
	if (!(info->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG))
		bo->flags |= NOUVEAU_BO_CONTIG;
	if (nvbo->map_handle)
		bo->flags |= NOUVEAU_BO_MAP;

	if (bo->device->chipset >= 0xc0) {
		bo->config.nvc0.memtype   = (info->tile_flags & 0xff00) >> 8;
		bo->config.nvc0.tile_mode = info->tile_mode;
	} else
	if (bo->device->chipset >= 0x80 || bo->device->chipset == 0x50) {
		bo->config.nv50.memtype   = (info->tile_flags & 0x07f00) >> 8 |
					    (info->tile_flags & 0x30000) >> 9;
		bo->config.nv50.tile_mode = info->tile_mode << 4;
	} else {
		bo->config.nv04.surf_flags = info->tile_flags & 7;
		bo->config.nv04.surf_pitch = info->tile_mode;
	}
}

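/*
 * Create a new GEM buffer object via DRM_NOUVEAU_GEM_NEW.  The bo's
 * NOUVEAU_BO_* flags and optional per-generation config are packed into
 * the request's domain and tiling fields (the inverse of abi16_bo_info),
 * and on success the returned info is applied to the bo.
 */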
int
abi16_bo_init(struct nouveau_bo *bo, uint32_t alignment,
	      union nouveau_bo_config *config)
{
	struct nouveau_device *dev = bo->device;
	struct drm_nouveau_gem_new req = {};
	struct drm_nouveau_gem_info *info = &req.info;
	int ret;

	if (bo->flags & NOUVEAU_BO_VRAM)
		info->domain |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (bo->flags & NOUVEAU_BO_GART)
		info->domain |= NOUVEAU_GEM_DOMAIN_GART;
	if (!info->domain)
		info->domain |= NOUVEAU_GEM_DOMAIN_VRAM |
				NOUVEAU_GEM_DOMAIN_GART;

	if (bo->flags & NOUVEAU_BO_MAP)
		info->domain |= NOUVEAU_GEM_DOMAIN_MAPPABLE;

	if (!(bo->flags & NOUVEAU_BO_CONTIG))
		info->tile_flags = NOUVEAU_GEM_TILE_NONCONTIG;

	info->size = bo->size;
	req.align = alignment;

	if (config) {
		if (dev->chipset >= 0xc0) {
			info->tile_flags = (config->nvc0.memtype & 0xff) << 8;
			info->tile_mode  = config->nvc0.tile_mode;
		} else
		if (dev->chipset >= 0x80 || dev->chipset == 0x50) {
			info->tile_flags = (config->nv50.memtype & 0x07f) << 8 |
					   (config->nv50.memtype & 0x180) << 9;
			info->tile_mode  = config->nv50.tile_mode >> 4;
		} else {
			info->tile_flags = config->nv04.surf_flags & 7;
			info->tile_mode  = config->nv04.surf_pitch;
		}
	}

	if (!nouveau_device(dev)->have_bo_usage)
		info->tile_flags &= 0x0000ff00;

	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_NEW,
				  &req, sizeof(req));
	if (ret == 0)
		abi16_bo_info(bo, &req.info);
	return ret;
}