/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *
 */

#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_surface.h"

#include "nv_m2mf.xml.h"
#include "nv_object.xml.h"
#include "nv30/nv30_screen.h"
#include "nv30/nv30_context.h"
#include "nv30/nv30_resource.h"
#include "nv30/nv30_transfer.h"

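/* Byte offset of a single layer/depth-slice within the miptree: cube faces
 * are spaced mt->layer_size apart, while anything else steps through the
 * level in zslice_size increments.
 */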
static inline unsigned
layer_offset(struct pipe_resource *pt, unsigned level, unsigned layer)
{
   struct nv30_miptree *mt = nv30_miptree(pt);
   struct nv30_miptree_level *lvl = &mt->level[level];

   if (pt->target == PIPE_TEXTURE_CUBE)
      return (layer * mt->layer_size) + lvl->offset;

   return lvl->offset + (layer * lvl->zslice_size);
}

static bool
nv30_miptree_get_handle(struct pipe_screen *pscreen,
                        struct pipe_resource *pt,
                        struct winsys_handle *handle)
{
   struct nv30_miptree *mt = nv30_miptree(pt);
   unsigned stride;

   if (!mt || !mt->base.bo)
      return false;

   stride = mt->level[0].pitch;

   return nouveau_screen_bo_get_handle(pscreen, mt->base.bo, stride, handle);
}

static void
nv30_miptree_destroy(struct pipe_screen *pscreen, struct pipe_resource *pt)
{
   struct nv30_miptree *mt = nv30_miptree(pt);

   nouveau_bo_ref(NULL, &mt->base.bo);
   FREE(mt);
}

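/* A mapped transfer: "img" describes the accessed region of the miptree
 * itself, "tmp" the linear GART bounce buffer the CPU actually maps.
 */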
struct nv30_transfer {
   struct pipe_transfer base;
   struct nv30_rect img;
   struct nv30_rect tmp;
   unsigned nblocksx;
   unsigned nblocksy;
};

static inline struct nv30_transfer *
nv30_transfer(struct pipe_transfer *ptx)
{
   return (struct nv30_transfer *)ptx;
}

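/* Fill in an nv30_rect describing a 2D window of one miptree level, in
 * block (not pixel) units and scaled up for multisampling.  Swizzled
 * surfaces get a pitch of zero; linear ones use the level's pitch.
 */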
static inline void
define_rect(struct pipe_resource *pt, unsigned level, unsigned z,
            unsigned x, unsigned y, unsigned w, unsigned h,
            struct nv30_rect *rect)
{
   struct nv30_miptree *mt = nv30_miptree(pt);
   struct nv30_miptree_level *lvl = &mt->level[level];

   rect->w = u_minify(pt->width0, level) << mt->ms_x;
   rect->w = util_format_get_nblocksx(pt->format, rect->w);
   rect->h = u_minify(pt->height0, level) << mt->ms_y;
   rect->h = util_format_get_nblocksy(pt->format, rect->h);
   rect->d = 1;
   rect->z = 0;
   if (mt->swizzled) {
      if (pt->target == PIPE_TEXTURE_3D) {
         rect->d = u_minify(pt->depth0, level);
         rect->z = z; z = 0;
      }
      rect->pitch = 0;
   } else {
      rect->pitch = lvl->pitch;
   }

   rect->bo     = mt->base.bo;
   rect->domain = NOUVEAU_BO_VRAM;
   rect->offset = layer_offset(pt, level, z);
   rect->cpp    = util_format_get_blocksize(pt->format);

   rect->x0     = util_format_get_nblocksx(pt->format, x) << mt->ms_x;
   rect->y0     = util_format_get_nblocksy(pt->format, y) << mt->ms_y;
   rect->x1     = rect->x0 + (util_format_get_nblocksx(pt->format, w) << mt->ms_x);
   rect->y1     = rect->y0 + (util_format_get_nblocksy(pt->format, h) << mt->ms_y);

   /* XXX There's some indication that swizzled formats > 4 bytes are treated
    * differently. However that only applies to RGBA16_FLOAT, RGBA32_FLOAT,
    * and the DXT* formats. The former aren't properly supported yet, and the
    * latter avoid swizzled layouts.

   if (mt->swizzled && rect->cpp > 4) {
      unsigned scale = rect->cpp / 4;
      rect->w *= scale;
      rect->x0 *= scale;
      rect->x1 *= scale;
      rect->cpp = 4;
   }
   */
}

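/* Buffer-to-buffer copies go through the nouveau copy helper; everything
 * else is copied with nv30_transfer_rect() using NEAREST (point) filtering.
 */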
void
nv30_resource_copy_region(struct pipe_context *pipe,
                          struct pipe_resource *dstres, unsigned dst_level,
                          unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *srcres, unsigned src_level,
                          const struct pipe_box *src_box)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct nv30_rect src, dst;

   if (dstres->target == PIPE_BUFFER && srcres->target == PIPE_BUFFER) {
      nouveau_copy_buffer(&nv30->base,
                          nv04_resource(dstres), dstx,
                          nv04_resource(srcres), src_box->x, src_box->width);
      return;
   }

   define_rect(srcres, src_level, src_box->z, src_box->x, src_box->y,
                       src_box->width, src_box->height, &src);
   define_rect(dstres, dst_level, dstz, dstx, dsty,
                       src_box->width, src_box->height, &dst);

   nv30_transfer_rect(nv30, NEAREST, &src, &dst);
}

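/* Resolve a multisampled surface by downscaling it with BILINEAR filtered
 * transfers.  Since the scaler can only handle 1024x1024 at a time, the
 * source is walked in tiles.
 */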
static void
nv30_resource_resolve(struct nv30_context *nv30,
                      const struct pipe_blit_info *info)
{
   struct nv30_miptree *src_mt = nv30_miptree(info->src.resource);
   struct nv30_rect src, dst;
   unsigned x, x0, x1, y, y1, w, h;

   define_rect(info->src.resource, 0, info->src.box.z, info->src.box.x,
      info->src.box.y, info->src.box.width, info->src.box.height, &src);
   define_rect(info->dst.resource, 0, info->dst.box.z, info->dst.box.x,
      info->dst.box.y, info->dst.box.width, info->dst.box.height, &dst);

   x0 = src.x0;
   x1 = src.x1;
   y1 = src.y1;

   /* On nv3x we must use sifm, which is restricted to 1024x1024 tiles */
   for (y = src.y0; y < y1; y += h) {
      h = y1 - y;
      if (h > 1024)
         h = 1024;

      src.y0 = 0;
      src.y1 = h;
      src.h = h;

      dst.y1 = dst.y0 + (h >> src_mt->ms_y);
      dst.h = h >> src_mt->ms_y;

      for (x = x0; x < x1; x += w) {
         w = x1 - x;
         if (w > 1024)
            w = 1024;

         src.offset = y * src.pitch + x * src.cpp;
         src.x0 = 0;
         src.x1 = w;
         src.w = w;

         dst.offset = (y >> src_mt->ms_y) * dst.pitch +
                      (x >> src_mt->ms_x) * dst.cpp;
         dst.x1 = dst.x0 + (w >> src_mt->ms_x);
         dst.w = w >> src_mt->ms_x;

         nv30_transfer_rect(nv30, BILINEAR, &src, &dst);
      }
   }
}

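/* Generic blit entry point: color resolves are handled by the sifm path
 * above, straight copies by util_try_blit_via_copy_region(), and anything
 * else falls back to the draw-based u_blitter path.
 */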
void
nv30_blit(struct pipe_context *pipe,
          const struct pipe_blit_info *blit_info)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct pipe_blit_info info = *blit_info;

   if (info.src.resource->nr_samples > 1 &&
       info.dst.resource->nr_samples <= 1 &&
       !util_format_is_depth_or_stencil(info.src.resource->format) &&
       !util_format_is_pure_integer(info.src.resource->format)) {
      nv30_resource_resolve(nv30, blit_info);
      return;
   }

   if (util_try_blit_via_copy_region(pipe, &info)) {
      return; /* done */
   }

   if (info.mask & PIPE_MASK_S) {
      debug_printf("nv30: cannot blit stencil, skipping\n");
      info.mask &= ~PIPE_MASK_S;
   }

   if (!util_blitter_is_blit_supported(nv30->blitter, &info)) {
      debug_printf("nv30: blit unsupported %s -> %s\n",
                   util_format_short_name(info.src.resource->format),
                   util_format_short_name(info.dst.resource->format));
      return;
   }

   /* XXX turn off occlusion queries */

   util_blitter_save_vertex_buffer_slot(nv30->blitter, nv30->vtxbuf);
   util_blitter_save_vertex_elements(nv30->blitter, nv30->vertex);
   util_blitter_save_vertex_shader(nv30->blitter, nv30->vertprog.program);
   util_blitter_save_rasterizer(nv30->blitter, nv30->rast);
   util_blitter_save_viewport(nv30->blitter, &nv30->viewport);
   util_blitter_save_scissor(nv30->blitter, &nv30->scissor);
   util_blitter_save_fragment_shader(nv30->blitter, nv30->fragprog.program);
   util_blitter_save_blend(nv30->blitter, nv30->blend);
   util_blitter_save_depth_stencil_alpha(nv30->blitter,
                                         nv30->zsa);
   util_blitter_save_stencil_ref(nv30->blitter, &nv30->stencil_ref);
   util_blitter_save_sample_mask(nv30->blitter, nv30->sample_mask);
   util_blitter_save_framebuffer(nv30->blitter, &nv30->framebuffer);
   util_blitter_save_fragment_sampler_states(nv30->blitter,
                     nv30->fragprog.num_samplers,
                     (void**)nv30->fragprog.samplers);
   util_blitter_save_fragment_sampler_views(nv30->blitter,
                     nv30->fragprog.num_textures, nv30->fragprog.textures);
   util_blitter_save_render_condition(nv30->blitter, nv30->render_cond_query,
                                      nv30->render_cond_cond, nv30->render_cond_mode);
   util_blitter_blit(nv30->blitter, &info);
}

void
nv30_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}

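/* Maps are staged through a temporary linear buffer in GART: for reads the
 * requested box is copied into the bounce buffer up front (one rect per
 * depth/array layer); for writes it is copied back at unmap time.
 */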
static void *
nv30_miptree_transfer_map(struct pipe_context *pipe, struct pipe_resource *pt,
                          unsigned level, unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct nouveau_device *dev = nv30->screen->base.device;
   struct nv30_miptree *mt = nv30_miptree(pt);
   struct nv30_transfer *tx;
   unsigned access = 0;
   int ret;

   tx = CALLOC_STRUCT(nv30_transfer);
   if (!tx)
      return NULL;
   pipe_resource_reference(&tx->base.resource, pt);
   tx->base.level = level;
   tx->base.usage = usage;
   tx->base.box = *box;
   tx->base.stride = align(util_format_get_nblocksx(pt->format, box->width) *
                           util_format_get_blocksize(pt->format), 64);
   tx->base.layer_stride = util_format_get_nblocksy(pt->format, box->height) *
                           tx->base.stride;

   tx->nblocksx = util_format_get_nblocksx(pt->format, box->width);
   tx->nblocksy = util_format_get_nblocksy(pt->format, box->height);

   define_rect(pt, level, box->z, box->x, box->y,
               box->width, box->height, &tx->img);

   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
                        tx->base.layer_stride * tx->base.box.depth, NULL,
                        &tx->tmp.bo);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      FREE(tx);
      return NULL;
   }

   tx->tmp.domain = NOUVEAU_BO_GART;
   tx->tmp.offset = 0;
   tx->tmp.pitch  = tx->base.stride;
   tx->tmp.cpp    = tx->img.cpp;
   tx->tmp.w      = tx->nblocksx;
   tx->tmp.h      = tx->nblocksy;
   tx->tmp.d      = 1;
   tx->tmp.x0     = 0;
   tx->tmp.y0     = 0;
   tx->tmp.x1     = tx->tmp.w;
   tx->tmp.y1     = tx->tmp.h;
   tx->tmp.z      = 0;

   if (usage & PIPE_MAP_READ) {
      bool is_3d = mt->base.base.target == PIPE_TEXTURE_3D;
      unsigned offset = tx->img.offset;
      unsigned z = tx->img.z;
      unsigned i;
      for (i = 0; i < box->depth; ++i) {
         nv30_transfer_rect(nv30, NEAREST, &tx->img, &tx->tmp);
         if (is_3d && mt->swizzled)
            tx->img.z++;
         else if (is_3d)
            tx->img.offset += mt->level[level].zslice_size;
         else
            tx->img.offset += mt->layer_size;
         tx->tmp.offset += tx->base.layer_stride;
      }
      tx->img.z = z;
      tx->img.offset = offset;
      tx->tmp.offset = 0;
   }

   if (tx->tmp.bo->map) {
      *ptransfer = &tx->base;
      return tx->tmp.bo->map;
   }

   if (usage & PIPE_MAP_READ)
      access |= NOUVEAU_BO_RD;
   if (usage & PIPE_MAP_WRITE)
      access |= NOUVEAU_BO_WR;

   ret = nouveau_bo_map(tx->tmp.bo, access, nv30->base.client);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      FREE(tx);
      return NULL;
   }

   *ptransfer = &tx->base;
   return tx->tmp.bo->map;
}

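/* Unmap: for write transfers, copy each layer from the bounce buffer back
 * into the miptree, then fence the temporary bo so it is only released once
 * those copies have executed.
 */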
static void
nv30_miptree_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *ptx)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct nv30_transfer *tx = nv30_transfer(ptx);
   struct nv30_miptree *mt = nv30_miptree(tx->base.resource);
   unsigned i;

   if (ptx->usage & PIPE_MAP_WRITE) {
      bool is_3d = mt->base.base.target == PIPE_TEXTURE_3D;
      for (i = 0; i < tx->base.box.depth; ++i) {
         nv30_transfer_rect(nv30, NEAREST, &tx->tmp, &tx->img);
         if (is_3d && mt->swizzled)
            tx->img.z++;
         else if (is_3d)
            tx->img.offset += mt->level[tx->base.level].zslice_size;
         else
            tx->img.offset += mt->layer_size;
         tx->tmp.offset += tx->base.layer_stride;
      }

      /* Allow the copies above to finish executing before freeing the source */
      nouveau_fence_work(nv30->screen->base.fence.current,
                         nouveau_fence_unref_bo, tx->tmp.bo);
   } else {
      nouveau_bo_ref(NULL, &tx->tmp.bo);
   }
   pipe_resource_reference(&ptx->resource, NULL);
   FREE(tx);
}

const struct u_resource_vtbl nv30_miptree_vtbl = {
   nv30_miptree_get_handle,
   nv30_miptree_destroy,
   nv30_miptree_transfer_map,
   u_default_transfer_flush_region,
   nv30_miptree_transfer_unmap,
};

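/* Create a miptree.  RECT/scanout targets, NPOT sizes and multisampled
 * surfaces get a linear layout with a uniform (64-byte aligned) pitch;
 * everything else, apart from compressed formats, is stored swizzled.
 */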
struct pipe_resource *
nv30_miptree_create(struct pipe_screen *pscreen,
                    const struct pipe_resource *tmpl)
{
   struct nouveau_device *dev = nouveau_screen(pscreen)->device;
   struct nv30_miptree *mt = CALLOC_STRUCT(nv30_miptree);
   struct pipe_resource *pt = &mt->base.base;
   unsigned blocksz, size;
   unsigned w, h, d, l;
   int ret;

   switch (tmpl->nr_samples) {
   case 4:
      mt->ms_mode = 0x00004000;
      mt->ms_x = 1;
      mt->ms_y = 1;
      break;
   case 2:
      mt->ms_mode = 0x00003000;
      mt->ms_x = 1;
      mt->ms_y = 0;
      break;
   default:
      mt->ms_mode = 0x00000000;
      mt->ms_x = 0;
      mt->ms_y = 0;
      break;
   }

   mt->base.vtbl = &nv30_miptree_vtbl;
   *pt = *tmpl;
   pipe_reference_init(&pt->reference, 1);
   pt->screen = pscreen;

   w = pt->width0 << mt->ms_x;
   h = pt->height0 << mt->ms_y;
   d = (pt->target == PIPE_TEXTURE_3D) ? pt->depth0 : 1;
   blocksz = util_format_get_blocksize(pt->format);

   if ((pt->target == PIPE_TEXTURE_RECT) ||
       (pt->bind & PIPE_BIND_SCANOUT) ||
       !util_is_power_of_two_or_zero(pt->width0) ||
       !util_is_power_of_two_or_zero(pt->height0) ||
       !util_is_power_of_two_or_zero(pt->depth0) ||
       mt->ms_mode) {
      mt->uniform_pitch = util_format_get_nblocksx(pt->format, w) * blocksz;
      mt->uniform_pitch = align(mt->uniform_pitch, 64);
      if (pt->bind & PIPE_BIND_SCANOUT) {
         struct nv30_screen *screen = nv30_screen(pscreen);
         int pitch_align = MAX2(
               screen->eng3d->oclass >= NV40_3D_CLASS ? 1024 : 256,
               /* round_down_pow2(mt->uniform_pitch / 4) */
               1 << (util_last_bit(mt->uniform_pitch / 4) - 1));
         mt->uniform_pitch = align(mt->uniform_pitch, pitch_align);
      }
   }

   if (util_format_is_compressed(pt->format)) {
      // Compressed (DXT) formats are packed tightly. We don't mark them as
      // swizzled, since their layout is largely linear. However we do end up
      // omitting the LINEAR flag when texturing them, as the levels are not
      // uniformly sized (for POT sizes).
   } else if (!mt->uniform_pitch) {
      mt->swizzled = true;
   }

   size = 0;
   for (l = 0; l <= pt->last_level; l++) {
      struct nv30_miptree_level *lvl = &mt->level[l];
      unsigned nbx = util_format_get_nblocksx(pt->format, w);
      unsigned nby = util_format_get_nblocksy(pt->format, h);

      lvl->offset = size;
      lvl->pitch  = mt->uniform_pitch;
      if (!lvl->pitch)
         lvl->pitch = nbx * blocksz;

      lvl->zslice_size = lvl->pitch * nby;
      size += lvl->zslice_size * d;

      w = u_minify(w, 1);
      h = u_minify(h, 1);
      d = u_minify(d, 1);
   }

   mt->layer_size = size;
   if (pt->target == PIPE_TEXTURE_CUBE) {
      if (!mt->uniform_pitch)
         mt->layer_size = align(mt->layer_size, 128);
      size = mt->layer_size * 6;
   }

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 256, size, NULL, &mt->base.bo);
   if (ret) {
      FREE(mt);
      return NULL;
   }

   mt->base.domain = NOUVEAU_BO_VRAM;
   return &mt->base.base;
}

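/* Wrap a bo imported through a winsys handle as a linear, single-level
 * 2D miptree.
 */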
struct pipe_resource *
nv30_miptree_from_handle(struct pipe_screen *pscreen,
                         const struct pipe_resource *tmpl,
                         struct winsys_handle *handle)
{
   struct nv30_miptree *mt;
   unsigned stride;

   /* only supports 2D, non-mipmapped textures for the moment */
   if ((tmpl->target != PIPE_TEXTURE_2D &&
        tmpl->target != PIPE_TEXTURE_RECT) ||
       tmpl->last_level != 0 ||
       tmpl->depth0 != 1 ||
       tmpl->array_size > 1)
      return NULL;

   mt = CALLOC_STRUCT(nv30_miptree);
   if (!mt)
      return NULL;

   mt->base.bo = nouveau_screen_bo_from_handle(pscreen, handle, &stride);
   if (mt->base.bo == NULL) {
      FREE(mt);
      return NULL;
   }

   mt->base.base = *tmpl;
   mt->base.vtbl = &nv30_miptree_vtbl;
   pipe_reference_init(&mt->base.base.reference, 1);
   mt->base.base.screen = pscreen;
   mt->uniform_pitch = stride;
   mt->level[0].pitch = mt->uniform_pitch;
   mt->level[0].offset = 0;

   /* no need to adjust bo reference count */
   return &mt->base.base;
}

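/* Create a pipe_surface view of one level/layer range of a miptree.  For
 * swizzled surfaces the pitch is meaningless, so an arbitrary value the
 * hardware accepts is used.
 */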
struct pipe_surface *
nv30_miptree_surface_new(struct pipe_context *pipe,
                         struct pipe_resource *pt,
                         const struct pipe_surface *tmpl)
{
   struct nv30_miptree *mt = nv30_miptree(pt); /* guaranteed */
   struct nv30_surface *ns;
   struct pipe_surface *ps;
   struct nv30_miptree_level *lvl = &mt->level[tmpl->u.tex.level];

   ns = CALLOC_STRUCT(nv30_surface);
   if (!ns)
      return NULL;
   ps = &ns->base;

   pipe_reference_init(&ps->reference, 1);
   pipe_resource_reference(&ps->texture, pt);
   ps->context = pipe;
   ps->format = tmpl->format;
   ps->u.tex.level = tmpl->u.tex.level;
   ps->u.tex.first_layer = tmpl->u.tex.first_layer;
   ps->u.tex.last_layer = tmpl->u.tex.last_layer;

   ns->width = u_minify(pt->width0, ps->u.tex.level);
   ns->height = u_minify(pt->height0, ps->u.tex.level);
   ns->depth = ps->u.tex.last_layer - ps->u.tex.first_layer + 1;
   ns->offset = layer_offset(pt, ps->u.tex.level, ps->u.tex.first_layer);
   if (mt->swizzled)
      ns->pitch = 4096; /* arbitrary, just something the hw won't reject */
   else
      ns->pitch = lvl->pitch;

   /* comment says these are going to be removed, but they're used by the st */
   ps->width = ns->width;
   ps->height = ns->height;
   return ps;
}

void
nv30_miptree_surface_del(struct pipe_context *pipe, struct pipe_surface *ps)
{
   struct nv30_surface *ns = nv30_surface(ps);

   pipe_resource_reference(&ps->texture, NULL);
   FREE(ns);
}