1 /*
2  * Copyright © 2011 Red Hat All Rights Reserved.
3  * Copyright © 2017 Advanced Micro Devices, Inc.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining
7  * a copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17  * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18  * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * The above copyright notice and this permission notice (including the
24  * next paragraph) shall be included in all copies or substantial portions
25  * of the Software.
26  */
27 
28 #include "ac_surface.h"
29 
30 #include "ac_gpu_info.h"
31 #include "addrlib/inc/addrinterface.h"
32 #include "addrlib/src/amdgpu_asic_addr.h"
33 #include "amd_family.h"
34 #include "drm-uapi/amdgpu_drm.h"
35 #include "sid.h"
36 #include "util/hash_table.h"
37 #include "util/macros.h"
38 #include "util/simple_mtx.h"
39 #include "util/u_atomic.h"
40 #include "util/u_math.h"
41 #include "util/u_memory.h"
42 
43 #include <amdgpu.h>
44 #include <errno.h>
45 #include <stdio.h>
46 #include <stdlib.h>
47 
48 #ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
49 #define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
50 #endif
51 
52 #ifndef CIASICIDGFXENGINE_ARCTICISLAND
53 #define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
54 #endif
55 
56 struct ac_addrlib {
57    ADDR_HANDLE handle;
58 
59    /* The cache of DCC retile maps for reuse when allocating images of
60     * similar sizes.
61     */
62    simple_mtx_t dcc_retile_map_lock;
63    struct hash_table *dcc_retile_maps;
64    struct hash_table *dcc_retile_tile_indices;
65 };
66 
67 struct dcc_retile_map_key {
68    enum radeon_family family;
69    unsigned retile_width;
70    unsigned retile_height;
71    bool rb_aligned;
72    bool pipe_aligned;
73    unsigned dcc_retile_num_elements;
74    ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT input;
75 };
76 
dcc_retile_map_hash_key(const void * key)77 static uint32_t dcc_retile_map_hash_key(const void *key)
78 {
79    return _mesa_hash_data(key, sizeof(struct dcc_retile_map_key));
80 }
81 
dcc_retile_map_keys_equal(const void * a,const void * b)82 static bool dcc_retile_map_keys_equal(const void *a, const void *b)
83 {
84    return memcmp(a, b, sizeof(struct dcc_retile_map_key)) == 0;
85 }
86 
dcc_retile_map_free(struct hash_entry * entry)87 static void dcc_retile_map_free(struct hash_entry *entry)
88 {
89    free((void *)entry->key);
90    free(entry->data);
91 }
92 
93 struct dcc_retile_tile_key {
94    enum radeon_family family;
95    unsigned bpp;
96    unsigned swizzle_mode;
97    bool rb_aligned;
98    bool pipe_aligned;
99 };
100 
101 struct dcc_retile_tile_data {
102    unsigned tile_width_log2;
103    unsigned tile_height_log2;
104    uint16_t *data;
105 };
106 
dcc_retile_tile_hash_key(const void * key)107 static uint32_t dcc_retile_tile_hash_key(const void *key)
108 {
109    return _mesa_hash_data(key, sizeof(struct dcc_retile_tile_key));
110 }
111 
dcc_retile_tile_keys_equal(const void * a,const void * b)112 static bool dcc_retile_tile_keys_equal(const void *a, const void *b)
113 {
114    return memcmp(a, b, sizeof(struct dcc_retile_tile_key)) == 0;
115 }
116 
dcc_retile_tile_free(struct hash_entry * entry)117 static void dcc_retile_tile_free(struct hash_entry *entry)
118 {
119    free((void *)entry->key);
120    free(((struct dcc_retile_tile_data *)entry->data)->data);
121    free(entry->data);
122 }
123 
124 /* Assumes dcc_retile_map_lock is taken. */
125 static const struct dcc_retile_tile_data *
ac_compute_dcc_retile_tile_indices(struct ac_addrlib * addrlib,const struct radeon_info * info,unsigned bpp,unsigned swizzle_mode,bool rb_aligned,bool pipe_aligned)126 ac_compute_dcc_retile_tile_indices(struct ac_addrlib *addrlib, const struct radeon_info *info,
127                                    unsigned bpp, unsigned swizzle_mode, bool rb_aligned,
128                                    bool pipe_aligned)
129 {
130    struct dcc_retile_tile_key key;
131    memset(&key, 0, sizeof(key));
132 
133    key.family = info->family;
134    key.bpp = bpp;
135    key.swizzle_mode = swizzle_mode;
136    key.rb_aligned = rb_aligned;
137    key.pipe_aligned = pipe_aligned;
138 
139    struct hash_entry *entry = _mesa_hash_table_search(addrlib->dcc_retile_tile_indices, &key);
140    if (entry)
141       return entry->data;
142 
143    ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
144    ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
145    din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
146    dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
147 
148    din.dccKeyFlags.pipeAligned = pipe_aligned;
149    din.dccKeyFlags.rbAligned = rb_aligned;
150    din.resourceType = ADDR_RSRC_TEX_2D;
151    din.swizzleMode = swizzle_mode;
152    din.bpp = bpp;
153    din.unalignedWidth = 1;
154    din.unalignedHeight = 1;
155    din.numSlices = 1;
156    din.numFrags = 1;
157    din.numMipLevels = 1;
158 
159    ADDR_E_RETURNCODE ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
160    if (ret != ADDR_OK)
161       return NULL;
162 
163    ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin = {0};
164    addrin.size = sizeof(addrin);
165    addrin.swizzleMode = swizzle_mode;
166    addrin.resourceType = ADDR_RSRC_TEX_2D;
167    addrin.bpp = bpp;
168    addrin.numSlices = 1;
169    addrin.numMipLevels = 1;
170    addrin.numFrags = 1;
171    addrin.pitch = dout.pitch;
172    addrin.height = dout.height;
173    addrin.compressBlkWidth = dout.compressBlkWidth;
174    addrin.compressBlkHeight = dout.compressBlkHeight;
175    addrin.compressBlkDepth = dout.compressBlkDepth;
176    addrin.metaBlkWidth = dout.metaBlkWidth;
177    addrin.metaBlkHeight = dout.metaBlkHeight;
178    addrin.metaBlkDepth = dout.metaBlkDepth;
179    addrin.dccKeyFlags.pipeAligned = pipe_aligned;
180    addrin.dccKeyFlags.rbAligned = rb_aligned;
181 
182    unsigned w = dout.metaBlkWidth / dout.compressBlkWidth;
183    unsigned h = dout.metaBlkHeight / dout.compressBlkHeight;
184    uint16_t *indices = malloc(w * h * sizeof(uint16_t));
185    if (!indices)
186       return NULL;
187 
188    ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout = {0};
189    addrout.size = sizeof(addrout);
190 
191    for (unsigned y = 0; y < h; ++y) {
192       addrin.y = y * dout.compressBlkHeight;
193       for (unsigned x = 0; x < w; ++x) {
194          addrin.x = x * dout.compressBlkWidth;
195          addrout.addr = 0;
196 
197          if (Addr2ComputeDccAddrFromCoord(addrlib->handle, &addrin, &addrout) != ADDR_OK) {
198             free(indices);
199             return NULL;
200          }
201          indices[y * w + x] = addrout.addr;
202       }
203    }
204 
205    struct dcc_retile_tile_data *data = calloc(1, sizeof(*data));
206    if (!data) {
207       free(indices);
208       return NULL;
209    }
210 
211    data->tile_width_log2 = util_logbase2(w);
212    data->tile_height_log2 = util_logbase2(h);
213    data->data = indices;
214 
215    struct dcc_retile_tile_key *heap_key = mem_dup(&key, sizeof(key));
216    if (!heap_key) {
217       free(data);
218       free(indices);
219       return NULL;
220    }
221 
222    entry = _mesa_hash_table_insert(addrlib->dcc_retile_tile_indices, heap_key, data);
223    if (!entry) {
224       free(heap_key);
225       free(data);
226       free(indices);
227    }
228    return data;
229 }
230 
ac_compute_retile_tile_addr(const struct dcc_retile_tile_data * tile,unsigned stride,unsigned x,unsigned y)231 static uint32_t ac_compute_retile_tile_addr(const struct dcc_retile_tile_data *tile,
232                                             unsigned stride, unsigned x, unsigned y)
233 {
234    unsigned x_mask = (1u << tile->tile_width_log2) - 1;
235    unsigned y_mask = (1u << tile->tile_height_log2) - 1;
236    unsigned tile_size_log2 = tile->tile_width_log2 + tile->tile_height_log2;
237 
238    unsigned base = ((y >> tile->tile_height_log2) * stride + (x >> tile->tile_width_log2))
239                    << tile_size_log2;
240    unsigned offset_in_tile = tile->data[((y & y_mask) << tile->tile_width_log2) + (x & x_mask)];
241    return base + offset_in_tile;
242 }
243 
ac_compute_dcc_retile_map(struct ac_addrlib * addrlib,const struct radeon_info * info,unsigned retile_width,unsigned retile_height,bool rb_aligned,bool pipe_aligned,bool use_uint16,unsigned dcc_retile_num_elements,const ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT * in)244 static uint32_t *ac_compute_dcc_retile_map(struct ac_addrlib *addrlib,
245                                            const struct radeon_info *info, unsigned retile_width,
246                                            unsigned retile_height, bool rb_aligned,
247                                            bool pipe_aligned, bool use_uint16,
248                                            unsigned dcc_retile_num_elements,
249                                            const ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT *in)
250 {
251    unsigned dcc_retile_map_size = dcc_retile_num_elements * (use_uint16 ? 2 : 4);
252    struct dcc_retile_map_key key;
253 
254    assert(in->numFrags == 1 && in->numSlices == 1 && in->numMipLevels == 1);
255 
256    memset(&key, 0, sizeof(key));
257    key.family = info->family;
258    key.retile_width = retile_width;
259    key.retile_height = retile_height;
260    key.rb_aligned = rb_aligned;
261    key.pipe_aligned = pipe_aligned;
262    key.dcc_retile_num_elements = dcc_retile_num_elements;
263    memcpy(&key.input, in, sizeof(*in));
264 
265    simple_mtx_lock(&addrlib->dcc_retile_map_lock);
266 
267    /* If we have already computed this retile map, get it from the hash table. */
268    struct hash_entry *entry = _mesa_hash_table_search(addrlib->dcc_retile_maps, &key);
269    if (entry) {
270       uint32_t *map = entry->data;
271       simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
272       return map;
273    }
274 
275    const struct dcc_retile_tile_data *src_tile = ac_compute_dcc_retile_tile_indices(
276       addrlib, info, in->bpp, in->swizzleMode, rb_aligned, pipe_aligned);
277    const struct dcc_retile_tile_data *dst_tile =
278       ac_compute_dcc_retile_tile_indices(addrlib, info, in->bpp, in->swizzleMode, false, false);
279    if (!src_tile || !dst_tile) {
280       simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
281       return NULL;
282    }
283 
284    void *dcc_retile_map = malloc(dcc_retile_map_size);
285    if (!dcc_retile_map) {
286       simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
287       return NULL;
288    }
289 
290    unsigned index = 0;
291    unsigned w = DIV_ROUND_UP(retile_width, in->compressBlkWidth);
292    unsigned h = DIV_ROUND_UP(retile_height, in->compressBlkHeight);
293    unsigned src_stride = DIV_ROUND_UP(w, 1u << src_tile->tile_width_log2);
294    unsigned dst_stride = DIV_ROUND_UP(w, 1u << dst_tile->tile_width_log2);
295 
296    for (unsigned y = 0; y < h; ++y) {
297       for (unsigned x = 0; x < w; ++x) {
298          unsigned src_addr = ac_compute_retile_tile_addr(src_tile, src_stride, x, y);
299          unsigned dst_addr = ac_compute_retile_tile_addr(dst_tile, dst_stride, x, y);
300 
301          if (use_uint16) {
302             ((uint16_t *)dcc_retile_map)[2 * index] = src_addr;
303             ((uint16_t *)dcc_retile_map)[2 * index + 1] = dst_addr;
304          } else {
305             ((uint32_t *)dcc_retile_map)[2 * index] = src_addr;
306             ((uint32_t *)dcc_retile_map)[2 * index + 1] = dst_addr;
307          }
308          ++index;
309       }
310    }
311 
312    /* Fill the remaining pairs with the last one (for the compute shader). */
313    for (unsigned i = index * 2; i < dcc_retile_num_elements; i++) {
314       if (use_uint16)
315          ((uint16_t *)dcc_retile_map)[i] = ((uint16_t *)dcc_retile_map)[i - 2];
316       else
317          ((uint32_t *)dcc_retile_map)[i] = ((uint32_t *)dcc_retile_map)[i - 2];
318    }
319 
320    /* Insert the retile map into the hash table, so that it can be reused and
321     * the computation can be skipped for similar image sizes.
322     */
323    _mesa_hash_table_insert(addrlib->dcc_retile_maps, mem_dup(&key, sizeof(key)), dcc_retile_map);
324 
325    simple_mtx_unlock(&addrlib->dcc_retile_map_lock);
326    return dcc_retile_map;
327 }
328 
allocSysMem(const ADDR_ALLOCSYSMEM_INPUT * pInput)329 static void *ADDR_API allocSysMem(const ADDR_ALLOCSYSMEM_INPUT *pInput)
330 {
331    return malloc(pInput->sizeInBytes);
332 }
333 
freeSysMem(const ADDR_FREESYSMEM_INPUT * pInput)334 static ADDR_E_RETURNCODE ADDR_API freeSysMem(const ADDR_FREESYSMEM_INPUT *pInput)
335 {
336    free(pInput->pVirtAddr);
337    return ADDR_OK;
338 }
339 
ac_addrlib_create(const struct radeon_info * info,const struct amdgpu_gpu_info * amdinfo,uint64_t * max_alignment)340 struct ac_addrlib *ac_addrlib_create(const struct radeon_info *info,
341                                      const struct amdgpu_gpu_info *amdinfo, uint64_t *max_alignment)
342 {
343    ADDR_CREATE_INPUT addrCreateInput = {0};
344    ADDR_CREATE_OUTPUT addrCreateOutput = {0};
345    ADDR_REGISTER_VALUE regValue = {0};
346    ADDR_CREATE_FLAGS createFlags = {{0}};
347    ADDR_GET_MAX_ALIGNMENTS_OUTPUT addrGetMaxAlignmentsOutput = {0};
348    ADDR_E_RETURNCODE addrRet;
349 
350    addrCreateInput.size = sizeof(ADDR_CREATE_INPUT);
351    addrCreateOutput.size = sizeof(ADDR_CREATE_OUTPUT);
352 
353    regValue.gbAddrConfig = amdinfo->gb_addr_cfg;
354    createFlags.value = 0;
355 
356    addrCreateInput.chipFamily = info->family_id;
357    addrCreateInput.chipRevision = info->chip_external_rev;
358 
359    if (addrCreateInput.chipFamily == FAMILY_UNKNOWN)
360       return NULL;
361 
362    if (addrCreateInput.chipFamily >= FAMILY_AI) {
363       addrCreateInput.chipEngine = CIASICIDGFXENGINE_ARCTICISLAND;
364    } else {
365       regValue.noOfBanks = amdinfo->mc_arb_ramcfg & 0x3;
366       regValue.noOfRanks = (amdinfo->mc_arb_ramcfg & 0x4) >> 2;
367 
368       regValue.backendDisables = amdinfo->enabled_rb_pipes_mask;
369       regValue.pTileConfig = amdinfo->gb_tile_mode;
370       regValue.noOfEntries = ARRAY_SIZE(amdinfo->gb_tile_mode);
371       if (addrCreateInput.chipFamily == FAMILY_SI) {
372          regValue.pMacroTileConfig = NULL;
373          regValue.noOfMacroEntries = 0;
374       } else {
375          regValue.pMacroTileConfig = amdinfo->gb_macro_tile_mode;
376          regValue.noOfMacroEntries = ARRAY_SIZE(amdinfo->gb_macro_tile_mode);
377       }
378 
379       createFlags.useTileIndex = 1;
380       createFlags.useHtileSliceAlign = 1;
381 
382       addrCreateInput.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
383    }
384 
385    addrCreateInput.callbacks.allocSysMem = allocSysMem;
386    addrCreateInput.callbacks.freeSysMem = freeSysMem;
387    addrCreateInput.callbacks.debugPrint = 0;
388    addrCreateInput.createFlags = createFlags;
389    addrCreateInput.regValue = regValue;
390 
391    addrRet = AddrCreate(&addrCreateInput, &addrCreateOutput);
392    if (addrRet != ADDR_OK)
393       return NULL;
394 
395    if (max_alignment) {
396       addrRet = AddrGetMaxAlignments(addrCreateOutput.hLib, &addrGetMaxAlignmentsOutput);
397       if (addrRet == ADDR_OK) {
398          *max_alignment = addrGetMaxAlignmentsOutput.baseAlign;
399       }
400    }
401 
402    struct ac_addrlib *addrlib = calloc(1, sizeof(struct ac_addrlib));
403    if (!addrlib) {
404       AddrDestroy(addrCreateOutput.hLib);
405       return NULL;
406    }
407 
408    addrlib->handle = addrCreateOutput.hLib;
409    simple_mtx_init(&addrlib->dcc_retile_map_lock, mtx_plain);
410    addrlib->dcc_retile_maps =
411       _mesa_hash_table_create(NULL, dcc_retile_map_hash_key, dcc_retile_map_keys_equal);
412    addrlib->dcc_retile_tile_indices =
413       _mesa_hash_table_create(NULL, dcc_retile_tile_hash_key, dcc_retile_tile_keys_equal);
414    return addrlib;
415 }
416 
ac_addrlib_destroy(struct ac_addrlib * addrlib)417 void ac_addrlib_destroy(struct ac_addrlib *addrlib)
418 {
419    AddrDestroy(addrlib->handle);
420    simple_mtx_destroy(&addrlib->dcc_retile_map_lock);
421    _mesa_hash_table_destroy(addrlib->dcc_retile_maps, dcc_retile_map_free);
422    _mesa_hash_table_destroy(addrlib->dcc_retile_tile_indices, dcc_retile_tile_free);
423    free(addrlib);
424 }
425 
surf_config_sanity(const struct ac_surf_config * config,unsigned flags)426 static int surf_config_sanity(const struct ac_surf_config *config, unsigned flags)
427 {
428    /* FMASK is allocated together with the color surface and can't be
429     * allocated separately.
430     */
431    assert(!(flags & RADEON_SURF_FMASK));
432    if (flags & RADEON_SURF_FMASK)
433       return -EINVAL;
434 
435    /* all dimension must be at least 1 ! */
436    if (!config->info.width || !config->info.height || !config->info.depth ||
437        !config->info.array_size || !config->info.levels)
438       return -EINVAL;
439 
440    switch (config->info.samples) {
441    case 0:
442    case 1:
443    case 2:
444    case 4:
445    case 8:
446       break;
447    case 16:
448       if (flags & RADEON_SURF_Z_OR_SBUFFER)
449          return -EINVAL;
450       break;
451    default:
452       return -EINVAL;
453    }
454 
455    if (!(flags & RADEON_SURF_Z_OR_SBUFFER)) {
456       switch (config->info.storage_samples) {
457       case 0:
458       case 1:
459       case 2:
460       case 4:
461       case 8:
462          break;
463       default:
464          return -EINVAL;
465       }
466    }
467 
468    if (config->is_3d && config->info.array_size > 1)
469       return -EINVAL;
470    if (config->is_cube && config->info.depth > 1)
471       return -EINVAL;
472 
473    return 0;
474 }
475 
gfx6_compute_level(ADDR_HANDLE addrlib,const struct ac_surf_config * config,struct radeon_surf * surf,bool is_stencil,unsigned level,bool compressed,ADDR_COMPUTE_SURFACE_INFO_INPUT * AddrSurfInfoIn,ADDR_COMPUTE_SURFACE_INFO_OUTPUT * AddrSurfInfoOut,ADDR_COMPUTE_DCCINFO_INPUT * AddrDccIn,ADDR_COMPUTE_DCCINFO_OUTPUT * AddrDccOut,ADDR_COMPUTE_HTILE_INFO_INPUT * AddrHtileIn,ADDR_COMPUTE_HTILE_INFO_OUTPUT * AddrHtileOut)476 static int gfx6_compute_level(ADDR_HANDLE addrlib, const struct ac_surf_config *config,
477                               struct radeon_surf *surf, bool is_stencil, unsigned level,
478                               bool compressed, ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
479                               ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut,
480                               ADDR_COMPUTE_DCCINFO_INPUT *AddrDccIn,
481                               ADDR_COMPUTE_DCCINFO_OUTPUT *AddrDccOut,
482                               ADDR_COMPUTE_HTILE_INFO_INPUT *AddrHtileIn,
483                               ADDR_COMPUTE_HTILE_INFO_OUTPUT *AddrHtileOut)
484 {
485    struct legacy_surf_level *surf_level;
486    ADDR_E_RETURNCODE ret;
487 
488    AddrSurfInfoIn->mipLevel = level;
489    AddrSurfInfoIn->width = u_minify(config->info.width, level);
490    AddrSurfInfoIn->height = u_minify(config->info.height, level);
491 
492    /* Make GFX6 linear surfaces compatible with GFX9 for hybrid graphics,
493     * because GFX9 needs linear alignment of 256 bytes.
494     */
495    if (config->info.levels == 1 && AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED &&
496        AddrSurfInfoIn->bpp && util_is_power_of_two_or_zero(AddrSurfInfoIn->bpp)) {
497       unsigned alignment = 256 / (AddrSurfInfoIn->bpp / 8);
498 
499       AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, alignment);
500    }
501 
502    /* addrlib assumes the bytes/pixel is a divisor of 64, which is not
503     * true for r32g32b32 formats. */
504    if (AddrSurfInfoIn->bpp == 96) {
505       assert(config->info.levels == 1);
506       assert(AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED);
507 
508       /* The least common multiple of 64 bytes and 12 bytes/pixel is
509        * 192 bytes, or 16 pixels. */
510       AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, 16);
511    }
512 
513    if (config->is_3d)
514       AddrSurfInfoIn->numSlices = u_minify(config->info.depth, level);
515    else if (config->is_cube)
516       AddrSurfInfoIn->numSlices = 6;
517    else
518       AddrSurfInfoIn->numSlices = config->info.array_size;
519 
520    if (level > 0) {
521       /* Set the base level pitch. This is needed for calculation
522        * of non-zero levels. */
523       if (is_stencil)
524          AddrSurfInfoIn->basePitch = surf->u.legacy.stencil_level[0].nblk_x;
525       else
526          AddrSurfInfoIn->basePitch = surf->u.legacy.level[0].nblk_x;
527 
528       /* Convert blocks to pixels for compressed formats. */
529       if (compressed)
530          AddrSurfInfoIn->basePitch *= surf->blk_w;
531    }
532 
533    ret = AddrComputeSurfaceInfo(addrlib, AddrSurfInfoIn, AddrSurfInfoOut);
534    if (ret != ADDR_OK) {
535       return ret;
536    }
537 
538    surf_level = is_stencil ? &surf->u.legacy.stencil_level[level] : &surf->u.legacy.level[level];
539    surf_level->offset = align64(surf->surf_size, AddrSurfInfoOut->baseAlign);
540    surf_level->slice_size_dw = AddrSurfInfoOut->sliceSize / 4;
541    surf_level->nblk_x = AddrSurfInfoOut->pitch;
542    surf_level->nblk_y = AddrSurfInfoOut->height;
543 
544    switch (AddrSurfInfoOut->tileMode) {
545    case ADDR_TM_LINEAR_ALIGNED:
546       surf_level->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
547       break;
548    case ADDR_TM_1D_TILED_THIN1:
549       surf_level->mode = RADEON_SURF_MODE_1D;
550       break;
551    case ADDR_TM_2D_TILED_THIN1:
552       surf_level->mode = RADEON_SURF_MODE_2D;
553       break;
554    default:
555       assert(0);
556    }
557 
558    if (is_stencil)
559       surf->u.legacy.stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
560    else
561       surf->u.legacy.tiling_index[level] = AddrSurfInfoOut->tileIndex;
562 
563    surf->surf_size = surf_level->offset + AddrSurfInfoOut->surfSize;
564 
565    /* Clear DCC fields at the beginning. */
566    surf_level->dcc_offset = 0;
567 
568    /* The previous level's flag tells us if we can use DCC for this level. */
569    if (AddrSurfInfoIn->flags.dccCompatible && (level == 0 || AddrDccOut->subLvlCompressible)) {
570       bool prev_level_clearable = level == 0 || AddrDccOut->dccRamSizeAligned;
571 
572       AddrDccIn->colorSurfSize = AddrSurfInfoOut->surfSize;
573       AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
574       AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
575       AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
576       AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
577 
578       ret = AddrComputeDccInfo(addrlib, AddrDccIn, AddrDccOut);
579 
580       if (ret == ADDR_OK) {
581          surf_level->dcc_offset = surf->dcc_size;
582          surf->num_dcc_levels = level + 1;
583          surf->dcc_size = surf_level->dcc_offset + AddrDccOut->dccRamSize;
584          surf->dcc_alignment = MAX2(surf->dcc_alignment, AddrDccOut->dccRamBaseAlign);
585 
586          /* If the DCC size of a subresource (1 mip level or 1 slice)
587           * is not aligned, the DCC memory layout is not contiguous for
588           * that subresource, which means we can't use fast clear.
589           *
590           * We only do fast clears for whole mipmap levels. If we did
591           * per-slice fast clears, the same restriction would apply.
592           * (i.e. only compute the slice size and see if it's aligned)
593           *
594           * The last level can be non-contiguous and still be clearable
595           * if it's interleaved with the next level that doesn't exist.
596           */
597          if (AddrDccOut->dccRamSizeAligned ||
598              (prev_level_clearable && level == config->info.levels - 1))
599             surf_level->dcc_fast_clear_size = AddrDccOut->dccFastClearSize;
600          else
601             surf_level->dcc_fast_clear_size = 0;
602 
603          /* Compute the DCC slice size because addrlib doesn't
604           * provide this info. As DCC memory is linear (each
605           * slice is the same size) it's easy to compute.
606           */
607          surf->dcc_slice_size = AddrDccOut->dccRamSize / config->info.array_size;
608 
609          /* For arrays, we have to compute the DCC info again
610           * with one slice size to get a correct fast clear
611           * size.
612           */
613          if (config->info.array_size > 1) {
614             AddrDccIn->colorSurfSize = AddrSurfInfoOut->sliceSize;
615             AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
616             AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
617             AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
618             AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
619 
620             ret = AddrComputeDccInfo(addrlib, AddrDccIn, AddrDccOut);
621             if (ret == ADDR_OK) {
622                /* If the DCC memory isn't properly
623                 * aligned, the data are interleaved
624                 * accross slices.
625                 */
626                if (AddrDccOut->dccRamSizeAligned)
627                   surf_level->dcc_slice_fast_clear_size = AddrDccOut->dccFastClearSize;
628                else
629                   surf_level->dcc_slice_fast_clear_size = 0;
630             }
631 
632             if (surf->flags & RADEON_SURF_CONTIGUOUS_DCC_LAYERS &&
633                 surf->dcc_slice_size != surf_level->dcc_slice_fast_clear_size) {
634                surf->dcc_size = 0;
635                surf->num_dcc_levels = 0;
636                AddrDccOut->subLvlCompressible = false;
637             }
638          } else {
639             surf_level->dcc_slice_fast_clear_size = surf_level->dcc_fast_clear_size;
640          }
641       }
642    }
643 
644    /* HTILE. */
645    if (!is_stencil && AddrSurfInfoIn->flags.depth && surf_level->mode == RADEON_SURF_MODE_2D &&
646        level == 0 && !(surf->flags & RADEON_SURF_NO_HTILE)) {
647       AddrHtileIn->flags.tcCompatible = AddrSurfInfoOut->tcCompatible;
648       AddrHtileIn->pitch = AddrSurfInfoOut->pitch;
649       AddrHtileIn->height = AddrSurfInfoOut->height;
650       AddrHtileIn->numSlices = AddrSurfInfoOut->depth;
651       AddrHtileIn->blockWidth = ADDR_HTILE_BLOCKSIZE_8;
652       AddrHtileIn->blockHeight = ADDR_HTILE_BLOCKSIZE_8;
653       AddrHtileIn->pTileInfo = AddrSurfInfoOut->pTileInfo;
654       AddrHtileIn->tileIndex = AddrSurfInfoOut->tileIndex;
655       AddrHtileIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
656 
657       ret = AddrComputeHtileInfo(addrlib, AddrHtileIn, AddrHtileOut);
658 
659       if (ret == ADDR_OK) {
660          surf->htile_size = AddrHtileOut->htileBytes;
661          surf->htile_slice_size = AddrHtileOut->sliceSize;
662          surf->htile_alignment = AddrHtileOut->baseAlign;
663       }
664    }
665 
666    return 0;
667 }
668 
gfx6_set_micro_tile_mode(struct radeon_surf * surf,const struct radeon_info * info)669 static void gfx6_set_micro_tile_mode(struct radeon_surf *surf, const struct radeon_info *info)
670 {
671    uint32_t tile_mode = info->si_tile_mode_array[surf->u.legacy.tiling_index[0]];
672 
673    if (info->chip_class >= GFX7)
674       surf->micro_tile_mode = G_009910_MICRO_TILE_MODE_NEW(tile_mode);
675    else
676       surf->micro_tile_mode = G_009910_MICRO_TILE_MODE(tile_mode);
677 }
678 
cik_get_macro_tile_index(struct radeon_surf * surf)679 static unsigned cik_get_macro_tile_index(struct radeon_surf *surf)
680 {
681    unsigned index, tileb;
682 
683    tileb = 8 * 8 * surf->bpe;
684    tileb = MIN2(surf->u.legacy.tile_split, tileb);
685 
686    for (index = 0; tileb > 64; index++)
687       tileb >>= 1;
688 
689    assert(index < 16);
690    return index;
691 }
692 
get_display_flag(const struct ac_surf_config * config,const struct radeon_surf * surf)693 static bool get_display_flag(const struct ac_surf_config *config, const struct radeon_surf *surf)
694 {
695    unsigned num_channels = config->info.num_channels;
696    unsigned bpe = surf->bpe;
697 
698    if (!config->is_3d && !config->is_cube && !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
699        surf->flags & RADEON_SURF_SCANOUT && config->info.samples <= 1 && surf->blk_w <= 2 &&
700        surf->blk_h == 1) {
701       /* subsampled */
702       if (surf->blk_w == 2 && surf->blk_h == 1)
703          return true;
704 
705       if (/* RGBA8 or RGBA16F */
706           (bpe >= 4 && bpe <= 8 && num_channels == 4) ||
707           /* R5G6B5 or R5G5B5A1 */
708           (bpe == 2 && num_channels >= 3) ||
709           /* C8 palette */
710           (bpe == 1 && num_channels == 1))
711          return true;
712    }
713    return false;
714 }
715 
716 /**
717  * This must be called after the first level is computed.
718  *
719  * Copy surface-global settings like pipe/bank config from level 0 surface
720  * computation, and compute tile swizzle.
721  */
gfx6_surface_settings(ADDR_HANDLE addrlib,const struct radeon_info * info,const struct ac_surf_config * config,ADDR_COMPUTE_SURFACE_INFO_OUTPUT * csio,struct radeon_surf * surf)722 static int gfx6_surface_settings(ADDR_HANDLE addrlib, const struct radeon_info *info,
723                                  const struct ac_surf_config *config,
724                                  ADDR_COMPUTE_SURFACE_INFO_OUTPUT *csio, struct radeon_surf *surf)
725 {
726    surf->surf_alignment = csio->baseAlign;
727    surf->u.legacy.pipe_config = csio->pTileInfo->pipeConfig - 1;
728    gfx6_set_micro_tile_mode(surf, info);
729 
730    /* For 2D modes only. */
731    if (csio->tileMode >= ADDR_TM_2D_TILED_THIN1) {
732       surf->u.legacy.bankw = csio->pTileInfo->bankWidth;
733       surf->u.legacy.bankh = csio->pTileInfo->bankHeight;
734       surf->u.legacy.mtilea = csio->pTileInfo->macroAspectRatio;
735       surf->u.legacy.tile_split = csio->pTileInfo->tileSplitBytes;
736       surf->u.legacy.num_banks = csio->pTileInfo->banks;
737       surf->u.legacy.macro_tile_index = csio->macroModeIndex;
738    } else {
739       surf->u.legacy.macro_tile_index = 0;
740    }
741 
742    /* Compute tile swizzle. */
743    /* TODO: fix tile swizzle with mipmapping for GFX6 */
744    if ((info->chip_class >= GFX7 || config->info.levels == 1) && config->info.surf_index &&
745        surf->u.legacy.level[0].mode == RADEON_SURF_MODE_2D &&
746        !(surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_SHAREABLE)) &&
747        !get_display_flag(config, surf)) {
748       ADDR_COMPUTE_BASE_SWIZZLE_INPUT AddrBaseSwizzleIn = {0};
749       ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT AddrBaseSwizzleOut = {0};
750 
751       AddrBaseSwizzleIn.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
752       AddrBaseSwizzleOut.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
753 
754       AddrBaseSwizzleIn.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
755       AddrBaseSwizzleIn.tileIndex = csio->tileIndex;
756       AddrBaseSwizzleIn.macroModeIndex = csio->macroModeIndex;
757       AddrBaseSwizzleIn.pTileInfo = csio->pTileInfo;
758       AddrBaseSwizzleIn.tileMode = csio->tileMode;
759 
760       int r = AddrComputeBaseSwizzle(addrlib, &AddrBaseSwizzleIn, &AddrBaseSwizzleOut);
761       if (r != ADDR_OK)
762          return r;
763 
764       assert(AddrBaseSwizzleOut.tileSwizzle <=
765              u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
766       surf->tile_swizzle = AddrBaseSwizzleOut.tileSwizzle;
767    }
768    return 0;
769 }
770 
ac_compute_cmask(const struct radeon_info * info,const struct ac_surf_config * config,struct radeon_surf * surf)771 static void ac_compute_cmask(const struct radeon_info *info, const struct ac_surf_config *config,
772                              struct radeon_surf *surf)
773 {
774    unsigned pipe_interleave_bytes = info->pipe_interleave_bytes;
775    unsigned num_pipes = info->num_tile_pipes;
776    unsigned cl_width, cl_height;
777 
778    if (surf->flags & RADEON_SURF_Z_OR_SBUFFER || surf->is_linear ||
779        (config->info.samples >= 2 && !surf->fmask_size))
780       return;
781 
782    assert(info->chip_class <= GFX8);
783 
784    switch (num_pipes) {
785    case 2:
786       cl_width = 32;
787       cl_height = 16;
788       break;
789    case 4:
790       cl_width = 32;
791       cl_height = 32;
792       break;
793    case 8:
794       cl_width = 64;
795       cl_height = 32;
796       break;
797    case 16: /* Hawaii */
798       cl_width = 64;
799       cl_height = 64;
800       break;
801    default:
802       assert(0);
803       return;
804    }
805 
806    unsigned base_align = num_pipes * pipe_interleave_bytes;
807 
808    unsigned width = align(surf->u.legacy.level[0].nblk_x, cl_width * 8);
809    unsigned height = align(surf->u.legacy.level[0].nblk_y, cl_height * 8);
810    unsigned slice_elements = (width * height) / (8 * 8);
811 
812    /* Each element of CMASK is a nibble. */
813    unsigned slice_bytes = slice_elements / 2;
814 
815    surf->u.legacy.cmask_slice_tile_max = (width * height) / (128 * 128);
816    if (surf->u.legacy.cmask_slice_tile_max)
817       surf->u.legacy.cmask_slice_tile_max -= 1;
818 
819    unsigned num_layers;
820    if (config->is_3d)
821       num_layers = config->info.depth;
822    else if (config->is_cube)
823       num_layers = 6;
824    else
825       num_layers = config->info.array_size;
826 
827    surf->cmask_alignment = MAX2(256, base_align);
828    surf->cmask_slice_size = align(slice_bytes, base_align);
829    surf->cmask_size = surf->cmask_slice_size * num_layers;
830 }
831 
832 /**
833  * Fill in the tiling information in \p surf based on the given surface config.
834  *
835  * The following fields of \p surf must be initialized by the caller:
836  * blk_w, blk_h, bpe, flags.
837  */
gfx6_compute_surface(ADDR_HANDLE addrlib,const struct radeon_info * info,const struct ac_surf_config * config,enum radeon_surf_mode mode,struct radeon_surf * surf)838 static int gfx6_compute_surface(ADDR_HANDLE addrlib, const struct radeon_info *info,
839                                 const struct ac_surf_config *config, enum radeon_surf_mode mode,
840                                 struct radeon_surf *surf)
841 {
842    unsigned level;
843    bool compressed;
844    ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
845    ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut = {0};
846    ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn = {0};
847    ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut = {0};
848    ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn = {0};
849    ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut = {0};
850    ADDR_TILEINFO AddrTileInfoIn = {0};
851    ADDR_TILEINFO AddrTileInfoOut = {0};
852    int r;
853 
854    AddrSurfInfoIn.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
855    AddrSurfInfoOut.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
856    AddrDccIn.size = sizeof(ADDR_COMPUTE_DCCINFO_INPUT);
857    AddrDccOut.size = sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT);
858    AddrHtileIn.size = sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT);
859    AddrHtileOut.size = sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT);
860    AddrSurfInfoOut.pTileInfo = &AddrTileInfoOut;
861 
862    compressed = surf->blk_w == 4 && surf->blk_h == 4;
863 
864    /* MSAA requires 2D tiling. */
865    if (config->info.samples > 1)
866       mode = RADEON_SURF_MODE_2D;
867 
868    /* DB doesn't support linear layouts. */
869    if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER) && mode < RADEON_SURF_MODE_1D)
870       mode = RADEON_SURF_MODE_1D;
871 
872    /* Set the requested tiling mode. */
873    switch (mode) {
874    case RADEON_SURF_MODE_LINEAR_ALIGNED:
875       AddrSurfInfoIn.tileMode = ADDR_TM_LINEAR_ALIGNED;
876       break;
877    case RADEON_SURF_MODE_1D:
878       AddrSurfInfoIn.tileMode = ADDR_TM_1D_TILED_THIN1;
879       break;
880    case RADEON_SURF_MODE_2D:
881       AddrSurfInfoIn.tileMode = ADDR_TM_2D_TILED_THIN1;
882       break;
883    default:
884       assert(0);
885    }
886 
887    /* The format must be set correctly for the allocation of compressed
888     * textures to work. In other cases, setting the bpp is sufficient.
889     */
890    if (compressed) {
891       switch (surf->bpe) {
892       case 8:
893          AddrSurfInfoIn.format = ADDR_FMT_BC1;
894          break;
895       case 16:
896          AddrSurfInfoIn.format = ADDR_FMT_BC3;
897          break;
898       default:
899          assert(0);
900       }
901    } else {
902       AddrDccIn.bpp = AddrSurfInfoIn.bpp = surf->bpe * 8;
903    }
904 
905    AddrDccIn.numSamples = AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
906    AddrSurfInfoIn.tileIndex = -1;
907 
908    if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER)) {
909       AddrDccIn.numSamples = AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
910    }
911 
912    /* Set the micro tile type. */
913    if (surf->flags & RADEON_SURF_SCANOUT)
914       AddrSurfInfoIn.tileType = ADDR_DISPLAYABLE;
915    else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
916       AddrSurfInfoIn.tileType = ADDR_DEPTH_SAMPLE_ORDER;
917    else
918       AddrSurfInfoIn.tileType = ADDR_NON_DISPLAYABLE;
919 
920    AddrSurfInfoIn.flags.color = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
921    AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
922    AddrSurfInfoIn.flags.cube = config->is_cube;
923    AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
924    AddrSurfInfoIn.flags.pow2Pad = config->info.levels > 1;
925    AddrSurfInfoIn.flags.tcCompatible = (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) != 0;
926 
927    /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
928     * requested, because TC-compatible HTILE requires 2D tiling.
929     */
930    AddrSurfInfoIn.flags.opt4Space = !AddrSurfInfoIn.flags.tcCompatible &&
931                                     !AddrSurfInfoIn.flags.fmask && config->info.samples <= 1 &&
932                                     !(surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE);
933 
934    /* DCC notes:
935     * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
936     *   with samples >= 4.
937     * - Mipmapped array textures have low performance (discovered by a closed
938     *   driver team).
939     */
940    AddrSurfInfoIn.flags.dccCompatible =
941       info->chip_class >= GFX8 && info->has_graphics && /* disable DCC on compute-only chips */
942       !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
943       !compressed &&
944       ((config->info.array_size == 1 && config->info.depth == 1) || config->info.levels == 1);
945 
946    AddrSurfInfoIn.flags.noStencil = (surf->flags & RADEON_SURF_SBUFFER) == 0;
947    AddrSurfInfoIn.flags.compressZ = !!(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
948 
949    /* On GFX7-GFX8, the DB uses the same pitch and tile mode (except tilesplit)
950     * for Z and stencil. This can cause a number of problems which we work
951     * around here:
952     *
953     * - a depth part that is incompatible with mipmapped texturing
954     * - at least on Stoney, entirely incompatible Z/S aspects (e.g.
955     *   incorrect tiling applied to the stencil part, stencil buffer
956     *   memory accesses that go out of bounds) even without mipmapping
957     *
958     * Some piglit tests that are prone to different types of related
959     * failures:
960     *  ./bin/ext_framebuffer_multisample-upsample 2 stencil
961     *  ./bin/framebuffer-blit-levels {draw,read} stencil
962     *  ./bin/ext_framebuffer_multisample-unaligned-blit N {depth,stencil} {msaa,upsample,downsample}
963     *  ./bin/fbo-depth-array fs-writes-{depth,stencil} / {depth,stencil}-{clear,layered-clear,draw}
964     *  ./bin/depthstencil-render-miplevels 1024 d=s=z24_s8
965     */
966    int stencil_tile_idx = -1;
967 
968    if (AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.noStencil &&
969        (config->info.levels > 1 || info->family == CHIP_STONEY)) {
970       /* Compute stencilTileIdx that is compatible with the (depth)
971        * tileIdx. This degrades the depth surface if necessary to
972        * ensure that a matching stencilTileIdx exists. */
973       AddrSurfInfoIn.flags.matchStencilTileCfg = 1;
974 
975       /* Keep the depth mip-tail compatible with texturing. */
976       AddrSurfInfoIn.flags.noStencil = 1;
977    }
978 
979    /* Set preferred macrotile parameters. This is usually required
980     * for shared resources. This is for 2D tiling only. */
981    if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
982        AddrSurfInfoIn.tileMode >= ADDR_TM_2D_TILED_THIN1 && surf->u.legacy.bankw &&
983        surf->u.legacy.bankh && surf->u.legacy.mtilea && surf->u.legacy.tile_split) {
984       /* If any of these parameters are incorrect, the calculation
985        * will fail. */
986       AddrTileInfoIn.banks = surf->u.legacy.num_banks;
987       AddrTileInfoIn.bankWidth = surf->u.legacy.bankw;
988       AddrTileInfoIn.bankHeight = surf->u.legacy.bankh;
989       AddrTileInfoIn.macroAspectRatio = surf->u.legacy.mtilea;
990       AddrTileInfoIn.tileSplitBytes = surf->u.legacy.tile_split;
991       AddrTileInfoIn.pipeConfig = surf->u.legacy.pipe_config + 1; /* +1 compared to GB_TILE_MODE */
992       AddrSurfInfoIn.flags.opt4Space = 0;
993       AddrSurfInfoIn.pTileInfo = &AddrTileInfoIn;
994 
995       /* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
996        * the tile index, because we are expected to know it if
997        * we know the other parameters.
998        *
999        * This is something that can easily be fixed in Addrlib.
1000        * For now, just figure it out here.
1001        * Note that only 2D_TILE_THIN1 is handled here.
1002        */
1003       assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1004       assert(AddrSurfInfoIn.tileMode == ADDR_TM_2D_TILED_THIN1);
1005 
1006       if (info->chip_class == GFX6) {
1007          if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE) {
1008             if (surf->bpe == 2)
1009                AddrSurfInfoIn.tileIndex = 11; /* 16bpp */
1010             else
1011                AddrSurfInfoIn.tileIndex = 12; /* 32bpp */
1012          } else {
1013             if (surf->bpe == 1)
1014                AddrSurfInfoIn.tileIndex = 14; /* 8bpp */
1015             else if (surf->bpe == 2)
1016                AddrSurfInfoIn.tileIndex = 15; /* 16bpp */
1017             else if (surf->bpe == 4)
1018                AddrSurfInfoIn.tileIndex = 16; /* 32bpp */
1019             else
1020                AddrSurfInfoIn.tileIndex = 17; /* 64bpp (and 128bpp) */
1021          }
1022       } else {
1023          /* GFX7 - GFX8 */
1024          if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE)
1025             AddrSurfInfoIn.tileIndex = 10; /* 2D displayable */
1026          else
1027             AddrSurfInfoIn.tileIndex = 14; /* 2D non-displayable */
1028 
1029          /* Addrlib doesn't set this if tileIndex is forced like above. */
1030          AddrSurfInfoOut.macroModeIndex = cik_get_macro_tile_index(surf);
1031       }
1032    }
1033 
1034    surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
1035    surf->num_dcc_levels = 0;
1036    surf->surf_size = 0;
1037    surf->dcc_size = 0;
1038    surf->dcc_alignment = 1;
1039    surf->htile_size = 0;
1040    surf->htile_slice_size = 0;
1041    surf->htile_alignment = 1;
1042 
1043    const bool only_stencil =
1044       (surf->flags & RADEON_SURF_SBUFFER) && !(surf->flags & RADEON_SURF_ZBUFFER);
1045 
1046    /* Calculate texture layout information. */
1047    if (!only_stencil) {
1048       for (level = 0; level < config->info.levels; level++) {
1049          r = gfx6_compute_level(addrlib, config, surf, false, level, compressed, &AddrSurfInfoIn,
1050                                 &AddrSurfInfoOut, &AddrDccIn, &AddrDccOut, &AddrHtileIn,
1051                                 &AddrHtileOut);
1052          if (r)
1053             return r;
1054 
1055          if (level > 0)
1056             continue;
1057 
1058          if (!AddrSurfInfoOut.tcCompatible) {
1059             AddrSurfInfoIn.flags.tcCompatible = 0;
1060             surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1061          }
1062 
1063          if (AddrSurfInfoIn.flags.matchStencilTileCfg) {
1064             AddrSurfInfoIn.flags.matchStencilTileCfg = 0;
1065             AddrSurfInfoIn.tileIndex = AddrSurfInfoOut.tileIndex;
1066             stencil_tile_idx = AddrSurfInfoOut.stencilTileIdx;
1067 
1068             assert(stencil_tile_idx >= 0);
1069          }
1070 
1071          r = gfx6_surface_settings(addrlib, info, config, &AddrSurfInfoOut, surf);
1072          if (r)
1073             return r;
1074       }
1075    }
1076 
1077    /* Calculate texture layout information for stencil. */
1078    if (surf->flags & RADEON_SURF_SBUFFER) {
1079       AddrSurfInfoIn.tileIndex = stencil_tile_idx;
1080       AddrSurfInfoIn.bpp = 8;
1081       AddrSurfInfoIn.flags.depth = 0;
1082       AddrSurfInfoIn.flags.stencil = 1;
1083       AddrSurfInfoIn.flags.tcCompatible = 0;
1084       /* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
1085       AddrTileInfoIn.tileSplitBytes = surf->u.legacy.stencil_tile_split;
1086 
1087       for (level = 0; level < config->info.levels; level++) {
1088          r = gfx6_compute_level(addrlib, config, surf, true, level, compressed, &AddrSurfInfoIn,
1089                                 &AddrSurfInfoOut, &AddrDccIn, &AddrDccOut, NULL, NULL);
1090          if (r)
1091             return r;
1092 
1093          /* DB uses the depth pitch for both stencil and depth. */
1094          if (!only_stencil) {
1095             if (surf->u.legacy.stencil_level[level].nblk_x != surf->u.legacy.level[level].nblk_x)
1096                surf->u.legacy.stencil_adjusted = true;
1097          } else {
1098             surf->u.legacy.level[level].nblk_x = surf->u.legacy.stencil_level[level].nblk_x;
1099          }
1100 
1101          if (level == 0) {
1102             if (only_stencil) {
1103                r = gfx6_surface_settings(addrlib, info, config, &AddrSurfInfoOut, surf);
1104                if (r)
1105                   return r;
1106             }
1107 
1108             /* For 2D modes only. */
1109             if (AddrSurfInfoOut.tileMode >= ADDR_TM_2D_TILED_THIN1) {
1110                surf->u.legacy.stencil_tile_split = AddrSurfInfoOut.pTileInfo->tileSplitBytes;
1111             }
1112          }
1113       }
1114    }
1115 
1116    /* Compute FMASK. */
1117    if (config->info.samples >= 2 && AddrSurfInfoIn.flags.color && info->has_graphics &&
1118        !(surf->flags & RADEON_SURF_NO_FMASK)) {
1119       ADDR_COMPUTE_FMASK_INFO_INPUT fin = {0};
1120       ADDR_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1121       ADDR_TILEINFO fmask_tile_info = {0};
1122 
1123       fin.size = sizeof(fin);
1124       fout.size = sizeof(fout);
1125 
1126       fin.tileMode = AddrSurfInfoOut.tileMode;
1127       fin.pitch = AddrSurfInfoOut.pitch;
1128       fin.height = config->info.height;
1129       fin.numSlices = AddrSurfInfoIn.numSlices;
1130       fin.numSamples = AddrSurfInfoIn.numSamples;
1131       fin.numFrags = AddrSurfInfoIn.numFrags;
1132       fin.tileIndex = -1;
1133       fout.pTileInfo = &fmask_tile_info;
1134 
1135       r = AddrComputeFmaskInfo(addrlib, &fin, &fout);
1136       if (r)
1137          return r;
1138 
1139       surf->fmask_size = fout.fmaskBytes;
1140       surf->fmask_alignment = fout.baseAlign;
1141       surf->fmask_tile_swizzle = 0;
1142 
1143       surf->u.legacy.fmask.slice_tile_max = (fout.pitch * fout.height) / 64;
1144       if (surf->u.legacy.fmask.slice_tile_max)
1145          surf->u.legacy.fmask.slice_tile_max -= 1;
1146 
1147       surf->u.legacy.fmask.tiling_index = fout.tileIndex;
1148       surf->u.legacy.fmask.bankh = fout.pTileInfo->bankHeight;
1149       surf->u.legacy.fmask.pitch_in_pixels = fout.pitch;
1150       surf->u.legacy.fmask.slice_size = fout.sliceSize;
1151 
1152       /* Compute tile swizzle for FMASK. */
1153       if (config->info.fmask_surf_index && !(surf->flags & RADEON_SURF_SHAREABLE)) {
1154          ADDR_COMPUTE_BASE_SWIZZLE_INPUT xin = {0};
1155          ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT xout = {0};
1156 
1157          xin.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
1158          xout.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
1159 
1160          /* This counter starts from 1 instead of 0. */
1161          xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1162          xin.tileIndex = fout.tileIndex;
1163          xin.macroModeIndex = fout.macroModeIndex;
1164          xin.pTileInfo = fout.pTileInfo;
1165          xin.tileMode = fin.tileMode;
1166 
1167          int r = AddrComputeBaseSwizzle(addrlib, &xin, &xout);
1168          if (r != ADDR_OK)
1169             return r;
1170 
1171          assert(xout.tileSwizzle <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1172          surf->fmask_tile_swizzle = xout.tileSwizzle;
1173       }
1174    }
1175 
1176    /* Recalculate the whole DCC miptree size including disabled levels.
1177     * This is what addrlib does, but calling addrlib would be a lot more
1178     * complicated.
1179     */
1180    if (surf->dcc_size && config->info.levels > 1) {
1181       /* The smallest miplevels that are never compressed by DCC
1182        * still read the DCC buffer via TC if the base level uses DCC,
1183        * and for some reason the DCC buffer needs to be larger if
1184        * the miptree uses non-zero tile_swizzle. Otherwise there are
1185        * VM faults.
1186        *
1187        * "dcc_alignment * 4" was determined by trial and error.
1188        */
1189       surf->dcc_size = align64(surf->surf_size >> 8, surf->dcc_alignment * 4);
1190    }
1191 
1192    /* Make sure HTILE covers the whole miptree, because the shader reads
1193     * TC-compatible HTILE even for levels where it's disabled by DB.
1194     */
1195    if (surf->htile_size && config->info.levels > 1 &&
1196        surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) {
1197       /* MSAA can't occur with levels > 1, so ignore the sample count. */
1198       const unsigned total_pixels = surf->surf_size / surf->bpe;
1199       const unsigned htile_block_size = 8 * 8;
1200       const unsigned htile_element_size = 4;
1201 
1202       surf->htile_size = (total_pixels / htile_block_size) * htile_element_size;
1203       surf->htile_size = align(surf->htile_size, surf->htile_alignment);
1204    } else if (!surf->htile_size) {
1205       /* Unset this if HTILE is not present. */
1206       surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1207    }
1208 
1209    surf->is_linear = surf->u.legacy.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED;
1210    surf->is_displayable = surf->is_linear || surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
1211                           surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER;
1212 
1213    /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
1214     * used at the same time. This case is not currently expected to occur
1215     * because we don't use rotated. Enforce this restriction on all chips
1216     * to facilitate testing.
1217     */
1218    if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER) {
1219       assert(!"rotate micro tile mode is unsupported");
1220       return ADDR_ERROR;
1221    }
1222 
1223    ac_compute_cmask(info, config, surf);
1224    return 0;
1225 }
1226 
1227 /* This is only called when expecting a tiled layout. */
gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib,struct radeon_surf * surf,ADDR2_COMPUTE_SURFACE_INFO_INPUT * in,bool is_fmask,AddrSwizzleMode * swizzle_mode)1228 static int gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib, struct radeon_surf *surf,
1229                                            ADDR2_COMPUTE_SURFACE_INFO_INPUT *in, bool is_fmask,
1230                                            AddrSwizzleMode *swizzle_mode)
1231 {
1232    ADDR_E_RETURNCODE ret;
1233    ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin = {0};
1234    ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout = {0};
1235 
1236    sin.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT);
1237    sout.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT);
1238 
1239    sin.flags = in->flags;
1240    sin.resourceType = in->resourceType;
1241    sin.format = in->format;
1242    sin.resourceLoction = ADDR_RSRC_LOC_INVIS;
1243    /* TODO: We could allow some of these: */
1244    sin.forbiddenBlock.micro = 1; /* don't allow the 256B swizzle modes */
1245    sin.forbiddenBlock.var = 1;   /* don't allow the variable-sized swizzle modes */
1246    sin.bpp = in->bpp;
1247    sin.width = in->width;
1248    sin.height = in->height;
1249    sin.numSlices = in->numSlices;
1250    sin.numMipLevels = in->numMipLevels;
1251    sin.numSamples = in->numSamples;
1252    sin.numFrags = in->numFrags;
1253 
1254    if (is_fmask) {
1255       sin.flags.display = 0;
1256       sin.flags.color = 0;
1257       sin.flags.fmask = 1;
1258    }
1259 
1260    if (surf->flags & RADEON_SURF_FORCE_MICRO_TILE_MODE) {
1261       sin.forbiddenBlock.linear = 1;
1262 
1263       if (surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY)
1264          sin.preferredSwSet.sw_D = 1;
1265       else if (surf->micro_tile_mode == RADEON_MICRO_MODE_STANDARD)
1266          sin.preferredSwSet.sw_S = 1;
1267       else if (surf->micro_tile_mode == RADEON_MICRO_MODE_DEPTH)
1268          sin.preferredSwSet.sw_Z = 1;
1269       else if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER)
1270          sin.preferredSwSet.sw_R = 1;
1271    }
1272 
1273    ret = Addr2GetPreferredSurfaceSetting(addrlib, &sin, &sout);
1274    if (ret != ADDR_OK)
1275       return ret;
1276 
1277    *swizzle_mode = sout.swizzleMode;
1278    return 0;
1279 }
1280 
is_dcc_supported_by_CB(const struct radeon_info * info,unsigned sw_mode)1281 static bool is_dcc_supported_by_CB(const struct radeon_info *info, unsigned sw_mode)
1282 {
1283    if (info->chip_class >= GFX10)
1284       return sw_mode == ADDR_SW_64KB_Z_X || sw_mode == ADDR_SW_64KB_R_X;
1285 
1286    return sw_mode != ADDR_SW_LINEAR;
1287 }
1288 
is_dcc_supported_by_L2(const struct radeon_info * info,const struct radeon_surf * surf)1289 ASSERTED static bool is_dcc_supported_by_L2(const struct radeon_info *info,
1290                                             const struct radeon_surf *surf)
1291 {
1292    if (info->chip_class <= GFX9) {
1293       /* Only independent 64B blocks are supported. */
1294       return surf->u.gfx9.dcc.independent_64B_blocks && !surf->u.gfx9.dcc.independent_128B_blocks &&
1295              surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B;
1296    }
1297 
1298    if (info->family == CHIP_NAVI10) {
1299       /* Only independent 128B blocks are supported. */
1300       return !surf->u.gfx9.dcc.independent_64B_blocks && surf->u.gfx9.dcc.independent_128B_blocks &&
1301              surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
1302    }
1303 
1304    if (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14) {
1305       /* Either 64B or 128B can be used, but not both.
1306        * If 64B is used, DCC image stores are unsupported.
1307        */
1308       return surf->u.gfx9.dcc.independent_64B_blocks != surf->u.gfx9.dcc.independent_128B_blocks &&
1309              (!surf->u.gfx9.dcc.independent_64B_blocks ||
1310               surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B) &&
1311              (!surf->u.gfx9.dcc.independent_128B_blocks ||
1312               surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B);
1313    }
1314 
1315    /* 128B is recommended, but 64B can be set too if needed for 4K by DCN.
1316     * Since there is no reason to ever disable 128B, require it.
1317     * DCC image stores are always supported.
1318     */
1319    return surf->u.gfx9.dcc.independent_128B_blocks &&
1320           surf->u.gfx9.dcc.max_compressed_block_size <= V_028C78_MAX_BLOCK_SIZE_128B;
1321 }
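
/* Per-generation L2 constraints checked above, in summary:
 *   GFX9:          only independent 64B blocks, max compressed block size 64B.
 *   Navi10:        only independent 128B blocks, max block size <= 128B.
 *   Navi12/Navi14: exactly one of 64B/128B; choosing 64B disables DCC image
 *                  stores.
 *   everything else (GFX10.3+): independent 128B blocks required, max block
 *                  size <= 128B, DCC image stores always supported.
 */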
1322 
1323 static bool is_dcc_supported_by_DCN(const struct radeon_info *info,
1324                                     const struct ac_surf_config *config,
1325                                     const struct radeon_surf *surf, bool rb_aligned,
1326                                     bool pipe_aligned)
1327 {
1328    if (!info->use_display_dcc_unaligned && !info->use_display_dcc_with_retile_blit)
1329       return false;
1330 
1331    /* Only 32bpp (bpe == 4) is supported; 16bpp and 64bpp are more complicated,
1332     * so they are disallowed for now. */
1332    if (surf->bpe != 4)
1333       return false;
1334 
1335    /* Handle unaligned DCC. */
1336    if (info->use_display_dcc_unaligned && (rb_aligned || pipe_aligned))
1337       return false;
1338 
1339    switch (info->chip_class) {
1340    case GFX9:
1341       /* There are more constraints, but we always set
1342        * INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B,
1343        * which always works.
1344        */
1345       assert(surf->u.gfx9.dcc.independent_64B_blocks &&
1346              surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B);
1347       return true;
1348    case GFX10:
1349    case GFX10_3:
1350       /* DCN requires INDEPENDENT_128B_BLOCKS = 0 only on Navi1x. */
1351       if (info->chip_class == GFX10 && surf->u.gfx9.dcc.independent_128B_blocks)
1352          return false;
1353 
1354       /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1. */
1355       return ((config->info.width <= 2560 && config->info.height <= 2560) ||
1356               (surf->u.gfx9.dcc.independent_64B_blocks &&
1357                surf->u.gfx9.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B));
1358    default:
1359       unreachable("unhandled chip");
1360       return false;
1361    }
1362 }
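
/* Concrete cases implied by the checks above: on GFX10.3 a 1920x1080 32bpp
 * scanout surface is accepted with the default independent-128B settings,
 * while a 3840x2160 one additionally needs INDEPENDENT_64B_BLOCKS = 1 and
 * MAX_COMPRESSED_BLOCK_SIZE = 64B; on GFX10 (Navi1x) DCN also requires
 * INDEPENDENT_128B_BLOCKS = 0 regardless of resolution.
 */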
1363 
1364 static int gfx9_compute_miptree(struct ac_addrlib *addrlib, const struct radeon_info *info,
1365                                 const struct ac_surf_config *config, struct radeon_surf *surf,
1366                                 bool compressed, ADDR2_COMPUTE_SURFACE_INFO_INPUT *in)
1367 {
1368    ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {0};
1369    ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
1370    ADDR_E_RETURNCODE ret;
1371 
1372    out.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT);
1373    out.pMipInfo = mip_info;
1374 
1375    ret = Addr2ComputeSurfaceInfo(addrlib->handle, in, &out);
1376    if (ret != ADDR_OK)
1377       return ret;
1378 
1379    if (in->flags.stencil) {
1380       surf->u.gfx9.stencil.swizzle_mode = in->swizzleMode;
1381       surf->u.gfx9.stencil.epitch =
1382          out.epitchIsHeight ? out.mipChainHeight - 1 : out.mipChainPitch - 1;
1383       surf->surf_alignment = MAX2(surf->surf_alignment, out.baseAlign);
1384       surf->u.gfx9.stencil_offset = align(surf->surf_size, out.baseAlign);
1385       surf->surf_size = surf->u.gfx9.stencil_offset + out.surfSize;
1386       return 0;
1387    }
1388 
1389    surf->u.gfx9.surf.swizzle_mode = in->swizzleMode;
1390    surf->u.gfx9.surf.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 : out.mipChainPitch - 1;
1391 
1392    /* CMASK fast clear uses these even if FMASK isn't allocated.
1393     * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
1394     */
1395    surf->u.gfx9.fmask.swizzle_mode = surf->u.gfx9.surf.swizzle_mode & ~0x3;
1396    surf->u.gfx9.fmask.epitch = surf->u.gfx9.surf.epitch;
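   /* For example, if the color surface uses ADDR_SW_64KB_R_X, the derived
    * FMASK/CMASK swizzle mode is ADDR_SW_64KB_Z_X, i.e. the Z mode of the
    * same group of four swizzle modes.
    */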
1397 
1398    surf->u.gfx9.surf_slice_size = out.sliceSize;
1399    surf->u.gfx9.surf_pitch = out.pitch;
1400    surf->u.gfx9.surf_height = out.height;
1401    surf->surf_size = out.surfSize;
1402    surf->surf_alignment = out.baseAlign;
1403 
1404    if (!compressed && surf->blk_w > 1 && out.pitch == out.pixelPitch &&
1405        surf->u.gfx9.surf.swizzle_mode == ADDR_SW_LINEAR) {
1406       /* Adjust surf_pitch to be in element units, not pixels. */
1407       surf->u.gfx9.surf_pitch = align(surf->u.gfx9.surf_pitch / surf->blk_w, 256 / surf->bpe);
1408       surf->u.gfx9.surf.epitch =
1409          MAX2(surf->u.gfx9.surf.epitch, surf->u.gfx9.surf_pitch * surf->blk_w - 1);
1410       /* The surface is really surf->bpe bytes per pixel even though we
1411        * use it here as surf->bpe bytes per element.
1412        * Adjust surf_slice_size and surf_size to reflect the change
1413        * made to surf_pitch.
1414        */
1415       surf->u.gfx9.surf_slice_size =
1416          MAX2(surf->u.gfx9.surf_slice_size,
1417               surf->u.gfx9.surf_pitch * out.height * surf->bpe * surf->blk_w);
1418       surf->surf_size = surf->u.gfx9.surf_slice_size * in->numSlices;
1419    }
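   /* Illustrative numbers for the adjustment above: with bpe = 4, blk_w = 2
    * and a linear pixel pitch of 100, surf_pitch becomes
    * align(100 / 2, 256 / 4) = align(50, 64) = 64 elements, and epitch and
    * the slice size are grown to match.
    */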
1420 
1421    if (in->swizzleMode == ADDR_SW_LINEAR) {
1422       for (unsigned i = 0; i < in->numMipLevels; i++) {
1423          surf->u.gfx9.offset[i] = mip_info[i].offset;
1424          surf->u.gfx9.pitch[i] = mip_info[i].pitch;
1425       }
1426    }
1427 
1428    surf->u.gfx9.base_mip_width = mip_info[0].pitch;
1429    surf->u.gfx9.base_mip_height = mip_info[0].height;
1430 
1431    if (in->flags.depth) {
1432       assert(in->swizzleMode != ADDR_SW_LINEAR);
1433 
1434       if (surf->flags & RADEON_SURF_NO_HTILE)
1435          return 0;
1436 
1437       /* HTILE */
1438       ADDR2_COMPUTE_HTILE_INFO_INPUT hin = {0};
1439       ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout = {0};
1440 
1441       hin.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT);
1442       hout.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT);
1443 
1444       assert(in->flags.metaPipeUnaligned == 0);
1445       assert(in->flags.metaRbUnaligned == 0);
1446 
1447       hin.hTileFlags.pipeAligned = 1;
1448       hin.hTileFlags.rbAligned = 1;
1449       hin.depthFlags = in->flags;
1450       hin.swizzleMode = in->swizzleMode;
1451       hin.unalignedWidth = in->width;
1452       hin.unalignedHeight = in->height;
1453       hin.numSlices = in->numSlices;
1454       hin.numMipLevels = in->numMipLevels;
1455       hin.firstMipIdInTail = out.firstMipIdInTail;
1456 
1457       ret = Addr2ComputeHtileInfo(addrlib->handle, &hin, &hout);
1458       if (ret != ADDR_OK)
1459          return ret;
1460 
1461       surf->htile_size = hout.htileBytes;
1462       surf->htile_slice_size = hout.sliceSize;
1463       surf->htile_alignment = hout.baseAlign;
1464       return 0;
1465    }
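   /* Only color surfaces reach this point; stencil and depth surfaces
    * returned above, so DCC, FMASK and CMASK are never computed for them.
    */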
1466 
1467    {
1468       /* Compute tile swizzle for the color surface.
1469        * All *_X and *_T modes can use the swizzle.
1470        */
1471       if (config->info.surf_index && in->swizzleMode >= ADDR_SW_64KB_Z_T && !out.mipChainInTail &&
1472           !(surf->flags & RADEON_SURF_SHAREABLE) && !in->flags.display) {
1473          ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
1474          ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
1475 
1476          xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
1477          xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
1478 
1479          xin.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
1480          xin.flags = in->flags;
1481          xin.swizzleMode = in->swizzleMode;
1482          xin.resourceType = in->resourceType;
1483          xin.format = in->format;
1484          xin.numSamples = in->numSamples;
1485          xin.numFrags = in->numFrags;
1486 
1487          ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
1488          if (ret != ADDR_OK)
1489             return ret;
1490 
1491          assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1492          surf->tile_swizzle = xout.pipeBankXor;
1493       }
1494 
1495       /* DCC */
1496       if (info->has_graphics && !(surf->flags & RADEON_SURF_DISABLE_DCC) && !compressed &&
1497           is_dcc_supported_by_CB(info, in->swizzleMode) &&
1498           (!in->flags.display ||
1499            is_dcc_supported_by_DCN(info, config, surf, !in->flags.metaRbUnaligned,
1500                                    !in->flags.metaPipeUnaligned))) {
1501          ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
1502          ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
1503          ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {0};
1504 
1505          din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
1506          dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
1507          dout.pMipInfo = meta_mip_info;
1508 
1509          din.dccKeyFlags.pipeAligned = !in->flags.metaPipeUnaligned;
1510          din.dccKeyFlags.rbAligned = !in->flags.metaRbUnaligned;
1511          din.resourceType = in->resourceType;
1512          din.swizzleMode = in->swizzleMode;
1513          din.bpp = in->bpp;
1514          din.unalignedWidth = in->width;
1515          din.unalignedHeight = in->height;
1516          din.numSlices = in->numSlices;
1517          din.numFrags = in->numFrags;
1518          din.numMipLevels = in->numMipLevels;
1519          din.dataSurfaceSize = out.surfSize;
1520          din.firstMipIdInTail = out.firstMipIdInTail;
1521 
1522          ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
1523          if (ret != ADDR_OK)
1524             return ret;
1525 
1526          surf->u.gfx9.dcc.rb_aligned = din.dccKeyFlags.rbAligned;
1527          surf->u.gfx9.dcc.pipe_aligned = din.dccKeyFlags.pipeAligned;
1528          surf->u.gfx9.dcc_block_width = dout.compressBlkWidth;
1529          surf->u.gfx9.dcc_block_height = dout.compressBlkHeight;
1530          surf->u.gfx9.dcc_block_depth = dout.compressBlkDepth;
1531          surf->dcc_size = dout.dccRamSize;
1532          surf->dcc_alignment = dout.dccRamBaseAlign;
1533          surf->num_dcc_levels = in->numMipLevels;
1534 
1535          /* Disable DCC for levels that are in the mip tail.
1536           *
1537           * There are two issues that this is intended to
1538           * address:
1539           *
1540           * 1. Multiple mip levels may share a cache line. This
1541           *    can lead to corruption when switching between
1542           *    rendering to different mip levels because the
1543           *    RBs don't maintain coherency.
1544           *
1545           * 2. Texturing with metadata after rendering sometimes
1546           *    fails with corruption, probably for a similar
1547           *    reason.
1548           *
1549           * Working around these issues for all levels in the
1550           * mip tail may be overly conservative, but it's what
1551           * Vulkan does.
1552           *
1553           * Alternative solutions that also work but are worse:
1554           * - Disable DCC entirely.
1555           * - Flush TC L2 after rendering.
1556           */
1557          for (unsigned i = 0; i < in->numMipLevels; i++) {
1558             if (meta_mip_info[i].inMiptail) {
1559                /* GFX10 can only compress the first level
1560                 * in the mip tail.
1561                 *
1562                 * TODO: Try to do the same thing for gfx9
1563                 *       if there are no regressions.
1564                 */
1565                if (info->chip_class >= GFX10)
1566                   surf->num_dcc_levels = i + 1;
1567                else
1568                   surf->num_dcc_levels = i;
1569                break;
1570             }
1571          }
1572 
1573          if (!surf->num_dcc_levels)
1574             surf->dcc_size = 0;
1575 
1576          surf->u.gfx9.display_dcc_size = surf->dcc_size;
1577          surf->u.gfx9.display_dcc_alignment = surf->dcc_alignment;
1578          surf->u.gfx9.display_dcc_pitch_max = dout.pitch - 1;
1579          surf->u.gfx9.dcc_pitch_max = dout.pitch - 1;
1580 
1581          /* Compute displayable DCC. */
1582          if (in->flags.display && surf->num_dcc_levels && info->use_display_dcc_with_retile_blit) {
1583             /* Compute displayable DCC info. */
1584             din.dccKeyFlags.pipeAligned = 0;
1585             din.dccKeyFlags.rbAligned = 0;
1586 
1587             assert(din.numSlices == 1);
1588             assert(din.numMipLevels == 1);
1589             assert(din.numFrags == 1);
1590             assert(surf->tile_swizzle == 0);
1591             assert(surf->u.gfx9.dcc.pipe_aligned || surf->u.gfx9.dcc.rb_aligned);
1592 
1593             ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
1594             if (ret != ADDR_OK)
1595                return ret;
1596 
1597             surf->u.gfx9.display_dcc_size = dout.dccRamSize;
1598             surf->u.gfx9.display_dcc_alignment = dout.dccRamBaseAlign;
1599             surf->u.gfx9.display_dcc_pitch_max = dout.pitch - 1;
1600             assert(surf->u.gfx9.display_dcc_size <= surf->dcc_size);
1601 
1602             surf->u.gfx9.dcc_retile_use_uint16 =
1603                surf->u.gfx9.display_dcc_size <= UINT16_MAX + 1 && surf->dcc_size <= UINT16_MAX + 1;
1604 
1605             /* Align the retile map size to get more hash table hits and
1606              * decrease the maximum memory footprint when all retile maps
1607              * are cached in the hash table.
1608              */
1609             unsigned retile_dim[2] = {in->width, in->height};
1610 
1611             for (unsigned i = 0; i < 2; i++) {
1612                /* Increase the alignment as the size increases.
1613                 * Greater alignment increases retile compute work,
1614                 * but decreases maximum memory footprint for the cache.
1615                 *
1616                 * With this alignment, the worst case memory footprint of
1617                 * the cache is:
1618                 *   1920x1080: 55 MB
1619                 *   2560x1440: 99 MB
1620                 *   3840x2160: 305 MB
1621                 *
1622                 * The worst case size in MB can be computed in Haskell as follows:
1623                 *   (sum (map get_retile_size (map get_dcc_size (deduplicate (map align_pair
1624                 *       [(i*16,j*16) | i <- [1..maxwidth`div`16], j <- [1..maxheight`div`16]])))))
1625                 *   `div` 1024^2
1626                 *   where alignment x = if x <= 512 then 16 else if x <= 1024 then 32 else if x <= 2048 then 64 else 128
1627                 *         align x = (x + (alignment x) - 1) `div` (alignment x) * (alignment x)
1628                 *         align_pair e = (align (fst e), align (snd e))
1629                 *         deduplicate = map head . groupBy (\ a b -> ((fst a) == (fst b)) && ((snd a) == (snd b))) . sortBy compare
1630                 *         get_dcc_size e = ((fst e) * (snd e) * bpp) `div` 256
1631                 *         get_retile_size dcc_size = dcc_size * 2 * (if dcc_size <= 2^16 then 2 else 4); bpp = 4; maxwidth = 3840; maxheight = 2160
1632                 */
1633                if (retile_dim[i] <= 512)
1634                   retile_dim[i] = align(retile_dim[i], 16);
1635                else if (retile_dim[i] <= 1024)
1636                   retile_dim[i] = align(retile_dim[i], 32);
1637                else if (retile_dim[i] <= 2048)
1638                   retile_dim[i] = align(retile_dim[i], 64);
1639                else
1640                   retile_dim[i] = align(retile_dim[i], 128);
1641 
1642                /* Don't align more than the DCC pixel alignment. */
1643                assert(dout.metaBlkWidth >= 128 && dout.metaBlkHeight >= 128);
1644             }
1645 
1646             surf->u.gfx9.dcc_retile_num_elements =
1647                DIV_ROUND_UP(retile_dim[0], dout.compressBlkWidth) *
1648                DIV_ROUND_UP(retile_dim[1], dout.compressBlkHeight) * 2;
1649             /* Align the size to 4 (for the compute shader). */
1650             surf->u.gfx9.dcc_retile_num_elements = align(surf->u.gfx9.dcc_retile_num_elements, 4);
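            /* Example of the alignment above: a 1920x1080 scanout image is
             * padded to 1920x1088 (1920 is already a multiple of 64, 1080
             * rounds up to 1088), so all allocations of roughly that size
             * share one cached retile map.
             */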
1651 
1652             /* Compute address mapping from non-displayable to displayable DCC. */
1653             ADDR2_COMPUTE_DCC_ADDRFROMCOORD_INPUT addrin;
1654             memset(&addrin, 0, sizeof(addrin));
1655             addrin.size = sizeof(addrin);
1656             addrin.swizzleMode = din.swizzleMode;
1657             addrin.resourceType = din.resourceType;
1658             addrin.bpp = din.bpp;
1659             addrin.numSlices = 1;
1660             addrin.numMipLevels = 1;
1661             addrin.numFrags = 1;
1662             addrin.pitch = dout.pitch;
1663             addrin.height = dout.height;
1664             addrin.compressBlkWidth = dout.compressBlkWidth;
1665             addrin.compressBlkHeight = dout.compressBlkHeight;
1666             addrin.compressBlkDepth = dout.compressBlkDepth;
1667             addrin.metaBlkWidth = dout.metaBlkWidth;
1668             addrin.metaBlkHeight = dout.metaBlkHeight;
1669             addrin.metaBlkDepth = dout.metaBlkDepth;
1670             addrin.dccRamSliceSize = 0; /* Don't care for non-layered images. */
1671 
1672             surf->u.gfx9.dcc_retile_map = ac_compute_dcc_retile_map(
1673                addrlib, info, retile_dim[0], retile_dim[1], surf->u.gfx9.dcc.rb_aligned,
1674                surf->u.gfx9.dcc.pipe_aligned, surf->u.gfx9.dcc_retile_use_uint16,
1675                surf->u.gfx9.dcc_retile_num_elements, &addrin);
1676             if (!surf->u.gfx9.dcc_retile_map)
1677                return ADDR_OUTOFMEMORY;
1678          }
1679       }
1680 
1681       /* FMASK */
1682       if (in->numSamples > 1 && info->has_graphics && !(surf->flags & RADEON_SURF_NO_FMASK)) {
1683          ADDR2_COMPUTE_FMASK_INFO_INPUT fin = {0};
1684          ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1685 
1686          fin.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT);
1687          fout.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT);
1688 
1689          ret = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, in, true, &fin.swizzleMode);
1690          if (ret != ADDR_OK)
1691             return ret;
1692 
1693          fin.unalignedWidth = in->width;
1694          fin.unalignedHeight = in->height;
1695          fin.numSlices = in->numSlices;
1696          fin.numSamples = in->numSamples;
1697          fin.numFrags = in->numFrags;
1698 
1699          ret = Addr2ComputeFmaskInfo(addrlib->handle, &fin, &fout);
1700          if (ret != ADDR_OK)
1701             return ret;
1702 
1703          surf->u.gfx9.fmask.swizzle_mode = fin.swizzleMode;
1704          surf->u.gfx9.fmask.epitch = fout.pitch - 1;
1705          surf->fmask_size = fout.fmaskBytes;
1706          surf->fmask_alignment = fout.baseAlign;
1707 
1708          /* Compute tile swizzle for the FMASK surface. */
1709          if (config->info.fmask_surf_index && fin.swizzleMode >= ADDR_SW_64KB_Z_T &&
1710              !(surf->flags & RADEON_SURF_SHAREABLE)) {
1711             ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
1712             ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
1713 
1714             xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
1715             xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
1716 
1717             /* This counter starts from 1 instead of 0. */
1718             xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1719             xin.flags = in->flags;
1720             xin.swizzleMode = fin.swizzleMode;
1721             xin.resourceType = in->resourceType;
1722             xin.format = in->format;
1723             xin.numSamples = in->numSamples;
1724             xin.numFrags = in->numFrags;
1725 
1726             ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
1727             if (ret != ADDR_OK)
1728                return ret;
1729 
1730             assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->fmask_tile_swizzle) * 8));
1731             surf->fmask_tile_swizzle = xout.pipeBankXor;
1732          }
1733       }
1734 
1735       /* CMASK -- on GFX10 only for FMASK */
1736       if (in->swizzleMode != ADDR_SW_LINEAR && in->resourceType == ADDR_RSRC_TEX_2D &&
1737           ((info->chip_class <= GFX9 && in->numSamples == 1 && in->flags.metaPipeUnaligned == 0 &&
1738             in->flags.metaRbUnaligned == 0) ||
1739            (surf->fmask_size && in->numSamples >= 2))) {
1740          ADDR2_COMPUTE_CMASK_INFO_INPUT cin = {0};
1741          ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout = {0};
1742 
1743          cin.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT);
1744          cout.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT);
1745 
1746          assert(in->flags.metaPipeUnaligned == 0);
1747          assert(in->flags.metaRbUnaligned == 0);
1748 
1749          cin.cMaskFlags.pipeAligned = 1;
1750          cin.cMaskFlags.rbAligned = 1;
1751          cin.resourceType = in->resourceType;
1752          cin.unalignedWidth = in->width;
1753          cin.unalignedHeight = in->height;
1754          cin.numSlices = in->numSlices;
1755 
1756          if (in->numSamples > 1)
1757             cin.swizzleMode = surf->u.gfx9.fmask.swizzle_mode;
1758          else
1759             cin.swizzleMode = in->swizzleMode;
1760 
1761          ret = Addr2ComputeCmaskInfo(addrlib->handle, &cin, &cout);
1762          if (ret != ADDR_OK)
1763             return ret;
1764 
1765          surf->cmask_size = cout.cmaskBytes;
1766          surf->cmask_alignment = cout.baseAlign;
1767       }
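      /* Summary of the condition above: on GFX9 and older, CMASK is also
       * allocated for single-sample color surfaces (for fast clears), while
       * on GFX10+ it is only allocated together with FMASK for MSAA.
       */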
1768    }
1769 
1770    return 0;
1771 }
1772 
1773 static int gfx9_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
1774                                 const struct ac_surf_config *config, enum radeon_surf_mode mode,
1775                                 struct radeon_surf *surf)
1776 {
1777    bool compressed;
1778    ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
1779    int r;
1780 
1781    AddrSurfInfoIn.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT);
1782 
1783    compressed = surf->blk_w == 4 && surf->blk_h == 4;
1784 
1785    /* The format must be set correctly for the allocation of compressed
1786     * textures to work. In other cases, setting the bpp is sufficient. */
1787    if (compressed) {
1788       switch (surf->bpe) {
1789       case 8:
1790          AddrSurfInfoIn.format = ADDR_FMT_BC1;
1791          break;
1792       case 16:
1793          AddrSurfInfoIn.format = ADDR_FMT_BC3;
1794          break;
1795       default:
1796          assert(0);
1797       }
1798    } else {
1799       switch (surf->bpe) {
1800       case 1:
1801          assert(!(surf->flags & RADEON_SURF_ZBUFFER));
1802          AddrSurfInfoIn.format = ADDR_FMT_8;
1803          break;
1804       case 2:
1805          assert(surf->flags & RADEON_SURF_ZBUFFER || !(surf->flags & RADEON_SURF_SBUFFER));
1806          AddrSurfInfoIn.format = ADDR_FMT_16;
1807          break;
1808       case 4:
1809          assert(surf->flags & RADEON_SURF_ZBUFFER || !(surf->flags & RADEON_SURF_SBUFFER));
1810          AddrSurfInfoIn.format = ADDR_FMT_32;
1811          break;
1812       case 8:
1813          assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1814          AddrSurfInfoIn.format = ADDR_FMT_32_32;
1815          break;
1816       case 12:
1817          assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1818          AddrSurfInfoIn.format = ADDR_FMT_32_32_32;
1819          break;
1820       case 16:
1821          assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1822          AddrSurfInfoIn.format = ADDR_FMT_32_32_32_32;
1823          break;
1824       default:
1825          assert(0);
1826       }
1827       AddrSurfInfoIn.bpp = surf->bpe * 8;
1828    }
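   /* For example, a BC1-compressed texture (blk_w = blk_h = 4, bpe = 8) gets
    * ADDR_FMT_BC1, while a plain RGBA8 texture (bpe = 4) gets ADDR_FMT_32
    * with bpp = 32.
    */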
1829 
1830    bool is_color_surface = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
1831    AddrSurfInfoIn.flags.color = is_color_surface && !(surf->flags & RADEON_SURF_NO_RENDER_TARGET);
1832    AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
1833    AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
1834    /* flags.texture currently refers to TC-compatible HTILE */
1835    AddrSurfInfoIn.flags.texture = is_color_surface || surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE;
1836    AddrSurfInfoIn.flags.opt4space = 1;
1837 
1838    AddrSurfInfoIn.numMipLevels = config->info.levels;
1839    AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
1840    AddrSurfInfoIn.numFrags = AddrSurfInfoIn.numSamples;
1841 
1842    if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER))
1843       AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
1844 
1845    /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
1846     * as 2D to avoid needing separate shader variants for 1D vs 2D;
1847     * all shaders must therefore sample 1D textures as 2D. */
1848    if (config->is_3d)
1849       AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_3D;
1850    else if (info->chip_class != GFX9 && config->is_1d)
1851       AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_1D;
1852    else
1853       AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_2D;
1854 
1855    AddrSurfInfoIn.width = config->info.width;
1856    AddrSurfInfoIn.height = config->info.height;
1857 
1858    if (config->is_3d)
1859       AddrSurfInfoIn.numSlices = config->info.depth;
1860    else if (config->is_cube)
1861       AddrSurfInfoIn.numSlices = 6;
1862    else
1863       AddrSurfInfoIn.numSlices = config->info.array_size;
1864 
1865    /* This is propagated to DCC. It must be 0 for HTILE and CMASK. */
1866    AddrSurfInfoIn.flags.metaPipeUnaligned = 0;
1867    AddrSurfInfoIn.flags.metaRbUnaligned = 0;
1868 
1869    /* Optimal values for the L2 cache. */
1870    if (info->chip_class == GFX9) {
1871       surf->u.gfx9.dcc.independent_64B_blocks = 1;
1872       surf->u.gfx9.dcc.independent_128B_blocks = 0;
1873       surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1874    } else if (info->chip_class >= GFX10) {
1875       surf->u.gfx9.dcc.independent_64B_blocks = 0;
1876       surf->u.gfx9.dcc.independent_128B_blocks = 1;
1877       surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
1878    }
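   /* These defaults satisfy the per-generation constraints checked in
    * is_dcc_supported_by_L2; they may be overridden below for displayable
    * surfaces.
    */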
1879 
1880    if (AddrSurfInfoIn.flags.display) {
1881       /* The display hardware can only read DCC with RB_ALIGNED=0 and
1882        * PIPE_ALIGNED=0. PIPE_ALIGNED really means L2CACHE_ALIGNED.
1883        *
1884        * The CB block requires RB_ALIGNED=1, except on chips with a single RB.
1885        * PIPE_ALIGNED is optional, but PIPE_ALIGNED=0 requires L2 flushes
1886        * after rendering, so PIPE_ALIGNED=1 is recommended.
1887        */
1888       if (info->use_display_dcc_unaligned) {
1889          AddrSurfInfoIn.flags.metaPipeUnaligned = 1;
1890          AddrSurfInfoIn.flags.metaRbUnaligned = 1;
1891       }
1892 
1893       /* Adjust DCC settings to meet DCN requirements. */
1894       if (info->use_display_dcc_unaligned || info->use_display_dcc_with_retile_blit) {
1895          /* Only Navi12/14 support independent 64B blocks in L2,
1896           * but without DCC image stores.
1897           */
1898          if (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14) {
1899             surf->u.gfx9.dcc.independent_64B_blocks = 1;
1900             surf->u.gfx9.dcc.independent_128B_blocks = 0;
1901             surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1902          }
1903 
1904          if (info->chip_class >= GFX10_3) {
1905             surf->u.gfx9.dcc.independent_64B_blocks = 1;
1906             surf->u.gfx9.dcc.independent_128B_blocks = 1;
1907             surf->u.gfx9.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
1908          }
1909       }
1910    }
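   /* Resulting example: a displayable surface on GFX10.3 that uses the DCC
    * retile blit ends up with independent 64B and 128B blocks and a 64B max
    * compressed block size, which satisfies both is_dcc_supported_by_L2 and
    * is_dcc_supported_by_DCN (including the 4K case).
    */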
1911 
1912    switch (mode) {
1913    case RADEON_SURF_MODE_LINEAR_ALIGNED:
1914       assert(config->info.samples <= 1);
1915       assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1916       AddrSurfInfoIn.swizzleMode = ADDR_SW_LINEAR;
1917       break;
1918 
1919    case RADEON_SURF_MODE_1D:
1920    case RADEON_SURF_MODE_2D:
1921       if (surf->flags & RADEON_SURF_IMPORTED ||
1922           (info->chip_class >= GFX10 && surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE)) {
1923          AddrSurfInfoIn.swizzleMode = surf->u.gfx9.surf.swizzle_mode;
1924          break;
1925       }
1926 
1927       r = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, &AddrSurfInfoIn, false,
1928                                           &AddrSurfInfoIn.swizzleMode);
1929       if (r)
1930          return r;
1931       break;
1932 
1933    default:
1934       assert(0);
1935    }
1936 
1937    surf->u.gfx9.resource_type = AddrSurfInfoIn.resourceType;
1938    surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
1939 
1940    surf->num_dcc_levels = 0;
1941    surf->surf_size = 0;
1942    surf->fmask_size = 0;
1943    surf->dcc_size = 0;
1944    surf->htile_size = 0;
1945    surf->htile_slice_size = 0;
1946    surf->u.gfx9.surf_offset = 0;
1947    surf->u.gfx9.stencil_offset = 0;
1948    surf->cmask_size = 0;
1949    surf->u.gfx9.dcc_retile_use_uint16 = false;
1950    surf->u.gfx9.dcc_retile_num_elements = 0;
1951    surf->u.gfx9.dcc_retile_map = NULL;
1952 
1953    /* Calculate texture layout information. */
1954    r = gfx9_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn);
1955    if (r)
1956       return r;
1957 
1958    /* Calculate texture layout information for stencil. */
1959    if (surf->flags & RADEON_SURF_SBUFFER) {
1960       AddrSurfInfoIn.flags.stencil = 1;
1961       AddrSurfInfoIn.bpp = 8;
1962       AddrSurfInfoIn.format = ADDR_FMT_8;
1963 
1964       if (!AddrSurfInfoIn.flags.depth) {
1965          r = gfx9_get_preferred_swizzle_mode(addrlib->handle, surf, &AddrSurfInfoIn, false,
1966                                              &AddrSurfInfoIn.swizzleMode);
1967          if (r)
1968             return r;
1969       } else
1970          AddrSurfInfoIn.flags.depth = 0;
1971 
1972       r = gfx9_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn);
1973       if (r)
1974          return r;
1975    }
1976 
1977    surf->is_linear = surf->u.gfx9.surf.swizzle_mode == ADDR_SW_LINEAR;
1978 
1979    /* Query whether the surface is displayable. */
1980    /* This is only useful for surfaces that are allocated without SCANOUT. */
1981    bool displayable = false;
1982    if (!config->is_3d && !config->is_cube) {
1983       r = Addr2IsValidDisplaySwizzleMode(addrlib->handle, surf->u.gfx9.surf.swizzle_mode,
1984                                          surf->bpe * 8, &displayable);
1985       if (r)
1986          return r;
1987 
1988       /* Display needs unaligned DCC. */
1989       if (surf->num_dcc_levels &&
1990           (!is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.dcc.rb_aligned,
1991                                     surf->u.gfx9.dcc.pipe_aligned) ||
1992            /* Don't set is_displayable if displayable DCC is missing. */
1993            (info->use_display_dcc_with_retile_blit && !surf->u.gfx9.dcc_retile_num_elements)))
1994          displayable = false;
1995    }
1996    surf->is_displayable = displayable;
1997 
1998    /* Validate that we allocated a displayable surface if requested. */
1999    assert(!AddrSurfInfoIn.flags.display || surf->is_displayable);
2000 
2001    /* Validate that DCC is set up correctly. */
2002    if (surf->num_dcc_levels) {
2003       assert(is_dcc_supported_by_L2(info, surf));
2004       if (AddrSurfInfoIn.flags.color)
2005          assert(is_dcc_supported_by_CB(info, surf->u.gfx9.surf.swizzle_mode));
2006       if (AddrSurfInfoIn.flags.display) {
2007          assert(is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.dcc.rb_aligned,
2008                                         surf->u.gfx9.dcc.pipe_aligned));
2009       }
2010    }
2011 
2012    if (info->has_graphics && !compressed && !config->is_3d && config->info.levels == 1 &&
2013        AddrSurfInfoIn.flags.color && !surf->is_linear &&
2014        surf->surf_alignment >= 64 * 1024 && /* 64KB tiling */
2015        !(surf->flags & (RADEON_SURF_DISABLE_DCC | RADEON_SURF_FORCE_SWIZZLE_MODE |
2016                         RADEON_SURF_FORCE_MICRO_TILE_MODE))) {
2017       /* Validate that DCC is enabled if DCN can do it. */
2018       if ((info->use_display_dcc_unaligned || info->use_display_dcc_with_retile_blit) &&
2019           AddrSurfInfoIn.flags.display && surf->bpe == 4) {
2020          assert(surf->num_dcc_levels);
2021       }
2022 
2023       /* Validate that non-scanout DCC is always enabled. */
2024       if (!AddrSurfInfoIn.flags.display)
2025          assert(surf->num_dcc_levels);
2026    }
2027 
2028    if (!surf->htile_size) {
2029       /* Unset this if HTILE is not present. */
2030       surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
2031    }
2032 
2033    switch (surf->u.gfx9.surf.swizzle_mode) {
2034    /* S = standard. */
2035    case ADDR_SW_256B_S:
2036    case ADDR_SW_4KB_S:
2037    case ADDR_SW_64KB_S:
2038    case ADDR_SW_64KB_S_T:
2039    case ADDR_SW_4KB_S_X:
2040    case ADDR_SW_64KB_S_X:
2041       surf->micro_tile_mode = RADEON_MICRO_MODE_STANDARD;
2042       break;
2043 
2044    /* D = display. */
2045    case ADDR_SW_LINEAR:
2046    case ADDR_SW_256B_D:
2047    case ADDR_SW_4KB_D:
2048    case ADDR_SW_64KB_D:
2049    case ADDR_SW_64KB_D_T:
2050    case ADDR_SW_4KB_D_X:
2051    case ADDR_SW_64KB_D_X:
2052       surf->micro_tile_mode = RADEON_MICRO_MODE_DISPLAY;
2053       break;
2054 
2055    /* R = rotated (gfx9), render target (gfx10). */
2056    case ADDR_SW_256B_R:
2057    case ADDR_SW_4KB_R:
2058    case ADDR_SW_64KB_R:
2059    case ADDR_SW_64KB_R_T:
2060    case ADDR_SW_4KB_R_X:
2061    case ADDR_SW_64KB_R_X:
2062    case ADDR_SW_VAR_R_X:
2063       /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
2064        * used at the same time. We currently don't use the rotated mode
2065        * on gfx9.
2066        */
2067       assert(info->chip_class >= GFX10 || !"rotate micro tile mode is unsupported");
2068       surf->micro_tile_mode = RADEON_MICRO_MODE_RENDER;
2069       break;
2070 
2071    /* Z = depth. */
2072    case ADDR_SW_4KB_Z:
2073    case ADDR_SW_64KB_Z:
2074    case ADDR_SW_64KB_Z_T:
2075    case ADDR_SW_4KB_Z_X:
2076    case ADDR_SW_64KB_Z_X:
2077    case ADDR_SW_VAR_Z_X:
2078       surf->micro_tile_mode = RADEON_MICRO_MODE_DEPTH;
2079       break;
2080 
2081    default:
2082       assert(0);
2083    }
2084 
2085    return 0;
2086 }
2087 
2088 int ac_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
2089                        const struct ac_surf_config *config, enum radeon_surf_mode mode,
2090                        struct radeon_surf *surf)
2091 {
2092    int r;
2093 
2094    r = surf_config_sanity(config, surf->flags);
2095    if (r)
2096       return r;
2097 
2098    if (info->chip_class >= GFX9)
2099       r = gfx9_compute_surface(addrlib, info, config, mode, surf);
2100    else
2101       r = gfx6_compute_surface(addrlib->handle, info, config, mode, surf);
2102 
2103    if (r)
2104       return r;
2105 
2106    /* Determine the memory layout of multiple allocations in one buffer. */
2107    surf->total_size = surf->surf_size;
2108    surf->alignment = surf->surf_alignment;
2109 
2110    /* Ensure the offsets are always 0 if not available. */
2111    surf->dcc_offset = surf->display_dcc_offset = 0;
2112    surf->fmask_offset = surf->cmask_offset = 0;
2113    surf->htile_offset = 0;
2114 
2115    if (surf->htile_size) {
2116       surf->htile_offset = align64(surf->total_size, surf->htile_alignment);
2117       surf->total_size = surf->htile_offset + surf->htile_size;
2118       surf->alignment = MAX2(surf->alignment, surf->htile_alignment);
2119    }
2120 
2121    if (surf->fmask_size) {
2122       assert(config->info.samples >= 2);
2123       surf->fmask_offset = align64(surf->total_size, surf->fmask_alignment);
2124       surf->total_size = surf->fmask_offset + surf->fmask_size;
2125       surf->alignment = MAX2(surf->alignment, surf->fmask_alignment);
2126    }
2127 
2128    /* Single-sample CMASK is in a separate buffer. */
2129    if (surf->cmask_size && config->info.samples >= 2) {
2130       surf->cmask_offset = align64(surf->total_size, surf->cmask_alignment);
2131       surf->total_size = surf->cmask_offset + surf->cmask_size;
2132       surf->alignment = MAX2(surf->alignment, surf->cmask_alignment);
2133    }
2134 
2135    if (surf->is_displayable)
2136       surf->flags |= RADEON_SURF_SCANOUT;
2137 
2138    if (surf->dcc_size &&
2139        /* dcc_size is computed on GFX9+ only if it's displayable. */
2140        (info->chip_class >= GFX9 || !get_display_flag(config, surf))) {
2141       /* For hardware-specific reasons, it's better to place displayable
2142        * DCC immediately after the image.
2143        */
2144       if (info->chip_class >= GFX9 && surf->u.gfx9.dcc_retile_num_elements) {
2145          /* Add space for the displayable DCC buffer. */
2146          surf->display_dcc_offset = align64(surf->total_size, surf->u.gfx9.display_dcc_alignment);
2147          surf->total_size = surf->display_dcc_offset + surf->u.gfx9.display_dcc_size;
2148       }
2149 
2150       surf->dcc_offset = align64(surf->total_size, surf->dcc_alignment);
2151       surf->total_size = surf->dcc_offset + surf->dcc_size;
2152       surf->alignment = MAX2(surf->alignment, surf->dcc_alignment);
2153    }
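   /* The resulting single-buffer layout (each part aligned as above) is:
    *   [ surface | HTILE | FMASK | CMASK | displayable DCC (if retiled) | DCC ]
    * with parts that don't exist simply skipped.
    */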
2154 
2155    return 0;
2156 }
2157 
2158 /* This is meant to be used for disabling DCC. */
2159 void ac_surface_zero_dcc_fields(struct radeon_surf *surf)
2160 {
2161    surf->dcc_offset = 0;
2162    surf->display_dcc_offset = 0;
2163 }
2164 
2165 static unsigned eg_tile_split(unsigned tile_split)
2166 {
2167    switch (tile_split) {
2168    case 0:
2169       tile_split = 64;
2170       break;
2171    case 1:
2172       tile_split = 128;
2173       break;
2174    case 2:
2175       tile_split = 256;
2176       break;
2177    case 3:
2178       tile_split = 512;
2179       break;
2180    default:
2181    case 4:
2182       tile_split = 1024;
2183       break;
2184    case 5:
2185       tile_split = 2048;
2186       break;
2187    case 6:
2188       tile_split = 4096;
2189       break;
2190    }
2191    return tile_split;
2192 }
2193 
2194 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
2195 {
2196    switch (eg_tile_split) {
2197    case 64:
2198       return 0;
2199    case 128:
2200       return 1;
2201    case 256:
2202       return 2;
2203    case 512:
2204       return 3;
2205    default:
2206    case 1024:
2207       return 4;
2208    case 2048:
2209       return 5;
2210    case 4096:
2211       return 6;
2212    }
2213 }
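
/* Both helpers implement the mapping "encoded value n <-> (64 << n) bytes"
 * for n = 0..6; unknown encodings decode to 1024 bytes and unknown byte
 * sizes encode to 4 (1024 bytes).
 */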
2214 
2215 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_SHIFT 45
2216 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_MASK  0x3
2217 
2218 /* This should be called before ac_compute_surface. */
2219 void ac_surface_set_bo_metadata(const struct radeon_info *info, struct radeon_surf *surf,
2220                                 uint64_t tiling_flags, enum radeon_surf_mode *mode)
2221 {
2222    bool scanout;
2223 
2224    if (info->chip_class >= GFX9) {
2225       surf->u.gfx9.surf.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2226       surf->u.gfx9.dcc.independent_64B_blocks =
2227          AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_64B);
2228       surf->u.gfx9.dcc.independent_128B_blocks =
2229          AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_128B);
2230       surf->u.gfx9.dcc.max_compressed_block_size =
2231          AMDGPU_TILING_GET(tiling_flags, DCC_MAX_COMPRESSED_BLOCK_SIZE);
2232       surf->u.gfx9.display_dcc_pitch_max = AMDGPU_TILING_GET(tiling_flags, DCC_PITCH_MAX);
2233       scanout = AMDGPU_TILING_GET(tiling_flags, SCANOUT);
2234       *mode =
2235          surf->u.gfx9.surf.swizzle_mode > 0 ? RADEON_SURF_MODE_2D : RADEON_SURF_MODE_LINEAR_ALIGNED;
2236    } else {
2237       surf->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2238       surf->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2239       surf->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2240       surf->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
2241       surf->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2242       surf->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2243       scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
2244 
2245       if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
2246          *mode = RADEON_SURF_MODE_2D;
2247       else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
2248          *mode = RADEON_SURF_MODE_1D;
2249       else
2250          *mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
2251    }
2252 
2253    if (scanout)
2254       surf->flags |= RADEON_SURF_SCANOUT;
2255    else
2256       surf->flags &= ~RADEON_SURF_SCANOUT;
2257 }
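
/* A minimal sketch of the intended call order when importing a buffer
 * (driver specifics omitted; tiling_flags comes from the kernel metadata):
 *
 *    enum radeon_surf_mode mode;
 *    ac_surface_set_bo_metadata(info, surf, tiling_flags, &mode);
 *    r = ac_compute_surface(addrlib, info, config, mode, surf);
 */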
2258 
2259 void ac_surface_get_bo_metadata(const struct radeon_info *info, struct radeon_surf *surf,
2260                                 uint64_t *tiling_flags)
2261 {
2262    *tiling_flags = 0;
2263 
2264    if (info->chip_class >= GFX9) {
2265       uint64_t dcc_offset = 0;
2266 
2267       if (surf->dcc_offset) {
2268          dcc_offset = surf->display_dcc_offset ? surf->display_dcc_offset : surf->dcc_offset;
2269          assert((dcc_offset >> 8) != 0 && (dcc_offset >> 8) < (1 << 24));
2270       }
2271 
2272       *tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, surf->u.gfx9.surf.swizzle_mode);
2273       *tiling_flags |= AMDGPU_TILING_SET(DCC_OFFSET_256B, dcc_offset >> 8);
2274       *tiling_flags |= AMDGPU_TILING_SET(DCC_PITCH_MAX, surf->u.gfx9.display_dcc_pitch_max);
2275       *tiling_flags |=
2276          AMDGPU_TILING_SET(DCC_INDEPENDENT_64B, surf->u.gfx9.dcc.independent_64B_blocks);
2277       *tiling_flags |=
2278          AMDGPU_TILING_SET(DCC_INDEPENDENT_128B, surf->u.gfx9.dcc.independent_128B_blocks);
2279       *tiling_flags |= AMDGPU_TILING_SET(DCC_MAX_COMPRESSED_BLOCK_SIZE,
2280                                          surf->u.gfx9.dcc.max_compressed_block_size);
2281       *tiling_flags |= AMDGPU_TILING_SET(SCANOUT, (surf->flags & RADEON_SURF_SCANOUT) != 0);
2282    } else {
2283       if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D)
2284          *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
2285       else if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
2286          *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
2287       else
2288          *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
2289 
2290       *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, surf->u.legacy.pipe_config);
2291       *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(surf->u.legacy.bankw));
2292       *tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(surf->u.legacy.bankh));
2293       if (surf->u.legacy.tile_split)
2294          *tiling_flags |=
2295             AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(surf->u.legacy.tile_split));
2296       *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(surf->u.legacy.mtilea));
2297       *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(surf->u.legacy.num_banks) - 1);
2298 
2299       if (surf->flags & RADEON_SURF_SCANOUT)
2300          *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
2301       else
2302          *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
2303    }
2304 }
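
/* These fields round-trip with ac_surface_set_bo_metadata: e.g. num_banks = 8
 * is stored as NUM_BANKS = log2(8) - 1 = 2 and decoded back as 2 << 2 = 8.
 */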
2305 
2306 static uint32_t ac_get_umd_metadata_word1(const struct radeon_info *info)
2307 {
2308    return (ATI_VENDOR_ID << 16) | info->pci_id;
2309 }
2310 
2311 /* This should be called after ac_compute_surface. */
2312 bool ac_surface_set_umd_metadata(const struct radeon_info *info, struct radeon_surf *surf,
2313                                  unsigned num_storage_samples, unsigned num_mipmap_levels,
2314                                  unsigned size_metadata, uint32_t metadata[64])
2315 {
2316    uint32_t *desc = &metadata[2];
2317    uint64_t offset;
2318 
2319    if (info->chip_class >= GFX9)
2320       offset = surf->u.gfx9.surf_offset;
2321    else
2322       offset = surf->u.legacy.level[0].offset;
2323 
2324    if (offset ||                 /* Non-zero planes ignore metadata. */
2325        size_metadata < 10 * 4 || /* at least 2(header) + 8(desc) dwords */
2326        metadata[0] == 0 ||       /* invalid version number */
2327        metadata[1] != ac_get_umd_metadata_word1(info)) /* invalid PCI ID */ {
2328       /* Disable DCC because it might not be enabled. */
2329       ac_surface_zero_dcc_fields(surf);
2330 
2331       /* Don't report an error if the texture comes from an incompatible driver,
2332        * but this might not work.
2333        */
2334       return true;
2335    }
2336 
2337    /* Validate that sample counts and the number of mipmap levels match. */
2338    unsigned desc_last_level = G_008F1C_LAST_LEVEL(desc[3]);
2339    unsigned type = G_008F1C_TYPE(desc[3]);
2340 
2341    if (type == V_008F1C_SQ_RSRC_IMG_2D_MSAA || type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
2342       unsigned log_samples = util_logbase2(MAX2(1, num_storage_samples));
2343 
2344       if (desc_last_level != log_samples) {
2345          fprintf(stderr,
2346                  "amdgpu: invalid MSAA texture import, "
2347                  "metadata has log2(samples) = %u, the caller set %u\n",
2348                  desc_last_level, log_samples);
2349          return false;
2350       }
2351    } else {
2352       if (desc_last_level != num_mipmap_levels - 1) {
2353          fprintf(stderr,
2354                  "amdgpu: invalid mipmapped texture import, "
2355                  "metadata has last_level = %u, the caller set %u\n",
2356                  desc_last_level, num_mipmap_levels - 1);
2357          return false;
2358       }
2359    }
2360 
2361    if (info->chip_class >= GFX8 && G_008F28_COMPRESSION_EN(desc[6])) {
2362       /* Read DCC information. */
2363       switch (info->chip_class) {
2364       case GFX8:
2365          surf->dcc_offset = (uint64_t)desc[7] << 8;
2366          break;
2367 
2368       case GFX9:
2369          surf->dcc_offset =
2370             ((uint64_t)desc[7] << 8) | ((uint64_t)G_008F24_META_DATA_ADDRESS(desc[5]) << 40);
2371          surf->u.gfx9.dcc.pipe_aligned = G_008F24_META_PIPE_ALIGNED(desc[5]);
2372          surf->u.gfx9.dcc.rb_aligned = G_008F24_META_RB_ALIGNED(desc[5]);
2373 
2374          /* If DCC is unaligned, this can only be a displayable image. */
2375          if (!surf->u.gfx9.dcc.pipe_aligned && !surf->u.gfx9.dcc.rb_aligned)
2376             assert(surf->is_displayable);
2377          break;
2378 
2379       case GFX10:
2380       case GFX10_3:
2381          surf->dcc_offset =
2382             ((uint64_t)G_00A018_META_DATA_ADDRESS_LO(desc[6]) << 8) | ((uint64_t)desc[7] << 16);
2383          surf->u.gfx9.dcc.pipe_aligned = G_00A018_META_PIPE_ALIGNED(desc[6]);
2384          break;
2385 
2386       default:
2387          assert(0);
2388          return false;
2389       }
2390    } else {
2391       /* Disable DCC. dcc_offset is always set by texture_from_handle
2392        * and must be cleared here.
2393        */
2394       ac_surface_zero_dcc_fields(surf);
2395    }
2396 
2397    return true;
2398 }
2399 
2400 void ac_surface_get_umd_metadata(const struct radeon_info *info, struct radeon_surf *surf,
2401                                  unsigned num_mipmap_levels, uint32_t desc[8],
2402                                  unsigned *size_metadata, uint32_t metadata[64])
2403 {
2404    /* Clear the base address and set the relative DCC offset. */
2405    desc[0] = 0;
2406    desc[1] &= C_008F14_BASE_ADDRESS_HI;
2407 
2408    switch (info->chip_class) {
2409    case GFX6:
2410    case GFX7:
2411       break;
2412    case GFX8:
2413       desc[7] = surf->dcc_offset >> 8;
2414       break;
2415    case GFX9:
2416       desc[7] = surf->dcc_offset >> 8;
2417       desc[5] &= C_008F24_META_DATA_ADDRESS;
2418       desc[5] |= S_008F24_META_DATA_ADDRESS(surf->dcc_offset >> 40);
2419       break;
2420    case GFX10:
2421    case GFX10_3:
2422       desc[6] &= C_00A018_META_DATA_ADDRESS_LO;
2423       desc[6] |= S_00A018_META_DATA_ADDRESS_LO(surf->dcc_offset >> 8);
2424       desc[7] = surf->dcc_offset >> 16;
2425       break;
2426    default:
2427       assert(0);
2428    }
2429 
2430    /* Metadata image format, version 1:
2431     * [0] = 1 (metadata format identifier)
2432     * [1] = (VENDOR_ID << 16) | PCI_ID
2433     * [2:9] = image descriptor for the whole resource
2434     *         [2] is always 0, because the base address is cleared
2435     *         [9] is the DCC offset bits [39:8] from the beginning of
2436     *             the buffer
2437     * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
2438     */
2439 
2440    metadata[0] = 1; /* metadata image format version 1 */
2441 
2442    /* Tiling modes are ambiguous without a PCI ID. */
2443    metadata[1] = ac_get_umd_metadata_word1(info);
2444 
2445    /* Dwords [2:9] contain the image descriptor. */
2446    memcpy(&metadata[2], desc, 8 * 4);
2447    *size_metadata = 10 * 4;
2448 
2449    /* Dwords [10:..] contain the mipmap level offsets. */
2450    if (info->chip_class <= GFX8) {
2451       for (unsigned i = 0; i < num_mipmap_levels; i++)
2452          metadata[10 + i] = surf->u.legacy.level[i].offset >> 8;
2453 
2454       *size_metadata += num_mipmap_levels * 4;
2455    }
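   /* Example: on GFX8 a texture with 3 mipmap levels produces
    * 10 * 4 + 3 * 4 = 52 bytes of metadata; on GFX9+ the size stays at
    * 40 bytes because the level offsets are not appended.
    */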
2456 }
2457 
2458 void ac_surface_override_offset_stride(const struct radeon_info *info, struct radeon_surf *surf,
2459                                        unsigned num_mipmap_levels, uint64_t offset, unsigned pitch)
2460 {
2461    if (info->chip_class >= GFX9) {
2462       if (pitch) {
2463          surf->u.gfx9.surf_pitch = pitch;
2464          if (num_mipmap_levels == 1)
2465             surf->u.gfx9.surf.epitch = pitch - 1;
2466          surf->u.gfx9.surf_slice_size = (uint64_t)pitch * surf->u.gfx9.surf_height * surf->bpe;
2467       }
2468       surf->u.gfx9.surf_offset = offset;
2469       if (surf->u.gfx9.stencil_offset)
2470          surf->u.gfx9.stencil_offset += offset;
2471    } else {
2472       if (pitch) {
2473          surf->u.legacy.level[0].nblk_x = pitch;
2474          surf->u.legacy.level[0].slice_size_dw =
2475             ((uint64_t)pitch * surf->u.legacy.level[0].nblk_y * surf->bpe) / 4;
2476       }
2477 
2478       if (offset) {
2479          for (unsigned i = 0; i < ARRAY_SIZE(surf->u.legacy.level); ++i)
2480             surf->u.legacy.level[i].offset += offset;
2481       }
2482    }
2483 
2484    if (surf->htile_offset)
2485       surf->htile_offset += offset;
2486    if (surf->fmask_offset)
2487       surf->fmask_offset += offset;
2488    if (surf->cmask_offset)
2489       surf->cmask_offset += offset;
2490    if (surf->dcc_offset)
2491       surf->dcc_offset += offset;
2492    if (surf->display_dcc_offset)
2493       surf->display_dcc_offset += offset;
2494 }
2495