/*
 * Copyright (C) 2019-2020 Collabora, Ltd.
 * Copyright (C) 2019 Alyssa Rosenzweig
 * Copyright (C) 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <assert.h>

#include "drm-uapi/panfrost_drm.h"

#include "pan_bo.h"
#include "pan_context.h"
#include "util/hash_table.h"
#include "util/ralloc.h"
#include "util/format/u_format.h"
#include "util/u_pack_color.h"
#include "util/rounding.h"
#include "pan_util.h"
#include "pan_blending.h"
#include "pan_cmdstream.h"
#include "decode.h"
#include "panfrost-quirks.h"

/* panfrost_bo_access is here to help us keep track of batch accesses to BOs
 * and build a proper dependency graph such that batches can be pipelined for
 * better GPU utilization.
 *
 * Each accessed BO has a corresponding entry in the ->accessed_bos hash table.
 * A BO is either being written or read at any time (see last_is_write).
 * When the last access is a write, the batch writing the BO might have read
 * dependencies (readers that have not been executed yet and want to read the
 * previous BO content), and when the last access is a read, all readers might
 * depend on another batch to push its results to memory. That's what the
 * readers/writer fields keep track of.
 * There can only be one writer at any given time. If a new batch wants to
 * write to the same BO, a dependency will be added between the new writer and
 * the old writer (at the batch level), and panfrost_bo_access->writer will be
 * updated to point to the new writer.
 */
struct panfrost_bo_access {
        struct util_dynarray readers;
        struct panfrost_batch_fence *writer;
        bool last_is_write;
};

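/* Batch fences are reference-counted so that BO access entries and other
 * batches can keep pointing at them after the batch that created them has
 * been submitted and freed. */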
static struct panfrost_batch_fence *
panfrost_create_batch_fence(struct panfrost_batch *batch)
{
        struct panfrost_batch_fence *fence;

        fence = rzalloc(NULL, struct panfrost_batch_fence);
        assert(fence);
        pipe_reference_init(&fence->reference, 1);
        fence->batch = batch;

        return fence;
}

static void
panfrost_free_batch_fence(struct panfrost_batch_fence *fence)
{
        ralloc_free(fence);
}

void
panfrost_batch_fence_unreference(struct panfrost_batch_fence *fence)
{
        if (pipe_reference(&fence->reference, NULL))
                panfrost_free_batch_fence(fence);
}

void
panfrost_batch_fence_reference(struct panfrost_batch_fence *fence)
{
        pipe_reference(NULL, &fence->reference);
}

static void
panfrost_batch_add_fbo_bos(struct panfrost_batch *batch);

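/* Create a new batch for the given framebuffer state: set up its BO table,
 * scissor bounds, out_sync fence and transient memory pools, and pull in the
 * BOs backing the framebuffer attachments. */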
static struct panfrost_batch *
panfrost_create_batch(struct panfrost_context *ctx,
                      const struct pipe_framebuffer_state *key)
{
        struct panfrost_batch *batch = rzalloc(ctx, struct panfrost_batch);
        struct panfrost_device *dev = pan_device(ctx->base.screen);

        batch->ctx = ctx;

        batch->bos = _mesa_hash_table_create(batch, _mesa_hash_pointer,
                        _mesa_key_pointer_equal);

        batch->minx = batch->miny = ~0;
        batch->maxx = batch->maxy = 0;

        batch->out_sync = panfrost_create_batch_fence(batch);
        util_copy_framebuffer_state(&batch->key, key);

        /* Preallocate the main pool, since every batch has at least one job
         * structure so it will be used */
        panfrost_pool_init(&batch->pool, batch, dev, 0, true);

        /* Don't preallocate the invisible pool, since not every batch will use
         * the pre-allocation, particularly if the varyings are larger than the
         * preallocation and a reallocation is needed after anyway. */
        panfrost_pool_init(&batch->invisible_pool, batch, dev, PAN_BO_INVISIBLE, false);

        panfrost_batch_add_fbo_bos(batch);

        return batch;
}

static void
panfrost_freeze_batch(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;
        struct hash_entry *entry;

        /* Remove the entry in the FBO -> batch hash table if the batch
         * matches and drop the context reference. This way, next draws/clears
         * targeting this FBO will trigger the creation of a new batch.
         */
        entry = _mesa_hash_table_search(ctx->batches, &batch->key);
        if (entry && entry->data == batch)
                _mesa_hash_table_remove(ctx->batches, entry);

        if (ctx->batch == batch)
                ctx->batch = NULL;
}

#ifdef PAN_BATCH_DEBUG
static bool panfrost_batch_is_frozen(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;
        struct hash_entry *entry;

        entry = _mesa_hash_table_search(ctx->batches, &batch->key);
        if (entry && entry->data == batch)
                return false;

        if (ctx->batch == batch)
                return false;

        return true;
}
#endif

static void
panfrost_free_batch(struct panfrost_batch *batch)
{
        if (!batch)
                return;

#ifdef PAN_BATCH_DEBUG
        assert(panfrost_batch_is_frozen(batch));
#endif

        hash_table_foreach(batch->bos, entry)
                panfrost_bo_unreference((struct panfrost_bo *)entry->key);

        panfrost_pool_cleanup(&batch->pool);
        panfrost_pool_cleanup(&batch->invisible_pool);

        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                panfrost_batch_fence_unreference(*dep);
        }

        util_dynarray_fini(&batch->dependencies);

        /* The out_sync fence lifetime is different from the batch one
         * since other batches might want to wait on a fence of an already
         * submitted/signaled batch. All we need to do here is make sure the
         * fence does not point to an invalid batch, which the core will
         * interpret as 'batch is already submitted'.
         */
        batch->out_sync->batch = NULL;
        panfrost_batch_fence_unreference(batch->out_sync);

        util_unreference_framebuffer_state(&batch->key);
        ralloc_free(batch);
}

#ifdef PAN_BATCH_DEBUG
static bool
panfrost_dep_graph_contains_batch(struct panfrost_batch *root,
                                  struct panfrost_batch *batch)
{
        if (!root)
                return false;

        util_dynarray_foreach(&root->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if ((*dep)->batch == batch ||
                    panfrost_dep_graph_contains_batch((*dep)->batch, batch))
                        return true;
        }

        return false;
}
#endif

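/* Record that @batch depends on the batch behind @newdep, unless the
 * dependency is already known. The batch we now depend on is frozen so that
 * later draws targeting its FBO start a fresh batch instead of extending it. */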
static void
panfrost_batch_add_dep(struct panfrost_batch *batch,
                       struct panfrost_batch_fence *newdep)
{
        if (batch == newdep->batch)
                return;

        /* We might want to turn ->dependencies into a set if the number of
         * deps turns out to be big enough to make this 'is dep already there'
         * search inefficient.
         */
        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if (*dep == newdep)
                        return;
        }

#ifdef PAN_BATCH_DEBUG
        /* Make sure the dependency graph is acyclic. */
        assert(!panfrost_dep_graph_contains_batch(newdep->batch, batch));
#endif

        panfrost_batch_fence_reference(newdep);
        util_dynarray_append(&batch->dependencies,
                             struct panfrost_batch_fence *, newdep);

        /* We now have a batch depending on us, let's make sure new draw/clear
         * calls targeting the same FBO use a new batch object.
         */
        if (newdep->batch)
                panfrost_freeze_batch(newdep->batch);
}

static struct panfrost_batch *
panfrost_get_batch(struct panfrost_context *ctx,
                   const struct pipe_framebuffer_state *key)
{
        /* Lookup the job first */
        struct hash_entry *entry = _mesa_hash_table_search(ctx->batches, key);

        if (entry)
                return entry->data;

        /* Otherwise, let's create a job */

        struct panfrost_batch *batch = panfrost_create_batch(ctx, key);

        /* Save the created job */
        _mesa_hash_table_insert(ctx->batches, &batch->key, batch);

        return batch;
}

/* Get the job corresponding to the FBO we're currently rendering into */

struct panfrost_batch *
panfrost_get_batch_for_fbo(struct panfrost_context *ctx)
{
        /* If we already began rendering, use that */

        if (ctx->batch) {
                assert(util_framebuffer_state_equal(&ctx->batch->key,
                                                    &ctx->pipe_framebuffer));
                return ctx->batch;
        }

        /* If not, look up the job */
        struct panfrost_batch *batch = panfrost_get_batch(ctx,
                                                          &ctx->pipe_framebuffer);

        /* Set this job as the current FBO job. Will be reset when updating the
         * FB state and when submitting or releasing a job.
         */
        ctx->batch = batch;
        return batch;
}

struct panfrost_batch *
panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch;

        batch = panfrost_get_batch(ctx, &ctx->pipe_framebuffer);

        /* The batch has no draw/clear queued, let's return it directly.
         * Note that it's perfectly fine to re-use a batch with an
         * existing clear, we'll just update it with the new clear request.
         */
        if (!batch->scoreboard.first_job)
                return batch;

        /* Otherwise, we need to freeze the existing one and instantiate a new
         * one.
         */
        panfrost_freeze_batch(batch);
        return panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
}

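/* Release the fence references held by a BO access entry (writer and all
 * readers) and empty the readers array. Used by the fence garbage collection
 * pass below. */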
static void
panfrost_bo_access_gc_fences(struct panfrost_context *ctx,
                             struct panfrost_bo_access *access,
                             const struct panfrost_bo *bo)
{
        if (access->writer) {
                panfrost_batch_fence_unreference(access->writer);
                access->writer = NULL;
        }

        struct panfrost_batch_fence **readers_array = util_dynarray_begin(&access->readers);
        struct panfrost_batch_fence **new_readers = readers_array;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (!(*reader))
                        continue;

                panfrost_batch_fence_unreference(*reader);
                *reader = NULL;
        }

        if (!util_dynarray_resize(&access->readers, struct panfrost_batch_fence *,
                                  new_readers - readers_array) &&
            new_readers != readers_array)
                unreachable("Invalid dynarray access->readers");
}

/* Collect signaled fences to keep the kernel-side syncobj-map small. The
 * idea is to collect those signaled fences at the end of each flush_all
 * call. This function is likely to collect only fences from previous
 * batch flushes, not the ones that have just been submitted and are
 * probably still in flight when we trigger the garbage collection.
 * Anyway, we need to do this garbage collection at some point if we don't
 * want the BO access map to keep invalid entries around and retain
 * syncobjs forever.
 */
static void
panfrost_gc_fences(struct panfrost_context *ctx)
{
        hash_table_foreach(ctx->accessed_bos, entry) {
                struct panfrost_bo_access *access = entry->data;

                assert(access);
                panfrost_bo_access_gc_fences(ctx, access, entry->key);
                if (!util_dynarray_num_elements(&access->readers,
                                                struct panfrost_batch_fence *) &&
                    !access->writer) {
                        ralloc_free(access);
                        _mesa_hash_table_remove(ctx->accessed_bos, entry);
                }
        }
}

#ifdef PAN_BATCH_DEBUG
static bool
panfrost_batch_in_readers(struct panfrost_batch *batch,
                          struct panfrost_bo_access *access)
{
        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch == batch)
                        return true;
        }

        return false;
}
#endif

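/* Update the context-wide access tracking for @bo after @batch touched it.
 * Depending on whether the new and previous accesses are reads or writes,
 * this adds dependencies on the previous writer and/or readers, and installs
 * our out_sync fence as the new writer or appends it to the readers array. */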
static void
panfrost_batch_update_bo_access(struct panfrost_batch *batch,
                                struct panfrost_bo *bo, bool writes,
                                bool already_accessed)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_bo_access *access;
        bool old_writes = false;
        struct hash_entry *entry;

        entry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = entry ? entry->data : NULL;
        if (access) {
                old_writes = access->last_is_write;
        } else {
                access = rzalloc(ctx, struct panfrost_bo_access);
                util_dynarray_init(&access->readers, access);
                _mesa_hash_table_insert(ctx->accessed_bos, bo, access);
                /* We are the first to access this BO, let's initialize
                 * old_writes to our own access type in that case.
                 */
                old_writes = writes;
        }

        assert(access);

        if (writes && !old_writes) {
                /* Previous access was a read and we want to write this BO.
                 * We first need to add explicit deps between our batch and
                 * the previous readers.
                 */
                util_dynarray_foreach(&access->readers,
                                      struct panfrost_batch_fence *, reader) {
                        /* We were already reading the BO, no need to add a dep
                         * on ourself (the acyclic check would complain about
                         * that).
                         */
                        if (!(*reader) || (*reader)->batch == batch)
                                continue;

                        panfrost_batch_add_dep(batch, *reader);
                }
                panfrost_batch_fence_reference(batch->out_sync);

                if (access->writer)
                        panfrost_batch_fence_unreference(access->writer);

                /* We now are the new writer. */
                access->writer = batch->out_sync;

                /* Release the previous readers and reset the readers array. */
                util_dynarray_foreach(&access->readers,
                                      struct panfrost_batch_fence *,
                                      reader) {
                        if (!*reader)
                                continue;
                        panfrost_batch_fence_unreference(*reader);
                }

                util_dynarray_clear(&access->readers);
        } else if (writes && old_writes) {
                /* First check if we were the previous writer, in that case
                 * there's nothing to do. Otherwise we need to add a
                 * dependency between the new writer and the old one.
                 */
                if (access->writer != batch->out_sync) {
                        if (access->writer) {
                                panfrost_batch_add_dep(batch, access->writer);
                                panfrost_batch_fence_unreference(access->writer);
                        }
                        panfrost_batch_fence_reference(batch->out_sync);
                        access->writer = batch->out_sync;
                }
        } else if (!writes && old_writes) {
                /* First check if we were the previous writer, in that case
                 * we want to keep the access type unchanged, as a write is
                 * more constraining than a read.
                 */
                if (access->writer != batch->out_sync) {
                        /* Add a dependency on the previous writer. */
                        panfrost_batch_add_dep(batch, access->writer);

                        /* The previous access was a write, there's no reason
                         * to have entries in the readers array.
                         */
                        assert(!util_dynarray_num_elements(&access->readers,
                                                           struct panfrost_batch_fence *));

                        /* Add ourselves to the readers array. */
                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&access->readers,
                                             struct panfrost_batch_fence *,
                                             batch->out_sync);
                }
        } else {
                /* We already accessed this BO before, so we should already be
                 * in the reader array.
                 */
#ifdef PAN_BATCH_DEBUG
                if (already_accessed) {
                        assert(panfrost_batch_in_readers(batch, access));
                        return;
                }
#endif

                /* Previous access was a read and we want to read this BO.
                 * Add ourselves to the readers array and add a dependency on
                 * the previous writer if any.
                 */
                panfrost_batch_fence_reference(batch->out_sync);
                util_dynarray_append(&access->readers,
                                     struct panfrost_batch_fence *,
                                     batch->out_sync);

                if (access->writer)
                        panfrost_batch_add_dep(batch, access->writer);
        }

        access->last_is_write = writes;
}

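/* Add a BO to the batch's BO set with the given access flags, taking a
 * reference the first time the batch sees it. Shared BOs additionally go
 * through the cross-batch dependency tracking above. */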
void
panfrost_batch_add_bo(struct panfrost_batch *batch, struct panfrost_bo *bo,
                      uint32_t flags)
{
        if (!bo)
                return;

        struct hash_entry *entry;
        uint32_t old_flags = 0;

        entry = _mesa_hash_table_search(batch->bos, bo);
        if (!entry) {
                entry = _mesa_hash_table_insert(batch->bos, bo,
                                                (void *)(uintptr_t)flags);
                panfrost_bo_reference(bo);
        } else {
                old_flags = (uintptr_t)entry->data;

                /* All batches have to agree on the shared flag. */
                assert((old_flags & PAN_BO_ACCESS_SHARED) ==
                       (flags & PAN_BO_ACCESS_SHARED));
        }

        assert(entry);

        if (old_flags == flags)
                return;

        flags |= old_flags;
        entry->data = (void *)(uintptr_t)flags;

        /* If this is not a shared BO, we don't really care about dependency
         * tracking.
         */
        if (!(flags & PAN_BO_ACCESS_SHARED))
                return;

        assert(flags & PAN_BO_ACCESS_RW);
        panfrost_batch_update_bo_access(batch, bo, flags & PAN_BO_ACCESS_WRITE,
                        old_flags != 0);
}

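/* Add every BO backing a resource to the batch: the main BO, any per-level
 * checksum (CRC) BOs, and the separate stencil BO if present. */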
static void
panfrost_batch_add_resource_bos(struct panfrost_batch *batch,
                                struct panfrost_resource *rsrc,
                                uint32_t flags)
{
        panfrost_batch_add_bo(batch, rsrc->bo, flags);

        for (unsigned i = 0; i < MAX_MIP_LEVELS; i++)
                if (rsrc->slices[i].checksum_bo)
                        panfrost_batch_add_bo(batch, rsrc->slices[i].checksum_bo, flags);

        if (rsrc->separate_stencil)
                panfrost_batch_add_bo(batch, rsrc->separate_stencil->bo, flags);
}

static void
panfrost_batch_add_fbo_bos(struct panfrost_batch *batch)
{
        uint32_t flags = PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_WRITE |
                         PAN_BO_ACCESS_VERTEX_TILER |
                         PAN_BO_ACCESS_FRAGMENT;

        for (unsigned i = 0; i < batch->key.nr_cbufs; ++i) {
                struct panfrost_resource *rsrc = pan_resource(batch->key.cbufs[i]->texture);
                panfrost_batch_add_resource_bos(batch, rsrc, flags);
        }

        if (batch->key.zsbuf) {
                struct panfrost_resource *rsrc = pan_resource(batch->key.zsbuf->texture);
                panfrost_batch_add_resource_bos(batch, rsrc, flags);
        }
}

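/* Allocate a BO owned by the batch: the batch ends up holding the only
 * reference, so the BO is released (back to the BO cache) when the batch is
 * freed, unless someone else takes a reference in the meantime. */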
struct panfrost_bo *
panfrost_batch_create_bo(struct panfrost_batch *batch, size_t size,
                         uint32_t create_flags, uint32_t access_flags)
{
        struct panfrost_bo *bo;

        bo = panfrost_bo_create(pan_device(batch->ctx->base.screen), size,
                                create_flags);
        panfrost_batch_add_bo(batch, bo, access_flags);

        /* panfrost_batch_add_bo() has retained a reference and
         * panfrost_bo_create() initializes the refcnt to 1, so let's
         * unreference the BO here so it gets released when the batch is
         * destroyed (unless it's retained by someone else in the meantime).
         */
        panfrost_bo_unreference(bo);
        return bo;
}

/* Returns the polygon list's GPU address if available, or otherwise allocates
 * the polygon list. It's perfectly fast to allocate/free the BO directly,
 * since we'll hit the BO cache and this is one-per-batch anyway. */

mali_ptr
panfrost_batch_get_polygon_list(struct panfrost_batch *batch, unsigned size)
{
        if (batch->polygon_list) {
                assert(batch->polygon_list->size >= size);
        } else {
                /* Create the BO as invisible, as there's no reason to map */
                size = util_next_power_of_two(size);

                batch->polygon_list = panfrost_batch_create_bo(batch, size,
                                                               PAN_BO_INVISIBLE,
                                                               PAN_BO_ACCESS_PRIVATE |
                                                               PAN_BO_ACCESS_RW |
                                                               PAN_BO_ACCESS_VERTEX_TILER |
                                                               PAN_BO_ACCESS_FRAGMENT);
        }

        return batch->polygon_list->ptr.gpu;
}

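/* Lazily allocate the batch's thread-local storage scratchpad, sized for the
 * requested per-thread stack size across all cores. */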
struct panfrost_bo *
panfrost_batch_get_scratchpad(struct panfrost_batch *batch,
                unsigned size_per_thread,
                unsigned thread_tls_alloc,
                unsigned core_count)
{
        unsigned size = panfrost_get_total_stack_size(size_per_thread,
                        thread_tls_alloc,
                        core_count);

        if (batch->scratchpad) {
                assert(batch->scratchpad->size >= size);
        } else {
                batch->scratchpad = panfrost_batch_create_bo(batch, size,
                                             PAN_BO_INVISIBLE,
                                             PAN_BO_ACCESS_PRIVATE |
                                             PAN_BO_ACCESS_RW |
                                             PAN_BO_ACCESS_VERTEX_TILER |
                                             PAN_BO_ACCESS_FRAGMENT);
        }

        return batch->scratchpad;
}

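/* Lazily allocate the BO backing shared memory for compute workgroups. */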
struct panfrost_bo *
panfrost_batch_get_shared_memory(struct panfrost_batch *batch,
                unsigned size,
                unsigned workgroup_count)
{
        if (batch->shared_memory) {
                assert(batch->shared_memory->size >= size);
        } else {
                batch->shared_memory = panfrost_batch_create_bo(batch, size,
                                             PAN_BO_INVISIBLE,
                                             PAN_BO_ACCESS_PRIVATE |
                                             PAN_BO_ACCESS_RW |
                                             PAN_BO_ACCESS_VERTEX_TILER);
        }

        return batch->shared_memory;
}

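/* Emit (once per batch) the Bifrost tiler heap and tiler descriptors and
 * return the GPU address of the tiler descriptor, or 0 when there is no
 * geometry to tile. */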
mali_ptr
panfrost_batch_get_bifrost_tiler(struct panfrost_batch *batch, unsigned vertex_count)
{
        if (!vertex_count)
                return 0;

        if (batch->tiler_meta)
                return batch->tiler_meta;

        struct panfrost_device *dev = pan_device(batch->ctx->base.screen);
        struct panfrost_ptr t =
                panfrost_pool_alloc_aligned(&batch->pool, MALI_BIFROST_TILER_HEAP_LENGTH, 64);

        pan_pack(t.cpu, BIFROST_TILER_HEAP, heap) {
                heap.size = dev->tiler_heap->size;
                heap.base = dev->tiler_heap->ptr.gpu;
                heap.bottom = dev->tiler_heap->ptr.gpu;
                heap.top = dev->tiler_heap->ptr.gpu + dev->tiler_heap->size;
        }

        mali_ptr heap = t.gpu;

        t = panfrost_pool_alloc_aligned(&batch->pool, MALI_BIFROST_TILER_LENGTH, 64);
        pan_pack(t.cpu, BIFROST_TILER, tiler) {
                tiler.hierarchy_mask = 0x28;
                tiler.fb_width = batch->key.width;
                tiler.fb_height = batch->key.height;
                tiler.heap = heap;
        }

        batch->tiler_meta = t.gpu;
        return batch->tiler_meta;
}

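/* Lazily allocate a small dummy BO used for Midgard tiler structures. It
 * only needs to stay CPU-mapped on GPUs without hierarchical tiling. */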
struct panfrost_bo *
panfrost_batch_get_tiler_dummy(struct panfrost_batch *batch)
{
        struct panfrost_device *dev = pan_device(batch->ctx->base.screen);

        uint32_t create_flags = 0;

        if (batch->tiler_dummy)
                return batch->tiler_dummy;

        if (!(dev->quirks & MIDGARD_NO_HIER_TILING))
                create_flags = PAN_BO_INVISIBLE;

        batch->tiler_dummy = panfrost_batch_create_bo(batch, 4096,
                                                      create_flags,
                                                      PAN_BO_ACCESS_PRIVATE |
                                                      PAN_BO_ACCESS_RW |
                                                      PAN_BO_ACCESS_VERTEX_TILER |
                                                      PAN_BO_ACCESS_FRAGMENT);
        assert(batch->tiler_dummy);
        return batch->tiler_dummy;
}

mali_ptr
panfrost_batch_reserve_framebuffer(struct panfrost_batch *batch)
{
        struct panfrost_device *dev = pan_device(batch->ctx->base.screen);

        /* If we haven't, reserve space for the thread storage descriptor (or a
         * full framebuffer descriptor on Midgard) */

        if (!batch->framebuffer.gpu) {
                unsigned size = (dev->quirks & IS_BIFROST) ?
                        MALI_LOCAL_STORAGE_LENGTH :
                        (dev->quirks & MIDGARD_SFBD) ?
                        MALI_SINGLE_TARGET_FRAMEBUFFER_LENGTH :
                        MALI_MULTI_TARGET_FRAMEBUFFER_LENGTH;

                batch->framebuffer = panfrost_pool_alloc_aligned(&batch->pool, size, 64);

                /* Tag the pointer */
                if (!(dev->quirks & (MIDGARD_SFBD | IS_BIFROST)))
                        batch->framebuffer.gpu |= MALI_FBD_TAG_IS_MFBD;
        }

        return batch->framebuffer.gpu;
}

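/* Reload the damaged (not redrawn by the app) regions of a surface into the
 * tile buffer by emitting quads covering the inverted damage rectangles,
 * using a blend shader for colour formats that cannot be blended in
 * fixed-function hardware. */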
static void
panfrost_load_surface(struct panfrost_batch *batch, struct pipe_surface *surf, unsigned loc)
{
        if (!surf)
                return;

        struct panfrost_resource *rsrc = pan_resource(surf->texture);
        unsigned level = surf->u.tex.level;

        if (!rsrc->slices[level].initialized)
                return;

        if (!rsrc->damage.inverted_len)
                return;

        /* Clamp the rendering area to the damage extent. The
         * KHR_partial_update() spec states that trying to render outside of
         * the damage region is "undefined behavior", so we should be safe.
         */
        unsigned damage_width = (rsrc->damage.extent.maxx - rsrc->damage.extent.minx);
        unsigned damage_height = (rsrc->damage.extent.maxy - rsrc->damage.extent.miny);

        if (damage_width && damage_height) {
                panfrost_batch_intersection_scissor(batch,
                                                    rsrc->damage.extent.minx,
                                                    rsrc->damage.extent.miny,
                                                    rsrc->damage.extent.maxx,
                                                    rsrc->damage.extent.maxy);
        }

        enum pipe_format format = rsrc->base.format;

        if (loc == FRAG_RESULT_DEPTH) {
                if (!util_format_has_depth(util_format_description(format)))
                        return;

                format = util_format_get_depth_only(format);
        } else if (loc == FRAG_RESULT_STENCIL) {
                if (!util_format_has_stencil(util_format_description(format)))
                        return;

                if (rsrc->separate_stencil) {
                        rsrc = rsrc->separate_stencil;
                        format = rsrc->base.format;
                }

                format = util_format_stencil_only(format);
        }

        enum mali_texture_dimension dim =
                panfrost_translate_texture_dimension(rsrc->base.target);

        struct pan_image img = {
                .width0 = rsrc->base.width0,
                .height0 = rsrc->base.height0,
                .depth0 = rsrc->base.depth0,
                .format = format,
                .dim = dim,
                .modifier = rsrc->modifier,
                .array_size = rsrc->base.array_size,
                .first_level = level,
                .last_level = level,
                .first_layer = surf->u.tex.first_layer,
                .last_layer = surf->u.tex.last_layer,
                .nr_samples = rsrc->base.nr_samples,
                .cubemap_stride = rsrc->cubemap_stride,
                .bo = rsrc->bo,
                .slices = rsrc->slices
        };

        mali_ptr blend_shader = 0;

        if (loc >= FRAG_RESULT_DATA0 && !panfrost_can_fixed_blend(rsrc->base.format)) {
                struct panfrost_blend_shader *b =
                        panfrost_get_blend_shader(batch->ctx, batch->ctx->blit_blend,
                                                  rsrc->base.format, loc - FRAG_RESULT_DATA0,
                                                  NULL);

                struct panfrost_bo *bo = panfrost_batch_create_bo(batch, b->size,
                   PAN_BO_EXECUTE,
                   PAN_BO_ACCESS_PRIVATE |
                   PAN_BO_ACCESS_READ |
                   PAN_BO_ACCESS_FRAGMENT);

                memcpy(bo->ptr.cpu, b->buffer, b->size);
                assert(b->work_count <= 4);

                blend_shader = bo->ptr.gpu | b->first_tag;
        }

        struct panfrost_ptr transfer = panfrost_pool_alloc_aligned(&batch->pool,
                        4 * 4 * 6 * rsrc->damage.inverted_len, 64);

        for (unsigned i = 0; i < rsrc->damage.inverted_len; ++i) {
                float *o = (float *) (transfer.cpu + (4 * 4 * 6 * i));
                struct pan_rect r = rsrc->damage.inverted_rects[i];

                float rect[] = {
                        r.minx, rsrc->base.height0 - r.miny, 0.0, 1.0,
                        r.maxx, rsrc->base.height0 - r.miny, 0.0, 1.0,
                        r.minx, rsrc->base.height0 - r.maxy, 0.0, 1.0,

                        r.maxx, rsrc->base.height0 - r.miny, 0.0, 1.0,
                        r.minx, rsrc->base.height0 - r.maxy, 0.0, 1.0,
                        r.maxx, rsrc->base.height0 - r.maxy, 0.0, 1.0,
                };

                assert(sizeof(rect) == 4 * 4 * 6);
                memcpy(o, rect, sizeof(rect));
        }

        unsigned vertex_count = rsrc->damage.inverted_len * 6;
        if (batch->pool.dev->quirks & IS_BIFROST) {
                mali_ptr tiler =
                        panfrost_batch_get_bifrost_tiler(batch, vertex_count);
                panfrost_load_bifrost(&batch->pool, &batch->scoreboard,
                                      blend_shader,
                                      batch->framebuffer.gpu,
                                      tiler,
                                      transfer.gpu, vertex_count,
                                      &img, loc);
        } else {
                panfrost_load_midg(&batch->pool, &batch->scoreboard,
                                   blend_shader,
                                   batch->framebuffer.gpu,
                                   transfer.gpu, vertex_count,
                                   &img, loc);
        }

        panfrost_batch_add_bo(batch, batch->pool.dev->blit_shaders.bo,
                        PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_READ | PAN_BO_ACCESS_FRAGMENT);
}

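/* Before submission, reload ("wallpaper") any attachments that are written
 * by draws but not cleared, so their previous contents survive the render
 * pass. */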
static void
panfrost_batch_draw_wallpaper(struct panfrost_batch *batch)
{
        panfrost_batch_reserve_framebuffer(batch);

        /* Assume combined. If either depth or stencil is written, they will
         * both be written so we need to be careful for reloading */

        unsigned draws = batch->draws;

        if (draws & PIPE_CLEAR_DEPTHSTENCIL)
                draws |= PIPE_CLEAR_DEPTHSTENCIL;

        /* Mask of buffers which need reload since they are not cleared and
         * they are drawn. (If they are cleared, reload is useless; if they are
         * not drawn and also not cleared, we can generally omit the attachment
         * at the framebuffer descriptor level.) */

        unsigned reload = ~batch->clear & draws;

        for (unsigned i = 0; i < batch->key.nr_cbufs; ++i) {
                if (reload & (PIPE_CLEAR_COLOR0 << i))
                        panfrost_load_surface(batch, batch->key.cbufs[i], FRAG_RESULT_DATA0 + i);
        }

        if (reload & PIPE_CLEAR_DEPTH)
                panfrost_load_surface(batch, batch->key.zsbuf, FRAG_RESULT_DEPTH);

        if (reload & PIPE_CLEAR_STENCIL)
                panfrost_load_surface(batch, batch->key.zsbuf, FRAG_RESULT_STENCIL);
}

static void
panfrost_batch_record_bo(struct hash_entry *entry, unsigned *bo_handles, unsigned idx)
{
        struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
        uint32_t flags = (uintptr_t)entry->data;

        assert(bo->gem_handle > 0);
        bo_handles[idx] = bo->gem_handle;

        /* Update the BO access flags so that panfrost_bo_wait() knows
         * about all pending accesses.
         * We only keep the READ/WRITE info since this is all the BO
         * wait logic cares about.
         * We also preserve existing flags as this batch might not
         * be the first one to access the BO.
         */
        bo->gpu_access |= flags & (PAN_BO_ACCESS_RW);
}

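/* Build and fire a DRM_IOCTL_PANFROST_SUBMIT for one job chain: gather the
 * BO handle list, wire up the in/out syncobjs and, when tracing or sync
 * debugging is enabled, wait for completion and decode the submitted jobs. */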
static int
panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
                            mali_ptr first_job_desc,
                            uint32_t reqs,
                            uint32_t in_sync,
                            uint32_t out_sync)
{
        struct panfrost_context *ctx = batch->ctx;
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_device *dev = pan_device(gallium->screen);
        struct drm_panfrost_submit submit = {0,};
        uint32_t *bo_handles;
        int ret;

        /* If we trace, we always need a syncobj, so make one of our own if we
         * weren't given one to use. Remember that we did so, so we can free it
         * after we're done but preventing double-frees if we were given a
         * syncobj */

        if (!out_sync && dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
                out_sync = ctx->syncobj;

        submit.out_sync = out_sync;
        submit.jc = first_job_desc;
        submit.requirements = reqs;
        if (in_sync) {
                submit.in_syncs = (u64)(uintptr_t)(&in_sync);
                submit.in_sync_count = 1;
        }

        bo_handles = calloc(panfrost_pool_num_bos(&batch->pool) +
                            panfrost_pool_num_bos(&batch->invisible_pool) +
                            batch->bos->entries + 1,
                            sizeof(*bo_handles));
        assert(bo_handles);

        hash_table_foreach(batch->bos, entry)
                panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);

        panfrost_pool_get_bo_handles(&batch->pool, bo_handles + submit.bo_handle_count);
        submit.bo_handle_count += panfrost_pool_num_bos(&batch->pool);
        panfrost_pool_get_bo_handles(&batch->invisible_pool, bo_handles + submit.bo_handle_count);
        submit.bo_handle_count += panfrost_pool_num_bos(&batch->invisible_pool);

        /* Used by all tiler jobs (XXX: skip for compute-only) */
        if (!(reqs & PANFROST_JD_REQ_FS))
                bo_handles[submit.bo_handle_count++] = dev->tiler_heap->gem_handle;

        submit.bo_handles = (u64) (uintptr_t) bo_handles;
        ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
        free(bo_handles);

        if (ret) {
                if (dev->debug & PAN_DBG_MSGS)
                        fprintf(stderr, "Error submitting: %m\n");

                return errno;
        }

        /* Trace the job if we're doing that */
        if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
                /* Wait so we can get errors reported back */
                drmSyncobjWait(dev->fd, &out_sync, 1,
                               INT64_MAX, 0, NULL);

                /* Trace gets priority over sync */
                bool minimal = !(dev->debug & PAN_DBG_TRACE);
                pandecode_jc(submit.jc, dev->quirks & IS_BIFROST, dev->gpu_id, minimal);
        }

        return 0;
}

/* Submit both vertex/tiler and fragment jobs for a batch, possibly with an
 * out_sync corresponding to the later of the two (since there will be an
 * implicit dep between them) */

static int
panfrost_batch_submit_jobs(struct panfrost_batch *batch, uint32_t in_sync, uint32_t out_sync)
{
        bool has_draws = batch->scoreboard.first_job;
        bool has_frag = batch->scoreboard.tiler_dep || batch->clear;
        int ret = 0;

        if (has_draws) {
                ret = panfrost_batch_submit_ioctl(batch, batch->scoreboard.first_job,
                                                  0, in_sync, has_frag ? 0 : out_sync);
                assert(!ret);
        }

        if (has_frag) {
                /* Whether we program the fragment job for draws or not depends
                 * on whether there is any *tiler* activity (so fragment
                 * shaders). If there are draws but entirely RASTERIZER_DISCARD
                 * (say, for transform feedback), we want a fragment job that
                 * *only* clears, since otherwise the tiler structures will be
                 * uninitialized leading to faults (or state leaks) */

                mali_ptr fragjob = panfrost_fragment_job(batch,
                                batch->scoreboard.tiler_dep != 0);
                ret = panfrost_batch_submit_ioctl(batch, fragjob,
                                                  PANFROST_JD_REQ_FS, 0,
                                                  out_sync);
                assert(!ret);
        }

        return ret;
}

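/* Submit a batch: recursively submit its dependencies first, reload
 * non-cleared attachments, emit the framebuffer and tiler structures, kick
 * the vertex/tiler and fragment jobs, then freeze and free the batch. */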
static void
panfrost_batch_submit(struct panfrost_batch *batch,
                      uint32_t in_sync, uint32_t out_sync)
{
        assert(batch);
        struct panfrost_device *dev = pan_device(batch->ctx->base.screen);

        /* Submit the dependencies first. Don't pass along the out_sync since
         * they are guaranteed to terminate sooner */
        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if ((*dep)->batch)
                        panfrost_batch_submit((*dep)->batch, 0, 0);
        }

        int ret;

        /* Nothing to do! */
        if (!batch->scoreboard.first_job && !batch->clear)
                goto out;

        panfrost_batch_draw_wallpaper(batch);

        /* Now that all draws are in, we can finally prepare the
         * FBD for the batch */

        if (batch->framebuffer.gpu && batch->scoreboard.first_job) {
                struct panfrost_context *ctx = batch->ctx;
                struct pipe_context *gallium = (struct pipe_context *) ctx;
                struct panfrost_device *dev = pan_device(gallium->screen);

                if (dev->quirks & MIDGARD_SFBD)
                        panfrost_attach_sfbd(batch, ~0);
                else
                        panfrost_attach_mfbd(batch, ~0);
        }

        mali_ptr polygon_list = panfrost_batch_get_polygon_list(batch,
                MALI_MIDGARD_TILER_MINIMUM_HEADER_SIZE);

        panfrost_scoreboard_initialize_tiler(&batch->pool, &batch->scoreboard, polygon_list);

        ret = panfrost_batch_submit_jobs(batch, in_sync, out_sync);

        if (ret && dev->debug & PAN_DBG_MSGS)
                fprintf(stderr, "panfrost_batch_submit failed: %d\n", ret);

        /* We must reset the damage info of our render targets here even
         * though a damage reset normally happens when the DRI layer swaps
         * buffers. That's because there can be implicit flushes the GL
         * app is not aware of, and those might impact the damage region: if
         * part of the damaged portion is drawn during those implicit flushes,
         * you have to reload those areas before next draws are pushed, and
         * since the driver can't easily know what's been modified by the draws
         * it flushed, the easiest solution is to reload everything.
         */
        for (unsigned i = 0; i < batch->key.nr_cbufs; i++) {
                if (!batch->key.cbufs[i])
                        continue;

                panfrost_resource_set_damage_region(NULL,
                                batch->key.cbufs[i]->texture, 0, NULL);
        }

out:
        panfrost_freeze_batch(batch);
        panfrost_free_batch(batch);
}

/* Submit all batches, applying the out_sync to the currently bound batch */

void
panfrost_flush_all_batches(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
        panfrost_batch_submit(batch, ctx->syncobj, ctx->syncobj);

        hash_table_foreach(ctx->batches, hentry) {
                struct panfrost_batch *batch = hentry->data;
                assert(batch);

                panfrost_batch_submit(batch, ctx->syncobj, ctx->syncobj);
        }

        assert(!ctx->batches->entries);

        /* Collect batch fences before returning */
        panfrost_gc_fences(ctx);
}

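/* Check whether any not-yet-submitted batch still reads or writes the BO. */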
bool
panfrost_pending_batches_access_bo(struct panfrost_context *ctx,
                                   const struct panfrost_bo *bo)
{
        struct panfrost_bo_access *access;
        struct hash_entry *hentry;

        hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = hentry ? hentry->data : NULL;
        if (!access)
                return false;

        if (access->writer && access->writer->batch)
                return true;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch)
                        return true;
        }

        return false;
}

/* We always flush writers. We might also need to flush readers */

void
panfrost_flush_batches_accessing_bo(struct panfrost_context *ctx,
                                    struct panfrost_bo *bo,
                                    bool flush_readers)
{
        struct panfrost_bo_access *access;
        struct hash_entry *hentry;

        hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = hentry ? hentry->data : NULL;
        if (!access)
                return;

        if (access->writer && access->writer->batch)
                panfrost_batch_submit(access->writer->batch, ctx->syncobj, ctx->syncobj);

        if (!flush_readers)
                return;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch)
                        panfrost_batch_submit((*reader)->batch, ctx->syncobj, ctx->syncobj);
        }
}

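/* Derive batch-level requirements (MSAA, depth writes) from the bound
 * rasterizer and depth/stencil state, and mark depth/stencil as drawn when
 * they may be written. */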
void
panfrost_batch_set_requirements(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;

        if (ctx->rasterizer->base.multisample)
                batch->requirements |= PAN_REQ_MSAA;

        if (ctx->depth_stencil && ctx->depth_stencil->base.depth.writemask) {
                batch->requirements |= PAN_REQ_DEPTH_WRITE;
                batch->draws |= PIPE_CLEAR_DEPTH;
        }

        if (ctx->depth_stencil && ctx->depth_stencil->base.stencil[0].enabled)
                batch->draws |= PIPE_CLEAR_STENCIL;
}

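/* Grow the batch's stack size to cover the largest stack needed by any of
 * the currently bound shader stages. */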
void
panfrost_batch_adjust_stack_size(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;

        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i) {
                struct panfrost_shader_state *ss;

                ss = panfrost_get_shader_state(ctx, i);
                if (!ss)
                        continue;

                batch->stack_size = MAX2(batch->stack_size, ss->stack_size);
        }
}

/* Helper to smear a 32-bit color across 128-bit components */

static void
pan_pack_color_32(uint32_t *packed, uint32_t v)
{
        for (unsigned i = 0; i < 4; ++i)
                packed[i] = v;
}

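/* Helper to smear a 64-bit color across 128-bit components */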
static void
pan_pack_color_64(uint32_t *packed, uint32_t lo, uint32_t hi)
{
        for (unsigned i = 0; i < 4; i += 2) {
                packed[i + 0] = lo;
                packed[i + 1] = hi;
        }
}

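/* Pack a clear colour into the 128-bit raw value the hardware expects for
 * the given framebuffer format, special-casing the common formats and
 * falling back to util_pack_color plus replication for the rest. */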
static void
pan_pack_color(uint32_t *packed, const union pipe_color_union *color, enum pipe_format format)
{
        /* Alpha magicked to 1.0 if there is no alpha */

        bool has_alpha = util_format_has_alpha(format);
        float clear_alpha = has_alpha ? color->f[3] : 1.0f;

        /* Packed color depends on the framebuffer format */

        const struct util_format_description *desc =
                util_format_description(format);

        if (util_format_is_rgba8_variant(desc) && desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
                pan_pack_color_32(packed,
                                  ((uint32_t) float_to_ubyte(clear_alpha) << 24) |
                                  ((uint32_t) float_to_ubyte(color->f[2]) << 16) |
                                  ((uint32_t) float_to_ubyte(color->f[1]) <<  8) |
                                  ((uint32_t) float_to_ubyte(color->f[0]) <<  0));
        } else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
                /* First, we convert the components to R5, G6, B5 separately */
                unsigned r5 = _mesa_roundevenf(SATURATE(color->f[0]) * 31.0);
                unsigned g6 = _mesa_roundevenf(SATURATE(color->f[1]) * 63.0);
                unsigned b5 = _mesa_roundevenf(SATURATE(color->f[2]) * 31.0);

                /* Then we pack into a sparse u32. TODO: Why these shifts? */
                pan_pack_color_32(packed, (b5 << 25) | (g6 << 14) | (r5 << 5));
        } else if (format == PIPE_FORMAT_B4G4R4A4_UNORM) {
                /* Convert to 4-bits */
                unsigned r4 = _mesa_roundevenf(SATURATE(color->f[0]) * 15.0);
                unsigned g4 = _mesa_roundevenf(SATURATE(color->f[1]) * 15.0);
                unsigned b4 = _mesa_roundevenf(SATURATE(color->f[2]) * 15.0);
                unsigned a4 = _mesa_roundevenf(SATURATE(clear_alpha) * 15.0);

                /* Pack on *byte* intervals */
                pan_pack_color_32(packed, (a4 << 28) | (b4 << 20) | (g4 << 12) | (r4 << 4));
        } else if (format == PIPE_FORMAT_B5G5R5A1_UNORM) {
                /* Scale as expected but shift oddly */
                unsigned r5 = _mesa_roundevenf(SATURATE(color->f[0]) * 31.0);
                unsigned g5 = _mesa_roundevenf(SATURATE(color->f[1]) * 31.0);
                unsigned b5 = _mesa_roundevenf(SATURATE(color->f[2]) * 31.0);
                unsigned a1 = _mesa_roundevenf(SATURATE(clear_alpha) * 1.0);

                pan_pack_color_32(packed, (a1 << 31) | (b5 << 25) | (g5 << 15) | (r5 << 5));
        } else {
                /* Otherwise, it's generic subject to replication */

                union util_color out = { 0 };
                unsigned size = util_format_get_blocksize(format);

                util_pack_color(color->f, format, &out);

                if (size == 1) {
                        unsigned b = out.ui[0];
                        unsigned s = b | (b << 8);
                        pan_pack_color_32(packed, s | (s << 16));
                } else if (size == 2)
                        pan_pack_color_32(packed, out.ui[0] | (out.ui[0] << 16));
                else if (size == 3 || size == 4)
                        pan_pack_color_32(packed, out.ui[0]);
                else if (size == 6)
                        pan_pack_color_64(packed, out.ui[0], out.ui[1] | (out.ui[1] << 16)); /* RGB16F -- RGBB */
                else if (size == 8)
                        pan_pack_color_64(packed, out.ui[0], out.ui[1]);
                else if (size == 16)
                        memcpy(packed, out.ui, 16);
                else
                        unreachable("Unknown generic format size packing clear colour");
        }
}

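/* Record a clear on the batch: pack the clear colour for each affected
 * render target, stash the depth/stencil clear values, and grow the batch
 * scissor to the whole framebuffer since gallium clears are unscissored. */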
void
panfrost_batch_clear(struct panfrost_batch *batch,
                     unsigned buffers,
                     const union pipe_color_union *color,
                     double depth, unsigned stencil)
{
        struct panfrost_context *ctx = batch->ctx;

        if (buffers & PIPE_CLEAR_COLOR) {
                for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
                        if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
                                continue;

                        enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
                        pan_pack_color(batch->clear_color[i], color, format);
                }
        }

        if (buffers & PIPE_CLEAR_DEPTH) {
                batch->clear_depth = depth;
        }

        if (buffers & PIPE_CLEAR_STENCIL) {
                batch->clear_stencil = stencil;
        }

        batch->clear |= buffers;

        /* Clearing affects the entire framebuffer (by definition -- this is
         * the Gallium clear callback, which clears the whole framebuffer. If
         * the scissor test were enabled from the GL side, the gallium frontend
         * would emit a quad instead and we wouldn't go down this code path) */

        panfrost_batch_union_scissor(batch, 0, 0,
                                     ctx->pipe_framebuffer.width,
                                     ctx->pipe_framebuffer.height);
}

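/* Hash table callbacks: batches are keyed on their framebuffer state. */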
static bool
panfrost_batch_compare(const void *a, const void *b)
{
        return util_framebuffer_state_equal(a, b);
}

static uint32_t
panfrost_batch_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct pipe_framebuffer_state));
}

/* Given a new bounding rectangle (scissor), let the job cover the union of the
 * new and old bounding rectangles */

void
panfrost_batch_union_scissor(struct panfrost_batch *batch,
                             unsigned minx, unsigned miny,
                             unsigned maxx, unsigned maxy)
{
        batch->minx = MIN2(batch->minx, minx);
        batch->miny = MIN2(batch->miny, miny);
        batch->maxx = MAX2(batch->maxx, maxx);
        batch->maxy = MAX2(batch->maxy, maxy);
}

void
panfrost_batch_intersection_scissor(struct panfrost_batch *batch,
                                  unsigned minx, unsigned miny,
                                  unsigned maxx, unsigned maxy)
{
        batch->minx = MAX2(batch->minx, minx);
        batch->miny = MAX2(batch->miny, miny);
        batch->maxx = MIN2(batch->maxx, maxx);
        batch->maxy = MIN2(batch->maxy, maxy);
}

/* Are we currently rendering to the dev (rather than an FBO)? */

bool
panfrost_batch_is_scanout(struct panfrost_batch *batch)
{
        /* If there is no color buffer, it's an FBO */
        if (batch->key.nr_cbufs != 1)
                return false;

        /* If we're so early that no framebuffer has been set yet, assume scanout */
        if (!batch->key.cbufs[0])
                return true;

        return batch->key.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
               batch->key.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
               batch->key.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
}

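/* Set up per-context batch state: the FBO -> batch map and the BO access map
 * used for cross-batch dependency tracking. */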
void
panfrost_batch_init(struct panfrost_context *ctx)
{
        ctx->batches = _mesa_hash_table_create(ctx,
                                               panfrost_batch_hash,
                                               panfrost_batch_compare);
        ctx->accessed_bos = _mesa_hash_table_create(ctx, _mesa_hash_pointer,
                                                    _mesa_key_pointer_equal);
}