Searched +full:- +full:- +full:batch (Results 1 – 25 of 1175) sorted by relevance

/external/mesa3d/src/gallium/drivers/freedreno/
freedreno_batch.c
39 batch_init(struct fd_batch *batch)
41 struct fd_context *ctx = batch->ctx;
44 if (ctx->screen->reorder)
45 util_queue_fence_init(&batch->flush_fence);
48 * have no option but to allocate large worst-case sizes so that
52 if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
57 batch->draw = fd_ringbuffer_new(ctx->pipe, size);
58 if (!batch->nondraw) {
59 batch->binning = fd_ringbuffer_new(ctx->pipe, size);
60 batch->gmem = fd_ringbuffer_new(ctx->pipe, size);
[all …]
freedreno_batch_cache.c
39 * The batch cache provides lookup for mapping pipe_framebuffer_state
40 * to a batch.
45 * Batch Cache hashtable key:
52 * Batch:
54 * Each batch needs to hold a reference to each resource it depends on (ie.
59 * When a resource is destroyed, we need to remove entries in the batch
64 * When a batch has weak reference to no more resources (ie. all the
65 * surfaces it rendered to are destroyed) the batch can be destroyed.
68 * surfaces are destroyed before the batch is submitted.
70 * If (for example), batch writes to zsbuf but that surface is destroyed
[all …]
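
The comment block above captures the batch cache's ownership model: a batch references each resource it reads or writes, while resources keep only weak back-references so cache entries can be invalidated when a surface is destroyed. A minimal C++ sketch of that shape — all names here are hypothetical stand-ins for the fd_batch/fd_resource machinery, not the actual mesa API:

#include <algorithm>
#include <unordered_map>
#include <unordered_set>
#include <vector>

struct Resource;

// Stand-in for fd_batch: tracks every resource the batch depends on.
struct Batch {
  std::vector<Resource*> resources;
};

// Stand-in for fd_resource: weak back-references used only for invalidation.
struct Resource {
  std::unordered_set<Batch*> dependent_batches;
};

// The real cache is keyed by a hash of pipe_framebuffer_state; an int key
// stands in here.
using BatchCache = std::unordered_map<int, Batch*>;

// When a resource is destroyed, drop it from every dependent batch; a batch
// whose weak references have all gone away can itself be destroyed (the
// "remove entries in the batch cache that refer to it" step above).
void on_resource_destroy(Resource* rsc) {
  for (Batch* b : rsc->dependent_batches) {
    auto& rs = b->resources;
    rs.erase(std::remove(rs.begin(), rs.end(), rsc), rs.end());
  }
  rsc->dependent_batches.clear();
}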
freedreno_query_hw.c
1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
44 get_sample(struct fd_batch *batch, struct fd_ringbuffer *ring,
47 struct fd_context *ctx = batch->ctx;
53 if (!batch->sample_cache[idx]) {
55 ctx->hw_sample_providers[idx]->get_sample(batch, ring);
56 fd_hw_sample_reference(ctx, &batch->sample_cache[idx], new_samp);
57 util_dynarray_append(&batch->samples, struct fd_hw_sample *, new_samp);
58 batch->needs_flush = true;
61 fd_hw_sample_reference(ctx, &samp, batch->sample_cache[idx]);
67 clear_sample_cache(struct fd_batch *batch)
[all …]
freedreno_draw.c
1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
46 resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
50 fd_batch_resource_used(batch, fd_resource(prsc), false);
54 resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
58 fd_batch_resource_used(batch, fd_resource(prsc), true);
65 struct fd_batch *batch = ctx->batch;
66 struct pipe_framebuffer_state *pfb = &batch->framebuffer;
74 if (info->indirect && (fd_mesa_debug & FD_DBG_NOINDR)) {
79 if (!info->count_from_stream_output && !info->indirect &&
80 !info->primitive_restart &&
[all …]
freedreno_gmem.c
1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
48 * to restore the previous tiles contents (system mem -> GMEM), and after all
50 * contents (GMEM -> system mem).
56 *   +--<---<-- IB ---<---+---<---+---<---<---<--+
59 *   ------------------------------------------------------
61 *   ------------------------------------------------------
66 * Where the per-tile section handles scissor setup, mem2gmem restore (if
88 gmem->cbuf_base[i] = align(total, 0x4000);
89 total = gmem->cbuf_base[i] + cbuf_cpp[i] * bin_w * bin_h;
94 gmem->zsbuf_base[0] = align(total, 0x4000);
[all …]
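
The (partially elided) diagram above shows one recorded command stream replayed per tile through an IB: each per-tile pass does scissor setup, a mem2gmem restore, the draw IB, then a gmem2mem resolve. A hedged sketch of that loop — hypothetical helper names, not the real fd_gmem_render_tiles() internals:

struct Context;
struct Tile;
struct Batch;

// Hypothetical per-tile emit helpers; the real driver emits ring/IB packets.
void emit_tile_setup(Context* ctx, Tile* tile);  // scissor / window offset
void emit_mem2gmem(Context* ctx, Tile* tile);    // restore: system mem -> GMEM
void emit_draw_ib(Context* ctx, Batch* batch);   // replay recorded draw cmds
void emit_gmem2mem(Context* ctx, Tile* tile);    // resolve: GMEM -> system mem

void render_tiles(Context* ctx, Batch* batch, Tile* tiles, int ntiles) {
  for (int i = 0; i < ntiles; i++) {
    Tile* tile = &tiles[i];
    emit_tile_setup(ctx, tile);
    emit_mem2gmem(ctx, tile);  // only needed if previous contents survive
    emit_draw_ib(ctx, batch);  // the same IB is replayed for every tile
    emit_gmem2mem(ctx, tile);
  }
}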
freedreno_batch.h
62 /* A batch tracks everything about a cmdstream batch/submit, including the
64 * fd_resource-s, etc.
69 unsigned idx; /* index into cache->batches[] */
96 /* is this a non-draw batch (ie compute/blit which has no pfb state)? */
100 bool back_blit : 1; /* only blit so far is resource shadowing back-blit */
110 * color_logic_Op (since those functions are disabled when by-
122 unsigned num_draws; /* number of draws in current batch */
125 * batch. Used at the tile rendering step (fd_gmem_render_tiles(),
132 * if the corresponding bit in ctx->cleared is set.
167 /* next sample offset.. incremented for each sample in the batch/
[all …]
/external/mesa3d/src/mesa/drivers/dri/i965/
intel_batchbuffer.c
44 * Target sizes of the batch and state buffers. We create the initial
49 * should flush. Each time we flush the batch, we recreate both buffers
73 rlist->reloc_count = 0;
74 rlist->reloc_array_size = count;
75 rlist->relocs = malloc(rlist->reloc_array_size *
82 struct intel_screen *screen = brw->screen;
83 struct intel_batchbuffer *batch = &brw->batch;
84 const struct gen_device_info *devinfo = &screen->devinfo;
86 batch->use_shadow_copy = !devinfo->has_llc;
88 if (batch->use_shadow_copy) {
[all …]
genX_blorp_exec.c
42 blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
44 assert(batch->blorp->driver_ctx == batch->driver_batch);
45 struct brw_context *brw = batch->driver_batch;
48 uint32_t *map = brw->batch.map_next;
49 brw->batch.map_next += n;
55 blorp_emit_reloc(struct blorp_batch *batch,
58 assert(batch->blorp->driver_ctx == batch->driver_batch);
59 struct brw_context *brw = batch->driver_batch;
62 if (GEN_GEN < 6 && brw_ptr_in_state_buffer(&brw->batch, location)) {
63 offset = (char *)location - (char *)brw->batch.state.map;
[all …]
gen4_blorp_exec.h
25 dynamic_state_address(struct blorp_batch *batch, uint32_t offset)
27 assert(batch->blorp->driver_ctx == batch->driver_batch);
28 struct brw_context *brw = batch->driver_batch;
31 .buffer = brw->batch.state.bo,
37 instruction_state_address(struct blorp_batch *batch, uint32_t offset)
39 assert(batch->blorp->driver_ctx == batch->driver_batch);
40 struct brw_context *brw = batch->driver_batch;
43 .buffer = brw->cache.bo,
49 blorp_emit_vs_state(struct blorp_batch *batch,
52 assert(batch->blorp->driver_ctx == batch->driver_batch);
[all …]
intel_batchbuffer.h
25 void intel_batchbuffer_free(struct intel_batchbuffer *batch);
35 _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)
52 bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);
56 uint64_t brw_batch_reloc(struct intel_batchbuffer *batch,
61 uint64_t brw_state_reloc(struct intel_batchbuffer *batch,
68 ((uintptr_t)((_batch).map_next - (_batch).batch.map))
87 brw->batch.emit = USED_BATCH(brw->batch);
88 brw->batch.total = n;
96 struct intel_batchbuffer *batch = &brw->batch;
97 unsigned int _n = USED_BATCH(*batch) - batch->emit;
[all …]
/external/mesa3d/src/gallium/winsys/i915/drm/
i915_drm_batchbuffer.c
30 i915_drm_batchbuffer(struct i915_winsys_batchbuffer *batch)
32 return (struct i915_drm_batchbuffer *)batch;
36 i915_drm_batchbuffer_reset(struct i915_drm_batchbuffer *batch)
38 struct i915_drm_winsys *idws = i915_drm_winsys(batch->base.iws);
40 if (batch->bo)
41 drm_intel_bo_unreference(batch->bo);
42 batch->bo = drm_intel_bo_alloc(idws->gem_manager,
44 batch->actual_size,
47 memset(batch->base.map, 0, batch->actual_size);
48 batch->base.ptr = batch->base.map;
[all …]
/external/tensorflow/tensorflow/core/kernels/batching_util/
batch_scheduler_test.cc
7 http://www.apache.org/licenses/LICENSE-2.0
41 Batch<FakeTask> batch;
43 EXPECT_EQ(0, batch.num_tasks());
44 EXPECT_TRUE(batch.empty());
45 EXPECT_EQ(0, batch.size());
46 EXPECT_FALSE(batch.IsClosed());
49 batch.AddTask(std::unique_ptr<FakeTask>(task0));
51 EXPECT_EQ(1, batch.num_tasks());
52 EXPECT_FALSE(batch.empty());
53 EXPECT_EQ(task0->size(), batch.size());
[all …]
shared_batch_scheduler.h
7 http://www.apache.org/licenses/LICENSE-2.0
51 // A batch scheduler for server instances that service multiple request types
52 // (e.g. multiple machine-learned models, or multiple versions of a model served
54 // scheduler multiplexes batches of different kinds of tasks onto a fixed-size
55 // thread pool (each batch contains tasks of a single type), in a carefully
73 // The batch thread pool round-robins through the queues, running one batch
75 // BasicBatchScheduler instance, in the sense that it has maximum batch size and
76 // timeout parameters, which govern when a batch is eligible to be processed.
81 // of the active queues roughly equal the number of batch threads. (The idea is
86 // a task can spend in a queue before being placed in a batch and assigned to a
[all …]
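
The multiplexing this header describes — many per-task-type queues feeding one fixed-size thread pool, serviced round-robin, one batch per visit — reduces to a small dispatch loop. A hedged sketch with hypothetical types (the real scheduler also enforces max batch size, timeout, locking, and queue lifetime rules):

#include <atomic>
#include <deque>
#include <functional>
#include <memory>
#include <vector>

struct FakeBatch {};  // stand-in for a closed Batch<TaskType>

struct TaskQueue {
  std::deque<std::unique_ptr<FakeBatch>> ready;             // closed batches
  std::function<void(std::unique_ptr<FakeBatch>)> process;  // per-queue callback
};

// Each batch thread round-robins through the queues, running at most one
// batch from a queue before moving on, which keeps queues roughly fair.
// Synchronization is omitted; the real scheduler guards the queues.
void BatchThreadLoop(std::vector<TaskQueue>& queues, std::atomic<bool>& stop) {
  size_t next = 0;
  while (!stop.load()) {
    TaskQueue& q = queues[next];
    next = (next + 1) % queues.size();
    if (q.ready.empty()) continue;  // nothing eligible in this queue
    auto batch = std::move(q.ready.front());
    q.ready.pop_front();
    q.process(std::move(batch));    // one batch, then visit the next queue
  }
}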
batch_scheduler.h
7 http://www.apache.org/licenses/LICENSE-2.0
22 // batch, and kicking off processing of a batch on a thread pool it manages.
47 // The abstract superclass for a unit of work to be done as part of a batch.
51 // (b) a thread-safe completion signal (e.g. a Notification);
55 // Items (b), (c) and (d) are typically non-owned pointers to data homed
63 // size of a batch. (A batch's size is the sum of its task sizes.)
67 // A thread-safe collection of BatchTasks, to be executed together in some
70 // At a given time, a batch is either "open" or "closed": an open batch can
71 // accept new tasks; a closed one cannot. A batch is monotonic: initially it is
73 // remains fixed for the remainder of its life. A closed batch cannot be re-
[all …]
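
In usage terms, the open/closed life cycle described above is what batch_scheduler_test.cc exercises: tasks are added while the batch is open, then the batch is closed and its contents and size are frozen. A small sketch against that API — SizedTask is a hypothetical BatchTask subclass in the spirit of the test's FakeTask, and the tensorflow::serving namespace is assumed from this header:

#include <memory>
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"

using tensorflow::serving::Batch;
using tensorflow::serving::BatchTask;

// Item (a) above: a task only has to report its size.
class SizedTask : public BatchTask {
 public:
  explicit SizedTask(size_t size) : size_(size) {}
  size_t size() const override { return size_; }
 private:
  const size_t size_;
};

void Demo() {
  Batch<SizedTask> batch;  // starts open: accepts new tasks
  batch.AddTask(std::unique_ptr<SizedTask>(new SizedTask(3)));
  batch.AddTask(std::unique_ptr<SizedTask>(new SizedTask(5)));
  // batch.size() == 8: a batch's size is the sum of its task sizes.
  batch.Close();  // monotonic: once closed, the batch never reopens
}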
shared_batch_scheduler_test.cc
7 http://www.apache.org/licenses/LICENSE-2.0
43 // Creates a FakeTask of size 'task_size', and calls 'scheduler->Schedule()' on
47 Status status = scheduler->Schedule(&task);
58 return std::unique_ptr<Thread>(Env::Default()->StartThread(
60 start->WaitForNotification();
61 while (!stop->HasBeenNotified()) {
62 env->AdvanceByMicroseconds(10);
63 Env::Default()->SleepForMicroseconds(10);
74 [&queue_0_callback_called](std::unique_ptr<Batch<FakeTask>> batch) {
76 ASSERT_TRUE(batch->IsClosed());
[all …]
/external/grpc-grpc/src/core/lib/transport/
metadata_batch.cc
9 * http://www.apache.org/licenses/LICENSE-2.0
37 GPR_ASSERT((list->head == nullptr) == (list->tail == nullptr));
38 if (!list->head) return;
39 GPR_ASSERT(list->head->prev == nullptr);
40 GPR_ASSERT(list->tail->next == nullptr);
41 GPR_ASSERT((list->head == list->tail) == (list->head->next == nullptr));
44 for (l = list->head; l; l = l->next) {
45 GPR_ASSERT(!GRPC_MDISNULL(l->md));
46 GPR_ASSERT((l->prev == nullptr) == (l == list->head));
47 GPR_ASSERT((l->next == nullptr) == (l == list->tail));
[all …]
/external/mesa3d/src/intel/blorp/
blorp_genX_exec.h
48 blorp_emit_dwords(struct blorp_batch *batch, unsigned n);
51 blorp_emit_reloc(struct blorp_batch *batch,
55 blorp_alloc_dynamic_state(struct blorp_batch *batch,
60 blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
65 blorp_get_workaround_page(struct blorp_batch *batch);
69 blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
75 blorp_flush_range(struct blorp_batch *batch, void *start, size_t size);
78 blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
83 blorp_get_surface_base_address(struct blorp_batch *batch);
87 blorp_emit_urb_config(struct blorp_batch *batch,
[all …]
/external/mesa3d/src/mesa/drivers/dri/i915/
intel_batchbuffer.c
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
43 intel->batch.cpu_map = malloc(intel->maxBatchSize);
44 intel->batch.map = intel->batch.cpu_map;
50 if (intel->batch.last_bo != NULL) {
51 drm_intel_bo_unreference(intel->batch.last_bo);
52 intel->batch.last_bo = NULL;
54 intel->batch.last_bo = intel->batch.bo;
56 intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
57 intel->maxBatchSize, 4096);
59 intel->batch.reserved_space = BATCH_RESERVED;
[all …]
/external/mesa3d/src/gallium/drivers/freedreno/a5xx/
fd5_gmem.c
71 enum pipe_format pformat = psurf->format;
73 rsc = fd_resource(psurf->texture);
75 slice = fd_resource_slice(rsc, psurf->u.tex.level);
82 debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
84 offset = fd_resource_offset(rsc, psurf->u.tex.level,
85 psurf->u.tex.first_layer);
88 stride = gmem->bin_w * rsc->cpp;
89 size = stride * gmem->bin_h;
90 base = gmem->cbuf_base[i];
92 stride = slice->pitch * rsc->cpp;
[all …]
/external/mesa3d/src/gallium/drivers/i915/
i915_batchbuffer.h
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
37 i915_winsys_batchbuffer_space(struct i915_winsys_batchbuffer *batch)
39 return batch->size - (batch->ptr - batch->map);
43 i915_winsys_batchbuffer_check(struct i915_winsys_batchbuffer *batch,
46 return dwords * 4 <= i915_winsys_batchbuffer_space(batch);
50 i915_winsys_batchbuffer_dword_unchecked(struct i915_winsys_batchbuffer *batch,
53 *(unsigned *)batch->ptr = dword;
54 batch->ptr += 4;
58 i915_winsys_batchbuffer_float(struct i915_winsys_batchbuffer *batch,
63 assert (i915_winsys_batchbuffer_space(batch) >= 4);
[all …]
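
The helpers above imply the usual emit pattern: check space once for a whole packet, then write its dwords unchecked. A usage sketch built only from the functions shown — the packet layout and dword values are placeholders, not real i915 opcodes:

#include "i915_batchbuffer.h"  // declares the inline helpers shown above

// Hypothetical 3-dword packet emit.
static void emit_example_packet(struct i915_winsys_batchbuffer *batch) {
  if (!i915_winsys_batchbuffer_check(batch, 3))
    return;  // the real driver would flush the batch and retry here
  i915_winsys_batchbuffer_dword_unchecked(batch, 0x02000000 /* cmd */);
  i915_winsys_batchbuffer_dword_unchecked(batch, 0 /* arg0 */);
  i915_winsys_batchbuffer_dword_unchecked(batch, 0 /* arg1 */);
}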
/external/mesa3d/src/intel/vulkan/
genX_blorp_exec.c
38 blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
40 struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
41 return anv_batch_emit_dwords(&cmd_buffer->batch, n);
45 blorp_emit_reloc(struct blorp_batch *batch,
48 struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
49 assert(cmd_buffer->batch.start <= location &&
50 location < cmd_buffer->batch.end);
51 return anv_batch_emit_reloc(&cmd_buffer->batch, location,
56 blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
59 struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
[all …]
/external/mesa3d/src/gallium/drivers/freedreno/a3xx/
fd3_gmem.c
1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
74 rsc = fd_resource(psurf->texture);
75 pformat = psurf->format;
79 if (rsc->stencil) {
80 rsc = rsc->stencil;
81 pformat = rsc->base.format;
85 slice = fd_resource_slice(rsc, psurf->u.tex.level);
93 debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
95 offset = fd_resource_offset(rsc, psurf->u.tex.level,
96 psurf->u.tex.first_layer);
[all …]
/external/tensorflow/tensorflow/core/kernels/
batch_kernels.cc
7 http://www.apache.org/licenses/LICENSE-2.0
50 // Note that we reduce the concat of k-dimensional tensors into a two
52 // {y0, y1,...,ym-1}, we flatten it to {1, y}, where y = Prod_i(yi).
82 context->allocate_temp(DataTypeToEnum<T>::value, output_shape, output));
83 if (output->NumElements() > 0) {
84 auto output_flat = output->shaped<T, 2>({1, output->NumElements()});
91 ConcatCPU<T>(context->device(), inputs_flat, &output_flat);
98 // tensors along the zeroth dimension, with the ith split having zeroth-
103 // applicable special case and wrote to the outputs. Otherwise acts as a no-op.
116 "Sum of split sizes must not exceed dim0-size of input tensor");
[all …]
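
The flattening noted in Concat() is simple shape arithmetic: each input of shape {y0, y1, ..., ym-1} is viewed as a {1, y} matrix with y = Prod_i(yi), so the k-dimensional concat reduces to a 2-D concat. For example, a {2, 3, 4} tensor flattens to {1, 24}. A sketch of just that step (FlattenForConcat is a hypothetical helper, not the kernel's API):

#include <cstdint>
#include <vector>

// View a k-dimensional shape {y0, y1, ..., ym-1} as {1, y}, y = Prod_i(yi),
// mirroring output->shaped<T, 2>({1, output->NumElements()}) above.
std::vector<int64_t> FlattenForConcat(const std::vector<int64_t>& shape) {
  int64_t y = 1;
  for (int64_t yi : shape) y *= yi;
  return {1, y};
}

// FlattenForConcat({2, 3, 4}) == {1, 24}; concatenating n such inputs is
// then a single 2-D concat over {1, 24} blocks.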
/external/mesa3d/src/gallium/drivers/freedreno/a4xx/
fd4_gmem.c
1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
73 enum pipe_format pformat = psurf->format;
75 rsc = fd_resource(psurf->texture);
80 if (rsc->stencil) {
81 rsc = rsc->stencil;
82 pformat = rsc->base.format;
87 slice = fd_resource_slice(rsc, psurf->u.tex.level);
96 debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
98 offset = fd_resource_offset(rsc, psurf->u.tex.level,
99 psurf->u.tex.first_layer);
[all …]
/external/grpc-grpc/src/core/ext/filters/client_channel/
client_channel.cc
9 * http://www.apache.org/licenses/LICENSE-2.0
81 * CHANNEL-WIDE FUNCTIONS
106 /** incoming resolver result - set by resolver.next() */
137 * non-existing when the callback is run */
161 * - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE.
162 * - Hand over pending picks from old policies during the switch that happens
164 if (chand->lb_policy != nullptr) {
167 chand->lb_policy->CancelMatchingPicksLocked(
172 chand->lb_policy->CancelMatchingPicksLocked(/* mask= */ 0, /* check= */ 0,
180 grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);
[all …]
