Searched refs:fences (Results 1 – 25 of 77) sorted by relevance


/external/libdrm/amdgpu/
amdgpu_cs.c:441 static int amdgpu_ioctl_wait_fences(struct amdgpu_cs_fence *fences, in amdgpu_ioctl_wait_fences() argument
449 amdgpu_device_handle dev = fences[0].context->dev; in amdgpu_ioctl_wait_fences()
456 drm_fences[i].ctx_id = fences[i].context->id; in amdgpu_ioctl_wait_fences()
457 drm_fences[i].ip_type = fences[i].ip_type; in amdgpu_ioctl_wait_fences()
458 drm_fences[i].ip_instance = fences[i].ip_instance; in amdgpu_ioctl_wait_fences()
459 drm_fences[i].ring = fences[i].ring; in amdgpu_ioctl_wait_fences()
460 drm_fences[i].seq_no = fences[i].fence; in amdgpu_ioctl_wait_fences()
464 args.in.fences = (uint64_t)(uintptr_t)drm_fences; in amdgpu_ioctl_wait_fences()
481 int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences, in amdgpu_cs_wait_fences() argument
491 if (!fences || !status || !fence_count) in amdgpu_cs_wait_fences()
[all …]
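For context, a minimal sketch of how a caller might drive the amdgpu_cs_wait_fences() entry point listed above. The context handle, IP types, and sequence numbers are placeholders, and the signature is taken from libdrm's amdgpu.h as matched here, so treat the details as an assumption rather than a recipe:

#include <stdbool.h>
#include <stdint.h>
#include <amdgpu.h>      /* libdrm amdgpu API (install path may differ) */
#include <amdgpu_drm.h>  /* AMDGPU_HW_IP_* defines */

/* Wait up to 1 second for either of two previously submitted IBs to signal;
 * returns the index of the first signaled fence, or -1 on timeout/error. */
static int wait_either_fence(amdgpu_context_handle ctx,
                             uint64_t gfx_seq, uint64_t dma_seq)
{
   struct amdgpu_cs_fence fences[2] = {
      { .context = ctx, .ip_type = AMDGPU_HW_IP_GFX, .ip_instance = 0,
        .ring = 0, .fence = gfx_seq },
      { .context = ctx, .ip_type = AMDGPU_HW_IP_DMA, .ip_instance = 0,
        .ring = 0, .fence = dma_seq },
   };
   uint32_t status = 0, first = 0;

   /* wait_all = false: return as soon as any one fence signals. */
   int r = amdgpu_cs_wait_fences(fences, 2, false,
                                 1000000000ull /* ns */, &status, &first);
   return (r == 0 && status) ? (int)first : -1;
}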
/external/mesa3d/src/vulkan/wsi/
wsi_common.c:152 for (unsigned i = 0; i < ARRAY_SIZE(chain->fences); i++) in wsi_swapchain_finish()
153 chain->wsi->DestroyFence(chain->device, chain->fences[i], &chain->alloc); in wsi_swapchain_finish()
686 if (swapchain->fences[0] == VK_NULL_HANDLE) { in wsi_common_queue_present()
694 &swapchain->fences[0]); in wsi_common_queue_present()
698 wsi->ResetFences(device, 1, &swapchain->fences[0]); in wsi_common_queue_present()
741 result = wsi->QueueSubmit(queue, 1, &submit_info, swapchain->fences[0]); in wsi_common_queue_present()
756 VkFence last = swapchain->fences[2]; in wsi_common_queue_present()
757 swapchain->fences[2] = swapchain->fences[1]; in wsi_common_queue_present()
758 swapchain->fences[1] = swapchain->fences[0]; in wsi_common_queue_present()
759 swapchain->fences[0] = last; in wsi_common_queue_present()
wsi_common_private.h:49 VkFence fences[3]; member
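The three-element fence array in wsi_common_private.h above is rotated on every present so that fences[0] is always the slot to submit and wait on next. A minimal sketch of that rotation, mirroring the wsi_common.c lines above (the standalone function name is hypothetical):

#include <vulkan/vulkan.h>

/* After submitting a present guarded by fences[0], rotate the ring so the
 * oldest fence (the most likely to have signaled) comes back to slot 0. */
static void rotate_present_fences(VkFence fences[3])
{
   VkFence last = fences[2];   /* oldest in-flight fence                  */
   fences[2] = fences[1];
   fences[1] = fences[0];      /* the fence just submitted                */
   fences[0] = last;           /* reused (after a wait/reset) next frame  */
}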
/external/mesa3d/src/util/
u_queue.c:530 struct util_queue_fence *fences = malloc(queue->num_threads * sizeof(*fences)); in util_queue_finish() local
541 util_queue_fence_init(&fences[i]); in util_queue_finish()
542 util_queue_add_job(queue, &barrier, &fences[i], util_queue_finish_execute, NULL); in util_queue_finish()
546 util_queue_fence_wait(&fences[i]); in util_queue_finish()
547 util_queue_fence_destroy(&fences[i]); in util_queue_finish()
553 free(fences); in util_queue_finish()
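util_queue_finish() above drains Mesa's threaded job queue by handing every worker thread a barrier job with its own fence and then waiting on all of them. A rough sketch of the same shape, using only the calls visible in the excerpt; the 5-argument util_queue_add_job() and the two-argument callback signature match this era of Mesa but should be treated as assumptions, and the barrier that makes each thread take exactly one job is omitted here:

#include <stdlib.h>
#include "util/u_queue.h"   /* Mesa in-tree header (path assumed) */

/* Hypothetical stand-in for util_queue_finish_execute(); the real callback
 * blocks on a barrier so that every worker thread picks up one job. */
static void finish_execute(void *job, int thread_index)
{
   (void)job; (void)thread_index;
}

/* Queue one job per worker thread and wait for all of them to run. */
static void drain_queue(struct util_queue *queue, unsigned num_threads)
{
   struct util_queue_fence *fences = malloc(num_threads * sizeof(*fences));

   for (unsigned i = 0; i < num_threads; i++) {
      util_queue_fence_init(&fences[i]);
      util_queue_add_job(queue, NULL, &fences[i], finish_execute, NULL);
   }
   for (unsigned i = 0; i < num_threads; i++) {
      util_queue_fence_wait(&fences[i]);
      util_queue_fence_destroy(&fences[i]);
   }
   free(fences);
}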
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AVR/atomics/
fence.ll:3 ; Checks that atomic fences are simply removed from IR.
4 ; AVR is always singlethreaded so fences do nothing.
/external/deqp/external/vulkancts/modules/vulkan/synchronization/
vktSynchronizationSmokeTests.cpp:545 VkFence fences[2]; member
576 createFences(vkd, device, false, DE_LENGTH_OF_ARRAY(fences), fences); in TestContext()
581 destroyFences(vkd, device, DE_LENGTH_OF_ARRAY(fences), fences); in ~TestContext()
1006 fenceStatus = deviceInterface.getFenceStatus(device, testContext.fences[0]); in testFences()
1012 fenceStatus = deviceInterface.getFenceStatus(device, testContext.fences[1]); in testFences()
1019 VK_CHECK(deviceInterface.queueSubmit(queue, 1, &submitInfo, testContext.fences[0])); in testFences()
1022 waitStatus = deviceInterface.waitForFences(device, 1, &testContext.fences[0], true, 0u); in testFences()
1031 …waitStatus = deviceInterface.waitForFences(device, 1, &testContext.fences[0], true, DEFAULT_TIMEOU… in testFences()
1040 …waitStatus = deviceInterface.waitForFences(device, 1, &testContext.fences[0], true, std::numeric_l… in testFences()
1048 waitStatus = deviceInterface.waitForFences(device, 1, &testContext.fences[1], true, 1); in testFences()
[all …]
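The smoke test above walks a fence through its whole life cycle: unsignaled after creation, attached to a queue submission, then polled and waited on with increasing timeouts. Roughly the same sequence with the plain Vulkan C API (a sketch; the device, queue, and command buffer are assumed to exist):

#include <stdint.h>
#include <vulkan/vulkan.h>

/* Submit one command buffer guarded by 'fence' and wait for it. */
static VkResult submit_and_wait(VkDevice device, VkQueue queue,
                                VkCommandBuffer cmd, VkFence fence)
{
   /* A freshly created or reset fence reports VK_NOT_READY here. */
   VkResult status = vkGetFenceStatus(device, fence);
   if (status != VK_NOT_READY && status != VK_SUCCESS)
      return status;

   const VkSubmitInfo submit = {
      .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
      .commandBufferCount = 1,
      .pCommandBuffers = &cmd,
   };
   VkResult r = vkQueueSubmit(queue, 1, &submit, fence);
   if (r != VK_SUCCESS)
      return r;

   /* A zero timeout only polls; VK_TIMEOUT is normal while the GPU is busy. */
   r = vkWaitForFences(device, 1, &fence, VK_TRUE, 0u);
   if (r == VK_TIMEOUT)
      r = vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
   return r;
}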
/external/llvm/lib/Target/WebAssembly/
WebAssemblyInstrAtomics.td:18 // Atomic fences
21 // TODO: add atomic fences here...
/external/mesa3d/src/gallium/winsys/radeon/drm/
radeon_drm_cs.c:511 if (bo->u.slab.fences[src]->num_cs_references) { in radeon_bo_slab_fence()
512 bo->u.slab.fences[dst] = bo->u.slab.fences[src]; in radeon_bo_slab_fence()
515 radeon_bo_reference(&bo->u.slab.fences[src], NULL); in radeon_bo_slab_fence()
523 struct radeon_bo **new_fences = REALLOC(bo->u.slab.fences, in radeon_bo_slab_fence()
531 bo->u.slab.fences = new_fences; in radeon_bo_slab_fence()
536 bo->u.slab.fences[bo->u.slab.num_fences] = NULL; in radeon_bo_slab_fence()
537 radeon_bo_reference(&bo->u.slab.fences[bo->u.slab.num_fences], fence); in radeon_bo_slab_fence()
radeon_drm_bo.c:82 if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) { in radeon_bo_is_busy()
86 radeon_bo_reference(&bo->u.slab.fences[num_idle], NULL); in radeon_bo_is_busy()
88 memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle], in radeon_bo_is_busy()
89 (bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0])); in radeon_bo_is_busy()
113 radeon_bo_reference(&fence, bo->u.slab.fences[0]); in radeon_bo_wait_idle()
120 if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) { in radeon_bo_wait_idle()
121 radeon_bo_reference(&bo->u.slab.fences[0], NULL); in radeon_bo_wait_idle()
122 memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[1], in radeon_bo_wait_idle()
123 (bo->u.slab.num_fences - 1) * sizeof(bo->u.slab.fences[0])); in radeon_bo_wait_idle()
798 radeon_bo_reference(&bo->u.slab.fences[j], NULL); in radeon_bo_slab_free()
[all …]
radeon_drm_bo.h:52 struct radeon_bo **fences; member
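radeon_bo_is_busy() above shows the recurring winsys idiom for per-buffer fence arrays: release the fences at the front that have already signaled, then memmove the still-busy tail down to index 0. A generic sketch of that idiom; the fence type and the two helpers are hypothetical stand-ins for the driver's own:

#include <stdbool.h>
#include <string.h>

struct fence;                               /* opaque driver fence          */
bool fence_is_busy(struct fence *f);        /* hypothetical helper          */
void fence_release(struct fence **f);       /* drops a reference, NULLs it  */

static void prune_idle_fences(struct fence **fences, unsigned *num_fences)
{
   unsigned num_idle = 0;

   /* Fences are appended in submission order, so the first busy fence
    * means everything after it is still busy too. */
   while (num_idle < *num_fences && !fence_is_busy(fences[num_idle])) {
      fence_release(&fences[num_idle]);
      num_idle++;
   }
   memmove(&fences[0], &fences[num_idle],
           (*num_fences - num_idle) * sizeof(fences[0]));
   *num_fences -= num_idle;
}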
/external/mesa3d/src/gallium/winsys/amdgpu/drm/
amdgpu_bo.c:96 if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false)) in amdgpu_bo_wait()
102 amdgpu_fence_reference(&bo->fences[i], NULL); in amdgpu_bo_wait()
104 memmove(&bo->fences[0], &bo->fences[idle_fences], in amdgpu_bo_wait()
105 (bo->num_fences - idle_fences) * sizeof(*bo->fences)); in amdgpu_bo_wait()
120 amdgpu_fence_reference(&fence, bo->fences[0]); in amdgpu_bo_wait()
133 if (fence_idle && bo->num_fences && bo->fences[0] == fence) { in amdgpu_bo_wait()
134 amdgpu_fence_reference(&bo->fences[0], NULL); in amdgpu_bo_wait()
135 memmove(&bo->fences[0], &bo->fences[1], in amdgpu_bo_wait()
136 (bo->num_fences - 1) * sizeof(*bo->fences)); in amdgpu_bo_wait()
157 amdgpu_fence_reference(&bo->fences[i], NULL); in amdgpu_bo_remove_fences()
[all …]
amdgpu_cs.c:1080 struct amdgpu_fence *bo_fence = (void *)bo->fences[j]; in amdgpu_add_bo_fence_dependencies()
1085 amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]); in amdgpu_add_bo_fence_dependencies()
1097 amdgpu_fence_reference(&bo->fences[j], NULL); in amdgpu_add_bo_fence_dependencies()
1108 struct pipe_fence_handle **fences) in amdgpu_add_fences() argument
1113 REALLOC(bo->fences, in amdgpu_add_fences()
1117 bo->fences = new_fences; in amdgpu_add_fences()
1127 amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL); in amdgpu_add_fences()
1131 fences += drop; in amdgpu_add_fences()
1136 bo->fences[bo->num_fences] = NULL; in amdgpu_add_fences()
1137 amdgpu_fence_reference(&bo->fences[bo->num_fences], fences[i]); in amdgpu_add_fences()
amdgpu_bo.h:113 struct pipe_fence_handle **fences; member
amdgpu_cs.h:259 struct pipe_fence_handle **fences);
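amdgpu_add_fences() above appends to the same kind of per-buffer fence array, growing it with REALLOC and NULL-initializing the new slot before taking a reference. A simplified sketch of that growth path; the types and the fence_reference() helper are hypothetical, and the real code drops the oldest fences instead of failing when allocation falls short:

#include <stdbool.h>
#include <stdlib.h>

struct fence;
void fence_reference(struct fence **dst, struct fence *src);  /* hypothetical */

struct buffer {
   struct fence **fences;
   unsigned num_fences;
   unsigned max_fences;
};

static bool append_fence(struct buffer *bo, struct fence *fence)
{
   if (bo->num_fences >= bo->max_fences) {
      unsigned new_max = bo->max_fences ? bo->max_fences * 2 : 4;
      struct fence **grown = realloc(bo->fences, new_max * sizeof(*grown));
      if (!grown)
         return false;                      /* see caveat in the lead-in */
      bo->fences = grown;
      bo->max_fences = new_max;
   }
   bo->fences[bo->num_fences] = NULL;       /* reference helper expects a NULL slot */
   fence_reference(&bo->fences[bo->num_fences], fence);
   bo->num_fences++;
   return true;
}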
/external/deqp/external/vulkancts/modules/vulkan/wsi/
vktWsiSharedPresentableImageTests.cpp:259 std::vector<vk::VkFence>& fences) in initFences() argument
261 for (size_t ndx = 0; ndx < fences.size(); ndx++) in initFences()
262 fences[ndx] = createFence(vkd, device).disown(); in initFences()
267 std::vector<vk::VkFence>& fences) in deinitFences() argument
269 for (size_t ndx = 0; ndx < fences.size(); ndx++) in deinitFences()
271 if (fences[ndx] != (vk::VkFence)0) in deinitFences()
272 vkd.destroyFence(device, fences[ndx], DE_NULL); in deinitFences()
274 fences[ndx] = (vk::VkFence)0; in deinitFences()
277 fences.clear(); in deinitFences()
vktWsiDisplayTimingTests.cpp:246 std::vector<vk::VkFence>& fences) in initFences() argument
248 for (size_t ndx = 0; ndx < fences.size(); ndx++) in initFences()
249 fences[ndx] = createFence(vkd, device).disown(); in initFences()
254 std::vector<vk::VkFence>& fences) in deinitFences() argument
256 for (size_t ndx = 0; ndx < fences.size(); ndx++) in deinitFences()
258 if (fences[ndx] != (vk::VkFence)0) in deinitFences()
259 vkd.destroyFence(device, fences[ndx], DE_NULL); in deinitFences()
261 fences[ndx] = (vk::VkFence)0; in deinitFences()
264 fences.clear(); in deinitFences()
vktWsiIncrementalPresentTests.cpp:248 std::vector<vk::VkFence>& fences) in initFences() argument
250 for (size_t ndx = 0; ndx < fences.size(); ndx++) in initFences()
251 fences[ndx] = createFence(vkd, device).disown(); in initFences()
256 std::vector<vk::VkFence>& fences) in deinitFences() argument
258 for (size_t ndx = 0; ndx < fences.size(); ndx++) in deinitFences()
260 if (fences[ndx] != (vk::VkFence)0) in deinitFences()
261 vkd.destroyFence(device, fences[ndx], DE_NULL); in deinitFences()
263 fences[ndx] = (vk::VkFence)0; in deinitFences()
266 fences.clear(); in deinitFences()
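All three WSI tests above share the same helper pair: create one fence per frame up front and destroy whatever is still live on teardown. The same pattern with the plain C API looks roughly like this (a sketch, error handling trimmed):

#include <vulkan/vulkan.h>

/* Create 'count' unsignaled fences; stops at the first failure. */
static VkResult init_fences(VkDevice device, VkFence *fences, uint32_t count)
{
   const VkFenceCreateInfo info = { .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO };

   for (uint32_t i = 0; i < count; i++) {
      VkResult r = vkCreateFence(device, &info, NULL, &fences[i]);
      if (r != VK_SUCCESS)
         return r;
   }
   return VK_SUCCESS;
}

/* Destroy every live fence and clear the handles. */
static void deinit_fences(VkDevice device, VkFence *fences, uint32_t count)
{
   for (uint32_t i = 0; i < count; i++) {
      if (fences[i] != VK_NULL_HANDLE)
         vkDestroyFence(device, fences[i], NULL);
      fences[i] = VK_NULL_HANDLE;
   }
}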
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
consecutive-fences.ll:3 ; Make sure we collapse the fences in this case
68 !3 = !DIFile(filename: "consecutive-fences.ll", directory: "")
/external/deqp/external/vulkancts/modules/vulkan/sparse_resources/
vktSparseResourcesQueueBindSparseTests.cpp:134 …aitForFences (const DeviceInterface& vk, const VkDevice device, const std::vector<FenceSp>& fences) in waitForFences() argument
136 …for (std::vector<FenceSp>::const_iterator fenceSpIter = fences.begin(); fenceSpIter != fences.end(… in waitForFences()
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/DeadStoreElimination/
fence.ll:4 ; across release or stronger fences. It's not required
50 ; We DSE stack alloc'ed and byval locations, in the presence of fences.
/external/llvm/test/Transforms/DeadStoreElimination/
fence.ll:4 ; across release or stronger fences. It's not required
50 ; We DSE stack alloc'ed and byval locations, in the presence of fences.
/external/mesa3d/src/intel/vulkan/
anv_batch_chain.c:963 struct drm_i915_gem_exec_fence * fences; member
979 vk_free(alloc, exec->fences); in anv_execbuf_finish()
1081 exec->fences = vk_realloc(alloc, exec->fences, in anv_execbuf_add_syncobj()
1082 new_len * sizeof(*exec->fences), in anv_execbuf_add_syncobj()
1084 if (exec->fences == NULL) in anv_execbuf_add_syncobj()
1090 exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) { in anv_execbuf_add_syncobj()
1600 execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences; in anv_cmd_buffer_execbuf()
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/MemorySSA/
atomic-clobber.ll:52 ; CHECK-LABEL: define void @fences
53 define void @fences(i32* %a) {
/external/llvm/docs/
Atomics.rst:115 equivalent to a Release store. SequentiallyConsistent fences behave as both
243 stores. No fences are required. ``cmpxchg`` and ``atomicrmw`` are required
270 semantics. The precise fences required varies widely by architecture, but for
299 implement Release semantics; store-store fences are generally not exposed to
306 barrier (for fences and operations which both read and write memory).
409 ARM), appropriate fences can be emitted by the AtomicExpand Codegen pass if
436 fences generate an ``MFENCE``, other fences do not cause any code to be
458 * strong atomic accesses -> monotonic accesses + fences by overriding
/external/swiftshader/third_party/llvm-7.0/llvm/docs/
Atomics.rst:115 equivalent to a Release store. SequentiallyConsistent fences behave as both
243 stores. No fences are required. ``cmpxchg`` and ``atomicrmw`` are required
270 semantics. The precise fences required varies widely by architecture, but for
299 implement Release semantics; store-store fences are generally not exposed to
306 barrier (for fences and operations which both read and write memory).
409 ARM), appropriate fences can be emitted by the AtomicExpand Codegen pass if
436 fences generate an ``MFENCE``, other fences do not cause any code to be
458 * strong atomic accesses -> monotonic accesses + fences by overriding
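As both Atomics.rst excerpts note, on x86 only a sequentially consistent fence has to emit an instruction (an MFENCE); weaker fences merely constrain compiler reordering. A small C11 illustration of the two cases; the lowering mentioned in the comments is what the documentation above describes, not something this snippet verifies:

#include <stdatomic.h>

int data;
atomic_int ready;

void publish(int value)
{
   data = value;
   /* Release fence: orders the plain store above before the flag store.
    * On x86 this needs no fence instruction, only a compiler constraint. */
   atomic_thread_fence(memory_order_release);
   atomic_store_explicit(&ready, 1, memory_order_relaxed);
}

void full_barrier(void)
{
   /* Sequentially consistent fence: per the docs above, the one fence
    * that lowers to an MFENCE on x86. */
   atomic_thread_fence(memory_order_seq_cst);
}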
