/external/libdrm/amdgpu/
amdgpu_cs.c
  441  static int amdgpu_ioctl_wait_fences(struct amdgpu_cs_fence *fences,
  449      amdgpu_device_handle dev = fences[0].context->dev;
  456      drm_fences[i].ctx_id = fences[i].context->id;
  457      drm_fences[i].ip_type = fences[i].ip_type;
  458      drm_fences[i].ip_instance = fences[i].ip_instance;
  459      drm_fences[i].ring = fences[i].ring;
  460      drm_fences[i].seq_no = fences[i].fence;
  464      args.in.fences = (uint64_t)(uintptr_t)drm_fences;
  481  int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
  491      if (!fences || !status || !fence_count)
  [all …]
/external/mesa3d/src/vulkan/wsi/ |
wsi_common.c
  152      for (unsigned i = 0; i < ARRAY_SIZE(chain->fences); i++)
  153          chain->wsi->DestroyFence(chain->device, chain->fences[i], &chain->alloc);
  686      if (swapchain->fences[0] == VK_NULL_HANDLE) {
  694          &swapchain->fences[0]);
  698      wsi->ResetFences(device, 1, &swapchain->fences[0]);
  741      result = wsi->QueueSubmit(queue, 1, &submit_info, swapchain->fences[0]);
  756      VkFence last = swapchain->fences[2];
  757      swapchain->fences[2] = swapchain->fences[1];
  758      swapchain->fences[1] = swapchain->fences[0];
  759      swapchain->fences[0] = last;
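The rotation at lines 756-759 above keeps a ring of three per-present fences so the CPU only ever throttles on the oldest in-flight frame. A minimal stand-alone sketch of that pattern, with printf standing in for a real blocking fence wait and all names (fence_t, present_frame, gpu_submit) hypothetical:

```c
#include <stdio.h>

/* Toy fence: just records the frame whose completion it tracks. */
typedef int fence_t;

enum { RING_DEPTH = 3 };          /* matches "VkFence fences[3]" above */
static fence_t ring[RING_DEPTH];  /* ring[0] newest ... ring[2] oldest */

static void fence_wait(fence_t f)
{
    if (f)
        printf("  throttling on frame %d\n", f);  /* real code blocks here */
}

/* "Submit" a frame, associating the recycled fence handle with it. */
static void gpu_submit(fence_t *f, int frame)
{
    *f = frame;
}

static void present_frame(int frame)
{
    /* Wait on the oldest slot so at most RING_DEPTH frames stay in flight. */
    fence_wait(ring[RING_DEPTH - 1]);

    /* Same rotation as lines 756-759: recycle the oldest handle as newest. */
    fence_t recycled = ring[2];
    ring[2] = ring[1];
    ring[1] = ring[0];
    ring[0] = recycled;

    gpu_submit(&ring[0], frame);  /* real code: QueueSubmit(..., fences[0]) */
}

int main(void)
{
    for (int frame = 1; frame <= 5; frame++) {
        printf("present %d\n", frame);
        present_frame(frame);
    }
    return 0;
}
```

Waiting on the oldest slot before reusing it bounds the swapchain at three frames in flight.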
wsi_common_private.h
   49      VkFence fences[3];
/external/mesa3d/src/util/ |
u_queue.c
  530      struct util_queue_fence *fences = malloc(queue->num_threads * sizeof(*fences));
  541          util_queue_fence_init(&fences[i]);
  542          util_queue_add_job(queue, &barrier, &fences[i], util_queue_finish_execute, NULL);
  546          util_queue_fence_wait(&fences[i]);
  547          util_queue_fence_destroy(&fences[i]);
  553      free(fences);
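util_queue_finish drains the queue by pushing one barrier job per worker thread, each carrying its own fence, then waiting on every fence in turn. A self-contained sketch of that idea using pthreads, with a condition-variable fence standing in for util_queue_fence (all names hypothetical):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* A tiny "fence": signaled once by a worker, waited on once by the caller. */
typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             signaled;
} fence_t;

static void fence_init(fence_t *f)
{
    pthread_mutex_init(&f->lock, NULL);
    pthread_cond_init(&f->cond, NULL);
    f->signaled = 0;
}

static void fence_signal(fence_t *f)
{
    pthread_mutex_lock(&f->lock);
    f->signaled = 1;
    pthread_cond_signal(&f->cond);
    pthread_mutex_unlock(&f->lock);
}

static void fence_wait(fence_t *f)
{
    pthread_mutex_lock(&f->lock);
    while (!f->signaled)
        pthread_cond_wait(&f->cond, &f->lock);
    pthread_mutex_unlock(&f->lock);
}

/* Each worker finishes its queued work, then signals its own fence. */
static void *worker(void *arg)
{
    fence_signal((fence_t *)arg);
    return NULL;
}

int main(void)
{
    enum { NUM_THREADS = 4 };
    pthread_t t[NUM_THREADS];
    fence_t *fences = malloc(NUM_THREADS * sizeof(*fences)); /* as in line 530 */

    for (int i = 0; i < NUM_THREADS; i++) {
        fence_init(&fences[i]);
        pthread_create(&t[i], NULL, worker, &fences[i]);
    }
    for (int i = 0; i < NUM_THREADS; i++) {
        fence_wait(&fences[i]);   /* every worker has reached the barrier */
        pthread_join(t[i], NULL);
    }
    free(fences);
    puts("queue drained");
    return 0;
}
```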
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AVR/atomics/ |
fence.ll
    3  ; Checks that atomic fences are simply removed from IR.
    4  ; AVR is always singlethreaded so fences do nothing.
/external/deqp/external/vulkancts/modules/vulkan/synchronization/ |
vktSynchronizationSmokeTests.cpp
   545      VkFence fences[2];
   576      createFences(vkd, device, false, DE_LENGTH_OF_ARRAY(fences), fences);
   581      destroyFences(vkd, device, DE_LENGTH_OF_ARRAY(fences), fences);
  1006      fenceStatus = deviceInterface.getFenceStatus(device, testContext.fences[0]);
  1012      fenceStatus = deviceInterface.getFenceStatus(device, testContext.fences[1]);
  1019      VK_CHECK(deviceInterface.queueSubmit(queue, 1, &submitInfo, testContext.fences[0]));
  1022      waitStatus = deviceInterface.waitForFences(device, 1, &testContext.fences[0], true, 0u);
  1031      …waitStatus = deviceInterface.waitForFences(device, 1, &testContext.fences[0], true, DEFAULT_TIMEOU…
  1040      …waitStatus = deviceInterface.waitForFences(device, 1, &testContext.fences[0], true, std::numeric_l…
  1048      waitStatus = deviceInterface.waitForFences(device, 1, &testContext.fences[1], true, 1);
  [all …]
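The test above exercises the three ways a fence wait can resolve: a zero timeout polls and returns VK_TIMEOUT if the fence is unsignaled, a finite timeout (in nanoseconds) bounds the wait, and an unbounded wait blocks until the fence signals. A compilable sketch of that sequence against a device and queue the caller already owns (their creation is assumed, not shown):

```c
#include <stdint.h>
#include <vulkan/vulkan.h>

/* Submit work guarded by a fence, then wait on it three different ways. */
VkResult submit_and_wait(VkDevice device, VkQueue queue,
                         const VkSubmitInfo *submit)
{
    VkFenceCreateInfo info = {
        .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, /* unsignaled fence */
    };
    VkFence fence;
    VkResult r = vkCreateFence(device, &info, NULL, &fence);
    if (r != VK_SUCCESS)
        return r;

    vkQueueSubmit(queue, 1, submit, fence);

    /* Poll: timeout 0 returns VK_TIMEOUT immediately if still unsignaled. */
    r = vkWaitForFences(device, 1, &fence, VK_TRUE, 0);

    /* Bounded wait: give the GPU one second (timeout is in nanoseconds). */
    if (r == VK_TIMEOUT)
        r = vkWaitForFences(device, 1, &fence, VK_TRUE, 1000000000ull);

    /* Unbounded wait: UINT64_MAX blocks until the fence signals. */
    if (r == VK_TIMEOUT)
        r = vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);

    vkDestroyFence(device, fence, NULL);
    return r;
}
```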
/external/llvm/lib/Target/WebAssembly/ |
WebAssemblyInstrAtomics.td
   18  // Atomic fences
   21  // TODO: add atomic fences here...
/external/mesa3d/src/gallium/winsys/radeon/drm/ |
radeon_drm_cs.c
  511      if (bo->u.slab.fences[src]->num_cs_references) {
  512          bo->u.slab.fences[dst] = bo->u.slab.fences[src];
  515          radeon_bo_reference(&bo->u.slab.fences[src], NULL);
  523      struct radeon_bo **new_fences = REALLOC(bo->u.slab.fences,
  531      bo->u.slab.fences = new_fences;
  536      bo->u.slab.fences[bo->u.slab.num_fences] = NULL;
  537      radeon_bo_reference(&bo->u.slab.fences[bo->u.slab.num_fences], fence);
radeon_drm_bo.c
   82      if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) {
   86      radeon_bo_reference(&bo->u.slab.fences[num_idle], NULL);
   88      memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle],
   89              (bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0]));
  113      radeon_bo_reference(&fence, bo->u.slab.fences[0]);
  120      if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {
  121          radeon_bo_reference(&bo->u.slab.fences[0], NULL);
  122      memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[1],
  123              (bo->u.slab.num_fences - 1) * sizeof(bo->u.slab.fences[0]));
  798      radeon_bo_reference(&bo->u.slab.fences[j], NULL);
  [all …]
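Both the radeon and amdgpu winsyses keep a buffer object's pending fences ordered oldest-first, so a busy check only has to walk the array until the first still-busy fence and memmove the survivors down to the front. A self-contained sketch of that compaction, with an integer "current GPU time" standing in for real fence queries (fence_signaled and compact_fences are hypothetical names):

```c
#include <stdio.h>
#include <string.h>

/* Toy fence: signaled once the GPU clock passes its value. */
static int fence_signaled(int fence, int gpu_time) { return fence <= gpu_time; }

/* Drop signaled fences from the front of an oldest-first array,
 * mirroring the memmove compaction in radeon_bo_is_busy() above. */
static int compact_fences(int *fences, int num_fences, int gpu_time)
{
    int num_idle = 0;

    while (num_idle < num_fences && fence_signaled(fences[num_idle], gpu_time))
        num_idle++;                       /* oldest-first: stop at first busy */

    memmove(&fences[0], &fences[num_idle],
            (num_fences - num_idle) * sizeof(fences[0]));
    return num_fences - num_idle;         /* new fence count */
}

int main(void)
{
    int fences[] = { 3, 5, 9, 12 };       /* submission order = signal order */
    int n = compact_fences(fences, 4, 7); /* GPU has completed up to time 7 */

    printf("%d fences still pending, oldest = %d\n", n, fences[0]);
    return 0;                             /* prints: 2 ... oldest = 9 */
}
```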
radeon_drm_bo.h
   52      struct radeon_bo **fences;
/external/mesa3d/src/gallium/winsys/amdgpu/drm/ |
amdgpu_bo.c
   96      if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
  102          amdgpu_fence_reference(&bo->fences[i], NULL);
  104      memmove(&bo->fences[0], &bo->fences[idle_fences],
  105              (bo->num_fences - idle_fences) * sizeof(*bo->fences));
  120      amdgpu_fence_reference(&fence, bo->fences[0]);
  133      if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
  134          amdgpu_fence_reference(&bo->fences[0], NULL);
  135          memmove(&bo->fences[0], &bo->fences[1],
  136                  (bo->num_fences - 1) * sizeof(*bo->fences));
  157      amdgpu_fence_reference(&bo->fences[i], NULL);
  [all …]
amdgpu_cs.c
  1080      struct amdgpu_fence *bo_fence = (void *)bo->fences[j];
  1085      amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);
  1097      amdgpu_fence_reference(&bo->fences[j], NULL);
  1108      struct pipe_fence_handle **fences)
  1113      REALLOC(bo->fences,
  1117      bo->fences = new_fences;
  1127      amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);
  1131      fences += drop;
  1136      bo->fences[bo->num_fences] = NULL;
  1137      amdgpu_fence_reference(&bo->fences[bo->num_fences], fences[i]);
amdgpu_bo.h
  113      struct pipe_fence_handle **fences;
amdgpu_cs.h
  259      struct pipe_fence_handle **fences);
/external/deqp/external/vulkancts/modules/vulkan/wsi/ |
vktWsiSharedPresentableImageTests.cpp
  259      std::vector<vk::VkFence>& fences)
  261      for (size_t ndx = 0; ndx < fences.size(); ndx++)
  262          fences[ndx] = createFence(vkd, device).disown();
  267      std::vector<vk::VkFence>& fences)
  269      for (size_t ndx = 0; ndx < fences.size(); ndx++)
  271          if (fences[ndx] != (vk::VkFence)0)
  272              vkd.destroyFence(device, fences[ndx], DE_NULL);
  274          fences[ndx] = (vk::VkFence)0;
  277      fences.clear();
vktWsiDisplayTimingTests.cpp
  246      std::vector<vk::VkFence>& fences)
  248      for (size_t ndx = 0; ndx < fences.size(); ndx++)
  249          fences[ndx] = createFence(vkd, device).disown();
  254      std::vector<vk::VkFence>& fences)
  256      for (size_t ndx = 0; ndx < fences.size(); ndx++)
  258          if (fences[ndx] != (vk::VkFence)0)
  259              vkd.destroyFence(device, fences[ndx], DE_NULL);
  261          fences[ndx] = (vk::VkFence)0;
  264      fences.clear();
vktWsiIncrementalPresentTests.cpp
  248      std::vector<vk::VkFence>& fences)
  250      for (size_t ndx = 0; ndx < fences.size(); ndx++)
  251          fences[ndx] = createFence(vkd, device).disown();
  256      std::vector<vk::VkFence>& fences)
  258      for (size_t ndx = 0; ndx < fences.size(); ndx++)
  260          if (fences[ndx] != (vk::VkFence)0)
  261              vkd.destroyFence(device, fences[ndx], DE_NULL);
  263          fences[ndx] = (vk::VkFence)0;
  266      fences.clear();
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/ |
consecutive-fences.ll
    3  ; Make sure we collapse the fences in this case
   68  !3 = !DIFile(filename: "consecutive-fences.ll", directory: "")
/external/deqp/external/vulkancts/modules/vulkan/sparse_resources/ |
vktSparseResourcesQueueBindSparseTests.cpp
  134  …aitForFences (const DeviceInterface& vk, const VkDevice device, const std::vector<FenceSp>& fences)
  136      …for (std::vector<FenceSp>::const_iterator fenceSpIter = fences.begin(); fenceSpIter != fences.end(…
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/DeadStoreElimination/ |
fence.ll
    4  ; across release or stronger fences. It's not required
   50  ; We DSE stack alloc'ed and byval locations, in the presence of fences.
/external/llvm/test/Transforms/DeadStoreElimination/ |
fence.ll
    4  ; across release or stronger fences. It's not required
   50  ; We DSE stack alloc'ed and byval locations, in the presence of fences.
/external/mesa3d/src/intel/vulkan/ |
anv_batch_chain.c
   963      struct drm_i915_gem_exec_fence *fences;
   979      vk_free(alloc, exec->fences);
  1081      exec->fences = vk_realloc(alloc, exec->fences,
  1082                                new_len * sizeof(*exec->fences),
  1084      if (exec->fences == NULL)
  1090      exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) {
  1600      execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences;
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/MemorySSA/ |
atomic-clobber.ll
   52  ; CHECK-LABEL: define void @fences
   53  define void @fences(i32* %a) {
/external/llvm/docs/ |
Atomics.rst
  115  equivalent to a Release store. SequentiallyConsistent fences behave as both
  243  stores. No fences are required. ``cmpxchg`` and ``atomicrmw`` are required
  270  semantics. The precise fences required varies widely by architecture, but for
  299  implement Release semantics; store-store fences are generally not exposed to
  306  barrier (for fences and operations which both read and write memory).
  409  ARM), appropriate fences can be emitted by the AtomicExpand Codegen pass if
  436  fences generate an ``MFENCE``, other fences do not cause any code to be
  458  * strong atomic accesses -> monotonic accesses + fences by overriding
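These excerpts describe how fences compose with atomic accesses: a fence plus a weaker atomic op can stand in for a stronger one (line 458), and on x86 only SequentiallyConsistent fences emit MFENCE (line 436). A minimal C11 illustration of the release-fence-plus-relaxed-store pattern the docs discuss, assuming a libc with C11 threads (threads.h):

```c
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_int data;   /* payload, published by the writer */
static atomic_int ready;  /* flag the reader spins on         */

static int writer(void *arg)
{
    (void)arg;
    atomic_store_explicit(&data, 42, memory_order_relaxed);

    /* Release fence + relaxed store publishes `data`, comparable in
     * effect to a single release store of the flag. */
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&ready, 1, memory_order_relaxed);
    return 0;
}

static int reader(void *arg)
{
    (void)arg;
    while (!atomic_load_explicit(&ready, memory_order_relaxed))
        ;  /* spin until the flag is published */

    /* Acquire fence pairs with the writer's release fence, so the
     * relaxed load of `data` below is guaranteed to see 42. */
    atomic_thread_fence(memory_order_acquire);
    printf("data = %d\n", atomic_load_explicit(&data, memory_order_relaxed));
    return 0;
}

int main(void)
{
    thrd_t w, r;
    thrd_create(&w, writer, NULL);
    thrd_create(&r, reader, NULL);
    thrd_join(w, NULL);
    thrd_join(r, NULL);
    return 0;
}
```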
/external/swiftshader/third_party/llvm-7.0/llvm/docs/ |
Atomics.rst
  115  equivalent to a Release store. SequentiallyConsistent fences behave as both
  243  stores. No fences are required. ``cmpxchg`` and ``atomicrmw`` are required
  270  semantics. The precise fences required varies widely by architecture, but for
  299  implement Release semantics; store-store fences are generally not exposed to
  306  barrier (for fences and operations which both read and write memory).
  409  ARM), appropriate fences can be emitted by the AtomicExpand Codegen pass if
  436  fences generate an ``MFENCE``, other fences do not cause any code to be
  458  * strong atomic accesses -> monotonic accesses + fences by overriding