
Searched refs:aligned_size (Results 1 – 23 of 23) sorted by relevance

/external/v8/src/base/platform/
platform-fuchsia.cc
81 size_t aligned_size = RoundUp(size, page_size); in Allocate() local
83 if (aligned_size != request_size) { in Allocate()
84 DCHECK_LT(aligned_size, request_size); in Allocate()
85 size_t suffix_size = request_size - aligned_size; in Allocate()
87 reinterpret_cast<uintptr_t>(aligned_base + aligned_size), in Allocate()
92 DCHECK(aligned_size == request_size); in Allocate()
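The v8/Fuchsia lines above round the requested byte count up to a whole number of pages and release any unused suffix of the original placement reservation. A minimal sketch of that round-up step, assuming a 4 KiB page size and a hypothetical RoundUp helper (not the v8 one):

#include <cassert>
#include <cstddef>

// Round size up to the next multiple of alignment (works for any non-zero alignment).
static size_t RoundUp(size_t size, size_t alignment) {
  return ((size + alignment - 1) / alignment) * alignment;
}

int main() {
  const size_t page_size = 4096;                      // assumed page size
  const size_t size = 10000;                          // caller-requested bytes
  const size_t aligned_size = RoundUp(size, page_size);
  assert(aligned_size == 3 * page_size);              // 10000 -> 12288
  assert(aligned_size % page_size == 0);
  // In the snippet, anything reserved beyond aligned_size (the suffix) is unmapped.
  return 0;
}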
/external/libvpx/libvpx/vpx_mem/
vpx_mem.c
59 const uint64_t aligned_size = get_aligned_malloc_size(size, align); in vpx_memalign() local
60 if (!check_size_argument_overflow(1, aligned_size)) return NULL; in vpx_memalign()
62 addr = malloc((size_t)aligned_size); in vpx_memalign()
/external/libaom/libaom/aom_mem/
aom_mem.c
51 const size_t aligned_size = GetAlignedMallocSize(size, align); in aom_memalign() local
53 if (!check_size_argument_overflow(1, aligned_size)) return NULL; in aom_memalign()
55 void *const addr = malloc(aligned_size); in aom_memalign()
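Both vpx_memalign() and aom_memalign() above compute an over-allocation size first, reject it if it would overflow, and then align the pointer returned by malloc(). A sketch of that over-allocate-and-align pattern with illustrative names (my_memalign/my_free are not the libvpx or libaom API; like those libraries, the back-pointer below remembers the original malloc() block so it can be freed later):

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Over-allocate by (align - 1) plus room for a back-pointer, round the address
// up to `align` (a power of two), and stash the original malloc() block just
// before the aligned block so it can be released again.
static void* my_memalign(size_t size, size_t align) {
  if (size > SIZE_MAX - align - sizeof(void*)) return nullptr;  // padded request would overflow
  void* raw = malloc(size + align - 1 + sizeof(void*));
  if (raw == nullptr) return nullptr;
  uintptr_t addr = reinterpret_cast<uintptr_t>(raw) + sizeof(void*);
  addr = (addr + align - 1) & ~static_cast<uintptr_t>(align - 1);
  void** aligned = reinterpret_cast<void**>(addr);
  aligned[-1] = raw;                                            // remember malloc() block
  return aligned;
}

static void my_free(void* p) {
  if (p != nullptr) free(static_cast<void**>(p)[-1]);
}

int main() {
  void* p = my_memalign(1000, 64);
  assert(p != nullptr && reinterpret_cast<uintptr_t>(p) % 64 == 0);
  my_free(p);
  return 0;
}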
/external/google-breakpad/src/client/
minidump_file_writer.cc
223 size_t aligned_size = (size + 7) & ~7; // 64-bit alignment in Allocate() local
225 if (position_ + aligned_size > size_) { in Allocate()
226 size_t growth = aligned_size; in Allocate()
241 position_ += static_cast<MDRVA>(aligned_size); in Allocate()
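The Breakpad line uses the classic mask idiom: for a power-of-two alignment A, (size + A - 1) & ~(A - 1) rounds size up to the next multiple of A, so (size + 7) & ~7 gives 8-byte (64-bit) alignment. A small self-check:

#include <cassert>
#include <cstddef>

// Round up to a multiple of 8 using the mask trick; only valid because 8 is a
// power of two.
static size_t AlignUp8(size_t size) {
  return (size + 7) & ~static_cast<size_t>(7);
}

int main() {
  assert(AlignUp8(0) == 0);
  assert(AlignUp8(1) == 8);
  assert(AlignUp8(8) == 8);
  assert(AlignUp8(13) == 16);
  return 0;
}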
/external/libchrome/base/files/
memory_mapped_file_posix.cc
53 size_t aligned_size = 0; in MapFileRegionToMemory() local
57 &aligned_size, in MapFileRegionToMemory()
68 map_size = aligned_size; in MapFileRegionToMemory()
memory_mapped_file.cc
119 size_t* aligned_size, in CalculateVMAlignedBoundaries() argument
126 *aligned_size = (size + *offset + mask) & ~mask; in CalculateVMAlignedBoundaries()
memory_mapped_file.h
113 size_t* aligned_size,
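mmap() only accepts file offsets that are multiples of the VM page size, so the libchrome helper above rounds the requested region outward: the start is rounded down to a page boundary, the size grows by the bytes that rounding added, and the returned offset records where the caller's data begins inside the mapping. A sketch of that computation, assuming a fixed 4 KiB page size:

#include <cassert>
#include <cstddef>
#include <cstdint>

static void CalculateVMAlignedBoundaries(int64_t start, size_t size,
                                         int64_t* aligned_start,
                                         size_t* aligned_size,
                                         int32_t* offset) {
  const size_t mask = 4096 - 1;                     // assumed page size - 1
  *offset = static_cast<int32_t>(start % 4096);     // bytes past the page start
  *aligned_start = start - *offset;                 // round start down to a page
  *aligned_size = (size + *offset + mask) & ~mask;  // grow, then round up to pages
}

int main() {
  int64_t aligned_start = 0;
  size_t aligned_size = 0;
  int32_t offset = 0;
  CalculateVMAlignedBoundaries(/*start=*/5000, /*size=*/100,
                               &aligned_start, &aligned_size, &offset);
  assert(aligned_start == 4096 && offset == 904 && aligned_size == 4096);
  return 0;
}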
/external/tensorflow/tensorflow/lite/experimental/micro/
simple_tensor_allocator.cc
153 size_t aligned_size = (next_free - current_data); in AllocateMemory() local
154 if ((data_size_ + aligned_size) > data_size_max_) { in AllocateMemory()
158 data_size_ += aligned_size; in AllocateMemory()
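The micro tensor allocator above is a bump allocator: it advances a cursor through a fixed arena, padding each allocation so the next block stays aligned, and fails when the padded request would exceed the arena's capacity. A sketch of that pattern with illustrative names (not the TensorFlow Lite Micro API):

#include <cassert>
#include <cstddef>
#include <cstdint>

class BumpAllocator {
 public:
  BumpAllocator(uint8_t* arena, size_t capacity)
      : arena_(arena), capacity_(capacity), used_(0) {}

  // Returns an `alignment`-aligned block of `size` bytes, or nullptr when the
  // padded request no longer fits. `alignment` must be a power of two.
  uint8_t* Allocate(size_t size, size_t alignment) {
    const uintptr_t current = reinterpret_cast<uintptr_t>(arena_) + used_;
    const uintptr_t aligned =
        (current + alignment - 1) & ~static_cast<uintptr_t>(alignment - 1);
    const size_t aligned_size = (aligned - current) + size;  // padding + payload
    if (used_ + aligned_size > capacity_) return nullptr;    // arena exhausted
    used_ += aligned_size;
    return reinterpret_cast<uint8_t*>(aligned);
  }

 private:
  uint8_t* arena_;
  size_t capacity_;
  size_t used_;
};

int main() {
  alignas(16) static uint8_t arena[256];
  BumpAllocator allocator(arena, sizeof(arena));
  uint8_t* a = allocator.Allocate(10, 16);
  uint8_t* b = allocator.Allocate(100, 16);
  assert(a != nullptr && b != nullptr);
  assert(reinterpret_cast<uintptr_t>(b) % 16 == 0);
  assert(allocator.Allocate(200, 16) == nullptr);  // would overflow the arena
  return 0;
}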
/external/mesa3d/src/mesa/main/
marshal.h
58 const size_t aligned_size = ALIGN(size, 8); in _mesa_glthread_allocate_command() local
66 next->used += aligned_size; in _mesa_glthread_allocate_command()
68 cmd_base->cmd_size = aligned_size; in _mesa_glthread_allocate_command()
/external/python/cpython2/Modules/_ctypes/libffi/src/microblaze/
ffi.c
77 int aligned_size = WORD_ALIGN(size); in ffi_prep_args() local
80 stack_args_p += aligned_size; in ffi_prep_args()
133 memcpy(addr, value, aligned_size); in ffi_prep_args()
/external/libffi/src/microblaze/
ffi.c
77 int aligned_size = WORD_ALIGN(size); in ffi_prep_args() local
80 stack_args_p += aligned_size; in ffi_prep_args()
133 memcpy(addr, value, aligned_size); in ffi_prep_args()
/external/compiler-rt/lib/asan/
asan_poisoning.h
39 ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, in FastPoisonShadow() argument
44 aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1; in FastPoisonShadow()
asan_poisoning.cc
296 uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1); in PoisonAlignedStackMemory() local
297 PoisonShadow(addr, aligned_size, in PoisonAlignedStackMemory()
299 if (size == aligned_size) in PoisonAlignedStackMemory()
301 s8 end_offset = (s8)(size - aligned_size); in PoisonAlignedStackMemory()
302 s8* shadow_end = (s8*)MemToShadow(addr + aligned_size); in PoisonAlignedStackMemory()
asan_globals.cc
64 uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY); in PoisonRedZones() local
65 FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size, in PoisonRedZones()
67 if (g.size != aligned_size) { in PoisonRedZones()
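AddressSanitizer's shadow memory describes application memory in 8-byte granules (SHADOW_GRANULARITY), so the poisoning code above splits a region into whole granules (size rounded down with size & ~(SHADOW_GRANULARITY - 1)) and a possible partial last granule that needs a special shadow value. A sketch of just that size split, with the granularity assumed to be 8:

#include <cassert>
#include <cstddef>

int main() {
  const size_t kGranularity = 8;  // stand-in for SHADOW_GRANULARITY
  const size_t size = 29;

  const size_t aligned_size = size & ~(kGranularity - 1);  // round down: 24
  const size_t tail = size - aligned_size;                 // partial granule: 5 bytes

  assert(aligned_size == 24 && tail == 5);
  // The first aligned_size bytes map onto whole shadow granules; the final
  // shadow byte then records that only `tail` bytes of the last granule are
  // addressable (cf. the end_offset handling in PoisonAlignedStackMemory above).
  return 0;
}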
/external/skqp/src/core/
SkMask.cpp
41 size_t aligned_size = SkSafeMath::Align4(size); in AllocImage() local
46 return static_cast<uint8_t*>(sk_malloc_flags(aligned_size, flags)); in AllocImage()
/external/skia/src/core/
SkMask.cpp
41 size_t aligned_size = SkSafeMath::Align4(size); in AllocImage() local
46 return static_cast<uint8_t*>(sk_malloc_flags(aligned_size, flags)); in AllocImage()
/external/libchrome/base/
pickle.cc
44 size_t aligned_size = bits::Align(size, sizeof(uint32_t)); in Advance() local
45 if (end_index_ - read_index_ < aligned_size) { in Advance()
48 read_index_ += aligned_size; in Advance()
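base::Pickle pads every payload to a multiple of sizeof(uint32_t) so the next field header stays 4-byte aligned; a reader therefore advances its cursor by the aligned size, not the raw size, and checks that it stays inside the buffer. A minimal sketch of that cursor logic (not the Chromium API):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Advance a read cursor by `size` bytes rounded up to 4, refusing to run past
// `end_index`.
static bool Advance(size_t* read_index, size_t end_index, size_t size) {
  const size_t aligned_size =
      (size + sizeof(uint32_t) - 1) & ~(sizeof(uint32_t) - 1);
  if (end_index - *read_index < aligned_size) return false;  // not enough bytes left
  *read_index += aligned_size;
  return true;
}

int main() {
  size_t read_index = 0;
  const size_t end_index = 16;
  assert(Advance(&read_index, end_index, 5));   // 5 -> 8 bytes consumed
  assert(read_index == 8);
  assert(Advance(&read_index, end_index, 7));   // 7 -> 8 bytes consumed
  assert(!Advance(&read_index, end_index, 1));  // buffer exhausted
  return 0;
}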
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
cudnn_conv_algorithm_picker.cc
192 size_t aligned_size = buffer.size() / 4 * 4; in PickBestAlgorithm() local
193 stream.ThenMemset32(&buffer, bits, aligned_size); in PickBestAlgorithm()
196 static_cast<char*>(buffer.opaque()) + aligned_size, left_over_bytes); in PickBestAlgorithm()
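The XLA snippet fills a scratch buffer with a 32-bit pattern, so it rounds the byte count down to a multiple of 4 for the word-wise fill and handles the 0-3 leftover bytes separately. A CPU-side sketch of that split (plain loops standing in for stream.ThenMemset32 and the device-side handling of the tail):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

int main() {
  std::vector<uint8_t> buffer(11, 0xFF);
  const uint32_t bits = 0xDEADBEEF;

  const size_t aligned_size = buffer.size() / 4 * 4;            // round down: 8 bytes
  const size_t left_over_bytes = buffer.size() - aligned_size;  // tail: 3 bytes

  // Word-wise fill of the aligned prefix.
  for (size_t i = 0; i < aligned_size; i += 4)
    std::memcpy(&buffer[i], &bits, sizeof(bits));
  // Byte-wise handling of the tail.
  std::memset(buffer.data() + aligned_size, 0, left_over_bytes);

  assert(aligned_size == 8 && left_over_bytes == 3);
  assert(buffer[10] == 0);
  return 0;
}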
/external/mesa3d/src/gallium/drivers/r600/
compute_memory_pool.c
664 int64_t aligned_size = pool->size_in_dw; in compute_memory_transfer() local
679 &(struct pipe_box) { .width = aligned_size * 4, in compute_memory_transfer()
687 &(struct pipe_box) { .width = aligned_size * 4, in compute_memory_transfer()
/external/libcxxabi/src/
cxa_exception.cpp
169 constexpr size_t aligned_size = in get_cxa_exception_offset() local
171 constexpr size_t offset = aligned_size - excp_size; in get_cxa_exception_offset()
/external/mesa3d/src/gallium/winsys/amdgpu/drm/
amdgpu_bo.c
1398 uint64_t aligned_size = align64(size, ws->info.gart_page_size); in amdgpu_bo_from_ptr() local
1405 aligned_size, &buf_handle)) in amdgpu_bo_from_ptr()
1409 aligned_size, 1 << 12, 0, &va, &va_handle, 0)) in amdgpu_bo_from_ptr()
1412 if (amdgpu_bo_va_op(buf_handle, 0, aligned_size, va, 0, AMDGPU_VA_OP_MAP)) in amdgpu_bo_from_ptr()
1428 ws->allocated_gtt += aligned_size; in amdgpu_bo_from_ptr()
/external/mesa3d/src/amd/vulkan/
si_cmd_buffer.c
1269 …uint64_t aligned_size = ((va + size + SI_CPDMA_ALIGNMENT -1) & ~(SI_CPDMA_ALIGNMENT - 1)) - aligne… in si_cp_dma_prefetch() local
1272 aligned_size, CP_DMA_USE_L2); in si_cp_dma_prefetch()
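The radv prefetch above aligns a GPU virtual-address range rather than a bare size: the start address is rounded down and the end address rounded up to SI_CPDMA_ALIGNMENT, and aligned_size is the distance between the two, so the aligned range always covers [va, va + size). A sketch assuming a 32-byte alignment value:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kAlign = 32;  // stand-in for SI_CPDMA_ALIGNMENT
  const uint64_t va = 1000;
  const uint64_t size = 100;

  const uint64_t aligned_va = va & ~(kAlign - 1);                         // round start down: 992
  const uint64_t aligned_end = (va + size + kAlign - 1) & ~(kAlign - 1);  // round end up: 1120
  const uint64_t aligned_size = aligned_end - aligned_va;                 // 128

  assert(aligned_va <= va && aligned_end >= va + size);
  assert(aligned_size == 128 && aligned_size % kAlign == 0);
  return 0;
}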
/external/vulkan-validation-layers/tests/
layer_validation_tests.cpp
10455 …VkDeviceSize aligned_size = ((align_mod == 0) ? memory_reqs.size : (memory_reqs.size + memory_reqs… in TEST_F() local
10456 memory_info.allocationSize = aligned_size * 2; in TEST_F()
10464 err = vkBindImageMemory(m_device->device(), image2, image_memory, aligned_size); in TEST_F()
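The Vulkan test above places two images inside one VkDeviceMemory allocation: the size from VkMemoryRequirements is rounded up to the reported alignment, the allocation is made twice that big, and the second image is bound at the aligned offset. An arithmetic sketch with assumed example values (plain integers standing in for VkDeviceSize and the Vulkan calls):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t required_size = 1000;  // assumed memory_reqs.size
  const uint64_t alignment = 256;       // assumed memory_reqs.alignment

  const uint64_t align_mod = required_size % alignment;
  const uint64_t aligned_size =
      (align_mod == 0) ? required_size
                       : required_size + (alignment - align_mod);

  const uint64_t allocation_size = aligned_size * 2;  // room for both images
  const uint64_t second_image_offset = aligned_size;  // offset passed to vkBindImageMemory

  assert(aligned_size == 1024 && allocation_size == 2048);
  assert(second_image_offset % alignment == 0);
  return 0;
}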