/external/tensorflow/tensorflow/compiler/xla/service/
D | hlo_schedule_test.cc |
     60  return ShapeUtil::ByteSizeOf(buffer.shape());  in TEST_F()
     94  return ShapeUtil::ByteSizeOf(buffer.shape());  in TEST_F()
    143  return ShapeUtil::ByteSizeOf(buffer.shape());  in TEST_F()
    187  return ShapeUtil::ByteSizeOf(buffer.shape());  in TEST_F()
    248  return ShapeUtil::ByteSizeOf(buffer.shape(),  in TEST_F()
    317  return ShapeUtil::ByteSizeOf(buffer.shape(),  in TEST_F()
|
D | hlo_memory_scheduler_test.cc |
     72  return ShapeUtil::ByteSizeOf(buffer.shape());  in TEST_F()
    123  return ShapeUtil::ByteSizeOf(buffer.shape(), /*pointer_size=*/8);  in TEST_F()
    181  return ShapeUtil::ByteSizeOf(buffer.shape(),  in TEST_F()
    232  return ShapeUtil::ByteSizeOf(buffer.shape(), 2);  in TEST_F()
    285  return ShapeUtil::ByteSizeOf(buffer.shape());  in TEST_F()
|
D | hlo_input_output_alias_config_test.cc |
    170  return ShapeUtil::ByteSizeOf(shape);  in TEST_F()
    196  return ShapeUtil::ByteSizeOf(shape);  in TEST_F()
|
D | hlo_rematerialization_test.cc |
    140  static int64 ByteSizeOf(const Shape& shape) {  in ByteSizeOf() function in xla::__anon7066800c0111::HloRematerializationTest
    141  return ShapeUtil::ByteSizeOf(shape, sizeof(void*));  in ByteSizeOf()
    148  [](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },  in RunHloRematerialization()
    151  HloRematerialization remat(ByteSizeOf, memory_limit_bytes,  in RunHloRematerialization()
    393  /*memory_limit_bytes=*/4 * ByteSizeOf(vec1024_shape_), module.get()));  in TEST_F()
|
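The scheduler and rematerialization tests listed above all hand in the same kind of size callback. A minimal sketch of that pattern, assuming the XLA headers in this tree are on the include path (the name ShapeSizeBytes is invented here for illustration):

    #include "tensorflow/compiler/xla/service/buffer_value.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Byte size of a buffer's shape, counting embedded pointers (e.g. the
    // tuple index table) at the host pointer width, as the test entries
    // above do with sizeof(void*).
    static int64_t ShapeSizeBytes(const xla::BufferValue& buffer) {
      return xla::ShapeUtil::ByteSizeOf(buffer.shape(), sizeof(void*));
    }

A callback of this shape is what the ScheduleModule and HloRematerialization call sites in the listings above accept.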
D | hlo_execution_profile_test.cc | 51 return ShapeUtil::ByteSizeOf(shape, pointer_size); in TEST_F()
|
D | optimize_input_output_buffer_alias_test.cc | 41 return ShapeUtil::ByteSizeOf(shape); in OptimizeInputOutputBufferAliasTest()
|
D | generic_transfer_manager.cc | 173 return ShapeUtil::ByteSizeOf(shape, pointer_size_); in GetByteSizeRequirement()
|
D | instruction_fusion.cc |
     51  ShapeUtil::ByteSizeOf(instruction.operand(0)->shape()) <  in IsAlwaysDuplicable()
     52  ShapeUtil::ByteSizeOf(instruction.shape());  in IsAlwaysDuplicable()
|
D | buffer_assignment_test.cc |
    1891  ScheduleModule(module, ByteSizeOf).ConsumeValueOrDie();  in RunBufferAssignment()
    1894  ByteSizeOf,  in RunBufferAssignment()
    1901  static int64 ByteSizeOf(const BufferValue& buffer) {  in ByteSizeOf() function in xla::__anon3198ac380111::WhileBufferAssignmentTest
    1902  return ShapeUtil::ByteSizeOf(buffer.shape(), sizeof(void*));  in ByteSizeOf()
    2202  return ShapeUtil::ByteSizeOf(buffer.shape(),  in TEST_F()
    2430  ScheduleModule(module.get(), ByteSizeOf).ConsumeValueOrDie();  in TEST_F()
    2448  ByteSizeOf, [](LogicalBuffer::Color) { return 1; },  in TEST_F()
|
D | hlo_module_test.cc | 299 return ShapeUtil::ByteSizeOf(buffer.shape()); in TEST_F()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | fusion_merger.cc |
     77  bytes += ShapeUtil::ByteSizeOf(user->shape());  in CalculateBytesReadByFusionParameter()
     81  bytes += ShapeUtil::ByteSizeOf(instruction->shape());  in CalculateBytesReadByFusionParameter()
    107  bytes_written += ShapeUtil::ByteSizeOf(operand->shape());  in GetCurrentBytesTransferred()
    111  ShapeUtil::ByteSizeOf(fusion->fused_expression_root()->shape());  in GetCurrentBytesTransferred()
|
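fusion_merger.cc above estimates memory traffic by summing ByteSizeOf over the shapes involved. A hedged sketch of that accounting style, using only the public HloInstruction accessors (operands(), shape()) rather than the merger's actual helpers:

    #include "tensorflow/compiler/xla/service/hlo_instruction.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Rough bytes-read estimate: sum the dense byte size of every operand.
    // This mirrors the += pattern in CalculateBytesReadByFusionParameter and
    // GetCurrentBytesTransferred, but it is not the real cost model.
    int64_t BytesReadByOperands(const xla::HloInstruction& instr) {
      int64_t bytes = 0;
      for (const xla::HloInstruction* operand : instr.operands()) {
        bytes += xla::ShapeUtil::ByteSizeOf(operand->shape());
      }
      return bytes;
    }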
D | ir_emitter_nested.cc |
     59  llvm_ir::ByteSizeOf(param_shape, module_->getDataLayout());  in EmitBasePointersForNestedComputation()
     68  int64 root_size = llvm_ir::ByteSizeOf(  in EmitBasePointersForNestedComputation()
|
D | cudnn_conv_pad_for_tensor_cores.cc |
    186  int64 old_bytes = ShapeUtil::ByteSizeOf(old_shape);  in PadForTensorCores()
    187  int64 new_bytes = ShapeUtil::ByteSizeOf(new_shape);  in PadForTensorCores()
|
D | nvptx_compiler.h | 71 return ShapeUtil::ByteSizeOf(shape, pointer_size); in ShapeSizeBytesFunction()
|
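nvptx_compiler.h above wraps ByteSizeOf in a ShapeSizeBytesFunction so later passes can size shapes without knowing the device pointer width. A sketch of that idea; the std::function signature used here is an assumption for illustration, not the compiler interface's exact alias:

    #include <cstdint>
    #include <functional>
    #include "tensorflow/compiler/xla/shape_util.h"

    // Bind a pointer width once and hand out a reusable shape-size callback.
    std::function<int64_t(const xla::Shape&)> MakeShapeSizeFn(int64_t pointer_size) {
      return [pointer_size](const xla::Shape& shape) -> int64_t {
        return xla::ShapeUtil::ByteSizeOf(shape, pointer_size);
      };
    }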
D | cudnn_conv_algorithm_picker.cc |
    215  &stream, ShapeUtil::ByteSizeOf(operand->shape())));  in PickBestAlgorithm()
    222  &stream, ShapeUtil::ByteSizeOf(instr->shape().tuple_shapes(0))));  in PickBestAlgorithm()
|
D | gpu_hlo_schedule.cc | 206 return ShapeUtil::ByteSizeOf(buffer.shape(), pointer_size); in Build()
|
D | multi_output_fusion.cc | 73 profit += ShapeUtil::ByteSizeOf(instr->shape()); in GetProfit()
|
D | ir_emitter_unnested.cc |
     510  /*mem_size=*/ShapeUtil::ByteSizeOf(shape), custom_call));  in HandleCustomCall()
     560  /*mem_size=*/ShapeUtil::ByteSizeOf(hlo->operand(1)->shape()), hlo));  in HandleTriangularSolve()
    1043  /*mem_size=*/ShapeUtil::ByteSizeOf(operand->shape()), scatter));  in HandleScatter()
    1233  /*mem_size=*/ShapeUtil::ByteSizeOf(sort->operand(i)->shape()),  in HandleSort()
    1443  /*mem_size=*/ShapeUtil::ByteSizeOf(crs->shape()), crs));  in HandleAllReduce()
    1458  /*mem_size=*/ShapeUtil::ByteSizeOf(crs->operand(i)->shape()), nullptr));  in HandleAllReduce()
    1711  llvm_ir::ByteSizeOf(operand->shape(),  in BuildHostToDeviceCopyThunk()
    1723  llvm_ir::ByteSizeOf(operand->shape(),  in BuildDeviceToDeviceCopyThunk()
    1845  /*mem_size=*/ShapeUtil::ByteSizeOf(inst->shape()), nullptr));  in BuildGemmThunk()
    1963  ShapeUtil::ByteSizeOf(output_shape) % 4 == 0) {  in BuildInitializerThunk()
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/tests/ |
D | cpu_bytesizeof_test.cc |
    27  EXPECT_EQ(xla::llvm_ir::ByteSizeOf(tuple_shape, data_layout),  in TEST_F()
    35  EXPECT_EQ(xla::llvm_ir::ByteSizeOf(tuple_shape, data_layout),  in TEST_F()
|
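The cpu_bytesizeof test above checks llvm_ir::ByteSizeOf against an LLVM DataLayout, which matters mainly for tuple shapes: their byte size is the size of the tuple's pointer table, so it tracks the target pointer width. A sketch under that assumption (the layout string and the expected value are illustrative, not copied from the test):

    #include <cstdint>
    #include "llvm/IR/DataLayout.h"
    #include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    int64_t TupleIndexTableBytes() {
      const xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(
          {xla::ShapeUtil::MakeShape(xla::F32, {8}),
           xla::ShapeUtil::MakeShape(xla::S32, {})});
      const llvm::DataLayout layout("e-m:e-i64:64-n32:64");  // 64-bit pointers
      // Two tuple elements with 8-byte pointers: 16 bytes expected under the
      // pointer-table interpretation assumed above.
      return xla::llvm_ir::ByteSizeOf(tuple_shape, layout);
    }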
/external/tensorflow/tensorflow/compiler/xla/ |
D | shape_util_test.cc |
    289  EXPECT_EQ(4, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F32, {})));  in TEST()
    290  EXPECT_EQ(800, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F32, {10, 20})));  in TEST()
    293  EXPECT_EQ(8, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F64, {})));  in TEST()
    294  EXPECT_EQ(1600, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F64, {10, 20})));  in TEST()
    297  EXPECT_EQ(8, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(C64, {})));  in TEST()
    298  EXPECT_EQ(1600, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(C64, {10, 20})));  in TEST()
    301  EXPECT_EQ(0, ShapeUtil::ByteSizeOf(ShapeUtil::MakeTokenShape()));  in TEST()
|
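The shape_util_test expectations above are just element count times element width. A self-contained arithmetic check of the same numbers (plain C++, no XLA calls), assuming the usual 4-byte float, 8-byte double, and two-float complex layout:

    #include <complex>

    static_assert(10 * 20 * sizeof(float) == 800, "F32{10,20}");
    static_assert(10 * 20 * sizeof(double) == 1600, "F64{10,20}");
    static_assert(10 * 20 * sizeof(std::complex<float>) == 1600, "C64{10,20}");
    // Scalars are a single element (4, 8, and 8 bytes); a token carries no
    // data, hence the expected 0 in the last case above.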
/external/tensorflow/tensorflow/compiler/xla/client/lib/ |
D | testing.cc | 38 return ShapeUtil::ByteSizeOf(shape); in DataSizeOfShape()
|
/external/tensorflow/tensorflow/compiler/xla/service/interpreter/ |
D | executable.cc | 135 return ShapeUtil::ByteSizeOf(shape, sizeof(void*)); in ShapeSizeBytes()
|
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/ |
D | tuple_ops.cc | 106 ByteSizeOf(target_shape, src_buffer->getModule()->getDataLayout())); in EmitGetTupleElement()
|
D | llvm_util.h | 262 int64 ByteSizeOf(const Shape& shape, const llvm::DataLayout& data_layout);
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | ir_emitter.cc |
     246  int64 IrEmitter::ByteSizeOf(const Shape& shape) const {  in ByteSizeOf() function in xla::cpu::IrEmitter
     247  return llvm_ir::ByteSizeOf(shape, module_->getDataLayout());  in ByteSizeOf()
     256  int64 buffer_size = ByteSizeOf(shape);  in MinimumAlignmentForShape()
     282  AttachDereferenceableMetadataForLoad(load, ByteSizeOf(shape));  in AttachDereferenceableMetadataForLoad()
     386  int64 length = ByteSizeOf(shape);  in EmitXfeedTransfer()
     551  int64 size = ByteSizeOf(operand->shape());  in HandleSort()
    1344  /*SrcAlign=*/1, ShapeUtil::ByteSizeOf(operand_shape));  in HandleAllReduce()
    2043  ShapeUtil::ByteSizeOf(logical_element_shape) * memcpy_elements;  in HandleSlice()
    2639  TF_RET_CHECK(ByteSizeOf(after_all->shape()) == 0);  in HandleAfterAll()
    2843  attr_builder.addDereferenceableAttr(ByteSizeOf(target_shape));  in EmitThreadLocalBufferPointer()
    [all …]
|
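The CPU IrEmitter above keeps a small ByteSizeOf wrapper that defers to the DataLayout overload, so sizes follow the module's target description. A sketch of that wrapper, with the module passed explicitly since the emitter's member field is not shown here:

    #include <cstdint>
    #include "llvm/IR/Module.h"
    #include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Size a shape with the module's DataLayout, as IrEmitter::ByteSizeOf
    // does in the listing above.
    int64_t EmitterByteSizeOf(const xla::Shape& shape, const llvm::Module& module) {
      return xla::llvm_ir::ByteSizeOf(shape, module.getDataLayout());
    }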