Lines matching refs:testModel (each entry: source line number, matched line, enclosing function)
107 const TestModel& testModel) in DeviceMemoryAllocator() argument
108 : kDevice(device), kPreparedModel(preparedModel), kTestModel(testModel) {} in DeviceMemoryAllocator()
276 Model createModel(const TestModel& testModel) { in createModel() argument
282 Subgraph mainSubgraph = createSubgraph(testModel.main, &constCopySize, &constCopies, in createModel()
284 hidl_vec<Subgraph> refSubgraphs(testModel.referenced.size()); in createModel()
285 std::transform(testModel.referenced.begin(), testModel.referenced.end(), refSubgraphs.begin(), in createModel()
316 .relaxComputationFloat32toFloat16 = testModel.isRelaxed}; in createModel()
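
The fragments at 276-316 show three steps of createModel: convert the main TestSubgraph, convert each referenced subgraph with std::transform, and copy the relaxed-computation flag. A minimal sketch of that shape follows, using hypothetical plain-struct stand-ins in place of the HIDL Model/Subgraph/hidl_vec types; the real createSubgraph at 282 also threads constant-copy bookkeeping (constCopySize/constCopies) that the sketch drops.

    #include <algorithm>
    #include <vector>

    // Hypothetical, heavily reduced stand-ins for the HIDL types named in the listing.
    struct Subgraph {};
    struct Model {
        Subgraph main;
        std::vector<Subgraph> referenced;
        bool relaxComputationFloat32toFloat16 = false;
    };
    struct TestSubgraph {};
    struct TestModel {
        TestSubgraph main;
        std::vector<TestSubgraph> referenced;
        bool isRelaxed = false;
    };

    static Subgraph createSubgraph(const TestSubgraph&) { return {}; }  // stub for the real converter

    Model createModel(const TestModel& testModel) {
        Model model;
        model.main = createSubgraph(testModel.main);             // cf. 282
        model.referenced.resize(testModel.referenced.size());    // cf. 284
        std::transform(testModel.referenced.begin(), testModel.referenced.end(),  // cf. 285
                       model.referenced.begin(),
                       [](const TestSubgraph& sub) { return createSubgraph(sub); });
        model.relaxComputationFloat32toFloat16 = testModel.isRelaxed;  // cf. 316
        return model;
    }
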
319 static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) { in isOutputSizeGreaterThanOne() argument
320 const auto byteSize = testModel.main.operands[testModel.main.outputIndexes[index]].data.size(); in isOutputSizeGreaterThanOne()
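
Only the byteSize lookup of this helper is visible; a self-contained sketch of its likely shape, with reduced stand-in types and the final comparison assumed from the function's name:

    #include <cstdint>
    #include <vector>

    // Reduced stand-ins carrying only the fields this helper touches.
    struct TestOperand { std::vector<uint8_t> data; };
    struct TestSubgraph { std::vector<TestOperand> operands; std::vector<uint32_t> outputIndexes; };
    struct TestModel { TestSubgraph main; };

    // Line 320 is the byteSize lookup; returning byteSize > 1 is an assumption
    // taken from the helper's name, not from the listing.
    static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
        const auto byteSize = testModel.main.operands[testModel.main.outputIndexes[index]].data.size();
        return byteSize > 1;
    }
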
342 std::optional<Request> createRequest(const TestModel& testModel, MemoryType memoryType);
343 std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel,
360 std::optional<Request> ExecutionContextV1_3::createRequest(const TestModel& testModel, in createRequest() argument
367 DeviceMemoryAllocator allocator(kDevice, kPreparedModel, testModel); in createRequest()
372 hidl_vec<RequestArgument> inputs(testModel.main.inputIndexes.size()); in createRequest()
374 for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) { in createRequest()
375 const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]]; in createRequest()
402 hidl_vec<RequestArgument> outputs(testModel.main.outputIndexes.size()); in createRequest()
404 for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) { in createRequest()
405 const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]]; in createRequest()
458 for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) { in createRequest()
460 const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]]; in createRequest()
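
createRequest walks testModel.main.inputIndexes twice: once at 372-375 to build one RequestArgument per input, and again at 458-460 to revisit each input operand, presumably to copy its data into the allocated memory. A reduced two-pass sketch of that idea follows; packInputs, the flat byte-vector pool, and the offset/length RequestArgument are assumptions, since the real harness allocates HIDL shared memory through DeviceMemoryAllocator.

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Reduced stand-ins; the real TestModel/TestOperand come from the generated-test headers.
    struct TestOperand { std::vector<uint8_t> data; };
    struct TestSubgraph { std::vector<TestOperand> operands; std::vector<uint32_t> inputIndexes; };
    struct TestModel { TestSubgraph main; };

    // Hypothetical reduced RequestArgument: just an offset/length into one input pool.
    struct RequestArgument { size_t offset; size_t length; };

    // packInputs is a hypothetical name. First pass (cf. 372-375): size each input
    // and record its slot. Second pass (cf. 458-460): copy the test data into place.
    std::vector<uint8_t> packInputs(const TestModel& testModel,
                                    std::vector<RequestArgument>* inputs) {
        size_t poolSize = 0;
        inputs->resize(testModel.main.inputIndexes.size());
        for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
            const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
            (*inputs)[i] = {poolSize, op.data.size()};
            poolSize += op.data.size();
        }
        std::vector<uint8_t> pool(poolSize);
        for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
            const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
            if (!op.data.empty()) {
                std::memcpy(pool.data() + (*inputs)[i].offset, op.data.data(), op.data.size());
            }
        }
        return pool;
    }
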
470 std::vector<TestBuffer> ExecutionContextV1_3::getOutputBuffers(const TestModel& testModel, in getOutputBuffers() argument
480 const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]]; in getOutputBuffers()
513 static bool hasZeroSizedOutput(const TestModel& testModel) { in hasZeroSizedOutput() argument
514 return std::any_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(), in hasZeroSizedOutput()
515 [&testModel](uint32_t index) { in hasZeroSizedOutput()
516 return testModel.main.operands[index].data.size() == 0; in hasZeroSizedOutput()
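
Lines 513-516 show almost the whole helper; only the closing of the std::any_of call and of the function body is missing. Reusing the reduced stand-in types from the isOutputSizeGreaterThanOne sketch above (plus <algorithm> for std::any_of), the completed pattern reads:

    #include <algorithm>

    // Completes the std::any_of fragment at 513-516: true if any main-graph output
    // operand carries no data, i.e. the model has a zero-sized output.
    static bool hasZeroSizedOutput(const TestModel& testModel) {
        return std::any_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                           [&testModel](uint32_t index) {
                               return testModel.main.operands[index].data.size() == 0;
                           });
    }
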
552 const TestModel& testModel, const TestConfig& testConfig, in EvaluatePreparedModel() argument
559 !isOutputSizeGreaterThanOne(testModel, 0)) { in EvaluatePreparedModel()
564 auto maybeRequest = context.createRequest(testModel, testConfig.memoryType); in EvaluatePreparedModel()
720 if (testConfig.executor == Executor::FENCED && hasZeroSizedOutput(testModel)) { in EvaluatePreparedModel()
729 outputShapes.size() == testModel.main.outputIndexes.size()); in EvaluatePreparedModel()
740 ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size()); in EvaluatePreparedModel()
749 ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size()); in EvaluatePreparedModel()
758 testModel.main.operands[testModel.main.outputIndexes[i]].dimensions; in EvaluatePreparedModel()
776 const auto& expect = testModel.main.operands[testModel.main.outputIndexes[i]].dimensions; in EvaluatePreparedModel()
782 const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request); in EvaluatePreparedModel()
785 checkResults(testModel, outputs); in EvaluatePreparedModel()
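
The checks at 729-785 verify the execution results: the number of reported output shapes must equal testModel.main.outputIndexes.size(), each reported shape is compared against the expected operand dimensions, and checkResults then compares the output buffers. A compact gtest-style sketch of the shape comparison, with a reduced OutputShape stand-in (the real HIDL type carries more than the dimensions):

    #include <cstdint>
    #include <vector>
    #include <gtest/gtest.h>

    // Reduced stand-ins for the fields these checks touch.
    struct TestOperand { std::vector<uint32_t> dimensions; };
    struct TestSubgraph { std::vector<TestOperand> operands; std::vector<uint32_t> outputIndexes; };
    struct TestModel { TestSubgraph main; };
    struct OutputShape { std::vector<uint32_t> dimensions; };  // reduced stand-in

    // Mirrors the checks visible at 740-776: one reported shape per model output,
    // each matching the expected operand dimensions from the test model.
    void checkOutputShapes(const TestModel& testModel,
                           const std::vector<OutputShape>& outputShapes) {
        ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
        for (uint32_t i = 0; i < outputShapes.size(); i++) {
            const auto& expect = testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
            EXPECT_EQ(outputShapes[i].dimensions, expect);
        }
    }
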
789 const TestModel& testModel, TestKind testKind) { in EvaluatePreparedModel() argument
838 EvaluatePreparedModel(device, preparedModel, testModel, testConfig); in EvaluatePreparedModel()
847 const TestModel& testModel, in EvaluatePreparedCoupledModels() argument
861 EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped); in EvaluatePreparedCoupledModels()
880 void Execute(const sp<IDevice>& device, const TestModel& testModel, TestKind testKind) { in Execute() argument
881 Model model = createModel(testModel); in Execute()
895 EvaluatePreparedModel(device, preparedModel, testModel, testKind); in Execute()
898 ASSERT_TRUE(testModel.hasQuant8CoupledOperands()); in Execute()
901 TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel); in Execute()
920 EvaluatePreparedCoupledModels(device, preparedModel, testModel, preparedCoupledModel, in Execute()
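
Lines 880-920 outline the top-level Execute flow: build the HIDL model from the TestModel, prepare it, and either evaluate it directly or, for the quantization-coupling kind, also convert it with convertQuant8AsymmOperandsToSigned, prepare the copy, and run EvaluatePreparedCoupledModels on the pair. A hypothetical skeleton of that branch; everything outside the names visible in the listing is stubbed:

    #include <gtest/gtest.h>

    // Reduced stand-ins; only hasQuant8CoupledOperands() comes from the listing,
    // and the converter below is a stub.
    struct TestModel {
        bool quant8Coupled = false;
        bool hasQuant8CoupledOperands() const { return quant8Coupled; }
    };
    enum class TestKind { GENERAL, QUANTIZATION_COUPLING };  // stand-in enum

    static TestModel convertQuant8AsymmOperandsToSigned(const TestModel& m) { return m; }  // stub

    void Execute(const TestModel& testModel, TestKind testKind) {
        // cf. 881/895: build the model and evaluate the prepared model directly...
        if (testKind != TestKind::QUANTIZATION_COUPLING) return;
        // cf. 898-920: ...unless this is the coupling kind, which requires coupled
        // quant8 operands, builds a signed-quantized copy, and (in the real harness)
        // prepares both models and runs EvaluatePreparedCoupledModels on the pair.
        ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
        TestModel signedQuantizedModel = convertQuant8AsymmOperandsToSigned(testModel);
        (void)signedQuantizedModel;
    }
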
989 [](const TestModel& testModel) { return !testModel.expectFailure; }); in __anon404ff7050902() argument
991 INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) { in __anon404ff7050a02() argument
992 return !testModel.expectFailure && !testModel.hasScalarOutputs(); in __anon404ff7050a02()
996 [](const TestModel& testModel) { return !testModel.expectFailure; }); in __anon404ff7050b02() argument
999 [](const TestModel& testModel) { return !testModel.expectFailure; }); in __anon404ff7050c02() argument
1001 INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) { in __anon404ff7050d02() argument
1002 return !testModel.expectFailure && testModel.hasQuant8CoupledOperands() && in __anon404ff7050d02()
1003 testModel.main.operations.size() == 1; in __anon404ff7050d02()
1006 INSTANTIATE_GENERATED_TEST(InfiniteLoopTimeoutTest, [](const TestModel& testModel) { in __anon404ff7050e02() argument
1007 return testModel.isInfiniteLoopTimeoutTest(); in __anon404ff7050e02()
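
The final group shows the filter predicates behind each generated-test suite: every INSTANTIATE_GENERATED_TEST call pairs a suite name with a lambda that decides, per TestModel, whether a generated model is run. Following the same pattern, a purely hypothetical instantiation (not present in the source) that keeps only non-failing relaxed models would look like:

    // Hypothetical suite name; expectFailure and isRelaxed are the TestModel
    // fields visible at lines 316 and 989-1002, and the macro is supplied by
    // the VTS generated-test harness.
    INSTANTIATE_GENERATED_TEST(RelaxedOnlyTest, [](const TestModel& testModel) {
        return !testModel.expectFailure && testModel.isRelaxed;
    });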