Searched refs:nnToken (Results 1 – 2 of 2) sorted by relevance
/hardware/interfaces/neuralnetworks/utils/adapter/aidl/src/Device.cpp
     74  nn::CacheToken nnToken;                                                                        in convertCacheToken() local
     75  if (token.size() != nnToken.size()) {                                                          in convertCacheToken()
     78  std::copy(token.begin(), token.end(), nnToken.begin());                                        in convertCacheToken()
     79  return nnToken;                                                                                in convertCacheToken()
    179  const auto nnToken = NN_TRY(convertCacheToken(token));                                         in prepareModel() local
    185  nnToken, nnHints = std::move(nnHints),                                                         in prepareModel()
    189  nnDataCache, nnToken, nnHints, nnExtensionNameToPrefix);                                       in prepareModel()
    209  const auto nnToken = NN_TRY(convertCacheToken(token));                                         in prepareModelFromCache() local
    212  nnDataCache = std::move(nnDataCache), nnToken, callback] {                                     in prepareModelFromCache()
    213  auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);   in prepareModelFromCache()
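The AIDL hits at lines 74-79 sketch the body of convertCacheToken(): the variable-length byte vector received over AIDL is length-checked against the fixed-size nn::CacheToken array and then copied into it. Below is a minimal standalone sketch of that pattern; the 32-byte array size and the std::optional return type are assumptions made so the snippet compiles outside the AOSP tree (the real helper returns the utility library's error-propagating result type).

    // Minimal sketch of the size-check-then-copy pattern shown at Device.cpp lines 74-79.
    // CacheToken and its 32-byte size are stand-ins for nn::CacheToken, and std::optional
    // replaces the adapter's real error-propagating result type.
    #include <algorithm>
    #include <array>
    #include <cstdint>
    #include <optional>
    #include <vector>

    using CacheToken = std::array<uint8_t, 32>;  // assumed size, for illustration only

    std::optional<CacheToken> convertCacheToken(const std::vector<uint8_t>& token) {
        CacheToken nnToken;
        // A token of the wrong length cannot be a valid cache token; report an error.
        if (token.size() != nnToken.size()) {
            return std::nullopt;
        }
        std::copy(token.begin(), token.end(), nnToken.begin());
        return nnToken;
    }

The NN_TRY(...) calls at lines 179 and 209 then unwrap that result (or propagate the failure) before the token is forwarded to prepareModel() and prepareModelFromCache().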
/hardware/interfaces/neuralnetworks/utils/adapter/hidl/src/Device.cpp
    182  const auto nnToken = nn::CacheToken(token);                                          in prepareModel_1_2() local
    186  nnToken, callback] {                                                                 in prepareModel_1_2()
    188  nnModelCache, nnDataCache, nnToken, {}, {});                                         in prepareModel_1_2()
    212  const auto nnToken = nn::CacheToken(token);                                          in prepareModel_1_3() local
    216  nnToken, callback] {                                                                 in prepareModel_1_3()
    218  nnModelCache, nnDataCache, nnToken, {}, {});                                         in prepareModel_1_3()
    238  const auto nnToken = nn::CacheToken(token);                                          in prepareModelFromCache() local
    241  nnDataCache = std::move(nnDataCache), nnToken, callback] {                           in prepareModelFromCache()
    242  auto result = device->prepareModelFromCache({}, nnModelCache, nnDataCache, nnToken); in prepareModelFromCache()
    262  const auto nnToken = nn::CacheToken(token);                                          in prepareModelFromCache_1_3() local
    [all …]
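In the HIDL adapter, by contrast, the incoming token is already a fixed-size HIDL array, so the hits at lines 182, 212, 238, and 262 construct nn::CacheToken from it directly with no length check. Each prepare method then copies the token into a task (lines 186, 216, 241) that performs the actual driver call. The rough standalone sketch below illustrates that capture-and-dispatch shape; FakeDevice, SharedHandle, and the inline invocation of the task are placeholders for the real nn interfaces and the adapter's executor.

    // Rough sketch of the capture-and-dispatch pattern visible at lines 238-242: the
    // caches are moved and the token copied into a closure that owns everything it needs.
    #include <array>
    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <utility>
    #include <vector>

    using CacheToken = std::array<uint8_t, 32>;  // stand-in for nn::CacheToken
    using SharedHandle = int;                    // placeholder for the real cache-file handle type

    struct FakeDevice {  // placeholder for the nn::IDevice the adapter wraps
        bool prepareModelFromCache(const std::vector<SharedHandle>& modelCache,
                                   const std::vector<SharedHandle>& dataCache,
                                   const CacheToken& token) const {
            std::cout << "preparing from cache, token[0]=" << int{token[0]} << "\n";
            return true;
        }
    };

    void prepareModelFromCache(const FakeDevice& device,
                               std::vector<SharedHandle> nnModelCache,
                               std::vector<SharedHandle> nnDataCache,
                               const CacheToken& token,
                               const std::function<void(bool)>& callback) {
        const auto nnToken = token;  // mirrors `const auto nnToken = nn::CacheToken(token);`
        auto task = [&device, nnModelCache = std::move(nnModelCache),
                     nnDataCache = std::move(nnDataCache), nnToken, callback] {
            const bool ok = device.prepareModelFromCache(nnModelCache, nnDataCache, nnToken);
            callback(ok);
        };
        task();  // the real adapter hands this task to an executor instead of running it inline
    }

Capturing nnToken by value is what lets the hits at lines 241-242 keep using the same name inside the closure after the enclosing method has returned.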