/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_HARDWARE_NEURALNETWORKS_AIDL_CALLBACKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_AIDL_CALLBACKS_H

#include <android-base/thread_annotations.h>
#include <condition_variable>
#include <mutex>

#include <aidl/android/hardware/neuralnetworks/BnPreparedModelCallback.h>
#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h>
#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>

/*
 * The Callback classes are used internally by the NeuralNetworks runtime to
 * synchronize between different threads. An asynchronous task is launched
 * paired with a callback object. When a client thread requires the output of
 * the asynchronous task, the client thread can wait for the result and be
 * blocked until the asynchronous task has completed. Any wait may safely be
 * called concurrently, even on the same callback object. When the asynchronous
 * task has finished its workload, it must immediately call "notify". If the
 * asynchronous task has failed to launch, the function that tried to launch the
 * asynchronous task must immediately call "notify". This "notify" call
 * awakens any client threads waiting on the callback object.
 *
 * These classes exist to enable synchronization across AIDL. When
 * synchronization is only required within the same process, consider using
 * std::future, std::mutex, std::condition_variable, or std::experimental::latch
 * instead.
 */
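
/*
 * For illustration only: when the producer and consumer live in the same
 * process, the same hand-off can be expressed with std::promise/std::future
 * instead of an AIDL callback. In this sketch, PreparedResult and
 * prepareSomehow() are placeholders, not part of this header; <future> and
 * <thread> would be required:
 *
 *     std::promise<PreparedResult> promise;
 *     std::future<PreparedResult> future = promise.get_future();
 *     std::thread task([p = std::move(promise)]() mutable {
 *         p.set_value(prepareSomehow());     // analogous to calling "notify"
 *     });
 *     PreparedResult result = future.get();  // analogous to "wait" + "get*"
 *     task.join();
 */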

namespace aidl::android::hardware::neuralnetworks::implementation {

/**
 * The PreparedModelCallback class is used to receive the error status of
 * preparing a model as well as the prepared model from a task executing
 * asynchronously with respect to the runtime. If a calling thread calls wait
 * or get* on a PreparedModelCallback object and the corresponding asynchronous
 * task has not finished preparing the model, the calling thread will block
 * until the asynchronous task has called notify.
 *
 * If the callback object is notified more than once, only the results of the
 * first call to notify are used, and the results from subsequent calls are
 * discarded.
 *
 * This callback object is passed as an argument to IDevice::prepareModel*.
 */
class PreparedModelCallback : public BnPreparedModelCallback {
  public:
    /**
     * IPreparedModelCallback::notify marks the callback object with the return
     * status of the asynchronous model preparation along with the prepared
     * model, and allows all prior and future wait calls on the
     * PreparedModelCallback object to proceed.
     *
     * IPreparedModelCallback::notify must eventually be called on a given
     * PreparedModelCallback object; otherwise, threads blocked in wait or get*
     * will never be released.
     *
     * If the callback object is notified more than once, only the results of
     * the first call to notify are used, and the results from subsequent calls
     * are discarded.
     *
     * @param status Error status returned from asynchronously preparing the
     *     model; will be:
     *     - NONE if the asynchronous preparation was successful
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if there is an unspecified error
     *     - INVALID_ARGUMENT if the input model is invalid
     * @param preparedModel Returned model that has been prepared for execution,
     *     nullptr if the model was unable to be prepared.
     */
    ndk::ScopedAStatus notify(ErrorStatus status,
                              const std::shared_ptr<IPreparedModel>& preparedModel) override;
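
    /*
     * For illustration only: on the driver side, the asynchronous preparation
     * task receives this object as an IPreparedModelCallback and might report
     * its result as sketched below, where "preparedModel" is the driver's
     * result (an assumption for this sketch), or nullptr on failure:
     *
     *     callback->notify(preparedModel != nullptr ? ErrorStatus::NONE
     *                                               : ErrorStatus::GENERAL_FAILURE,
     *                      preparedModel);
     */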

    /**
     * PreparedModelCallback::wait blocks until notify has been called on the
     * callback object.
     */
    void wait() const;

    /**
     * Retrieves the error status returned from the asynchronous task launched
     * by IDevice::prepareModel*. If IDevice::prepareModel* has not finished
     * asynchronously preparing the model, this call will block until the
     * asynchronous task notifies the object.
     *
     * @return status Error status returned from asynchronously preparing the
     *     model; will be:
     *     - NONE if the asynchronous preparation was successful
     *     - DEVICE_UNAVAILABLE if driver is offline or busy
     *     - GENERAL_FAILURE if there is an unspecified error
     *     - INVALID_ARGUMENT if the input model is invalid
     */
    ErrorStatus getStatus() const;

    /**
     * Retrieves the model that has been prepared for execution from the
     * asynchronous task launched by IDevice::prepareModel*. If
     * IDevice::prepareModel* has not finished asynchronously preparing the
     * model, this call will block until the asynchronous task notifies the
     * object.
     *
     * @return preparedModel Returned model that has been prepared for
     *     execution, nullptr if the model was unable to be prepared.
     */
    std::shared_ptr<IPreparedModel> getPreparedModel() const;

  private:
    mutable std::mutex mMutex;
    mutable std::condition_variable mCondition;
    bool mNotified GUARDED_BY(mMutex) = false;
    ErrorStatus mErrorStatus GUARDED_BY(mMutex) = ErrorStatus::GENERAL_FAILURE;
    std::shared_ptr<IPreparedModel> mPreparedModel GUARDED_BY(mMutex);
};
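
/*
 * For illustration only: a runtime thread might pair this callback with a
 * driver's IDevice::prepareModel* call roughly as follows. The "device" handle
 * and the elided prepareModel* arguments are assumptions for this sketch:
 *
 *     const auto callback = ndk::SharedRefBase::make<PreparedModelCallback>();
 *     const ndk::ScopedAStatus launchStatus =
 *             device->prepareModel(..., callback);  // remaining arguments elided
 *     if (launchStatus.isOk()) {
 *         callback->wait();  // returns once the driver has called notify
 *         if (callback->getStatus() == ErrorStatus::NONE) {
 *             std::shared_ptr<IPreparedModel> preparedModel = callback->getPreparedModel();
 *             // preparedModel is now ready to service execution requests
 *         }
 *     }
 */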

}  // namespace aidl::android::hardware::neuralnetworks::implementation

#endif  // ANDROID_HARDWARE_NEURALNETWORKS_AIDL_CALLBACKS_H