/*
 * Copyright (C) 2024 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.adservices.ondevicepersonalization;

import android.annotation.IntRange;
import android.annotation.NonNull;
import android.os.Parcelable;

import com.android.ondevicepersonalization.internal.util.AnnotationValidations;
import com.android.ondevicepersonalization.internal.util.ByteArrayParceledListSlice;
import com.android.ondevicepersonalization.internal.util.DataClass;

/**
 * Parcelable version of {@link InferenceInput}.
 *
 * @hide
 */
@DataClass(genAidl = false, genBuilder = false)
public class InferenceInputParcel implements Parcelable {
    /**
     * The location of the TFLite model. The model is usually stored in the REMOTE_DATA or
     * LOCAL_DATA table.
     */
    @NonNull private ModelId mModelId;

    /**
     * The delegate used to run model inference. If not specified, the CPU delegate is used by
     * default.
     */
    private @InferenceInput.Params.Delegate int mDelegate;

    /**
     * The number of threads available to the interpreter. Only takes effect when input tensors
     * are on the CPU. Setting cpuNumThread to 0 disables multithreading, which is equivalent to
     * setting cpuNumThread to 1. If set to -1, the number of threads used is
     * implementation-defined and platform-dependent.
     */
    private @IntRange(from = 1) int mCpuNumThread;
    /**
     * An array of input data. The inputs should be in the same order as the inputs of the model.
     */
    @NonNull private ByteArrayParceledListSlice mInputData;

    /**
     * The number of input examples. Adopters can set this field to run batched inference. The
     * batch size is 1 by default.
     */
    private int mBatchSize;

    private @InferenceInput.Params.ModelType int mModelType =
            InferenceInput.Params.MODEL_TYPE_TENSORFLOW_LITE;
    /**
     * The empty InferenceOutput representing the expected output structure. For TFLite, the
     * inference code will verify whether this expected output structure matches the model's
     * output signature.
     */
    @NonNull private InferenceOutputParcel mExpectedOutputStructure;
    /** @hide */
    public InferenceInputParcel(@NonNull InferenceInput value) {
        this(
                new ModelId.Builder()
                        .setTableId(value.getParams().getKeyValueStore().getTableId())
                        .setKey(value.getParams().getModelKey())
                        .build(),
                value.getParams().getDelegateType(),
                value.getParams().getRecommendedNumThreads(),
                ByteArrayParceledListSlice.create(value.getInputData()),
                value.getBatchSize(),
                value.getParams().getModelType(),
                new InferenceOutputParcel(value.getExpectedOutputStructure()));
    }
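
    // Illustrative sketch: one way this parcel could be round-tripped through android.os.Parcel,
    // using only the writeToParcel() method and CREATOR defined below. The "inferenceInput"
    // variable and the surrounding call site are assumptions for illustration, not an API
    // guarantee.
    //
    //     InferenceInputParcel parcel = new InferenceInputParcel(inferenceInput);
    //     android.os.Parcel p = android.os.Parcel.obtain();
    //     parcel.writeToParcel(p, 0);
    //     p.setDataPosition(0);
    //     InferenceInputParcel copy = InferenceInputParcel.CREATOR.createFromParcel(p);
    //     p.recycle();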

    // Code below generated by codegen v1.0.23.
    //
    // DO NOT MODIFY!
    // CHECKSTYLE:OFF Generated code
    //
    // To regenerate run:
    // $ codegen $ANDROID_BUILD_TOP/packages/modules/OnDevicePersonalization/framework/java/android/adservices/ondevicepersonalization/InferenceInputParcel.java
    //
    // To exclude the generated code from IntelliJ auto-formatting enable (one-time):
    //   Settings > Editor > Code Style > Formatter Control
    // @formatter:off

    /**
     * Creates a new InferenceInputParcel.
     *
     * @param modelId The location of the TFLite model. The model is usually stored in the
     *     REMOTE_DATA or LOCAL_DATA table.
     * @param delegate The delegate used to run model inference. If not specified, the CPU
     *     delegate is used by default.
     * @param cpuNumThread The number of threads available to the interpreter. Only takes effect
     *     when input tensors are on the CPU. Setting cpuNumThread to 0 disables multithreading,
     *     which is equivalent to setting cpuNumThread to 1. If set to -1, the number of threads
     *     used is implementation-defined and platform-dependent.
     * @param inputData An array of input data. The inputs should be in the same order as the
     *     inputs of the model.
     * @param batchSize The number of input examples. Adopters can set this field to run batched
     *     inference. The batch size is 1 by default.
     * @param expectedOutputStructure The empty InferenceOutput representing the expected output
     *     structure. For TFLite, the inference code will verify whether this expected output
     *     structure matches the model's output signature.
     */
    @DataClass.Generated.Member
    public InferenceInputParcel(
            @NonNull ModelId modelId,
            @InferenceInput.Params.Delegate int delegate,
            @IntRange(from = 1) int cpuNumThread,
            @NonNull ByteArrayParceledListSlice inputData,
            int batchSize,
            @InferenceInput.Params.ModelType int modelType,
            @NonNull InferenceOutputParcel expectedOutputStructure) {
        this.mModelId = modelId;
        AnnotationValidations.validate(NonNull.class, null, mModelId);
        this.mDelegate = delegate;
        AnnotationValidations.validate(InferenceInput.Params.Delegate.class, null, mDelegate);
        this.mCpuNumThread = cpuNumThread;
        AnnotationValidations.validate(IntRange.class, null, mCpuNumThread, "from", 1);
        this.mInputData = inputData;
        AnnotationValidations.validate(NonNull.class, null, mInputData);
        this.mBatchSize = batchSize;
        this.mModelType = modelType;
        AnnotationValidations.validate(InferenceInput.Params.ModelType.class, null, mModelType);
        this.mExpectedOutputStructure = expectedOutputStructure;
        AnnotationValidations.validate(NonNull.class, null, mExpectedOutputStructure);

        // onConstructed(); // You can define this method to get a callback
    }

    /**
     * The location of the TFLite model. The model is usually stored in the REMOTE_DATA or
     * LOCAL_DATA table.
     */
    @DataClass.Generated.Member
    public @NonNull ModelId getModelId() {
        return mModelId;
    }

    /**
     * The delegate used to run model inference. If not specified, the CPU delegate is used by
     * default.
     */
    @DataClass.Generated.Member
    public @InferenceInput.Params.Delegate int getDelegate() {
        return mDelegate;
    }

    /**
     * The number of threads available to the interpreter. Only takes effect when input tensors
     * are on the CPU. Setting cpuNumThread to 0 disables multithreading, which is equivalent to
     * setting cpuNumThread to 1. If set to -1, the number of threads used is
     * implementation-defined and platform-dependent.
     */
    @DataClass.Generated.Member
    public @IntRange(from = 1) int getCpuNumThread() {
        return mCpuNumThread;
    }

    /**
     * An array of input data. The inputs should be in the same order as the inputs of the model.
     */
    @DataClass.Generated.Member
    public @NonNull ByteArrayParceledListSlice getInputData() {
        return mInputData;
    }

    /**
     * The number of input examples. Adopters can set this field to run batched inference. The
     * batch size is 1 by default.
     */
    @DataClass.Generated.Member
    public int getBatchSize() {
        return mBatchSize;
    }

    @DataClass.Generated.Member
    public @InferenceInput.Params.ModelType int getModelType() {
        return mModelType;
    }

    /**
     * The empty InferenceOutput representing the expected output structure. For TFLite, the
     * inference code will verify whether this expected output structure matches the model's
     * output signature.
     */
    @DataClass.Generated.Member
    public @NonNull InferenceOutputParcel getExpectedOutputStructure() {
        return mExpectedOutputStructure;
    }

    @Override
    @DataClass.Generated.Member
    public void writeToParcel(@NonNull android.os.Parcel dest, int flags) {
        // You can override field parcelling by defining methods like:
        // void parcelFieldName(Parcel dest, int flags) { ... }

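        // Note: the fields below are written in the same order as they are read back by the
        // Parcel constructor further down; the Parcelable contract requires the two to match.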
        dest.writeTypedObject(mModelId, flags);
        dest.writeInt(mDelegate);
        dest.writeInt(mCpuNumThread);
        dest.writeTypedObject(mInputData, flags);
        dest.writeInt(mBatchSize);
        dest.writeInt(mModelType);
        dest.writeTypedObject(mExpectedOutputStructure, flags);
    }

    @Override
    @DataClass.Generated.Member
    public int describeContents() {
        return 0;
    }

    /** @hide */
    @SuppressWarnings({"unchecked", "RedundantCast"})
    @DataClass.Generated.Member
    protected InferenceInputParcel(@NonNull android.os.Parcel in) {
        // You can override field unparcelling by defining methods like:
        // static FieldType unparcelFieldName(Parcel in) { ... }

        ModelId modelId = (ModelId) in.readTypedObject(ModelId.CREATOR);
        int delegate = in.readInt();
        int cpuNumThread = in.readInt();
        ByteArrayParceledListSlice inputData =
                (ByteArrayParceledListSlice) in.readTypedObject(ByteArrayParceledListSlice.CREATOR);
        int batchSize = in.readInt();
        int modelType = in.readInt();
        InferenceOutputParcel expectedOutputStructure =
                (InferenceOutputParcel) in.readTypedObject(InferenceOutputParcel.CREATOR);

        this.mModelId = modelId;
        AnnotationValidations.validate(NonNull.class, null, mModelId);
        this.mDelegate = delegate;
        AnnotationValidations.validate(InferenceInput.Params.Delegate.class, null, mDelegate);
        this.mCpuNumThread = cpuNumThread;
        AnnotationValidations.validate(IntRange.class, null, mCpuNumThread, "from", 1);
        this.mInputData = inputData;
        AnnotationValidations.validate(NonNull.class, null, mInputData);
        this.mBatchSize = batchSize;
        this.mModelType = modelType;
        AnnotationValidations.validate(InferenceInput.Params.ModelType.class, null, mModelType);
        this.mExpectedOutputStructure = expectedOutputStructure;
        AnnotationValidations.validate(NonNull.class, null, mExpectedOutputStructure);

        // onConstructed(); // You can define this method to get a callback
    }

    @DataClass.Generated.Member
    public static final @NonNull Parcelable.Creator<InferenceInputParcel> CREATOR =
            new Parcelable.Creator<InferenceInputParcel>() {
                @Override
                public InferenceInputParcel[] newArray(int size) {
                    return new InferenceInputParcel[size];
                }

                @Override
                public InferenceInputParcel createFromParcel(@NonNull android.os.Parcel in) {
                    return new InferenceInputParcel(in);
                }
            };

    @DataClass.Generated(
            time = 1708579683131L,
            codegenVersion = "1.0.23",
            sourceFile =
                    "packages/modules/OnDevicePersonalization/framework/java/android/adservices/ondevicepersonalization/InferenceInputParcel.java",
            inputSignatures =
                    "private @android.annotation.NonNull android.adservices.ondevicepersonalization.ModelId mModelId\nprivate @android.adservices.ondevicepersonalization.InferenceInput.Params.Delegate int mDelegate\nprivate @android.annotation.IntRange int mCpuNumThread\nprivate @android.annotation.NonNull com.android.ondevicepersonalization.internal.util.ByteArrayParceledListSlice mInputData\nprivate  int mBatchSize\nprivate @android.adservices.ondevicepersonalization.InferenceInput.Params.ModelType int mModelType\nprivate @android.annotation.NonNull android.adservices.ondevicepersonalization.InferenceOutputParcel mExpectedOutputStructure\nclass InferenceInputParcel extends java.lang.Object implements [android.os.Parcelable]\n@com.android.ondevicepersonalization.internal.util.DataClass(genAidl=false, genBuilder=false)")
    @Deprecated
    private void __metadata() {}

    // @formatter:on
    // End of generated code

}