/*
 * Copyright (C) 2024 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.adservices.ondevicepersonalization;

import android.annotation.FlaggedApi;
import android.annotation.IntDef;
import android.annotation.IntRange;
import android.annotation.NonNull;
import android.annotation.SuppressLint;

import com.android.adservices.ondevicepersonalization.flags.Flags;
import com.android.ondevicepersonalization.internal.util.AnnotationValidations;
import com.android.ondevicepersonalization.internal.util.DataClass;

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;

/**
 * Contains all the information needed for a run of model inference. This is the input of {@link
 * ModelManager#run}.
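 *
 * <p>A minimal usage sketch; the {@code remoteData} store and {@code "modelKey"} key below are
 * illustrative assumptions, not fixed names in this API:
 *
 * <pre>{@code
 * InferenceInput.Params params =
 *         new InferenceInput.Params.Builder(remoteData, "modelKey").build();
 * float[][] input0 = new float[1][4]; // Float input tensor of shape [1, 4].
 * float[][] outputStructure = new float[1][1]; // Float output tensor of shape [1, 1].
 * HashMap<Integer, Object> outputs = new HashMap<>();
 * outputs.put(0, outputStructure);
 * InferenceOutput expectedOutput =
 *         new InferenceOutput.Builder().setDataOutputs(outputs).build();
 * InferenceInput input =
 *         new InferenceInput.Builder(params, new Object[] {input0}, expectedOutput).build();
 * }</pre>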
 */
@FlaggedApi(Flags.FLAG_ON_DEVICE_PERSONALIZATION_APIS_ENABLED)
@DataClass(genBuilder = true, genEqualsHashCode = true)
public final class InferenceInput {
    /** The configuration that controls runtime interpreter behavior. */
    @NonNull private Params mParams;

    /**
     * An array of input data. The inputs should be in the same order as the inputs of the model.
     *
     * <p>For example, if a model takes multiple inputs:
     *
     * <pre>{@code
     * String[] input0 = {"foo", "bar"}; // string tensor shape is [2].
     * int[] input1 = new int[]{3, 2, 1}; // int tensor shape is [3].
     * Object[] inputData = {input0, input1, ...};
     * }</pre>
     *
     * For TFLite, this field is mapped to the inputs of runForMultipleInputsOutputs:
     * https://www.tensorflow.org/lite/api_docs/java/org/tensorflow/lite/InterpreterApi#parameters_9
     */
    @NonNull private Object[] mInputData;

    /**
     * The number of input examples. Adopters can set this field to run batched inference. The
     * batch size is 1 by default, and it should match the size of the input data.
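     *
     * <p>A minimal batching sketch, assuming {@code params} and {@code outputStructure} are
     * created as in the class-level example and the model's first input is a float tensor of
     * shape [batch, 4]:
     *
     * <pre>{@code
     * float[][] input0 = new float[2][4]; // Two input examples, each of length 4.
     * InferenceInput input =
     *         new InferenceInput.Builder(params, new Object[] {input0}, outputStructure)
     *                 .setBatchSize(2)
     *                 .build();
     * }</pre>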
     */
    private int mBatchSize = 1;

    /**
     * The empty InferenceOutput representing the expected output structure. For TFLite, the
     * inference code will verify whether this expected output structure matches the model output
     * signature.
     *
     * <p>If a model produces string tensors:
     *
     * <pre>{@code
     * String[][] output = new String[3][2];  // Output tensor shape is [3, 2].
     * HashMap<Integer, Object> outputs = new HashMap<>();
     * outputs.put(0, output);
     * expectedOutputStructure = new InferenceOutput.Builder().setDataOutputs(outputs).build();
     * }</pre>
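     *
     * <p>Or, as an illustrative sketch, for a model with a single float output tensor of shape
     * [2, 1]:
     *
     * <pre>{@code
     * float[][] output = new float[2][1];
     * HashMap<Integer, Object> outputs = new HashMap<>();
     * outputs.put(0, output);
     * expectedOutputStructure = new InferenceOutput.Builder().setDataOutputs(outputs).build();
     * }</pre>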
     */
    @NonNull private InferenceOutput mExpectedOutputStructure;

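    /** The configuration that controls runtime interpreter behavior. */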
    @DataClass(genBuilder = true, genHiddenConstructor = true, genEqualsHashCode = true)
    public static class Params {
        /**
         * A {@link KeyValueStore} where the pre-trained model is stored. Only TFLite models are
         * supported for now.
         */
        @NonNull private KeyValueStore mKeyValueStore;

        /**
         * The key of the table where the corresponding value stores a pre-trained model. Only
         * TFLite models are supported for now.
         */
        @NonNull private String mModelKey;

        /** The model inference will run on CPU. */
        public static final int DELEGATE_CPU = 1;

        /**
         * The delegate to run model inference.
         *
         * @hide
         */
        @IntDef(
                prefix = "DELEGATE_",
                value = {DELEGATE_CPU})
        @Retention(RetentionPolicy.SOURCE)
        public @interface Delegate {}

        /**
         * The delegate to run model inference. If not set, the default value is {@link
         * #DELEGATE_CPU}.
         */
        private @Delegate int mDelegateType = DELEGATE_CPU;

        /** The model is a TensorFlow Lite model. */
        public static final int MODEL_TYPE_TENSORFLOW_LITE = 1;

        /**
         * The type of the model.
         *
         * @hide
         */
        @IntDef(
                prefix = "MODEL_TYPE",
                value = {MODEL_TYPE_TENSORFLOW_LITE})
        @Retention(RetentionPolicy.SOURCE)
        public @interface ModelType {}

        /**
         * The type of the pre-trained model. If not set, the default value is {@link
         * #MODEL_TYPE_TENSORFLOW_LITE}. Only {@link #MODEL_TYPE_TENSORFLOW_LITE} is supported for
         * now.
         */
        private @ModelType int mModelType = MODEL_TYPE_TENSORFLOW_LITE;

        /**
         * The number of threads used for intra-op parallelism on CPU; it must be a positive
         * number. Adopters can set this field based on the model architecture. The actual number
         * of threads used depends on system resources and other constraints.
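         *
         * <p>As an illustrative guideline (not a guarantee), a model dominated by large matrix
         * multiplications may benefit from a value matching the number of performance cores on
         * the device, e.g. {@code setRecommendedNumThreads(4)}.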
         */
        private @IntRange(from = 1) int mRecommendedNumThreads = 1;

        // Code below generated by codegen v1.0.23.
        //
        // DO NOT MODIFY!
        // CHECKSTYLE:OFF Generated code
        //
        // To regenerate run:
        // $ codegen $ANDROID_BUILD_TOP/packages/modules/OnDevicePersonalization/framework/java/android/adservices/ondevicepersonalization/InferenceInput.java
        //
        // To exclude the generated code from IntelliJ auto-formatting enable (one-time):
        //   Settings > Editor > Code Style > Formatter Control
        // @formatter:off

        /**
         * Creates a new Params.
         *
         * @param keyValueStore A {@link KeyValueStore} where the pre-trained model is stored.
         *     Only TFLite models are supported for now.
         * @param modelKey The key of the table where the corresponding value stores a pre-trained
         *     model. Only TFLite models are supported for now.
         * @param delegateType The delegate to run model inference. If not set, the default value
         *     is {@link #DELEGATE_CPU}.
         * @param modelType The type of the pre-trained model. If not set, the default value is
         *     {@link #MODEL_TYPE_TENSORFLOW_LITE}. Only {@link #MODEL_TYPE_TENSORFLOW_LITE} is
         *     supported for now.
         * @param recommendedNumThreads The number of threads used for intra-op parallelism on
         *     CPU; it must be a positive number. Adopters can set this field based on the model
         *     architecture. The actual number of threads used depends on system resources and
         *     other constraints.
         * @hide
         */
        @DataClass.Generated.Member
        public Params(
                @NonNull KeyValueStore keyValueStore,
                @NonNull String modelKey,
                @Delegate int delegateType,
                @ModelType int modelType,
                @IntRange(from = 1) int recommendedNumThreads) {
            this.mKeyValueStore = keyValueStore;
            AnnotationValidations.validate(NonNull.class, null, mKeyValueStore);
            this.mModelKey = modelKey;
            AnnotationValidations.validate(NonNull.class, null, mModelKey);
            this.mDelegateType = delegateType;
            AnnotationValidations.validate(Delegate.class, null, mDelegateType);
            this.mModelType = modelType;
            AnnotationValidations.validate(ModelType.class, null, mModelType);
            this.mRecommendedNumThreads = recommendedNumThreads;
            AnnotationValidations.validate(IntRange.class, null, mRecommendedNumThreads, "from", 1);

            // onConstructed(); // You can define this method to get a callback
        }

        /**
         * A {@link KeyValueStore} where the pre-trained model is stored. Only TFLite models are
         * supported for now.
         */
        @DataClass.Generated.Member
        public @NonNull KeyValueStore getKeyValueStore() {
            return mKeyValueStore;
        }

        /**
         * The key of the table where the corresponding value stores a pre-trained model. Only
         * TFLite models are supported for now.
         */
        @DataClass.Generated.Member
        public @NonNull String getModelKey() {
            return mModelKey;
        }

        /**
         * The delegate to run model inference. If not set, the default value is {@link
         * #DELEGATE_CPU}.
         */
        @DataClass.Generated.Member
        public @Delegate int getDelegateType() {
            return mDelegateType;
        }

        /**
         * The type of the pre-trained model. If not set, the default value is {@link
         * #MODEL_TYPE_TENSORFLOW_LITE}. Only {@link #MODEL_TYPE_TENSORFLOW_LITE} is supported for
         * now.
         */
        @DataClass.Generated.Member
        public @ModelType int getModelType() {
            return mModelType;
        }

        /**
         * The number of threads used for intra-op parallelism on CPU; it must be a positive
         * number. Adopters can set this field based on the model architecture. The actual number
         * of threads used depends on system resources and other constraints.
         */
        @DataClass.Generated.Member
        public @IntRange(from = 1) int getRecommendedNumThreads() {
            return mRecommendedNumThreads;
        }

        @Override
        @DataClass.Generated.Member
        public boolean equals(@android.annotation.Nullable Object o) {
            // You can override field equality logic by defining either of the methods like:
            // boolean fieldNameEquals(Params other) { ... }
            // boolean fieldNameEquals(FieldType otherValue) { ... }

            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            @SuppressWarnings("unchecked")
            Params that = (Params) o;
            //noinspection PointlessBooleanExpression
            return true
                    && java.util.Objects.equals(mKeyValueStore, that.mKeyValueStore)
                    && java.util.Objects.equals(mModelKey, that.mModelKey)
                    && mDelegateType == that.mDelegateType
                    && mModelType == that.mModelType
                    && mRecommendedNumThreads == that.mRecommendedNumThreads;
        }

        @Override
        @DataClass.Generated.Member
        public int hashCode() {
            // You can override field hashCode logic by defining methods like:
            // int fieldNameHashCode() { ... }

            int _hash = 1;
            _hash = 31 * _hash + java.util.Objects.hashCode(mKeyValueStore);
            _hash = 31 * _hash + java.util.Objects.hashCode(mModelKey);
            _hash = 31 * _hash + mDelegateType;
            _hash = 31 * _hash + mModelType;
            _hash = 31 * _hash + mRecommendedNumThreads;
            return _hash;
        }

        /** A builder for {@link Params}. */
        @SuppressWarnings("WeakerAccess")
        @DataClass.Generated.Member
        public static final class Builder {

            private @NonNull KeyValueStore mKeyValueStore;
            private @NonNull String mModelKey;
            private @Delegate int mDelegateType;
            private @ModelType int mModelType;
            private @IntRange(from = 1) int mRecommendedNumThreads;

            private long mBuilderFieldsSet = 0L;

            /**
             * Creates a new Builder.
             *
             * @param keyValueStore A {@link KeyValueStore} where the pre-trained model is stored.
             *     Only TFLite models are supported for now.
             * @param modelKey The key of the table where the corresponding value stores a
             *     pre-trained model. Only TFLite models are supported for now.
             */
            public Builder(@NonNull KeyValueStore keyValueStore, @NonNull String modelKey) {
                mKeyValueStore = keyValueStore;
                AnnotationValidations.validate(NonNull.class, null, mKeyValueStore);
                mModelKey = modelKey;
                AnnotationValidations.validate(NonNull.class, null, mModelKey);
            }

            /**
             * A {@link KeyValueStore} where the pre-trained model is stored. Only TFLite models
             * are supported for now.
             */
            @DataClass.Generated.Member
            public @NonNull Builder setKeyValueStore(@NonNull KeyValueStore value) {
                mBuilderFieldsSet |= 0x1;
                mKeyValueStore = value;
                return this;
            }

            /**
             * The key of the table where the corresponding value stores a pre-trained model. Only
             * TFLite models are supported for now.
             */
            @DataClass.Generated.Member
            public @NonNull Builder setModelKey(@NonNull String value) {
                mBuilderFieldsSet |= 0x2;
                mModelKey = value;
                return this;
            }

            /**
             * The delegate to run model inference. If not set, the default value is {@link
             * #DELEGATE_CPU}.
             */
            @DataClass.Generated.Member
            public @NonNull Builder setDelegateType(@Delegate int value) {
                mBuilderFieldsSet |= 0x4;
                mDelegateType = value;
                return this;
            }

            /**
             * The type of the pre-trained model. If not set, the default value is {@link
             * #MODEL_TYPE_TENSORFLOW_LITE}. Only {@link #MODEL_TYPE_TENSORFLOW_LITE} is supported
             * for now.
             */
            @DataClass.Generated.Member
            public @NonNull Builder setModelType(@ModelType int value) {
                mBuilderFieldsSet |= 0x8;
                mModelType = value;
                return this;
            }

            /**
             * The number of threads used for intra-op parallelism on CPU; it must be a positive
             * number. Adopters can set this field based on the model architecture. The actual
             * number of threads used depends on system resources and other constraints.
             */
            @DataClass.Generated.Member
            public @NonNull Builder setRecommendedNumThreads(@IntRange(from = 1) int value) {
                mBuilderFieldsSet |= 0x10;
                mRecommendedNumThreads = value;
                return this;
            }

            /** Builds the instance. */
            public @NonNull Params build() {
                mBuilderFieldsSet |= 0x20; // Mark builder used

                if ((mBuilderFieldsSet & 0x4) == 0) {
                    mDelegateType = DELEGATE_CPU;
                }
                if ((mBuilderFieldsSet & 0x8) == 0) {
                    mModelType = MODEL_TYPE_TENSORFLOW_LITE;
                }
                if ((mBuilderFieldsSet & 0x10) == 0) {
                    mRecommendedNumThreads = 1;
                }
                Params o =
                        new Params(
                                mKeyValueStore,
                                mModelKey,
                                mDelegateType,
                                mModelType,
                                mRecommendedNumThreads);
                return o;
            }
        }

        @DataClass.Generated(
                time = 1709250081597L,
                codegenVersion = "1.0.23",
                sourceFile =
                        "packages/modules/OnDevicePersonalization/framework/java/android/adservices/ondevicepersonalization/InferenceInput.java",
                inputSignatures =
                        "private @android.annotation.NonNull android.adservices.ondevicepersonalization.KeyValueStore mKeyValueStore\nprivate @android.annotation.NonNull java.lang.String mModelKey\npublic static final  int DELEGATE_CPU\nprivate @android.adservices.ondevicepersonalization.Params.Delegate int mDelegateType\npublic static final  int MODEL_TYPE_TENSORFLOW_LITE\nprivate @android.adservices.ondevicepersonalization.Params.ModelType int mModelType\nprivate @android.annotation.IntRange int mRecommendedNumThreads\nclass Params extends java.lang.Object implements []\n@com.android.ondevicepersonalization.internal.util.DataClass(genBuilder=true, genHiddenConstructor=true, genEqualsHashCode=true)")
        @Deprecated
        private void __metadata() {}

        // @formatter:on
        // End of generated code

    }

    // Code below generated by codegen v1.0.23.
    //
    // DO NOT MODIFY!
    // CHECKSTYLE:OFF Generated code
    //
    // To regenerate run:
    // $ codegen $ANDROID_BUILD_TOP/packages/modules/OnDevicePersonalization/framework/java/android/adservices/ondevicepersonalization/InferenceInput.java
    //
    // To exclude the generated code from IntelliJ auto-formatting enable (one-time):
    //   Settings > Editor > Code Style > Formatter Control
    // @formatter:off

    @DataClass.Generated.Member
    /* package-private */ InferenceInput(
            @NonNull Params params,
            @NonNull Object[] inputData,
            int batchSize,
            @NonNull InferenceOutput expectedOutputStructure) {
        this.mParams = params;
        AnnotationValidations.validate(NonNull.class, null, mParams);
        this.mInputData = inputData;
        AnnotationValidations.validate(NonNull.class, null, mInputData);
        this.mBatchSize = batchSize;
        this.mExpectedOutputStructure = expectedOutputStructure;
        AnnotationValidations.validate(NonNull.class, null, mExpectedOutputStructure);

        // onConstructed(); // You can define this method to get a callback
    }

    /** The configuration that controls runtime interpreter behavior. */
    @DataClass.Generated.Member
    public @NonNull Params getParams() {
        return mParams;
    }

    /**
     * An array of input data. The inputs should be in the same order as the inputs of the model.
     *
     * <p>For example, if a model takes multiple inputs:
     *
     * <pre>{@code
     * String[] input0 = {"foo", "bar"}; // string tensor shape is [2].
     * int[] input1 = new int[]{3, 2, 1}; // int tensor shape is [3].
     * Object[] inputData = {input0, input1, ...};
     * }</pre>
     *
     * For TFLite, this field is mapped to the inputs of runForMultipleInputsOutputs:
     * https://www.tensorflow.org/lite/api_docs/java/org/tensorflow/lite/InterpreterApi#parameters_9
     */
    @SuppressLint("ArrayReturn")
    @DataClass.Generated.Member
    public @NonNull Object[] getInputData() {
        return mInputData;
    }

    /**
     * The number of input examples. Adopters can set this field to run batched inference. The
     * batch size is 1 by default, and it should match the size of the input data.
     */
    @DataClass.Generated.Member
    public int getBatchSize() {
        return mBatchSize;
    }

    /**
     * The empty InferenceOutput representing the expected output structure. For TFLite, the
     * inference code will verify whether this expected output structure matches the model output
     * signature.
     *
     * <p>If a model produces string tensors:
     *
     * <pre>{@code
     * String[][] output = new String[3][2];  // Output tensor shape is [3, 2].
     * HashMap<Integer, Object> outputs = new HashMap<>();
     * outputs.put(0, output);
     * expectedOutputStructure = new InferenceOutput.Builder().setDataOutputs(outputs).build();
     * }</pre>
     */
    @DataClass.Generated.Member
    public @NonNull InferenceOutput getExpectedOutputStructure() {
        return mExpectedOutputStructure;
    }

    @Override
    @DataClass.Generated.Member
    public boolean equals(@android.annotation.Nullable Object o) {
        // You can override field equality logic by defining either of the methods like:
        // boolean fieldNameEquals(InferenceInput other) { ... }
        // boolean fieldNameEquals(FieldType otherValue) { ... }

        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        @SuppressWarnings("unchecked")
        InferenceInput that = (InferenceInput) o;
        //noinspection PointlessBooleanExpression
        return true
                && java.util.Objects.equals(mParams, that.mParams)
                && java.util.Arrays.equals(mInputData, that.mInputData)
                && mBatchSize == that.mBatchSize
                && java.util.Objects.equals(
                        mExpectedOutputStructure, that.mExpectedOutputStructure);
    }

    @Override
    @DataClass.Generated.Member
    public int hashCode() {
        // You can override field hashCode logic by defining methods like:
        // int fieldNameHashCode() { ... }

        int _hash = 1;
        _hash = 31 * _hash + java.util.Objects.hashCode(mParams);
        _hash = 31 * _hash + java.util.Arrays.hashCode(mInputData);
        _hash = 31 * _hash + mBatchSize;
        _hash = 31 * _hash + java.util.Objects.hashCode(mExpectedOutputStructure);
        return _hash;
    }

    /** A builder for {@link InferenceInput}. */
    @SuppressWarnings("WeakerAccess")
    @DataClass.Generated.Member
    public static final class Builder {

        private @NonNull Params mParams;
        private @NonNull Object[] mInputData;
        private int mBatchSize;
        private @NonNull InferenceOutput mExpectedOutputStructure;

        private long mBuilderFieldsSet = 0L;

        /**
         * Creates a new Builder.
         *
         * @param params The configuration that controls runtime interpreter behavior.
         * @param inputData An array of input data. The inputs should be in the same order as the
         *     inputs of the model.
         *     <p>For example, if a model takes multiple inputs:
         *     <pre>{@code
         * String[] input0 = {"foo", "bar"}; // string tensor shape is [2].
         * int[] input1 = new int[]{3, 2, 1}; // int tensor shape is [3].
         * Object[] inputData = {input0, input1, ...};
         * }</pre>
         *     For TFLite, this field is mapped to the inputs of runForMultipleInputsOutputs:
         *     https://www.tensorflow.org/lite/api_docs/java/org/tensorflow/lite/InterpreterApi#parameters_9
         * @param expectedOutputStructure The empty InferenceOutput representing the expected
         *     output structure. For TFLite, the inference code will verify whether this expected
         *     output structure matches the model output signature.
         *     <p>If a model produces string tensors:
         *     <pre>{@code
         * String[][] output = new String[3][2];  // Output tensor shape is [3, 2].
         * HashMap<Integer, Object> outputs = new HashMap<>();
         * outputs.put(0, output);
         * expectedOutputStructure = new InferenceOutput.Builder().setDataOutputs(outputs).build();
         * }</pre>
         */
        public Builder(
                @NonNull Params params,
                @SuppressLint("ArrayReturn") @NonNull Object[] inputData,
                @NonNull InferenceOutput expectedOutputStructure) {
            mParams = params;
            AnnotationValidations.validate(NonNull.class, null, mParams);
            mInputData = inputData;
            AnnotationValidations.validate(NonNull.class, null, mInputData);
            mExpectedOutputStructure = expectedOutputStructure;
            AnnotationValidations.validate(NonNull.class, null, mExpectedOutputStructure);
        }

        /** The configuration that controls runtime interpreter behavior. */
        @DataClass.Generated.Member
        public @NonNull Builder setParams(@NonNull Params value) {
            mBuilderFieldsSet |= 0x1;
            mParams = value;
            return this;
        }

        /**
         * An array of input data. The inputs should be in the same order as the inputs of the
         * model.
         *
         * <p>For example, if a model takes multiple inputs:
         *
         * <pre>{@code
         * String[] input0 = {"foo", "bar"}; // string tensor shape is [2].
         * int[] input1 = new int[]{3, 2, 1}; // int tensor shape is [3].
         * Object[] inputData = {input0, input1, ...};
         * }</pre>
         *
         * For TFLite, this field is mapped to the inputs of runForMultipleInputsOutputs:
         * https://www.tensorflow.org/lite/api_docs/java/org/tensorflow/lite/InterpreterApi#parameters_9
         */
        @DataClass.Generated.Member
        public @NonNull Builder setInputData(@NonNull Object... value) {
            mBuilderFieldsSet |= 0x2;
            mInputData = value;
            return this;
        }

        /**
         * The number of input examples. Adopters can set this field to run batched inference. The
         * batch size is 1 by default, and it should match the size of the input data.
         */
        @DataClass.Generated.Member
        public @NonNull Builder setBatchSize(int value) {
            mBuilderFieldsSet |= 0x4;
            mBatchSize = value;
            return this;
        }

        /**
         * The empty InferenceOutput representing the expected output structure. For TFLite, the
         * inference code will verify whether this expected output structure matches the model
         * output signature.
         *
         * <p>If a model produces string tensors:
         *
         * <pre>{@code
         * String[][] output = new String[3][2];  // Output tensor shape is [3, 2].
         * HashMap<Integer, Object> outputs = new HashMap<>();
         * outputs.put(0, output);
         * expectedOutputStructure = new InferenceOutput.Builder().setDataOutputs(outputs).build();
         * }</pre>
         */
        @DataClass.Generated.Member
        public @NonNull Builder setExpectedOutputStructure(@NonNull InferenceOutput value) {
            mBuilderFieldsSet |= 0x8;
            mExpectedOutputStructure = value;
            return this;
        }

        /** Builds the instance. */
        public @NonNull InferenceInput build() {
            mBuilderFieldsSet |= 0x10; // Mark builder used

            if ((mBuilderFieldsSet & 0x4) == 0) {
                mBatchSize = 1;
            }
            InferenceInput o =
                    new InferenceInput(mParams, mInputData, mBatchSize, mExpectedOutputStructure);
            return o;
        }
    }

    @DataClass.Generated(
            time = 1709250081618L,
            codegenVersion = "1.0.23",
            sourceFile =
                    "packages/modules/OnDevicePersonalization/framework/java/android/adservices/ondevicepersonalization/InferenceInput.java",
            inputSignatures =
                    "private @android.annotation.NonNull android.adservices.ondevicepersonalization.Params mParams\nprivate @android.annotation.NonNull java.lang.Object[] mInputData\nprivate  int mBatchSize\nprivate @android.annotation.NonNull android.adservices.ondevicepersonalization.InferenceOutput mExpectedOutputStructure\nclass InferenceInput extends java.lang.Object implements []\n@com.android.ondevicepersonalization.internal.util.DataClass(genBuilder=true, genEqualsHashCode=true)")
    @Deprecated
    private void __metadata() {}

    // @formatter:on
    // End of generated code

}