
Searched refs: out0 (Results 1 – 25 of 26), sorted by relevance

/frameworks/ml/nn/runtime/test/specs/V1_3/
fully_connected_quant8_signed.mod.py
24 out0 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{2, 3}, 1.f, -1") variable
26 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
32 output0 = {out0: # output 0
44 out0 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 1.f, -128") # batch = 1, number_units =… variable
46 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
51 output0 = {out0: # output 0
63 out0 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1}, 1.f, -128") # batch = 1, number_units =… variable
65 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
74 output0 = {out0: # output 0
86 out0 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1}, 1.f, -128") variable
[all …]
/frameworks/ml/nn/runtime/test/specs/V1_0/
fully_connected_quant8_large.mod.py
21 out0 = Output("op3", "TENSOR_QUANT8_ASYMM", "{1, 1}, 1.f, 0") # batch = 1, number_units = 1 variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
28 output0 = {out0: # output 0
fully_connected_quant8.mod.py
21 out0 = Output("op3", "TENSOR_QUANT8_ASYMM", "{3, 1}, 1.f, 0") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
28 output0 = {out0: # output 0
fully_connected_float_2.mod.py
46 out0 = Output("op3", "TENSOR_FLOAT32", "{2, 16}") variable
48 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
54 output0 = {out0: # output 0
fully_connected_float.mod.py
21 out0 = Output("op3", "TENSOR_FLOAT32", "{3, 1}") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
28 output0 = {out0: # output 0
fully_connected_float_3.mod.py
21 out0 = Output("op3", "TENSOR_FLOAT32", "{2, 1}") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
28 output0 = {out0: # output 0
fully_connected_float_large.mod.py
21 out0 = Output("op3", "TENSOR_FLOAT32", "{1, 1}") # batch = 1, number_units = 1 variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
28 output0 = {out0: # output 0
fully_connected_quant8_2.mod.py
24 out0 = Output("op3", "TENSOR_QUANT8_ASYMM", "{2, 3}, 1.f, 127") variable
26 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
32 output0 = {out0: # output 0
fully_connected_float_weights_as_inputs.mod.py
21 out0 = Output("op3", "TENSOR_FLOAT32", "{3, 1}") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
30 output0 = {out0: # output 0
fully_connected_float_large_weights_as_inputs.mod.py
21 out0 = Output("op3", "TENSOR_FLOAT32", "{1, 1}") # batch = 1, number_units = 1 variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
32 output0 = {out0: # output 0
fully_connected_quant8_large_weights_as_inputs.mod.py
21 out0 = Output("op3", "TENSOR_QUANT8_ASYMM", "{1, 1}, 1.f, 0") # batch = 1, number_units = 1 variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
32 output0 = {out0: # output 0
fully_connected_quant8_weights_as_inputs.mod.py
21 out0 = Output("op3", "TENSOR_QUANT8_ASYMM", "{3, 1}, 1.f, 0") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
30 output0 = {out0: # output 0
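Note: every mod.py hit above follows the same NNAPI test-generator pattern: out0 is declared as the Output operand and then wired to a FULLY_CONNECTED operation, with an Example pairing inputs and expected outputs. A minimal sketch of that pattern (assuming the test_generator DSL names Model/Input/Parameter/Int32Scalar/Output/Example; the values are illustrative, not the verbatim contents of any one file):

model = Model()
in0 = Input("op1", "TENSOR_FLOAT32", "{3, 1}")               # batch = 3, input_size = 1
weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 1}", [2])
bias = Parameter("b0", "TENSOR_FLOAT32", "{1}", [4])
out0 = Output("op3", "TENSOR_FLOAT32", "{3, 1}")             # the symbol this search is about
act = Int32Scalar("act", 0)                                   # fused activation: NONE
model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)

# Expected result is 2*x + 4 per element.
input0 = {in0: [2, 32, 16]}      # input 0
output0 = {out0: [8, 68, 36]}    # output 0
Example((input0, output0))

The *_weights_as_inputs variants appear to differ only in declaring weights and bias as Input instead of Parameter, and the quant8 variants swap the operand types to TENSOR_QUANT8_ASYMM with a scale/zero-point suffix, as the Output lines above show.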
/frameworks/ml/nn/runtime/test/specs/V1_1/
fully_connected_float_4d_simple.mod.py
29 out0 = Output("op3", "TENSOR_FLOAT32", "{2, 3}") variable
31 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
37 output0 = {out0: # output 0
fully_connected_float_relaxed.mod.py
21 out0 = Output("op3", "TENSOR_FLOAT32", "{3, 1}") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
29 output0 = {out0: # output 0
fully_connected_float_2_relaxed.mod.py
46 out0 = Output("op3", "TENSOR_FLOAT32", "{2, 16}") variable
48 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
55 output0 = {out0: # output 0
fully_connected_float_4d_simple_relaxed.mod.py
29 out0 = Output("op3", "TENSOR_FLOAT32", "{2, 3}") variable
31 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
38 output0 = {out0: # output 0
fully_connected_float_large_relaxed.mod.py
21 out0 = Output("op3", "TENSOR_FLOAT32", "{1, 1}") # batch = 1, number_units = 1 variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
29 output0 = {out0: # output 0
fully_connected_float_large_weights_as_inputs_relaxed.mod.py
21 out0 = Output("op3", "TENSOR_FLOAT32", "{1, 1}") # batch = 1, number_units = 1 variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
33 output0 = {out0: # output 0
fully_connected_float_weights_as_inputs_relaxed.mod.py
21 out0 = Output("op3", "TENSOR_FLOAT32", "{3, 1}") variable
23 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
31 output0 = {out0: # output 0
/frameworks/ml/nn/runtime/test/specs/V1_2/
fully_connected_v1_2.mod.py
22 out0 = Output("op3", "TENSOR_FLOAT32", "{3, 1}") variable
24 model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
30 out0: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
36 output0 = {out0: # output 0
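Note: the V1_2 spec is the only place where out0 also appears inside a quantization map (the line-30 hit), which is how the generator derives a quant8 variant from the float model. A hedged reconstruction of that block, assuming the DataTypeConverter/AddVariations helpers of the test generator; only out0's entry is taken from the hit, the other scales and zero points are made up:

# Hypothetical quant8 variation block; only the out0 line comes from the search result.
quant8 = DataTypeConverter().Identify({
    in0: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
    weights: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
    bias: ("TENSOR_INT32", 0.25, 0),        # bias scale = input scale * weight scale
    out0: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
})
Example((input0, output0)).AddVariations("relaxed", quant8, "float16")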
/frameworks/rs/cpu_ref/
rsCpuIntrinsics_x86.cpp
762 __m128i in0, in1, out0, out1; in rsdIntrinsicBlendSrcOver_K() local
771 out0 = _mm_loadu_si128((const __m128i *)dst); in rsdIntrinsicBlendSrcOver_K()
777 t0 = _mm_unpacklo_epi8(out0, _mm_setzero_si128()); in rsdIntrinsicBlendSrcOver_K()
785 t1 = _mm_unpackhi_epi8(out0, _mm_setzero_si128()); in rsdIntrinsicBlendSrcOver_K()
818 __m128i in0, in1, out0, out1; in rsdIntrinsicBlendDstOver_K() local
827 out0 = _mm_loadu_si128((const __m128i *)dst); in rsdIntrinsicBlendDstOver_K()
831 outs = _mm_unpacklo_epi8(out0, _mm_setzero_si128()); in rsdIntrinsicBlendDstOver_K()
839 outs = _mm_unpackhi_epi8(out0, _mm_setzero_si128()); in rsdIntrinsicBlendDstOver_K()
875 __m128i in0, in1, out0, out1; in rsdIntrinsicBlendSrcIn_K() local
882 out0 = _mm_loadu_si128((const __m128i *)dst); in rsdIntrinsicBlendSrcIn_K()
[all …]
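Note: in the rsCpuIntrinsics_x86.cpp hits, out0/out1 hold the destination pixels loaded from dst in the SSE blend kernels. As the name suggests, rsdIntrinsicBlendSrcOver_K implements Porter-Duff source-over; a scalar Python reference of the per-pixel math (the real kernel works on packed 8-bit lanes with unpack/multiply/pack, and its exact rounding may differ):

def blend_src_over(src, dst):
    # Porter-Duff SrcOver on one premultiplied RGBA pixel with 8-bit channels:
    # out = src + dst * (255 - src_alpha) / 255, clamped to 255.
    sa = src[3]
    return tuple(min(255, s + (d * (255 - sa)) // 255) for s, d in zip(src, dst))

# An opaque source replaces the destination entirely:
# blend_src_over((255, 0, 0, 255), (0, 0, 255, 255)) -> (255, 0, 0, 255)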
/frameworks/rs/tests/java_api/VrDemo/src/com/example/android/rs/vr/engine/
VectorUtil.java
44 double out0 = a[1] * b[2] - b[1] * a[2]; in cross() local
47 out[0] = out0; in cross()
Quaternion.java
38 double out0 = a[1] * b[2] - b[1] * a[2]; in cross() local
41 return new double[]{out0, out1, out2}; in cross()
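Note: both VectorUtil.java and Quaternion.java use out0/out1/out2 as locals holding the components of a 3-D cross product before they are stored or returned. The same formula in Python:

def cross(a, b):
    # 3-D cross product; out0/out1/out2 mirror the Java locals above.
    out0 = a[1] * b[2] - b[1] * a[2]
    out1 = a[2] * b[0] - b[2] * a[0]
    out2 = a[0] * b[1] - b[0] * a[1]
    return [out0, out1, out2]

# cross([1, 0, 0], [0, 1, 0]) == [0, 0, 1]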
/frameworks/ml/nn/runtime/test/
TestValidation.cpp
1207 float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2]; in TEST_F() local
1215 ASSERT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, sizeof(out0)), in TEST_F()
1218 const size_t memorySize = std::max(sizeof(in0), sizeof(out0)); in TEST_F()
1226 auto testTooLate = [this, execution, &in0, &out0, memory] { in TEST_F()
1237 ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, sizeof(out0)), in TEST_F()
1243 sizeof(out0)), in TEST_F()
2587 float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2]; in TEST_F() local
2595 ASSERT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, sizeof(out0)), in TEST_F()
2744 float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2]; in runExecutionSetTimeoutTest() local
2752 ASSERT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, sizeof(out0)), in runExecutionSetTimeoutTest()
/frameworks/compile/slang/tests/F_reduce_general_bad_function/
reduce_general_bad_function.rscript
203 #pragma rs reduce(out0) accumulator(AccumInt) outconverter(out0)
204 static void out0() { }
