/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/

bidirectional_sequence_lstm_float16_batch_major_merge_outputs.mod.py
    212: proj_clip = Float16Scalar("proj_clip", 0.0)
    266: activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_merge_outputs.mod.py
    211: proj_clip = Float32Scalar("proj_clip", 0.0)
    265: activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_float16_batch_major.mod.py
    212: proj_clip = Float16Scalar("proj_clip", 0.0)
    266: activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm.mod.py
    212: proj_clip = Float32Scalar("proj_clip", 0.0)
    266: activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_cifg_peephole.mod.py
    212: proj_clip = Float32Scalar("proj_clip", 0.0)
    266: activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_aux_input.mod.py
    214: proj_clip = Float32Scalar("proj_clip", 0.0)
    268: activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_float16_batch_major_aux_input.mod.py
    215: proj_clip = Float16Scalar("proj_clip", 0.0)
    269: activation, cell_clip, proj_clip, merge_outputs, time_major,

bidirectional_sequence_lstm_norm_fw_output.mod.py
    213: proj_clip = Float32Scalar("proj_clip", 0.0)
    267: activation, cell_clip, proj_clip, merge_outputs, time_major,
/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/

bidirectional_sequence_lstm.mod.py
    240: proj_clip = Float32Scalar("proj_clip", 0.0)
    296: proj_clip,

bidirectional_sequence_lstm_state_output.mod.py
    228: proj_clip = Float32Scalar("proj_clip", 0.0)
    284: proj_clip,
/packages/modules/NeuralNetworks/common/cpu_operations/

LayerNormLSTMTest.cpp
    89: bool use_projection_bias, float cell_clip, float proj_clip,    [argument, in LayerNormLSTMOpModel()]
    99: proj_clip_(proj_clip) {    [in LayerNormLSTMOpModel()]

LSTM.cpp
    94: params_.proj_clip = getScalarDataWithDefault<float>(projClipOperand, 0.0f);    [in LSTMCell()]
    98: params_.proj_clip =    [in LSTMCell()]
    156: NN_CHECK(params->proj_clip >= 0);    [in CheckInputTensorDimensions()]
    981: if (params.proj_clip > 0.0) {    [in LSTMStep()]
    983: params.proj_clip);    [in LSTMStep()]
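Taken together, the LSTM.cpp matches trace the lifecycle of proj_clip: it is read from its operand with a 0.0f default in LSTMCell(), validated as non-negative in CheckInputTensorDimensions(), and applied in LSTMStep() only when it is positive. Below is a minimal sketch of that final clamp, assuming a plain float buffer rather than the runtime's actual tensor utilities; it is not the LSTMStep() implementation itself.

#include <algorithm>
#include <vector>

// Sketch only: clamp each projected output element to [-proj_clip, proj_clip].
// A proj_clip of 0.0f means "clipping disabled", so the buffer is left untouched.
void ClipProjection(std::vector<float>& projected, float proj_clip) {
  if (proj_clip > 0.0f) {
    for (float& value : projected) {
      value = std::clamp(value, -proj_clip, proj_clip);
    }
  }
}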
LSTMTest.cpp
    80: bool use_projection_bias, float cell_clip, float proj_clip,    [argument, in LSTMOpModel()]
    90: proj_clip_(proj_clip) {    [in LSTMOpModel()]

BidirectionalSequenceLSTM.cpp
    178: params_.proj_clip = getScalarDataWithDefault<float>(projOperand, 0.0f);    [in BidirectionalSequenceLSTM()]
    182: params_.proj_clip =    [in BidirectionalSequenceLSTM()]

UnidirectionalSequenceLSTM.cpp
    54: params.proj_clip = static_cast<float>(context->getInputValue<T>(kProjClipParam));    [in getLSTMParams()]
/packages/modules/NeuralNetworks/common/types/operations/include/

LSTM.h
    34: float proj_clip;    [member]
/packages/modules/NeuralNetworks/tools/api/

types.spec
    1768: * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    5979: * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
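The types.spec entries document the operand's semantics: the projected output is clamped to [-proj_clip, proj_clip], and a value of 0.0 disables the clamp. As a usage illustration, here is a hedged sketch of supplying that scalar through the NNAPI C API; the wrapper function, its name, and the projClipIndex parameter are assumptions for this sketch, and only the ANeuralNetworksModel_* calls themselves are the real API.

#include <android/NeuralNetworks.h>
#include <cstdint>

// Sketch only: add a FLOAT32 scalar operand for proj_clip and give it a
// constant value. Passing 0.0f disables projection clipping; a positive value
// clamps the projected output to [-proj_clip, proj_clip].
// projClipIndex is assumed to be the index this operand receives in the model.
int SetProjClip(ANeuralNetworksModel* model, int32_t projClipIndex,
                float proj_clip) {
  ANeuralNetworksOperandType scalarType;
  scalarType.type = ANEURALNETWORKS_FLOAT32;
  scalarType.dimensionCount = 0;
  scalarType.dimensions = nullptr;
  scalarType.scale = 0.0f;
  scalarType.zeroPoint = 0;
  int status = ANeuralNetworksModel_addOperand(model, &scalarType);
  if (status != ANEURALNETWORKS_NO_ERROR) return status;
  return ANeuralNetworksModel_setOperandValue(model, projClipIndex, &proj_clip,
                                              sizeof(proj_clip));
}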