/frameworks/opt/gamesdk/third_party/protobuf-3.0.0/src/google/protobuf/io/
zero_copy_stream_impl.cc
  298:  : input_(input) {   [in CopyingIstreamInputStream()]
  305:  input_->read(reinterpret_cast<char*>(buffer), size);   [in Read()]
  306:  int result = input_->gcount();   [in Read()]
  307:  if (result == 0 && input_->fail() && !input_->eof()) {   [in Read()]
  417:  : input_(input), limit_(limit) {   [in LimitingInputStream()]
  418:  prior_bytes_read_ = input_->ByteCount();   [in LimitingInputStream()]
  423:  if (limit_ < 0) input_->BackUp(-limit_);   [in ~LimitingInputStream()]
  428:  if (!input_->Next(data, size)) return false;   [in Next()]
  440:  input_->BackUp(count - limit_);   [in BackUp()]
  443:  input_->BackUp(count);   [in BackUp()]
  [all …]
zero_copy_stream_impl.h
  242:  istream* input_;
  345:  ZeroCopyInputStream* input_;
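The two declarations above are the members the .cc matches exercise: the istream* belongs to CopyingIstreamInputStream, which copies bytes out of a std::istream, and the ZeroCopyInputStream* belongs to LimitingInputStream, which caps how much of a wrapped stream may be consumed and backs it up past the limit in its destructor. A minimal usage sketch, assuming only the public protobuf headers; IstreamInputStream is the public adaptor built on CopyingIstreamInputStream:

    // Sketch only: wrap a std::istream and cap how much of it a consumer may read.
    #include <sstream>
    #include <google/protobuf/io/zero_copy_stream_impl.h>

    int main() {
      std::istringstream source("hello zero-copy world");
      google::protobuf::io::IstreamInputStream raw(&source);       // istream* kept in input_
      google::protobuf::io::LimitingInputStream limited(&raw, 5);  // ZeroCopyInputStream* kept in input_

      const void* data;
      int size;
      while (limited.Next(&data, &size)) {
        // At most 5 bytes total are handed out across all Next() calls;
        // anything over-read is returned to `raw` when `limited` is destroyed.
      }
      return 0;
    }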
coded_stream.cc
   76:  if (input_ != NULL) {   [in ~CodedInputStream()]
   96:  input_->BackUp(backup_bytes);   [in BackUpInputToCurrentPosition()]
  242:  input_->Skip(bytes_until_limit);   [in Skip()]
  248:  return input_->Skip(count);   [in Skip()]
  618:  if (NextNonEmpty(input_, &void_buffer, &buffer_size)) {   [in Refresh()]
coded_stream.h
   525:  ZeroCopyInputStream* input_;   [variable]
  1315:  input_(input),   [in CodedInputStream()]
  1336:  input_(NULL),   [in CodedInputStream()]
  1355:  return input_ == NULL;   [in IsFlat()]
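As the header matches show, CodedInputStream stores the ZeroCopyInputStream it refills from in input_, leaves it NULL when constructed directly over a flat buffer, and IsFlat() simply tests that pointer. A short sketch of the wrapped form; the two-byte varint payload is illustrative only:

    #include <google/protobuf/io/coded_stream.h>
    #include <google/protobuf/io/zero_copy_stream_impl_lite.h>

    int main() {
      // 0x96 0x01 is the varint encoding of 150.
      const unsigned char bytes[] = {0x96, 0x01};
      google::protobuf::io::ArrayInputStream raw(bytes, sizeof(bytes));
      google::protobuf::io::CodedInputStream coded(&raw);   // raw is stored in input_

      google::protobuf::uint32 value = 0;
      bool ok = coded.ReadVarint32(&value);                 // refills from input_ as needed
      return (ok && value == 150) ? 0 : 1;
    }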
tokenizer.h
  262:  ZeroCopyInputStream* input_;   [variable]
/frameworks/ml/nn/common/operations/
Multinomial.cpp
   49:  input_ = GetInput(operation, operands, kInputTensor);   [in Multinomial()]
   79:  switch (input_->type) {   [in Eval()]
   81:  std::vector<float> inputDataFloat32(getNumberOfElements(input_->shape()));   [in Eval()]
   82:  convertFloat16ToFloat32(GetBuffer<_Float16>(input_), &inputDataFloat32);   [in Eval()]
   87:  EvalFloat32(GetBuffer<float>(input_));   [in Eval()]
   91:  LOG(ERROR) << "Unsupported data type: " << static_cast<int>(input_->type);   [in Eval()]
   99:  const int batch_size = SizeOfDimension(input_, 0);   [in EvalFloat32()]
  100:  const int class_size = SizeOfDimension(input_, 1);   [in EvalFloat32()]
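Most operations in this directory handle input_ the same way Multinomial does: the constructor resolves it from the operand list with GetInput(operation, operands, kInputTensor), and Eval() switches on input_->type, widening a TENSOR_FLOAT16 tensor to float before calling the single float32 kernel. The sketch below mirrors that dispatch with stand-in types; SimpleTensor, HalfToFloat and EvalFloat32 are placeholders, not the real RunTimeOperandInfo / convertFloat16ToFloat32 helpers:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum class OperandType { TENSOR_FLOAT16, TENSOR_FLOAT32 };

    struct SimpleTensor {
        OperandType type;
        const void* buffer;
        size_t numElements;
    };

    class MultinomialLikeOp {
      public:
        explicit MultinomialLikeOp(const SimpleTensor* input) : input_(input) {}

        bool Eval() {
            switch (input_->type) {
                case OperandType::TENSOR_FLOAT16: {
                    // Widen the half-precision input to float, then reuse the float32 kernel.
                    std::vector<float> asFloat32(input_->numElements);
                    const uint16_t* half = static_cast<const uint16_t*>(input_->buffer);
                    for (size_t i = 0; i < input_->numElements; ++i) {
                        asFloat32[i] = HalfToFloat(half[i]);  // placeholder conversion
                    }
                    return EvalFloat32(asFloat32.data());
                }
                case OperandType::TENSOR_FLOAT32:
                    return EvalFloat32(static_cast<const float*>(input_->buffer));
                default:
                    std::fprintf(stderr, "Unsupported data type\n");
                    return false;
            }
        }

      private:
        // Placeholder: the real code converts an _Float16 buffer with convertFloat16ToFloat32().
        static float HalfToFloat(uint16_t) { return 0.0f; }
        static bool EvalFloat32(const float* /*data*/) { return true; }

        const SimpleTensor* input_;  // not owned; plays the role of the RunTimeOperandInfo* member
    };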
BidirectionalSequenceLSTM.cpp
   50:  input_ = GetInput(operation, operands, kInputTensor);   [in BidirectionalSequenceLSTM()]
  141:  if (input_->type == OperandType::TENSOR_FLOAT32) {   [in BidirectionalSequenceLSTM()]
  165:  NN_CHECK(NumDimensions(input_) == 3);   [in Prepare()]
  166:  const uint32_t max_time = SizeOfDimension(input_, params_.time_major ? 0 : 1);   [in Prepare()]
  167:  const uint32_t n_batch = SizeOfDimension(input_, params_.time_major ? 1 : 0);   [in Prepare()]
  168:  const uint32_t n_input = SizeOfDimension(input_, 2);   [in Prepare()]
  180:  input_, fw_input_to_input_weights_, fw_input_to_forget_weights_,   [in Prepare()]
  208:  NN_CHECK_EQ(aux_input_->shape().dimensions[0], input_->shape().dimensions[0]);   [in Prepare()]
  209:  NN_CHECK_EQ(aux_input_->shape().dimensions[1], input_->shape().dimensions[1]);   [in Prepare()]
  220:  const Shape& inputShape = input_->shape();   [in Prepare()]
  [all …]
SVDF.cpp
   31:  input_ = GetInput(operation, operands, kInputTensor);   [in SVDF()]
   98:  switch (input_->type) {   [in Eval()]
  100:  std::vector<float> inputDataFloat32(getNumberOfElements(input_->shape()));   [in Eval()]
  101:  convertFloat16ToFloat32(reinterpret_cast<_Float16*>(input_->buffer), &inputDataFloat32);   [in Eval()]
  131:  EvalFloat32(reinterpret_cast<float*>(input_->buffer),   [in Eval()]
  141:  LOG(ERROR) << "Unsupported data type: " << static_cast<int>(input_->type);   [in Eval()]
  154:  const int batch_size = SizeOfDimension(input_, 0);   [in EvalFloat32()]
  155:  const int input_size = SizeOfDimension(input_, 1);   [in EvalFloat32()]
MultinomialTest.cpp
   69:  input_.push_back(srng.RandDouble());   [in Invoke()]
   71:  ASSERT_EQ(execution.setInput(Multinomial::kInputTensor, input_.data(),   [in Invoke()]
   72:  sizeof(float) * input_.size()),   [in Invoke()]
   91:  const std::vector<float>& GetInput() const { return input_; }   [in GetInput()]
  101:  std::vector<float> input_;   [member in android::nn::wrapper::MultinomialOpModel]
RNN.cpp
   31:  input_ = GetInput(operation, operands, kInputTensor);   [in RNN()]
   86:  switch (input_->type) {   [in Eval()]
   88:  RNNStep<_Float16>(reinterpret_cast<_Float16*>(input_->buffer), input_->shape(),   [in Eval()]
  100:  RNNStep<float>(reinterpret_cast<float*>(input_->buffer), input_->shape(),   [in Eval()]
  112:  LOG(ERROR) << "Unsupported data type: " << static_cast<int>(input_->type);   [in Eval()]
LSTM.cpp
   50:  input_ = GetInput(operation, operands, kInputTensor);   [in LSTMCell()]
   83:  if (input_->type == OperandType::TENSOR_FLOAT32) {   [in LSTMCell()]
  124:  const RunTimeOperandInfo* input_, const RunTimeOperandInfo* input_to_input_weights,   [in CheckInputTensorDimensions(), argument]
  304:  NN_CHECK(NumDimensions(input_) > 1);   [in Prepare()]
  305:  const uint32_t n_batch = SizeOfDimension(input_, 0);   [in Prepare()]
  306:  const uint32_t n_input = SizeOfDimension(input_, 1);   [in Prepare()]
  318:  input_, input_to_input_weights_, input_to_forget_weights_, input_to_cell_weights_,   [in Prepare()]
  330:  const Shape& inputShape = input_->shape();   [in Prepare()]
  953:  switch (input_->type) {   [in Eval()]
  955:  LSTMEvalFloat32(params_, GetBuffer<const float>(input_), input_->shape(),   [in Eval()]
  [all …]
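Prepare() in these LSTM-family operations derives its working sizes from input_'s dimensions after checking the rank: LSTM.cpp expects at least rank 2 laid out as [batch, input_size], while BidirectionalSequenceLSTM.cpp expects rank 3 and picks the time axis according to time_major. A trimmed sketch of those checks, with a stand-in TensorShape instead of RunTimeOperandInfo and the NN_CHECK macros:

    #include <cstdint>
    #include <vector>

    // Stand-in for the operand metadata; the real code reads these through
    // NumDimensions() / SizeOfDimension() on a RunTimeOperandInfo, guarded by NN_CHECK.
    struct TensorShape {
        std::vector<uint32_t> dimensions;
    };

    // LSTM-cell style input: rank >= 2, laid out as [batch, input_size].
    bool PrepareCellInput(const TensorShape& in, uint32_t* n_batch, uint32_t* n_input) {
        if (in.dimensions.size() <= 1) return false;  // NN_CHECK(NumDimensions(input_) > 1)
        *n_batch = in.dimensions[0];                  // SizeOfDimension(input_, 0)
        *n_input = in.dimensions[1];                  // SizeOfDimension(input_, 1)
        return true;
    }

    // Bidirectional sequence input: rank 3, time axis chosen by time_major.
    bool PrepareSequenceInput(const TensorShape& in, bool time_major,
                              uint32_t* max_time, uint32_t* n_batch, uint32_t* n_input) {
        if (in.dimensions.size() != 3) return false;  // NN_CHECK(NumDimensions(input_) == 3)
        *max_time = in.dimensions[time_major ? 0 : 1];
        *n_batch  = in.dimensions[time_major ? 1 : 0];
        *n_input  = in.dimensions[2];
        return true;
    }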
LSHProjection.cpp
   30:  input_ = GetInput(operation, operands, kInputTensor);   [in LSHProjection()]
  158:  DenseLshProjection<T>(hash_, input_, weight_, out_buf);   [in Eval()]
  162:  SparseLshProjection<T>(type_, hash_, input_, weight_, out_buf);   [in Eval()]
QuantizedLSTM.cpp
  218:  input_ = GetInput(operation, operands, kInputTensor);   [in QuantizedLSTMCell()]
  377:  SizeOfDimension(input_, 1) + SizeOfDimension(prevOutput_, 1)};   [in eval()]
  387:  concatTempShape.dimensions = {SizeOfDimension(input_, 0), getSizeOfDimension(weightsShape, 1)};   [in eval()]
  390:  activationTempShape.dimensions = {SizeOfDimension(input_, 0),   [in eval()]
  410:  GetBuffer<const uint8_t>(input_), convertShapeToDims(input_->shape()),   [in eval()]
Multinomial.h
   51:  RunTimeOperandInfo* input_;   [variable]
QuantizedLSTMTest.cpp
   76:  initializeInputData(inputOperandTypeParams[QuantizedLSTMCell::kInputTensor], &input_);   [in QuantizedLSTMOpModel()]
   96:  ASSERT_EQ(setInputTensor(&execution, QuantizedLSTMCell::kInputTensor, input_),   [in invoke()]
  156:  void setInput(const std::vector<uint8_t>& input) { input_ = input; }   [in setInput()]
  202:  std::vector<uint8_t> input_;   [member in android::nn::wrapper::QuantizedLSTMOpModel]
SVDF.h
   63:  const RunTimeOperandInfo* input_;   [variable]
LSHProjection.h
   58:  const RunTimeOperandInfo* input_;   [variable]
LSTM.h
  185:  const RunTimeOperandInfo* input_, const RunTimeOperandInfo* input_to_input_weights,
  207:  const RunTimeOperandInfo* input_;   [variable]
QuantizedLSTM.h
   52:  const RunTimeOperandInfo* input_;
RNN.h
   65:  const RunTimeOperandInfo* input_;   [variable]
/frameworks/opt/gamesdk/third_party/protobuf-3.0.0/src/google/protobuf/compiler/
parser.cc
  124:  : input_(NULL),   [in Parser()]
  138:  return input_->current().text == text;   [in LookingAt()]
  142:  return input_->current().type == token_type;   [in LookingAtType()]
  151:  input_->Next();   [in TryConsume()]
  178:  *output = input_->current().text;   [in ConsumeIdentifier()]
  179:  input_->Next();   [in ConsumeIdentifier()]
  190:  if (!io::Tokenizer::ParseInteger(input_->current().text,   [in ConsumeInteger()]
  196:  input_->Next();   [in ConsumeInteger()]
  221:  if (!io::Tokenizer::ParseInteger(input_->current().text, max_value,   [in ConsumeInteger64()]
  227:  input_->Next();   [in ConsumeInteger64()]
  [all …]
parser_unittest.cc
  111:  input_.reset(new io::Tokenizer(raw_input_.get(), &error_collector_));   [in SetupParser()]
  124:  parser_->Parse(input_.get(), &actual);   [in ExpectParsesTo()]
  125:  EXPECT_EQ(io::Tokenizer::TYPE_END, input_->current().type);   [in ExpectParsesTo()]
  145:  EXPECT_EQ(io::Tokenizer::TYPE_END, input_->current().type);   [in ExpectHasErrors()]
  153:  parser_->Parse(input_.get(), &file);   [in ExpectHasEarlyExitErrors()]
  167:  parser_->Parse(input_.get(), &file);   [in ExpectHasValidationErrors()]
  168:  EXPECT_EQ(io::Tokenizer::TYPE_END, input_->current().type);   [in ExpectHasValidationErrors()]
  182:  google::protobuf::scoped_ptr<io::Tokenizer> input_;   [member in google::protobuf::compiler::__anon458863de0111::ParserTest]
  195:  EXPECT_TRUE(parser_->Parse(input_.get(), NULL));   [in TEST_F()]
  205:  EXPECT_TRUE(parser_->Parse(input_.get(), NULL));   [in TEST_F()]
  [all …]
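Parser keeps the io::Tokenizer it consumes in input_, peeking at input_->current() and advancing with input_->Next() until the tokenizer reports TYPE_END; the unit tests build exactly that chain (raw input stream, Tokenizer, then Parser::Parse into a FileDescriptorProto). A minimal end-to-end sketch against the public protobuf headers; the error collector here is a bare-bones stand-in for the test fixture's error_collector_ member:

    #include <iostream>
    #include <string>
    #include <google/protobuf/compiler/parser.h>
    #include <google/protobuf/descriptor.pb.h>
    #include <google/protobuf/io/tokenizer.h>
    #include <google/protobuf/io/zero_copy_stream_impl_lite.h>

    // Minimal error collector that just prints to stderr.
    class StderrErrorCollector : public google::protobuf::io::ErrorCollector {
     public:
      void AddError(int line, google::protobuf::io::ColumnNumber column,
                    const std::string& message) override {
        std::cerr << line << ":" << column << ": " << message << std::endl;
      }
    };

    int main() {
      const std::string proto_text =
          "syntax = \"proto2\";\n"
          "message Foo { optional int32 bar = 1; }\n";

      google::protobuf::io::ArrayInputStream raw_input(
          proto_text.data(), static_cast<int>(proto_text.size()));
      StderrErrorCollector errors;
      google::protobuf::io::Tokenizer tokenizer(&raw_input, &errors);  // becomes Parser's input_

      google::protobuf::compiler::Parser parser;
      google::protobuf::FileDescriptorProto file;
      bool ok = parser.Parse(&tokenizer, &file);   // consumes tokens until TYPE_END
      return ok ? 0 : 1;
    }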
/frameworks/ml/nn/runtime/test/specs/V1_2/
quantized_lstm.mod.py
   25:  input_ = Input("input", ("TENSOR_QUANT8_ASYMM", (n_batch, n_input), 1 / 128, 128))   [variable]
   53:  input_,
   71:  input_: [166, 179, 50, 150],
  104:  input_ = Input("input",   [variable]
  179:  model = model.Operation("QUANTIZED_16BIT_LSTM", input_, input_to_input_weights,
  189:  input_: [166, 179],
/frameworks/opt/gamesdk/third_party/protobuf-3.0.0/src/google/protobuf/stubs/
bytestream.cc
  147:  return input_.size();   [in Available()]
  151:  return input_;   [in Peek()]
  155:  GOOGLE_DCHECK_LE(n, input_.size());   [in Skip()]
  156:  input_.remove_prefix(n);   [in Skip()]
bytestream.h
  297:  explicit ArrayByteSource(StringPiece s) : input_(s) {}   [in ArrayByteSource()]
  304:  StringPiece input_;
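ArrayByteSource keeps the unread remainder in its StringPiece input_: Available() is its size, Peek() returns it whole, and Skip() trims the front with remove_prefix(). A small usage sketch, assuming the protobuf stubs headers can be included directly:

    #include <google/protobuf/stubs/bytestream.h>
    #include <google/protobuf/stubs/stringpiece.h>

    int main() {
      google::protobuf::strings::ArrayByteSource source(
          google::protobuf::StringPiece("abcdef"));       // input_ starts as "abcdef"

      source.Skip(2);                                     // input_.remove_prefix(2) -> "cdef"
      google::protobuf::StringPiece rest = source.Peek(); // view of the unread bytes
      return (source.Available() == 4 && rest == "cdef") ? 0 : 1;
    }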