/external/tensorflow/tensorflow/core/kernels/
D | svd_op_gpu.cu.cc
    140  Tensor input_copy;  in RunSVD() local
    145  TensorShape({batch_size, m}), &input_copy),  in RunSVD()
    148  d.memcpy(input_copy.flat<Scalar>().data(), input_ptr,  in RunSVD()
    201  cfg2D, m, full_matrices_ ? m : p, input_copy.flat<Scalar>().data(),  in RunSVD()
    247  Tensor input_copy;  in PerformSVD_MgeqN() local
    252  solver->allocate_scoped_tensor(M.dtype(), input_shape, &input_copy),  in PerformSVD_MgeqN()
    255  OP_REQUIRES_OK_ASYNC(context, DoMatrixTranspose(device, M, &input_copy),  in PerformSVD_MgeqN()
    259  RunSVD(context, done, m, n, p, input_copy, S, U, V, std::move(solver));  in PerformSVD_MgeqN()
    274  Tensor input_copy;  in PerformSVD_MlessN() local
    278  {0}, DataTypeToEnum<Scalar>::value, M.shape(), &input_copy),  in PerformSVD_MlessN()
    [all …]
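All of the cuSolver-backed kernels in these hits follow the same rule: the dense solver routines factorize in place, so the op's input tensor is never handed to them directly. In RunSVD() the factorization runs on a scratch input_copy, and PerformSVD_MgeqN() first transposes M into that copy (line 255). Below is a minimal sketch of the allocate-and-copy step, assuming TensorFlow's internal kernel headers; MakeWorkingCopy is a hypothetical helper name, not a function in the file.

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"

namespace tensorflow {

// Hypothetical helper: allocate a scratch tensor and copy the op's input
// into it, so an in-place cuSolver routine can clobber the copy freely.
template <class Scalar>
Status MakeWorkingCopy(OpKernelContext* context, const Eigen::GpuDevice& d,
                       const Scalar* input_ptr, const TensorShape& shape,
                       Tensor* input_copy) {
  TF_RETURN_IF_ERROR(context->allocate_temp(DataTypeToEnum<Scalar>::value,
                                            shape, input_copy));
  // Device-to-device copy, enqueued on the device's stream like the
  // kernel's own d.memcpy() call at line 148.
  d.memcpy(input_copy->flat<Scalar>().data(), input_ptr,
           shape.num_elements() * sizeof(Scalar));
  return Status::OK();
}

}  // namespace tensorflow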
D | self_adjoint_eig_v2_op_gpu.cc
     98  Tensor input_copy;  in ComputeAsync() local
    102  {0}, DataTypeToEnum<Scalar>::value, input.shape(), &input_copy),  in ComputeAsync()
    108  if (!input.SharesBufferWith(input_copy)) {  in ComputeAsync()
    111  conj(device, input_copy.flat<Scalar>() /*out*/,  in ComputeAsync()
    114  device.memcpy(input_copy.flat<Scalar>().data(),  in ComputeAsync()
    127  auto input_copy_reshaped = input_copy.flat_inner_dims<Scalar, 3>();  in ComputeAsync()
    152  context, DoMatrixTranspose(device, input_copy, eigenvectors), done);  in ComputeAsync()
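The self-adjoint eigensolver adds one wrinkle to the copy step: complex inputs are conjugated into input_copy (line 111) rather than memcpy'd. Reading the row-major copy as column-major hands cuSolver the transpose, and for Hermitian A we have conj(A)^T = A, so conjugating first means the solver sees the intended matrix; the eigenvectors then come back column-major, which is what the DoMatrixTranspose at line 152 undoes. A hedged sketch of that branch, written with plain Eigen expressions instead of the kernel's conj() functor:

// Sketch only: `input` and `input_copy` are tensorflow::Tensor objects that
// no longer share a buffer, and `device` is the Eigen::GpuDevice.
if (Eigen::NumTraits<Scalar>::IsComplex) {
  // Complex input: materialize conj(input) in the working copy.
  input_copy.flat<Scalar>().device(device) =
      input.flat<Scalar>().conjugate();
} else {
  // Real input: a straight device-to-device copy is enough.
  device.memcpy(input_copy.flat<Scalar>().data(),
                input.flat<Scalar>().data(),
                input.NumElements() * sizeof(Scalar));
}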
D | determinant_op.cc
    172  Tensor input_copy;  in ComputeAsync() local
    176  {0}, DataTypeToEnum<Scalar>::value, input.shape(), &input_copy),  in ComputeAsync()
    178  if (!input.SharesBufferWith(input_copy)) {  in ComputeAsync()
    179  d.memcpy(input_copy.flat<Scalar>().data(), input.flat<Scalar>().data(),  in ComputeAsync()
    182  auto input_copy_reshaped = input_copy.template flat_inner_dims<Scalar, 3>();  in ComputeAsync()
    238  const_cast<const Tensor*>(&input_copy)  in ComputeAsync()
    317  Tensor input_copy;  in ComputeAsync() local
    321  {0}, DataTypeToEnum<Scalar>::value, input.shape(), &input_copy),  in ComputeAsync()
    323  if (!input.SharesBufferWith(input_copy)) {  in ComputeAsync()
    324  d.memcpy(input_copy.flat<Scalar>().data(), input.flat<Scalar>().data(),  in ComputeAsync()
    [all …]
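In determinant_op.cc the copy comes from forward_input_or_allocate_temp({0}, ...): when the runtime can prove the kernel holds the only reference to input 0, the input buffer itself is forwarded as the temporary and the SharesBufferWith test skips the memcpy; otherwise a fresh buffer is allocated and filled. The flat_inner_dims<Scalar, 3> view at line 182 then exposes the copy as a batch of matrices for the batched LU, which factorizes each matrix in place. A hypothetical sketch of the per-matrix device pointers such a batched call needs (ptrs, batch_size, and n are illustrative names, not the kernel's):

#include <cstdint>
#include <vector>

// Assumes input_copy_reshaped = input_copy.flat_inner_dims<Scalar, 3>() as
// at line 182: shape [batch_size, n, n], contiguous in device memory.
std::vector<Scalar*> ptrs(batch_size);
for (int64_t b = 0; b < batch_size; ++b) {
  // One device pointer per matrix; the batched getrf factorizes all of
  // them in place, clobbering the working copy rather than the op's input.
  ptrs[b] = input_copy_reshaped.data() + b * n * n;
}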
D | matrix_solve_op.cc
    175  Tensor input_copy;  in ComputeAsync() local
    183  input.shape(), &input_copy),  in ComputeAsync()
    186  DoMatrixTranspose(device, input, &input_copy), done);  in ComputeAsync()
    191  {0}, DataTypeToEnum<Scalar>::value, input.shape(), &input_copy),  in ComputeAsync()
    193  if (!input.SharesBufferWith(input_copy)) {  in ComputeAsync()
    194  device.memcpy(input_copy.flat<Scalar>().data(),  in ComputeAsync()
    199  auto input_copy_reshaped = input_copy.template flat_inner_dims<Scalar, 3>();  in ComputeAsync()
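matrix_solve_op.cc keeps two copy paths, which is why &input_copy shows up under two different allocation calls: with the op's adjoint_ attribute set, the kernel materializes a transposed copy up front (lines 183-186); otherwise it tries to forward the input buffer and falls back to a memcpy (lines 191-194). A sketch of that branch structure, assuming the surrounding ComputeAsync context (solver, device, done):

// Sketch, not the file verbatim: adjoint_ is the op attribute,
// DoMatrixTranspose comes from transpose_functor.h.
Tensor input_copy;
if (adjoint_) {
  // Adjoint requested: factorize a transposed working copy instead.
  OP_REQUIRES_OK_ASYNC(
      context,
      solver->allocate_scoped_tensor(DataTypeToEnum<Scalar>::value,
                                     input.shape(), &input_copy),
      done);
  OP_REQUIRES_OK_ASYNC(context,
                       DoMatrixTranspose(device, input, &input_copy), done);
} else {
  // Plain solve: forward the input buffer if possible, else copy it.
  OP_REQUIRES_OK_ASYNC(
      context,
      context->forward_input_or_allocate_temp(
          {0}, DataTypeToEnum<Scalar>::value, input.shape(), &input_copy),
      done);
  if (!input.SharesBufferWith(input_copy)) {
    device.memcpy(input_copy.flat<Scalar>().data(),
                  input.flat<Scalar>().data(),
                  input.NumElements() * sizeof(Scalar));
  }
}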
D | matrix_inverse_op.cc
    132  Tensor input_copy;  in ComputeAsync() local
    136  input.shape(), &input_copy),  in ComputeAsync()
    138  auto input_copy_reshaped = input_copy.template flat_inner_dims<Scalar, 3>();  in ComputeAsync()
    141  device.memcpy(input_copy.flat<Scalar>().data(),  in ComputeAsync()
    146  context, DoConjugateMatrixTranspose(device, input, &input_copy),  in ComputeAsync()
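matrix_inverse_op.cc is the conjugate-transpose variant of the same idea: for adjoint_ it fills the copy with DoConjugateMatrixTranspose (line 146), so the in-place inversion of the copy directly yields inv(A^H); otherwise it memcpys. Sketched under the same assumptions as the matrix_solve fragment above:

if (adjoint_) {
  // inv(A^H) requested: build A^H once, then invert the copy in place.
  OP_REQUIRES_OK_ASYNC(
      context, DoConjugateMatrixTranspose(device, input, &input_copy), done);
} else {
  // Plain inverse: seed the working copy with the input as-is.
  device.memcpy(input_copy.flat<Scalar>().data(),
                input.flat<Scalar>().data(),
                input.NumElements() * sizeof(Scalar));
}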
D | scatter_nd_op.cc
    295  const Tensor& input_copy = c->input(0);  in DoCompute() local
    296  copy(c->eigen_device<Device>(), params.flat<T>(), input_copy.flat<T>());  in DoCompute()
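The scatter_nd hit is the odd one out: despite the name, input_copy at line 295 is a const alias for input 0, and the copying runs the other way, into the output params, so the sparse updates can then be applied without touching the input. A sketch of that step with the kernel's copy functor (a dense-update functor in the file) spelled as a plain Eigen device assignment:

// Sketch: `c` is the OpKernelContext, `params` the already-allocated output.
const Tensor& input_copy = c->input(0);  // an alias, not yet a copy
// Seed the output with the original values...
params.flat<T>().device(c->eigen_device<Device>()) = input_copy.flat<T>();
// ...then scatter (indices, updates) into params on top of them.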
/external/v8/src/compiler/
D | register-allocator.cc
    1765  UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,  in MeetConstraintsBefore() local
    1769  data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);  in MeetConstraintsBefore()
    1783  UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,  in MeetConstraintsBefore() local
    1788  input_copy, *cur_input);  in MeetConstraintsBefore()
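The two V8 hits serve a different purpose: when MeetConstraintsBefore() finds an instruction input pinned to a fixed register, it does not constrain the virtual register globally. It keeps an unconstrained input_copy of the vreg (policy REGISTER_OR_SLOT) and inserts a gap move from that copy into the now-fixed input operand, so the value is forced into the required register only at the use. A sketch of the shape of that logic, not V8's exact code (the AllocateFixed call stands in for the in-place constraint rewrite):

if (cur_input->HasFixedPolicy()) {
  int input_vreg = cur_input->virtual_register();
  // Unconstrained stand-in for the original value: any register or slot.
  UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
                                input_vreg);
  // Rewrite the instruction's input in place to the fixed register...
  AllocateFixed(cur_input, instr_index, is_tagged);
  // ...and reconnect the value with a parallel move in the gap, as at
  // lines 1769/1788: input_copy -> *cur_input.
  data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
}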