Searched refs:cudnn (Results 1 – 19 of 19) sorted by relevance

/external/tensorflow/tensorflow/stream_executor/cuda/
cuda_dnn.cc
993 const CudnnHandle& cudnn, float dropout, uint64 seed, in Create() argument
1006 cudnnDropoutGetStatesSize(cudnn.handle(), &state_sizes_in_bytes)); in Create()
1011 handle.get(), cudnn.handle(), dropout, state_memory.opaque(), in Create()
1038 const CudnnHandle& cudnn, int input_size, cudnnDataType_t data_type,
1058 CudnnRnnDescriptor(const CudnnHandle& cudnn, gpu::RnnDescriptor rnn_desc, in CudnnRnnDescriptor() argument
1089 const CudnnHandle& cudnn, int num_layers, int hidden_size, int input_size, in Create() argument
1097 CudnnDropoutDescriptor::Create(cudnn, dropout, seed, state_allocator)); in Create()
1151 cudnn.handle(), /*rnnDesc=*/rnn_desc.get(), in Create()
1160 cudnn.handle(), /*rnnDesc=*/rnn_desc.get(), in Create()
1191 cudnn, input_size, data_type, rnn_desc.get(), in Create()
[all …]
BUILD
386 "//conditions:default": "@local_config_cuda//cuda:cudnn",
/external/tensorflow/third_party/gpus/cuda/
BUILD.tpl
146 name = "cudnn",
154 hdrs = [":cudnn-include"],
155 include_prefix = "third_party/gpus/cudnn",
156 strip_include_prefix = "cudnn/include",
181 ":cudnn",
BUILD.windows.tpl
137 name = "cudnn",
144 hdrs = [":cudnn-include"],
145 include_prefix = "third_party/gpus/cudnn",
146 strip_include_prefix = "cudnn/include",
168 ":cudnn",
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
backend_configs.proto
20 // Backend config for a convolution that runs through cudnn.
22 // Opaque algorithm number of cudnn algorithm chosen for this conv.
26 // true, cudnn may choose not to use tensor cores, e.g. because the GPU or
/external/tensorflow/tensorflow/tools/ci_build/
Dockerfile.rbe.gpu
5 # In the Ubuntu 16.04 images, cudnn is placed in system paths. Move them to
7 RUN cp -P /usr/include/cudnn.h /usr/local/cuda/include
Dockerfile.gpu.ppc64le
5 # In the Ubuntu 16.04 images, cudnn is placed in system paths. Move them to
7 RUN cp -P /usr/include/cudnn.h /usr/local/cuda/include
Dockerfile.gpu
5 # In the Ubuntu 16.04 images, cudnn is placed in system paths. Move them to
7 RUN cp -P /usr/include/cudnn.h /usr/local/cuda/include
Dockerfile.rbe.cuda10.0-cudnn7-ubuntu14.04
36 LABEL com.nvidia.cudnn.version="${CUDNN_VERSION}"
Dockerfile.rbe.cuda9.0-cudnn7-ubuntu14.04
41 LABEL com.nvidia.cudnn.version="${CUDNN_VERSION}"
/external/tensorflow/
configure.py
925 cudnn = None
931 cudnn = cudnn_pattern.search(line)
936 if cudnn and len(cudnn.group(1)):
937 cudnn = convert_version_to_int(cudnn.group(1))
940 if cudnn is not None:
941 cudnn_ok = (cudnn == cudnn_ver)
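
The configure.py matches above are the script's cuDNN version check: it scans text line by line, pulls a version string out with a regex, normalizes it to an integer, and compares it to the requested version. A minimal sketch of that pattern follows; the regex and the convert_version_to_int helper are assumptions, since the script's real definitions are not shown in these results.

    import re

    # Hypothetical pattern; configure.py's actual cudnn_pattern is not shown here.
    cudnn_pattern = re.compile(r'libcudnn\.so\.([0-9]+(?:\.[0-9]+)*)')

    def convert_version_to_int(version_str):
        # Assumed helper: normalize "7.6.5" to 70605 so versions compare as ints.
        major, minor, patch = (version_str.split('.') + ['0', '0'])[:3]
        return int(major) * 10000 + int(minor) * 100 + int(patch)

    def cudnn_version_ok(lines, cudnn_ver):
        cudnn = None
        for line in lines:
            match = cudnn_pattern.search(line)
            if match and len(match.group(1)):
                cudnn = convert_version_to_int(match.group(1))
                break
        # Only compare when a version string was actually found.
        return cudnn is not None and cudnn == cudnn_ver

    print(cudnn_version_ok(['/usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.5'], 70605))  # True
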
RELEASE.md
4589 * GPU Pip wheels are built with cuda 7.5 and cudnn-v4, making them
4590 required for the binary releases. Lower versions of cuda/cudnn can
4714 roughly equivalent with native cudnn v2 performance. Improvements mostly due
/external/tensorflow/tensorflow/tools/tensorflow_builder/compat_checker/
sample_config.ini
5 cudnn = [range(7.0.0, 8.0.0)] key
/external/tensorflow/tensorflow/python/keras/layers/
cudnn_recurrent_test.py
464 def gru(cudnn=False, **kwargs): argument
465 layer_class = keras.layers.CuDNNGRU if cudnn else keras.layers.GRUV1
481 gru(cudnn=True),
484 gru(cudnn=True),
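
The cudnn_recurrent_test.py helper shows how the test covers both GRU implementations with one body: a small factory returns either the cuDNN-backed layer or the plain Keras layer depending on a flag. Here is a sketch of the same idea against the TF 1.x-style API; the test itself uses the internal GRUV1 alias, so plain GRU stands in for it, and the units argument is illustrative.

    from tensorflow import keras

    def gru(cudnn=False, **kwargs):
        # Choose the cuDNN-fused layer or the portable reference implementation;
        # both are exercised through identical call sites in the test.
        layer_class = keras.layers.CuDNNGRU if cudnn else keras.layers.GRU
        return layer_class(**kwargs)

    # Same parameters, two backends, for output-equivalence checks:
    fast = gru(cudnn=True, units=8)
    reference = gru(cudnn=False, units=8)
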
/external/tensorflow/third_party/gpus/
cuda_configure.bzl
582 "cudnn": _check_cuda_lib_params(
583 "cudnn",
666 config = find_cuda_config(repository_ctx, find_cuda_config_script, ["cuda", "cudnn"])
792 "%{cudnn_lib}": lib_name("cudnn", cpu_value),
805 filegroup(name="cudnn-include")
814 repository_ctx.file("cuda/cuda/include/cudnn.h")
824 repository_ctx.file("cuda/cuda/lib/%s" % lib_name("cudnn", cpu_value))
1102 cudnn_headers = ["cudnn.h"]
1119 cudnn_outs.append("cudnn/include/" + header)
1123 name = "cudnn-include",
[all …]
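
The cuda_configure.bzl matches revolve around one job: locating libcudnn and its header for the configured platform, and generating stub files when CUDA is absent. The lib_name("cudnn", cpu_value) call maps a library's base name to its platform-specific file name; below is an assumed simplification of what such a helper does, not the real Starlark implementation, which may handle version suffixes differently.

    def lib_name(base_name, cpu_value, version=""):
        # Assumed sketch of cuda_configure.bzl's lib_name helper: derive the
        # platform-specific file name for a library such as "cudnn".
        if cpu_value == "Windows":
            return "%s%s.dll" % (base_name, version)
        suffix = ".%s" % version if version else ""
        if cpu_value == "Darwin":
            return "lib%s%s.dylib" % (base_name, suffix)
        return "lib%s.so%s" % (base_name, suffix)

    print(lib_name("cudnn", "Linux", "7"))       # -> libcudnn.so.7
    print(lib_name("cudnn", "Windows", "64_7"))  # -> cudnn64_7.dll
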
/external/tensorflow/tensorflow/core/protobuf/
device_properties.proto
36 // cudnn 5.1)
/external/tensorflow/third_party/toolchains/preconfig/generate/
generate.bzl
32 base = "@cuda%s-cudnn%s-%s//image" % (cuda_version, cudnn_version, os)
/external/tensorflow/tensorflow/compiler/xla/
xla.proto
111 // If true, the GPU backend is free to use cudnn for HLO batch normalization
/external/tensorflow/tensorflow/
tensorflow.bzl
2608 "cudnn_dll_name": "cudnn{cudnn_version}.dll",
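
This last hit, in tensorflow.bzl, templates the Windows cuDNN DLL name from the configured version. The substitution is a plain string format; the version value below is illustrative of cuDNN 7 on 64-bit Windows.

    # The template from tensorflow.bzl, expanded the way the Windows build would.
    cudnn_dll_name = "cudnn{cudnn_version}.dll"
    print(cudnn_dll_name.format(cudnn_version="64_7"))  # -> cudnn64_7.dll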