
Searched full:cuda (Results 1 – 25 of 1483) sorted by relevance


/external/llvm-project/clang/test/Driver/
cuda-bindings.cu
1 // Tests the bindings generated for a CUDA offloading target for different
6 // It parallels cuda-phases.cu test, but verifies whether output file is temporary or not.
10 // device side, which appends '-device-cuda-<triple>' suffix.
18 // No intermediary device files should have "-device-cuda..." in the name.
20 // RUN: %clang -target powerpc64le-ibm-linux-gnu -ccc-print-bindings --cuda-gpu-arch=sm_30 %s 2>&1 \
22 // BIN: # "nvptx64-nvidia-cuda" - "clang",{{.*}} output:
23 // BIN-NOT: cuda-bindings-device-cuda-nvptx64
24 // BIN: # "nvptx64-nvidia-cuda" - "NVPTX::Assembler",{{.*}} output:
25 // BIN-NOT: cuda-bindings-device-cuda-nvptx64
26 // BIN: # "nvptx64-nvidia-cuda" - "NVPTX::Linker",{{.*}} output:
[all …]
cuda-detect.cu
5 // Check that we properly detect CUDA installation.
7 // RUN: --sysroot=%S/no-cuda-there --cuda-path-ignore-env 2>&1 | FileCheck %s -check-prefix NOCUDA
9 // RUN: --sysroot=%S/no-cuda-there --cuda-path-ignore-env 2>&1 | FileCheck %s -check-prefix NOCUDA
11 // RUN: --sysroot=%S/no-cuda-there --cuda-path-ignore-env 2>&1 | FileCheck %s -check-prefix NOCUDA
13 // RUN: --sysroot=%S/no-cuda-there --cuda-path-ignore-env 2>&1 | FileCheck %s -check-prefix NOCUDA
17 // RUN: --sysroot=%S/Inputs/CUDA --cuda-path-ignore-env 2>&1 | FileCheck %s
19 // RUN: --sysroot=%S/Inputs/CUDA --cuda-path-ignore-env 2>&1 | FileCheck %s
22 // RUN: --cuda-path=%S/Inputs/CUDA/usr/local/cuda 2>&1 | FileCheck %s
24 // RUN: --cuda-path=%S/Inputs/CUDA/usr/local/cuda 2>&1 | FileCheck %s
26 // Check that we don't find a CUDA installation without libdevice ...
[all …]
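The RUN lines above exercise how the driver locates a CUDA installation. Outside the test suite, the same flags can be passed to clang directly; a minimal sketch, assuming a hypothetical axpy.cu and a toolkit under /usr/local/cuda-11.0 (both placeholders, not taken from this listing):

    # Point the driver at a specific toolkit, skip environment-based CUDA
    # detection, and link against the CUDA runtime.
    clang++ -x cuda axpy.cu \
        --cuda-gpu-arch=sm_60 \
        --cuda-path=/usr/local/cuda-11.0 \
        --cuda-path-ignore-env \
        -L/usr/local/cuda-11.0/lib64 -lcudart -o axpy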
cuda-version-check.cu
5 …/ RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_20 --cuda-path=%S/Inputs/CUDA/usr/l…
7 …RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_20 --cuda-path=%S/Inputs/CUDA_80/usr/…
9 …RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 --cuda-path=%S/Inputs/CUDA_80/usr/…
11 // Test version guess when no version.txt or cuda.h are found
12 …N: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 --cuda-path=%S/Inputs/CUDA-unknown/u…
15 …RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 --cuda-path=%S/Inputs/CUDA_102/usr…
17 // Unknown version with no version.txt but with version info present in cuda.h
18 …RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 --cuda-path=%S/Inputs/CUDA_111/usr…
20 // Make sure that we don't warn about CUDA version during C++ compilation.
21 // RUN: %clang --target=x86_64-linux -v -### -x c++ --cuda-gpu-arch=sm_60 \
[all …]
cuda-detect-path.cu
7 // RUN: env PATH=%S/Inputs/CUDA/usr/local/cuda/bin \
8 // RUN: %clang -v --target=i386-unknown-linux --sysroot=%S/no-cuda-there \
10 // RUN: env PATH=%S/Inputs/CUDA/usr/local/cuda/bin \
11 // RUN: %clang -v --target=i386-apple-macosx --sysroot=%S/no-cuda-there \
13 // RUN: env PATH=%S/Inputs/CUDA/usr/local/cuda/bin \
14 // RUN: %clang -v --target=x86_64-unknown-linux --sysroot=%S/no-cuda-there \
16 // RUN: env PATH=%S/Inputs/CUDA/usr/local/cuda/bin \
17 // RUN: %clang -v --target=x86_64-apple-macosx --sysroot=%S/no-cuda-there \
22 // RUN: env PATH=%S/Inputs/CUDA-symlinks/usr/bin \
23 // RUN: %clang -v --target=i386-unknown-linux --sysroot=%S/no-cuda-there \
[all …]
cuda-options.cu
1 // Tests CUDA compilation pipeline construction in Driver.
19 // Verify that --cuda-host-only disables device-side compilation, but doesn't
21 // RUN: %clang -### -target x86_64-linux-gnu --cuda-host-only %s 2>&1 \
25 // Verify that --cuda-device-only disables host-side compilation and linking.
26 // RUN: %clang -### -target x86_64-linux-gnu --cuda-device-only %s 2>&1 \
30 // Check that the last of --cuda-compile-host-device, --cuda-host-only, and
31 // --cuda-device-only wins.
33 // RUN: %clang -### -target x86_64-linux-gnu --cuda-device-only \
34 // RUN: --cuda-host-only %s 2>&1 \
38 // RUN: %clang -### -target x86_64-linux-gnu --cuda-compile-host-device \
[all …]
cuda-options-freebsd.cu
1 // Tests CUDA compilation pipeline construction in Driver.
19 // Verify that --cuda-host-only disables device-side compilation, but doesn't
21 // RUN: %clang -### -target x86_64-unknown-freebsd --cuda-host-only %s 2>&1 \
25 // Verify that --cuda-device-only disables host-side compilation and linking.
26 // RUN: %clang -### -target x86_64-unknown-freebsd --cuda-device-only %s 2>&1 \
30 // Check that the last of --cuda-compile-host-device, --cuda-host-only, and
31 // --cuda-device-only wins.
33 // RUN: %clang -### -target x86_64-unknown-freebsd --cuda-device-only \
34 // RUN: --cuda-host-only %s 2>&1 \
38 // RUN: %clang -### -target x86_64-unknown-freebsd --cuda-compile-host-device \
[all …]
thinlto.cu
9 // CHECK-COMPILE-ACTIONS: 2: compiler, {1}, ir, (host-cuda)
11 // CHECK-COMPILE-ACTIONS: 12: backend, {11}, lto-bc, (host-cuda)
16 // CHECK-COMPILELINK-ACTIONS: 0: input, "{{.*}}thinlto.cu", cuda, (host-cuda)
17 // CHECK-COMPILELINK-ACTIONS: 1: preprocessor, {0}, cuda-cpp-output
18 // CHECK-COMPILELINK-ACTIONS: 2: compiler, {1}, ir, (host-cuda)
19 // CHECK-COMPILELINK-ACTIONS: 3: input, "{{.*}}thinlto.cu", cuda, (device-cuda, sm_20)
20 // CHECK-COMPILELINK-ACTIONS: 4: preprocessor, {3}, cuda-cpp-output, (device-cuda, sm_20)
21 // CHECK-COMPILELINK-ACTIONS: 5: compiler, {4}, ir, (device-cuda, sm_20)
22 // CHECK-COMPILELINK-ACTIONS: 6: backend, {5}, assembler, (device-cuda, sm_20)
23 // CHECK-COMPILELINK-ACTIONS: 7: assembler, {6}, object, (device-cuda, sm_20)
[all …]
openmp-offload-gpu.c
14 // RUN: %clang -### -no-canonical-prefixes -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda \
25 // RUN: -fopenmp-targets=powerpc64le-ibm-linux-gnu,nvptx64-nvidia-cuda \
26 // RUN: -Xopenmp-target=nvptx64-nvidia-cuda -march=sm_35 %s 2>&1 \
36 // RUN: -fopenmp-targets=nvptx64-nvidia-cuda %s 2>&1 \
41 // RUN: -fopenmp-targets=nvptx64-nvidia-cuda %s 2>&1 \
52 // RUN: -fopenmp-targets=nvptx64-nvidia-cuda -save-temps %s 2>&1 \
56 // RUN: -fopenmp-targets=nvptx64-nvidia-cuda %s 2>&1 \
67 …g -### -target powerpc64le-unknown-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda \
80 …g -### -target powerpc64le-unknown-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda \
92 …g -### -target powerpc64le-unknown-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda \
[all …]
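The RUN lines above build OpenMP offloading pipelines for the nvptx64-nvidia-cuda target; the same flags work for an ordinary compile. A minimal sketch, assuming a hypothetical omp_offload.c (sm_35 is just an example architecture):

    # Offload OpenMP target regions to an NVIDIA GPU.
    clang -fopenmp=libomp \
        -fopenmp-targets=nvptx64-nvidia-cuda \
        -Xopenmp-target=nvptx64-nvidia-cuda -march=sm_35 \
        omp_offload.c -o omp_offload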
/external/tensorflow/tensorflow/tools/dockerfiles/partials/ubuntu/
devel-nvidia.partial.Dockerfile
2 ARG CUDA=11.0
3 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
4 # ARCH and CUDA are specified again because the FROM directive resets ARGs
7 ARG CUDA
18 cuda-command-line-tools-${CUDA/./-} \
19 libcublas-${CUDA/./-} \
20 libcublas-dev-${CUDA/./-} \
21 cuda-nvprune-${CUDA/./-} \
22 cuda-nvrtc-${CUDA/./-} \
23 cuda-nvrtc-dev-${CUDA/./-} \
[all …]
nvidia.partial.Dockerfile
2 ARG CUDA=11.0
3 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
4 # ARCH and CUDA are specified again because the FROM directive resets ARGs
7 ARG CUDA
19 cuda-command-line-tools-${CUDA/./-} \
20 libcublas-${CUDA/./-} \
21 cuda-nvrtc-${CUDA/./-} \
22 libcufft-${CUDA/./-} \
23 libcurand-${CUDA/./-} \
24 libcusolver-${CUDA/./-} \
[all …]
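Two patterns recur in the Dockerfile excerpts above: ARG CUDA is declared again after FROM because a FROM line resets build arguments, and ${CUDA/./-} rewrites the dotted CUDA version into the dashed form used in NVIDIA's apt package names. A minimal shell illustration of the substitution (values are examples only, not taken from this listing):

    CUDA=11.0
    UBUNTU_VERSION=18.04
    # ${var/pattern/replacement} replaces the first match: 11.0 -> 11-0
    echo "cuda-command-line-tools-${CUDA/./-}"                # cuda-command-line-tools-11-0
    echo "nvidia/cuda:${CUDA}-base-ubuntu${UBUNTU_VERSION}"   # nvidia/cuda:11.0-base-ubuntu18.04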
/external/clang/test/Driver/
cuda-detect.cu
5 // # Check that we properly detect CUDA installation.
7 // RUN: --sysroot=%S/no-cuda-there 2>&1 | FileCheck %s -check-prefix NOCUDA
9 // RUN: --sysroot=%S/Inputs/CUDA 2>&1 | FileCheck %s
11 // RUN: --cuda-path=%S/Inputs/CUDA/usr/local/cuda 2>&1 | FileCheck %s
14 // RUN: %clang -### -v --target=i386-unknown-linux --cuda-gpu-arch=sm_21 \
15 // RUN: --cuda-path=%S/Inputs/CUDA/usr/local/cuda %s 2>&1 \
18 // RUN: %clang -### -v --target=i386-unknown-linux --cuda-gpu-arch=sm_35 \
19 // RUN: --cuda-path=%S/Inputs/CUDA/usr/local/cuda %s 2>&1 \
22 // Verify that -nocudainc prevents adding include path to CUDA headers.
23 // RUN: %clang -### -v --target=i386-unknown-linux --cuda-gpu-arch=sm_35 \
[all …]
cuda-version-check.cu
5 // RUN: %clang -v -### --cuda-gpu-arch=sm_20 --sysroot=%S/Inputs/CUDA 2>&1 %s | \
7 // RUN: %clang -v -### --cuda-gpu-arch=sm_20 --sysroot=%S/Inputs/CUDA_80 2>&1 %s | \
9 // RUN: %clang -v -### --cuda-gpu-arch=sm_60 --sysroot=%S/Inputs/CUDA_80 2>&1 %s | \
12 // The installation at Inputs/CUDA is CUDA 7.0, which doesn't support sm_60.
13 // RUN: %clang -v -### --cuda-gpu-arch=sm_60 --sysroot=%S/Inputs/CUDA 2>&1 %s | \
17 // RUN: %clang -v -### --cuda-gpu-arch=sm_60 --cuda-gpu-arch=sm_35 \
18 // RUN: --sysroot=%S/Inputs/CUDA 2>&1 %s | \
22 // RUN: %clang -v -### --cuda-gpu-arch=sm_60 --cuda-gpu-arch=sm_61 \
23 // RUN: --sysroot=%S/Inputs/CUDA 2>&1 %s | \
28 // RUN: %clang -v -### --cuda-gpu-arch=sm_60 -nocudainc --sysroot=%S/Inputs/CUDA 2>&1 %s | \
[all …]
/external/tensorflow/tensorflow/stream_executor/gpu/
gpu_driver.h
16 // CUDA userspace driver library wrapper functionality.
50 // The order of parameters is generally kept symmetric with the underlying CUDA
54 // http://docs.nvidia.com/cuda/cuda-driver-api/
62 …// http://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__INITIALIZE.html#group__CUDA__INITIALIZ…
67 …// http://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__CTX.html#group__CUDA__CTX_1g4e84b109eb…
70 // Creates a new CUDA stream associated with the given context via
73 …// http://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__STREAM.html#group__CUDA__STREAM_1ga581…
77 // Destroys a CUDA stream associated with the given context.
80 …// http://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__STREAM.html#group__CUDA__STREAM_1g244c…
83 // CUDA events can explicitly disable event TSC retrieval for some presumed
[all …]
/external/tensorflow/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/
devel-gpu-ppc64le.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
41 cuda-command-line-tools-${CUDA/./-} \
42 libcublas-${CUDA/./-} \
43 libcublas-dev-${CUDA/./-} \
44 cuda-nvprune-${CUDA/./-} \
45 cuda-nvrtc-${CUDA/./-} \
46 cuda-nvrtc-dev-${CUDA/./-} \
[all …]
devel-gpu-ppc64le-jupyter.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
41 cuda-command-line-tools-${CUDA/./-} \
42 libcublas-${CUDA/./-} \
43 libcublas-dev-${CUDA/./-} \
44 cuda-nvprune-${CUDA/./-} \
45 cuda-nvrtc-${CUDA/./-} \
46 cuda-nvrtc-dev-${CUDA/./-} \
[all …]
/external/tensorflow/tensorflow/tools/dockerfiles/dockerfiles/
devel-gpu.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
41 cuda-command-line-tools-${CUDA/./-} \
42 libcublas-${CUDA/./-} \
43 libcublas-dev-${CUDA/./-} \
44 cuda-nvprune-${CUDA/./-} \
45 cuda-nvrtc-${CUDA/./-} \
46 cuda-nvrtc-dev-${CUDA/./-} \
[all …]
devel-gpu-jupyter.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
41 cuda-command-line-tools-${CUDA/./-} \
42 libcublas-${CUDA/./-} \
43 libcublas-dev-${CUDA/./-} \
44 cuda-nvprune-${CUDA/./-} \
45 cuda-nvrtc-${CUDA/./-} \
46 cuda-nvrtc-dev-${CUDA/./-} \
[all …]
gpu.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
42 cuda-command-line-tools-${CUDA/./-} \
43 libcublas-${CUDA/./-} \
44 cuda-nvrtc-${CUDA/./-} \
45 libcufft-${CUDA/./-} \
46 libcurand-${CUDA/./-} \
47 libcusolver-${CUDA/./-} \
[all …]
/external/tensorflow/third_party/gpus/cuda/
BUILD.tpl
44 "cuda/cuda_config.h",
45 ":cuda-include"
49 ".", # required to include cuda/cuda/cuda_config.h as cuda/config.h
50 "cuda/include",
56 srcs = ["cuda/lib/%{cudart_static_lib}"],
68 srcs = ["cuda/lib/%{cuda_driver_lib}"],
73 srcs = ["cuda/lib/%{cudart_lib}"],
74 data = ["cuda/lib/%{cudart_lib}"],
81 include_prefix = "third_party/gpus/cuda/include",
90 include_prefix = "third_party/gpus/cuda/include",
[all …]
BUILD.windows.tpl
41 # Provides CUDA headers for '#include "third_party/gpus/cuda/include/cuda.h"'
46 "cuda/cuda_config.h",
47 ":cuda-include"
51 ".", # required to include cuda/cuda/cuda_config.h as cuda/config.h
52 "cuda/include",
63 interface_library = "cuda/lib/%{cudart_static_lib}",
69 interface_library = "cuda/lib/%{cuda_driver_lib}",
75 interface_library = "cuda/lib/%{cudart_lib}",
82 include_prefix = "third_party/gpus/cuda/include",
91 include_prefix = "third_party/gpus/cuda/include",
[all …]
/external/tensorflow/third_party/gpus/
cuda_configure.bzl
1 """Repository rule for CUDA autoconfiguration.
5 * `TF_NEED_CUDA`: Whether to enable building with CUDA.
7 * `TF_CUDA_CLANG`: Whether to use clang as a cuda compiler.
14 * `TF_CUDA_PATHS`: The base paths to look for CUDA and cuDNN. Default is
15 `/usr/local/cuda,usr/`.
16 * `CUDA_TOOLKIT_PATH` (deprecated): The path to the CUDA toolkit. Default is
17 `/usr/local/cuda`.
18 * `TF_CUDA_VERSION`: The version of the CUDA toolkit. If this is blank, then
22 `/usr/local/cuda`.
23 * `TF_CUDA_COMPUTE_CAPABILITIES`: The CUDA compute capabilities. Default is
[all …]
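The docstring excerpted above lists the environment variables this repository rule reads. A minimal sketch of exporting them before configuring a TensorFlow build (values are illustrative; unset variables fall back to the documented defaults such as /usr/local/cuda):

    export TF_NEED_CUDA=1
    export TF_CUDA_CLANG=0
    export TF_CUDA_VERSION=11.0
    export TF_CUDA_COMPUTE_CAPABILITIES=6.0,7.0
    export TF_CUDA_PATHS=/usr/local/cuda,/usr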
/external/llvm-project/clang/test/OpenMP/
requires_codegen.cpp
1 …x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o…
2 …lang_cc1 -verify -fopenmp -x c++ -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda
3 …lang_cc1 -verify -fopenmp -x c++ -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda
4 …lang_cc1 -verify -fopenmp -x c++ -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda
5 …lang_cc1 -verify -fopenmp -x c++ -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda
6 …lang_cc1 -verify -fopenmp -x c++ -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda
7 …lang_cc1 -verify -fopenmp -x c++ -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda
8 …lang_cc1 -verify -fopenmp -x c++ -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda
9 …lang_cc1 -verify -fopenmp -x c++ -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda
10 …lang_cc1 -verify -fopenmp -x c++ -triple nvptx64-nvidia-cuda -fopenmp-targets=nvptx64-nvidia-cuda
[all …]
/external/llvm-project/clang/test/Index/
attributes-cuda.cu
1 // RUN: c-index-test -test-load-source all -x cuda %s | FileCheck %s
2 // RUN: c-index-test -test-load-source all -x cuda --cuda-host-only %s | FileCheck %s
3 // RUN: c-index-test -test-load-source all -x cuda --cuda-device-only %s | FileCheck %s
19 // CHECK: attributes-cuda.cu:5:30: FunctionDecl=f_device:5:30
20 // CHECK-NEXT: attributes-cuda.cu:5:16: attribute(device)
21 // CHECK: attributes-cuda.cu:6:30: FunctionDecl=f_global:6:30
22 // CHECK-NEXT: attributes-cuda.cu:6:16: attribute(global)
23 // CHECK: attributes-cuda.cu:7:32: VarDecl=g_constant:7:32 (Definition)
24 // CHECK-NEXT: attributes-cuda.cu:7:16: attribute(constant)
25 // CHECK: attributes-cuda.cu:8:32: VarDecl=g_shared:8:32 (Definition)
[all …]
/external/clang/test/Index/
attributes-cuda.cu
1 // RUN: c-index-test -test-load-source all -x cuda %s | FileCheck %s
2 // RUN: c-index-test -test-load-source all -x cuda --cuda-host-only %s | FileCheck %s
3 // RUN: c-index-test -test-load-source all -x cuda --cuda-device-only %s | FileCheck %s
19 // CHECK: attributes-cuda.cu:5:30: FunctionDecl=f_device:5:30
20 // CHECK-NEXT: attributes-cuda.cu:5:16: attribute(device)
21 // CHECK: attributes-cuda.cu:6:30: FunctionDecl=f_global:6:30
22 // CHECK-NEXT: attributes-cuda.cu:6:16: attribute(global)
23 // CHECK: attributes-cuda.cu:7:32: VarDecl=g_constant:7:32 (Definition)
24 // CHECK-NEXT: attributes-cuda.cu:7:16: attribute(constant)
25 // CHECK: attributes-cuda.cu:8:32: VarDecl=g_shared:8:32 (Definition)
[all …]
/external/fmtlib/test/cuda-test/
CMakeLists.txt
2 # `enable_language(CUDA)` instead of `find_package(CUDA)` and let the CMake
8 # of the CUDA projects are using those.
10 # This test relies on `find_package(CUDA)` in the parent CMake config.
13 # https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#cpp14-language-features
17 # In this test, we assume that the user is going to compile CUDA source code
21 # by providing another (non-CUDA) C++ source code.
23 # https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html
32 cuda_add_executable(fmt-in-cuda-test cuda-cpp14.cu cpp14.cc)
33 target_compile_features(fmt-in-cuda-test PRIVATE cxx_std_14)
35 # This part is for (non-CUDA) C++ code. MSVC can define incorrect
[all …]
