/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
D | strqro.ll |
      8  define void @strqrox(fp128 %val64, i64 %base, i64 %offset) {
     11  store volatile fp128 %val64, fp128* %addr
     19  define void @strqrox_optsize(fp128 %val64, i64 %base, i64 %offset) minsize {
     22  store volatile fp128 %val64, fp128* %addr
     29  define void @strqrow(fp128 %val64, i64 %base, i32 %offset) {
     33  store volatile fp128 %val64, fp128* %addr
     41  define void @strqrow_optsize(fp128 %val64, i64 %base, i32 %offset) minsize {
     45  store volatile fp128 %val64, fp128* %addr
|
D | addsub.ll |
     21  %val64 = load i64, i64* @var_i64
     22  %newval64 = add i64 %val64, 52
     66  %val64 = load i64, i64* @var_i64
     67  %newval64 = add i64 %val64, 16773120 ; =0xfff000
     83  %val64 = load i64, i64* @var_i64
     84  %newval64 = sub i64 %val64, 52
    100  %val64 = load i64, i64* @var_i64
    101  %newval64 = sub i64 %val64, 16773120 ; =0xfff000
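The two immediates in these matches are deliberate: AArch64 ADD/SUB (immediate) encodes a 12-bit unsigned immediate, optionally shifted left by 12, so 52 (plain imm12) and 16773120 (0xfff000 == 0xfff << 12) each fold into a single instruction. A minimal sketch of that rule (the predicate name is hypothetical, not LLVM's):

    #include <cstdint>
    #include <cstdio>

    // Sketch, not LLVM's code: ADD/SUB (immediate) takes a 12-bit
    // unsigned immediate, optionally shifted left by 12.
    static bool isLegalAddSubImm(uint64_t imm) {
      if (imm <= 0xfffu) return true;                       // imm12
      return (imm & 0xfffu) == 0 && (imm >> 12) <= 0xfffu;  // imm12, LSL #12
    }

    int main() {
      std::printf("%d %d %d\n",
                  isLegalAddSubImm(52),        // 1
                  isLegalAddSubImm(16773120),  // 1
                  isLegalAddSubImm(0x1001));   // 0: would need two instructions
    }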
|
D | arm64-fp128.ll |
     69  %val64 = fptosi fp128 %val to i64
     70  store i64 %val64, i64* @var64
     84  %val64 = fptoui fp128 %val to i64
     85  store i64 %val64, i64* @var64
    100  %val64 = sitofp i64 %src64 to fp128
    101  store volatile fp128 %val64, fp128* @lhs
    116  %val64 = uitofp i64 %src64 to fp128
    117  store volatile fp128 %val64, fp128* @lhs
|
D | tst-br.ll |
     13  %val64 = load i64, i64* @var64
     27  %tbit2 = and i64 %val64, 32768
     33  %tbit3 = and i64 %val64, 4096
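Both masks here are single bits (32768 == 1 << 15, 4096 == 1 << 12), the shape that lets an AND feeding a conditional branch fold into AArch64's TBZ/TBNZ. A C++ sketch of the source-level pattern being tested:

    #include <cstdint>
    #include <cstdio>

    // Single-bit test feeding a branch: the TBZ/TBNZ-friendly shape.
    int main() {
      int64_t val64 = 0x9000;  // bits 15 and 12 set
      if (val64 & (INT64_C(1) << 15)) std::puts("bit 15 set");
      if (val64 & (INT64_C(1) << 12)) std::puts("bit 12 set");
    }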
|
D | misched-fusion-addr.ll |
     50  %val64 = add i64 %ext, 1
     51  store volatile i64 %val64, i64* @var_64bit
     62  %val64 = load volatile i64, i64* @var_64bit
     63  %ext = zext i64 %val64 to i128
|
D | bitfield.ll |
     83  define void @test_shifts(i32 %val32, i64 %val64) {
     98  %shift4 = ashr i64 %val64, 31
    102  %shift5 = lshr i64 %val64, 8
    106  %shift6 = shl i64 %val64, 63
    110  %shift7 = ashr i64 %val64, 63
    114  %shift8 = lshr i64 %val64, 63
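The ashr/lshr pairs by the same amount are the point of these matches: arithmetic shift right replicates the sign bit, logical shift right fills with zeros, so shifting by 63 isolates an i64's sign. A small C++ illustration:

    #include <cstdint>
    #include <cstdio>

    // ashr vs lshr on the same 64-bit value.
    int main() {
      int64_t val64 = INT64_MIN;  // only the sign bit set
      std::printf("ashr: %lld\n", (long long)(val64 >> 63));                     // -1
      std::printf("lshr: %llu\n", (unsigned long long)((uint64_t)val64 >> 63));  // 1
    }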
|
D | ldst-regoffset.ll |
    188  %val64 = load volatile i64, i64* @var_64bit
    189  store volatile i64 %val64, i64* %addr_uxtwN
    240  %val64 = load volatile float, float* @var_float
    241  store volatile float %val64, float* %addr_uxtwN
    293  %val64 = load volatile double, double* @var_double
    294  store volatile double %val64, double* %addr_uxtwN
    347  %val64 = load volatile fp128, fp128* %base
    348  store volatile fp128 %val64, fp128* %addr_uxtwN
|
D | floatdp_1source.ll |
    111  %val64 = load volatile double, double* @vardouble
    129  %val64to16 = fptrunc double %val64 to half
    133  %val64to32 = fptrunc double %val64 to float
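These matches exercise fptrunc, which lowers to a single FCVT on AArch64. Narrowing is a rounding operation, which a two-line C++ example makes visible:

    #include <cstdio>

    // fptrunc double -> float rounds: 0.1 has no exact binary
    // representation, and the double and float approximations differ.
    int main() {
      double val64 = 0.1;
      float  val32 = (float)val64;
      std::printf("%.17g\n%.17g\n", val64, (double)val32);
      // 0.10000000000000001
      // 0.10000000149011612
    }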
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/RISCV/ |
D | wide-mem.ll |
     18  @val64 = local_unnamed_addr global i64 2863311530, align 8
     23  ; RV32I-NEXT: lui a1, %hi(val64)
     24  ; RV32I-NEXT: lw a0, %lo(val64)(a1)
     25  ; RV32I-NEXT: addi a1, a1, %lo(val64)
     28  %1 = load i64, i64* @val64
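The CHECK lines pin down how RV32I, which has no 64-bit load, lowers load i64: two lw instructions, one at val64+0 and one at val64+4, each addressed through a %hi/%lo pair. A sketch of the memory view those two loads see, assuming little-endian layout as on RISC-V (the low word sits at offset 0):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      uint64_t val64 = 2863311530u;  // 0xaaaaaaaa, the global's initializer
      uint32_t lo, hi;
      std::memcpy(&lo, reinterpret_cast<unsigned char*>(&val64) + 0, 4);
      std::memcpy(&hi, reinterpret_cast<unsigned char*>(&val64) + 4, 4);
      std::printf("lo=0x%08x hi=0x%08x\n", lo, hi);  // lo=0xaaaaaaaa hi=0x00000000
    }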
|
/external/deqp-deps/SPIRV-Tools/source/comp/ |
D | bit_stream.cpp |
     57  uint64_t val64 = 0;  in ToU64() local
     58  std::memcpy(&val64, &val, sizeof(T));  in ToU64()
     59  return val64;  in ToU64()
     66  T FromU64(uint64_t val64) {  in FromU64() argument
     67  assert(sizeof(T) == 8 || (val64 >> (sizeof(T) * 8)) == 0);  in FromU64()
     70  std::memcpy(&val, &val64, sizeof(T));  in FromU64()
    161  uint64_t val64 = 0;  in ReadVariableWidthUnsigned() local
    162  if (!ReadVariableWidthInternal(reader, &val64, chunk_length, sizeof(T) * 8))  in ReadVariableWidthUnsigned()
    164  *val = static_cast<T>(val64);  in ReadVariableWidthUnsigned()
    165  assert(*val == val64);  in ReadVariableWidthUnsigned()
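A standalone sketch of the ToU64/FromU64 idiom these matches show (the swiftshader copy below is byte-identical): std::memcpy is the well-defined way to reinterpret a value's bits, and the assert in FromU64 refuses to silently drop set high bits when narrowing. The sizeof(T) == 8 branch also dodges an undefined 64-bit shift.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    template <typename T>
    uint64_t ToU64(T val) {
      static_assert(sizeof(T) <= 8, "type wider than 64 bits");
      uint64_t val64 = 0;
      std::memcpy(&val64, &val, sizeof(T));  // bit-cast without aliasing UB
      return val64;
    }

    template <typename T>
    T FromU64(uint64_t val64) {
      static_assert(sizeof(T) <= 8, "type wider than 64 bits");
      assert(sizeof(T) == 8 || (val64 >> (sizeof(T) * 8)) == 0);
      T val{};
      std::memcpy(&val, &val64, sizeof(T));
      return val;
    }

    int main() {
      float f = 1.5f;
      assert(FromU64<float>(ToU64(f)) == f);  // bits round-trip exactly
      // (on a little-endian host, where the bytes land in val64's low half)
    }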
|
/external/swiftshader/third_party/SPIRV-Tools/source/comp/ |
D | bit_stream.cpp |
     57  uint64_t val64 = 0;  in ToU64() local
     58  std::memcpy(&val64, &val, sizeof(T));  in ToU64()
     59  return val64;  in ToU64()
     66  T FromU64(uint64_t val64) {  in FromU64() argument
     67  assert(sizeof(T) == 8 || (val64 >> (sizeof(T) * 8)) == 0);  in FromU64()
     70  std::memcpy(&val, &val64, sizeof(T));  in FromU64()
    161  uint64_t val64 = 0;  in ReadVariableWidthUnsigned() local
    162  if (!ReadVariableWidthInternal(reader, &val64, chunk_length, sizeof(T) * 8))  in ReadVariableWidthUnsigned()
    164  *val = static_cast<T>(val64);  in ReadVariableWidthUnsigned()
    165  assert(*val == val64);  in ReadVariableWidthUnsigned()
|
/external/elfutils/libdwfl/ |
D | linux-core-attach.c |
    231  uint64_t val64 = read_8ubyte_unaligned_noncvt (desc + item->offset);  in core_set_initial_registers() local
    232  val64 = (elf_getident (core, NULL)[EI_DATA] == ELFDATA2MSB  in core_set_initial_registers()
    233  ? be64toh (val64) : le64toh (val64));  in core_set_initial_registers()
    234  pc = val64;  in core_set_initial_registers()
    273  uint64_t val64 = read_8ubyte_unaligned_noncvt (reg_desc);  in core_set_initial_registers() local
    274  reg_desc += sizeof val64;  in core_set_initial_registers()
    275  val64 = (elf_getident (core, NULL)[EI_DATA] == ELFDATA2MSB  in core_set_initial_registers()
    276  ? be64toh (val64) : le64toh (val64));  in core_set_initial_registers()
    277  assert (sizeof (*thread->unwound->regs) == sizeof val64);  in core_set_initial_registers()
    278  val = val64;  in core_set_initial_registers()
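A hypothetical stand-in for the pattern in core_set_initial_registers(): a 64-bit register slot in a core-file note is unaligned and stored in the file's byte order (EI_DATA), so it is read without conversion and then swapped to host order via be64toh/le64toh. The sketch below folds both steps into one bytewise loop; 'msb' stands in for the elf_getident(core, NULL)[EI_DATA] == ELFDATA2MSB check.

    #include <cstdint>
    #include <cstdio>

    // Read an unaligned 64-bit value stored in the file's byte order.
    static uint64_t read_reg64(const unsigned char *desc, bool msb) {
      uint64_t v = 0;
      for (int i = 0; i < 8; ++i)
        v = (v << 8) | (msb ? desc[i] : desc[7 - i]);
      return v;
    }

    int main() {
      const unsigned char note[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      std::printf("%016llx\n", (unsigned long long)read_reg64(note, true));
      // -> 0102030405060708, independent of the host's endianness
    }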
|
/external/llvm/test/CodeGen/AArch64/ |
D | addsub.ll |
     21  %val64 = load i64, i64* @var_i64
     22  %newval64 = add i64 %val64, 52
     66  %val64 = load i64, i64* @var_i64
     67  %newval64 = add i64 %val64, 16773120 ; =0xfff000
     83  %val64 = load i64, i64* @var_i64
     84  %newval64 = sub i64 %val64, 52
    100  %val64 = load i64, i64* @var_i64
    101  %newval64 = sub i64 %val64, 16773120 ; =0xfff000
|
D | arm64-fp128.ll |
     69  %val64 = fptosi fp128 %val to i64
     70  store i64 %val64, i64* @var64
     84  %val64 = fptoui fp128 %val to i64
     85  store i64 %val64, i64* @var64
    100  %val64 = sitofp i64 %src64 to fp128
    101  store volatile fp128 %val64, fp128* @lhs
    116  %val64 = uitofp i64 %src64 to fp128
    117  store volatile fp128 %val64, fp128* @lhs
|
D | tst-br.ll |
     13  %val64 = load i64, i64* @var64
     27  %tbit2 = and i64 %val64, 32768
     33  %tbit3 = and i64 %val64, 4096
|
D | bitfield.ll |
     83  define void @test_shifts(i32 %val32, i64 %val64) {
     98  %shift4 = ashr i64 %val64, 31
    102  %shift5 = lshr i64 %val64, 8
    106  %shift6 = shl i64 %val64, 63
    110  %shift7 = ashr i64 %val64, 63
    114  %shift8 = lshr i64 %val64, 63
|
D | ldst-regoffset.ll |
    188  %val64 = load volatile i64, i64* @var_64bit
    189  store volatile i64 %val64, i64* %addr_uxtwN
    240  %val64 = load volatile float, float* @var_float
    241  store volatile float %val64, float* %addr_uxtwN
    293  %val64 = load volatile double, double* @var_double
    294  store volatile double %val64, double* %addr_uxtwN
    347  %val64 = load volatile fp128, fp128* %base
    348  store volatile fp128 %val64, fp128* %addr_uxtwN
|
D | floatdp_1source.ll |
    111  %val64 = load volatile double, double* @vardouble
    129  %val64to16 = fptrunc double %val64 to half
    133  %val64to32 = fptrunc double %val64 to float
|
/external/deqp/framework/delibs/debase/ |
D | deFloat16.h |
     46  deFloat16 deFloat64To16 (double val64);
     47  deFloat16 deFloat64To16Round (double val64, deRoundingMode mode);
|
D | deFloat16.c |
    102  deFloat16 deFloat64To16 (double val64)  in deFloat64To16() argument
    113  x.f = val64;  in deFloat64To16()
    368  deFloat16 deFloat64To16Round (double val64, deRoundingMode mode)  in deFloat64To16Round() argument
    383  x.f = val64;  in deFloat64To16Round()
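A minimal sketch of the conversion deFloat64To16() performs: take the double's bit pattern (the 'x.f = val64' union trick, done here with memcpy), then repack sign/exponent/mantissa into the half's 1/5/10-bit layout. The sketch truncates the mantissa and only handles normal, in-range values; the real function also rounds (deFloat64To16Round takes an explicit deRoundingMode) and handles NaN, infinity, overflow and denormals.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static uint16_t float64To16Trunc(double val64) {
      uint64_t bits;
      std::memcpy(&bits, &val64, sizeof bits);              // the x.f trick
      uint16_t sign = (uint16_t)((bits >> 48) & 0x8000u);
      unsigned exp  = (unsigned)((bits >> 52) & 0x7ff) - 1023 + 15;  // rebias
      uint16_t mant = (uint16_t)((bits >> 42) & 0x3ffu);    // top 10 mantissa bits
      return (uint16_t)(sign | (exp << 10) | mant);
    }

    int main() {
      std::printf("0x%04x\n", float64To16Trunc(1.0));   // 0x3c00
      std::printf("0x%04x\n", float64To16Trunc(-2.0));  // 0xc000
    }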
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/MIR/AArch64/ |
D | target-flags.mir |
     13  %val64 = load i64, i64* @var_i64
     14  %newval64 = sub i64 %val64, 52
|
/external/llvm/test/CodeGen/MIR/AArch64/ |
D | target-flags.mir |
     13  %val64 = load i64, i64* @var_i64
     14  %newval64 = sub i64 %val64, 52
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AArch64/ |
D | elf-globaladdress.ll |
     24  %val64 = load i64, i64* @var64
     25  store volatile i64 %val64, i64* @var64
|
/external/llvm/test/MC/AArch64/ |
D | elf-globaladdress.ll |
     24  %val64 = load i64, i64* @var64
     25  store volatile i64 %val64, i64* @var64
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/NVPTX/ |
D | half.ll |
     51  %val64 = fpext half %val16 to double
     52  store double %val64, double addrspace(1)* %out
|