
Searched refs:umin (Results 1 – 25 of 101) sorted by relevance


/external/llvm/test/CodeGen/AMDGPU/
umed3.ll
119 define internal i32 @umin(i32 %x, i32 %y) #2 {
176 %tmp0 = call i32 @umin(i32 %x, i32 %y)
178 %tmp2 = call i32 @umin(i32 %tmp1, i32 %z)
188 %tmp0 = call i32 @umin(i32 %x, i32 %y)
190 %tmp2 = call i32 @umin(i32 %tmp1, i32 %z)
200 %tmp0 = call i32 @umin(i32 %x, i32 %y)
202 %tmp2 = call i32 @umin(i32 %z, i32 %tmp1)
212 %tmp0 = call i32 @umin(i32 %x, i32 %y)
214 %tmp2 = call i32 @umin(i32 %z, i32 %tmp1)
224 %tmp0 = call i32 @umin(i32 %y, i32 %x)
[all …]
local-atomics64.ll
218 %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
228 %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
442 %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
451 %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
flat_atomics.ll
489 %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
499 %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
510 %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
521 %val = atomicrmw volatile umin i32 addrspace(4)* %gep, i32 %in seq_cst
530 %val = atomicrmw volatile umin i32 addrspace(4)* %out, i32 %in seq_cst
539 %val = atomicrmw volatile umin i32 addrspace(4)* %out, i32 %in seq_cst
549 %val = atomicrmw volatile umin i32 addrspace(4)* %ptr, i32 %in seq_cst
559 %val = atomicrmw volatile umin i32 addrspace(4)* %ptr, i32 %in seq_cst
flat_atomics_i64.ll
489 %tmp0 = atomicrmw volatile umin i64 addrspace(4)* %gep, i64 %in seq_cst
499 %tmp0 = atomicrmw volatile umin i64 addrspace(4)* %gep, i64 %in seq_cst
510 %tmp0 = atomicrmw volatile umin i64 addrspace(4)* %gep, i64 %in seq_cst
521 %tmp0 = atomicrmw volatile umin i64 addrspace(4)* %gep, i64 %in seq_cst
530 %tmp0 = atomicrmw volatile umin i64 addrspace(4)* %out, i64 %in seq_cst
539 %tmp0 = atomicrmw volatile umin i64 addrspace(4)* %out, i64 %in seq_cst
549 %tmp0 = atomicrmw volatile umin i64 addrspace(4)* %ptr, i64 %in seq_cst
559 %tmp0 = atomicrmw volatile umin i64 addrspace(4)* %ptr, i64 %in seq_cst
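Most of the hits in this directory are atomicrmw umin operations: atomically replace the value in memory with the unsigned minimum of the old value and the operand, returning the old value. As a rough sketch of those semantics only (not code from the listed tests, names are illustrative), the same effect can be written in C++ as a compare-exchange loop:

    #include <atomic>
    #include <cstdint>

    // Illustrative equivalent of "atomicrmw umin i32* %p, i32 %v seq_cst":
    // atomically store min(old, v) into p and return the old value.
    uint32_t atomic_umin(std::atomic<uint32_t>& p, uint32_t v) {
        uint32_t old = p.load(std::memory_order_seq_cst);
        // If the stored value is already <= v, the minimum is unchanged.
        while (old > v &&
               !p.compare_exchange_weak(old, v, std::memory_order_seq_cst)) {
            // compare_exchange_weak reloads 'old' on failure; retry.
        }
        return old;
    }

Targets with a native atomic unsigned-min (such as the AMDGPU DS and buffer atomics exercised above) can lower the operation to a single instruction; others typically expand it to a compare-exchange loop much like this one.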
/external/llvm/test/MC/AArch64/
neon-max-min.s
53 umin v0.8b, v1.8b, v2.8b
54 umin v0.16b, v1.16b, v2.16b
55 umin v0.4h, v1.4h, v2.4h
56 umin v0.8h, v1.8h, v2.8h
57 umin v0.2s, v1.2s, v2.2s
58 umin v0.4s, v1.4s, v2.4s
/external/llvm/test/Transforms/IndVarSimplify/
backedge-on-min-max.ll
235 %umin.cmp = icmp ult i32 %a_len, %n
236 %umin = select i1 %umin.cmp, i32 %a_len, i32 %n
237 %entry.cond = icmp ult i32 5, %umin
253 %be.cond = icmp ult i32 %idx.inc, %umin
263 %umin.cmp = icmp ult i32 %a_len, %n
264 %umin = select i1 %umin.cmp, i32 %a_len, i32 %n
265 %entry.cond = icmp ult i32 5, %umin
281 %be.cond = icmp ult i32 %idx.inc, %umin
291 %umin.cmp = icmp ult i32 42, %n
292 %umin = select i1 %umin.cmp, i32 42, i32 %n
[all …]
/external/llvm/test/CodeGen/AArch64/
minmax.ll
28 ; CHECK: umin
52 ; CHECK: umin
68 ; CHECK: umin
99 ; CHECK-NOT: umin
100 ; The icmp is used by two instructions, so don't produce a umin node.
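The minmax.ll checks above exercise the compare-plus-select idiom that instruction selection folds into a umin node, and the CHECK-NOT case records the restriction from the test comment: the fold is skipped when the comparison has a second user. A rough C++ illustration of the two shapes (function names are made up, and the scalar form stands in for the vector code in the actual test):

    #include <cstdint>

    // Single-use compare: candidate for folding into a umin node.
    uint32_t select_umin(uint32_t x, uint32_t y) {
        return x < y ? x : y;
    }

    // The compare result has an extra use (the case the comment on the
    // last hit describes), so the compare/select pair is kept as-is.
    uint32_t select_umin_extra_use(uint32_t x, uint32_t y, bool &lt) {
        lt = x < y;
        return lt ? x : y;
    }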
arm64-vmax.ll
188 ;CHECK: umin.8b
191 %tmp3 = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
197 ;CHECK: umin.16b
200 %tmp3 = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
206 ;CHECK: umin.4h
209 %tmp3 = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
215 ;CHECK: umin.8h
218 %tmp3 = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
224 ;CHECK: umin.2s
227 %tmp3 = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
[all …]
/external/llvm/unittests/IR/
ConstantRangeTest.cpp
454 EXPECT_EQ(Full.umin(Full), Full); in TEST_F()
455 EXPECT_EQ(Full.umin(Empty), Empty); in TEST_F()
456 EXPECT_EQ(Full.umin(Some), ConstantRange(APInt(16, 0), APInt(16, 0xaaa))); in TEST_F()
457 EXPECT_EQ(Full.umin(Wrap), Full); in TEST_F()
458 EXPECT_EQ(Empty.umin(Empty), Empty); in TEST_F()
459 EXPECT_EQ(Empty.umin(Some), Empty); in TEST_F()
460 EXPECT_EQ(Empty.umin(Wrap), Empty); in TEST_F()
461 EXPECT_EQ(Empty.umin(One), Empty); in TEST_F()
462 EXPECT_EQ(Some.umin(Some), Some); in TEST_F()
463 EXPECT_EQ(Some.umin(Wrap), ConstantRange(APInt(16, 0), APInt(16, 0xaaa))); in TEST_F()
[all …]
/external/llvm/lib/IR/
ConstantRange.cpp
783 ConstantRange::umin(const ConstantRange &Other) const { in umin() function in ConstantRange
788 APInt NewL = APIntOps::umin(getUnsignedMin(), Other.getUnsignedMin()); in umin()
789 APInt NewU = APIntOps::umin(getUnsignedMax(), Other.getUnsignedMax()) + 1; in umin()
831 APInt umin = APIntOps::umin(Other.getUnsignedMax(), getUnsignedMax()); in binaryAnd() local
832 if (umin.isAllOnesValue()) in binaryAnd()
834 return ConstantRange(APInt::getNullValue(getBitWidth()), umin + 1); in binaryAnd()
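The ConstantRange.cpp hits show the core of ConstantRange::umin: the result's unsigned minimum is the smaller of the two inputs' minimums, and its exclusive upper bound is the smaller of the two maximums plus one. A standalone sketch of that bound arithmetic on plain 64-bit integers, assuming non-empty, non-wrapping ranges (the real method also handles the full, empty, and wrapped cases covered by the unit tests above):

    #include <algorithm>
    #include <cstdint>

    // Half-open unsigned range [Lo, Hi) standing in for a non-wrapping
    // ConstantRange; illustration only.
    struct URange {
        uint64_t Lo; // inclusive unsigned minimum
        uint64_t Hi; // exclusive bound, i.e. unsigned maximum + 1
    };

    // Mirrors ConstantRange::umin: for x in A and y in B, min(x, y) lies in
    // [min(A.Lo, B.Lo), min(A.Hi - 1, B.Hi - 1) + 1).
    URange range_umin(const URange &A, const URange &B) {
        uint64_t NewL = std::min(A.Lo, B.Lo);
        uint64_t NewU = std::min(A.Hi - 1, B.Hi - 1) + 1;
        return URange{NewL, NewU};
    }

binaryAnd uses the same quantity from the other side: x & y can never exceed umin(x, y), so the unsigned minimum of the two maximums, plus one, serves as the result's exclusive upper bound.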
/external/llvm/test/CodeGen/ARM/
atomic-op.ll
124 %11 = atomicrmw umin i32* %val2, i32 16 monotonic
133 %12 = atomicrmw umin i32* %val2, i32 %uneg monotonic
166 %0 = atomicrmw umin i16* %val, i16 16 monotonic
175 %1 = atomicrmw umin i16* %val, i16 %uneg monotonic
207 %0 = atomicrmw umin i8* %val, i8 16 monotonic
216 %1 = atomicrmw umin i8* %val, i8 %uneg monotonic
atomicrmw_minmax.ll
19 %old = atomicrmw umin i32* %ptr, i32 %val monotonic
/external/libhevc/common/arm64/
ihevc_deblk_luma_horz.s
232 umin v18.8b, v20.8b , v30.8b
248 umin v18.8b, v21.8b , v16.8b
283 umin v18.8b, v19.8b , v30.8b
309 umin v18.8b, v20.8b , v30.8b
397 umin v18.8b, v21.8b , v16.8b
423 umin v18.8b, v19.8b , v30.8b
ihevc_deblk_luma_vert.s
228 umin v21.8b, v22.8b , v31.8b
259 umin v26.8b, v20.8b , v21.8b
265 umin v19.8b, v0.8b , v30.8b
295 umin v16.8b, v26.8b , v30.8b
425 umin v16.8b, v2.8b , v27.8b
436 umin v1.8b, v0.8b , v30.8b
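The libhevc deblocking filters above use the AArch64 vector umin instruction directly: a lane-wise unsigned minimum across eight byte lanes. For reference, the same operation is exposed to C/C++ as the NEON intrinsic vmin_u8 from <arm_neon.h>; the function below is only an illustration, not libhevc code, and its name and parameters are made up:

    #include <arm_neon.h>

    // Equivalent of "umin v18.8b, v20.8b, v30.8b": per-lane unsigned
    // minimum of two 8x8-bit vectors (illustration, not libhevc code).
    uint8x8_t clamp_to_threshold(uint8x8_t delta, uint8x8_t tc) {
        return vmin_u8(delta, tc);
    }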
/external/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/
llvm.amdgcn.buffer.atomic.ll
35 ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.umin(
38 %orig = call i32 @llvm.amdgcn.buffer.atomic.umin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
95 declare i32 @llvm.amdgcn.buffer.atomic.umin(i32, <4 x i32>, i32, i32, i1) #0
llvm.amdgcn.image.atomic.ll
35 ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.image.atomic.umin.i32(
38 …%orig = call i32 @llvm.amdgcn.image.atomic.umin.i32(i32 %data, i32 %addr, <8 x i32> %rsrc, i1 0, i…
111 declare i32 @llvm.amdgcn.image.atomic.umin.i32(i32, i32, <8 x i32>, i1, i1, i1) #0
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
atomic_op.ll
90 %13 = atomicrmw umin i32* %val2, i32 1 monotonic
95 %14 = atomicrmw umin i32* %val2, i32 10 monotonic
/external/llvm/test/CodeGen/X86/
pr5145.ll
24 %4 = atomicrmw umin i8* @sc8, i8 8 acquire
atomic_op.ll
91 %13 = atomicrmw umin i32* %val2, i32 1 monotonic
96 %14 = atomicrmw umin i32* %val2, i32 10 monotonic
atomic-minmax-i6432.ll
32 %4 = atomicrmw umin i64* @sc64, i64 8 acquire
/external/swiftshader/third_party/LLVM/lib/Support/
ConstantRange.cpp
628 APInt umin = APIntOps::umin(Other.getUnsignedMax(), getUnsignedMax()); in binaryAnd() local
629 if (umin.isAllOnesValue()) in binaryAnd()
631 return ConstantRange(APInt::getNullValue(getBitWidth()), umin + 1); in binaryAnd()
/external/llvm/include/llvm/IR/
ConstantRange.h
279 ConstantRange umin(const ConstantRange &Other) const;
/external/mesa3d/src/gallium/state_trackers/d3d1x/d3d1xshader/defs/
opcodes.txt
85 umin
/external/llvm/test/CodeGen/NVPTX/
atomics.ll
132 %ret = atomicrmw umin i32* %subr, i32 %val seq_cst
139 %ret = atomicrmw umin i64* %subr, i64 %val seq_cst
/external/llvm/test/CodeGen/SystemZ/
atomicrmw-minmax-02.ll
118 %res = atomicrmw umin i16 *%src, i16 %b seq_cst
205 %res = atomicrmw umin i16 *%src, i16 1 seq_cst
