
Searched refs: smin (results 1 – 25 of 135, sorted by relevance)


/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AArch64/SVE/
smin.s
  10: smin z0.b, z0.b, #-128
  16: smin z31.b, z31.b, #127
  22: smin z0.h, z0.h, #-128
  28: smin z31.h, z31.h, #127
  34: smin z0.s, z0.s, #-128
  40: smin z31.s, z31.s, #127
  46: smin z0.d, z0.d, #-128
  52: smin z31.d, z31.d, #127
  58: smin z31.b, p7/m, z31.b, z31.b
  64: smin z31.h, p7/m, z31.h, z31.h
  [all …]
smin-diagnostics.s
  3: smin z0.b, z0.b, #-129
  8: smin z31.b, z31.b, #128
  13: smin z0.b, p8/m, z0.b, z0.b
  23: smin z31.d, z31.d, #127
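Note: the valid and invalid operands in these two files bracket the immediate form of SVE smin, which encodes a signed 8-bit value; the p8/m case is rejected separately because predicated forms only encode governing predicates p0-p7. A minimal C++ sketch of the immediate range check (isValidSminImmediate is a hypothetical helper, not an LLVM API):

  #include <cstdint>
  #include <cstdio>

  // Hypothetical range check mirroring what the assembler must enforce
  // for the immediate form of SVE smin: the operand encodes a signed
  // 8-bit value, so anything outside [-128, 127] is a diagnostic.
  bool isValidSminImmediate(int64_t imm) {
    return imm >= INT8_MIN && imm <= INT8_MAX;
  }

  int main() {
    std::printf("%d %d %d %d\n",
                isValidSminImmediate(-128),  // 1: accepted (smin.s line 10)
                isValidSminImmediate(127),   // 1: accepted (smin.s line 16)
                isValidSminImmediate(-129),  // 0: rejected (line 3 above)
                isValidSminImmediate(128));  // 0: rejected (line 8 above)
  }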
/external/llvm/test/CodeGen/AMDGPU/
smed3.ll
  120: define internal i32 @smin(i32 %x, i32 %y) #2 {
  177: %tmp0 = call i32 @smin(i32 %x, i32 %y)
  179: %tmp2 = call i32 @smin(i32 %tmp1, i32 %z)
  189: %tmp0 = call i32 @smin(i32 %x, i32 %y)
  191: %tmp2 = call i32 @smin(i32 %tmp1, i32 %z)
  201: %tmp0 = call i32 @smin(i32 %x, i32 %y)
  203: %tmp2 = call i32 @smin(i32 %z, i32 %tmp1)
  213: %tmp0 = call i32 @smin(i32 %x, i32 %y)
  215: %tmp2 = call i32 @smin(i32 %z, i32 %tmp1)
  225: %tmp0 = call i32 @smin(i32 %y, i32 %x)
  [all …]
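For context: smed3.ll tests a signed median-of-three built from the scalar @smin helper defined at line 120 (together with a matching @smax). A brief C++ sketch of one standard min/max formulation of med3 that such call chains reduce to (a sketch of the identity, not the exact IR in the test):

  #include <cassert>

  // Scalar stand-ins for the @smin/@smax helpers used by smed3.ll.
  static int smin(int x, int y) { return x < y ? x : y; }
  static int smax(int x, int y) { return x > y ? x : y; }

  // One standard min/max formulation of a signed median of three; the
  // smin call chains matched above are pieces of patterns like this.
  static int smed3(int x, int y, int z) {
    return smax(smin(x, y), smin(smax(x, y), z));
  }

  int main() {
    assert(smed3(1, 2, 3) == 2);
    assert(smed3(3, 1, 2) == 2);
    assert(smed3(2, 3, 1) == 2);
  }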
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
smed3.ll
  122: define internal i32 @smin(i32 %x, i32 %y) #2 {
  179: %tmp0 = call i32 @smin(i32 %x, i32 %y)
  181: %tmp2 = call i32 @smin(i32 %tmp1, i32 %z)
  191: %tmp0 = call i32 @smin(i32 %x, i32 %y)
  193: %tmp2 = call i32 @smin(i32 %tmp1, i32 %z)
  203: %tmp0 = call i32 @smin(i32 %x, i32 %y)
  205: %tmp2 = call i32 @smin(i32 %z, i32 %tmp1)
  215: %tmp0 = call i32 @smin(i32 %x, i32 %y)
  217: %tmp2 = call i32 @smin(i32 %z, i32 %tmp1)
  227: %tmp0 = call i32 @smin(i32 %y, i32 %x)
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/AArch64/
vector-reduce.ll
  139: ; COST-LABEL: smin.i8.v8i8
  140: … cost of 157 for instruction: %r = call i8 @llvm.experimental.vector.reduce.smin.i8.v8i8(<8 x i8> %v)
  141: ; CODE-LABEL: smin.i8.v8i8
  143: define i8 @smin.i8.v8i8(<8 x i8> %v) {
  144: %r = call i8 @llvm.experimental.vector.reduce.smin.i8.v8i8(<8 x i8> %v)
  148: ; COST-LABEL: smin.i8.v16i8
  149: … cost of 388 for instruction: %r = call i8 @llvm.experimental.vector.reduce.smin.i8.v16i8(<16 x i8> …
  150: ; CODE-LABEL: smin.i8.v16i8
  152: define i8 @smin.i8.v16i8(<16 x i8> %v) {
  153: %r = call i8 @llvm.experimental.vector.reduce.smin.i8.v16i8(<16 x i8> %v)
  [all …]
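The intrinsic being costed here reduces a vector to the signed minimum of its lanes. A scalar C++ model of that semantics (the semantics only; the CODE checks in the test match the lowered shuffle-and-smin tree, not a loop):

  #include <algorithm>
  #include <array>
  #include <cstddef>
  #include <cstdint>

  // Scalar model of @llvm.experimental.vector.reduce.smin.i8.v8i8:
  // the signed minimum across all lanes of the input vector.
  int8_t reduce_smin_v8i8(const std::array<int8_t, 8> &v) {
    int8_t r = v[0];
    for (std::size_t i = 1; i < v.size(); ++i)
      r = std::min(r, v[i]);
    return r;
  }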
/external/capstone/suite/MC/AArch64/
neon-max-min.s.cs
  14: 0x20,0x6c,0x22,0x0e = smin v0.8b, v1.8b, v2.8b
  15: 0x20,0x6c,0x22,0x4e = smin v0.16b, v1.16b, v2.16b
  16: 0x20,0x6c,0x62,0x0e = smin v0.4h, v1.4h, v2.4h
  17: 0x20,0x6c,0x62,0x4e = smin v0.8h, v1.8h, v2.8h
  18: 0x20,0x6c,0xa2,0x0e = smin v0.2s, v1.2s, v2.2s
  19: 0x20,0x6c,0xa2,0x4e = smin v0.4s, v1.4s, v2.4s
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/AArch64/
neon-max-min.s
  39: smin v0.8b, v1.8b, v2.8b
  40: smin v0.16b, v1.16b, v2.16b
  41: smin v0.4h, v1.4h, v2.4h
  42: smin v0.8h, v1.8h, v2.8h
  43: smin v0.2s, v1.2s, v2.2s
  44: smin v0.4s, v1.4s, v2.4s
/external/llvm/test/MC/AArch64/
neon-max-min.s
  39: smin v0.8b, v1.8b, v2.8b
  40: smin v0.16b, v1.16b, v2.16b
  41: smin v0.4h, v1.4h, v2.4h
  42: smin v0.8h, v1.8h, v2.8h
  43: smin v0.2s, v1.2s, v2.2s
  44: smin v0.4s, v1.4s, v2.4s
/external/antlr/tool/src/main/java/org/antlr/analysis/
DFA.java
  501: int smin = Label.MAX_CHAR_VALUE + 1;
  508: if ( label.getAtom()<smin ) {
  509: smin = label.getAtom();
  520: if ( lmin<smin && lmin>=Label.MIN_CHAR_VALUE ) {
  521: smin = labels.getMinElement();
  531: smin = Label.MIN_CHAR_VALUE;
  535: min.set(s.stateNumber, Utils.integer((char)smin));
  538: if ( smax<0 || smin>Label.MAX_CHAR_VALUE || smin<0 ) {
  549: int smin = min.get(s.stateNumber);
  551: Vector<Integer> stateTransitions = new Vector<Integer>(smax-smin+1);
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
aarch64-minmaxv.ll
  12: declare i8 @llvm.experimental.vector.reduce.smin.i8.v16i8(<16 x i8>)
  13: declare i16 @llvm.experimental.vector.reduce.smin.i16.v8i16(<8 x i16>)
  14: declare i32 @llvm.experimental.vector.reduce.smin.i32.v4i32(<4 x i32>)
  74: %r = call i8 @llvm.experimental.vector.reduce.smin.i8.v16i8(<16 x i8> %arr.load)
  82: %r = call i16 @llvm.experimental.vector.reduce.smin.i16.v8i16(<8 x i16> %arr.load)
  90: %r = call i32 @llvm.experimental.vector.reduce.smin.i32.v4i32(<4 x i32> %arr.load)
  206: declare i16 @llvm.experimental.vector.reduce.smin.i16.v16i16(<16 x i16>)
  210: ; CHECK: smin [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  213: %r = call i16 @llvm.experimental.vector.reduce.smin.i16.v16i16(<16 x i16> %arr.load)
  217: declare i32 @llvm.experimental.vector.reduce.smin.i32.v16i32(<16 x i32>)
  [all …]
minmax.ll
  12: ; CHECK: smin
  36: ; CHECK: smin
  88: ; CHECK: smin
  89: ; CHECK: smin
  90: ; CHECK: smin
  91: ; CHECK: smin
minmax-of-minmax.ll
  6: ; 4 min/max flavors (smin/smax/umin/umax) *
  13: ; CHECK-NEXT: smin v0.4s, v0.4s, v1.4s
  14: ; CHECK-NEXT: smin v1.4s, v1.4s, v2.4s
  15: ; CHECK-NEXT: smin v0.4s, v0.4s, v1.4s
  29: ; CHECK-NEXT: smin v0.4s, v0.4s, v1.4s
  30: ; CHECK-NEXT: smin v1.4s, v2.4s, v1.4s
  31: ; CHECK-NEXT: smin v0.4s, v0.4s, v1.4s
  45: ; CHECK-NEXT: smin v2.4s, v1.4s, v2.4s
  46: ; CHECK-NEXT: smin v0.4s, v0.4s, v1.4s
  47: ; CHECK-NEXT: smin v0.4s, v0.4s, v0.4s
  [all …]
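Each CHECK sequence above computes a min of two overlapping vector mins. Since smin is associative and commutative, such trees are just a three-way minimum; a brute-force C++ check of that identity over a small range (an illustration, not part of the test):

  #include <algorithm>
  #include <cassert>

  // smin(smin(a, b), smin(b, c)) equals the three-way minimum, because
  // min is associative/commutative and the duplicated b is harmless.
  int main() {
    for (int a = -4; a <= 4; ++a)
      for (int b = -4; b <= 4; ++b)
        for (int c = -4; c <= 4; ++c)
          assert(std::min(std::min(a, b), std::min(b, c)) ==
                 std::min({a, b, c}));
  }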
arm64-vmax.ll
  127: ;CHECK: smin.8b
  130: %tmp3 = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  136: ;CHECK: smin.16b
  139: %tmp3 = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
  145: ;CHECK: smin.4h
  148: %tmp3 = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  154: ;CHECK: smin.8h
  157: %tmp3 = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
  163: ;CHECK: smin.2s
  166: %tmp3 = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/IndVarSimplify/
backedge-on-min-max.ll
  9: %smin.cmp = icmp slt i32 %a_len, %n
  10: %smin = select i1 %smin.cmp, i32 %a_len, i32 %n
  11: %entry.cond = icmp slt i32 0, %smin
  27: %be.cond = icmp slt i32 %idx.inc, %smin
  37: %smin.cmp = icmp slt i32 %a_len, %n
  38: %smin = select i1 %smin.cmp, i32 %a_len, i32 %n
  39: %entry.cond = icmp slt i32 0, %smin
  55: %be.cond = icmp slt i32 %idx.inc, %smin
  65: %smin.cmp = icmp slt i32 42, %n
  66: %smin = select i1 %smin.cmp, i32 42, i32 %n
  [all …]
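These matches show the canonical select-based smin idiom that IndVarSimplify has to recognize in loop bounds: a signed compare feeding a select. A C++ analogue of the IR at lines 9-10 above (variable names follow the IR; the function name is illustrative):

  // Signed min spelled as compare-plus-select, the form the pass
  // reasons about before any dedicated smin intrinsic is involved.
  int smin_of(int a_len, int n) {
    bool smin_cmp = a_len < n;    // %smin.cmp = icmp slt i32 %a_len, %n
    return smin_cmp ? a_len : n;  // %smin = select i1 %smin.cmp, ...
  }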
/external/llvm/test/Transforms/IndVarSimplify/
backedge-on-min-max.ll
  9: %smin.cmp = icmp slt i32 %a_len, %n
  10: %smin = select i1 %smin.cmp, i32 %a_len, i32 %n
  11: %entry.cond = icmp slt i32 0, %smin
  27: %be.cond = icmp slt i32 %idx.inc, %smin
  37: %smin.cmp = icmp slt i32 %a_len, %n
  38: %smin = select i1 %smin.cmp, i32 %a_len, i32 %n
  39: %entry.cond = icmp slt i32 0, %smin
  55: %be.cond = icmp slt i32 %idx.inc, %smin
  65: %smin.cmp = icmp slt i32 42, %n
  66: %smin = select i1 %smin.cmp, i32 42, i32 %n
  [all …]
/external/llvm/test/CodeGen/AArch64/
minmax.ll
  12: ; CHECK: smin
  36: ; CHECK: smin
  88: ; CHECK: smin
  89: ; CHECK: smin
  90: ; CHECK: smin
  91: ; CHECK: smin
arm64-vmax.ll
  127: ;CHECK: smin.8b
  130: %tmp3 = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  136: ;CHECK: smin.16b
  139: %tmp3 = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
  145: ;CHECK: smin.4h
  148: %tmp3 = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  154: ;CHECK: smin.8h
  157: %tmp3 = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
  163: ;CHECK: smin.2s
  166: %tmp3 = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  [all …]
/external/ltp/testcases/realtime/func/matrix_mult/
matrix_mult.c
  195: long smin = 0, smax = 0, cmin = 0, cmax = 0, delta = 0;
  232: smin = stats_min(&sdat);
  235: printf("Min: %ld us\n", smin);
  299: printf("Min: %.4f\n", (float)smin / cmin);
/external/llvm/unittests/IR/
ConstantRangeTest.cpp
  472: EXPECT_EQ(Full.smin(Full), Full);
  473: EXPECT_EQ(Full.smin(Empty), Empty);
  474: EXPECT_EQ(Full.smin(Some), ConstantRange(APInt(16, (uint64_t)INT16_MIN),
  476: EXPECT_EQ(Full.smin(Wrap), Full);
  477: EXPECT_EQ(Empty.smin(Empty), Empty);
  478: EXPECT_EQ(Empty.smin(Some), Empty);
  479: EXPECT_EQ(Empty.smin(Wrap), Empty);
  480: EXPECT_EQ(Empty.smin(One), Empty);
  481: EXPECT_EQ(Some.smin(Some), Some);
  482: EXPECT_EQ(Some.smin(Wrap), ConstantRange(APInt(16, (uint64_t)INT16_MIN),
  [all …]
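These assertions exercise ConstantRange::smin, the signed minimum of two integer ranges. For plain non-wrapping intervals the rule is simple, as in this C++ sketch (plain ints stand in for APInt, closed intervals stand in for LLVM's half-open ranges, and the Empty/Full/Wrap cases the test covers are out of scope here):

  #include <algorithm>
  #include <utility>

  // For x in [a.lo, a.hi] and y in [b.lo, b.hi], smin(x, y) ranges
  // over [min(a.lo, b.lo), min(a.hi, b.hi)]: the result can never
  // exceed either upper bound, and both endpoints are achievable.
  using Interval = std::pair<int, int>; // closed [lo, hi]

  Interval smin(Interval a, Interval b) {
    return {std::min(a.first, b.first), std::min(a.second, b.second)};
  }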
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
smin-icmp.ll
  4: ; If we have an smin feeding a signed or equality icmp that shares an
  5: ; operand with the smin, the compare should always be folded.
  10: ; smin(X, Y) == X --> X <= Y
  66: ; smin(X, Y) >= X --> X <= Y
  122: ; smin(X, Y) != X --> X > Y
  178: ; smin(X, Y) < X --> X > Y
  234: ; smin(X, Y) <= X --> true
  284: ; smin(X, Y) > X --> false
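The fold table in these comments can be checked exhaustively over a small domain. A C++ brute-force verification of all six rules (an illustration of why the folds are sound, not part of the test file):

  #include <algorithm>
  #include <cassert>

  // With m = smin(x, y), each icmp against the shared operand x
  // collapses to a compare of x and y (or to a constant), exactly
  // as the InstCombine comments above state.
  int main() {
    for (int x = -8; x <= 8; ++x)
      for (int y = -8; y <= 8; ++y) {
        int m = std::min(x, y);
        assert((m == x) == (x <= y)); // smin(X, Y) == X --> X <= Y
        assert((m >= x) == (x <= y)); // smin(X, Y) >= X --> X <= Y
        assert((m != x) == (x > y));  // smin(X, Y) != X --> X > Y
        assert((m < x) == (x > y));   // smin(X, Y) <  X --> X > Y
        assert(m <= x);               // smin(X, Y) <= X --> true
        assert(!(m > x));             // smin(X, Y) >  X --> false
      }
  }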
/external/swiftshader/third_party/llvm-7.0/llvm/unittests/IR/
ConstantRangeTest.cpp
  529: EXPECT_EQ(Full.smin(Full), Full);
  530: EXPECT_EQ(Full.smin(Empty), Empty);
  531: EXPECT_EQ(Full.smin(Some), ConstantRange(APInt(16, (uint64_t)INT16_MIN),
  533: EXPECT_EQ(Full.smin(Wrap), Full);
  534: EXPECT_EQ(Empty.smin(Empty), Empty);
  535: EXPECT_EQ(Empty.smin(Some), Empty);
  536: EXPECT_EQ(Empty.smin(Wrap), Empty);
  537: EXPECT_EQ(Empty.smin(One), Empty);
  538: EXPECT_EQ(Some.smin(Some), Some);
  539: EXPECT_EQ(Some.smin(Wrap), ConstantRange(APInt(16, (uint64_t)INT16_MIN),
  [all …]
/external/mksh/src/
misc.c
  767: const unsigned char *smin)
  775: if (s > smin && s <= se)
  830: if (do_gmatch(s, se, p, pe, smin))
  849: do_gmatch(s, se, prest, pe, smin))
  854: if (do_gmatch(s, srest, psub, pnext - 2, smin) &&
  855: (do_gmatch(srest, se, prest, pe, smin) ||
  857: do_gmatch(srest, se, p - 2, pe, smin))))
  876: do_gmatch(s, se, prest, pe, smin))
  882: if (do_gmatch(s, srest, psub, pnext - 2, smin) &&
  883: do_gmatch(srest, se, prest, pe, smin))
  [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
vector-reduce-smin.ll
  78: %1 = call i64 @llvm.experimental.vector.reduce.smin.i64.v2i64(<2 x i64> %a0)
  200: %1 = call i64 @llvm.experimental.vector.reduce.smin.i64.v4i64(<4 x i64> %a0)
  378: %1 = call i64 @llvm.experimental.vector.reduce.smin.i64.v8i64(<8 x i64> %a0)
  685: %1 = call i64 @llvm.experimental.vector.reduce.smin.i64.v16i64(<16 x i64> %a0)
  737: %1 = call i32 @llvm.experimental.vector.reduce.smin.i32.v4i32(<4 x i32> %a0)
  809: %1 = call i32 @llvm.experimental.vector.reduce.smin.i32.v8i32(<8 x i32> %a0)
  899: %1 = call i32 @llvm.experimental.vector.reduce.smin.i32.v16i32(<16 x i32> %a0)
  1022: %1 = call i32 @llvm.experimental.vector.reduce.smin.i32.v32i32(<32 x i32> %a0)
  1073: %1 = call i16 @llvm.experimental.vector.reduce.smin.i16.v8i16(<8 x i16> %a0)
  1141: %1 = call i16 @llvm.experimental.vector.reduce.smin.i16.v16i16(<16 x i16> %a0)
  [all …]
/external/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/
llvm.amdgcn.buffer.atomic.ll
  27: ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(
  30: %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  94: declare i32 @llvm.amdgcn.buffer.atomic.smin(i32, <4 x i32>, i32, i32, i1) #0
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/
llvm.amdgcn.buffer.atomic.ll
  27: ;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(
  30: %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  94: declare i32 @llvm.amdgcn.buffer.atomic.smin(i32, <4 x i32>, i32, i32, i1) #0
