    Searched full:usub (Results 1 - 25 of 60)


  /external/llvm/test/CodeGen/AMDGPU/
usubo.ll 5 declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
6 declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
13 %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) nounwind
14 %val = extractvalue { i64, i1 } %usub, 0
15 %carry = extractvalue { i64, i1 } %usub, 1
28 %usub = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b) nounwind
29 %val = extractvalue { i32, i1 } %usub, 0
30 %carry = extractvalue { i32, i1 } %usub, 1
    [all...]
  /external/llvm/test/CodeGen/Generic/
overflow.ll 113 ;; usub
117 %usub = tail call { i8, i1 } @llvm.usub.with.overflow.i8(i8 %a, i8 %b)
118 %cmp = extractvalue { i8, i1 } %usub, 1
119 %usub.result = extractvalue { i8, i1 } %usub, 0
120 %X = select i1 %cmp, i8 %usub.result, i8 42
124 declare { i8, i1 } @llvm.usub.with.overflow.i8(i8, i8) nounwind readnone
128 %usub = tail call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %a, i16 %b)
    [all...]
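
Both CodeGen tests above follow the same shape: call @llvm.usub.with.overflow, extract the difference (index 0) and the overflow bit (index 1), then branch or select on the bit. A minimal C-level sketch of that pattern, assuming the type-generic __builtin_sub_overflow available in Clang and GCC (the function name and values here are illustrative):

    #include <stdio.h>

    /* Mirrors the value/carry/select sequence in overflow.ll. */
    static unsigned char sub_or_default(unsigned char a, unsigned char b) {
        unsigned char diff;
        /* Typically lowered to { i8, i1 } @llvm.usub.with.overflow.i8. */
        int wrapped = __builtin_sub_overflow(a, b, &diff);
        return wrapped ? diff : 42;   /* same select as the test: wrapped result or 42 */
    }

    int main(void) {
        printf("%u\n", (unsigned)sub_or_default(1, 2));  /* 1 - 2 wraps, so the wrapped diff (255) is returned */
        return 0;
    }
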
  /external/llvm/test/Transforms/GVN/
2011-07-07-MatchIntrinsicExtract.ll 20 %usub = tail call %0 @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
21 %usub.0 = extractvalue %0 %usub, 0
80 declare %0 @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
  /external/libdrm/tests/
drmtest.c 65 const char *usub, *dnode; local
82 usub = udev_device_get_subsystem(parent);
84 if (!usub || (strcmp(usub, "pci") != 0))
  /external/eigen/Eigen/src/SparseLU/
SparseLU_copy_to_ucol.h 79 mem = memXpand<IndexVector>(glu.usub, glu.nzumax, nextu, USUB, glu.num_expansions);
87 glu.usub(nextu) = perm_r(irow); // Unlike the L part, the U part is stored in its final order
SparseLU_Structs.h 62 * (xusub,ucol,usub): ucol[*] stores the numerical values of
64 * subscript of nonzero ucol[k] is stored in usub[k].
74 typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType;
88 IndexVector usub; // row indices of U columns in ucol member in struct:Eigen::internal::LU_GlobalLU_t
SparseLU_Memory.h 184 || (expand<IndexVector> (glu.usub, glu.nzumax, 0, 1, num_expansions)<0) )
192 } while (!glu.lusup.size() || !glu.ucol.size() || !glu.lsub.size() || !glu.usub.size());
213 if (memtype == USUB)
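
SparseLU_Structs.h above describes the U factor as parallel arrays: ucol[k] holds the numerical value of a nonzero, usub[k] holds its row subscript, and (per the same header) xusub marks where each column's entries begin. A tiny hedged sketch of that layout in C, with made-up names and data:

    #include <stdio.h>

    /* Hypothetical miniature of the (xusub, usub, ucol) layout: column j of U
     * occupies entries xusub[j] .. xusub[j+1]-1; usub[k] is the row index of
     * the value stored in ucol[k]. The numbers are illustrative only. */
    int main(void) {
        int    xusub[] = {0, 2, 3};          /* col 0 has 2 nonzeros, col 1 has 1 */
        int    usub[]  = {0, 3, 1};          /* row indices */
        double ucol[]  = {1.5, -2.0, 4.25};  /* numerical values */

        for (int j = 0; j < 2; ++j)
            for (int k = xusub[j]; k < xusub[j + 1]; ++k)
                printf("U(%d,%d) = %g\n", usub[k], j, ucol[k]);
        return 0;
    }
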
  /external/llvm/test/CodeGen/ARM/
intrinsics-overflow.ll 31 %sadd = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
56 declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) #3
  /external/llvm/test/CodeGen/X86/
sub-with-overflow.ll 28 %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
48 declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32)
xaluo.ll 275 %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
287 %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
468 %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
479 %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
640 %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
657 %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
747 declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
748 declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
  /external/clang/test/CodeGen/
builtins-multiprecision.c 101 // CHECK: %{{.+}} = {{.*}} call { i8, i1 } @llvm.usub.with.overflow.i8(i8 %x, i8 %y)
104 // CHECK: %{{.+}} = {{.*}} call { i8, i1 } @llvm.usub.with.overflow.i8(i8 %{{.+}}, i8 %carryin)
120 // CHECK: %{{.+}} = {{.*}} call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %x, i16 %y)
123 // CHECK: %{{.+}} = {{.*}} call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %{{.+}}, i16 %carryin)
138 // CHECK: %{{.+}} = {{.*}} call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
141 // CHECK: %{{.+}} = {{.*}} call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %carryin)
156 // CHECK: %{{.+}} = {{.*}} call { [[UL]], i1 } @llvm.usub.with.overflow.[[UL]]([[UL]] %x, [[UL]] %y)
159 // CHECK: %{{.+}} = {{.*}} call { [[UL]], i1 } @llvm.usub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %carryin)
175 // CHECK: %{{.+}} = {{.*}} call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %x, i64 %y)
178 // CHECK: %{{.+}} = {{.*}} call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %{{.+}}, i64 %carryin)
    [all...]
unsigned-overflow.c 28 // CHECK-NEXT: [[T3:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[T1]], i64 [[T2]])
86 // CHECK-NEXT: [[T3:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[T1]], i32 [[T2]])
builtins-overflow.c 47 // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
209 // CHECK: %{{.+}} = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
218 // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.usub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
227 // CHECK: %{{.+}} = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
unsigned-promotion.c 52 // CHECKU-NOT: llvm.usub
117 // CHECKU-NOT: llvm.usub
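
The builtins-multiprecision.c and builtins-overflow.c checks above show the two front-end routes into this intrinsic: __builtin_usub_overflow becomes a single llvm.usub.with.overflow call, while the borrow-chaining __builtin_subcb family emits two (one for x - y and one for the incoming borrow, hence the %carryin operand in the CHECK lines). A hedged sketch, assuming Clang's documented overflow and multiprecision builtins:

    #include <stdio.h>

    int main(void) {
        unsigned diff;
        /* Single llvm.usub.with.overflow.i32 call. */
        if (__builtin_usub_overflow(1u, 2u, &diff))
            printf("borrow set, wrapped diff = %u\n", diff);

        /* Borrow-in / borrow-out form, as in builtins-multiprecision.c;
         * Clang lowers this to two llvm.usub.with.overflow.i8 calls. */
        unsigned char borrow_out;
        unsigned char d = __builtin_subcb(0, 1, /*carryin=*/0, &borrow_out);
        printf("d = %u, borrow_out = %u\n", (unsigned)d, (unsigned)borrow_out);
        return 0;
    }
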
  /external/autotest/client/site_tests/graphics_GpuReset/src/
gpureset.c 115 const char *usub, *dnode; local
130 usub = udev_device_get_subsystem(parent);
132 if (!usub || (strcmp(usub, "pci") != 0))
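
Both drmtest.c and gpureset.c use the usub variable for the same libudev check: walk from the DRM device to its parent and require that the parent's subsystem string is "pci". A hedged standalone sketch (link with -ludev; the sysfs path is a placeholder, the real tests derive the device from an opened DRM file descriptor):

    #include <stdio.h>
    #include <string.h>
    #include <libudev.h>

    int main(void) {
        struct udev *udev = udev_new();
        if (!udev)
            return 1;

        /* Illustrative path; card0 may not exist on a given machine. */
        struct udev_device *dev =
            udev_device_new_from_syspath(udev, "/sys/class/drm/card0");
        if (dev) {
            struct udev_device *parent = udev_device_get_parent(dev);
            const char *usub = parent ? udev_device_get_subsystem(parent) : NULL;
            printf("%s\n", (!usub || strcmp(usub, "pci") != 0)
                               ? "not a PCI-attached GPU" : "PCI GPU found");
            udev_device_unref(dev);   /* parent is owned by dev; no separate unref */
        }
        udev_unref(udev);
        return 0;
    }
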
  /external/compiler-rt/test/ubsan/TestCases/Integer/
usub-overflow.cpp 15 // CHECK-SUB_I32: usub-overflow.cpp:[[@LINE-1]]:22: runtime error: unsigned integer overflow: 1 - 2 cannot be represented in type 'unsigned int'
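
The ubsan test above expects a runtime diagnostic when 1 - 2 is evaluated in unsigned arithmetic under -fsanitize=unsigned-integer-overflow. A minimal reproduction (the original test is C++, but plain C shows the same report):

    /* Build with: clang -fsanitize=unsigned-integer-overflow usub_demo.c */
    #include <stdio.h>

    int main(void) {
        unsigned a = 1, b = 2;
        unsigned r = a - b;   /* UBSan: unsigned integer overflow: 1 - 2 cannot be represented */
        printf("%u\n", r);    /* the value still wraps to UINT_MAX; the sanitizer only reports it */
        return 0;
    }
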
  /external/llvm/test/Transforms/ConstProp/
overflow-ops.ll 5 declare {i8, i1} @llvm.usub.with.overflow.i8(i8, i8)
35 ;; usub
40 %t = call {i8, i1} @llvm.usub.with.overflow.i8(i8 4, i8 2)
49 %t = call {i8, i1} @llvm.usub.with.overflow.i8(i8 4, i8 6)
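
Both calls in overflow-ops.ll fold at compile time: 4 - 2 fits in i8, so the overflow bit folds to false, while 4 - 6 does not, so the value wraps to 254 and the bit folds to true. The same arithmetic checked at the C level, again assuming __builtin_sub_overflow:

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        uint8_t r;
        int ovf;

        ovf = __builtin_sub_overflow((uint8_t)4, (uint8_t)2, &r);
        printf("4 - 2 -> value %d, overflow %d\n", r, ovf);   /* 2, 0 */

        ovf = __builtin_sub_overflow((uint8_t)4, (uint8_t)6, &r);
        printf("4 - 6 -> value %d, overflow %d\n", r, ovf);   /* 254, 1 */
        return 0;
    }
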
  /external/llvm/test/CodeGen/AArch64/
arm64-xaluo.ll 173 %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
185 %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
345 %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
356 %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
523 %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
540 %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
665 declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
666 declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
  /external/llvm/test/Transforms/InstSimplify/
call.ll 4 declare {i8, i1} @llvm.usub.with.overflow.i8(i8 %a, i8 %b)
26 %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 %V)
  /prebuilts/go/darwin-x86/pkg/bootstrap/src/bootstrap/compile/internal/big/
float.go 1250 func (z *Float) usub(x, y *Float) { func
1253 // eventually uadd (and usub) should be optimized
    [all...]
  /prebuilts/go/darwin-x86/src/cmd/compile/internal/big/
float.go 1247 func (z *Float) usub(x, y *Float) { func
1250 // eventually uadd (and usub) should be optimized
    [all...]
  /prebuilts/go/darwin-x86/src/math/big/
float.go 1247 func (z *Float) usub(x, y *Float) { func
1250 // eventually uadd (and usub) should be optimized
    [all...]
  /prebuilts/go/linux-x86/pkg/bootstrap/src/bootstrap/compile/internal/big/
float.go 1250 func (z *Float) usub(x, y *Float) { func
1253 // eventually uadd (and usub) should be optimized
    [all...]
  /prebuilts/go/linux-x86/src/cmd/compile/internal/big/
float.go 1247 func (z *Float) usub(x, y *Float) { func
1250 // eventually uadd (and usub) should be optimized
    [all...]
  /prebuilts/go/linux-x86/src/math/big/
float.go 1247 func (z *Float) usub(x, y *Float) { func
1250 // eventually uadd (and usub) should be optimized
    [all...]
