Lines Matching full:i64
516 %cast210 = <a href="#i_getelementptr">getelementptr</a> [13 x i8]* @.LC0, i64 0, i64 0 <i>; i8*</i>
1378 <li><tt>i64:32:64</tt> - i64 has ABI alignment of 32 bits but preferred
1400 largest) while both i65 and i256 will use the alignment of i64 (largest
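As an illustration, such a field appears inside a module's <tt>target datalayout</tt> string; the surrounding fields below are an assumption modeled on a 32-bit target, not taken from this document:

    target datalayout = "e-p:32:32:32-i64:32:64"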
1718 <td><tt>i1, i2, i3, ... i8, ... i16, ... i32, ... i64, ... </tt></td>
2219 <td class="left"><tt><2 x i64></tt></td>
2602 %wideaddr = bitcast i32* @g to i64*
2604 %trap4 = load i64* %wideaddr ; Returns a trap value.
2922 call void @llvm.dbg.value(metadata !24, i64 0, metadata !25)
2931 %indvar.next = add i64 %indvar, 1, !dbg !21
3676 (e.g. <tt>i32</tt> x <tt>i32</tt> -> <tt>i64</tt>) is needed, the operands should
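A minimal sketch of the widening pattern described above, with illustrative value names (use <tt>zext</tt> instead of <tt>sext</tt> for an unsigned product):

    %a64 = sext i32 %a to i64      ; sign-extend each operand first
    %b64 = sext i32 %b to i64
    %prod = mul i64 %a64, %b64     ; full i64 product of two i32 values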
5080 %aptr = getelementptr {i32, [12 x i8]}* %saptr, i64 0, i32 1
5082 %vptr = getelementptr {i32, <2 x i8>}* %svptr, i64 0, i32 1, i32 1
5084 %eptr = getelementptr [12 x i8]* %aptr, i64 0, i32 1
5177 %X = zext i32 257 to i64 <i>; yields i64:257</i>
5481 %Y = ptrtoint i32* %x to i64 <i>; yields zero extension on 32-bit architecture</i>
5519 %Z = inttoptr i64 0 to i32* <i>; yields truncation on 32-bit architecture</i>
5563 %Z = bitcast <2 x i32> %V to i64 <i>; yields i64: %V</i>
6652 declare i64 @llvm.readcyclecounter()
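A usage sketch for timing a region with this intrinsic; the value names are illustrative:

    %t0 = call i64 @llvm.readcyclecounter()
    ; ... code being timed ...
    %t1 = call i64 @llvm.readcyclecounter()
    %elapsed = sub i64 %t1, %t0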
6699 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* <dest>, i8* <src>,
6700 i64 <len>, i32 <align>, i1 <isvolatile>)
6753 declare void @llvm.memmove.p0i8.p0i8.i64(i8* <dest>, i8* <src>,
6754 i64 <len>, i32 <align>, i1 <isvolatile>)
6809 declare void @llvm.memset.p0i8.i64(i8* <dest>, i8 <val>,
6810 i64 <len>, i32 <align>, i1 <isvolatile>)
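A sketch of invoking the <tt>i64</tt> variant declared above, assuming <tt>%dst</tt> and <tt>%src</tt> are <tt>i8*</tt> values; the length, alignment, and volatility arguments are illustrative:

    call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 32, i32 4, i1 false)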
7144 declare i64 @llvm.bswap.i64(i64 <id>)
7159 The <tt>llvm.bswap.i48</tt>, <tt>llvm.bswap.i64</tt> and other intrinsics
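A usage sketch (the operand name is illustrative):

    %swapped = call i64 @llvm.bswap.i64(i64 %x)   ; reverses the byte order of %x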
7181 declare i64 @llvm.ctpop.i64(i64 <src>)
7217 declare i64 @llvm.ctlz.i64(i64 <src>)
7255 declare i64 @llvm.cttz.i64(i64 <src>)
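The three bit-counting intrinsics above share the same shape; a sketch with illustrative names:

    %ones  = call i64 @llvm.ctpop.i64(i64 %x)   ; population count: number of set bits
    %lead  = call i64 @llvm.ctlz.i64(i64 %x)    ; count of leading zero bits
    %trail = call i64 @llvm.cttz.i64(i64 %x)    ; count of trailing zero bits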
7304 declare {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
7352 declare {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
7399 declare {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
7447 declare {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
7495 declare {i64, i1} @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
7544 declare {i64, i1} @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
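Each of these returns a <tt>{i64, i1}</tt> pair; a sketch of unpacking it with <tt>extractvalue</tt> (the branch labels are illustrative):

    %res  = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
    %sum  = extractvalue {i64, i1} %res, 0     ; the wrapped result
    %obit = extractvalue {i64, i1} %res, 1     ; true if signed overflow occurred
    br i1 %obit, label %overflow, label %normal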
7926 declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* <ptr>, i64 <cmp>, i64 <val>)
7985 declare i64 @llvm.atomic.swap.i64.p0i64(i64* <ptr>, i64 <val>)
8042 declare i64 @llvm.atomic.load.add.i64.p0i64(i64* <ptr>, i64 <delta>)
8092 declare i64 @llvm.atomic.load.sub.i64.p0i64(i64* <ptr>, i64 <delta>)
8159 declare i64 @llvm.atomic.load.and.i64.p0i64(i64* <ptr>, i64 <delta>)
8166 declare i64 @llvm.atomic.load.or.i64.p0i64(i64* <ptr>, i64 <delta>)
8173 declare i64 @llvm.atomic.load.nand.i64.p0i64(i64* <ptr>, i64 <delta>)
8180 declare i64 @llvm.atomic.load.xor.i64.p0i64(i64* <ptr>, i64 <delta>)
8249 declare i64 @llvm.atomic.load.max.i64.p0i64(i64* <ptr>, i64 <delta>)
8256 declare i64 @llvm.atomic.load.min.i64.p0i64(i64* <ptr>, i64 <delta>)
8263 declare i64 @llvm.atomic.load.umax.i64.p0i64(i64* <ptr>, i64 <delta>)
8270 declare i64 @llvm.atomic.load.umin.i64.p0i64(i64* <ptr>, i64 <delta>)
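A hedged sketch combining two of the atomic intrinsics above, assuming <tt>%ptr</tt> is an <tt>i64*</tt>; the values are illustrative:

    %old  = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %ptr, i64 1)           ; atomic increment, returns prior value
    %orig = call i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* %ptr, i64 %old, i64 0) ; stores 0 only if *%ptr still equals %old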
8329 declare void @llvm.lifetime.start(i64 <size>, i8* nocapture <ptr>)
8359 declare void @llvm.lifetime.end(i64 <size>, i8* nocapture <ptr>)
8388 declare {}* @llvm.invariant.start(i64 <size>, i8* nocapture <ptr>)
8416 declare void @llvm.invariant.end({}* <start>, i64 <size>, i8* nocapture <ptr>)
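A sketch bracketing a stack object's live range with the lifetime markers declared above; the names are illustrative:

    %buf = alloca [32 x i8]
    %p   = bitcast [32 x i8]* %buf to i8*
    call void @llvm.lifetime.start(i64 32, i8* %p)   ; %buf's contents become defined here
    ; ... use %buf ...
    call void @llvm.lifetime.end(i64 32, i8* %p)     ; %buf is dead past this point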
8489 declare i64 @llvm.annotation.i64(i64 <val>, i8* <str>, i8* <str>, i32 <int>)
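A sketch of annotating a value, assuming <tt>@.str</tt> and <tt>@.file</tt> are null-terminated <tt>i8</tt> arrays defined elsewhere; all names and the integer argument are illustrative:

    %msg  = getelementptr [4 x i8]* @.str, i32 0, i32 0
    %file = getelementptr [7 x i8]* @.file, i32 0, i32 0
    %v2   = call i64 @llvm.annotation.i64(i64 %v, i8* %msg, i8* %file, i32 42)  ; passes %v through unchanged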
8579 declare i64 @llvm.objectsize.i64(i8* <object>, i1 <type>)
8598 representing the size of the object concerned, or <tt>-1</tt> or <tt>0</tt> (as <tt>i32</tt> or <tt>i64</tt>),
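A usage sketch, assuming <tt>%obj</tt> is an <tt>i8*</tt> into some allocation; with a false <tt>type</tt> argument the intrinsic returns <tt>-1</tt> when the size is unknown:

    %size = call i64 @llvm.objectsize.i64(i8* %obj, i1 false)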