Lines Matching full:i64
4 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
108 ;; i64 -> float forwarding
109 define float @coerce_mustalias6(i64 %V, i64* %P) {
110 store i64 %V, i64* %P
112 %P2 = bitcast i64* %P to float*
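Only the lines containing i64 are listed, so the bodies above are incomplete. A minimal sketch of what the full i64 -> float forwarding test plausibly looks like, in the same pre-3.7 typed-pointer syntax (the load and ret lines are assumptions, not part of the matches):

    define float @coerce_mustalias6(i64 %V, i64* %P) {
      store i64 %V, i64* %P            ; store all 64 bits of %V
      %P2 = bitcast i64* %P to float*  ; reinterpret the slot as float*
      %A = load float* %P2             ; assumed: reload 32 of those bits as a float
      ret float %A                     ; GVN should forward %V (trunc + bitcast) and delete the load
    }

On the little-endian 32-bit datalayout from line 4, the forwarded value is the low 32 bits of %V bitcast to float.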
121 ;; i64 -> i8* (32-bit) forwarding
122 define i8* @coerce_mustalias7(i64 %V, i64* %P) {
123 store i64 %V, i64* %P
125 %P2 = bitcast i64* %P to i8**
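The pointer variant follows the same shape; the load and ret are again assumptions. Because pointers are 32-bit in this datalayout, the expected coercion is trunc plus inttoptr rather than a plain bitcast:

    define i8* @coerce_mustalias7(i64 %V, i64* %P) {
      store i64 %V, i64* %P            ; store 64 bits
      %P2 = bitcast i64* %P to i8**    ; reinterpret as a pointer slot
      %A = load i8** %P2               ; assumed: reload 32 bits as an i8*
      ret i8* %A                       ; expected: trunc i64 -> i32, then inttoptr i32 -> i8*
    }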
138 tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 1, i64 200, i32 1, i1 false)
139 %arrayidx = getelementptr inbounds i16* %A, i64 42
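Lines 138-139 are from a memset-to-load forwarding test using the old five-argument memset declared at line 641. A hedged reconstruction (the function name, entry block, and trailing load/ret are assumptions) shows why the result is a constant: filling memory with the byte 0x01 makes any i16 inside the region equal to 0x0101, i.e. 257.

    define signext i16 @memset_to_i16_local(i16* %A) nounwind {
    entry:
      %conv = bitcast i16* %A to i8*
      tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 1, i64 200, i32 1, i1 false)
      %arrayidx = getelementptr inbounds i16* %A, i64 42   ; byte offset 84, inside the 200-byte fill
      %tmp = load i16* %arrayidx                           ; assumed: GVN folds this to 257 (0x0101)
      ret i16 %tmp
    }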
151 tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 %Val, i64 400, i32 1, i1 false)
152 %arrayidx = getelementptr inbounds float* %A, i64 42 ; <float*> [#uses=1]
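Lines 151-152 memset with a variable fill byte %Val and then load a float from the filled region. A sketch under the same assumptions (name and trailing lines reconstructed): GVN cannot fold this one to a constant, but it can still remove the load by splatting %Val into a 32-bit integer and bitcasting the result to float.

    define float @memset_to_float_local(float* %A, i8 %Val) nounwind {
    entry:
      %conv = bitcast float* %A to i8*
      tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 %Val, i64 400, i32 1, i1 false)
      %arrayidx = getelementptr inbounds float* %A, i64 42  ; byte offset 168, inside the 400-byte fill
      %tmp = load float* %arrayidx                          ; assumed: replaced by a splat of %Val (zext/shl/or) bitcast to float
      ret float %tmp
    }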
171 tail call void @llvm.memset.p0i8.i64(i8* %P3, i8 1, i64 400, i32 1, i1 false)
175 tail call void @llvm.memset.p0i8.i64(i8* %P3, i8 2, i64 400, i32 1, i1 false)
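Lines 171 and 175 look like a non-local variant: two memsets with different fill bytes in different predecessors, with a load after the merge point. The control flow and the i16 load below are entirely assumed; the point is that GVN replaces the load with a phi of the two forwarded constants.

    define i16 @memset_to_i16_nonlocal(i16* %P, i1 %cond) {
      %P3 = bitcast i16* %P to i8*
      br i1 %cond, label %T, label %F
    T:
      tail call void @llvm.memset.p0i8.i64(i8* %P3, i8 1, i64 400, i32 1, i1 false)
      br label %Cont
    F:
      tail call void @llvm.memset.p0i8.i64(i8* %P3, i8 2, i64 400, i32 1, i1 false)
      br label %Cont
    Cont:
      %P2 = getelementptr i16* %P, i32 4
      %A = load i16* %P2        ; assumed: becomes phi i16 [ 257, %T ], [ 514, %F ]
      ret i16 %A
    }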
196 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %conv, i8* bitcast ({i32, float, i32 }* @GCst to i8*), i64 12, i32 1, i1 false)
197 %arrayidx = getelementptr inbounds float* %A, i64 1 ; <float*> [#uses=1]
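Lines 196-197 copy 12 bytes out of a constant struct global and then read element 1 as a float, so GVN can forward the float member of the initializer directly. A sketch; the initializer values and the surrounding lines are assumptions (the listing only shows that @GCst has type {i32, float, i32}):

    @GCst = constant { i32, float, i32 } { i32 42, float 14.0, i32 97 }   ; assumed initializer

    define float @memcpy_to_float_local(float* %A) nounwind {
    entry:
      %conv = bitcast float* %A to i8*
      tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %conv, i8* bitcast ({i32, float, i32 }* @GCst to i8*), i64 12, i32 1, i1 false)
      %arrayidx = getelementptr inbounds float* %A, i64 1   ; element 1 = the float member, byte offset 4
      %tmp = load float* %arrayidx                          ; assumed: folded to the float field of @GCst
      ret float %tmp
    }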
537 call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 1024, i32 4, i1 false)
554 %add.ptr = getelementptr inbounds i8* %P, i64 1
580 %arrayidx4 = getelementptr inbounds i8* %P, i64 1
604 %tmp = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 1), align 4
606 %tmp1 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 2), align 1
619 %tmp = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 1), align 4
621 %tmp1 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 2), align 1
625 %tmp2 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 3), align 2
629 %tmp3 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 4), align 1
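Lines 604-629 are from the load-widening tests: several adjacent i8 fields of the global @f are loaded one byte at a time, and a GVN that performs widening may merge neighbouring narrow loads into a single wider load. The alignments (4, 1, 2, 1) fit an i32 followed by four i8 fields, but the struct layout, the global definition, the function name, and the arithmetic below are all reconstructions.

    %widening1 = type { i32, i8, i8, i8, i8 }          ; assumed layout: i32 then four i8 fields
    @f = global %widening1 zeroinitializer, align 4    ; assumed definition

    define i32 @test_widening_sketch() nounwind {
    entry:
      %tmp  = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 1), align 4
      %tmp1 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 2), align 1
      %a = zext i8 %tmp to i32
      %b = zext i8 %tmp1 to i32
      %sum = add i32 %a, %b    ; assumed use; widening would turn the two i8 loads into one i16 load
      ret i32 %sum
    }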
641 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
643 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind