; RUN: opt -loop-accesses -analyze < %s | FileCheck %s
; RUN: opt -passes='require<scalar-evolution>,require<aa>,loop(print-access-info)' -disable-output < %s 2>&1 | FileCheck %s

; The runtime memory check code and the access grouping
; algorithm both assume that the start and end values
; for an access range are ordered (start <= stop).
; When generating checks for accesses with negative stride
; we need to take this into account and swap the interval
; ends.
;
;   for (i = 0; i < 10000; i++) {
;     B[i] = A[15000 - i] * 3;
;   }

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnueabi"

; CHECK: function 'f':
; CHECK: (Low: (20000 + %a) High: (60000 + %a)<nsw>)

@B = common global i32* null, align 8
@A = common global i32* null, align 8

define void @f() {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %idx = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %negidx = sub i64 15000, %idx

  %arrayidxA0 = getelementptr inbounds i32, i32* %a, i64 %negidx
  %loadA0 = load i32, i32* %arrayidxA0, align 2

  %res = mul i32 %loadA0, 3

  %add = add nuw nsw i64 %idx, 1

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %idx
  store i32 %res, i32* %arrayidxB, align 2

  %exitcond = icmp eq i64 %idx, 10000
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; CHECK: function 'g':
; When the stride is not constant, we are forced to do umin/umax to get
; the interval limits.

;   for (i = 0; i < 10000; i++) {
;     B[i] = A[15000 - step * i] * 3;
;   }

; Here it is not obvious what the limits are, since 'step' could be negative.

; CHECK: Low: (-1 + (-1 * ((-60001 + (-1 * %a)) umax (-60001 + (40000 * %step) + (-1 * %a)))))
; CHECK: High: ((60000 + %a)<nsw> umax (60000 + (-40000 * %step) + %a))

define void @g(i64 %step) {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %idx = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %idx_mul = mul i64 %idx, %step
  %negidx = sub i64 15000, %idx_mul

  %arrayidxA0 = getelementptr inbounds i32, i32* %a, i64 %negidx
  %loadA0 = load i32, i32* %arrayidxA0, align 2

  %res = mul i32 %loadA0, 3

  %add = add nuw nsw i64 %idx, 1

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %idx
  store i32 %res, i32* %arrayidxB, align 2

  %exitcond = icmp eq i64 %idx, 10000
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}
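
; For reference, a minimal sketch of the bound computation exercised above.
; This is an illustrative simplification (an assumption, not the exact
; LoopAccessAnalysis implementation): the runtime-check interval for a
; pointer is built from the SCEV start and end of its access range, and for
; a reversed (negative-stride) access the two are swapped so that Low <= High;
; when the stride sign is unknown, umin/umax are used instead:
;
;   ScStart = pointer value on the first iteration
;   ScEnd   = pointer value past the last iteration
;   if (stride is a known negative constant)
;     swap(ScStart, ScEnd)            ; reversed access, as in function 'f'
;   else
;     Low  = umin(ScStart, ScEnd)     ; stride sign unknown, as in function 'g'
;     High = umax(ScStart, ScEnd)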