Lines Matching full:next
83 // CHECK-NEXT: add nsw i32 [[tmp_1]], 1
92 // CHECK-NEXT: [[PV:%.*]] = alloca [5 x double]*, align 4
93 // CHECK-NEXT: store
94 // CHECK-NEXT: store
95 // CHECK-NEXT: [[N:%.*]] = load i32* [[NV]], align 4
96 // CHECK-NEXT: [[P:%.*]] = load [5 x double]** [[PV]], align 4
97 // CHECK-NEXT: [[T0:%.*]] = mul nsw i32 1, [[N]]
98 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [5 x double]* [[P]], i32 [[T0]]
99 // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [5 x double]* [[T1]], i32 2
100 // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [5 x double]* [[T2]], i32 0, i32 3
101 // CHECK-NEXT: [[T4:%.*]] = load double* [[T3]]
102 // CHECK-NEXT: ret double [[T4]]
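
The checks above (source lines 92-102) describe the IR for indexing through a pointer to a two-dimensional VLA whose inner dimension is constant: the first subscript is scaled by the runtime bound (mul nsw i32 1, [[N]]) and the remaining subscripts become fixed getelementptr offsets. A minimal C sketch of that shape (the function and parameter names are assumptions, not taken from this listing):

    double sketch_vla_index(int n, double (*p)[n][5]) {
      /* p[1] advances by 1 * n rows of [5 x double]; [2] and [3] are the
         constant offsets seen in the getelementptr sequence above. */
      return p[1][2][3];
    }
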
109 // CHECK-NEXT: [[P:%.*]] = alloca [6 x i8]*, align 4
110 // CHECK-NEXT: [[P2:%.*]] = alloca [6 x i8]*, align 4
111 // CHECK-NEXT: store i32
112 // CHECK-NEXT: store [6 x i8]*
115 // CHECK-NEXT: [[DIM0:%.*]] = load i32* [[N]], align 4
116 // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4
117 // CHECK-NEXT: [[DIM1:%.*]] = add i32 [[T0]], 1
119 // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]** [[P]], align 4
120 // CHECK-NEXT: [[T1:%.*]] = load i32* [[N]], align 4
121 // CHECK-NEXT: [[T2:%.*]] = udiv i32 [[T1]], 2
122 // CHECK-NEXT: [[T3:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
123 // CHECK-NEXT: [[T4:%.*]] = mul nsw i32 [[T2]], [[T3]]
124 // CHECK-NEXT: [[T5:%.*]] = getelementptr inbounds [6 x i8]* [[T0]], i32 [[T4]]
125 // CHECK-NEXT: [[T6:%.*]] = load i32* [[N]], align 4
126 // CHECK-NEXT: [[T7:%.*]] = udiv i32 [[T6]], 4
127 // CHECK-NEXT: [[T8:%.*]] = sub i32 0, [[T7]]
128 // CHECK-NEXT: [[T9:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
129 // CHECK-NEXT: [[T10:%.*]] = mul nsw i32 [[T8]], [[T9]]
130 // CHECK-NEXT: [[T11:%.*]] = getelementptr inbounds [6 x i8]* [[T5]], i32 [[T10]]
131 // CHECK-NEXT: store [6 x i8]* [[T11]], [6 x i8]** [[P2]], align 4
134 // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]** [[P2]], align 4
135 // CHECK-NEXT: [[T1:%.*]] = load [6 x i8]** [[P]], align 4
136 // CHECK-NEXT: [[T2:%.*]] = ptrtoint [6 x i8]* [[T0]] to i32
137 // CHECK-NEXT: [[T3:%.*]] = ptrtoint [6 x i8]* [[T1]] to i32
138 // CHECK-NEXT: [[T4:%.*]] = sub i32 [[T2]], [[T3]]
139 // CHECK-NEXT: [[T5:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
140 // CHECK-NEXT: [[T6:%.*]] = mul nuw i32 6, [[T5]]
141 // CHECK-NEXT: [[T7:%.*]] = sdiv exact i32 [[T4]], [[T6]]
142 // CHECK-NEXT: ret i32 [[T7]]
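
The checks at source lines 109-142 cover pointer arithmetic and pointer subtraction over a three-dimensional VLA element type: additions are scaled by the product of the two runtime dimensions (mul nuw [[DIM0]], [[DIM1]]), and the final pointer difference is divided exactly by 6 * n * (n+1), the element size in chars. A hedged C sketch with that behavior (names and the exact expressions are assumptions):

    int sketch_vla_ptr_diff(unsigned n, char (*p)[n][n + 1][6]) {
      /* Each step of p covers n * (n+1) * 6 chars, so the indices n/2 and
         -(n/4) are multiplied by n * (n+1) before the getelementptrs. */
      char (*p2)[n][n + 1][6] = (p + n / 2) - n / 4;
      /* Pointer difference: subtract the raw addresses, then divide exactly
         by the element size, 6 * n * (n+1). */
      return p2 - p;
    }
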
152 // CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
153 // CHECK-NEXT: [[CL:%.*]] = alloca i32*, align 4
154 // CHECK-NEXT: store i32 0, i32* [[I]], align 4
157 // CHECK-NEXT: [[Z:%.*]] = load i32* [[I]], align 4
158 // CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[Z]], 1
159 // CHECK-NEXT: store i32 [[INC]], i32* [[I]], align 4
160 // CHECK-NEXT: [[O:%.*]] = load i32* [[I]], align 4
161 // CHECK-NEXT: [[AR:%.*]] = getelementptr inbounds [5 x i32]* [[A]], i32 0, i32 0
162 // CHECK-NEXT: [[T:%.*]] = bitcast [5 x i32]* [[A]] to i32*
163 // CHECK-NEXT: store i32* [[T]], i32** [[CL]]
164 // CHECK-NEXT: [[TH:%.*]] = load i32** [[CL]]
165 // CHECK-NEXT: [[VLAIX:%.*]] = mul nsw i32 0, [[O]]
166 // CHECK-NEXT: [[ADDPTR:%.*]] = getelementptr inbounds i32* [[TH]], i32 [[VLAIX]]
167 // CHECK-NEXT: store i32* [[ADDPTR]], i32** [[CL]]
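
The checks at source lines 152-167 verify that a side effect inside a variably modified type is evaluated exactly once (the load / add nsw / store of [[I]]) and that adding 0 to a pointer-to-VLA still multiplies the index by the runtime bound (mul nsw i32 0, [[O]]). A simplified, compilable illustration of that kind of construct (not the test's actual source, which the listing does not show):

    void sketch_vla_side_effect(void) {
      int a[5];
      int i = 0;
      /* The VLA bound ++i has a side effect and must be evaluated once. */
      int (*cl)[++i] = (void *)&a;
      /* Adding 0 still scales the index by the captured bound. */
      cl += 0;
    }
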
175 // CHECK-NEXT: [[A:%.*]] = alloca i32**, align 4
176 // CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
178 // CHECK-NEXT: [[CL:%.*]] = alloca i32**, align 4
179 // CHECK-NEXT: store i32 20, i32* [[N]], align 4
180 // CHECK-NEXT: store i32 0, i32* [[I]], align 4
181 // CHECK-NEXT: [[Z:%.*]] = load i32* [[I]], align 4
182 // CHECK-NEXT: [[O:%.*]] = bitcast i32*** [[A]] to i32**
183 // CHECK-NEXT: store i32** [[O]], i32*** [[CL]]
184 // CHECK-NEXT: [[T:%.*]] = load i32*** [[CL]]
185 // CHECK-NEXT: [[IX:%.*]] = getelementptr inbounds i32** [[T]], i32 0
186 // CHECK-NEXT: [[TH:%.*]] = load i32** [[IX]], align 4
187 // CHECK-NEXT: [[F:%.*]] = mul nsw i32 1, [[Z]]
188 // CHECK-NEXT: [[IX1:%.*]] = getelementptr inbounds i32* [[TH]], i32 [[F]]
189 // CHECK-NEXT: [[IX2:%.*]] = getelementptr inbounds i32* [[IX1]], i32 5
190 // CHECK-NEXT: store i32 0, i32* [[IX2]], align 4
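
The checks at source lines 175-190 exercise indexing through a pointer to a pointer-to-VLA: the outer subscript is a getelementptr of 0, the middle subscript is scaled by the runtime bound (mul nsw i32 1, [[Z]]), and the innermost subscript is the constant 5. A rough, self-contained C sketch with the same indexing shape (all names and the setup are assumptions for illustration; the test's actual setup, including the bitcast of the variable's address, is not shown here):

    void sketch_nested_vla_ptr(void) {
      int i = 6;
      int buf[20];                        /* backing storage */
      int (*row)[i] = (int (*)[i])buf;    /* pointer to a VLA row of i ints */
      int (**cl)[i] = &row;               /* pointer to that pointer */
      /* cl[0] loads the row pointer; [1] is scaled by the bound i; [5] is a
         constant offset: this stores 0 to buf[1*i + 5]. */
      cl[0][1][5] = 0;
    }
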