; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s


; FUNC-LABEL: {{^}}xor_v2i32:
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1) {
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in0
  %b = load <2 x i32>, <2 x i32> addrspace(1)* %in1
  %result = xor <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}xor_v4i32:
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}

define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) {
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in0
  %b = load <4 x i32>, <4 x i32> addrspace(1)* %in1
  %result = xor <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}xor_i1:
; EG: XOR_INT {{\** *}}{{T[0-9]+\.[XYZW]}}, {{PS|PV\.[XYZW]}}, {{PS|PV\.[XYZW]}}

; SI-DAG: v_cmp_le_f32_e32 [[CMP0:vcc]], 0, {{v[0-9]+}}
; SI-DAG: v_cmp_le_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 1.0, {{v[0-9]+}}
; SI: s_xor_b64 [[XOR:vcc]], [[CMP0]], [[CMP1]]
; SI: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
  %a = load float, float addrspace(1)* %in0
  %b = load float, float addrspace(1)* %in1
  %acmp = fcmp oge float %a, 0.000000e+00
  %bcmp = fcmp oge float %b, 1.000000e+00
  %xor = xor i1 %acmp, %bcmp
  %result = select i1 %xor, float %a, float %b
  store float %result, float addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_xor_i1:
; SI: buffer_load_ubyte [[B:v[0-9]+]]
; SI: buffer_load_ubyte [[A:v[0-9]+]]
; SI: v_xor_b32_e32 [[XOR:v[0-9]+]], [[A]], [[B]]
; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[XOR]]
; SI: buffer_store_byte [[RESULT]]
define void @v_xor_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) {
  %a = load volatile i1, i1 addrspace(1)* %in0
  %b = load volatile i1, i1 addrspace(1)* %in1
  %xor = xor i1 %a, %b
  store i1 %xor, i1 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}vector_xor_i32:
; SI: v_xor_b32_e32
define void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
  %a = load i32, i32 addrspace(1)* %in0
  %b = load i32, i32 addrspace(1)* %in1
  %result = xor i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}scalar_xor_i32:
; SI: s_xor_b32
define void @scalar_xor_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %result = xor i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}scalar_not_i32:
; SI: s_not_b32
define void @scalar_not_i32(i32 addrspace(1)* %out, i32 %a) {
  %result = xor i32 %a, -1
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}vector_not_i32:
; SI: v_not_b32
define void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
  %a = load i32, i32 addrspace(1)* %in0
  %b = load i32, i32 addrspace(1)* %in1
  %result = xor i32 %a, -1
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}vector_xor_i64:
; SI: v_xor_b32_e32
; SI: v_xor_b32_e32
; SI: s_endpgm
define void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
  %a = load i64, i64 addrspace(1)* %in0
  %b = load i64, i64 addrspace(1)* %in1
  %result = xor i64 %a, %b
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}scalar_xor_i64:
; SI: s_xor_b64
; SI: s_endpgm
define void @scalar_xor_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
  %result = xor i64 %a, %b
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}scalar_not_i64:
; SI: s_not_b64
define void @scalar_not_i64(i64 addrspace(1)* %out, i64 %a) {
  %result = xor i64 %a, -1
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}vector_not_i64:
; SI: v_not_b32
; SI: v_not_b32
define void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
  %a = load i64, i64 addrspace(1)* %in0
  %b = load i64, i64 addrspace(1)* %in1
  %result = xor i64 %a, -1
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; Test that we have a pattern to match xor inside a branch.
; Note that in the future the backend may be smart enough to
; use an SALU instruction for this.

; FUNC-LABEL: {{^}}xor_cf:
; SI: s_xor_b64
define void @xor_cf(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b) {
entry:
  %0 = icmp eq i64 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = xor i64 %a, %b
  br label %endif

else:
  %2 = load i64, i64 addrspace(1)* %in
  br label %endif

endif:
  %3 = phi i64 [%1, %if], [%2, %else]
  store i64 %3, i64 addrspace(1)* %out
  ret void
}