1 /* 2 * Copyright (C) 2008 Apple Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24 */ 25 26 #ifndef MacroAssembler_h 27 #define MacroAssembler_h 28 29 #if ENABLE(ASSEMBLER) 30 31 #if CPU(ARM_THUMB2) 32 #include "MacroAssemblerARMv7.h" 33 namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }; 34 35 #elif CPU(ARM_TRADITIONAL) 36 #include "MacroAssemblerARM.h" 37 namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; }; 38 39 #elif CPU(MIPS) 40 #include "MacroAssemblerMIPS.h" 41 namespace JSC { 42 typedef MacroAssemblerMIPS MacroAssemblerBase; 43 }; 44 45 #elif CPU(X86) 46 #include "MacroAssemblerX86.h" 47 namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; }; 48 49 #elif CPU(X86_64) 50 #include "MacroAssemblerX86_64.h" 51 namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; }; 52 53 #elif CPU(SH4) 54 #include "MacroAssemblerSH4.h" 55 namespace JSC { 56 typedef MacroAssemblerSH4 MacroAssemblerBase; 57 }; 58 59 #else 60 #error "The MacroAssembler is not supported on this platform." 61 #endif 62 63 64 namespace JSC { 65 66 class MacroAssembler : public MacroAssemblerBase { 67 public: 68 69 using MacroAssemblerBase::pop; 70 using MacroAssemblerBase::jump; 71 using MacroAssemblerBase::branch32; 72 using MacroAssemblerBase::branch16; 73 #if CPU(X86_64) 74 using MacroAssemblerBase::branchPtr; 75 using MacroAssemblerBase::branchTestPtr; 76 #endif 77 78 79 // Platform agnostic onvenience functions, 80 // described in terms of other macro assembly methods. 
81 void pop() 82 { 83 addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister); 84 } 85 86 void peek(RegisterID dest, int index = 0) 87 { 88 loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest); 89 } 90 91 void poke(RegisterID src, int index = 0) 92 { 93 storePtr(src, Address(stackPointerRegister, (index * sizeof(void*)))); 94 } 95 96 void poke(TrustedImm32 value, int index = 0) 97 { 98 store32(value, Address(stackPointerRegister, (index * sizeof(void*)))); 99 } 100 101 void poke(TrustedImmPtr imm, int index = 0) 102 { 103 storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*)))); 104 } 105 106 107 // Backwards banches, these are currently all implemented using existing forwards branch mechanisms. 108 void branchPtr(Condition cond, RegisterID op1, TrustedImmPtr imm, Label target) 109 { 110 branchPtr(cond, op1, imm).linkTo(target, this); 111 } 112 113 void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target) 114 { 115 branch32(cond, op1, op2).linkTo(target, this); 116 } 117 118 void branch32(Condition cond, RegisterID op1, TrustedImm32 imm, Label target) 119 { 120 branch32(cond, op1, imm).linkTo(target, this); 121 } 122 123 void branch32(Condition cond, RegisterID left, Address right, Label target) 124 { 125 branch32(cond, left, right).linkTo(target, this); 126 } 127 128 void branch16(Condition cond, BaseIndex left, RegisterID right, Label target) 129 { 130 branch16(cond, left, right).linkTo(target, this); 131 } 132 133 void branchTestPtr(Condition cond, RegisterID reg, Label target) 134 { 135 branchTestPtr(cond, reg).linkTo(target, this); 136 } 137 138 void jump(Label target) 139 { 140 jump().linkTo(target, this); 141 } 142 143 144 // Ptr methods 145 // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents. 146 // FIXME: should this use a test for 32-bitness instead of this specific exception? 
#if !CPU(X86_64)
    // On the 32-bit targets handled here a pointer occupies a single 32-bit
    // word, so every Ptr-suffixed operation below forwards directly to its
    // 32-bit counterpart. (On X86_64 the base class provides true 64-bit
    // implementations instead; see the #if at the top of the class.)

    // --- Pointer-sized arithmetic and bitwise operations ---

    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        // Pointer immediates fit in 32 bits on these targets; reinterpret.
        add32(TrustedImm32(imm), dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or32(TrustedImm32(imm), dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub32(TrustedImm32(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }

    // --- Pointer-sized loads and stores ---

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(void* address, RegisterID dest)
    {
        load32(address, dest);
    }

    // Emits a load whose address offset field can be repatched later;
    // returns a label identifying that offset field.
    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }

    // Compare left against the immediate per cond and materialize the
    // result in dest (forwards to the 32-bit compare-and-set).
    void setPtr(Condition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        set32Compare32(cond, left, right, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store32(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(TrustedImmPtr imm, void* address)
    {
        store32(TrustedImm32(imm), address);
    }

    // Store counterpart of loadPtrWithAddressOffsetPatch.
    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }

    // --- Pointer-sized compare-and-branch ---

    Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(Condition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(Condition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(Condition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(Condition cond, Address left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(Condition cond, AbsoluteAddress left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    // Branch on the bitwise-AND of the operand and mask; the immediate
    // overloads default to an all-ones mask, i.e. testing the operand itself.
    Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(Condition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(Condition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchTestPtr(Condition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    // --- Pointer-sized arithmetic that branches on the condition result ---

    Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }

    Jump branchSubPtr(Condition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }

    // Keep the base class's branchTest8 overloads visible alongside the
    // ExtendedAddress form added below (which folds the extended offset
    // into a plain Address before forwarding).
    using MacroAssemblerBase::branchTest8;
    Jump branchTest8(Condition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
    }
#endif

};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssembler_h