// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#define ARM64_DEFINE_FP_STATICS

#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/instructions-arm64.h"

namespace v8 {
namespace internal {


bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_s:
      case LDR_d: return true;
      default: return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_s:
      case STR_d: return true;
      default: return false;
    }
  }
}


// Rotate the least-significant 'width' bits of 'value' right by 'rotate'
// bits.
static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  DCHECK(width <= 64);
  rotate &= 63;
  return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
         (value >> rotate);
}


// Replicate the least-significant 'width' bits of 'value' across a register
// of 'reg_size' bits.
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
         (width == 32));
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  uint64_t result = value & ((1UL << width) - 1UL);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case: specifically, when the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
  unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
  int64_t n = BitN();
  int64_t imm_s = ImmSetBits();
  int64_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
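  //
  // For example, n=0, imm_s=0b110101 and imm_r=0b000010 select size=8, S=5
  // and R=2: the 8-bit pattern with its six low bits set (0b00111111) is
  // rotated right by two to 0b11001111 (0xCF), then repeated across the
  // register, giving 0xCFCFCFCF for a W destination.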
  //

  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (1UL << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  UNREACHABLE();
  return 0;
}


float Instruction::ImmFP32() {
  //  ImmFP: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
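  //
  // For example, an ImmFP encoding of 0x70 (a=0, b=1, c=1, d=1, efgh=0)
  // expands to 0x3F800000, which is 1.0f.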
  uint32_t bits = ImmFP();
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}


double Instruction::ImmFP64() {
  //  ImmFP: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
  switch (op) {
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return LSDoubleWord;
    default: return LSWord;
  }
}


int64_t Instruction::ImmPCOffset() {
  int64_t offset;
  if (IsPCRelAddressing()) {
    // PC-relative addressing. Only ADR is supported.
    offset = ImmPCRel();
  } else if (BranchType() != UnknownBranchType) {
    // All PC-relative branches.
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  } else {
    // Load literal (offset from PC).
    DCHECK(IsLdrLiteral());
    // The offset is always shifted by 2 bits, even for loads to 64-bit
    // registers.
    offset = ImmLLiteral() << kInstructionSizeLog2;
  }
  return offset;
}


Instruction* Instruction::ImmPCOffsetTarget() {
  return InstructionAtOffset(ImmPCOffset());
}


bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     int32_t offset) {
  return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}


bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
  return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}


void Instruction::SetImmPCOffsetTarget(Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else if (BranchType() != UnknownBranchType) {
    SetBranchImmTarget(target);
  } else {
    SetImmLLiteral(target);
  }
}


void Instruction::SetPCRelImmTarget(Instruction* target) {
  // ADRP is not supported, so 'this' must point to an ADR instruction.
  DCHECK(IsAdr());

  ptrdiff_t target_offset = DistanceTo(target);
  Instr imm;
  if (Instruction::IsValidPCRelOffset(target_offset)) {
    imm = Assembler::ImmPCRelAddress(target_offset);
    SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
  } else {
    PatchingAssembler patcher(this,
                              PatchingAssembler::kAdrFarPatchableNInstrs);
    patcher.PatchAdrFar(target_offset);
  }
}


void Instruction::SetBranchImmTarget(Instruction* target) {
  DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


void Instruction::SetImmLLiteral(Instruction* source) {
  DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
  ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
  Instr imm = Assembler::ImmLLiteral(offset);
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
bool InstructionSequence::IsInlineData() const {
  // Inline data is encoded as a single movz instruction which writes to xzr
  // (x31).
  return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
  DCHECK(IsInlineData());
  uint64_t payload = ImmMoveWide();
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
  return payload;
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64