// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/x87/codegen-x87.h"

#if V8_TARGET_ARCH_X87

#include "src/codegen.h"
#include "src/heap/heap.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

// Sets up an INTERNAL stack frame before a runtime call made from a stub and
// records on the assembler that a frame now exists.
// NOTE(review): the DCHECK runs *after* EnterFrame; this is only correct if
// EnterFrame itself does not toggle has_frame() — confirm in MacroAssembler.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


// Tears down the INTERNAL frame created by BeforeCall and clears the
// assembler's frame flag. Mirror image of BeforeCall.
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


// Emits a small stand-alone machine-code routine computing sqrt(double) and
// returns it as a callable function pointer. The code is written into a
// freshly allocated executable page (not the V8 heap), then the page is
// flushed from the instruction cache and made read/execute only.
// Returns nullptr if the executable allocation fails.
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  // Load double input into registers.
  // The argument is read from esp+4: cdecl passes the double on the stack,
  // esp+0 holding the return address.
  __ fld_d(MemOperand(esp, 4));
  // Temporarily set the x87 control word to 0x027F — presumably selecting
  // 53-bit (double) precision so fsqrt rounds like an IEEE double sqrt —
  // then restore the default 0x037F (extended precision) afterwards.
  // TODO(review): confirm the control-word semantics against X87SetFPUCW.
  __ X87SetFPUCW(0x027F);
  __ fsqrt();
  __ X87SetFPUCW(0x037F);
  // Result is returned on the x87 FPU stack, per the 32-bit calling
  // convention for floating-point return values.
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  // The generated code must be position-independent: the buffer is never
  // moved by the GC, and we cannot record relocation info for it.
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}


// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)

// NOTE(review): these two enums are not referenced anywhere in this file's
// visible code — presumably kept for parity with the ia32 SSE2 version of
// CreateMemMoveFunction; confirm before removing.
enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };


// Emits the shared epilogue of the generated memmove routine: restores the
// callee-saved registers pushed in its prologue (esi, edi) and returns.
void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
  __ pop(esi);
  __ pop(edi);
  __ ret(0);
}


#undef __
#define __ masm.


// Translates an assembler Label position into an absolute 32-bit address
// inside the fixed (unmovable) code buffer. Only valid on a 32-bit target,
// where a code address fits in int32_t.
class LabelConverter {
 public:
  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
  int32_t address(Label* l) const {
    return reinterpret_cast<int32_t>(buffer_) + l->pos();
  }
 private:
  byte* buffer_;  // Start of the code buffer; not owned.
};


// Emits a stand-alone memmove(dst, src, size) routine into executable
// memory and returns it as a callable function pointer, or nullptr if the
// allocation fails. The generated code handles overlapping ranges by
// copying forward when dst < src and backward when dst > src. Since the
// x87 port has no SSE2, only simple 4-byte/1-byte copy loops are emitted.
MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;
  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  // NOTE(review): conv is unused in the non-SSE2 code below — presumably a
  // leftover from the ia32 version, which uses it for jump tables; confirm.
  LabelConverter conv(buffer);

  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // 32-bit C declaration function calls pass arguments on stack.

  // Stack layout:
  // esp[12]: Third argument, size.
  // esp[8]: Second argument, source pointer.
  // esp[4]: First argument, destination pointer.
  // esp[0]: return address

  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;

  int stack_offset = 0;  // Update if we change the stack height.

  Label backward, backward_much_overlap;
  Label forward_much_overlap, small_size, medium_size, pop_and_return;
  // Prologue: save callee-saved esi/edi, which are used as src/dst below.
  // stack_offset tracks the extra stack depth so the argument offsets above
  // stay correct.
  __ push(edi);
  __ push(esi);
  stack_offset += 2 * kPointerSize;
  Register dst = edi;
  Register src = esi;
  Register count = ecx;
  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
  __ mov(count, Operand(esp, stack_offset + kSizeOffset));

  // dst == src: nothing to do.
  __ cmp(dst, src);
  __ j(equal, &pop_and_return);

  // No SSE2.
  Label forward;
  // count == 0: nothing to do.
  __ cmp(count, 0);
  __ j(equal, &pop_and_return);
  // dst > src: the ranges may overlap such that a forward copy would
  // clobber unread source bytes, so copy backward instead.
  __ cmp(dst, src);
  __ j(above, &backward);
  __ jmp(&forward);
  {
    // Simple forward copier.
    // Copies 4 bytes at a time while count > 3, then finishes byte by byte.
    Label forward_loop_1byte, forward_loop_4byte;
    __ bind(&forward_loop_4byte);
    __ mov(eax, Operand(src, 0));
    __ sub(count, Immediate(4));
    __ add(src, Immediate(4));
    __ mov(Operand(dst, 0), eax);
    __ add(dst, Immediate(4));
    __ bind(&forward);  // Entry point.
    __ cmp(count, 3);
    __ j(above, &forward_loop_4byte);
    __ bind(&forward_loop_1byte);
    __ cmp(count, 0);
    __ j(below_equal, &pop_and_return);
    __ mov_b(eax, Operand(src, 0));
    __ dec(count);
    __ inc(src);
    __ mov_b(Operand(dst, 0), eax);
    __ inc(dst);
    __ jmp(&forward_loop_1byte);
  }
  {
    // Simple backward copier.
    // Starts with src/dst pointing one-past-the-end of each range, copies
    // 4 bytes at a time while count > 3, then finishes byte by byte.
    // entry_shortcut skips the 4-byte loop entirely for counts <= 3.
    Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
    __ bind(&backward);
    __ add(src, count);
    __ add(dst, count);
    __ cmp(count, 3);
    __ j(below_equal, &entry_shortcut);

    __ bind(&backward_loop_4byte);
    __ sub(src, Immediate(4));
    __ sub(count, Immediate(4));
    __ mov(eax, Operand(src, 0));
    __ sub(dst, Immediate(4));
    __ mov(Operand(dst, 0), eax);
    __ cmp(count, 3);
    __ j(above, &backward_loop_4byte);
    __ bind(&backward_loop_1byte);
    __ cmp(count, 0);
    __ j(below_equal, &pop_and_return);
    __ bind(&entry_shortcut);
    __ dec(src);
    __ dec(count);
    __ mov_b(eax, Operand(src, 0));
    __ dec(dst);
    __ mov_b(Operand(dst, 0), eax);
    __ jmp(&backward_loop_1byte);
  }

  __ bind(&pop_and_return);
  MemMoveEmitPopAndReturn(&masm);

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));
  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  // TODO(jkummerow): It would be nice to register this code creation event
  // with the PROFILE / GDBJIT system.
  return FUNCTION_CAST<MemMoveFunction>(buffer);
}


#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

// Emits code that loads the character at |index| of |string| into |result|.
// Indirect representations (sliced, thin, flat cons) are unwrapped in a loop
// until a sequential or external string is reached; strings that cannot be
// handled inline (non-flat cons, short external) jump to |call_runtime|.
// Clobbers |string| (rewritten to the underlying string) and |index|
// (a slice offset may be added to it); |result| holds the character code.
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Loop head: each indirect-string case below rewrites |string| and jumps
  // back here to re-examine the new string's representation.
  Label indirect_string_loaded;
  __ bind(&indirect_string_loaded);

  // Fetch the instance type of the receiver into result register.
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ test(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string, thin_string;
  __ and_(result, Immediate(kStringRepresentationMask));
  __ cmp(result, Immediate(kConsStringTag));
  __ j(equal, &cons_string, Label::kNear);
  __ cmp(result, Immediate(kThinStringTag));
  __ j(equal, &thin_string, Label::kNear);

  // Handle slices.
  // Fold the slice offset into |index| and continue with the parent string.
  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ SmiUntag(result);
  __ add(index, result);
  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded);

  // Handle thin strings.
  __ bind(&thin_string);
  __ mov(string, FieldOperand(string, ThinString::kActualOffset));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory->empty_string()));
  __ j(not_equal, call_runtime);
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
  __ jmp(&indirect_string_loaded);

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label one_byte_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  // Short externals have no resource data pointer, so they must go to the
  // runtime.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ test_b(result, Immediate(kShortExternalStringMask));
  __ j(not_zero, call_runtime);
  // Check encoding.
  // The mov below is scheduled between the test and the branch; it does not
  // affect the flags.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(result, Immediate(kStringEncodingMask));
  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &one_byte_external, Label::kNear);
  // Two-byte string.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&one_byte_external);
  // One-byte string.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: one-byte or two-byte.
  Label one_byte;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &one_byte, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  __ movzx_w(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // One-byte string.
  // Load the byte into the result register.
  __ bind(&one_byte);
  __ movzx_b(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}


#undef __


// Writes the canonical "young" code-age prologue sequence
// (push ebp; mov ebp, esp; push esi; push edi) into young_sequence_ so it
// can later be compared against, and copied over, function prologues.
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  CodePatcher patcher(isolate, young_sequence_.start(),
                      young_sequence_.length());
  patcher.masm()->push(ebp);
  patcher.masm()->mov(ebp, esp);
  patcher.masm()->push(esi);
  patcher.masm()->push(edi);
}


#ifdef DEBUG
// An "old" (aged) sequence starts with a call instruction patched in by
// PatchPlatformCodeAge, so the first byte is the call opcode.
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif


// Returns true if |sequence| still carries the unpatched young prologue.
// In debug builds, verifies that a non-young sequence really looks aged.
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}

// Decodes the age encoded at |sequence|. Young sequences report
// kNoAgeCodeAge; aged ones resolve the rel32 call target that follows the
// call opcode to the code-age stub, whose identity encodes the age.
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;

  sequence++;  // Skip the kCallOpcode byte
  // The call target is pc-relative: displacement plus the address of the
  // instruction following the call.
  Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
                           Assembler::kCallTargetAddressOffset;
  Code* stub = GetCodeFromTargetAddress(target_address);
  return GetAgeOfCodeAgeStub(stub);
}

// Rewrites the prologue at |sequence|: restores the young prologue when
// |age| is kNoAgeCodeAge (flushing the icache), otherwise patches in a call
// to the code-age stub for |age|.
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
                                Code::Age age) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age);
    CodePatcher patcher(isolate, sequence, young_length);
    // NONE32: no relocation info is recorded for the patched call target.
    patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X87