// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/mips64/codegen-mips64.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/mips64/simulator-mips64.h"

namespace v8 {
namespace internal {


#define __ masm.


#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else

  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
  if (buffer == nullptr) return stub;

  // This code assumes that cache lines are 32 bytes and if the cache line is
  // larger it will not work correctly.
  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  {
    Label lastb, unaligned, aligned, chkw,
          loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
          leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
          ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;

    // The size of each prefetch.
    uint32_t pref_chunk = 32;
    // The maximum size of a prefetch, it must not be less than pref_chunk.
    // If the real size of a prefetch is greater than max_pref_size and
    // the kPrefHintPrepareForStore hint is used, the code will not work
    // correctly.
    uint32_t max_pref_size = 128;
    DCHECK(pref_chunk < max_pref_size);

    // pref_limit is set based on the fact that we never use an offset
    // greater than 5 on a store pref and that a single pref can
    // never be larger than max_pref_size.
    uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
    int32_t pref_hint_load = kPrefHintLoadStreamed;
    int32_t pref_hint_store = kPrefHintPrepareForStore;
    uint32_t loadstore_chunk = 4;

    // The initial prefetches may fetch bytes that are before the buffer being
    // copied. Start copies with an offset of 4 to avoid this situation when
    // using kPrefHintPrepareForStore.
    DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
           pref_chunk * 4 >= max_pref_size);
    // If the size is less than 8, go to lastb. Regardless of size,
    // copy dst pointer to v0 for the return value.
    __ slti(a6, a2, 2 * loadstore_chunk);
    __ bne(a6, zero_reg, &lastb);
    __ mov(v0, a0);  // In delay slot.

    // If src and dst have different alignments, go to unaligned. If they
    // have the same alignment (but are not actually aligned), do a partial
    // load/store to make them aligned. If they are both already aligned
    // we can start copying at aligned.
    __ xor_(t8, a1, a0);
    __ andi(t8, t8, loadstore_chunk - 1);  // t8 is a0/a1 word-displacement.
    __ bne(t8, zero_reg, &unaligned);
    __ subu(a3, zero_reg, a0);  // In delay slot.

    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
    __ beq(a3, zero_reg, &aligned);  // Already aligned.
    __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.

    if (kArchEndian == kLittle) {
      __ lwr(t8, MemOperand(a1));
      __ addu(a1, a1, a3);
      __ swr(t8, MemOperand(a0));
      __ addu(a0, a0, a3);
    } else {
      __ lwl(t8, MemOperand(a1));
      __ addu(a1, a1, a3);
      __ swl(t8, MemOperand(a0));
      __ addu(a0, a0, a3);
    }
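
    // The lwr/swr pair above (lwl/swl on big-endian targets) copies the one
    // to three leading bytes needed to word-align a0 and a1 with a single
    // partial load/store; both pointers were then advanced by a3, and a2 was
    // already reduced by a3 in the delay slot above.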
    // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
    // count how many bytes we have to copy after all the 64 byte chunks are
    // copied and a3 to the dst pointer after all the 64 byte chunks have been
    // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
    __ bind(&aligned);
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &chkw);  // Less than 64?
    __ subu(a3, a2, t8);  // In delay slot.
    __ addu(a3, a0, a3);  // Now a3 is the final dst after loop.

    // When in the loop we prefetch with kPrefHintPrepareForStore hint,
    // in this case the a0+x should be past the "a4-32" address. This means:
    // for x=128 the last "safe" a0 address is "a4-160". Alternatively, for
    // x=64 the last "safe" a0 address is "a4-96". In the current version we
    // will use "pref hint, 128(a0)", so "a4-160" is the limit.
    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ addu(a4, a0, a2);  // a4 is the "past the end" address.
      __ Subu(t9, a4, pref_limit);  // t9 is the "last safe pref" address.
    }

    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));

    if (pref_hint_store != kPrefHintPrepareForStore) {
      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    }
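    // Main aligned copy loop: every iteration moves 64 bytes, loading eight
    // words into a4-a7/t0-t3 and storing them, then repeating for the next
    // eight words, while prefetching ahead on both source and destination.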
    __ bind(&loop16w);
    __ lw(a4, MemOperand(a1));

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ sltu(v1, t9, a0);  // If a0 > t9, don't use next prefetch.
      __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
    }
    __ lw(a5, MemOperand(a1, 1, loadstore_chunk));  // Maybe in delay slot.

    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
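    // The two PrepareForStore prefetches above are only issued while a0 is
    // still at or below t9; once a0 passes that last safe address, the branch
    // above goes straight to skip_pref, so the stub never prefetches-for-store
    // past the end of the destination buffer.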
    __ bind(&skip_pref);
    __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
    __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
    __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
    __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));

    __ sw(a4, MemOperand(a0));
    __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
    __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
    __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 7, loadstore_chunk));

    __ lw(a4, MemOperand(a1, 8, loadstore_chunk));
    __ lw(a5, MemOperand(a1, 9, loadstore_chunk));
    __ lw(a6, MemOperand(a1, 10, loadstore_chunk));
    __ lw(a7, MemOperand(a1, 11, loadstore_chunk));
    __ lw(t0, MemOperand(a1, 12, loadstore_chunk));
    __ lw(t1, MemOperand(a1, 13, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 14, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 15, loadstore_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));

    __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
    __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
    __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
    __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
    __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
    __ mov(a2, t8);

    // Here we have src and dest word-aligned but less than 64 bytes to go.
    // Check for a 32 byte chunk and copy it if there is one. Otherwise jump
    // down to chk1w to handle the tail end of the copy.
    __ bind(&chkw);
    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ andi(t8, a2, 0x1f);
    __ beq(a2, t8, &chk1w);  // Less than 32?
    __ nop();  // In delay slot.
    __ lw(a4, MemOperand(a1));
    __ lw(a5, MemOperand(a1, 1, loadstore_chunk));
    __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
    __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
    __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
    __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(a4, MemOperand(a0));
    __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
    __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
    __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Here we have less than 32 bytes to copy. Set up for a loop to copy
    // one word at a time. Set a2 to count how many bytes we have to copy
    // after all the word chunks are copied and a3 to the dst pointer after
    // all the word chunks have been copied. We will loop, incrementing a0
    // and a1 until a0 equals a3.
    __ bind(&chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &lastb);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);

    __ bind(&wordCopy_loop);
    __ lw(a7, MemOperand(a1));
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &wordCopy_loop);
    __ sw(a7, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.

    __ bind(&lastb);
    __ Branch(&leave, le, a2, Operand(zero_reg));
    __ addu(a3, a0, a2);

    __ bind(&lastbloop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &lastbloop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ bind(&leave);
    __ jr(ra);
    __ nop();

    // Unaligned case. Only the dst gets aligned so we need to do partial
    // loads of the source followed by normal stores to the dst (once we
    // have aligned the destination).
    __ bind(&unaligned);
    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
    __ beq(a3, zero_reg, &ua_chk16w);
    __ subu(a2, a2, a3);  // In delay slot.
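
    // On MIPS an unaligned word is assembled from two partial loads: on
    // little-endian targets lwr fetches the low-order bytes at the unaligned
    // address and lwl fetches the high-order bytes at address + 3 (the
    // offset_minus_one operands below); the roles are swapped on big-endian.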
    if (kArchEndian == kLittle) {
      __ lwr(v1, MemOperand(a1));
      __ lwl(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ addu(a1, a1, a3);
      __ swr(v1, MemOperand(a0));
      __ addu(a0, a0, a3);
    } else {
      __ lwl(v1, MemOperand(a1));
      __ lwr(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ addu(a1, a1, a3);
      __ swl(v1, MemOperand(a0));
      __ addu(a0, a0, a3);
    }

    // Now the dst (but not the source) is aligned. Set a2 to count how many
    // bytes we have to copy after all the 64 byte chunks are copied and a3 to
    // the dst pointer after all the 64 byte chunks have been copied. We will
    // loop, incrementing a0 and a1 until a0 equals a3.
    __ bind(&ua_chk16w);
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &ua_chkw);
    __ subu(a3, a2, t8);  // In delay slot.
    __ addu(a3, a0, a3);

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ addu(a4, a0, a2);
      __ Subu(t9, a4, pref_limit);
    }

    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));

    if (pref_hint_store != kPrefHintPrepareForStore) {
      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    }
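
    // Unaligned main loop: the same 64-bytes-per-iteration structure as
    // loop16w above, except that every source word is read with an lwr/lwl
    // pair, while the stores remain plain sw since only the destination has
    // been aligned.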
    __ bind(&ua_loop16w);
    if (kArchEndian == kLittle) {
      __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
      __ lwr(a4, MemOperand(a1));
      __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
      __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));

      if (pref_hint_store == kPrefHintPrepareForStore) {
        __ sltu(v1, t9, a0);
        __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
      }
      __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.

      __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

      __ bind(&ua_skip_pref);
      __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
      __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
      __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
      __ lwl(a4,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a5,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a6,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a7,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t0,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
      __ lwl(a4, MemOperand(a1));
      __ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
      __ lwl(a6, MemOperand(a1, 2, loadstore_chunk));

      if (pref_hint_store == kPrefHintPrepareForStore) {
        __ sltu(v1, t9, a0);
        __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
      }
      __ lwl(a7, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.

      __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

      __ bind(&ua_skip_pref);
      __ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
      __ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
      __ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
      __ lwr(a4,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a5,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a6,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a7,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t0,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
    __ sw(a4, MemOperand(a0));
    __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
    __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
    __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
    if (kArchEndian == kLittle) {
      __ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
      __ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
      __ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
      __ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
      __ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
      __ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
      __ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
      __ lwl(a4,
             MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a5,
             MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a6,
             MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a7,
             MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t0,
             MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(a4, MemOperand(a1, 8, loadstore_chunk));
      __ lwl(a5, MemOperand(a1, 9, loadstore_chunk));
      __ lwl(a6, MemOperand(a1, 10, loadstore_chunk));
      __ lwl(a7, MemOperand(a1, 11, loadstore_chunk));
      __ lwl(t0, MemOperand(a1, 12, loadstore_chunk));
      __ lwl(t1, MemOperand(a1, 13, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 14, loadstore_chunk));
      __ lwl(t3, MemOperand(a1, 15, loadstore_chunk));
      __ lwr(a4,
             MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a5,
             MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a6,
             MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a7,
             MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t0,
             MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
    __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
    __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
    __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
    __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
    __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &ua_loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
    __ mov(a2, t8);

    // Here we have less than 64 bytes to go. Check for a 32 byte chunk and
    // copy it if there is one. Otherwise jump down to ua_chk1w to handle the
    // tail end of the copy.
    __ bind(&ua_chkw);
    __ Pref(pref_hint_load, MemOperand(a1));
    __ andi(t8, a2, 0x1f);

    __ beq(a2, t8, &ua_chk1w);
    __ nop();  // In delay slot.
    if (kArchEndian == kLittle) {
      __ lwr(a4, MemOperand(a1));
      __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
      __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
      __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
      __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
      __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
      __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
      __ lwl(a4,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a5,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a6,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a7,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t0,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(a4, MemOperand(a1));
      __ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
      __ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
      __ lwl(a7, MemOperand(a1, 3, loadstore_chunk));
      __ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
      __ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
      __ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
      __ lwr(a4,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a5,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a6,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a7,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t0,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(a4, MemOperand(a0));
    __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
    __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
    __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Less than 32 bytes to copy. Set up for a loop to
    // copy one word at a time.
    __ bind(&ua_chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &ua_smallCopy);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);

    __ bind(&ua_wordCopy_loop);
    if (kArchEndian == kLittle) {
      __ lwr(v1, MemOperand(a1));
      __ lwl(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(v1, MemOperand(a1));
      __ lwr(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &ua_wordCopy_loop);
    __ sw(v1, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.

    // Copy the remaining tail bytes (fewer than one word) one at a time.
    __ bind(&ua_smallCopy);
    __ beq(a2, zero_reg, &leave);
    __ addu(a3, a0, a2);  // In delay slot.

    __ bind(&ua_smallCopy_loop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &ua_smallCopy_loop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ jr(ra);
    __ nop();
  }
  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif
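
// Generates a small stub computing the square root of a double: the argument
// arrives via MovFromFloatParameter (f12), sqrt_d leaves the result in f0,
// and MovToFloatResult returns it.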
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  __ MovFromFloatParameter(f12);
  __ sqrt_d(f0, f12);
  __ MovToFloatResult(f0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = a4;
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch_elements, allocation_memento_found);
  }

  // Set transitioned map.
  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      t1,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
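
// GenerateSmiToDouble transitions a JSArray backing store from a FixedArray
// of smis to a FixedDoubleArray: it allocates the new array, converts every
// smi element to a double, and writes the hole NaN for holes.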
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register ra contains the return address.
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = a4;
  Register length = a5;
  Register array = a6;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = t1;
  Register scratch3 = a7;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));

  Register scratch = t2;
  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(elements));

  __ push(ra);
  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ SmiScale(scratch, length, kDoubleSizeLog2);
  __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
  __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  __ Dsubu(array, array, kHeapObjectTag);
  // array: destination FixedDoubleArray, not tagged as heap object

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ sd(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Daddu(scratch1, array, Operand(kHeapObjectTag));
  __ sd(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      scratch1,
                      scratch2,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);


  // Prepare for conversion loop.
  __ Daddu(scratch1, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Daddu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiScale(array_end, length, kDoubleSizeLog2);
  __ Daddu(array_end, array_end, scratch3);

  // Repurpose registers no longer in use.
  Register hole_lower = elements;
  Register hole_upper = length;
  __ li(hole_lower, Operand(kHoleNanLower32));
  __ li(hole_upper, Operand(kHoleNanUpper32));

  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch3: begin of FixedDoubleArray element fields, not tagged

  __ Branch(&entry);

  __ bind(&only_change_map);
  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Branch(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ ld(ra, MemOperand(sp, 0));
  __ Branch(USE_DELAY_SLOT, fail);
  __ daddiu(sp, sp, kPointerSize);  // In delay slot.

  // Convert and copy elements.
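  // Each iteration loads one tagged element: smis are untagged, converted
  // with cvt_d_w and stored as doubles; holes are stored as the canonical
  // hole NaN, written as its two 32-bit halves.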
  __ bind(&loop);
  __ ld(scratch2, MemOperand(scratch1));
  __ Daddu(scratch1, scratch1, kPointerSize);
  // scratch2: current element
  __ JumpIfNotSmi(scratch2, &convert_hole);
  __ SmiUntag(scratch2);

  // Normal smi, convert to double and store.
  __ mtc1(scratch2, f0);
  __ cvt_d_w(f0, f0);
  __ sdc1(f0, MemOperand(scratch3));
  __ Branch(USE_DELAY_SLOT, &entry);
  __ daddiu(scratch3, scratch3, kDoubleSize);  // In delay slot.

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ Or(scratch2, scratch2, Operand(1));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
  }
  // mantissa
  __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
  // exponent
  __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
  __ Daddu(scratch3, scratch3, kDoubleSize);

  __ bind(&entry);
  __ Branch(&loop, lt, scratch3, Operand(array_end));

  __ bind(&done);
  __ pop(ra);
}
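
// GenerateDoubleToObject performs the reverse transition: it allocates a new
// FixedArray, boxes each double from the FixedDoubleArray into a fresh
// HeapNumber (holes become the-hole sentinel), and installs the new backing
// store on the receiver with the appropriate write barriers.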
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register ra contains the return address.
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = a4;
  Register array = a6;
  Register length = a5;
  Register scratch = t1;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));
  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(elements));

  __ MultiPush(
      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());

  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ SmiScale(array_size, length, kPointerSizeLog2);
  __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  __ Dsubu(array, array, kHeapObjectTag);
  // array: destination FixedArray, not tagged as heap object
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ sd(scratch, MemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ Daddu(src_elements, src_elements,
           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ SmiScale(dst_end, dst_end, kPointerSizeLog2);
  __ Daddu(dst_end, dst_elements, dst_end);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
  __ Branch(&initialization_loop_entry);
  __ bind(&initialization_loop);
  __ sd(scratch, MemOperand(dst_elements));
  __ Daddu(dst_elements, dst_elements, Operand(kPointerSize));
  __ bind(&initialization_loop_entry);
  __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));

  __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ Daddu(array, array, Operand(kHeapObjectTag));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields, not tagged,
  //               points to the exponent
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // heap_number_map: heap number map
  __ Branch(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ MultiPop(
      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());

  __ Branch(fail);
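
  // Main conversion loop: the upper 32 bits of each double are compared
  // against kHoleNanUpper32; holes are replaced by the-hole sentinel, and
  // every other value is boxed into a newly allocated HeapNumber and recorded
  // with the write barrier.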
  __ bind(&loop);
  Register upper_bits = key;
  __ lw(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ Daddu(src_elements, src_elements, kDoubleSize);
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element
  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  Register scratch3 = t2;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
  // Load current element, src_elements points to next element.

  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
  __ sd(scratch2, FieldMemOperand(heap_number, HeapNumber::kValueOffset));

  __ mov(scratch2, dst_elements);
  __ sd(heap_number, MemOperand(dst_elements));
  __ Daddu(dst_elements, dst_elements, kPointerSize);
  __ RecordWrite(array,
                 scratch2,
                 heap_number,
                 kRAHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Branch(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ sd(scratch2, MemOperand(dst_elements));
  __ Daddu(dst_elements, dst_elements, kPointerSize);

  __ bind(&entry);
  __ Branch(&loop, lt, dst_elements, Operand(dst_end));

  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ sd(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      array,
                      scratch,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(ra);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
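
// StringCharLoadGenerator::Generate loads the character at |index| of |string|
// into |result|: it first unwraps sliced and flat cons strings, then handles
// both sequential and external representations, falling back to the runtime
// for anything it cannot handle inline (e.g. short external strings).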
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ And(at, result, Operand(kIsIndirectStringMask));
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ And(at, result, Operand(kSlicedNotConsMask));
  __ Branch(&cons_string, eq, at, Operand(zero_reg));

  // Handle slices.
  Label indirect_string_loaded;
  __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ dsra32(at, result, 0);
  __ Daddu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ LoadRoot(at, Heap::kempty_stringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));
  // Get the first of the two strings and load its instance type.
  __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(at, result, Operand(kStringRepresentationMask));
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Daddu(string,
           string,
           SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ And(at, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound,
              at, Operand(zero_reg));
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ And(at, result, Operand(kShortExternalStringMask));
  __ Branch(call_runtime, ne, at, Operand(zero_reg));
  __ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ And(at, result, Operand(kStringEncodingMask));
  __ Branch(&one_byte, ne, at, Operand(zero_reg));
  // Two-byte string.
  __ Dlsa(at, string, index, 1);
  __ lhu(result, MemOperand(at));
  __ jmp(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ Daddu(at, string, index);
  __ lbu(result, MemOperand(at));
  __ bind(&done);
}

#ifdef DEBUG
// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
#endif


CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before MIPS simulator ICache is setup.
  base::SmartPointer<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushStandardFrame(a1);
  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Assembler::target_address_at(
        sequence + Assembler::kInstrSize);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}
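
// PatchPlatformCodeAge either restores the original young prologue or
// overwrites it with a marker nop followed by a call to the code-age stub;
// GetCodeAgeAndParity() above recovers the stub, and hence the age, from the
// address loaded at that call site.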
void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence,
                        young_length / Assembler::kInstrSize);
    // Mark this code sequence for FindPlatformCodeAgeSequence().
    patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
    // Load the stub address to t9 and call it.
    // GetCodeAgeAndParity() extracts the stub address from this instruction.
    patcher.masm()->li(
        t9,
        Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
        ADDRESS_LOAD);
    patcher.masm()->nop();  // Prevent jalr to jal optimization.
    patcher.masm()->jalr(t9, a0);
    patcher.masm()->nop();  // Branch delay slot nop.
    patcher.masm()->nop();  // Pad the empty space.
  }
}


#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64