1 // Copyright 2014 the V8 project authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 #ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_ 6 #define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_ 7 8 #include "src/assembler.h" 9 #include "src/compiler/common-operator.h" 10 #include "src/compiler/graph.h" 11 #include "src/compiler/linkage.h" 12 #include "src/compiler/machine-operator.h" 13 #include "src/compiler/node.h" 14 #include "src/compiler/operator.h" 15 #include "src/factory.h" 16 #include "src/globals.h" 17 18 namespace v8 { 19 namespace internal { 20 namespace compiler { 21 22 class BasicBlock; 23 class RawMachineLabel; 24 class Schedule; 25 26 27 // The RawMachineAssembler produces a low-level IR graph. All nodes are wired 28 // into a graph and also placed into a schedule immediately, hence subsequent 29 // code generation can happen without the need for scheduling. 30 // 31 // In order to create a schedule on-the-fly, the assembler keeps track of basic 32 // blocks by having one current basic block being populated and by referencing 33 // other basic blocks through the use of labels. 34 // 35 // Also note that the generated graph is only valid together with the generated 36 // schedule, using one without the other is invalid as the graph is inherently 37 // non-schedulable due to missing control and effect dependencies. 
class V8_EXPORT_PRIVATE RawMachineAssembler {
 public:
  // Constructs an assembler that wires nodes into {graph} and schedules them
  // immediately. {word} selects the machine word representation; {flags} and
  // {alignment_requirements} configure which machine operators are available.
  RawMachineAssembler(
      Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
      MachineRepresentation word = MachineType::PointerRepresentation(),
      MachineOperatorBuilder::Flags flags =
          MachineOperatorBuilder::Flag::kNoFlags,
      MachineOperatorBuilder::AlignmentRequirements alignment_requirements =
          MachineOperatorBuilder::AlignmentRequirements::
              FullUnalignedAccessSupport());
  ~RawMachineAssembler() {}

  // Accessors for the building blocks this assembler wires together.
  Isolate* isolate() const { return isolate_; }
  Graph* graph() const { return graph_; }
  Zone* zone() const { return graph()->zone(); }
  MachineOperatorBuilder* machine() { return &machine_; }
  CommonOperatorBuilder* common() { return &common_; }
  CallDescriptor* call_descriptor() const { return call_descriptor_; }

  // Finalizes the schedule and exports it to be used for code generation. Note
  // that this RawMachineAssembler becomes invalid after export.
  Schedule* Export();

  // ===========================================================================
  // The following utility methods create new nodes with specific operators and
  // place them into the current basic block. They don't perform control flow,
  // hence will not switch the current basic block.

  Node* NullConstant() {
    return HeapConstant(isolate()->factory()->null_value());
  }

  Node* UndefinedConstant() {
    return HeapConstant(isolate()->factory()->undefined_value());
  }

  // Constants.
  Node* PointerConstant(void* value) {
    return IntPtrConstant(reinterpret_cast<intptr_t>(value));
  }
  Node* IntPtrConstant(intptr_t value) {
    // TODO(dcarney): mark generated code as unserializable if value != 0.
    // Emits a 64- or 32-bit constant depending on the target pointer size.
    return kPointerSize == 8 ? Int64Constant(value)
                             : Int32Constant(static_cast<int>(value));
  }
  Node* RelocatableIntPtrConstant(intptr_t value, RelocInfo::Mode rmode);
  Node* Int32Constant(int32_t value) {
    return AddNode(common()->Int32Constant(value));
  }
  Node* StackSlot(MachineRepresentation rep) {
    return AddNode(machine()->StackSlot(rep));
  }
  Node* Int64Constant(int64_t value) {
    return AddNode(common()->Int64Constant(value));
  }
  Node* NumberConstant(double value) {
    return AddNode(common()->NumberConstant(value));
  }
  Node* Float32Constant(float value) {
    return AddNode(common()->Float32Constant(value));
  }
  Node* Float64Constant(double value) {
    return AddNode(common()->Float64Constant(value));
  }
  Node* HeapConstant(Handle<HeapObject> object) {
    return AddNode(common()->HeapConstant(object));
  }
  Node* BooleanConstant(bool value) {
    Handle<Object> object = isolate()->factory()->ToBoolean(value);
    return HeapConstant(Handle<HeapObject>::cast(object));
  }
  Node* ExternalConstant(ExternalReference address) {
    return AddNode(common()->ExternalConstant(address));
  }
  Node* RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode) {
    return AddNode(common()->RelocatableInt32Constant(value, rmode));
  }
  Node* RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode) {
    return AddNode(common()->RelocatableInt64Constant(value, rmode));
  }

  Node* Projection(int index, Node* a) {
    return AddNode(common()->Projection(index), a);
  }

  // Memory Operations.
  // The single-operand overloads default the index to zero.
  Node* Load(MachineType rep, Node* base) {
    return Load(rep, base, IntPtrConstant(0));
  }
  Node* Load(MachineType rep, Node* base, Node* index) {
    return AddNode(machine()->Load(rep), base, index);
  }
  Node* Store(MachineRepresentation rep, Node* base, Node* value,
              WriteBarrierKind write_barrier) {
    return Store(rep, base, IntPtrConstant(0), value, write_barrier);
  }
  Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value,
              WriteBarrierKind write_barrier) {
    return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)),
                   base, index, value);
  }
  Node* Retain(Node* value) { return AddNode(common()->Retain(), value); }

  // Unaligned memory operations
  Node* UnalignedLoad(MachineType rep, Node* base) {
    return UnalignedLoad(rep, base, IntPtrConstant(0));
  }
  Node* UnalignedLoad(MachineType rep, Node* base, Node* index) {
    // Use a plain load when the machine supports unaligned access for this
    // type; otherwise fall back to the dedicated UnalignedLoad operator.
    if (machine()->UnalignedLoadSupported(rep, 1)) {
      return AddNode(machine()->Load(rep), base, index);
    } else {
      return AddNode(machine()->UnalignedLoad(rep), base, index);
    }
  }
  Node* UnalignedStore(MachineRepresentation rep, Node* base, Node* value) {
    return UnalignedStore(rep, base, IntPtrConstant(0), value);
  }
  Node* UnalignedStore(MachineRepresentation rep, Node* base, Node* index,
                       Node* value) {
    MachineType t = MachineType::TypeForRepresentation(rep);
    // Same fallback scheme as UnalignedLoad; unaligned stores never need a
    // write barrier here, so kNoWriteBarrier is used for the aligned path.
    if (machine()->UnalignedStoreSupported(t, 1)) {
      return AddNode(machine()->Store(StoreRepresentation(
                         rep, WriteBarrierKind::kNoWriteBarrier)),
                     base, index, value);
    } else {
      return AddNode(
          machine()->UnalignedStore(UnalignedStoreRepresentation(rep)), base,
          index, value);
    }
  }

  // Atomic memory operations.
  Node* AtomicLoad(MachineType rep, Node* base, Node* index) {
    return AddNode(machine()->AtomicLoad(rep), base, index);
  }
  Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
                    Node* value) {
    return AddNode(machine()->AtomicStore(rep), base, index, value);
  }

  // Arithmetic Operations.
  // Word* operations use the machine word size (see the IntPtr/UintPtr macros
  // below for the pointer-sized Int/Uint variants).
  Node* WordAnd(Node* a, Node* b) {
    return AddNode(machine()->WordAnd(), a, b);
  }
  Node* WordOr(Node* a, Node* b) { return AddNode(machine()->WordOr(), a, b); }
  Node* WordXor(Node* a, Node* b) {
    return AddNode(machine()->WordXor(), a, b);
  }
  Node* WordShl(Node* a, Node* b) {
    return AddNode(machine()->WordShl(), a, b);
  }
  Node* WordShr(Node* a, Node* b) {
    return AddNode(machine()->WordShr(), a, b);
  }
  Node* WordSar(Node* a, Node* b) {
    return AddNode(machine()->WordSar(), a, b);
  }
  Node* WordRor(Node* a, Node* b) {
    return AddNode(machine()->WordRor(), a, b);
  }
  Node* WordEqual(Node* a, Node* b) {
    return AddNode(machine()->WordEqual(), a, b);
  }
  Node* WordNotEqual(Node* a, Node* b) {
    // Comparisons produce 32-bit booleans, hence the Word32 negation.
    return Word32BinaryNot(WordEqual(a, b));
  }
  Node* WordNot(Node* a) {
    // Dispatch on the machine word size.
    if (machine()->Is32()) {
      return Word32Not(a);
    } else {
      return Word64Not(a);
    }
  }

  Node* Word32And(Node* a, Node* b) {
    return AddNode(machine()->Word32And(), a, b);
  }
  Node* Word32Or(Node* a, Node* b) {
    return AddNode(machine()->Word32Or(), a, b);
  }
  Node* Word32Xor(Node* a, Node* b) {
    return AddNode(machine()->Word32Xor(), a, b);
  }
  Node* Word32Shl(Node* a, Node* b) {
    return AddNode(machine()->Word32Shl(), a, b);
  }
  Node* Word32Shr(Node* a, Node* b) {
    return AddNode(machine()->Word32Shr(), a, b);
  }
  Node* Word32Sar(Node* a, Node* b) {
    return AddNode(machine()->Word32Sar(), a, b);
  }
  Node* Word32Ror(Node* a, Node* b) {
    return AddNode(machine()->Word32Ror(), a, b);
  }
  Node* Word32Clz(Node* a) { return AddNode(machine()->Word32Clz(), a); }
  Node* Word32Equal(Node* a, Node* b) {
    return AddNode(machine()->Word32Equal(), a, b);
  }
  Node* Word32NotEqual(Node* a, Node* b) {
    return Word32BinaryNot(Word32Equal(a, b));
  }
  // Bitwise complement vs. logical (0/1) negation.
  Node* Word32Not(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
  Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }

  Node* Word64And(Node* a, Node* b) {
    return AddNode(machine()->Word64And(), a, b);
  }
  Node* Word64Or(Node* a, Node* b) {
    return AddNode(machine()->Word64Or(), a, b);
  }
  Node* Word64Xor(Node* a, Node* b) {
    return AddNode(machine()->Word64Xor(), a, b);
  }
  Node* Word64Shl(Node* a, Node* b) {
    return AddNode(machine()->Word64Shl(), a, b);
  }
  Node* Word64Shr(Node* a, Node* b) {
    return AddNode(machine()->Word64Shr(), a, b);
  }
  Node* Word64Sar(Node* a, Node* b) {
    return AddNode(machine()->Word64Sar(), a, b);
  }
  Node* Word64Ror(Node* a, Node* b) {
    return AddNode(machine()->Word64Ror(), a, b);
  }
  Node* Word64Clz(Node* a) { return AddNode(machine()->Word64Clz(), a); }
  Node* Word64Equal(Node* a, Node* b) {
    return AddNode(machine()->Word64Equal(), a, b);
  }
  Node* Word64NotEqual(Node* a, Node* b) {
    // 64-bit comparison still yields a 32-bit boolean.
    return Word32BinaryNot(Word64Equal(a, b));
  }
  Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }

  Node* Int32Add(Node* a, Node* b) {
    return AddNode(machine()->Int32Add(), a, b);
  }
  Node* Int32AddWithOverflow(Node* a, Node* b) {
    return AddNode(machine()->Int32AddWithOverflow(), a, b);
  }
  Node* Int32Sub(Node* a, Node* b) {
    return AddNode(machine()->Int32Sub(), a, b);
  }
  Node* Int32SubWithOverflow(Node* a, Node* b) {
    return AddNode(machine()->Int32SubWithOverflow(), a, b);
  }
  Node* Int32Mul(Node* a, Node* b) {
    return AddNode(machine()->Int32Mul(), a, b);
  }
  Node* Int32MulHigh(Node* a, Node* b) {
    return AddNode(machine()->Int32MulHigh(), a, b);
  }
  Node* Int32MulWithOverflow(Node* a, Node* b) {
    return AddNode(machine()->Int32MulWithOverflow(), a, b);
  }
  Node* Int32Div(Node* a, Node* b) {
    return AddNode(machine()->Int32Div(), a, b);
  }
  Node* Int32Mod(Node* a, Node* b) {
    return AddNode(machine()->Int32Mod(), a, b);
  }
  Node* Int32LessThan(Node* a, Node* b) {
    return AddNode(machine()->Int32LessThan(), a, b);
  }
  Node* Int32LessThanOrEqual(Node* a, Node* b) {
    return AddNode(machine()->Int32LessThanOrEqual(), a, b);
  }
  Node* Uint32Div(Node* a, Node* b) {
    return AddNode(machine()->Uint32Div(), a, b);
  }
  Node* Uint32LessThan(Node* a, Node* b) {
    return AddNode(machine()->Uint32LessThan(), a, b);
  }
  Node* Uint32LessThanOrEqual(Node* a, Node* b) {
    return AddNode(machine()->Uint32LessThanOrEqual(), a, b);
  }
  Node* Uint32Mod(Node* a, Node* b) {
    return AddNode(machine()->Uint32Mod(), a, b);
  }
  Node* Uint32MulHigh(Node* a, Node* b) {
    return AddNode(machine()->Uint32MulHigh(), a, b);
  }
  // Greater-than comparisons are expressed via less-than with swapped
  // operands, so no dedicated machine operator is needed.
  Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
  Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
    return Int32LessThanOrEqual(b, a);
  }
  Node* Uint32GreaterThan(Node* a, Node* b) { return Uint32LessThan(b, a); }
  Node* Uint32GreaterThanOrEqual(Node* a, Node* b) {
    return Uint32LessThanOrEqual(b, a);
  }
  Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }

  Node* Int64Add(Node* a, Node* b) {
    return AddNode(machine()->Int64Add(), a, b);
  }
  Node* Int64AddWithOverflow(Node* a, Node* b) {
    return AddNode(machine()->Int64AddWithOverflow(), a, b);
  }
  Node* Int64Sub(Node* a, Node* b) {
    return AddNode(machine()->Int64Sub(), a, b);
  }
  Node* Int64SubWithOverflow(Node* a, Node* b) {
    return AddNode(machine()->Int64SubWithOverflow(), a, b);
  }
  Node* Int64Mul(Node* a, Node* b) {
    return AddNode(machine()->Int64Mul(), a, b);
  }
  Node* Int64Div(Node* a, Node* b) {
    return AddNode(machine()->Int64Div(), a, b);
  }
  Node* Int64Mod(Node* a, Node* b) {
    return AddNode(machine()->Int64Mod(), a, b);
  }
  Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
  Node* Int64LessThan(Node* a, Node* b) {
    return AddNode(machine()->Int64LessThan(), a, b);
  }
  Node* Int64LessThanOrEqual(Node* a, Node* b) {
    return AddNode(machine()->Int64LessThanOrEqual(), a, b);
  }
  Node* Uint64LessThan(Node* a, Node* b) {
    return AddNode(machine()->Uint64LessThan(), a, b);
  }
  Node* Uint64LessThanOrEqual(Node* a, Node* b) {
    return AddNode(machine()->Uint64LessThanOrEqual(), a, b);
  }
  Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
  Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
    return Int64LessThanOrEqual(b, a);
  }
  Node* Uint64GreaterThan(Node* a, Node* b) { return Uint64LessThan(b, a); }
  Node* Uint64GreaterThanOrEqual(Node* a, Node* b) {
    return Uint64LessThanOrEqual(b, a);
  }
  Node* Uint64Div(Node* a, Node* b) {
    return AddNode(machine()->Uint64Div(), a, b);
  }
  Node* Uint64Mod(Node* a, Node* b) {
    return AddNode(machine()->Uint64Mod(), a, b);
  }
  // 64-bit arithmetic expressed on pairs of 32-bit values (low/high words),
  // for 32-bit targets.
  Node* Int32PairAdd(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
    return AddNode(machine()->Int32PairAdd(), a_low, a_high, b_low, b_high);
  }
  Node* Int32PairSub(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
    return AddNode(machine()->Int32PairSub(), a_low, a_high, b_low, b_high);
  }
  Node* Int32PairMul(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
    return AddNode(machine()->Int32PairMul(), a_low, a_high, b_low, b_high);
  }
  Node* Word32PairShl(Node* low_word, Node* high_word, Node* shift) {
    return AddNode(machine()->Word32PairShl(), low_word, high_word, shift);
  }
  Node* Word32PairShr(Node* low_word, Node* high_word, Node* shift) {
    return AddNode(machine()->Word32PairShr(), low_word, high_word, shift);
  }
  Node* Word32PairSar(Node* low_word, Node* high_word, Node* shift) {
    return AddNode(machine()->Word32PairSar(), low_word, high_word, shift);
  }

// Pointer-sized signed operations: forward to the 64- or 32-bit variant
// depending on the target pointer size.
#define INTPTR_BINOP(prefix, name)                     \
  Node* IntPtr##name(Node* a, Node* b) {               \
    return kPointerSize == 8 ? prefix##64##name(a, b)  \
                             : prefix##32##name(a, b); \
  }

  INTPTR_BINOP(Int, Add);
  INTPTR_BINOP(Int, AddWithOverflow);
  INTPTR_BINOP(Int, Sub);
  INTPTR_BINOP(Int, SubWithOverflow);
  INTPTR_BINOP(Int, Mul);
  INTPTR_BINOP(Int, Div);
  INTPTR_BINOP(Int, LessThan);
  INTPTR_BINOP(Int, LessThanOrEqual);
  INTPTR_BINOP(Word, Equal);
  INTPTR_BINOP(Word, NotEqual);
  INTPTR_BINOP(Int, GreaterThanOrEqual);
  INTPTR_BINOP(Int, GreaterThan);

#undef INTPTR_BINOP

// Pointer-sized unsigned comparisons, same dispatch scheme as above.
#define UINTPTR_BINOP(prefix, name)                    \
  Node* UintPtr##name(Node* a, Node* b) {              \
    return kPointerSize == 8 ? prefix##64##name(a, b)  \
                             : prefix##32##name(a, b); \
  }

  UINTPTR_BINOP(Uint, LessThan);
  UINTPTR_BINOP(Uint, LessThanOrEqual);
  UINTPTR_BINOP(Uint, GreaterThanOrEqual);
  UINTPTR_BINOP(Uint, GreaterThan);

#undef UINTPTR_BINOP

  Node* Float32Add(Node* a, Node* b) {
    return AddNode(machine()->Float32Add(), a, b);
  }
  Node* Float32Sub(Node* a, Node* b) {
    return AddNode(machine()->Float32Sub(), a, b);
  }
  Node* Float32Mul(Node* a, Node* b) {
    return AddNode(machine()->Float32Mul(), a, b);
  }
  Node* Float32Div(Node* a, Node* b) {
    return AddNode(machine()->Float32Div(), a, b);
  }
  Node* Float32Abs(Node* a) { return AddNode(machine()->Float32Abs(), a); }
  Node* Float32Neg(Node* a) { return AddNode(machine()->Float32Neg(), a); }
  Node* Float32Sqrt(Node* a) { return AddNode(machine()->Float32Sqrt(), a); }
  Node* Float32Equal(Node* a, Node* b) {
    return AddNode(machine()->Float32Equal(), a, b);
  }
  Node* Float32NotEqual(Node* a, Node* b) {
    return Word32BinaryNot(Float32Equal(a, b));
  }
  Node* Float32LessThan(Node* a, Node* b) {
    return AddNode(machine()->Float32LessThan(), a, b);
  }
  Node* Float32LessThanOrEqual(Node* a, Node* b) {
    return AddNode(machine()->Float32LessThanOrEqual(), a, b);
  }
  Node* Float32GreaterThan(Node* a, Node* b) { return Float32LessThan(b, a); }
  Node* Float32GreaterThanOrEqual(Node* a, Node* b) {
    return Float32LessThanOrEqual(b, a);
  }
  Node* Float32Max(Node* a, Node* b) {
    return AddNode(machine()->Float32Max(), a, b);
  }
  Node* Float32Min(Node* a, Node* b) {
    return AddNode(machine()->Float32Min(), a, b);
  }
  Node* Float64Add(Node* a, Node* b) {
    return AddNode(machine()->Float64Add(), a, b);
  }
  Node* Float64Sub(Node* a, Node* b) {
    return AddNode(machine()->Float64Sub(), a, b);
  }
  Node* Float64Mul(Node* a, Node* b) {
    return AddNode(machine()->Float64Mul(), a, b);
  }
  Node* Float64Div(Node* a, Node* b) {
    return AddNode(machine()->Float64Div(), a, b);
  }
  Node* Float64Mod(Node* a, Node* b) {
    return AddNode(machine()->Float64Mod(), a, b);
  }
  Node* Float64Max(Node* a, Node* b) {
    return AddNode(machine()->Float64Max(), a, b);
  }
  Node* Float64Min(Node* a, Node* b) {
    return AddNode(machine()->Float64Min(), a, b);
  }
  Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
  Node* Float64Neg(Node* a) { return AddNode(machine()->Float64Neg(), a); }
  // Transcendental / math builtins forwarded one-to-one to machine operators.
  Node* Float64Acos(Node* a) { return AddNode(machine()->Float64Acos(), a); }
  Node* Float64Acosh(Node* a) { return AddNode(machine()->Float64Acosh(), a); }
  Node* Float64Asin(Node* a) { return AddNode(machine()->Float64Asin(), a); }
  Node* Float64Asinh(Node* a) { return AddNode(machine()->Float64Asinh(), a); }
  Node* Float64Atan(Node* a) { return AddNode(machine()->Float64Atan(), a); }
  Node* Float64Atanh(Node* a) { return AddNode(machine()->Float64Atanh(), a); }
  Node* Float64Atan2(Node* a, Node* b) {
    return AddNode(machine()->Float64Atan2(), a, b);
  }
  Node* Float64Cbrt(Node* a) { return AddNode(machine()->Float64Cbrt(), a); }
  Node* Float64Cos(Node* a) { return AddNode(machine()->Float64Cos(), a); }
  Node* Float64Cosh(Node* a) { return AddNode(machine()->Float64Cosh(), a); }
  Node* Float64Exp(Node* a) { return AddNode(machine()->Float64Exp(), a); }
  Node* Float64Expm1(Node* a) { return AddNode(machine()->Float64Expm1(), a); }
  Node* Float64Log(Node* a) { return AddNode(machine()->Float64Log(), a); }
  Node* Float64Log1p(Node* a) { return AddNode(machine()->Float64Log1p(), a); }
  Node* Float64Log10(Node* a) { return AddNode(machine()->Float64Log10(), a); }
  Node* Float64Log2(Node* a) { return AddNode(machine()->Float64Log2(), a); }
  Node* Float64Pow(Node* a, Node* b) {
    return AddNode(machine()->Float64Pow(), a, b);
  }
  Node* Float64Sin(Node* a) { return AddNode(machine()->Float64Sin(), a); }
  Node* Float64Sinh(Node* a) { return AddNode(machine()->Float64Sinh(), a); }
  Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
  Node* Float64Tan(Node* a) { return AddNode(machine()->Float64Tan(), a); }
  Node* Float64Tanh(Node* a) { return AddNode(machine()->Float64Tanh(), a); }
  Node* Float64Equal(Node* a, Node* b) {
    return AddNode(machine()->Float64Equal(), a, b);
  }
  Node* Float64NotEqual(Node* a, Node* b) {
    return Word32BinaryNot(Float64Equal(a, b));
  }
  Node* Float64LessThan(Node* a, Node* b) {
    return AddNode(machine()->Float64LessThan(), a, b);
  }
  Node* Float64LessThanOrEqual(Node* a, Node* b) {
    return AddNode(machine()->Float64LessThanOrEqual(), a, b);
  }
  Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); }
  Node* Float64GreaterThanOrEqual(Node* a, Node* b) {
    return Float64LessThanOrEqual(b, a);
  }

  // Conversions.
  Node* BitcastTaggedToWord(Node* a) {
// With CSA verification enabled, emit an explicit bitcast node so the
// verifier can see the tagged/untagged transition; otherwise it is a no-op.
#ifdef ENABLE_VERIFY_CSA
    return AddNode(machine()->BitcastTaggedToWord(), a);
#else
    return a;
#endif
  }
  Node* BitcastWordToTagged(Node* a) {
    return AddNode(machine()->BitcastWordToTagged(), a);
  }
  Node* BitcastWordToTaggedSigned(Node* a) {
#ifdef ENABLE_VERIFY_CSA
    return AddNode(machine()->BitcastWordToTaggedSigned(), a);
#else
    return a;
#endif
  }
  Node* TruncateFloat64ToWord32(Node* a) {
    return AddNode(machine()->TruncateFloat64ToWord32(), a);
  }
  Node* ChangeFloat32ToFloat64(Node* a) {
    return AddNode(machine()->ChangeFloat32ToFloat64(), a);
  }
  Node* ChangeInt32ToFloat64(Node* a) {
    return AddNode(machine()->ChangeInt32ToFloat64(), a);
  }
  Node* ChangeUint32ToFloat64(Node* a) {
    return AddNode(machine()->ChangeUint32ToFloat64(), a);
  }
  Node* ChangeFloat64ToInt32(Node* a) {
    return AddNode(machine()->ChangeFloat64ToInt32(), a);
  }
  Node* ChangeFloat64ToUint32(Node* a) {
    return AddNode(machine()->ChangeFloat64ToUint32(), a);
  }
  Node* TruncateFloat64ToUint32(Node* a) {
    return AddNode(machine()->TruncateFloat64ToUint32(), a);
  }
  Node* TruncateFloat32ToInt32(Node* a) {
    return AddNode(machine()->TruncateFloat32ToInt32(), a);
  }
  Node* TruncateFloat32ToUint32(Node* a) {
    return AddNode(machine()->TruncateFloat32ToUint32(), a);
  }
  Node* TryTruncateFloat32ToInt64(Node* a) {
    return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
  }
  Node* TryTruncateFloat64ToInt64(Node* a) {
    return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
  }
  Node* TryTruncateFloat32ToUint64(Node* a) {
    return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
  }
  Node* TryTruncateFloat64ToUint64(Node* a) {
    return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
  }
  Node* ChangeInt32ToInt64(Node* a) {
    return AddNode(machine()->ChangeInt32ToInt64(), a);
  }
  Node* ChangeUint32ToUint64(Node* a) {
    return AddNode(machine()->ChangeUint32ToUint64(), a);
  }
  Node* TruncateFloat64ToFloat32(Node* a) {
    return AddNode(machine()->TruncateFloat64ToFloat32(), a);
  }
  Node* TruncateInt64ToInt32(Node* a) {
    return AddNode(machine()->TruncateInt64ToInt32(), a);
  }
  Node* RoundFloat64ToInt32(Node* a) {
    return AddNode(machine()->RoundFloat64ToInt32(), a);
  }
  Node* RoundInt32ToFloat32(Node* a) {
    return AddNode(machine()->RoundInt32ToFloat32(), a);
  }
  Node* RoundInt64ToFloat32(Node* a) {
    return AddNode(machine()->RoundInt64ToFloat32(), a);
  }
  Node* RoundInt64ToFloat64(Node* a) {
    return AddNode(machine()->RoundInt64ToFloat64(), a);
  }
  Node* RoundUint32ToFloat32(Node* a) {
    return AddNode(machine()->RoundUint32ToFloat32(), a);
  }
  Node* RoundUint64ToFloat32(Node* a) {
    return AddNode(machine()->RoundUint64ToFloat32(), a);
  }
  Node* RoundUint64ToFloat64(Node* a) {
    return AddNode(machine()->RoundUint64ToFloat64(), a);
  }
  Node* BitcastFloat32ToInt32(Node* a) {
    return AddNode(machine()->BitcastFloat32ToInt32(), a);
  }
  Node* BitcastFloat64ToInt64(Node* a) {
    return AddNode(machine()->BitcastFloat64ToInt64(), a);
  }
  Node* BitcastInt32ToFloat32(Node* a) {
    return AddNode(machine()->BitcastInt32ToFloat32(), a);
  }
  Node* BitcastInt64ToFloat64(Node* a) {
    return AddNode(machine()->BitcastInt64ToFloat64(), a);
  }
  // The rounding/reverse-bytes operators below are optional machine operators;
  // .op() unwraps the OptionalOperator returned by the builder.
  Node* Float32RoundDown(Node* a) {
    return AddNode(machine()->Float32RoundDown().op(), a);
  }
  Node* Float64RoundDown(Node* a) {
    return AddNode(machine()->Float64RoundDown().op(), a);
  }
  Node* Float32RoundUp(Node* a) {
    return AddNode(machine()->Float32RoundUp().op(), a);
  }
  Node* Float64RoundUp(Node* a) {
    return AddNode(machine()->Float64RoundUp().op(), a);
  }
  Node* Float32RoundTruncate(Node* a) {
    return AddNode(machine()->Float32RoundTruncate().op(), a);
  }
  Node* Float64RoundTruncate(Node* a) {
    return AddNode(machine()->Float64RoundTruncate().op(), a);
  }
  Node* Float64RoundTiesAway(Node* a) {
    return AddNode(machine()->Float64RoundTiesAway().op(), a);
  }
  Node* Float32RoundTiesEven(Node* a) {
    return AddNode(machine()->Float32RoundTiesEven().op(), a);
  }
  Node* Float64RoundTiesEven(Node* a) {
    return AddNode(machine()->Float64RoundTiesEven().op(), a);
  }
  Node* Word32ReverseBytes(Node* a) {
    return AddNode(machine()->Word32ReverseBytes().op(), a);
  }
  Node* Word64ReverseBytes(Node* a) {
    return AddNode(machine()->Word64ReverseBytes().op(), a);
  }

  // Float64 bit operations.
  Node* Float64ExtractLowWord32(Node* a) {
    return AddNode(machine()->Float64ExtractLowWord32(), a);
  }
  Node* Float64ExtractHighWord32(Node* a) {
    return AddNode(machine()->Float64ExtractHighWord32(), a);
  }
  Node* Float64InsertLowWord32(Node* a, Node* b) {
    return AddNode(machine()->Float64InsertLowWord32(), a, b);
  }
  Node* Float64InsertHighWord32(Node* a, Node* b) {
    return AddNode(machine()->Float64InsertHighWord32(), a, b);
  }
  Node* Float64SilenceNaN(Node* a) {
    return AddNode(machine()->Float64SilenceNaN(), a);
  }

  // Stack operations.
  Node* LoadStackPointer() { return AddNode(machine()->LoadStackPointer()); }
  Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); }
  Node* LoadParentFramePointer() {
    return AddNode(machine()->LoadParentFramePointer());
  }

  // Parameters.
  Node* Parameter(size_t index);

  // Pointer utilities.
  Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
    return Load(rep, PointerConstant(address), Int32Constant(offset));
  }
  Node* StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
    // No write barrier: the target is a raw off-heap address.
    return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
  }
  Node* UnalignedLoadFromPointer(void* address, MachineType rep,
                                 int32_t offset = 0) {
    return UnalignedLoad(rep, PointerConstant(address), Int32Constant(offset));
  }
  Node* UnalignedStoreToPointer(void* address, MachineRepresentation rep,
                                Node* node) {
    return UnalignedStore(rep, PointerConstant(address), node);
  }
  Node* StringConstant(const char* string) {
    return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
  }

  // Call a given call descriptor and the given arguments.
  // The call target is passed as part of the {inputs} array.
  Node* CallN(CallDescriptor* desc, int input_count, Node* const* inputs);

  // Call a given call descriptor and the given arguments and frame-state.
  // The call target and frame state are passed as part of the {inputs} array.
  Node* CallNWithFrameState(CallDescriptor* desc, int input_count,
                            Node* const* inputs);

  // Tail call a given call descriptor and the given arguments.
  // The call target is passed as part of the {inputs} array.
  Node* TailCallN(CallDescriptor* desc, int input_count, Node* const* inputs);

  // Call to a C function with zero arguments.
  Node* CallCFunction0(MachineType return_type, Node* function);
  // Call to a C function with one parameter.
  Node* CallCFunction1(MachineType return_type, MachineType arg0_type,
                       Node* function, Node* arg0);
  // Call to a C function with two arguments.
  Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
                       MachineType arg1_type, Node* function, Node* arg0,
                       Node* arg1);
  // Call to a C function with three arguments.
  Node* CallCFunction3(MachineType return_type, MachineType arg0_type,
                       MachineType arg1_type, MachineType arg2_type,
                       Node* function, Node* arg0, Node* arg1, Node* arg2);
  // Call to a C function with eight arguments.
  Node* CallCFunction8(MachineType return_type, MachineType arg0_type,
                       MachineType arg1_type, MachineType arg2_type,
                       MachineType arg3_type, MachineType arg4_type,
                       MachineType arg5_type, MachineType arg6_type,
                       MachineType arg7_type, Node* function, Node* arg0,
                       Node* arg1, Node* arg2, Node* arg3, Node* arg4,
                       Node* arg5, Node* arg6, Node* arg7);

  // ===========================================================================
  // The following utility methods deal with control flow, hence might switch
  // the current basic block or create new basic blocks for labels.

  // Control flow.
  void Goto(RawMachineLabel* label);
  void Branch(Node* condition, RawMachineLabel* true_val,
              RawMachineLabel* false_val);
  void Switch(Node* index, RawMachineLabel* default_label,
              const int32_t* case_values, RawMachineLabel** case_labels,
              size_t case_count);
  void Return(Node* value);
  void Return(Node* v1, Node* v2);
  void Return(Node* v1, Node* v2, Node* v3);
  void PopAndReturn(Node* pop, Node* value);
  void PopAndReturn(Node* pop, Node* v1, Node* v2);
  void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3);
  void Bind(RawMachineLabel* label);
  void Deoptimize(Node* state);
  void DebugBreak();
  void Unreachable();
  void Comment(const char* msg);

  // Add success / exception successor blocks and ends the current block ending
  // in a potentially throwing call node.
  void Continuations(Node* call, RawMachineLabel* if_success,
                     RawMachineLabel* if_exception);

  // Variables.
  // NOTE(review): each fixed-arity Phi appends graph()->start() as a trailing
  // input — presumably a placeholder control input fixed up later; confirm in
  // the .cc implementation.
  Node* Phi(MachineRepresentation rep, Node* n1, Node* n2) {
    return AddNode(common()->Phi(rep, 2), n1, n2, graph()->start());
  }
  Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3) {
    return AddNode(common()->Phi(rep, 3), n1, n2, n3, graph()->start());
  }
  Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3, Node* n4) {
    return AddNode(common()->Phi(rep, 4), n1, n2, n3, n4, graph()->start());
  }
  Node* Phi(MachineRepresentation rep, int input_count, Node* const* inputs);
  void AppendPhiInput(Node* phi, Node* new_input);

  // ===========================================================================
  // The following generic node creation methods can be used for operators that
  // are not covered by the above utility methods. There should rarely be a need
  // to do that outside of testing though.

  Node* AddNode(const Operator* op, int input_count, Node* const* inputs);

  Node* AddNode(const Operator* op) {
    return AddNode(op, 0, static_cast<Node* const*>(nullptr));
  }

  // Variadic convenience overload: packs the inputs into a stack buffer and
  // forwards to the array-based AddNode above.
  template <class... TArgs>
  Node* AddNode(const Operator* op, Node* n1, TArgs... args) {
    Node* buffer[] = {n1, args...};
    return AddNode(op, sizeof...(args) + 1, buffer);
  }

 private:
  Node* MakeNode(const Operator* op, int input_count, Node* const* inputs);
  BasicBlock* Use(RawMachineLabel* label);
  BasicBlock* EnsureBlock(RawMachineLabel* label);
  BasicBlock* CurrentBlock();

  Schedule* schedule() { return schedule_; }
  size_t parameter_count() const { return call_descriptor_->ParameterCount(); }

  Isolate* isolate_;
  Graph* graph_;
  Schedule* schedule_;
  MachineOperatorBuilder machine_;
  CommonOperatorBuilder common_;
  CallDescriptor* call_descriptor_;
  NodeVector parameters_;
  BasicBlock* current_block_;

  DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
};

// A label referencing a basic block that is built on demand. Labels are bound
// via RawMachineAssembler::Bind and targeted by its control-flow methods.
class V8_EXPORT_PRIVATE RawMachineLabel final {
 public:
  enum Type { kDeferred, kNonDeferred };

  explicit RawMachineLabel(Type type = kNonDeferred)
      : deferred_(type == kDeferred) {}
  ~RawMachineLabel();

 private:
  BasicBlock* block_ = nullptr;  // lazily created by the assembler
  bool used_ = false;            // presumably set once the label is targeted
  bool bound_ = false;           // presumably set by Bind(); confirm in .cc
  bool deferred_;                // marks the block as deferred (cold) code
  friend class RawMachineAssembler;
  DISALLOW_COPY_AND_ASSIGN(RawMachineLabel);
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_