// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "courgette/encoded_program.h"

#include <algorithm>
#include <map>
#include <string>
#include <vector>

#include "base/environment.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "courgette/courgette.h"
#include "courgette/disassembler_elf_32_arm.h"
#include "courgette/streams.h"
#include "courgette/types_elf.h"

namespace courgette {

// Stream indexes.
const int kStreamMisc = 0;
const int kStreamOps = 1;
const int kStreamBytes = 2;
const int kStreamAbs32Indexes = 3;
const int kStreamRel32Indexes = 4;
const int kStreamAbs32Addresses = 5;
const int kStreamRel32Addresses = 6;
const int kStreamCopyCounts = 7;
const int kStreamOriginAddresses = kStreamMisc;

const int kStreamLimit = 9;

// The constructor is here rather than in the header. Although it appears to do
// nothing, it is in fact quite large because of the implicit calls to the
// field constructors. Ditto for the destructor.
EncodedProgram::EncodedProgram() : image_base_(0) {}
EncodedProgram::~EncodedProgram() {}

// Serializes a vector of integral values using Varint32 coding.
template<typename V>
CheckBool WriteVector(const V& items, SinkStream* buffer) {
  size_t count = items.size();
  bool ok = buffer->WriteSizeVarint32(count);
  for (size_t i = 0; ok && i < count; ++i) {
    COMPILE_ASSERT(sizeof(items[0]) <= sizeof(uint32),  // NOLINT
                   T_must_fit_in_uint32);
    ok = buffer->WriteSizeVarint32(items[i]);
  }
  return ok;
}

template<typename V>
bool ReadVector(V* items, SourceStream* buffer) {
  uint32 count;
  if (!buffer->ReadVarint32(&count))
    return false;

  items->clear();

  bool ok = items->reserve(count);
  for (size_t i = 0; ok && i < count; ++i) {
    uint32 item;
    ok = buffer->ReadVarint32(&item);
    if (ok)
      ok = items->push_back(static_cast<typename V::value_type>(item));
  }

  return ok;
}

// Serializes a vector, using delta coding followed by Varint32 coding.
template<typename V>
CheckBool WriteU32Delta(const V& set, SinkStream* buffer) {
  size_t count = set.size();
  bool ok = buffer->WriteSizeVarint32(count);
  uint32 prev = 0;
  for (size_t i = 0; ok && i < count; ++i) {
    uint32 current = set[i];
    uint32 delta = current - prev;
    ok = buffer->WriteVarint32(delta);
    prev = current;
  }
  return ok;
}

template <typename V>
static CheckBool ReadU32Delta(V* set, SourceStream* buffer) {
  uint32 count;

  if (!buffer->ReadVarint32(&count))
    return false;

  set->clear();
  bool ok = set->reserve(count);
  uint32 prev = 0;

  for (size_t i = 0; ok && i < count; ++i) {
    uint32 delta;
    ok = buffer->ReadVarint32(&delta);
    if (ok) {
      uint32 current = prev + delta;
      ok = set->push_back(current);
      prev = current;
    }
  }

  return ok;
}
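
// Illustrative example of the delta coding above (values chosen for
// illustration only): the label RVA table {0x1000, 0x1004, 0x1004, 0x1010} is
// written as the deltas {0x1000, 0x4, 0x0, 0xC}, each encoded as a Varint32.
// Because unassigned label slots are filled with the previous RVA (see
// FinishLabelsCommon below), repeated values delta-encode to zero, and a
// mostly-increasing RVA table stays at one or two bytes per entry.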

// Write a vector as the byte representation of the contents.
//
// (This only really makes sense for a type T that has sizeof(T)==1, otherwise
// the serialized representation is not endian-agnostic. But it is useful to
// keep the possibility of a greater size for experiments comparing Varint32
// encoding of a vector of larger integrals vs a plain form.)
//
template<typename V>
CheckBool WriteVectorU8(const V& items, SinkStream* buffer) {
  size_t count = items.size();
  bool ok = buffer->WriteSizeVarint32(count);
  if (count != 0 && ok) {
    size_t byte_count = count * sizeof(typename V::value_type);
    ok = buffer->Write(static_cast<const void*>(&items[0]), byte_count);
  }
  return ok;
}

template<typename V>
bool ReadVectorU8(V* items, SourceStream* buffer) {
  uint32 count;
  if (!buffer->ReadVarint32(&count))
    return false;

  items->clear();
  bool ok = items->resize(count, 0);
  if (ok && count != 0) {
    size_t byte_count = count * sizeof(typename V::value_type);
    return buffer->Read(static_cast<void*>(&((*items)[0])), byte_count);
  }
  return ok;
}

////////////////////////////////////////////////////////////////////////////////

CheckBool EncodedProgram::DefineRel32Label(int index, RVA value) {
  return DefineLabelCommon(&rel32_rva_, index, value);
}

CheckBool EncodedProgram::DefineAbs32Label(int index, RVA value) {
  return DefineLabelCommon(&abs32_rva_, index, value);
}

static const RVA kUnassignedRVA = static_cast<RVA>(-1);

CheckBool EncodedProgram::DefineLabelCommon(RvaVector* rvas,
                                            int index,
                                            RVA rva) {
  bool ok = true;
  if (static_cast<int>(rvas->size()) <= index)
    ok = rvas->resize(index + 1, kUnassignedRVA);

  if (ok) {
    DCHECK_EQ((*rvas)[index], kUnassignedRVA)
        << "DefineLabel double assigned " << index;
    (*rvas)[index] = rva;
  }

  return ok;
}

void EncodedProgram::EndLabels() {
  FinishLabelsCommon(&abs32_rva_);
  FinishLabelsCommon(&rel32_rva_);
}

void EncodedProgram::FinishLabelsCommon(RvaVector* rvas) {
  // Replace all unassigned slots with the value at the previous index so they
  // delta-encode to zero. (There might be better values than zero. The way to
  // get that is to have the higher-level assembly program assign the
  // unassigned slots.)
  RVA previous = 0;
  size_t size = rvas->size();
  for (size_t i = 0; i < size; ++i) {
    if ((*rvas)[i] == kUnassignedRVA)
      (*rvas)[i] = previous;
    else
      previous = (*rvas)[i];
  }
}

CheckBool EncodedProgram::AddOrigin(RVA origin) {
  return ops_.push_back(ORIGIN) && origins_.push_back(origin);
}

CheckBool EncodedProgram::AddCopy(uint32 count, const void* bytes) {
  const uint8* source = static_cast<const uint8*>(bytes);

  bool ok = true;

  // Fold adjacent COPY instructions into one. This nearly halves the size of
  // an EncodedProgram with only COPY1 instructions, since there are
  // approximately 16 plain bytes per reloc. It also has a working-set benefit
  // during decompression. For compression of files with large differences this
  // makes a small (4%) improvement in size; for files with small differences
  // it degrades the compressed size by 1.3%.
  if (!ops_.empty()) {
    if (ops_.back() == COPY1) {
      ops_.back() = COPY;
      ok = copy_counts_.push_back(1);
    }
    if (ok && ops_.back() == COPY) {
      copy_counts_.back() += count;
      for (uint32 i = 0; ok && i < count; ++i) {
        ok = copy_bytes_.push_back(source[i]);
      }
      return ok;
    }
  }

  if (ok) {
    if (count == 1) {
      ok = ops_.push_back(COPY1) && copy_bytes_.push_back(source[0]);
    } else {
      ok = ops_.push_back(COPY) && copy_counts_.push_back(count);
      for (uint32 i = 0; ok && i < count; ++i) {
        ok = copy_bytes_.push_back(source[i]);
      }
    }
  }

  return ok;
}
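
// Illustrative example of the folding above (byte values arbitrary): the
// sequence of calls AddCopy(1, "a"), AddCopy(1, "b"), AddCopy(2, "cd") ends up
// as a single COPY op with a copy_counts_ entry of 4 and copy_bytes_
// containing "abcd", rather than as COPY1, COPY1, COPY.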

CheckBool EncodedProgram::AddAbs32(int label_index) {
  return ops_.push_back(ABS32) && abs32_ix_.push_back(label_index);
}

CheckBool EncodedProgram::AddRel32(int label_index) {
  return ops_.push_back(REL32) && rel32_ix_.push_back(label_index);
}

CheckBool EncodedProgram::AddRel32ARM(uint16 op, int label_index) {
  return ops_.push_back(static_cast<OP>(op)) &&
         rel32_ix_.push_back(label_index);
}

CheckBool EncodedProgram::AddPeMakeRelocs(ExecutableType kind) {
  if (kind == EXE_WIN_32_X86)
    return ops_.push_back(MAKE_PE_RELOCATION_TABLE);
  return ops_.push_back(MAKE_PE64_RELOCATION_TABLE);
}

CheckBool EncodedProgram::AddElfMakeRelocs() {
  return ops_.push_back(MAKE_ELF_RELOCATION_TABLE);
}

CheckBool EncodedProgram::AddElfARMMakeRelocs() {
  return ops_.push_back(MAKE_ELF_ARM_RELOCATION_TABLE);
}

void EncodedProgram::DebuggingSummary() {
  VLOG(1) << "EncodedProgram Summary"
          << "\n  image base  " << image_base_
          << "\n  abs32 rvas  " << abs32_rva_.size()
          << "\n  rel32 rvas  " << rel32_rva_.size()
          << "\n  ops         " << ops_.size()
          << "\n  origins     " << origins_.size()
          << "\n  copy_counts " << copy_counts_.size()
          << "\n  copy_bytes  " << copy_bytes_.size()
          << "\n  abs32_ix    " << abs32_ix_.size()
          << "\n  rel32_ix    " << rel32_ix_.size();
}

////////////////////////////////////////////////////////////////////////////////

// For algorithm refinement purposes it is useful to write subsets of the file
// format. This gives us the ability to estimate the entropy of the
// differential compression of the individual streams, which can provide
// invaluable insights. The default, of course, is to include all the streams.
//
enum FieldSelect {
  INCLUDE_ABS32_ADDRESSES = 0x0001,
  INCLUDE_REL32_ADDRESSES = 0x0002,
  INCLUDE_ABS32_INDEXES = 0x0010,
  INCLUDE_REL32_INDEXES = 0x0020,
  INCLUDE_OPS = 0x0100,
  INCLUDE_BYTES = 0x0200,
  INCLUDE_COPY_COUNTS = 0x0400,
  INCLUDE_MISC = 0x1000
};

static FieldSelect GetFieldSelect() {
#if 1
  // TODO(sra): Use better configuration.
  scoped_ptr<base::Environment> env(base::Environment::Create());
  std::string s;
  env->GetVar("A_FIELDS", &s);
  if (!s.empty()) {
    return static_cast<FieldSelect>(wcstoul(ASCIIToWide(s).c_str(), 0, 0));
  }
#endif
  return static_cast<FieldSelect>(~0);
}
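
// Example of the debugging hook above (value illustrative): running the
// generating process with A_FIELDS=0x300 in its environment selects
// INCLUDE_OPS | INCLUDE_BYTES (0x0100 | 0x0200), so WriteTo below emits only
// the ops and bytes streams. Comparing the compressed size of such partial
// outputs against the full output gives a rough per-stream entropy estimate.
// The string is parsed with base 0, so hex ("0x300"), octal and decimal forms
// are all accepted.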

CheckBool EncodedProgram::WriteTo(SinkStreamSet* streams) {
  FieldSelect select = GetFieldSelect();

  // The order of fields must be consistent in WriteTo and ReadFrom, regardless
  // of the streams used. The code can be configured with all the kStreamXXX
  // constants set to the same value.
  //
  // If we change the code to pipeline reading with assembly (to avoid
  // temporary storage vectors by consuming operands directly from the stream),
  // then we need to read the base address and the random access address tables
  // first; the rest can be interleaved.

  if (select & INCLUDE_MISC) {
    // TODO(sra): write 64 bits.
    if (!streams->stream(kStreamMisc)->WriteVarint32(
            static_cast<uint32>(image_base_))) {
      return false;
    }
  }

  bool success = true;

  if (select & INCLUDE_ABS32_ADDRESSES) {
    success &= WriteU32Delta(abs32_rva_,
                             streams->stream(kStreamAbs32Addresses));
  }

  if (select & INCLUDE_REL32_ADDRESSES) {
    success &= WriteU32Delta(rel32_rva_,
                             streams->stream(kStreamRel32Addresses));
  }

  if (select & INCLUDE_MISC)
    success &= WriteVector(origins_, streams->stream(kStreamOriginAddresses));

  if (select & INCLUDE_OPS) {
    // 5 for length.
    success &= streams->stream(kStreamOps)->Reserve(ops_.size() + 5);
    success &= WriteVector(ops_, streams->stream(kStreamOps));
  }

  if (select & INCLUDE_COPY_COUNTS)
    success &= WriteVector(copy_counts_, streams->stream(kStreamCopyCounts));

  if (select & INCLUDE_BYTES)
    success &= WriteVectorU8(copy_bytes_, streams->stream(kStreamBytes));

  if (select & INCLUDE_ABS32_INDEXES)
    success &= WriteVector(abs32_ix_, streams->stream(kStreamAbs32Indexes));

  if (select & INCLUDE_REL32_INDEXES)
    success &= WriteVector(rel32_ix_, streams->stream(kStreamRel32Indexes));

  return success;
}

bool EncodedProgram::ReadFrom(SourceStreamSet* streams) {
  // TODO(sra): read 64 bits.
  uint32 temp;
  if (!streams->stream(kStreamMisc)->ReadVarint32(&temp))
    return false;
  image_base_ = temp;

  if (!ReadU32Delta(&abs32_rva_, streams->stream(kStreamAbs32Addresses)))
    return false;
  if (!ReadU32Delta(&rel32_rva_, streams->stream(kStreamRel32Addresses)))
    return false;
  if (!ReadVector(&origins_, streams->stream(kStreamOriginAddresses)))
    return false;
  if (!ReadVector(&ops_, streams->stream(kStreamOps)))
    return false;
  if (!ReadVector(&copy_counts_, streams->stream(kStreamCopyCounts)))
    return false;
  if (!ReadVectorU8(&copy_bytes_, streams->stream(kStreamBytes)))
    return false;
  if (!ReadVector(&abs32_ix_, streams->stream(kStreamAbs32Indexes)))
    return false;
  if (!ReadVector(&rel32_ix_, streams->stream(kStreamRel32Indexes)))
    return false;

  // Check that the streams have been completely consumed.
  for (int i = 0; i < kStreamLimit; ++i) {
    if (streams->stream(i)->Remaining() > 0)
      return false;
  }

  return true;
}

// Safe, non-throwing version of std::vector::at(). Returns 'true' for success,
// 'false' for an out-of-bounds index.
template<typename V, typename T>
bool VectorAt(const V& v, size_t index, T* output) {
  if (index >= v.size())
    return false;
  *output = v[index];
  return true;
}

CheckBool EncodedProgram::EvaluateRel32ARM(OP op,
                                           size_t& ix_rel32_ix,
                                           RVA& current_rva,
                                           SinkStream* output) {
  switch (op & 0x0000F000) {
    case REL32ARM8: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF8,
                                            static_cast<uint16>(op),
                                            static_cast<uint32>(rva -
                                                                current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint16 op16 = decompressed_op;
      if (!output->Write(&op16, 2))
        return false;
      current_rva += 2;
      break;
    }
    case REL32ARM11: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF11, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint16 op16 = decompressed_op;
      if (!output->Write(&op16, 2))
        return false;
      current_rva += 2;
      break;
    }
    case REL32ARM24: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF24, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      if (!output->Write(&decompressed_op, 4))
        return false;
      current_rva += 4;
      break;
    }
    case REL32ARM25: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF25, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint32 words = (decompressed_op << 16) | (decompressed_op >> 16);
      if (!output->Write(&words, 4))
        return false;
      current_rva += 4;
      break;
    }
    case REL32ARM21: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF21, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint32 words = (decompressed_op << 16) | (decompressed_op >> 16);
      if (!output->Write(&words, 4))
        return false;
      current_rva += 4;
      break;
    }
    default:
      return false;
  }

  return true;
}
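
// Worked example of the x86 reference arithmetic in AssembleTo below (the
// addresses are illustrative): a REL32 reference assembled at current_rva
// 0x00402010 to a label whose RVA is 0x00401000 is emitted as the 32-bit value
// 0x00401000 - (0x00402010 + 4) = 0xFFFFEFEC, i.e. the displacement is
// relative to the end of the 4-byte field. An ABS32 reference to the same
// label with image_base_ 0x00400000 is emitted as 0x00801000, and its location
// is remembered in abs32_relocs_ so that a base relocation entry can be
// generated for it later.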

CheckBool EncodedProgram::AssembleTo(SinkStream* final_buffer) {
  // For the most part, the assembly process walks the various tables.
  // ix_mumble is the index into the mumble table.
  size_t ix_origins = 0;
  size_t ix_copy_counts = 0;
  size_t ix_copy_bytes = 0;
  size_t ix_abs32_ix = 0;
  size_t ix_rel32_ix = 0;

  RVA current_rva = 0;

  bool pending_pe_relocation_table = false;
  uint8 pending_pe_relocation_table_type = 0x03;  // IMAGE_REL_BASED_HIGHLOW
  Elf32_Word pending_elf_relocation_table_type = 0;
  SinkStream bytes_following_relocation_table;

  SinkStream* output = final_buffer;

  for (size_t ix_ops = 0; ix_ops < ops_.size(); ++ix_ops) {
    OP op = ops_[ix_ops];

    switch (op) {
      default:
        if (!EvaluateRel32ARM(op, ix_rel32_ix, current_rva, output))
          return false;
        break;

      case ORIGIN: {
        RVA section_rva;
        if (!VectorAt(origins_, ix_origins, &section_rva))
          return false;
        ++ix_origins;
        current_rva = section_rva;
        break;
      }

      case COPY: {
        uint32 count;
        if (!VectorAt(copy_counts_, ix_copy_counts, &count))
          return false;
        ++ix_copy_counts;
        for (uint32 i = 0; i < count; ++i) {
          uint8 b;
          if (!VectorAt(copy_bytes_, ix_copy_bytes, &b))
            return false;
          ++ix_copy_bytes;
          if (!output->Write(&b, 1))
            return false;
        }
        current_rva += count;
        break;
      }

      case COPY1: {
        uint8 b;
        if (!VectorAt(copy_bytes_, ix_copy_bytes, &b))
          return false;
        ++ix_copy_bytes;
        if (!output->Write(&b, 1))
          return false;
        current_rva += 1;
        break;
      }

      case REL32: {
        uint32 index;
        if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
          return false;
        ++ix_rel32_ix;
        RVA rva;
        if (!VectorAt(rel32_rva_, index, &rva))
          return false;
        uint32 offset = (rva - (current_rva + 4));
        if (!output->Write(&offset, 4))
          return false;
        current_rva += 4;
        break;
      }

      case ABS32: {
        uint32 index;
        if (!VectorAt(abs32_ix_, ix_abs32_ix, &index))
          return false;
        ++ix_abs32_ix;
        RVA rva;
        if (!VectorAt(abs32_rva_, index, &rva))
          return false;
        uint32 abs32 = static_cast<uint32>(rva + image_base_);
        if (!abs32_relocs_.push_back(current_rva) || !output->Write(&abs32, 4))
          return false;
        current_rva += 4;
        break;
      }

      case MAKE_PE_RELOCATION_TABLE: {
        // We can see the base relocation anywhere, but we only have the
        // information to generate it at the very end. So we divert the bytes
        // we are generating to a temporary stream.
        if (pending_pe_relocation_table)
          return false;  // Can't have two base relocation tables.

        pending_pe_relocation_table = true;
        output = &bytes_following_relocation_table;
        break;
        // There is a potential problem *if* the instruction stream contains
        // some REL32 relocations following the base relocation and in the same
        // section. We don't know the size of the table, so 'current_rva' will
        // be wrong, causing REL32 offsets to be miscalculated. This never
        // happens; the base relocation table is usually in a section of its
        // own, a data-only section, and following everything else in the
        // executable except some padding zero bytes. We could fix this by
        // emitting an ORIGIN after the MAKE_PE_RELOCATION_TABLE.
      }

      case MAKE_PE64_RELOCATION_TABLE: {
        if (pending_pe_relocation_table)
          return false;  // Can't have two base relocation tables.

        pending_pe_relocation_table = true;
        pending_pe_relocation_table_type = 0x0A;  // IMAGE_REL_BASED_DIR64
        output = &bytes_following_relocation_table;
        break;
      }

      case MAKE_ELF_ARM_RELOCATION_TABLE: {
        // We can see the base relocation anywhere, but we only have the
        // information to generate it at the very end. So we divert the bytes
        // we are generating to a temporary stream.
        if (pending_elf_relocation_table_type)
          return false;  // Can't have two base relocation tables.

        pending_elf_relocation_table_type = R_ARM_RELATIVE;
        output = &bytes_following_relocation_table;
        break;
      }

      case MAKE_ELF_RELOCATION_TABLE: {
        // We can see the base relocation anywhere, but we only have the
        // information to generate it at the very end. So we divert the bytes
        // we are generating to a temporary stream.
        if (pending_elf_relocation_table_type)
          return false;  // Can't have two base relocation tables.

        pending_elf_relocation_table_type = R_386_RELATIVE;
        output = &bytes_following_relocation_table;
        break;
      }
    }
  }

  if (pending_pe_relocation_table) {
    if (!GeneratePeRelocations(final_buffer,
                               pending_pe_relocation_table_type) ||
        !final_buffer->Append(&bytes_following_relocation_table))
      return false;
  }

  if (pending_elf_relocation_table_type) {
    if (!GenerateElfRelocations(pending_elf_relocation_table_type,
                                final_buffer) ||
        !final_buffer->Append(&bytes_following_relocation_table))
      return false;
  }

  // Final verification check: did we consume all the lists?
  if (ix_copy_counts != copy_counts_.size())
    return false;
  if (ix_copy_bytes != copy_bytes_.size())
    return false;
  if (ix_abs32_ix != abs32_ix_.size())
    return false;
  if (ix_rel32_ix != rel32_ix_.size())
    return false;

  return true;
}

// RelocBlock has the layout of a block of relocations in the base relocation
// table file format.
//
struct RelocBlockPOD {
  uint32 page_rva;
  uint32 block_size;
  uint16 relocs[4096];  // Allow up to one relocation per byte of a 4k page.
};

COMPILE_ASSERT(offsetof(RelocBlockPOD, relocs) == 8, reloc_block_header_size);

class RelocBlock {
 public:
  RelocBlock() {
    pod.page_rva = ~0;
    pod.block_size = 8;
  }

  void Add(uint16 item) {
    pod.relocs[(pod.block_size - 8) / 2] = item;
    pod.block_size += 2;
  }

  CheckBool Flush(SinkStream* buffer) WARN_UNUSED_RESULT {
    bool ok = true;
    if (pod.block_size != 8) {
      if (pod.block_size % 4 != 0) {  // Pad to make size multiple of 4 bytes.
        Add(0);
      }
      ok = buffer->Write(&pod, pod.block_size);
      pod.block_size = 8;
    }
    return ok;
  }
  RelocBlockPOD pod;
};
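
// Illustrative example of the PE base relocation block layout that RelocBlock
// above builds and GeneratePeRelocations below emits (addresses chosen for
// illustration): an IMAGE_REL_BASED_HIGHLOW (type 3) relocation at RVA 0x1234
// goes into the block whose page_rva is 0x1000 as the 16-bit entry
// (3 << 12) | 0x234 == 0x3234. block_size counts the 8-byte header plus 2
// bytes per entry, so a block holding just that one entry is padded with a
// single zero entry to bring its size (12 bytes) to a multiple of 4.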

CheckBool EncodedProgram::GeneratePeRelocations(SinkStream* buffer,
                                                uint8 type) {
  std::sort(abs32_relocs_.begin(), abs32_relocs_.end());

  RelocBlock block;

  bool ok = true;
  for (size_t i = 0; ok && i < abs32_relocs_.size(); ++i) {
    uint32 rva = abs32_relocs_[i];
    uint32 page_rva = rva & ~0xFFF;
    if (page_rva != block.pod.page_rva) {
      ok &= block.Flush(buffer);
      block.pod.page_rva = page_rva;
    }
    if (ok)
      block.Add(((static_cast<uint16>(type)) << 12) | (rva & 0xFFF));
  }
  ok &= block.Flush(buffer);
  return ok;
}

CheckBool EncodedProgram::GenerateElfRelocations(Elf32_Word r_info,
                                                 SinkStream* buffer) {
  std::sort(abs32_relocs_.begin(), abs32_relocs_.end());

  Elf32_Rel relocation_block;

  relocation_block.r_info = r_info;

  bool ok = true;
  for (size_t i = 0; ok && i < abs32_relocs_.size(); ++i) {
    relocation_block.r_offset = abs32_relocs_[i];
    ok = buffer->Write(&relocation_block, sizeof(Elf32_Rel));
  }

  return ok;
}
////////////////////////////////////////////////////////////////////////////////

Status WriteEncodedProgram(EncodedProgram* encoded, SinkStreamSet* sink) {
  if (!encoded->WriteTo(sink))
    return C_STREAM_ERROR;
  return C_OK;
}

Status ReadEncodedProgram(SourceStreamSet* streams, EncodedProgram** output) {
  EncodedProgram* encoded = new EncodedProgram();
  if (encoded->ReadFrom(streams)) {
    *output = encoded;
    return C_OK;
  }
  delete encoded;
  return C_DESERIALIZATION_FAILED;
}

Status Assemble(EncodedProgram* encoded, SinkStream* buffer) {
  bool assembled = encoded->AssembleTo(buffer);
  if (assembled)
    return C_OK;
  return C_ASSEMBLY_FAILED;
}

void DeleteEncodedProgram(EncodedProgram* encoded) {
  delete encoded;
}

}  // end namespace