// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "courgette/encoded_program.h"

#include <algorithm>
#include <map>
#include <string>
#include <vector>

#include "base/environment.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "courgette/courgette.h"
#include "courgette/disassembler_elf_32_arm.h"
#include "courgette/streams.h"
#include "courgette/types_elf.h"

namespace courgette {

// Stream indexes.
const int kStreamMisc = 0;
const int kStreamOps = 1;
const int kStreamBytes = 2;
const int kStreamAbs32Indexes = 3;
const int kStreamRel32Indexes = 4;
const int kStreamAbs32Addresses = 5;
const int kStreamRel32Addresses = 6;
const int kStreamCopyCounts = 7;
const int kStreamOriginAddresses = kStreamMisc;

const int kStreamLimit = 9;

// Constructor is here rather than in the header.  Although the constructor
// appears to do nothing it is in fact quite large because of the implicit
// calls to field constructors.  Ditto for the destructor.
EncodedProgram::EncodedProgram() : image_base_(0) {}
EncodedProgram::~EncodedProgram() {}

// Serializes a vector of integral values using Varint32 coding.
template<typename V>
CheckBool WriteVector(const V& items, SinkStream* buffer) {
  size_t count = items.size();
  bool ok = buffer->WriteSizeVarint32(count);
  for (size_t i = 0;  ok && i < count;  ++i) {
    COMPILE_ASSERT(sizeof(items[0]) <= sizeof(uint32),  // NOLINT
                   T_must_fit_in_uint32);
    ok = buffer->WriteSizeVarint32(items[i]);
  }
  return ok;
}

template<typename V>
bool ReadVector(V* items, SourceStream* buffer) {
  uint32 count;
  if (!buffer->ReadVarint32(&count))
    return false;

  items->clear();

  bool ok = items->reserve(count);
  for (size_t i = 0;  ok && i < count;  ++i) {
    uint32 item;
    ok = buffer->ReadVarint32(&item);
    if (ok)
      ok = items->push_back(static_cast<typename V::value_type>(item));
  }

  return ok;
}

// Serializes a vector, using delta coding followed by Varint32 coding.
template<typename V>
CheckBool WriteU32Delta(const V& set, SinkStream* buffer) {
  size_t count = set.size();
  bool ok = buffer->WriteSizeVarint32(count);
  uint32 prev = 0;
  for (size_t i = 0;  ok && i < count;  ++i) {
    uint32 current = set[i];
    uint32 delta = current - prev;
    ok = buffer->WriteVarint32(delta);
    prev = current;
  }
  return ok;
}

template <typename V>
static CheckBool ReadU32Delta(V* set, SourceStream* buffer) {
  uint32 count;

  if (!buffer->ReadVarint32(&count))
    return false;

  set->clear();
  bool ok = set->reserve(count);
  uint32 prev = 0;

  for (size_t i = 0;  ok && i < count;  ++i) {
    uint32 delta;
    ok = buffer->ReadVarint32(&delta);
    if (ok) {
      uint32 current = prev + delta;
      ok = set->push_back(current);
      prev = current;
    }
  }

  return ok;
}

// Write a vector as the byte representation of the contents.
//
// (This only really makes sense for a type T that has sizeof(T)==1, otherwise
// the serialized representation is not endian-agnostic.
// But it is useful to keep the possibility of a greater size for experiments
// comparing Varint32 encoding of a vector of larger integrals vs a plain
// form.)
//
template<typename V>
CheckBool WriteVectorU8(const V& items, SinkStream* buffer) {
  size_t count = items.size();
  bool ok = buffer->WriteSizeVarint32(count);
  if (count != 0 && ok) {
    size_t byte_count = count * sizeof(typename V::value_type);
    ok = buffer->Write(static_cast<const void*>(&items[0]), byte_count);
  }
  return ok;
}

template<typename V>
bool ReadVectorU8(V* items, SourceStream* buffer) {
  uint32 count;
  if (!buffer->ReadVarint32(&count))
    return false;

  items->clear();
  bool ok = items->resize(count, 0);
  if (ok && count != 0) {
    size_t byte_count = count * sizeof(typename V::value_type);
    return buffer->Read(static_cast<void*>(&((*items)[0])), byte_count);
  }
  return ok;
}

////////////////////////////////////////////////////////////////////////////////

CheckBool EncodedProgram::DefineRel32Label(int index, RVA value) {
  return DefineLabelCommon(&rel32_rva_, index, value);
}

CheckBool EncodedProgram::DefineAbs32Label(int index, RVA value) {
  return DefineLabelCommon(&abs32_rva_, index, value);
}

static const RVA kUnassignedRVA = static_cast<RVA>(-1);

CheckBool EncodedProgram::DefineLabelCommon(RvaVector* rvas,
                                            int index,
                                            RVA rva) {
  bool ok = true;
  if (static_cast<int>(rvas->size()) <= index)
    ok = rvas->resize(index + 1, kUnassignedRVA);

  if (ok) {
    DCHECK_EQ((*rvas)[index], kUnassignedRVA)
        << "DefineLabel double assigned " << index;
    (*rvas)[index] = rva;
  }

  return ok;
}

void EncodedProgram::EndLabels() {
  FinishLabelsCommon(&abs32_rva_);
  FinishLabelsCommon(&rel32_rva_);
}

void EncodedProgram::FinishLabelsCommon(RvaVector* rvas) {
  // Replace all unassigned slots with the value at the previous index so they
  // delta-encode to zero.  (There might be better values than zero.  The way
  // to get that is to have the higher level assembly program assign the
  // unassigned slots.)
  RVA previous = 0;
  size_t size = rvas->size();
  for (size_t i = 0;  i < size;  ++i) {
    if ((*rvas)[i] == kUnassignedRVA)
      (*rvas)[i] = previous;
    else
      previous = (*rvas)[i];
  }
}

CheckBool EncodedProgram::AddOrigin(RVA origin) {
  return ops_.push_back(ORIGIN) && origins_.push_back(origin);
}

CheckBool EncodedProgram::AddCopy(uint32 count, const void* bytes) {
  const uint8* source = static_cast<const uint8*>(bytes);

  bool ok = true;

  // Fold adjacent COPY instructions into one.  This nearly halves the size of
  // an EncodedProgram with only COPY1 instructions since there are
  // approximately 16 plain bytes per reloc.  This has a working-set benefit
  // during decompression.  For compression of files with large differences
  // this makes a small (4%) improvement in size.
  // For files with small differences this degrades the compressed size by
  // 1.3%.
  if (!ops_.empty()) {
    if (ops_.back() == COPY1) {
      ops_.back() = COPY;
      ok = copy_counts_.push_back(1);
    }
    if (ok && ops_.back() == COPY) {
      copy_counts_.back() += count;
      for (uint32 i = 0;  ok && i < count;  ++i) {
        ok = copy_bytes_.push_back(source[i]);
      }
      return ok;
    }
  }

  if (ok) {
    if (count == 1) {
      ok = ops_.push_back(COPY1) && copy_bytes_.push_back(source[0]);
    } else {
      ok = ops_.push_back(COPY) && copy_counts_.push_back(count);
      for (uint32 i = 0;  ok && i < count;  ++i) {
        ok = copy_bytes_.push_back(source[i]);
      }
    }
  }

  return ok;
}

CheckBool EncodedProgram::AddAbs32(int label_index) {
  return ops_.push_back(ABS32) && abs32_ix_.push_back(label_index);
}

CheckBool EncodedProgram::AddRel32(int label_index) {
  return ops_.push_back(REL32) && rel32_ix_.push_back(label_index);
}

CheckBool EncodedProgram::AddRel32ARM(uint16 op, int label_index) {
  return ops_.push_back(static_cast<OP>(op)) &&
         rel32_ix_.push_back(label_index);
}

CheckBool EncodedProgram::AddPeMakeRelocs(ExecutableType kind) {
  if (kind == EXE_WIN_32_X86)
    return ops_.push_back(MAKE_PE_RELOCATION_TABLE);
  return ops_.push_back(MAKE_PE64_RELOCATION_TABLE);
}

CheckBool EncodedProgram::AddElfMakeRelocs() {
  return ops_.push_back(MAKE_ELF_RELOCATION_TABLE);
}

CheckBool EncodedProgram::AddElfARMMakeRelocs() {
  return ops_.push_back(MAKE_ELF_ARM_RELOCATION_TABLE);
}

void EncodedProgram::DebuggingSummary() {
  VLOG(1) << "EncodedProgram Summary"
          << "\n  image base  " << image_base_
          << "\n  abs32 rvas  " << abs32_rva_.size()
          << "\n  rel32 rvas  " << rel32_rva_.size()
          << "\n  ops         " << ops_.size()
          << "\n  origins     " << origins_.size()
          << "\n  copy_counts " << copy_counts_.size()
          << "\n  copy_bytes  " << copy_bytes_.size()
          << "\n  abs32_ix    " << abs32_ix_.size()
          << "\n  rel32_ix    " << rel32_ix_.size();
}

////////////////////////////////////////////////////////////////////////////////

// For algorithm refinement purposes it is useful to write subsets of the file
// format.  This gives us the ability to estimate the entropy of the
// differential compression of the individual streams, which can provide
// invaluable insights.  The default, of course, is to include all the streams.
//
enum FieldSelect {
  INCLUDE_ABS32_ADDRESSES = 0x0001,
  INCLUDE_REL32_ADDRESSES = 0x0002,
  INCLUDE_ABS32_INDEXES = 0x0010,
  INCLUDE_REL32_INDEXES = 0x0020,
  INCLUDE_OPS = 0x0100,
  INCLUDE_BYTES = 0x0200,
  INCLUDE_COPY_COUNTS = 0x0400,
  INCLUDE_MISC = 0x1000
};

static FieldSelect GetFieldSelect() {
#if 1
  // TODO(sra): Use better configuration.
  scoped_ptr<base::Environment> env(base::Environment::Create());
  std::string s;
  env->GetVar("A_FIELDS", &s);
  if (!s.empty()) {
    return static_cast<FieldSelect>(
        wcstoul(base::ASCIIToWide(s).c_str(), 0, 0));
  }
#endif
  return static_cast<FieldSelect>(~0);
}

CheckBool EncodedProgram::WriteTo(SinkStreamSet* streams) {
  FieldSelect select = GetFieldSelect();

  // The order of fields must be consistent in WriteTo and ReadFrom, regardless
  // of the streams used.  The code can be configured with all kStreamXXX
  // constants the same.
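  // (Illustrative aside, not part of the format definition: if every
  // kStreamXXX constant above were set to 0, all fields would share a single
  // stream, and the round trip would still work because WriteTo and ReadFrom
  // visit the fields in the same order.)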
  //
  // If we change the code to pipeline reading with assembly (to avoid
  // temporary storage vectors by consuming operands directly from the stream)
  // then we need to read the base address and the random access address tables
  // first; the rest can be interleaved.

  if (select & INCLUDE_MISC) {
    // TODO(sra): write 64 bits.
    if (!streams->stream(kStreamMisc)->WriteVarint32(
            static_cast<uint32>(image_base_))) {
      return false;
    }
  }

  bool success = true;

  if (select & INCLUDE_ABS32_ADDRESSES) {
    success &= WriteU32Delta(abs32_rva_,
                             streams->stream(kStreamAbs32Addresses));
  }

  if (select & INCLUDE_REL32_ADDRESSES) {
    success &= WriteU32Delta(rel32_rva_,
                             streams->stream(kStreamRel32Addresses));
  }

  if (select & INCLUDE_MISC)
    success &= WriteVector(origins_, streams->stream(kStreamOriginAddresses));

  if (select & INCLUDE_OPS) {
    // 5 for length.
    success &= streams->stream(kStreamOps)->Reserve(ops_.size() + 5);
    success &= WriteVector(ops_, streams->stream(kStreamOps));
  }

  if (select & INCLUDE_COPY_COUNTS)
    success &= WriteVector(copy_counts_, streams->stream(kStreamCopyCounts));

  if (select & INCLUDE_BYTES)
    success &= WriteVectorU8(copy_bytes_, streams->stream(kStreamBytes));

  if (select & INCLUDE_ABS32_INDEXES)
    success &= WriteVector(abs32_ix_, streams->stream(kStreamAbs32Indexes));

  if (select & INCLUDE_REL32_INDEXES)
    success &= WriteVector(rel32_ix_, streams->stream(kStreamRel32Indexes));

  return success;
}

bool EncodedProgram::ReadFrom(SourceStreamSet* streams) {
  // TODO(sra): read 64 bits.
  uint32 temp;
  if (!streams->stream(kStreamMisc)->ReadVarint32(&temp))
    return false;
  image_base_ = temp;

  if (!ReadU32Delta(&abs32_rva_, streams->stream(kStreamAbs32Addresses)))
    return false;
  if (!ReadU32Delta(&rel32_rva_, streams->stream(kStreamRel32Addresses)))
    return false;
  if (!ReadVector(&origins_, streams->stream(kStreamOriginAddresses)))
    return false;
  if (!ReadVector(&ops_, streams->stream(kStreamOps)))
    return false;
  if (!ReadVector(&copy_counts_, streams->stream(kStreamCopyCounts)))
    return false;
  if (!ReadVectorU8(&copy_bytes_, streams->stream(kStreamBytes)))
    return false;
  if (!ReadVector(&abs32_ix_, streams->stream(kStreamAbs32Indexes)))
    return false;
  if (!ReadVector(&rel32_ix_, streams->stream(kStreamRel32Indexes)))
    return false;

  // Check that streams have been completely consumed.
  for (int i = 0;  i < kStreamLimit;  ++i) {
    if (streams->stream(i)->Remaining() > 0)
      return false;
  }

  return true;
}

// Safe, non-throwing version of std::vector::at().  Returns 'true' for
// success, 'false' for out-of-bounds index error.
template<typename V, typename T>
bool VectorAt(const V& v, size_t index, T* output) {
  if (index >= v.size())
    return false;
  *output = v[index];
  return true;
}

CheckBool EncodedProgram::EvaluateRel32ARM(OP op,
                                           size_t& ix_rel32_ix,
                                           RVA& current_rva,
                                           SinkStream* output) {
  switch (op & 0x0000F000) {
    case REL32ARM8: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF8,
                                            static_cast<uint16>(op),
                                            static_cast<uint32>(rva -
                                                                current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint16 op16 = decompressed_op;
      if (!output->Write(&op16, 2))
        return false;
      current_rva += 2;
      break;
    }
    case REL32ARM11: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF11, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint16 op16 = decompressed_op;
      if (!output->Write(&op16, 2))
        return false;
      current_rva += 2;
      break;
    }
    case REL32ARM24: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF24, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      if (!output->Write(&decompressed_op, 4))
        return false;
      current_rva += 4;
      break;
    }
    case REL32ARM25: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF25, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint32 words = (decompressed_op << 16) | (decompressed_op >> 16);
      if (!output->Write(&words, 4))
        return false;
      current_rva += 4;
      break;
    }
    case REL32ARM21: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF21, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint32 words = (decompressed_op << 16) | (decompressed_op >> 16);
      if (!output->Write(&words, 4))
        return false;
      current_rva += 4;
      break;
    }
    default:
      return false;
  }

  return true;
}

CheckBool EncodedProgram::AssembleTo(SinkStream* final_buffer) {
  // For the most part, the assembly process walks the various tables.
  // ix_mumble is the index into the mumble table.
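  //
  // (Hypothetical illustration, not taken from a real encoding: the op
  // sequence {ORIGIN, COPY, REL32, COPY1} would consume one entry from
  // origins_, one count from copy_counts_ plus that many bytes from
  // copy_bytes_, one index from rel32_ix_, and then one more byte from
  // copy_bytes_, in that order.)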
  size_t ix_origins = 0;
  size_t ix_copy_counts = 0;
  size_t ix_copy_bytes = 0;
  size_t ix_abs32_ix = 0;
  size_t ix_rel32_ix = 0;

  RVA current_rva = 0;

  bool pending_pe_relocation_table = false;
  uint8 pending_pe_relocation_table_type = 0x03;  // IMAGE_REL_BASED_HIGHLOW
  Elf32_Word pending_elf_relocation_table_type = 0;
  SinkStream bytes_following_relocation_table;

  SinkStream* output = final_buffer;

  for (size_t ix_ops = 0;  ix_ops < ops_.size();  ++ix_ops) {
    OP op = ops_[ix_ops];

    switch (op) {
      default:
        if (!EvaluateRel32ARM(op, ix_rel32_ix, current_rva, output))
          return false;
        break;

      case ORIGIN: {
        RVA section_rva;
        if (!VectorAt(origins_, ix_origins, &section_rva))
          return false;
        ++ix_origins;
        current_rva = section_rva;
        break;
      }

      case COPY: {
        uint32 count;
        if (!VectorAt(copy_counts_, ix_copy_counts, &count))
          return false;
        ++ix_copy_counts;
        for (uint32 i = 0;  i < count;  ++i) {
          uint8 b;
          if (!VectorAt(copy_bytes_, ix_copy_bytes, &b))
            return false;
          ++ix_copy_bytes;
          if (!output->Write(&b, 1))
            return false;
        }
        current_rva += count;
        break;
      }

      case COPY1: {
        uint8 b;
        if (!VectorAt(copy_bytes_, ix_copy_bytes, &b))
          return false;
        ++ix_copy_bytes;
        if (!output->Write(&b, 1))
          return false;
        current_rva += 1;
        break;
      }

      case REL32: {
        uint32 index;
        if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
          return false;
        ++ix_rel32_ix;
        RVA rva;
        if (!VectorAt(rel32_rva_, index, &rva))
          return false;
        uint32 offset = (rva - (current_rva + 4));
        if (!output->Write(&offset, 4))
          return false;
        current_rva += 4;
        break;
      }

      case ABS32: {
        uint32 index;
        if (!VectorAt(abs32_ix_, ix_abs32_ix, &index))
          return false;
        ++ix_abs32_ix;
        RVA rva;
        if (!VectorAt(abs32_rva_, index, &rva))
          return false;
        uint32 abs32 = static_cast<uint32>(rva + image_base_);
        if (!abs32_relocs_.push_back(current_rva) || !output->Write(&abs32, 4))
          return false;
        current_rva += 4;
        break;
      }

      case MAKE_PE_RELOCATION_TABLE: {
        // We can see the base relocation anywhere, but we only have the
        // information to generate it at the very end.  So we divert the bytes
        // we are generating to a temporary stream.
        if (pending_pe_relocation_table)
          return false;  // Can't have two base relocation tables.

        pending_pe_relocation_table = true;
        output = &bytes_following_relocation_table;
        break;
        // There is a potential problem *if* the instruction stream contains
        // some REL32 relocations following the base relocation and in the same
        // section.  We don't know the size of the table, so 'current_rva' will
        // be wrong, causing REL32 offsets to be miscalculated.  This never
        // happens; the base relocation table is usually in a section of its
        // own, a data-only section, and following everything else in the
        // executable except some padding zero bytes.  We could fix this by
        // emitting an ORIGIN after the MAKE_BASE_RELOCATION_TABLE.
      }

      case MAKE_PE64_RELOCATION_TABLE: {
        if (pending_pe_relocation_table)
          return false;  // Can't have two base relocation tables.

        pending_pe_relocation_table = true;
        pending_pe_relocation_table_type = 0x0A;  // IMAGE_REL_BASED_DIR64
        output = &bytes_following_relocation_table;
        break;
      }

      case MAKE_ELF_ARM_RELOCATION_TABLE: {
        // We can see the base relocation anywhere, but we only have the
        // information to generate it at the very end.  So we divert the bytes
        // we are generating to a temporary stream.
        if (pending_elf_relocation_table_type)
          return false;  // Can't have two base relocation tables.

        pending_elf_relocation_table_type = R_ARM_RELATIVE;
        output = &bytes_following_relocation_table;
        break;
      }

      case MAKE_ELF_RELOCATION_TABLE: {
        // We can see the base relocation anywhere, but we only have the
        // information to generate it at the very end.  So we divert the bytes
        // we are generating to a temporary stream.
        if (pending_elf_relocation_table_type)
          return false;  // Can't have two base relocation tables.

        pending_elf_relocation_table_type = R_386_RELATIVE;
        output = &bytes_following_relocation_table;
        break;
      }
    }
  }

  if (pending_pe_relocation_table) {
    if (!GeneratePeRelocations(final_buffer,
                               pending_pe_relocation_table_type) ||
        !final_buffer->Append(&bytes_following_relocation_table))
      return false;
  }

  if (pending_elf_relocation_table_type) {
    if (!GenerateElfRelocations(pending_elf_relocation_table_type,
                                final_buffer) ||
        !final_buffer->Append(&bytes_following_relocation_table))
      return false;
  }

  // Final verification check: did we consume all lists?
  if (ix_copy_counts != copy_counts_.size())
    return false;
  if (ix_copy_bytes != copy_bytes_.size())
    return false;
  if (ix_abs32_ix != abs32_ix_.size())
    return false;
  if (ix_rel32_ix != rel32_ix_.size())
    return false;

  return true;
}

// RelocBlock has the layout of a block of relocations in the base relocation
// table file format.
//
struct RelocBlockPOD {
  uint32 page_rva;
  uint32 block_size;
  uint16 relocs[4096];  // Allow up to one relocation per byte of a 4k page.
};

COMPILE_ASSERT(offsetof(RelocBlockPOD, relocs) == 8, reloc_block_header_size);

class RelocBlock {
 public:
  RelocBlock() {
    pod.page_rva = ~0;
    pod.block_size = 8;
  }

  void Add(uint16 item) {
    pod.relocs[(pod.block_size - 8) / 2] = item;
    pod.block_size += 2;
  }

  CheckBool Flush(SinkStream* buffer) WARN_UNUSED_RESULT {
    bool ok = true;
    if (pod.block_size != 8) {
      if (pod.block_size % 4 != 0) {  // Pad to make size multiple of 4 bytes.
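        // For example (illustrative numbers, not from the source): a block
        // holding 3 relocs has block_size 8 + 3*2 = 14, so appending one zero
        // entry brings it to 16, a multiple of 4.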
        Add(0);
      }
      ok = buffer->Write(&pod, pod.block_size);
      pod.block_size = 8;
    }
    return ok;
  }
  RelocBlockPOD pod;
};

CheckBool EncodedProgram::GeneratePeRelocations(SinkStream* buffer,
                                                uint8 type) {
  std::sort(abs32_relocs_.begin(), abs32_relocs_.end());

  RelocBlock block;

  bool ok = true;
  for (size_t i = 0;  ok && i < abs32_relocs_.size();  ++i) {
    uint32 rva = abs32_relocs_[i];
    uint32 page_rva = rva & ~0xFFF;
    if (page_rva != block.pod.page_rva) {
      ok &= block.Flush(buffer);
      block.pod.page_rva = page_rva;
    }
    if (ok)
      block.Add(((static_cast<uint16>(type)) << 12) | (rva & 0xFFF));
  }
  ok &= block.Flush(buffer);
  return ok;
}

CheckBool EncodedProgram::GenerateElfRelocations(Elf32_Word r_info,
                                                 SinkStream* buffer) {
  std::sort(abs32_relocs_.begin(), abs32_relocs_.end());

  Elf32_Rel relocation_block;

  relocation_block.r_info = r_info;

  bool ok = true;
  for (size_t i = 0;  ok && i < abs32_relocs_.size();  ++i) {
    relocation_block.r_offset = abs32_relocs_[i];
    ok = buffer->Write(&relocation_block, sizeof(Elf32_Rel));
  }

  return ok;
}

////////////////////////////////////////////////////////////////////////////////

Status WriteEncodedProgram(EncodedProgram* encoded, SinkStreamSet* sink) {
  if (!encoded->WriteTo(sink))
    return C_STREAM_ERROR;
  return C_OK;
}

Status ReadEncodedProgram(SourceStreamSet* streams, EncodedProgram** output) {
  EncodedProgram* encoded = new EncodedProgram();
  if (encoded->ReadFrom(streams)) {
    *output = encoded;
    return C_OK;
  }
  delete encoded;
  return C_DESERIALIZATION_FAILED;
}

Status Assemble(EncodedProgram* encoded, SinkStream* buffer) {
  bool assembled = encoded->AssembleTo(buffer);
  if (assembled)
    return C_OK;
  return C_ASSEMBLY_FAILED;
}

void DeleteEncodedProgram(EncodedProgram* encoded) {
  delete encoded;
}

}  // namespace courgette
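
// A minimal usage sketch of the entry points above (not part of the original
// file; 'streams' and 'output' are assumed to be an existing SourceStreamSet
// and SinkStream):
//
//   courgette::EncodedProgram* program = NULL;
//   if (courgette::ReadEncodedProgram(&streams, &program) == courgette::C_OK) {
//     courgette::Status status = courgette::Assemble(program, &output);
//     courgette::DeleteEncodedProgram(program);
//   }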