/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2014, ARM Limited. All rights reserved.
*
*  This program and the accompanying materials
*  are licensed and made available under the terms and conditions of the BSD License
*  which accompanies this distribution. The full text of the license may be found at
*  http://opensource.org/licenses/bsd-license.php
*
*  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
*  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include "AArch64Lib.h"
#include "ArmLibPrivate.h"

// We use this index definition to define an invalid block entry
#define TT_ATTR_INDX_INVALID    ((UINT32)~0)

STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

  // Uncached and device mappings are treated as outer shareable by default.
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

  default:
    ASSERT (0);
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    if (ArmReadCurrentEL () == AARCH64_EL2)
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_TABLE_XN;
    else
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_TABLE_UXN | TT_TABLE_PXN;
  }
}
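
// PageAttributeToGcdAttribute() translates the attribute field of a translation table
// entry back into GCD attributes: the MAIR index selects the cacheability attribute
// (EFI_MEMORY_UC/WC/WT/WB), read-only access permissions map to EFI_MEMORY_WP, and the
// UXN/PXN bits map to EFI_MEMORY_XP.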
UINT64
PageAttributeToGcdAttribute (
  IN UINT64  PageAttributes
  )
{
  UINT64  GcdAttributes;

  switch (PageAttributes & TT_ATTR_INDX_MASK) {
  case TT_ATTR_INDX_DEVICE_MEMORY:
    GcdAttributes = EFI_MEMORY_UC;
    break;
  case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
    GcdAttributes = EFI_MEMORY_WC;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
    GcdAttributes = EFI_MEMORY_WT;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_BACK:
    GcdAttributes = EFI_MEMORY_WB;
    break;
  default:
    DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
    ASSERT (0);
    // The Global Coherency Domain (GCD) value is defined as a bit set.
    // Returning 0 means no attribute has been set.
    GcdAttributes = 0;
  }

  // Determine protection attributes
  if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
    // Read only cases map to write-protect
    GcdAttributes |= EFI_MEMORY_WP;
  }

  // Process eXecute Never attribute
  if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0) {
    GcdAttributes |= EFI_MEMORY_XP;
  }

  return GcdAttributes;
}

ARM_MEMORY_REGION_ATTRIBUTES
GcdAttributeToArmAttribute (
  IN UINT64  GcdAttributes
  )
{
  switch (GcdAttributes & 0xFF) {
  case EFI_MEMORY_UC:
    return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
  case EFI_MEMORY_WC:
    return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;
  case EFI_MEMORY_WT:
    return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;
  case EFI_MEMORY_WB:
    return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
  default:
    DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: Attributes 0x%lX are not supported.\n", GcdAttributes));
    ASSERT (0);
    return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
  }
}

// Describe the T0SZ values for each translation table level
typedef struct {
  UINTN  MinT0SZ;
  UINTN  MaxT0SZ;
  UINTN  LargestT0SZ;  // Generally (MaxT0SZ == LargestT0SZ), but at the last level
                       // the MaxT0SZ is not at the boundary of the table
} T0SZ_DESCRIPTION_PER_LEVEL;

// Map table for the corresponding Level of Table
STATIC CONST T0SZ_DESCRIPTION_PER_LEVEL T0SZPerTableLevel[] = {
  { 16, 24, 24 }, // Table Level 0
  { 25, 33, 33 }, // Table Level 1
  { 34, 39, 42 }  // Table Level 2
};

VOID
GetRootTranslationTableInfo (
  IN  UINTN  T0SZ,
  OUT UINTN  *TableLevel,
  OUT UINTN  *TableEntryCount
  )
{
  UINTN  Index;

  // Identify the level of the root table from the given T0SZ
  for (Index = 0; Index < sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL); Index++) {
    if (T0SZ <= T0SZPerTableLevel[Index].MaxT0SZ) {
      break;
    }
  }

  // If we have not found the corresponding maximum T0SZ then we use the last one
  if (Index == sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL)) {
    Index--;
  }

  // Get the level of the root table
  if (TableLevel) {
    *TableLevel = Index;
  }

  // The number of entries in the root table is 2^(LargestT0SZ - T0SZ + 1)
  if (TableEntryCount) {
    *TableEntryCount = 1 << (T0SZPerTableLevel[Index].LargestT0SZ - T0SZ + 1);
  }
}

STATIC
VOID
LookupAddresstoRootTable (
  IN  UINT64  MaxAddress,
  OUT UINTN   *T0SZ,
  OUT UINTN   *TableEntryCount
  )
{
  UINTN  TopBit;

  // Check the parameters are not NULL
  ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));

  // Look for the highest bit set in MaxAddress
  for (TopBit = 63; TopBit != 0; TopBit--) {
    if ((1ULL << TopBit) & MaxAddress) {
      // MaxAddress top bit is found
      TopBit = TopBit + 1;
      break;
    }
  }
  ASSERT (TopBit != 0);

  // Calculate T0SZ from the top bit of the MaxAddress
  *T0SZ = 64 - TopBit;

  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
}
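
// GetBlockEntryListFromAddress() walks the translation tables from the root table down
// to the level that can describe RegionStart with the largest usable block size, given
// its alignment and the requested BlockEntrySize. On the way down it splits existing
// block entries and allocates zeroed tables for invalid entries as required. It returns
// the block entry covering RegionStart, the level it lives at, the block size at that
// level and the last entry of the containing table, or NULL on allocation failure.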
STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN     UINT64  *RootTable,
  IN     UINT64  RegionStart,
  OUT    UINTN   *TableLevel,
  IN OUT UINT64  *BlockEntrySize,
  OUT    UINT64  **LastBlockEntry
  )
{
  UINTN   RootTableLevel;
  UINTN   RootTableEntryCount;
  UINT64  *TranslationTable;
  UINT64  *BlockEntry;
  UINT64  *SubTableBlockEntry;
  UINT64  BlockEntryAddress;
  UINTN   BaseAddressAlignment;
  UINTN   PageLevel;
  UINTN   Index;
  UINTN   IndexLevel;
  UINTN   T0SZ;
  UINT64  Attributes;
  UINT64  TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on 4KB boundary and not 0
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
  } else {
    // Identify the highest possible alignment for the Base Address
    BaseAddressAlignment = LowBitSet64 (RegionStart);
  }

  // Identify the Page Level the RegionStart must belong to. Note that PageLevel
  // should be at least 1 since block translations are not supported at level 0
  PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);
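  // For example, a RegionStart of 0x200000 (2MB aligned, so BaseAddressAlignment == 21)
  // gives PageLevel = 3 - ((21 - 12) / 9) = 2, i.e. the region can start on a 2MB block
  // boundary at level 2 of the 4KB granule translation tables.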

  // If the required size is smaller than the current block size then we need to move to
  // the next (deeper) page level. The PageLevel was calculated from the Base Address
  // alignment but did not take into account the alignment of the allocation size
  while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit so we need to go one page level deeper (smaller block size)
    PageLevel++;
  }

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose
  // RegionStart to get the appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are already at the targeted level then the block has been split into a
      // table, so move the target one level deeper
      if (IndexLevel == PageLevel) {
        // Enter the next level
        PageLevel++;
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_PXN_MASK) {
          TableAttributes = TT_TABLE_PXN;
        }
        // XN maps to UXN in the EL1&0 translation regime
        if (Attributes & TT_XN_MASK) {
          TableAttributes = TT_TABLE_XN;
        }
        if (Attributes & TT_NS) {
          TableAttributes |= TT_TABLE_NS;
        }

        // Get the address corresponding to this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL (IndexLevel);
        // Shift back to the left to zero the bits below the block boundary
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL (IndexLevel);

        // Set the correct entry type for the next page level
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = (UINT64*)AllocateAlignedPages (EFI_SIZE_TO_PAGES (TT_ENTRY_COUNT * sizeof (UINT64)), TT_ALIGNMENT_DESCRIPTION_TABLE);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // Populate the newly created lower level table
        SubTableBlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL (IndexLevel + 1)));
          SubTableBlockEntry++;
        }

        // Fill the BlockEntry with the new TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY;
      }
    } else {
      if (IndexLevel != PageLevel) {
        //
        // Case where we have an invalid entry and we are at a page level above the one targeted.
        //

        // Create a new translation table
        TranslationTable = (UINT64*)AllocateAlignedPages (EFI_SIZE_TO_PAGES (TT_ENTRY_COUNT * sizeof (UINT64)), TT_ALIGNMENT_DESCRIPTION_TABLE);
        if (TranslationTable == NULL) {
          return NULL;
        }

        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof (UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
      }
    }
  }

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now that we have the Table Level we can get the Block Size associated with this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  // The last block of the root table depends on the number of entries in this table,
  // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS (TranslationTable,
      (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);

  return BlockEntry;
}
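
// UpdateRegionMapping() maps [RegionStart, RegionStart + RegionLength) by repeatedly
// asking GetBlockEntryListFromAddress() for the largest usable block entries and filling
// them with the output block address and Attributes. BlockEntryMask selects which bits
// of an existing entry are preserved (0 rewrites the whole entry).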
STATIC
RETURN_STATUS
UpdateRegionMapping (
  IN UINT64  *RootTable,
  IN UINT64  RegionStart,
  IN UINT64  RegionLength,
  IN UINT64  Attributes,
  IN UINT64  BlockEntryMask
  )
{
  UINT32  Type;
  UINT64  *BlockEntry;
  UINT64  *LastBlockEntry;
  UINT64  BlockEntrySize;
  UINTN   TableLevel;

  // Ensure the Length is aligned on 4KB boundary
  if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return RETURN_INVALID_PARAMETER;
  }

  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the
    // Table Descriptor such as the size of the Block Entry and the address of the last BlockEntry
    // of the Table Descriptor
    BlockEntrySize = RegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() returns NULL when it fails to allocate new pages for the Translation Tables
      return RETURN_OUT_OF_RESOURCES;
    }

    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }

    do {
      // Fill the Block Entry with attribute and output block address
      *BlockEntry &= BlockEntryMask;
      *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RegionLength -= BlockEntrySize;
      BlockEntry++;

      // Break the inner loop when the next block is a table
      // Rerun GetBlockEntryListFromAddress to avoid a page table memory leak
      if (TableLevel != 3 &&
          (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        break;
      }
    } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RegionLength != 0);

  return RETURN_SUCCESS;
}

STATIC
RETURN_STATUS
FillTranslationTable (
  IN UINT64                        *RootTable,
  IN ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           RootTable,
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}
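
// SetMemoryAttributes() updates the live translation tables referenced by TTBR0 for the
// given region: the GCD attributes are converted to ARM memory region attributes and
// written through FillTranslationTable(), then the TLBs are invalidated. VirtualMask is
// currently unused as the region is identity mapped.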
RETURN_STATUS
SetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN EFI_PHYSICAL_ADDRESS  VirtualMask
  )
{
  RETURN_STATUS                 Status;
  ARM_MEMORY_REGION_DESCRIPTOR  MemoryRegion;
  UINT64                        *TranslationTable;

  MemoryRegion.PhysicalBase = BaseAddress;
  MemoryRegion.VirtualBase = BaseAddress;
  MemoryRegion.Length = Length;
  MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);

  TranslationTable = ArmGetTTBR0BaseAddress ();

  Status = FillTranslationTable (TranslationTable, &MemoryRegion);
  if (RETURN_ERROR (Status)) {
    return Status;
  }

  // Invalidate all TLB entries so changes are synced
  ArmInvalidateTlb ();

  return RETURN_SUCCESS;
}

STATIC
RETURN_STATUS
SetMemoryRegionAttribute (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN UINT64                BlockEntryMask
  )
{
  RETURN_STATUS  Status;
  UINT64         *RootTable;

  RootTable = ArmGetTTBR0BaseAddress ();

  Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
  if (RETURN_ERROR (Status)) {
    return Status;
  }

  // Invalidate all TLB entries so changes are synced
  ArmInvalidateTlb ();

  return RETURN_SUCCESS;
}

RETURN_STATUS
ArmSetMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

RETURN_STATUS
ArmClearMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask);
}

RETURN_STATUS
ArmSetMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

RETURN_STATUS
ArmClearMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_NO_RO,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
}

RETURN_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID                          *TranslationTable;
  UINTN                         TranslationTablePageCount;
  UINT32                        TranslationTableAttribute;
  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTableEntry;
  UINT64                        MaxAddress;
  UINT64                        TopAddress;
  UINTN                         T0SZ;
  UINTN                         RootTableEntryCount;
  UINT64                        TCR;
  RETURN_STATUS                 Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return RETURN_INVALID_PARAMETER;
  }

  // Identify the highest address of the memory table
  MaxAddress = MemoryTable->PhysicalBase + MemoryTable->Length - 1;
  MemoryTableEntry = MemoryTable;
  while (MemoryTableEntry->Length != 0) {
    TopAddress = MemoryTableEntry->PhysicalBase + MemoryTableEntry->Length - 1;
    if (TopAddress > MaxAddress) {
      MaxAddress = TopAddress;
    }
    MemoryTableEntry++;
  }
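
  // The geometry of the root translation table is derived from the highest address that
  // has to be mapped: for example, MaxAddress = 0xFFFFFFFF gives T0SZ = 32, which selects
  // a level 1 root table with 4 entries (each level 1 entry covers 1GB with the 4KB granule).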
  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Address spaces larger than 48 bits are not supported
      return RETURN_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Address spaces larger than 48 bits are not supported
      return RETURN_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return RETURN_UNSUPPORTED;
  }

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for the translation table
  TranslationTablePageCount = EFI_SIZE_TO_PAGES (RootTableEntryCount * sizeof (UINT64));
  TranslationTable = (UINT64*)AllocateAlignedPages (TranslationTablePageCount, TT_ALIGNMENT_DESCRIPTION_TABLE);
  if (TranslationTable == NULL) {
    return RETURN_OUT_OF_RESOURCES;
  }
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));
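
  // The remainder of this function runs with the MMU and caches disabled: the translation
  // tables are populated from the memory map, the TCR cacheability/shareability attributes
  // used for table walks are derived from the region that contains the root table, MAIR is
  // programmed with the attribute indexes used above, and only then are the caches and the
  // MMU re-enabled.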
  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {
    // Find the memory attribute for the Translation Table
    if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) &&
        ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {
      TranslationTableAttribute = MemoryTable->Attributes;
    }

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (RETURN_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  // Translate the Memory Attributes into Translation Table Register Attributes
  if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {
    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_NON_CACHEABLE | TCR_RGN_INNER_NON_CACHEABLE;
  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {
    TCR |= TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WRITE_BACK_ALLOC | TCR_RGN_INNER_WRITE_BACK_ALLOC;
  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {
    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_WRITE_THROUGH | TCR_RGN_INNER_WRITE_THROUGH;
  } else {
    // If we failed to find a mapping that contains the root translation table then it probably
    // means the translation table is not mapped in the given memory map.
    ASSERT (0);
    Status = RETURN_UNSUPPORTED;
    goto FREE_TRANSLATION_TABLE;
  }

  // Set TCR again after getting the Translation Table attributes
  ArmSetTCR (TCR);

  ArmSetMAIR (MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB

  ArmDisableAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return RETURN_SUCCESS;

FREE_TRANSLATION_TABLE:
  FreePages (TranslationTable, TranslationTablePageCount);
  return Status;
}