#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, ARM Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution.  The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------

#include <Chipset/AArch64.h>
#include <AsmMacroIoLibV8.h>

.text
.align 3

GCC_ASM_EXPORT (ArmInvalidateInstructionCache)
GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmCleanDataCacheEntryToPoUByMVA)
GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmEnableMmu)
GCC_ASM_EXPORT (ArmDisableMmu)
GCC_ASM_EXPORT (ArmDisableCachesAndMmu)
GCC_ASM_EXPORT (ArmMmuEnabled)
GCC_ASM_EXPORT (ArmEnableDataCache)
GCC_ASM_EXPORT (ArmDisableDataCache)
GCC_ASM_EXPORT (ArmEnableInstructionCache)
GCC_ASM_EXPORT (ArmDisableInstructionCache)
GCC_ASM_EXPORT (ArmDisableAlignmentCheck)
GCC_ASM_EXPORT (ArmEnableAlignmentCheck)
GCC_ASM_EXPORT (ArmEnableBranchPrediction)
GCC_ASM_EXPORT (ArmDisableBranchPrediction)
GCC_ASM_EXPORT (AArch64AllDataCachesOperation)
GCC_ASM_EXPORT (ArmDataMemoryBarrier)
GCC_ASM_EXPORT (ArmDataSynchronizationBarrier)
GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)
GCC_ASM_EXPORT (ArmWriteVBar)
GCC_ASM_EXPORT (ArmReadVBar)
GCC_ASM_EXPORT (ArmEnableVFP)
GCC_ASM_EXPORT (ArmCallWFI)
GCC_ASM_EXPORT (ArmReadMpidr)
GCC_ASM_EXPORT (ArmReadTpidrurw)
GCC_ASM_EXPORT (ArmWriteTpidrurw)
GCC_ASM_EXPORT (ArmIsArchTimerImplemented)
GCC_ASM_EXPORT (ArmReadIdPfr0)
GCC_ASM_EXPORT (ArmReadIdPfr1)
GCC_ASM_EXPORT (ArmWriteHcr)
GCC_ASM_EXPORT (ArmReadCurrentEL)

.set CTRL_M_BIT,      (1 << 0)
.set CTRL_A_BIT,      (1 << 1)
.set CTRL_C_BIT,      (1 << 2)
.set CTRL_I_BIT,      (1 << 12)
.set CPACR_VFP_BITS,  (3 << 20)

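// The cache maintenance entry points below each take a single operand in x0:
// a virtual address for the by-MVA variants, or a packed set/way/level value
// for the by-set/way variants. A minimal usage sketch in C, assuming the
// prototypes declared in ArmPkg's ArmLib.h, with a hypothetical 64-byte line
// size (real code should derive the line size from CTR_EL0, not hard-code it):
//
//   UINTN  Address;
//   for (Address = Start; Address < Start + Length; Address += 64) {
//     ArmCleanDataCacheEntryByMVA (Address);
//   }
//   ArmDataSynchronizationBarrier ();   // make the cleans visible before continuing
//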
ASM_PFX(ArmInvalidateDataCacheEntryByMVA):
  dc      ivac, x0    // Invalidate single data cache line
  ret


ASM_PFX(ArmCleanDataCacheEntryByMVA):
  dc      cvac, x0    // Clean single data cache line
  ret


ASM_PFX(ArmCleanDataCacheEntryToPoUByMVA):
  dc      cvau, x0    // Clean single data cache line to PoU
  ret


ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
  dc      civac, x0   // Clean and invalidate single data cache line
  ret


ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):
  dc      isw, x0     // Invalidate this line
  ret


ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):
  dc      cisw, x0    // Clean and Invalidate this line
  ret


ASM_PFX(ArmCleanDataCacheEntryBySetWay):
  dc      csw, x0     // Clean this line
  ret


ASM_PFX(ArmInvalidateInstructionCache):
  ic      iallu       // Invalidate entire instruction cache
  dsb     sy
  isb
  ret

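// A typical sequence after writing code to memory, sketched in C (hedged:
// only entry points exported by this file are used; CodeBuffer is a
// hypothetical, cache-line-aligned buffer that was just written):
//
//   ArmCleanDataCacheEntryToPoUByMVA ((UINTN)CodeBuffer);  // per modified line
//   ArmDataSynchronizationBarrier ();
//   ArmInvalidateInstructionCache ();                      // IC IALLU + DSB + ISB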

ASM_PFX(ArmEnableMmu):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1       // Read System control register EL1
   b       4f
2: mrs     x0, sctlr_el2       // Read System control register EL2
   b       4f
3: mrs     x0, sctlr_el3       // Read System control register EL3
4: orr     x0, x0, #CTRL_M_BIT // Set MMU enable bit
   EL1_OR_EL2_OR_EL3(x1)
1: tlbi    vmalle1
   dsb     nsh
   isb
   msr     sctlr_el1, x0       // Write back
   b       4f
2: tlbi    alle2
   dsb     nsh
   isb
   msr     sctlr_el2, x0       // Write back
   b       4f
3: tlbi    alle3
   dsb     nsh
   isb
   msr     sctlr_el3, x0       // Write back
4: isb
   ret

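// ArmEnableMmu only sets SCTLR_ELx.M; the translation tables, TCR and MAIR
// must already be programmed. A hedged call-order sketch in C (helper names
// such as ArmSetMAIR/ArmSetTCR/ArmSetTTBR0 are assumed from ArmPkg's ArmLib):
//
//   ArmSetMAIR (MairValue);              // memory attribute encodings
//   ArmSetTCR (TcrValue);                // translation control
//   ArmSetTTBR0 (TranslationTableBase);  // root table, suitably aligned
//   ArmEnableMmu ();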

ASM_PFX(ArmDisableMmu):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Read System Control Register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Read System Control Register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Read System Control Register EL3
4: and     x0, x0, #~CTRL_M_BIT  // Clear MMU enable bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back
   tlbi    vmalle1
   b       4f
2: msr     sctlr_el2, x0        // Write back
   tlbi    alle2
   b       4f
3: msr     sctlr_el3, x0        // Write back
   tlbi    alle3
4: dsb     sy
   isb
   ret


ASM_PFX(ArmDisableCachesAndMmu):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: mov     x1, #~(CTRL_M_BIT | CTRL_C_BIT | CTRL_I_BIT)  // Disable MMU, D & I caches
   and     x0, x0, x1
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret

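// Disabling the D-cache does not clean it, so dirty lines should be written
// back first. A hedged teardown sketch in C, using only entry points exported
// by this file:
//
//   AArch64AllDataCachesOperation (ArmCleanInvalidateDataCacheEntryBySetWay);
//   ArmDisableCachesAndMmu ();
//   ArmInvalidateInstructionCache ();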

ASM_PFX(ArmMmuEnabled):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #CTRL_M_BIT
   ret


ASM_PFX(ArmEnableDataCache):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: orr     x0, x0, #CTRL_C_BIT  // Set C bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_PFX(ArmDisableDataCache):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #~CTRL_C_BIT  // Clear C bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_PFX(ArmEnableInstructionCache):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: orr     x0, x0, #CTRL_I_BIT  // Set I bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_PFX(ArmDisableInstructionCache):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #~CTRL_I_BIT  // Clear I bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_PFX(ArmEnableAlignmentCheck):
   EL1_OR_EL2(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       3f
2: mrs     x0, sctlr_el2        // Get control register EL2
3: orr     x0, x0, #CTRL_A_BIT  // Set A (alignment check) bit
   EL1_OR_EL2(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       3f
2: msr     sctlr_el2, x0        // Write back control register
3: dsb     sy
   isb
   ret


ASM_PFX(ArmDisableAlignmentCheck):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #~CTRL_A_BIT  // Clear A (alignment check) bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


// Branch prediction is always enabled on AArch64; any other behaviour is
// implementation specific. Keep as a no-op for C API compatibility for now.
ASM_PFX(ArmEnableBranchPrediction):
  ret


// Branch prediction is always enabled on AArch64; any other behaviour is
// implementation specific. Keep as a no-op for C API compatibility for now.
ASM_PFX(ArmDisableBranchPrediction):
  ret


ASM_PFX(AArch64AllDataCachesOperation):
// We can use regs 0-7 and 9-15 without having to save/restore.
// Save our link register on the stack. The stack must always be quad-word aligned.
  str   x30, [sp, #-16]!
  mov   x1, x0                  // Save the function pointer argument in x1
  mrs   x6, clidr_el1           // Read EL1 CLIDR
  and   x3, x6, #0x7000000      // Mask out all but Level of Coherency (LoC)
  lsr   x3, x3, #23             // Shift so the level value sits in bits[3:1], i.e.
                                // pre-shifted left by 1 to match the level field of
                                // CSSELR and the Set/Way operand.
  cbz   x3, L_Finished          // No need to clean if LoC is 0
  mov   x10, #0                 // Start clean at cache level 0

Loop1:
  add   x2, x10, x10, lsr #1    // Work out 3 x cache level for cache info
  lsr   x12, x6, x2             // bottom 3 bits are the Cache type for this level
  and   x12, x12, #7            // get those 3 bits alone
  cmp   x12, #2                 // what cache is at this level?
  b.lt  L_Skip                  // no cache or only instruction cache at this level
  msr   csselr_el1, x10         // write the Cache Size selection register with current level (CSSELR)
  isb                           // isb to sync the change to the CacheSizeID reg
  mrs   x12, ccsidr_el1         // reads current Cache Size ID register (CCSIDR)
  and   x2, x12, #0x7           // extract the line length field
  add   x2, x2, #4              // add 4 for the line length offset (log2 16 bytes)
  mov   x4, #0x400
  sub   x4, x4, #1
  and   x4, x4, x12, lsr #3     // x4 is the maximum way number (right aligned)
  clz   w5, w4                  // w5 is the bit position of the way size increment
  mov   x7, #0x00008000
  sub   x7, x7, #1
  and   x7, x7, x12, lsr #13    // x7 is the maximum set (index) number (right aligned)

Loop2:
  mov   x9, x4                  // x9 is a working copy of the maximum way number (right aligned)

Loop3:
  lsl   x11, x9, x5
  orr   x0, x10, x11            // factor in the way number and cache level
  lsl   x11, x7, x2
  orr   x0, x0, x11             // factor in the set (index) number

  blr   x1                      // Jump to the requested cache operation

  subs  x9, x9, #1              // decrement the way number
  b.ge  Loop3
  subs  x7, x7, #1              // decrement the set (index) number
  b.ge  Loop2
L_Skip:
  add   x10, x10, #2            // increment the cache level (CSSELR level field is bits[3:1])
  cmp   x3, x10
  b.gt  Loop1

L_Finished:
  dsb   sy
  isb
  ldr   x30, [sp], #0x10
  ret

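// For reference, the walk above corresponds roughly to this C sketch (a
// simplified illustration, not the build's actual source; the Read_*/Write_*
// accessors and CountLeadingZeros32 are hypothetical, and the field layouts
// follow the ARMv8 CLIDR/CCSIDR definitions):
//
//   UINT64 Clidr = Read_CLIDR_EL1 ();
//   UINTN  Loc   = (Clidr >> 24) & 7;                     // Level of Coherency
//   for (UINTN Level = 0; Level < Loc; Level++) {
//     UINTN Ctype = (Clidr >> (3 * Level)) & 7;
//     if (Ctype < 2) continue;                            // no data cache at this level
//     Write_CSSELR_EL1 (Level << 1); ISB ();              // select this level's D-cache
//     UINT64 Ccsidr   = Read_CCSIDR_EL1 ();
//     UINTN  LineLog2 = (Ccsidr & 7) + 4;                 // log2(line length in bytes)
//     UINTN  MaxWay   = (Ccsidr >> 3) & 0x3FF;
//     UINTN  MaxSet   = (Ccsidr >> 13) & 0x7FFF;
//     UINTN  WayShift = CountLeadingZeros32 (MaxWay);     // left-justify the way field
//     for (UINTN Set = 0; Set <= MaxSet; Set++)
//       for (UINTN Way = 0; Way <= MaxWay; Way++)
//         Operation ((Way << WayShift) | (Set << LineLog2) | (Level << 1));
//   }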

ASM_PFX(ArmDataMemoryBarrier):
  dmb   sy
  ret


ASM_PFX(ArmDataSynchronizationBarrier):
  dsb   sy
  ret


ASM_PFX(ArmInstructionSynchronizationBarrier):
  isb
  ret

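// Hedged usage sketch in C: order buffer writes before ringing a device
// doorbell (MmioWrite32 is EDK2's IoLib accessor; DOORBELL_REG is hypothetical):
//
//   Buffer[0] = Value;                 // normal memory write
//   ArmDataMemoryBarrier ();           // ensure the write is observable first
//   MmioWrite32 (DOORBELL_REG, 1);     // device register write
//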
ASM_PFX(ArmWriteVBar):
   EL1_OR_EL2_OR_EL3(x1)
1: msr   vbar_el1, x0            // Set the Address of the EL1 Vector Table in the VBAR register
   b     4f
2: msr   vbar_el2, x0            // Set the Address of the EL2 Vector Table in the VBAR register
   b     4f
3: msr   vbar_el3, x0            // Set the Address of the EL3 Vector Table in the VBAR register
4: isb
   ret

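// VBAR_ELx bits [10:0] are RES0, so the vector table must be 2KB aligned.
// Hedged usage sketch in C (VectorTable is a hypothetical, suitably aligned symbol):
//
//   extern UINT8 VectorTable[];        // 2048-byte-aligned exception vectors
//   ArmWriteVBar ((UINTN)VectorTable);
//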
ASM_PFX(ArmReadVBar):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs   x0, vbar_el1            // Get the Address of the EL1 Vector Table from the VBAR register
   ret
2: mrs   x0, vbar_el2            // Get the Address of the EL2 Vector Table from the VBAR register
   ret
3: mrs   x0, vbar_el3            // Get the Address of the EL3 Vector Table from the VBAR register
   ret


ASM_PFX(ArmEnableVFP):
  // Check whether floating-point is implemented in the processor.
  mov   x1, x30                 // Save LR
  bl    ArmReadIdPfr0           // Read EL1 Processor Feature Register (PFR0)
  mov   x30, x1                 // Restore LR
  ands  x0, x0, #AARCH64_PFR0_FP // Extract bits indicating VFP implementation
  cmp   x0, #0                  // VFP is implemented if '0'.
  b.ne  4f                      // Exit if VFP not implemented.
  // VFP is implemented.
  // Make sure VFP exceptions are not trapped (to any exception level).
  mrs   x0, cpacr_el1           // Read EL1 Coprocessor Access Control Register (CPACR)
  orr   x0, x0, #CPACR_VFP_BITS // Disable VFP traps to EL1 (CPACR.FPEN = 0b11)
  msr   cpacr_el1, x0           // Write back EL1 Coprocessor Access Control Register (CPACR)
  mov   x1, #AARCH64_CPTR_TFP   // TFP Bit for trapping VFP Exceptions
  EL1_OR_EL2_OR_EL3(x2)
1: ret                          // Not configurable in EL1
2: mrs   x0, cptr_el2           // Disable VFP traps to EL2
   bic   x0, x0, x1
   msr   cptr_el2, x0
   ret
3: mrs   x0, cptr_el3           // Disable VFP traps to EL3
   bic   x0, x0, x1
   msr   cptr_el3, x0
4: ret


ASM_PFX(ArmCallWFI):
  wfi
  ret


ASM_PFX(ArmReadMpidr):
  mrs   x0, mpidr_el1           // read EL1 MPIDR
  ret

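// MPIDR_EL1 packs the affinity fields: Aff0 in bits [7:0], Aff1 in [15:8],
// Aff2 in [23:16], Aff3 in [39:32]. A hedged decode sketch in C:
//
//   UINTN Mpidr   = ArmReadMpidr ();
//   UINTN CoreId  = Mpidr & 0xFF;          // Aff0
//   UINTN Cluster = (Mpidr >> 8) & 0xFF;   // Aff1
//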
// Keep old function names for C compatibility for now. Change later?
ASM_PFX(ArmReadTpidrurw):
  mrs   x0, tpidr_el0           // read tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
  ret


// Keep old function names for C compatibility for now. Change later?
ASM_PFX(ArmWriteTpidrurw):
  msr   tpidr_el0, x0           // write tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
  ret


// Arch timers are mandatory on AArch64
ASM_PFX(ArmIsArchTimerImplemented):
  mov   x0, #1
  ret


ASM_PFX(ArmReadIdPfr0):
  mrs   x0, id_aa64pfr0_el1   // Read ID_AA64PFR0 Register
  ret


// Q: id_aa64pfr1_el1 is not fully defined yet. What does this function want to access?
// A: It is used when setting up the arch timer, to check for security extensions
//    and permission to configure it.
//    See: ArmPkg/Library/ArmArchTimerLib/AArch64/ArmArchTimerLib.c
//    The register is not defined yet, but keep this here for now; it should read as all zeros.
ASM_PFX(ArmReadIdPfr1):
  mrs   x0, id_aa64pfr1_el1   // Read ID_AA64PFR1 Register
  ret

// VOID ArmWriteHcr(UINTN Hcr)
ASM_PFX(ArmWriteHcr):
  msr   hcr_el2, x0        // Write the passed HCR value
  ret

// UINTN ArmReadCurrentEL(VOID)
ASM_PFX(ArmReadCurrentEL):
  mrs   x0, CurrentEL
  ret

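// CurrentEL holds the exception level in bits [3:2] (the other bits are RES0).
// A hedged decode sketch in C:
//
//   UINTN El = (ArmReadCurrentEL () >> 2) & 3;   // 1 = EL1, 2 = EL2, 3 = EL3
//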
ASM_FUNCTION_REMOVE_IF_UNREFERENCED