#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, ARM Limited. All rights reserved.
# Copyright (c) 2016, Linaro Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution.  The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------

#include <Chipset/AArch64.h>
#include <AsmMacroIoLibV8.h>
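// Chipset/AArch64.h supplies the AARCH64_PFR0_FP and AARCH64_CPTR_TFP masks
// used below; AsmMacroIoLibV8.h supplies ASM_FUNC() and the EL1_OR_EL2[_OR_EL3]
// dispatch macros.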

.set CTRL_M_BIT,      (1 << 0)
.set CTRL_A_BIT,      (1 << 1)
.set CTRL_C_BIT,      (1 << 2)
.set CTRL_I_BIT,      (1 << 12)
.set CPACR_VFP_BITS,  (3 << 20)
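// SCTLR_ELx control bits used in this file: M (bit 0) enables the MMU,
// A (bit 1) enables alignment checking, C (bit 2) enables data caches and
// I (bit 12) enables the instruction cache. CPACR_VFP_BITS sets
// CPACR_EL1.FPEN (bits [21:20]) to 0b11, i.e. no FP/SIMD trapping at EL0/EL1.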

ASM_FUNC(ArmInvalidateDataCacheEntryByMVA)
  dc      ivac, x0    // Invalidate single data cache line
  ret


ASM_FUNC(ArmCleanDataCacheEntryByMVA)
  dc      cvac, x0    // Clean single data cache line
  ret


ASM_FUNC(ArmCleanDataCacheEntryToPoUByMVA)
  dc      cvau, x0    // Clean single data cache line to PoU
  ret

ASM_FUNC(ArmInvalidateInstructionCacheEntryToPoUByMVA)
  ic      ivau, x0    // Invalidate single instruction cache line to PoU
  ret


ASM_FUNC(ArmCleanInvalidateDataCacheEntryByMVA)
  dc      civac, x0   // Clean and invalidate single data cache line
  ret


ASM_FUNC(ArmInvalidateDataCacheEntryBySetWay)
  dc      isw, x0     // Invalidate this line
  ret


ASM_FUNC(ArmCleanInvalidateDataCacheEntryBySetWay)
  dc      cisw, x0    // Clean and invalidate this line
  ret


ASM_FUNC(ArmCleanDataCacheEntryBySetWay)
  dc      csw, x0     // Clean this line
  ret


ASM_FUNC(ArmInvalidateInstructionCache)
  ic      iallu       // Invalidate entire instruction cache
  dsb     sy
  isb
  ret

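// The EL1_OR_EL2_OR_EL3(reg) macro from AsmMacroIoLibV8.h reads CurrentEL into
// the given scratch register and branches forward to local label 1, 2 or 3
// depending on whether the code is running at EL1, EL2 or EL3. Each caller
// below therefore supplies numeric labels 1/2/3 plus a common join label
// (typically 4). EL1_OR_EL2(reg) is the two-level variant.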
ASM_FUNC(ArmEnableMmu)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1       // Read System control register EL1
   b       4f
2: mrs     x0, sctlr_el2       // Read System control register EL2
   b       4f
3: mrs     x0, sctlr_el3       // Read System control register EL3
4: orr     x0, x0, #CTRL_M_BIT // Set MMU enable bit
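   // Invalidate the TLBs for the current translation regime and complete the
   // invalidation (dsb nsh; isb) before the SCTLR write turns the MMU on.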
   EL1_OR_EL2_OR_EL3(x1)
1: tlbi    vmalle1
   dsb     nsh
   isb
   msr     sctlr_el1, x0       // Write back
   b       4f
2: tlbi    alle2
   dsb     nsh
   isb
   msr     sctlr_el2, x0       // Write back
   b       4f
3: tlbi    alle3
   dsb     nsh
   isb
   msr     sctlr_el3, x0       // Write back
4: isb
   ret


ASM_FUNC(ArmDisableMmu)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Read System Control Register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Read System Control Register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Read System Control Register EL3
4: and     x0, x0, #~CTRL_M_BIT  // Clear MMU enable bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back
   tlbi    vmalle1
   b       4f
2: msr     sctlr_el2, x0        // Write back
   tlbi    alle2
   b       4f
3: msr     sctlr_el3, x0        // Write back
   tlbi    alle3
4: dsb     sy
   isb
   ret


ASM_FUNC(ArmDisableCachesAndMmu)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: mov     x1, #~(CTRL_M_BIT | CTRL_C_BIT | CTRL_I_BIT)  // Disable MMU, D & I caches
   and     x0, x0, x1
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


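// Returns SCTLR_ELx.M (bit 0), so the result is nonzero exactly when the MMU
// is enabled; the value is directly usable as a BOOLEAN by C callers.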
ASM_FUNC(ArmMmuEnabled)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #CTRL_M_BIT
   ret


ASM_FUNC(ArmEnableDataCache)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: orr     x0, x0, #CTRL_C_BIT  // Set C bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_FUNC(ArmDisableDataCache)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #~CTRL_C_BIT  // Clear C bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_FUNC(ArmEnableInstructionCache)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: orr     x0, x0, #CTRL_I_BIT  // Set I bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_FUNC(ArmDisableInstructionCache)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #~CTRL_I_BIT  // Clear I bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_FUNC(ArmEnableAlignmentCheck)
   EL1_OR_EL2(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       3f
2: mrs     x0, sctlr_el2        // Get control register EL2
3: orr     x0, x0, #CTRL_A_BIT  // Set A (alignment check) bit
   EL1_OR_EL2(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       3f
2: msr     sctlr_el2, x0        // Write back control register
3: dsb     sy
   isb
   ret


ASM_FUNC(ArmDisableAlignmentCheck)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #~CTRL_A_BIT  // Clear A (alignment check) bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


// Branch prediction is always on in AArch64; anything else is implementation
// defined. Kept as a no-op for C compatibility for now.
ASM_FUNC(ArmEnableBranchPrediction)
  ret


// Branch prediction is always on in AArch64; anything else is implementation
// defined. Kept as a no-op for C compatibility for now.
ASM_FUNC(ArmDisableBranchPrediction)
  ret

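// VOID AArch64AllDataCachesOperation (AARCH64_CACHE_OPERATION DataCacheOperation)
//
// Walks every data/unified cache level reported by CLIDR_EL1, up to the Level
// of Coherency, and calls the operation passed in x0 once per set/way. The
// operand handed to the callback follows the DC <op>, Xt set/way format from
// the ARM ARM:
//   bits [3:1]       cache level (level << 1, as iterated in x10 below)
//   bits [B-1:L]     set (index) number, where L = log2(line size in bytes)
//   bits [31:32-A]   way number, where A = log2(associativity)
// The typedef name in the prototype above is illustrative; the callback is
// one of the Arm*DataCacheEntryBySetWay helpers defined earlier in this file.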
ASM_FUNC(AArch64AllDataCachesOperation)
// We can use regs 0-7 and 9-15 without having to save/restore.
// Save our link register on the stack. - The stack must always be quad-word aligned
  str   x30, [sp, #-16]!
  mov   x1, x0                  // Save the function pointer argument in x1
  mrs   x6, clidr_el1           // Read EL1 CLIDR
  and   x3, x6, #0x7000000      // Mask out all but the Level of Coherency (LoC)
  lsr   x3, x3, #23             // x3 = LoC << 1; cache levels are kept shifted
                                // left by one to line up with CSSELR and the
                                // set/way operand.
  cbz   x3, L_Finished          // No need to clean if LoC is 0
  mov   x10, #0                 // Start clean at cache level 0

Loop1:
  add   x2, x10, x10, lsr #1    // Work out 3 x cache level for the CLIDR Ctype shift
  lsr   x12, x6, x2             // bottom 3 bits are the cache type for this level
  and   x12, x12, #7            // get those 3 bits alone
  cmp   x12, #2                 // is there a data or unified cache at this level?
  b.lt  L_Skip                  // no cache, or instruction cache only, at this level
  msr   csselr_el1, x10         // write the current level to the Cache Size Selection Register (CSSELR)
  isb                           // sync the change to the Cache Size ID register
  mrs   x12, ccsidr_el1         // read the current Cache Size ID Register (CCSIDR)
  and   x2, x12, #0x7           // extract the line length field
  add   x2, x2, #4              // add 4 for the line length offset (log2 of the 16-byte minimum line size)
  mov   x4, #0x400
  sub   x4, x4, #1
  and   x4, x4, x12, lsr #3     // x4 is the maximum way number (right aligned)
  clz   w5, w4                  // w5 is the bit position of the way size increment
  mov   x7, #0x00008000
  sub   x7, x7, #1
  and   x7, x7, x12, lsr #13    // x7 is the maximum set (index) number (right aligned)

Loop2:
  mov   x9, x4                  // x9 is a working copy of the maximum way number

Loop3:
  lsl   x11, x9, x5
  orr   x0, x10, x11            // factor in the way and cache level numbers
  lsl   x11, x7, x2
  orr   x0, x0, x11             // factor in the set (index) number

  blr   x1                      // call the requested cache operation

  subs  x9, x9, #1              // decrement the way number
  b.ge  Loop3
  subs  x7, x7, #1              // decrement the set (index) number
  b.ge  Loop2
L_Skip:
  add   x10, x10, #2            // move to the next cache level (kept shifted left by one)
  cmp   x3, x10
  b.gt  Loop1

L_Finished:
  dsb   sy
  isb
  ldr   x30, [sp], #0x10
  ret
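
// A minimal usage sketch: a C caller cleans and invalidates all data caches
// by set/way with, for example,
//   AArch64AllDataCachesOperation (ArmCleanInvalidateDataCacheEntryBySetWay);
// Note that set/way operations only affect the caches of the executing core;
// they are not a substitute for by-MVA maintenance when memory is shared with
// other masters.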


ASM_FUNC(ArmDataMemoryBarrier)
  dmb   sy
  ret


ASM_FUNC(ArmDataSynchronizationBarrier)
  dsb   sy
  ret


ASM_FUNC(ArmInstructionSynchronizationBarrier)
  isb
  ret


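// The vector table base written below must be 2 KB aligned: VBAR_ELx bits
// [10:0] are RES0.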
ASM_FUNC(ArmWriteVBar)
   EL1_OR_EL2_OR_EL3(x1)
1: msr   vbar_el1, x0            // Set the Address of the EL1 Vector Table in the VBAR register
   b     4f
2: msr   vbar_el2, x0            // Set the Address of the EL2 Vector Table in the VBAR register
   b     4f
3: msr   vbar_el3, x0            // Set the Address of the EL3 Vector Table in the VBAR register
4: isb
   ret

ASM_FUNC(ArmReadVBar)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs   x0, vbar_el1            // Read the Address of the EL1 Vector Table from the VBAR register
   ret
2: mrs   x0, vbar_el2            // Read the Address of the EL2 Vector Table from the VBAR register
   ret
3: mrs   x0, vbar_el3            // Read the Address of the EL3 Vector Table from the VBAR register
   ret


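// CPACR_EL1.FPEN (set via CPACR_VFP_BITS above) controls FP trapping at
// EL0/EL1; trapping at EL2/EL3 is governed instead by the TFP bit of
// CPTR_EL2/CPTR_EL3, which is what AARCH64_CPTR_TFP masks below.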
ASM_FUNC(ArmEnableVFP)
  // Check whether floating-point is implemented in the processor.
  mov   x1, x30                 // Save LR
  bl    ArmReadIdPfr0           // Read EL1 Processor Feature Register (PFR0)
  mov   x30, x1                 // Restore LR
  ands  x0, x0, #AARCH64_PFR0_FP // Extract the bits indicating VFP implementation
  cmp   x0, #0                  // VFP is implemented if the field reads as '0'.
  b.ne  4f                      // Exit if VFP is not implemented.
  // VFP is implemented.
  // Make sure VFP exceptions are not trapped (to any exception level).
  mrs   x0, cpacr_el1           // Read EL1 Coprocessor Access Control Register (CPACR)
  orr   x0, x0, #CPACR_VFP_BITS // Disable VFP traps to EL1
  msr   cpacr_el1, x0           // Write back EL1 Coprocessor Access Control Register (CPACR)
  mov   x1, #AARCH64_CPTR_TFP   // TFP bit for trapping VFP exceptions
  EL1_OR_EL2_OR_EL3(x2)
1: ret                          // Not configurable in EL1
2: mrs   x0, cptr_el2           // Disable VFP traps to EL2
   bic   x0, x0, x1
   msr   cptr_el2, x0
   ret
3: mrs   x0, cptr_el3           // Disable VFP traps to EL3
   bic   x0, x0, x1
   msr   cptr_el3, x0
4: ret


ASM_FUNC(ArmCallWFI)
  wfi
  ret


ASM_FUNC(ArmReadMpidr)
  mrs   x0, mpidr_el1           // read EL1 MPIDR
  ret


// Keep old function names for C compatibility for now. Change later?
ASM_FUNC(ArmReadTpidrurw)
  mrs   x0, tpidr_el0           // read tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
  ret


// Keep old function names for C compatibility for now. Change later?
ASM_FUNC(ArmWriteTpidrurw)
  msr   tpidr_el0, x0           // write tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
  ret


// Arch timers are mandatory on AArch64
ASM_FUNC(ArmIsArchTimerImplemented)
  mov   x0, #1
  ret


ASM_FUNC(ArmReadIdPfr0)
  mrs   x0, id_aa64pfr0_el1   // Read ID_AA64PFR0 Register
  ret


// Q: id_aa64pfr1_el1 is not defined yet. What does this function want to access?
// A: It is used to set up the arch timer - to check whether we have the
//    security extensions and permission to configure things.
//    See: ArmPkg/Library/ArmArchTimerLib/AArch64/ArmArchTimerLib.c
//    Not defined yet, but keep it here for now; it should read as all zeros.
ASM_FUNC(ArmReadIdPfr1)
  mrs   x0, id_aa64pfr1_el1   // Read ID_AA64PFR1 Register
  ret

// VOID ArmWriteHcr(UINTN Hcr)
ASM_FUNC(ArmWriteHcr)
  msr   hcr_el2, x0        // Write the passed HCR value
  ret

// UINTN ArmReadHcr(VOID)
ASM_FUNC(ArmReadHcr)
  mrs   x0, hcr_el2
  ret

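// CurrentEL encodes the exception level in bits [3:2] of the returned value:
// 0x4 = EL1, 0x8 = EL2, 0xC = EL3.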
// UINTN ArmReadCurrentEL(VOID)
ASM_FUNC(ArmReadCurrentEL)
  mrs   x0, CurrentEL
  ret
    447 
    448 ASM_FUNCTION_REMOVE_IF_UNREFERENCED
    449