/*	$NetBSD: cpufunc.h,v 1.37.24.1 2007/02/21 18:36:02 snj Exp $	*/

/*
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 */

#ifndef _ARM32_CPUFUNC_H_
#define _ARM32_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>
#include <arm/cpuconf.h>

struct cpu_functions {

	/* CPU functions */

	u_int	(*cf_id)		__P((void));
	void	(*cf_cpwait)		__P((void));

	/* MMU functions */

	u_int	(*cf_control)		__P((u_int, u_int));
	void	(*cf_domains)		__P((u_int));
	void	(*cf_setttb)		__P((u_int));
	u_int	(*cf_faultstatus)	__P((void));
	u_int	(*cf_faultaddress)	__P((void));

	/* TLB functions */

	void	(*cf_tlb_flushID)	__P((void));
	void	(*cf_tlb_flushID_SE)	__P((u_int));
	void	(*cf_tlb_flushI)	__P((void));
	void	(*cf_tlb_flushI_SE)	__P((u_int));
	void	(*cf_tlb_flushD)	__P((void));
	void	(*cf_tlb_flushD_SE)	__P((u_int));

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	I-cache Synch (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
	void	(*cf_icache_sync_all)	__P((void));
	void	(*cf_icache_sync_range)	__P((vaddr_t, vsize_t));

	void	(*cf_dcache_wbinv_all)	__P((void));
	void	(*cf_dcache_wbinv_range) __P((vaddr_t, vsize_t));
	void	(*cf_dcache_inv_range)	__P((vaddr_t, vsize_t));
	void	(*cf_dcache_wb_range)	__P((vaddr_t, vsize_t));

	void	(*cf_idcache_wbinv_all)	__P((void));
	void	(*cf_idcache_wbinv_range) __P((vaddr_t, vsize_t));

	/* Other functions */

	void	(*cf_flush_prefetchbuf)	__P((void));
	void	(*cf_drain_writebuf)	__P((void));
	void	(*cf_flush_brnchtgt_C)	__P((void));
	void	(*cf_flush_brnchtgt_E)	__P((u_int));

	void	(*cf_sleep)		__P((int mode));

	/* Soft functions */

	int	(*cf_dataabt_fixup)	__P((void *));
	int	(*cf_prefetchabt_fixup)	__P((void *));

	void	(*cf_context_switch)	__P((void));

	void	(*cf_setup)		__P((char *));
};

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#define cpu_id()		cpufuncs.cf_id()
#define cpu_cpwait()		cpufuncs.cf_cpwait()

#define cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define cpu_domains(d)		cpufuncs.cf_domains(d)
#define cpu_setttb(t)		cpufuncs.cf_setttb(t)
#define cpu_faultstatus()	cpufuncs.cf_faultstatus()
#define cpu_faultaddress()	cpufuncs.cf_faultaddress()

#define cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
#define cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
#define cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))

#define cpu_flush_prefetchbuf()	cpufuncs.cf_flush_prefetchbuf()
#define cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define cpu_flush_brnchtgt_C()	cpufuncs.cf_flush_brnchtgt_C()
#define cpu_flush_brnchtgt_E(e)	cpufuncs.cf_flush_brnchtgt_E(e)
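
/*
 * Example (an illustrative sketch only, not part of this header's API):
 * after the kernel writes instructions into memory (e.g. a loaded module
 * or a breakpoint), the new code must be written back past the D-cache
 * and the stale I-cache contents discarded, per the rules above.
 * "dst", "insns" and "len" are hypothetical names.
 *
 *	memcpy((void *)dst, insns, len);
 *	cpu_dcache_wb_range(dst, len);	 push the new code out to memory
 *	cpu_icache_sync_range(dst, len); resynchronize the I-cache
 *	cpu_cpwait();			 let the cp15 operations complete
 */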

#define cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define cpu_dataabt_fixup(a)		cpufuncs.cf_dataabt_fixup(a)
#define cpu_prefetchabt_fixup(a)	cpufuncs.cf_prefetchabt_fixup(a)
#define ABORT_FIXUP_OK		0	/* fixup succeeded */
#define ABORT_FIXUP_FAILED	1	/* fixup failed */
#define ABORT_FIXUP_RETURN	2	/* abort handler should return */
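
/*
 * Example (a hedged sketch, not the actual NetBSD abort handler): how a
 * data abort handler might act on the fixup return value.  "tf" is a
 * hypothetical pointer to the saved trapframe.
 *
 *	switch (cpu_dataabt_fixup(tf)) {
 *	case ABORT_FIXUP_OK:
 *		break;		continue with normal fault handling
 *	case ABORT_FIXUP_RETURN:
 *		return;		fixup done; resume the aborted code
 *	default:		ABORT_FIXUP_FAILED
 *		panic("data abort fixup failed");
 *	}
 */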

#define cpu_setup(a)			cpufuncs.cf_setup(a)

int	set_cpufuncs		__P((void));
#define ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define ARCHITECTURE_NOT_SUPPORTED	2	/* not known */

void	cpufunc_nullop		__P((void));
int	cpufunc_null_fixup	__P((void *));
int	early_abort_fixup	__P((void *));
int	late_abort_fixup	__P((void *));
u_int	cpufunc_id		__P((void));
u_int	cpufunc_control		__P((u_int, u_int));
void	cpufunc_domains		__P((u_int));
u_int	cpufunc_faultstatus	__P((void));
u_int	cpufunc_faultaddress	__P((void));

#ifdef CPU_ARM3
u_int	arm3_control		__P((u_int, u_int));
void	arm3_cache_flush	__P((void));
#endif	/* CPU_ARM3 */

#if defined(CPU_ARM6) || defined(CPU_ARM7)
void	arm67_setttb		__P((u_int));
void	arm67_tlb_flush		__P((void));
void	arm67_tlb_purge		__P((u_int));
void	arm67_cache_flush	__P((void));
void	arm67_context_switch	__P((void));
#endif	/* CPU_ARM6 || CPU_ARM7 */

#ifdef CPU_ARM6
void	arm6_setup		__P((char *));
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
void	arm7_setup		__P((char *));
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
int	arm7_dataabt_fixup	__P((void *));
void	arm7tdmi_setup		__P((char *));
void	arm7tdmi_setttb		__P((u_int));
void	arm7tdmi_tlb_flushID	__P((void));
void	arm7tdmi_tlb_flushID_SE	__P((u_int));
void	arm7tdmi_cache_flushID	__P((void));
void	arm7tdmi_context_switch	__P((void));
#endif /* CPU_ARM7TDMI */

#ifdef CPU_ARM8
void	arm8_setttb		__P((u_int));
void	arm8_tlb_flushID	__P((void));
void	arm8_tlb_flushID_SE	__P((u_int));
void	arm8_cache_flushID	__P((void));
void	arm8_cache_flushID_E	__P((u_int));
void	arm8_cache_cleanID	__P((void));
void	arm8_cache_cleanID_E	__P((u_int));
void	arm8_cache_purgeID	__P((void));
void	arm8_cache_purgeID_E	__P((u_int entry));

void	arm8_cache_syncI	__P((void));
void	arm8_cache_cleanID_rng	__P((vaddr_t, vsize_t));
void	arm8_cache_cleanD_rng	__P((vaddr_t, vsize_t));
void	arm8_cache_purgeID_rng	__P((vaddr_t, vsize_t));
void	arm8_cache_purgeD_rng	__P((vaddr_t, vsize_t));
void	arm8_cache_syncI_rng	__P((vaddr_t, vsize_t));

void	arm8_context_switch	__P((void));

void	arm8_setup		__P((char *));

u_int	arm8_clock_config	__P((u_int, u_int));
#endif

#ifdef CPU_SA110
void	sa110_setup		__P((char *));
void	sa110_context_switch	__P((void));
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
void	sa11x0_drain_readbuf	__P((void));

void	sa11x0_context_switch	__P((void));
void	sa11x0_cpu_sleep	__P((int));

void	sa11x0_setup		__P((char *));
#endif

#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
void	sa1_setttb		__P((u_int));

void	sa1_tlb_flushID_SE	__P((u_int));

void	sa1_cache_flushID	__P((void));
void	sa1_cache_flushI	__P((void));
void	sa1_cache_flushD	__P((void));
void	sa1_cache_flushD_SE	__P((u_int));

void	sa1_cache_cleanID	__P((void));
void	sa1_cache_cleanD	__P((void));
void	sa1_cache_cleanD_E	__P((u_int));

void	sa1_cache_purgeID	__P((void));
void	sa1_cache_purgeID_E	__P((u_int));
void	sa1_cache_purgeD	__P((void));
void	sa1_cache_purgeD_E	__P((u_int));

void	sa1_cache_syncI		__P((void));
void	sa1_cache_cleanID_rng	__P((vaddr_t, vsize_t));
void	sa1_cache_cleanD_rng	__P((vaddr_t, vsize_t));
void	sa1_cache_purgeID_rng	__P((vaddr_t, vsize_t));
void	sa1_cache_purgeD_rng	__P((vaddr_t, vsize_t));
void	sa1_cache_syncI_rng	__P((vaddr_t, vsize_t));
#endif

#ifdef CPU_ARM9
void	arm9_setttb		__P((u_int));

void	arm9_tlb_flushID_SE	__P((u_int));

void	arm9_icache_sync_all	__P((void));
void	arm9_icache_sync_range	__P((vaddr_t, vsize_t));

void	arm9_dcache_wbinv_all	__P((void));
void	arm9_dcache_wbinv_range __P((vaddr_t, vsize_t));
void	arm9_dcache_inv_range	__P((vaddr_t, vsize_t));
void	arm9_dcache_wb_range	__P((vaddr_t, vsize_t));

void	arm9_idcache_wbinv_all	__P((void));
void	arm9_idcache_wbinv_range __P((vaddr_t, vsize_t));

void	arm9_context_switch	__P((void));

void	arm9_setup		__P((char *));

extern unsigned arm9_dcache_sets_max;
extern unsigned arm9_dcache_sets_inc;
extern unsigned arm9_dcache_index_max;
extern unsigned arm9_dcache_index_inc;
#endif

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
void	arm10_tlb_flushID_SE	__P((u_int));
void	arm10_tlb_flushI_SE	__P((u_int));

void	arm10_context_switch	__P((void));

void	arm10_setup		__P((char *));
#endif

#ifdef CPU_ARM11
void	arm11_setttb		__P((u_int));

void	arm11_tlb_flushID_SE	__P((u_int));
void	arm11_tlb_flushI_SE	__P((u_int));

void	arm11_context_switch	__P((void));

void	arm11_setup		__P((char *string));
void	arm11_tlb_flushID	__P((void));
void	arm11_tlb_flushI	__P((void));
void	arm11_tlb_flushD	__P((void));
void	arm11_tlb_flushD_SE	__P((u_int va));

void	arm11_drain_writebuf	__P((void));
#endif

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
void	armv5_ec_setttb			__P((u_int));

void	armv5_ec_icache_sync_all	__P((void));
void	armv5_ec_icache_sync_range	__P((vaddr_t, vsize_t));

void	armv5_ec_dcache_wbinv_all	__P((void));
void	armv5_ec_dcache_wbinv_range	__P((vaddr_t, vsize_t));
void	armv5_ec_dcache_inv_range	__P((vaddr_t, vsize_t));
void	armv5_ec_dcache_wb_range	__P((vaddr_t, vsize_t));

void	armv5_ec_idcache_wbinv_all	__P((void));
void	armv5_ec_idcache_wbinv_range	__P((vaddr_t, vsize_t));
#endif

#if defined(CPU_ARM10) || defined(CPU_ARM11)
void	armv5_setttb		__P((u_int));

void	armv5_icache_sync_all	__P((void));
void	armv5_icache_sync_range	__P((vaddr_t, vsize_t));

void	armv5_dcache_wbinv_all	__P((void));
void	armv5_dcache_wbinv_range __P((vaddr_t, vsize_t));
void	armv5_dcache_inv_range	__P((vaddr_t, vsize_t));
void	armv5_dcache_wb_range	__P((vaddr_t, vsize_t));

void	armv5_idcache_wbinv_all	__P((void));
void	armv5_idcache_wbinv_range __P((vaddr_t, vsize_t));

extern unsigned armv5_dcache_sets_max;
extern unsigned armv5_dcache_sets_inc;
extern unsigned armv5_dcache_index_max;
extern unsigned armv5_dcache_index_inc;
#endif

#if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) ||	\
    defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||	\
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)

void	armv4_tlb_flushID	__P((void));
void	armv4_tlb_flushI	__P((void));
void	armv4_tlb_flushD	__P((void));
void	armv4_tlb_flushD_SE	__P((u_int));

void	armv4_drain_writebuf	__P((void));
#endif

#if defined(CPU_IXP12X0)
void	ixp12x0_drain_readbuf	__P((void));
void	ixp12x0_context_switch	__P((void));
void	ixp12x0_setup		__P((char *));
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
void	xscale_cpwait		__P((void));

void	xscale_cpu_sleep	__P((int));

u_int	xscale_control		__P((u_int, u_int));

void	xscale_setttb		__P((u_int));

void	xscale_tlb_flushID_SE	__P((u_int));

void	xscale_cache_flushID	__P((void));
void	xscale_cache_flushI	__P((void));
void	xscale_cache_flushD	__P((void));
void	xscale_cache_flushD_SE	__P((u_int));

void	xscale_cache_cleanID	__P((void));
void	xscale_cache_cleanD	__P((void));
void	xscale_cache_cleanD_E	__P((u_int));

void	xscale_cache_clean_minidata __P((void));

void	xscale_cache_purgeID	__P((void));
void	xscale_cache_purgeID_E	__P((u_int));
void	xscale_cache_purgeD	__P((void));
void	xscale_cache_purgeD_E	__P((u_int));

void	xscale_cache_syncI	__P((void));
void	xscale_cache_cleanID_rng __P((vaddr_t, vsize_t));
void	xscale_cache_cleanD_rng	__P((vaddr_t, vsize_t));
void	xscale_cache_purgeID_rng __P((vaddr_t, vsize_t));
void	xscale_cache_purgeD_rng	__P((vaddr_t, vsize_t));
void	xscale_cache_syncI_rng	__P((vaddr_t, vsize_t));
void	xscale_cache_flushD_rng	__P((vaddr_t, vsize_t));

void	xscale_context_switch	__P((void));

void	xscale_setup		__P((char *));
#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */

#define tlb_flush	cpu_tlb_flushID
#define setttb		cpu_setttb
#define drain_writebuf	cpu_drain_writebuf

/*
 * Macros for manipulating CPU interrupts
 */
#ifdef __PROG32
static __inline u_int32_t __set_cpsr_c(u_int bic, u_int eor) __attribute__((__unused__));

static __inline u_int32_t
__set_cpsr_c(u_int bic, u_int eor)
{
	u_int32_t tmp, ret;

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"bic     %1, %0, %2\n"	/* Clear bits */
		"eor     %1, %1, %3\n"	/* XOR bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor) : "memory");

	return ret;
}

#define disable_interrupts(mask)					\
	(__set_cpsr_c((mask) & (I32_bit | F32_bit),			\
		      (mask) & (I32_bit | F32_bit)))

#define enable_interrupts(mask)						\
	(__set_cpsr_c((mask) & (I32_bit | F32_bit), 0))

#define restore_interrupts(old_cpsr)					\
	(__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
#else /* ! __PROG32 */
#define disable_interrupts(mask)					\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE),		\
		 (mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))

#define enable_interrupts(mask)						\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), 0))

#define restore_interrupts(old_r15)					\
	(set_r15((R15_IRQ_DISABLE | R15_FIQ_DISABLE),			\
		 (old_r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
#endif /* __PROG32 */
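
/*
 * Example (an illustrative sketch): the usual critical-section pattern
 * built from these macros.  disable_interrupts() returns the previous
 * CPSR (or r15) value, which must be handed back to restore_interrupts()
 * unmodified.  I32_bit is the IRQ disable bit from <arm/armreg.h>.
 *
 *	u_int savedints;
 *
 *	savedints = disable_interrupts(I32_bit);
 *	... code that must not be interrupted by IRQs ...
 *	restore_interrupts(savedints);
 */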

#ifdef __PROG32
/* Functions to manipulate the CPSR. */
u_int	SetCPSR(u_int, u_int);
u_int	GetCPSR(void);
#else
/* Functions to manipulate the processor control bits in r15. */
u_int	set_r15(u_int, u_int);
u_int	get_r15(void);
#endif /* __PROG32 */

/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void	set_stackptr	__P((u_int, u_int));
u_int	get_stackptr	__P((u_int));

/*
 * Miscellany
 */

int	get_pc_str_offset	__P((void));

/*
 * CPU functions from locore.S
 */

void	cpu_reset	__P((void)) __attribute__((__noreturn__));

/*
 * Cache info variables.
 */

/* PRIMARY CACHE VARIABLES */
extern int	arm_picache_size;
extern int	arm_picache_line_size;
extern int	arm_picache_ways;

extern int	arm_pdcache_size;	/* and unified */
extern int	arm_pdcache_line_size;
extern int	arm_pdcache_ways;

extern int	arm_pcache_type;
extern int	arm_pcache_unified;

extern int	arm_dcache_align;
extern int	arm_dcache_align_mask;

#endif	/* _KERNEL */
#endif	/* _ARM32_CPUFUNC_H_ */

/* End of cpufunc.h */