/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005  MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/cpu-features.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>
#include <spaces.h>

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

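/*
 * A usage sketch (FLASH_BASE is hypothetical): for execute-in-place
 * flash behind a byte-lane-swapping bus, __raw_readw() hands back the
 * bus data untouched, while readw() applies the ioswab fixup:
 *
 *	void __iomem *flash = (void __iomem *)CKSEG1ADDR(FLASH_BASE);
 *	u16 raw  = __raw_readw(flash);	// bus byte order, unswapped
 *	u16 word = readw(flash);	// fixed up via ioswabw()
 */
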
#define IO_SPACE_LIMIT 0xffff

#ifdef CONFIG_DYNAMIC_IO_PORT_BASE

static inline ulong mips_io_port_base(void)
{
	DECLARE_GLOBAL_DATA_PTR;

	return gd->arch.io_port_base;
}

static inline void set_io_port_base(unsigned long base)
{
	DECLARE_GLOBAL_DATA_PTR;

	gd->arch.io_port_base = base;
	barrier();
}

#else /* !CONFIG_DYNAMIC_IO_PORT_BASE */

static inline ulong mips_io_port_base(void)
{
	return 0;
}

static inline void set_io_port_base(unsigned long base)
{
	BUG_ON(base);
}

#endif /* !CONFIG_DYNAMIC_IO_PORT_BASE */
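
/*
 * A board-code sketch (assuming CONFIG_DYNAMIC_IO_PORT_BASE; PCI_IO_PHYS
 * is a hypothetical I/O-window address): the base is set once, early in
 * boot, and the in[bwl]()/out[bwl]() accessors below add the port number
 * to it:
 *
 *	set_io_port_base(CKSEG1ADDR(PCI_IO_PHYS));
 *	outb(0x00, 0x3f8 + 4);		// e.g. poke a legacy UART register
 */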

/*
 *     virt_to_phys    -       map virtual addresses to physical
 *     @address: address to remap
 *
 *     The returned physical address is the physical (CPU) mapping for
 *     the memory address given. It is only valid to use this function on
 *     addresses directly mapped or allocated via kmalloc.
 *
 *     This function does not give bus mappings for DMA transfers. In
 *     almost all conceivable cases a device driver should not be using
 *     this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	unsigned long addr = (unsigned long)address;

	/* this corresponds to the kernel implementation of __pa() */
#ifdef CONFIG_64BIT
	if (addr < CKSEG0)
		return XPHYSADDR(addr);
#endif
	return CPHYSADDR(addr);
}
#define virt_to_phys virt_to_phys
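
/*
 * A sketch: for a KSEG0 pointer this simply masks off the segment bits,
 * e.g.
 *
 *	void *buf = (void *)CKSEG0ADDR(0x100000);	// hypothetical
 *	unsigned long pa = virt_to_phys(buf);		// == 0x100000
 */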

/*
 *     phys_to_virt    -       map physical address to virtual
 *     @address: address to remap
 *
 *     The returned virtual address is a current CPU mapping for
 *     the memory address given. It is only valid to use this function on
 *     addresses that have a kernel mapping.
 *
 *     This function does not handle bus mappings for DMA transfers. In
 *     almost all conceivable cases a device driver should not be using
 *     this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
#define phys_to_virt phys_to_virt
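
/*
 * A sketch: the inverse of virt_to_phys() for directly-mapped memory,
 * e.g. round-tripping the example above (assuming PHYS_OFFSET is 0):
 *
 *	void *va = phys_to_virt(pa);	// the direct-mapped alias of pa
 */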

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

static inline void __iomem *__ioremap_mode(phys_addr_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr;
	phys_addr_t phys_addr;

	addr = plat_ioremap(offset, size, flags);
	if (addr)
		return addr;

	phys_addr = fixup_bigphys_addr(offset, size);
	return (void __iomem *)(unsigned long)CKSEG1ADDR(phys_addr);
}

/*
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

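/*
 * A usage sketch (DEV_PHYS and the register offset are hypothetical):
 *
 *	void __iomem *regs = ioremap(DEV_PHYS, 0x100);
 *	u32 stat = readl(regs + 0x04);
 *	writel(stat | 0x1, regs + 0x04);
 *	iounmap(regs);
 */
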
/*
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache

/*
 * ioremap_cachable -	map bus memory into CPU space
 * @offset:	    bus address of the memory
 * @size:	    size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cacheable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)

/*
 * These two are MIPS-specific ioremap variants.  ioremap_cacheable_cow
 * requests a cacheable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size)				\
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size)			\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)

static inline void iounmap(const volatile void __iomem *addr)
{
	plat_iounmap(addr);
}

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define war_octeon_io_reorder_wmb()		wmb()
#else
#define war_octeon_io_reorder_wmb()		do { } while (0)
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	war_octeon_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		type __tmp;						\
									\
		__asm__ __volatile__(					\
			".set	arch=r4000"	"\t\t# __writeq""\n\t"	\
			"dsll32 %L0, %L0, 0"			"\n\t"	\
			"dsrl32 %L0, %L0, 0"			"\n\t"	\
			"dsll32 %M0, %M0, 0"			"\n\t"	\
			"or	%L0, %L0, %M0"			"\n\t"	\
			"sd	%L0, %2"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		__asm__ __volatile__(					\
			".set	arch=r4000"	"\t\t# __readq" "\n\t"	\
			"ld	%L0, %1"			"\n\t"	\
			"dsra32 %M0, %L0, 0"			"\n\t"	\
			"sll	%L0, %L0, 0"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	war_octeon_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base() + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base() + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
									\
	return pfx##ioswab##bwlq(__addr, __val);			\
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type)				\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type)					\
__BUILD_MEMORY_PFX(, bwlq, type)					\
__BUILD_MEMORY_PFX(__mem_, bwlq, type)					\

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)
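
/*
 * The invocations above instantiate the accessor families:
 * read[bwlq]()/write[bwlq]() (swizzled and byte-swab fixed up),
 * __raw_read[bwlq]()/__raw_write[bwlq]() (no software swapping), and
 * the __mem_ variants used by the string helpers below.  A sketch,
 * with REG_CTRL hypothetical:
 *
 *	u32 v = readl(regs + REG_CTRL);
 *	__raw_writel(v, regs + REG_CTRL);	// raw, unswapped store
 */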

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, )			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
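
/*
 * Likewise the BUILDIO_IOPORT invocations above generate in[bwl]() and
 * out[bwl]() (plus inq/outq on 64-bit, and the _p-suffixed variants),
 * all addressed relative to mips_io_port_base().  A sketch with
 * hypothetical port numbers:
 *
 *	outb(0x80, 0x70);
 *	u8 v = inb(0x71);
 */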

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)

#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl
#define readq_relaxed			readq

#define writeb_relaxed			writeb
#define writew_relaxed			writew
#define writel_relaxed			writel
#define writeq_relaxed			writeq

#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
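
/*
 * A sketch: the _be accessors address big-endian device registers
 * independently of CPU endianness (REG_ID hypothetical):
 *
 *	u32 id = readl_be(regs + REG_ID);
 *	writel_be(id | 0x1, regs + REG_ID);
 */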

/*
 * Some code tests for these symbols
 */
#define readq				readq
#define writeq				writeq

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
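
/*
 * A sketch: draining a 16-bit data FIFO with the string accessors
 * generated above (REG_FIFO and the port number are hypothetical):
 *
 *	u16 buf[64];
 *	readsw(regs + REG_FIFO, buf, 64);	// MMIO variant
 *	insw(0x1f0, buf, 64);			// I/O-port variant
 */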

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define mmiowb() wmb()
#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
#endif

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *)addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *)src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *)dst, src, count);
}
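
/*
 * A sketch: copying a block out of device memory and clearing a
 * mailbox region (offsets and sizes hypothetical):
 *
 *	u8 desc[32];
 *	memcpy_fromio(desc, regs + 0x100, sizeof(desc));
 *	memset_io(regs + 0x200, 0, 0x40);
 */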

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)    (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
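
/*
 * A sketch: on a big-endian CPU the least-significant 32 bits of the
 * 64-bit bus word live at offset 4, which __CSR_32_ADJUST compensates
 * for (csr_base hypothetical):
 *
 *	u32 v = csr_in32(csr_base + 0x10);
 *	csr_out32(v | 0x1, csr_base + 0x10);
 */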

/*
 * U-Boot specific
 */
#define sync()		mmiowb()

#define MAP_NOCACHE	1

static inline void *
map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
{
	if (flags == MAP_NOCACHE)
		return ioremap(paddr, len);

	return (void *)CKSEG0ADDR(paddr);
}
#define map_physmem map_physmem
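
/*
 * A usage sketch (DEV_PHYS hypothetical): drivers normally go through
 * map_physmem() instead of calling ioremap() directly; MAP_NOCACHE
 * yields an uncached (KSEG1) mapping, any other flag value the cached
 * KSEG0 alias:
 *
 *	void *p = map_physmem(DEV_PHYS, 0x1000, MAP_NOCACHE);
 */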

#define __BUILD_CLRBITS(bwlq, sfx, end, type)				\
									\
static inline void clrbits_##sfx(volatile void __iomem *mem, type clr)	\
{									\
	type __val = __raw_read##bwlq(mem);				\
	__val = end##_to_cpu(__val);					\
	__val &= ~clr;							\
	__val = cpu_to_##end(__val);					\
	__raw_write##bwlq(__val, mem);					\
}

#define __BUILD_SETBITS(bwlq, sfx, end, type)				\
									\
static inline void setbits_##sfx(volatile void __iomem *mem, type set)	\
{									\
	type __val = __raw_read##bwlq(mem);				\
	__val = end##_to_cpu(__val);					\
	__val |= set;							\
	__val = cpu_to_##end(__val);					\
	__raw_write##bwlq(__val, mem);					\
}

#define __BUILD_CLRSETBITS(bwlq, sfx, end, type)			\
									\
static inline void clrsetbits_##sfx(volatile void __iomem *mem,		\
					type clr, type set)		\
{									\
	type __val = __raw_read##bwlq(mem);				\
	__val = end##_to_cpu(__val);					\
	__val &= ~clr;							\
	__val |= set;							\
	__val = cpu_to_##end(__val);					\
	__raw_write##bwlq(__val, mem);					\
}

#define BUILD_CLRSETBITS(bwlq, sfx, end, type)				\
									\
__BUILD_CLRBITS(bwlq, sfx, end, type)					\
__BUILD_SETBITS(bwlq, sfx, end, type)					\
__BUILD_CLRSETBITS(bwlq, sfx, end, type)

#define __to_cpu(v)		(v)
#define cpu_to__(v)		(v)

BUILD_CLRSETBITS(b, 8, _, u8)
BUILD_CLRSETBITS(w, le16, le16, u16)
BUILD_CLRSETBITS(w, be16, be16, u16)
BUILD_CLRSETBITS(w, 16, _, u16)
BUILD_CLRSETBITS(l, le32, le32, u32)
BUILD_CLRSETBITS(l, be32, be32, u32)
BUILD_CLRSETBITS(l, 32, _, u32)
BUILD_CLRSETBITS(q, le64, le64, u64)
BUILD_CLRSETBITS(q, be64, be64, u64)
BUILD_CLRSETBITS(q, 64, _, u64)
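
/*
 * A sketch: read-modify-write of a little-endian 32-bit register
 * (REG_CFG, REG_IRQ_EN and the masks are hypothetical):
 *
 *	clrsetbits_le32(regs + REG_CFG, 0xff << 8, 0x12 << 8);
 *	setbits_32(regs + REG_IRQ_EN, 0x1);	// native-endian variant
 */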

#include <asm-generic/io.h>

#endif /* _ASM_IO_H */