/* SPARC I/O definitions
 *
 * (C) Copyright 2007, 2015
 * Daniel Hellstrom, Cobham Gaisler, daniel@gaisler.com.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef _SPARC_IO_H
#define _SPARC_IO_H

/* Nothing to sync, total store ordering (TSO)... */
#define sync()

/*
 * Generic virtual read/write.
 */

#ifndef CONFIG_SYS_HAS_NO_CACHE

/* Forces a cache miss on each read/load.
 * On some architectures we need to bypass the cache when reading
 * I/O registers, so that we are not reading the same stale status
 * word over and over again and hanging (until an IRQ arrives, if
 * we are lucky). See the usage sketch after the readb()/writeb()
 * aliases below.
 */

#define __arch_getb(a) SPARC_NOCACHE_READ_BYTE((unsigned int)(a))
#define __arch_getw(a) SPARC_NOCACHE_READ_HWORD((unsigned int)(a))
#define __arch_getl(a) SPARC_NOCACHE_READ((unsigned int)(a))
#define __arch_getq(a) SPARC_NOCACHE_READ_DWORD((unsigned int)(a))

#else

#define __arch_getb(a) (*(volatile unsigned char *)(a))
#define __arch_getw(a) (*(volatile unsigned short *)(a))
#define __arch_getl(a) (*(volatile unsigned int *)(a))
#define __arch_getq(a) (*(volatile unsigned long long *)(a))

#endif /* CONFIG_SYS_HAS_NO_CACHE */

#define __arch_putb(v, a) (*(volatile unsigned char *)(a) = (v))
#define __arch_putw(v, a) (*(volatile unsigned short *)(a) = (v))
#define __arch_putl(v, a) (*(volatile unsigned int *)(a) = (v))
#define __arch_putq(v, a) (*(volatile unsigned long long *)(a) = (v))

#define __raw_writeb(v, a) __arch_putb(v, a)
#define __raw_writew(v, a) __arch_putw(v, a)
#define __raw_writel(v, a) __arch_putl(v, a)
#define __raw_writeq(v, a) __arch_putq(v, a)

#define __raw_readb(a) __arch_getb(a)
#define __raw_readw(a) __arch_getw(a)
#define __raw_readl(a) __arch_getl(a)
#define __raw_readq(a) __arch_getq(a)

#define writeb __raw_writeb
#define writew __raw_writew
#define writel __raw_writel
#define writeq __raw_writeq

#define readb __raw_readb
#define readw __raw_readw
#define readl __raw_readl
#define readq __raw_readq

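/*
 * Usage sketch (illustrative only, not part of the original header),
 * assuming a hypothetical device with a status word at 0x80000100 and
 * a control word at 0x80000104. When the cache-bypassing readers above
 * are in use, each readl() fetches the status word from the bus again
 * instead of spinning forever on a stale cached copy:
 *
 *	while (!(readl(0x80000100) & 0x1))
 *		;
 *	writel(0x1, 0x80000104);
 */
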
/*
 * Given a physical address and a length, return a virtual address
 * that can be used to access the memory range with the caching
 * properties specified by "flags".
 */

#define MAP_NOCACHE (0)
#define MAP_WRCOMBINE (0)
#define MAP_WRBACK (0)
#define MAP_WRTHROUGH (0)

static inline void *map_physmem(phys_addr_t paddr, unsigned long len,
				unsigned long flags)
{
	return (void *)paddr;
}

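/*
 * Usage sketch (illustrative only, not part of the original header).
 * The mapping here is 1:1 and the MAP_* flags are ignored, so
 * map_physmem() just hands back the physical address as a pointer.
 * A hypothetical caller mapping a 4 KiB register window could do:
 *
 *	void *regs = map_physmem(0x80000100, 0x1000, MAP_NOCACHE);
 *	unsigned int status = readl(regs);
 *	unmap_physmem(regs, MAP_NOCACHE);
 */
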
/*
 * Take down a mapping set up by map_physmem().
 */
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{
}

static inline phys_addr_t virt_to_phys(void *vaddr)
{
	return (phys_addr_t)(vaddr);
}

#endif /* _SPARC_IO_H */