/*
 * To deal with the hardcoded u-boot requirement that virtual addresses
 * are always mapped 1:1 with physical addresses, we implement a small
 * virtual memory manager so that we can use the MMU hardware to get
 * the caching properties right.
 *
 * A few pages (or possibly just one) are locked in the TLB permanently
 * in order to avoid recursive TLB misses, but most pages are faulted in
 * on demand.
 */
#ifndef __ASM_ARCH_MMU_H
#define __ASM_ARCH_MMU_H

#include <asm/sysreg.h>

#define PAGE_SHIFT	20
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ADDR_MASK	(~(PAGE_SIZE - 1))
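/*
 * For illustration: with PAGE_SHIFT == 20 these work out to 1 MiB
 * pages, i.e. PAGE_SIZE == 0x00100000 and PAGE_ADDR_MASK == 0xfff00000,
 * so (addr & PAGE_ADDR_MASK) is the 1 MiB-aligned base of an address
 * and (addr >> PAGE_SHIFT) is its page number (0x24f for 0x24f08000,
 * for example).
 */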

#define MMU_VMR_CACHE_NONE					\
	(SYSREG_BF(AP, 3) | SYSREG_BF(SZ, 3) | SYSREG_BIT(TLBELO_D))
#define MMU_VMR_CACHE_WBUF					\
	(MMU_VMR_CACHE_NONE | SYSREG_BIT(B))
#define MMU_VMR_CACHE_WRTHRU					\
	(MMU_VMR_CACHE_NONE | SYSREG_BIT(TLBELO_C) | SYSREG_BIT(W))
#define MMU_VMR_CACHE_WRBACK					\
	(MMU_VMR_CACHE_WBUF | SYSREG_BIT(TLBELO_C))
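/*
 * Reading aid for the variants above: all four set the same size,
 * access permission and Dirty bits and differ only in the TLBELO
 * cacheability bits. CACHE_NONE sets neither C nor B (uncached),
 * CACHE_WBUF adds the Bufferable bit, CACHE_WRTHRU adds Cacheable plus
 * Write-through, and CACHE_WRBACK adds Cacheable on top of Bufferable
 * (write-back).
 */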

/*
 * This structure is used in our "page table". Instead of the usual
 * x86-inspired radix tree, we let each entry cover an arbitrary-sized
 * virtual address range and store the entries in an array sorted by
 * virt_pgno, which is searched with a binary search. This is somewhat
 * slower, but should use significantly less RAM, and we shouldn't get
 * many TLB misses when using 1 MB pages anyway.
 *
 * With 1 MB pages, we need 12 bits to store the page number. In
 * addition, we stick an Invalid bit in the high bit of virt_pgno (if
 * set, it cannot possibly match any faulting page), and all the bits
 * that need to be written to TLBELO in phys.
 */
struct mmu_vm_range {
	uint16_t	virt_pgno;
	uint16_t	nr_pages;
	uint32_t	phys;
};
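/*
 * A hypothetical example entry (the addresses are made up purely for
 * illustration): map the sixteen 1 MiB pages starting at virtual
 * 0x10000000 to the same physical addresses, write-back cached. Only
 * the page-aligned physical base and one of the MMU_VMR_CACHE_* values
 * go into .phys, since that field holds everything destined for TLBELO:
 *
 *	{
 *		.virt_pgno	= 0x10000000 >> PAGE_SHIFT,
 *		.nr_pages	= 16,
 *		.phys		= (0x10000000 & PAGE_ADDR_MASK)
 *					| MMU_VMR_CACHE_WRBACK,
 *	}
 */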

/*
 * An array of mmu_vm_range objects describing all pageable addresses.
 * The array is sorted by virt_pgno so that the TLB miss exception
 * handler can do a binary search to find the correct entry.
 */
extern struct mmu_vm_range mmu_vmr_table[];
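/*
 * Illustrative sketch of the lookup described above, not part of the
 * original interface: a binary search over a sorted table for the entry
 * covering a faulting page number. The nr_entries parameter is assumed
 * to be supplied by the caller; the real TLB miss handler may organise
 * this differently.
 */
static inline const struct mmu_vm_range *
mmu_vmr_find_example(const struct mmu_vm_range *table,
		     unsigned int nr_entries, unsigned int pgno)
{
	unsigned int lo = 0, hi = nr_entries;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;
		const struct mmu_vm_range *vmr = &table[mid];

		if (pgno < vmr->virt_pgno)
			hi = mid;
		else if (pgno >= (unsigned int)vmr->virt_pgno + vmr->nr_pages)
			lo = mid + 1;
		else
			return vmr;	/* pgno falls inside this range */
	}

	return NULL;	/* no entry covers pgno: a genuine fault */
}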

/*
 * Initialize the MMU. This will set up a fixed TLB entry for the static
 * u-boot image at dest_addr and enable paging.
 */
void mmu_init_r(unsigned long dest_addr);
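/*
 * Usage note (an assumption based on the comments in this file, not on
 * the original callers): this would typically be called once, early in
 * the relocated phase, with dest_addr being the base the u-boot image
 * now runs from; after it returns, only addresses covered by the fixed
 * entry or by mmu_vmr_table can be accessed without a genuine fault.
 */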

/*
 * Handle a TLB miss exception. This function is called directly from
 * the exception vector table written in assembly.
 */
int mmu_handle_tlb_miss(void);

#endif /* __ASM_ARCH_MMU_H */