// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 MediaTek Inc. All rights reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include <image.h>
#include <asm/system.h>
#include <asm/sections.h>
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/cm.h>

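/*
 * Cache op 0x0f is "Index Store Data" on the secondary (L2) data array:
 * it writes the 8 bytes staged in the L23DataLo/L23DataHi CP0 registers
 * into the selected line at the given offset.
 */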
#define INDEX_STORE_DATA_SD 0x0f

typedef void __noreturn (*image_entry_noargs_t)(void);

/*
 * Lock L2 cache and fill data
 * Assume that data is 4-byte aligned and start_addr/size are 32-byte aligned
 */
static void fill_lock_l2cache(uintptr_t dataptr, ulong start_addr, ulong size)
{
	ulong slsize = CONFIG_SYS_DCACHE_LINE_SIZE;
	ulong end_addr = start_addr + size;
	const u32 *data = (u32 *)dataptr;
	ulong i, addr;
	u32 val;

	/*
	 * Clear WST & SPR bits in ErrCtl so that index cache ops access
	 * the normal tag/data arrays
	 */
	val = read_c0_ecc();
	val &= 0xcfffffff;
	write_c0_ecc(val);
	execution_hazard_barrier();

	for (addr = start_addr; addr < end_addr; addr += slsize) {
		/* Set STagLo to lock cache line */
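		/*
		 * The upper bits hold the line's physical tag; 0xa0 is
		 * assumed to set the valid + lock bits of the line state.
		 */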
		write_c0_staglo((addr & 0x1ffff800) | 0xa0);
		mips_cache(INDEX_STORE_TAG_SD, (void *)addr);

		/* Fill data */
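		/*
		 * Each INDEX_STORE_DATA_SD stores the 8 bytes staged in
		 * L23DataLo/L23DataHi into the L2 data array at offset i
		 * of the locked line.
		 */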
		for (i = 0; i < slsize; i += 8) {
			val = *data++;
			__write_32bit_c0_register($28, 5, val); /* l23datalo */
			val = *data++;
			__write_32bit_c0_register($29, 5, val); /* l23datahi */
			mips_cache(INDEX_STORE_DATA_SD, (void *)(addr + i));
		}
	}

	sync();
}

/* Initialize MT7621's L1 and L2 caches */
static void mt7621_cache_init(void)
{
	void __iomem *cm_base = (void *)KSEG1ADDR(CONFIG_MIPS_CM_BASE);
	ulong lsize = CONFIG_SYS_DCACHE_LINE_SIZE;
	ulong addr;
	u32 val;

	/* Enable CCA override. Set to uncached */
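	/*
	 * Presumably this keeps memory accesses routed through the CM
	 * uncached while the cache tags below are still being initialized.
	 */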
	val = readl(cm_base + GCR_BASE);
	val &= ~CCA_DEFAULT_OVR_MASK;
	val |= CCA_DEFAULT_OVREN | (2 << CCA_DEFAULT_OVR_SHIFT);
	writel(val, cm_base + GCR_BASE);

	/* Initialize L1 I-Cache */
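	/* Writing an all-zero tag to every index marks each line invalid */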
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = 0; addr < CONFIG_SYS_ICACHE_SIZE; addr += lsize)
		mips_cache(INDEX_STORE_TAG_I, (void *)addr);

	/* Initialize L1 D-Cache */
	write_c0_dtaglo(0);
	__write_32bit_c0_register($29, 2, 0); /* dtaghi */

	for (addr = 0; addr < CONFIG_SYS_DCACHE_SIZE; addr += lsize)
		mips_cache(INDEX_STORE_TAG_D, (void *)addr);

	/* Initialize L2 Cache */
	write_c0_staglo(0);
	__write_32bit_c0_register($29, 4, 0); /* staghi */

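	/* The L2 cache on MT7621 is 256 KiB */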
	for (addr = 0; addr < (256 << 10); addr += lsize)
		mips_cache(INDEX_STORE_TAG_SD, (void *)addr);

	/* Disable CCA override */
	val = readl(cm_base + GCR_BASE);
	val &= ~(CCA_DEFAULT_OVR_MASK | CCA_DEFAULT_OVREN);
	writel(val, cm_base + GCR_BASE);

	/* Set KSEG0 to non-coherent cached (important!) */
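	/*
	 * From here on KSEG0 accesses are cached, which the jump into the
	 * locked L2 lines below relies on.
	 */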
	val = read_c0_config();
	val &= ~CONF_CM_CMASK;
	val |= CONF_CM_CACHABLE_NONCOHERENT;
	write_c0_config(val);
	execution_hazard_barrier();

	/* Again, invalidate L1 D-Cache */
	for (addr = 0; addr < CONFIG_SYS_DCACHE_SIZE; addr += lsize)
		mips_cache(INDEX_WRITEBACK_INV_D, (void *)addr);

	/* Invalidate L1 I-Cache */
	for (addr = 0; addr < CONFIG_SYS_ICACHE_SIZE; addr += lsize)
		mips_cache(INDEX_INVALIDATE_I, (void *)addr);

	/* Disable L2 cache bypass */
	val = read_c0_config2();
	val &= ~MIPS_CONF_IMPL;
	write_c0_config2(val);
	execution_hazard_barrier();
}

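/*
 * TPL entry point: set up the caches, then copy the legacy uImage appended
 * right after the TPL binary (expected to be the SPL) into locked L2 cache
 * lines at its load address and jump to its entry point, so that the next
 * stage does not yet depend on DRAM being initialized.
 */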
void __noreturn tpl_main(void)
{
	const struct legacy_img_hdr *hdr = (const struct legacy_img_hdr *)__image_copy_end;
	image_entry_noargs_t image_entry;
	u32 loadaddr, size;
	uintptr_t data;

	/* Initialize the cache first */
	mt7621_cache_init();

	if (image_get_magic(hdr) != IH_MAGIC)
		goto failed;

	loadaddr = image_get_load(hdr);
	size = image_get_size(hdr);
	image_entry = (image_entry_noargs_t)image_get_ep(hdr);

	/* Load the next stage (SPL) image into the locked L2 cache */
	data = (uintptr_t)__image_copy_end + sizeof(struct legacy_img_hdr);
	fill_lock_l2cache(data, loadaddr, size);

	/* Jump to SPL */
	image_entry();

failed:
	for (;;)
		;
}