blob: 5db99e2a73a1201a7b4fae0eaa41edd98e3c56a3 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2014 - 2015 Xilinx, Inc.
 * Michal Simek <michal.simek@amd.com>
 */

Simon Glass97589732020-05-10 11:40:02 -06007#include <init.h>
Simon Glassa9dc0682019-12-28 10:44:59 -07008#include <time.h>
Tom Rinidec7ea02024-05-20 13:35:03 -06009#include <linux/errno.h>
10#include <linux/types.h>
Michal Simek04b7e622015-01-15 10:01:51 +010011#include <asm/arch/hardware.h>
12#include <asm/arch/sys_proto.h>
Alexander Graf0e2088c2016-03-04 01:09:49 +010013#include <asm/armv8/mmu.h>
Simon Glass274e0b02020-05-10 11:39:56 -060014#include <asm/cache.h>
Simon Glass3ba929a2020-10-30 21:38:53 -060015#include <asm/global_data.h>
Michal Simek04b7e622015-01-15 10:01:51 +010016#include <asm/io.h>
Ibai Erkiagac8a3efa2019-09-27 11:37:01 +010017#include <zynqmp_firmware.h>
Ovidiu Panait2b618472020-03-29 20:57:40 +030018#include <asm/cache.h>
T Karthik Reddy501c2062021-08-10 06:50:18 -060019#include <dm/platdata.h>
Michal Simek04b7e622015-01-15 10:01:51 +010020
/*
 * Silicon version field in the CSU version register.
 * NOTE(review): these two ZYNQ_SILICON_VER_* macros are not referenced in
 * this part of the file (the secure path uses ZYNQMP_SILICON_VER_* from
 * <asm/arch/hardware.h>) — presumably leftovers; confirm before removing.
 */
#define ZYNQ_SILICON_VER_MASK	0xF000
#define ZYNQ_SILICON_VER_SHIFT	12

DECLARE_GLOBAL_DATA_PTR;

/*
 * Number of filled static entries and also the first empty
 * slot in zynqmp_mem_map.
 */
#define ZYNQMP_MEM_MAP_USED	4

/* DDR entries are only reserved when DDR is configured in */
#if !defined(CONFIG_ZYNQMP_NO_DDR)
#define DRAM_BANKS CONFIG_NR_DRAM_BANKS
#else
#define DRAM_BANKS 0
#endif

/* One extra slot for the TCM/OCM window when it is mapped */
#if defined(CONFIG_DEFINE_TCM_OCM_MMAP)
#define TCM_MAP 1
#else
#define TCM_MAP 0
#endif

/* +1 is end of list which needs to be empty */
#define ZYNQMP_MEM_MAP_MAX (ZYNQMP_MEM_MAP_USED + DRAM_BANKS + TCM_MAP + 1)
46
/*
 * Static part of the ARMv8 MMU map: four strongly-ordered, non-executable
 * device windows. Run-time entries (TCM/OCM, DDR banks) are appended by
 * mem_map_fill(); the array is sized with one spare all-zero terminator.
 * NOTE(review): the ranges presumably follow the ZynqMP system address map
 * (peripheral/PCIe windows) — confirm against the device TRM.
 */
static struct mm_region zynqmp_mem_map[ZYNQMP_MEM_MAP_MAX] = {
	{
		/* 0x8000_0000 - 0xefff_ffff device window */
		.virt = 0x80000000UL,
		.phys = 0x80000000UL,
		.size = 0x70000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* 0xf800_0000 - 0xffdf_ffff device window */
		.virt = 0xf8000000UL,
		.phys = 0xf8000000UL,
		.size = 0x07e00000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* 16 GiB device window at 16 GiB */
		.virt = 0x400000000UL,
		.phys = 0x400000000UL,
		.size = 0x400000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* remaining high address space up to 1 TiB */
		.virt = 0x1000000000UL,
		.phys = 0x1000000000UL,
		.size = 0xf000000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}
};
Nitin Jain9bcc76f2018-04-20 12:30:40 +053078
79void mem_map_fill(void)
80{
81 int banks = ZYNQMP_MEM_MAP_USED;
82
83#if defined(CONFIG_DEFINE_TCM_OCM_MMAP)
84 zynqmp_mem_map[banks].virt = 0xffe00000UL;
85 zynqmp_mem_map[banks].phys = 0xffe00000UL;
86 zynqmp_mem_map[banks].size = 0x00200000UL;
87 zynqmp_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
88 PTE_BLOCK_INNER_SHARE;
89 banks = banks + 1;
90#endif
91
92#if !defined(CONFIG_ZYNQMP_NO_DDR)
93 for (int i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
94 /* Zero size means no more DDR that's this is end */
95 if (!gd->bd->bi_dram[i].size)
96 break;
97
98 zynqmp_mem_map[banks].virt = gd->bd->bi_dram[i].start;
99 zynqmp_mem_map[banks].phys = gd->bd->bi_dram[i].start;
100 zynqmp_mem_map[banks].size = gd->bd->bi_dram[i].size;
101 zynqmp_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
102 PTE_BLOCK_INNER_SHARE;
103 banks = banks + 1;
104 }
105#endif
106}
107
/* Exported map consumed by the generic ARMv8 MMU setup code */
struct mm_region *mem_map = zynqmp_mem_map;

u64 get_page_table_size(void)
{
	/*
	 * Fixed page-table reservation for this SoC.
	 * NOTE(review): 0x14000 presumably covers the worst-case number of
	 * translation tables for the map above — confirm before changing.
	 */
	return 0x14000;
}
114
#if defined(CONFIG_SYS_MEM_RSVD_FOR_MMU) || defined(CONFIG_DEFINE_TCM_OCM_MMAP)
/*
 * tcm_init() - Bring the TCM up in the requested mode and clear it
 * @mode: requested TCM mode (e.g. TCM_LOCK for lockstep)
 *
 * Only reprograms the TCM when check_tcm_mode() approves the transition;
 * switching from split to lockstep (-EACCES) needs a CPU reset/disable
 * first, and re-requesting the current mode (-EAGAIN) is silently ignored.
 */
void tcm_init(u8 mode)
{
	int ret = check_tcm_mode(mode);

	switch (ret) {
	case 0:
		/* Reprogramming destroys current TCM contents */
		puts("WARNING: Initializing TCM overwrites TCM content\n");
		initialize_tcm(mode);
		memset((void *)ZYNQMP_TCM_BASE_ADDR, 0, ZYNQMP_TCM_SIZE);
		break;
	case -EACCES:
		printf("ERROR: Split to lockstep mode required reset/disable cpu\n");
		break;
	default:
		/* -EAGAIN: already in the requested mode, nothing to do */
		break;
	}
}
#endif
Siva Durga Prasad Paladugua1ad8782018-10-05 15:09:04 +0530133
#ifdef CONFIG_SYS_MEM_RSVD_FOR_MMU
/*
 * Place the MMU translation tables in TCM instead of DRAM: lock the TCM
 * (wiping it — see tcm_init) and point gd->arch at it. Always returns 0.
 */
int arm_reserve_mmu(void)
{
	tcm_init(TCM_LOCK);
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->arch.tlb_addr = ZYNQMP_TCM_BASE_ADDR;

	return 0;
}
#endif
144
Michal Simekc23d3f82015-11-05 08:34:35 +0100145static unsigned int zynqmp_get_silicon_version_secure(void)
146{
147 u32 ver;
148
149 ver = readl(&csu_base->version);
150 ver &= ZYNQMP_SILICON_VER_MASK;
151 ver >>= ZYNQMP_SILICON_VER_SHIFT;
152
153 return ver;
154}
155
Michal Simek04b7e622015-01-15 10:01:51 +0100156unsigned int zynqmp_get_silicon_version(void)
157{
Michal Simekc23d3f82015-11-05 08:34:35 +0100158 if (current_el() == 3)
159 return zynqmp_get_silicon_version_secure();
160
Michal Simek04b7e622015-01-15 10:01:51 +0100161 gd->cpu_clk = get_tbclk();
162
163 switch (gd->cpu_clk) {
164 case 50000000:
165 return ZYNQMP_CSU_VERSION_QEMU;
166 }
167
Michal Simek8d2c02d2015-08-20 14:01:39 +0200168 return ZYNQMP_CSU_VERSION_SILICON;
Michal Simek04b7e622015-01-15 10:01:51 +0100169}
Siva Durga Prasad Paladugu0e39bd72017-02-02 01:10:46 +0530170
Siva Durga Prasad Paladugu668fdd42017-07-13 19:01:12 +0530171static int zynqmp_mmio_rawwrite(const u32 address,
Siva Durga Prasad Paladugu0e39bd72017-02-02 01:10:46 +0530172 const u32 mask,
173 const u32 value)
174{
175 u32 data;
176 u32 value_local = value;
Michal Simekfaac0ce2018-06-13 10:38:33 +0200177 int ret;
178
179 ret = zynqmp_mmio_read(address, &data);
180 if (ret)
181 return ret;
Siva Durga Prasad Paladugu0e39bd72017-02-02 01:10:46 +0530182
Siva Durga Prasad Paladugu0e39bd72017-02-02 01:10:46 +0530183 data &= ~mask;
184 value_local &= mask;
185 value_local |= data;
186 writel(value_local, (ulong)address);
187 return 0;
188}
189
/* Direct MMIO read helper used when the PMU firmware path is bypassed */
static int zynqmp_mmio_rawread(const u32 address, u32 *value)
{
	*value = readl((ulong)address);
	return 0;
}
Siva Durga Prasad Paladugu668fdd42017-07-13 19:01:12 +0530195
/*
 * Masked MMIO write with automatic routing: direct register access in SPL
 * or at EL3, PMU firmware (PM_MMIO_WRITE) otherwise. Returns 0 on success,
 * negative errno on failure, or -EINVAL when no firmware interface is built
 * in and direct access is not possible.
 */
int zynqmp_mmio_write(const u32 address,
		      const u32 mask,
		      const u32 value)
{
	if (IS_ENABLED(CONFIG_XPL_BUILD) || current_el() == 3)
		return zynqmp_mmio_rawwrite(address, mask, value);
/* The else leg only exists when the firmware interface is compiled in */
#if defined(CONFIG_ZYNQMP_FIRMWARE)
	else
		return xilinx_pm_request(PM_MMIO_WRITE, address, mask,
					 value, 0, NULL);
#endif

	return -EINVAL;
}
210
211int zynqmp_mmio_read(const u32 address, u32 *value)
212{
Michal Simek81efd2a2019-10-04 15:45:29 +0200213 u32 ret = -EINVAL;
Siva Durga Prasad Paladugu668fdd42017-07-13 19:01:12 +0530214
215 if (!value)
Michal Simek81efd2a2019-10-04 15:45:29 +0200216 return ret;
Siva Durga Prasad Paladugu668fdd42017-07-13 19:01:12 +0530217
Simon Glass85ed77d2024-09-29 19:49:46 -0600218 if (IS_ENABLED(CONFIG_XPL_BUILD) || current_el() == 3) {
Siva Durga Prasad Paladugu668fdd42017-07-13 19:01:12 +0530219 ret = zynqmp_mmio_rawread(address, value);
Michal Simek81efd2a2019-10-04 15:45:29 +0200220 }
221#if defined(CONFIG_ZYNQMP_FIRMWARE)
222 else {
223 u32 ret_payload[PAYLOAD_ARG_CNT];
224
Michal Simek4c3de372019-10-04 15:35:45 +0200225 ret = xilinx_pm_request(PM_MMIO_READ, address, 0, 0,
226 0, ret_payload);
Siva Durga Prasad Paladugu668fdd42017-07-13 19:01:12 +0530227 *value = ret_payload[1];
228 }
Michal Simek81efd2a2019-10-04 15:45:29 +0200229#endif
Siva Durga Prasad Paladugu668fdd42017-07-13 19:01:12 +0530230
231 return ret;
232}
T Karthik Reddy501c2062021-08-10 06:50:18 -0600233
/* Statically instantiate the ZynqMP SoC driver for the driver model */
U_BOOT_DRVINFO(soc_xilinx_zynqmp) = {
	.name = "soc_xilinx_zynqmp",
};