// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP
 */

#include <common.h>
#include <clk.h>
#include <cpu.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/uclass.h>
#include <errno.h>
#include <thermal.h>
#include <asm/arch/sci/sci.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch-imx/cpu.h>
#include <asm/armv8/cpu.h>
#include <asm/armv8/mmu.h>
#include <asm/mach-imx/boot_mode.h>

DECLARE_GLOBAL_DATA_PTR;

#define BT_PASSOVER_TAG	0x504F
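/*
 * Return the pass-over info block at PASS_OVER_INFO_ADDR, or NULL if its
 * barker tag or length does not match.
 */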
struct pass_over_info_t *get_pass_over_info(void)
{
	struct pass_over_info_t *p =
		(struct pass_over_info_t *)PASS_OVER_INFO_ADDR;

	if (p->barker != BT_PASSOVER_TAG ||
	    p->len != sizeof(struct pass_over_info_t))
		return NULL;

	return p;
}

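/*
 * In SPL on rev A silicon, report boot success to the SCFW when U-Boot was
 * booted from the first container (g_ap_mu == 0).
 */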
int arch_cpu_init(void)
{
#ifdef CONFIG_SPL_BUILD
	struct pass_over_info_t *pass_over;

	if (is_soc_rev(CHIP_REV_A)) {
		pass_over = get_pass_over_info();
		if (pass_over && pass_over->g_ap_mu == 0) {
			/*
			 * A zero g_ap_mu means U-Boot was booted from
			 * the first container.
			 */
			sc_misc_boot_status(-1, SC_MISC_BOOT_STATUS_SUCCESS);
		}
	}
#endif

	return 0;
}

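/*
 * Probe the SCU MU device so the SCFW API is usable, and power up the SMMU
 * on i.MX8QM.
 */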
int arch_cpu_init_dm(void)
{
	struct udevice *devp;
	int node, ret;

	node = fdt_node_offset_by_compatible(gd->fdt_blob, -1, "fsl,imx8-mu");

	ret = uclass_get_device_by_of_offset(UCLASS_MISC, node, &devp);
	if (ret) {
		printf("could not get scu %d\n", ret);
		return ret;
	}

	if (is_imx8qm()) {
		ret = sc_pm_set_resource_power_mode(-1, SC_R_SMMU,
						    SC_PM_PW_MODE_ON);
		if (ret)
			return ret;
	}

	return 0;
}

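/* Print the boot device reported by get_boot_device(). */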
int print_bootinfo(void)
{
	enum boot_device bt_dev = get_boot_device();

	puts("Boot: ");
	switch (bt_dev) {
	case SD1_BOOT:
		puts("SD0\n");
		break;
	case SD2_BOOT:
		puts("SD1\n");
		break;
	case SD3_BOOT:
		puts("SD2\n");
		break;
	case MMC1_BOOT:
		puts("MMC0\n");
		break;
	case MMC2_BOOT:
		puts("MMC1\n");
		break;
	case MMC3_BOOT:
		puts("MMC2\n");
		break;
	case FLEXSPI_BOOT:
		puts("FLEXSPI\n");
		break;
	case SATA_BOOT:
		puts("SATA\n");
		break;
	case NAND_BOOT:
		puts("NAND\n");
		break;
	case USB_BOOT:
		puts("USB\n");
		break;
	default:
		printf("Unknown device %u\n", bt_dev);
		break;
	}

	return 0;
}

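/*
 * Query the SCFW for the boot device resource and map it to the
 * corresponding boot_device value; default to SD1_BOOT.
 */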
enum boot_device get_boot_device(void)
{
	enum boot_device boot_dev = SD1_BOOT;

	sc_rsrc_t dev_rsrc;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		boot_dev = MMC1_BOOT;
		break;
	case SC_R_SDHC_1:
		boot_dev = SD2_BOOT;
		break;
	case SC_R_SDHC_2:
		boot_dev = SD3_BOOT;
		break;
	case SC_R_NAND:
		boot_dev = NAND_BOOT;
		break;
	case SC_R_FSPI_0:
		boot_dev = FLEXSPI_BOOT;
		break;
	case SC_R_SATA_0:
		boot_dev = SATA_BOOT;
		break;
	case SC_R_USB_0:
	case SC_R_USB_1:
	case SC_R_USB_2:
		boot_dev = USB_BOOT;
		break;
	default:
		break;
	}

	return boot_dev;
}

#ifdef CONFIG_ENV_IS_IN_MMC
__weak int board_mmc_get_env_dev(int devno)
{
	return CONFIG_SYS_MMC_ENV_DEV;
}

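/*
 * Map the SCFW boot device resource to the MMC device number holding the
 * environment; fall back to CONFIG_SYS_MMC_ENV_DEV when not booting from
 * SD/eMMC.
 */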
int mmc_get_env_dev(void)
{
	sc_rsrc_t dev_rsrc;
	int devno;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		devno = 0;
		break;
	case SC_R_SDHC_1:
		devno = 1;
		break;
	case SC_R_SDHC_2:
		devno = 2;
		break;
	default:
		/* If not booting from SD/MMC, use the default value */
		return CONFIG_SYS_MMC_ENV_DEV;
	}

	return board_mmc_get_env_dev(devno);
}
#endif

#define MEMSTART_ALIGNMENT	SZ_2M /* Align the memory start to 2 MB */

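/*
 * If memory region 'mr' is owned by this partition, return its start and end
 * addresses; otherwise return -EINVAL.
 */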
static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
			    sc_faddr_t *addr_end)
{
	sc_faddr_t start, end;
	int ret;
	bool owned;

	owned = sc_rm_is_memreg_owned(-1, mr);
	if (owned) {
		ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
		if (ret) {
			printf("Memreg get info failed, %d\n", ret);
			return -EINVAL;
		}
		debug("0x%llx -- 0x%llx\n", start, end);
		*addr_start = start;
		*addr_end = end;

		return 0;
	}

	return -EINVAL;
}

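/*
 * Return the effective size of DDR bank 1 available to U-Boot, based on the
 * owned memory region that contains the running U-Boot image.
 */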
phys_size_t get_effective_memsize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;

	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Skip memory regions that are too small */
			if (start > end)
				continue;

			/* Find the memory region that U-Boot runs in */
			if (start >= PHYS_SDRAM_1 && start <= end1 &&
			    (start <= CONFIG_SYS_TEXT_BASE &&
			    end >= CONFIG_SYS_TEXT_BASE)) {
				if ((end + 1) <= ((sc_faddr_t)PHYS_SDRAM_1 +
				    PHYS_SDRAM_1_SIZE))
					return (end - PHYS_SDRAM_1 + 1);
				else
					return PHYS_SDRAM_1_SIZE;
			}
		}
	}

	return PHYS_SDRAM_1_SIZE;
}

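/*
 * Accumulate gd->ram_size from the owned memory regions that fall inside the
 * two DDR banks; fall back to the full bank sizes if none are found.
 */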
int dram_init(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Skip memory regions that are too small */
			if (start > end)
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				if ((end + 1) <= end1)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end1 - start;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				if ((end + 1) <= end2)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end2 - start;
			}
		}
	}

	/* On error, fall back to the default values */
	if (!gd->ram_size) {
		gd->ram_size = PHYS_SDRAM_1_SIZE;
		gd->ram_size += PHYS_SDRAM_2_SIZE;
	}
	return 0;
}

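/*
 * Bubble the bank at 'current_bank' towards the front so gd->bd->bi_dram[]
 * stays sorted by start address.
 */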
static void dram_bank_sort(int current_bank)
{
	phys_addr_t start;
	phys_size_t size;

	while (current_bank > 0) {
		if (gd->bd->bi_dram[current_bank - 1].start >
		    gd->bd->bi_dram[current_bank].start) {
			start = gd->bd->bi_dram[current_bank - 1].start;
			size = gd->bd->bi_dram[current_bank - 1].size;

			gd->bd->bi_dram[current_bank - 1].start =
				gd->bd->bi_dram[current_bank].start;
			gd->bd->bi_dram[current_bank - 1].size =
				gd->bd->bi_dram[current_bank].size;

			gd->bd->bi_dram[current_bank].start = start;
			gd->bd->bi_dram[current_bank].size = size;
		}
		current_bank--;
	}
}

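/*
 * Fill gd->bd->bi_dram[] with the owned memory regions, clipped to the two
 * DDR banks and kept sorted by start address.
 */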
int dram_init_banksize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int i = 0;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			if (start > end) /* Memory region too small, skip it */
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end1)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end1 - start;

				dram_bank_sort(i);
				i++;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end2)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end2 - start;

				dram_bank_sort(i);
				i++;
			}
		}
	}

	/* On error, fall back to the default values */
	if (!i) {
		gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
		gd->bd->bi_dram[0].size = PHYS_SDRAM_1_SIZE;
		gd->bd->bi_dram[1].start = PHYS_SDRAM_2;
		gd->bd->bi_dram[1].size = PHYS_SDRAM_2_SIZE;
	}

	return 0;
}

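/*
 * Return the MMU attributes for a block starting at addr_start: normal
 * cacheable memory inside the DDR banks, non-executable device memory
 * otherwise.
 */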
static u64 get_block_attrs(sc_faddr_t addr_start)
{
	u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
		PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	if ((addr_start >= PHYS_SDRAM_1 &&
	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)) ||
	    (addr_start >= PHYS_SDRAM_2 &&
	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE)))
		return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);

	return attr;
}

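/*
 * Return the block size, clipped so the block does not extend past the end
 * of the DDR bank it starts in.
 */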
static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
{
	sc_faddr_t end1, end2;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	if (addr_start >= PHYS_SDRAM_1 && addr_start <= end1) {
		if ((addr_end + 1) > end1)
			return end1 - addr_start;
	} else if (addr_start >= PHYS_SDRAM_2 && addr_start <= end2) {
		if ((addr_end + 1) > end2)
			return end2 - addr_start;
	}

	return (addr_end - addr_start + 1);
}

#define MAX_PTE_ENTRIES		512
#define MAX_MEM_MAP_REGIONS	16

static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
struct mm_region *mem_map = imx8_mem_map;

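/*
 * Build the MMU memory map from a fixed device-register window plus the
 * owned memory regions, then enable the I- and D-caches.
 */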
void enable_caches(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end;
	int err, i;

	/* Create a map for register access from 0x1c000000 to 0x80000000 */
	imx8_mem_map[0].virt = 0x1c000000UL;
	imx8_mem_map[0].phys = 0x1c000000UL;
	imx8_mem_map[0].size = 0x64000000UL;
	imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
		PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	i = 1;
	for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			imx8_mem_map[i].virt = start;
			imx8_mem_map[i].phys = start;
			imx8_mem_map[i].size = get_block_size(start, end);
			imx8_mem_map[i].attrs = get_block_attrs(start);
			i++;
		}
	}

	if (i < MAX_MEM_MAP_REGIONS) {
		imx8_mem_map[i].size = 0;
		imx8_mem_map[i].attrs = 0;
	} else {
		puts("Error, need more MEM MAP REGIONS reserved\n");
		icache_enable();
		return;
	}

	for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
		debug("[%d] vir = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
		      i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
		      imx8_mem_map[i].size, imx8_mem_map[i].attrs);
	}

	icache_enable();
	dcache_enable();
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;

	/*
	 * For each memory region, the max table size:
	 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
	 */
	size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
#endif

#if defined(CONFIG_IMX8QM)
#define FUSE_MAC0_WORD0 452
#define FUSE_MAC0_WORD1 453
#define FUSE_MAC1_WORD0 454
#define FUSE_MAC1_WORD1 455
#elif defined(CONFIG_IMX8QXP)
#define FUSE_MAC0_WORD0 708
#define FUSE_MAC0_WORD1 709
#define FUSE_MAC1_WORD0 710
#define FUSE_MAC1_WORD1 711
#endif

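/*
 * Read the MAC address for ENET interface 'dev_id' from the OTP fuses via
 * the SCFW.
 */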
void imx_get_mac_from_fuse(int dev_id, unsigned char *mac)
{
	u32 word[2], val[2] = {};
	int i, ret;

	if (dev_id == 0) {
		word[0] = FUSE_MAC0_WORD0;
		word[1] = FUSE_MAC0_WORD1;
	} else {
		word[0] = FUSE_MAC1_WORD0;
		word[1] = FUSE_MAC1_WORD1;
	}

	for (i = 0; i < 2; i++) {
		ret = sc_misc_otp_fuse_read(-1, word[i], &val[i]);
		if (ret < 0)
			goto err;
	}

	mac[0] = val[0];
	mac[1] = val[0] >> 8;
	mac[2] = val[0] >> 16;
	mac[3] = val[0] >> 24;
	mac[4] = val[1];
	mac[5] = val[1] >> 8;

	debug("%s: MAC%d: %02x.%02x.%02x.%02x.%02x.%02x\n",
	      __func__, dev_id, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return;
err:
	printf("%s: fuse %d, err: %d\n", __func__, word[i], ret);
}

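/*
 * Read the SoC ID and revision from the SCFW and encode them as
 * (id << 12) | rev.
 */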
u32 get_cpu_rev(void)
{
	u32 id = 0, rev = 0;
	int ret;

	ret = sc_misc_get_control(-1, SC_R_SYSTEM, SC_C_ID, &id);
	if (ret)
		return 0;

	rev = (id >> 5) & 0xf;
	id = (id & 0x1f) + MXC_SOC_IMX8;	/* Dummy ID for chip */

	return (id << 12) | rev;
}