// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
 * Copyright 2017 NXP
 */

#include <asm/io.h>
#include <asm/psci.h>
#include <asm/secure.h>
#include <asm/arch/imx-regs.h>
#include <asm/armv7.h>
#include <asm/gic.h>
#include <linux/bitops.h>
#include <common.h>
#include <fsl_wdog.h>

#define GPC_LPCR_A7_BSC		0x0
#define GPC_LPCR_A7_AD		0x4
#define GPC_SLPCR		0x14
#define GPC_PGC_ACK_SEL_A7	0x24
#define GPC_IMR1_CORE0		0x30
#define GPC_SLOT0_CFG		0xb0
#define GPC_CPU_PGC_SW_PUP_REQ	0xf0
#define GPC_CPU_PGC_SW_PDN_REQ	0xfc
#define GPC_PGC_C0		0x800
#define GPC_PGC_C1		0x840
#define GPC_PGC_SCU		0x880

#define BM_LPCR_A7_BSC_CPU_CLK_ON_LPM	0x4000
#define BM_LPCR_A7_BSC_LPM1		0xc
#define BM_LPCR_A7_BSC_LPM0		0x3
#define BP_LPCR_A7_BSC_LPM0		0
#define BM_SLPCR_EN_DSM			0x80000000
#define BM_SLPCR_RBC_EN			0x40000000
#define BM_SLPCR_REG_BYPASS_COUNT	0x3f000000
#define BM_SLPCR_VSTBY			0x4
#define BM_SLPCR_SBYOS			0x2
#define BM_SLPCR_BYPASS_PMIC_READY	0x1
#define BM_LPCR_A7_AD_L2PGE		0x10000
#define BM_LPCR_A7_AD_EN_C1_PUP		0x800
#define BM_LPCR_A7_AD_EN_C0_PUP		0x200
#define BM_LPCR_A7_AD_EN_PLAT_PDN	0x10
#define BM_LPCR_A7_AD_EN_C1_PDN		0x8
#define BM_LPCR_A7_AD_EN_C0_PDN		0x2

#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE0_A7	0x1
#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7	0x2

#define BM_GPC_PGC_ACK_SEL_A7_PD_DUMMY_ACK	0x8000
#define BM_GPC_PGC_ACK_SEL_A7_PU_DUMMY_ACK	0x80000000

#define MAX_SLOT_NUMBER		10
#define A7_LPM_WAIT		0x5
#define A7_LPM_STOP		0xa

#define BM_SYS_COUNTER_CNTCR_FCR1	0x200
#define BM_SYS_COUNTER_CNTCR_FCR0	0x100

#define REG_SET		0x4
#define REG_CLR		0x8

#define ANADIG_ARM_PLL		0x60
#define ANADIG_DDR_PLL		0x70
#define ANADIG_SYS_PLL		0xb0
#define ANADIG_ENET_PLL		0xe0
#define ANADIG_AUDIO_PLL	0xf0
#define ANADIG_VIDEO_PLL	0x130
#define BM_ANATOP_ARM_PLL_OVERRIDE	BIT(20)
#define BM_ANATOP_DDR_PLL_OVERRIDE	BIT(19)
#define BM_ANATOP_SYS_PLL_OVERRIDE	(0x1ff << 17)
#define BM_ANATOP_ENET_PLL_OVERRIDE	BIT(13)
#define BM_ANATOP_AUDIO_PLL_OVERRIDE	BIT(24)
#define BM_ANATOP_VIDEO_PLL_OVERRIDE	BIT(24)

#define DDRC_STAT	0x4
#define DDRC_PWRCTL	0x30
#define DDRC_PSTAT	0x3fc

#define SRC_GPR1_MX7D	0x074
#define SRC_GPR2_MX7D	0x078
#define SRC_A7RCR0	0x004
#define SRC_A7RCR1	0x008

#define BP_SRC_A7RCR0_A7_CORE_RESET0	0
#define BP_SRC_A7RCR1_A7_CORE1_ENABLE	1

#define SNVS_LPCR		0x38
#define BP_SNVS_LPCR_DP_EN	0x20
#define BP_SNVS_LPCR_TOP	0x40

#define CCM_CCGR_SNVS	0x4250

#define CCM_ROOT_WDOG	0xbb80
#define CCM_CCGR_WDOG1	0x49c0

#define MPIDR_AFF0	GENMASK(7, 0)

#define IMX7D_PSCI_NR_CPUS	2
#if IMX7D_PSCI_NR_CPUS > CONFIG_ARMV7_PSCI_NR_CPUS
#error "invalid value for CONFIG_ARMV7_PSCI_NR_CPUS"
#endif

#define imx_cpu_gpr_entry_offset(cpu) \
	(SRC_BASE_ADDR + SRC_GPR1_MX7D + cpu * 8)
#define imx_cpu_gpr_para_offset(cpu) \
	(imx_cpu_gpr_entry_offset(cpu) + 4)

#define IMX_CPU_SYNC_OFF	~0
#define IMX_CPU_SYNC_ON		0

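/*
 * Per-core PSCI affinity state, kept in secure data so it survives the
 * switch to non-secure mode. Core 0 boots U-Boot and starts ON, core 1
 * stays OFF until CPU_ON is requested for it.
 */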
u8 psci_state[IMX7D_PSCI_NR_CPUS] __secure_data = {
	PSCI_AFFINITY_LEVEL_ON,
	PSCI_AFFINITY_LEVEL_OFF};

enum imx_gpc_slot {
	CORE0_A7,
	CORE1_A7,
	SCU_A7,
	FAST_MEGA_MIX,
	MIPI_PHY,
	PCIE_PHY,
	USB_OTG1_PHY,
	USB_OTG2_PHY,
	USB_HSIC_PHY,
	CORE0_M4,
};

enum mxc_cpu_pwr_mode {
	RUN,
	WAIT,
	STOP,
};

extern void psci_system_resume(void);

static inline void psci_set_state(int cpu, u8 state)
{
	psci_state[cpu] = state;
	dsb();
	isb();
}

static inline void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
{
	writel(enable, GPC_IPS_BASE_ADDR + offset);
}

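/*
 * Power one A7 core's PGC (power gating controller) domain up or down
 * through the GPC: enable the core's PGC, raise the software
 * power-up/power-down request and busy-wait until the GPC clears it.
 */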
__secure void imx_gpcv2_set_core_power(int cpu, bool pdn)
{
	u32 reg = pdn ? GPC_CPU_PGC_SW_PUP_REQ : GPC_CPU_PGC_SW_PDN_REQ;
	u32 pgc = cpu ? GPC_PGC_C1 : GPC_PGC_C0;
	u32 pdn_pup_req = cpu ? BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7 :
			       BM_CPU_PGC_SW_PDN_PUP_REQ_CORE0_A7;
	u32 val;

	imx_gpcv2_set_m_core_pgc(true, pgc);

	val = readl(GPC_IPS_BASE_ADDR + reg);
	val |= pdn_pup_req;
	writel(val, GPC_IPS_BASE_ADDR + reg);

	while ((readl(GPC_IPS_BASE_ADDR + reg) & pdn_pup_req) != 0)
		;

	imx_gpcv2_set_m_core_pgc(false, pgc);
}

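/*
 * Enable or disable a secondary A7 core via its core-enable bit in the
 * SRC A7 reset control register (SRC_A7RCR1).
 */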
__secure void imx_enable_cpu_ca7(int cpu, bool enable)
{
	u32 mask, val;

	mask = 1 << (BP_SRC_A7RCR1_A7_CORE1_ENABLE + cpu - 1);
	val = readl(SRC_BASE_ADDR + SRC_A7RCR1);
	val = enable ? val | mask : val & ~mask;
	writel(val, SRC_BASE_ADDR + SRC_A7RCR1);
}

__secure void psci_arch_cpu_entry(void)
{
	u32 cpu = psci_get_cpu_id();

	psci_set_state(cpu, PSCI_AFFINITY_LEVEL_ON);
}

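/*
 * PSCI CPU_ON: validate the target MPIDR, record the non-secure entry
 * point and context, publish the secure entry address (psci_cpu_entry)
 * in the SRC boot GPR for the target core, then power up and enable
 * the core.
 */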
__secure s32 psci_cpu_on(u32 __always_unused function_id, u32 mpidr, u32 ep,
			 u32 context_id)
{
	u32 cpu = mpidr & MPIDR_AFF0;

	if (mpidr & ~MPIDR_AFF0)
		return ARM_PSCI_RET_INVAL;

	if (cpu >= IMX7D_PSCI_NR_CPUS)
		return ARM_PSCI_RET_INVAL;

	if (psci_state[cpu] == PSCI_AFFINITY_LEVEL_ON)
		return ARM_PSCI_RET_ALREADY_ON;

	if (psci_state[cpu] == PSCI_AFFINITY_LEVEL_ON_PENDING)
		return ARM_PSCI_RET_ON_PENDING;

	psci_save(cpu, ep, context_id);

	writel((u32)psci_cpu_entry, imx_cpu_gpr_entry_offset(cpu));

	psci_set_state(cpu, PSCI_AFFINITY_LEVEL_ON_PENDING);

	imx_gpcv2_set_core_power(cpu, true);
	imx_enable_cpu_ca7(cpu, true);

	return ARM_PSCI_RET_SUCCESS;
}

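/*
 * PSCI CPU_OFF for the calling core: mark it OFF, disable it in the
 * SRC and request its power-down, then park in wfi. The SRC parameter
 * GPR hands over to psci_affinity_info(), which finishes killing the
 * core from CPU0.
 */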
__secure s32 psci_cpu_off(void)
{
	int cpu;

	cpu = psci_get_cpu_id();

	psci_cpu_off_common();
	psci_set_state(cpu, PSCI_AFFINITY_LEVEL_OFF);

	imx_enable_cpu_ca7(cpu, false);
	imx_gpcv2_set_core_power(cpu, false);
	/*
	 * Use the CPU jump argument register to synchronize with
	 * psci_affinity_info(), which runs on CPU0 and kills this CPU.
	 */
	writel(IMX_CPU_SYNC_OFF, imx_cpu_gpr_para_offset(cpu));

	while (1)
		wfi();
}

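/*
 * PSCI SYSTEM_RESET: enable the WDOG1 clocks and enable the watchdog
 * so that it expires and resets the SoC; park in wfi until the reset
 * takes effect.
 */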
__secure void psci_system_reset(void)
{
	struct wdog_regs *wdog = (struct wdog_regs *)WDOG1_BASE_ADDR;

	/* make sure WDOG1 clock is enabled */
	writel(0x1 << 28, CCM_BASE_ADDR + CCM_ROOT_WDOG);
	writel(0x3, CCM_BASE_ADDR + CCM_CCGR_WDOG1);
	writew(WCR_WDE, &wdog->wcr);

	while (1)
		wfi();
}

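/*
 * PSCI SYSTEM_OFF: enable the SNVS clock and set the DP_EN and TOP
 * bits in SNVS_LPCR to request system power-off, then wait for power
 * to drop.
 */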
__secure void psci_system_off(void)
{
	u32 val;

	/* make sure SNVS clock is enabled */
	writel(0x3, CCM_BASE_ADDR + CCM_CCGR_SNVS);

	val = readl(SNVS_BASE_ADDR + SNVS_LPCR);
	val |= BP_SNVS_LPCR_DP_EN | BP_SNVS_LPCR_TOP;
	writel(val, SNVS_BASE_ADDR + SNVS_LPCR);

	while (1)
		wfi();
}

__secure u32 psci_version(void)
{
	return ARM_PSCI_VER_1_0;
}

__secure s32 psci_cpu_suspend(u32 __always_unused function_id, u32 power_state,
			      u32 entry_point_address,
			      u32 context_id)
{
	return ARM_PSCI_RET_INVAL;
}

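/*
 * PSCI AFFINITY_INFO: report the bookkeeping state of a core. If the
 * target core has signalled IMX_CPU_SYNC_OFF from psci_cpu_off(), it
 * is finally disabled and power-gated here, on the calling core.
 */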
__secure s32 psci_affinity_info(u32 __always_unused function_id,
				u32 target_affinity,
				u32 lowest_affinity_level)
{
	u32 cpu = target_affinity & MPIDR_AFF0;

	if (lowest_affinity_level > 0)
		return ARM_PSCI_RET_INVAL;

	if (target_affinity & ~MPIDR_AFF0)
		return ARM_PSCI_RET_INVAL;

	if (cpu >= IMX7D_PSCI_NR_CPUS)
		return ARM_PSCI_RET_INVAL;

	/* The CPU is parked in wfi and waiting to be killed */
	if (readl(imx_cpu_gpr_para_offset(cpu)) == IMX_CPU_SYNC_OFF) {
		imx_enable_cpu_ca7(cpu, false);
		imx_gpcv2_set_core_power(cpu, false);
		writel(IMX_CPU_SYNC_ON, imx_cpu_gpr_para_offset(cpu));
	}

	return psci_state[cpu];
}

__secure u32 psci_migrate_info_type(void)
{
	/* Trusted OS is either not present or does not require migration */
	return 2;
}

__secure s32 psci_features(u32 __always_unused function_id, u32 psci_fid)
{
	switch (psci_fid) {
	case ARM_PSCI_0_2_FN_PSCI_VERSION:
	case ARM_PSCI_0_2_FN_CPU_OFF:
	case ARM_PSCI_0_2_FN_CPU_ON:
	case ARM_PSCI_0_2_FN_AFFINITY_INFO:
	case ARM_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case ARM_PSCI_0_2_FN_SYSTEM_OFF:
	case ARM_PSCI_0_2_FN_SYSTEM_RESET:
	case ARM_PSCI_1_0_FN_PSCI_FEATURES:
	case ARM_PSCI_1_0_FN_SYSTEM_SUSPEND:
		return 0x0;
	}
	return ARM_PSCI_RET_NI;
}

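/*
 * Program the GPC A7 low-power mode (RUN/WAIT/STOP) via LPCR_A7_BSC
 * and SLPCR, and mask/unmask the IOMUX IRQ in GPC_IMR1_CORE0 as part
 * of the low-power sequencing workaround documented in the function
 * body.
 */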
static __secure void imx_gpcv2_set_lpm_mode(enum mxc_cpu_pwr_mode mode)
{
	u32 val1, val2, val3;

	val1 = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC);
	val2 = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/* The LPM settings of all cores must be the same */
	val1 &= ~(BM_LPCR_A7_BSC_LPM0 | BM_LPCR_A7_BSC_LPM1);
	val1 |= BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;

	val2 &= ~(BM_SLPCR_EN_DSM | BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN |
		  BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY);
	/*
	 * GPC: When an improper low-power sequence is used, the SoC
	 * enters low-power mode before the ARM core executes WFI.
	 *
	 * Software workaround:
	 * 1) Software should trigger IRQ #32 (IOMUX) to be always pending
	 *    by setting IOMUX_GPR1_IRQ.
	 * 2) Software should then unmask IRQ #32 in GPC before setting GPC
	 *    Low-Power mode.
	 * 3) Software should mask IRQ #32 right after GPC Low-Power mode
	 *    is set.
	 */
	switch (mode) {
	case RUN:
		val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		val3 &= ~0x1;
		writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		break;
	case WAIT:
		val1 |= A7_LPM_WAIT << BP_LPCR_A7_BSC_LPM0;
		val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
		val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		val3 &= ~0x1;
		writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		break;
	case STOP:
		val1 |= A7_LPM_STOP << BP_LPCR_A7_BSC_LPM0;
		val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
		val2 |= BM_SLPCR_EN_DSM;
		val2 |= BM_SLPCR_SBYOS;
		val2 |= BM_SLPCR_VSTBY;
		val2 |= BM_SLPCR_BYPASS_PMIC_READY;
		val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		val3 |= 0x1;
		writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		break;
	default:
		return;
	}
	writel(val1, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC);
	writel(val2, GPC_IPS_BASE_ADDR + GPC_SLPCR);
}

static __secure void imx_gpcv2_set_plat_power_gate_by_lpm(bool pdn)
{
	u32 val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);

	val &= ~(BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE);
	if (pdn)
		val |= BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE;

	writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
}

static __secure void imx_gpcv2_set_cpu_power_gate_by_lpm(u32 cpu, bool pdn)
{
	u32 val;

	val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
	if (cpu == 0) {
		if (pdn)
			val |= BM_LPCR_A7_AD_EN_C0_PDN |
			       BM_LPCR_A7_AD_EN_C0_PUP;
		else
			val &= ~(BM_LPCR_A7_AD_EN_C0_PDN |
				 BM_LPCR_A7_AD_EN_C0_PUP);
	}
	if (cpu == 1) {
		if (pdn)
			val |= BM_LPCR_A7_AD_EN_C1_PDN |
			       BM_LPCR_A7_AD_EN_C1_PUP;
		else
			val &= ~(BM_LPCR_A7_AD_EN_C1_PDN |
				 BM_LPCR_A7_AD_EN_C1_PUP);
	}
	writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
}

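/*
 * Configure one GPC power sequencing slot for the given domain
 * (mode selects the power-down or power-up half of the sequence) and,
 * if requested, select that domain as the source of the corresponding
 * acknowledge instead of the dummy ack.
 */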
static __secure void imx_gpcv2_set_slot_ack(u32 index, enum imx_gpc_slot m_core,
					    bool mode, bool ack)
{
	u32 val;

	if (index >= MAX_SLOT_NUMBER)
		return;

	/* set slot */
	writel(readl(GPC_IPS_BASE_ADDR + GPC_SLOT0_CFG + index * 4) |
	       ((mode + 1) << (m_core * 2)),
	       GPC_IPS_BASE_ADDR + GPC_SLOT0_CFG + index * 4);

	if (ack) {
		/* set ack */
		val = readl(GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7);
		/* clear dummy ack */
		val &= ~(mode ? BM_GPC_PGC_ACK_SEL_A7_PU_DUMMY_ACK :
			 BM_GPC_PGC_ACK_SEL_A7_PD_DUMMY_ACK);
		val |= 1 << (m_core + (mode ? 16 : 0));
		writel(val, GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7);
	}
}

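/*
 * imx_system_counter_suspend()/_resume() toggle the frequency change
 * request bits in the system counter control register so the counter
 * switches to its alternate clock across suspend and back to the base
 * clock on resume.
 */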
static __secure void imx_system_counter_resume(void)
{
	u32 val;

	val = readl(SYSCNT_CTRL_IPS_BASE_ADDR);
	val &= ~BM_SYS_COUNTER_CNTCR_FCR1;
	val |= BM_SYS_COUNTER_CNTCR_FCR0;
	writel(val, SYSCNT_CTRL_IPS_BASE_ADDR);
}

static __secure void imx_system_counter_suspend(void)
{
	u32 val;

	val = readl(SYSCNT_CTRL_IPS_BASE_ADDR);
	val &= ~BM_SYS_COUNTER_CNTCR_FCR0;
	val |= BM_SYS_COUNTER_CNTCR_FCR1;
	writel(val, SYSCNT_CTRL_IPS_BASE_ADDR);
}

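/*
 * Reinitialize the GIC distributor on resume, after the platform (SCU)
 * power domain was gated during suspend.
 */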
static __secure void gic_resume(void)
{
	u32 itlinesnr, i;
	u32 gic_dist_addr = GIC400_ARB_BASE_ADDR + GIC_DIST_OFFSET;

	/* enable the GIC distributor */
	writel(readl(gic_dist_addr + GICD_CTLR) | 0x03,
	       gic_dist_addr + GICD_CTLR);

	/* TYPER[4:0] contains an encoded number of available interrupts */
	itlinesnr = readl(gic_dist_addr + GICD_TYPER) & 0x1f;

	/*
	 * Set all bits in the GIC group registers to one to allow access
	 * from non-secure state. The first 32 interrupts are private per
	 * CPU and will be set later when enabling the GIC for each core.
	 */
	for (i = 1; i <= itlinesnr; i++)
		writel((u32)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i);
}

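/*
 * imx_pll_suspend()/_resume() set and clear the anatop PLL override
 * bits so that the PLLs follow the low-power mode control while the
 * SoC is suspended.
 */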
static inline void imx_pll_suspend(void)
{
	writel(BM_ANATOP_ARM_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_ARM_PLL + REG_SET);
	writel(BM_ANATOP_DDR_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_DDR_PLL + REG_SET);
	writel(BM_ANATOP_SYS_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_SYS_PLL + REG_SET);
	writel(BM_ANATOP_ENET_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_ENET_PLL + REG_SET);
	writel(BM_ANATOP_AUDIO_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_AUDIO_PLL + REG_SET);
	writel(BM_ANATOP_VIDEO_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_VIDEO_PLL + REG_SET);
}

static inline void imx_pll_resume(void)
{
	writel(BM_ANATOP_ARM_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_ARM_PLL + REG_CLR);
	writel(BM_ANATOP_DDR_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_DDR_PLL + REG_CLR);
	writel(BM_ANATOP_SYS_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_SYS_PLL + REG_CLR);
	writel(BM_ANATOP_ENET_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_ENET_PLL + REG_CLR);
	writel(BM_ANATOP_AUDIO_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_AUDIO_PLL + REG_CLR);
	writel(BM_ANATOP_VIDEO_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_VIDEO_PLL + REG_CLR);
}

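/*
 * Self-contained busy-wait delay for the secure PSCI code, based on
 * the ARM generic timer: CNTFRQ gives the timer frequency and CNTPCT
 * the current count.
 */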
static inline void imx_udelay(u32 usec)
{
	u32 freq;
	u64 start, end;

	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (freq));
	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (start));
	do {
		asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (end));
		if ((end - start) > usec * (freq / 1000000))
			break;
	} while (1);
}

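/*
 * Put the DDR controller into self-refresh: wait for the controller
 * ports to go idle, request self-refresh, wait until the controller
 * reports it, and then allow the DRAM clock to be gated.
 */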
static inline void imx_ddrc_enter_self_refresh(void)
{
	writel(0, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
	while (readl(DDRC_IPS_BASE_ADDR + DDRC_PSTAT) & 0x10001)
		;

	writel(0x20, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
	while ((readl(DDRC_IPS_BASE_ADDR + DDRC_STAT) & 0x23) != 0x23)
		;
	writel(readl(DDRC_IPS_BASE_ADDR + DDRC_PWRCTL) | 0x8,
	       DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
}

static inline void imx_ddrc_exit_self_refresh(void)
{
	writel(0, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
	while ((readl(DDRC_IPS_BASE_ADDR + DDRC_STAT) & 0x3) == 0x3)
		;
	writel(readl(DDRC_IPS_BASE_ADDR + DDRC_PWRCTL) | 0x1,
	       DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
}

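/*
 * Resume path executed after wakeup from SYSTEM_SUSPEND (reached via
 * psci_system_resume): bring DDR out of self-refresh, restore the
 * system counter, GPC and RBC settings, reinitialize the GIC and
 * non-secure state, and jump back to the saved non-secure entry point
 * through psci_cpu_entry().
 */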
__secure void imx_system_resume(void)
{
	unsigned int i, val, imr[4], entry;

	entry = psci_get_target_pc(0);
	imx_ddrc_exit_self_refresh();
	imx_system_counter_resume();
	imx_gpcv2_set_lpm_mode(RUN);
	imx_gpcv2_set_cpu_power_gate_by_lpm(0, false);
	imx_gpcv2_set_plat_power_gate_by_lpm(false);
	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C0);
	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_SCU);

	/*
	 * All interrupts need to be masked in the GPC before the RBC
	 * configuration is changed.
	 */
	for (i = 0; i < 4; i++) {
		imr[i] = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);
		writel(~0, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);
	}

	/* clear the RBC enable bit */
	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
	val &= ~BM_SLPCR_RBC_EN;
	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/* clear the RBC count */
	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
	val &= ~BM_SLPCR_REG_BYPASS_COUNT;
	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/*
	 * A delay of at least 2 CKIL (32 kHz) cycles (~61us) is required
	 * by the hardware design; use 65us to be safe.
	 */
	imx_udelay(65);

	/* restore GPC interrupt mask settings */
	for (i = 0; i < 4; i++)
		writel(imr[i], GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);

	/* initialize gic distributor */
	gic_resume();
	_nonsec_init();

	/* save cpu0 entry */
	psci_save(0, entry, 0);
	psci_cpu_entry();
}

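/*
 * PSCI SYSTEM_SUSPEND: save the non-secure resume entry, hand the PLLs
 * and system counter over to low-power control, program the GPC for
 * STOP mode with core0/SCU power gating, flush caches, put DDR into
 * self-refresh, apply the e10133 GIC/RBC workaround and finally wfi.
 * The resume address (psci_system_resume) and stack pointer are parked
 * in SRC_GPR1/GPR2 for the wakeup path.
 */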
__secure void psci_system_suspend(u32 __always_unused function_id,
				  u32 ep, u32 context_id)
{
	u32 gpc_mask[4];
	u32 i, val;

	psci_save(0, ep, context_id);
	/* overwrite PLL to be controlled by low power mode */
	imx_pll_suspend();
	imx_system_counter_suspend();
	/* set CA7 platform to enter STOP mode */
	imx_gpcv2_set_lpm_mode(STOP);
	/* enable core0/scu power down/up with low power mode */
	imx_gpcv2_set_cpu_power_gate_by_lpm(0, true);
	imx_gpcv2_set_plat_power_gate_by_lpm(true);
	/* time slot settings for core0 and scu */
	imx_gpcv2_set_slot_ack(0, CORE0_A7, false, false);
	imx_gpcv2_set_slot_ack(1, SCU_A7, false, true);
	imx_gpcv2_set_slot_ack(5, SCU_A7, true, false);
	imx_gpcv2_set_slot_ack(6, CORE0_A7, true, true);
	imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C0);
	imx_gpcv2_set_m_core_pgc(true, GPC_PGC_SCU);
	psci_v7_flush_dcache_all();

	imx_ddrc_enter_self_refresh();

	/*
	 * e10133: ARM: Boot failure after A7 enters into
	 * low-power idle mode
	 *
	 * Workaround:
	 * If both CPU0/CPU1 are IDLE, the last IDLE CPU should disable
	 * the GIC first, then use REG_BYPASS_COUNTER to mask the wakeup
	 * INT, and then execute "wfi" to bring the system into power-down
	 * processing safely. The counter must be enabled as close to the
	 * "wfi" state as possible. The following equation can be used to
	 * determine the RBC counter value:
	 * RBC_COUNT * (1/32K RTC frequency) >=
	 * (46 + PDNSCR_SW + PDNSCR_SW2ISO) * (1/IPG_CLK frequency).
	 */

	/* disable GIC distributor */
	writel(0, GIC400_ARB_BASE_ADDR + GIC_DIST_OFFSET);

	for (i = 0; i < 4; i++)
		gpc_mask[i] = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);

	/*
	 * Enable the RBC bypass counter here to hold off the interrupts.
	 * RBC counter = 8 (240us). With this setting, the latency from
	 * wakeup interrupt to ARM power up is ~250us.
	 */
	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
	val &= ~(0x3f << 24);
	val |= (0x8 << 24);
	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/* enable the counter. */
	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
	val |= (1 << 30);
	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/* unmask all the GPC interrupts. */
	for (i = 0; i < 4; i++)
		writel(gpc_mask[i], GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);

	/*
	 * Now delay for a short while (3us); the ARM core runs at 1 GHz
	 * at this point, so a short loop is enough. This delay is required
	 * to ensure that the RBC counter can start counting in case an
	 * interrupt is already pending or arrives just as the ARM core is
	 * about to assert DSM_request.
	 */
	imx_udelay(3);

	/* save resume entry and sp in CPU0 GPR registers */
	asm volatile("mov %0, sp" : "=r" (val));
	writel((u32)psci_system_resume, SRC_BASE_ADDR + SRC_GPR1_MX7D);
	writel(val, SRC_BASE_ADDR + SRC_GPR2_MX7D);

	/* sleep */
	while (1)
		wfi();
}