// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
 * Copyright 2017 NXP
 */

#include <cpu_func.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/psci.h>
#include <asm/secure.h>
#include <asm/arch/imx-regs.h>
#include <asm/armv7.h>
#include <asm/gic.h>
#include <linux/bitops.h>
#include <common.h>
#include <fsl_wdog.h>

#define GPC_LPCR_A7_BSC		0x0
#define GPC_LPCR_A7_AD		0x4
#define GPC_SLPCR		0x14
#define GPC_PGC_ACK_SEL_A7	0x24
#define GPC_IMR1_CORE0		0x30
#define GPC_SLOT0_CFG		0xb0
#define GPC_CPU_PGC_SW_PUP_REQ	0xf0
#define GPC_CPU_PGC_SW_PDN_REQ	0xfc
#define GPC_PGC_C0		0x800
#define GPC_PGC_C1		0x840
#define GPC_PGC_SCU		0x880

#define BM_LPCR_A7_BSC_CPU_CLK_ON_LPM	0x4000
#define BM_LPCR_A7_BSC_LPM1		0xc
#define BM_LPCR_A7_BSC_LPM0		0x3
#define BP_LPCR_A7_BSC_LPM0		0
#define BM_SLPCR_EN_DSM			0x80000000
#define BM_SLPCR_RBC_EN			0x40000000
#define BM_SLPCR_REG_BYPASS_COUNT	0x3f000000
#define BM_SLPCR_VSTBY			0x4
#define BM_SLPCR_SBYOS			0x2
#define BM_SLPCR_BYPASS_PMIC_READY	0x1
#define BM_LPCR_A7_AD_L2PGE		0x10000
#define BM_LPCR_A7_AD_EN_C1_PUP		0x800
#define BM_LPCR_A7_AD_EN_C0_PUP		0x200
#define BM_LPCR_A7_AD_EN_PLAT_PDN	0x10
#define BM_LPCR_A7_AD_EN_C1_PDN		0x8
#define BM_LPCR_A7_AD_EN_C0_PDN		0x2

#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE0_A7	0x1
#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7	0x2

#define BM_GPC_PGC_ACK_SEL_A7_PD_DUMMY_ACK	0x8000
#define BM_GPC_PGC_ACK_SEL_A7_PU_DUMMY_ACK	0x80000000

#define MAX_SLOT_NUMBER		10
#define A7_LPM_WAIT		0x5
#define A7_LPM_STOP		0xa

#define BM_SYS_COUNTER_CNTCR_FCR1	0x200
#define BM_SYS_COUNTER_CNTCR_FCR0	0x100

#define REG_SET		0x4
#define REG_CLR		0x8

#define ANADIG_ARM_PLL		0x60
#define ANADIG_DDR_PLL		0x70
#define ANADIG_SYS_PLL		0xb0
#define ANADIG_ENET_PLL		0xe0
#define ANADIG_AUDIO_PLL	0xf0
#define ANADIG_VIDEO_PLL	0x130
#define BM_ANATOP_ARM_PLL_OVERRIDE	BIT(20)
#define BM_ANATOP_DDR_PLL_OVERRIDE	BIT(19)
#define BM_ANATOP_SYS_PLL_OVERRIDE	(0x1ff << 17)
#define BM_ANATOP_ENET_PLL_OVERRIDE	BIT(13)
#define BM_ANATOP_AUDIO_PLL_OVERRIDE	BIT(24)
#define BM_ANATOP_VIDEO_PLL_OVERRIDE	BIT(24)

#define DDRC_STAT	0x4
#define DDRC_PWRCTL	0x30
#define DDRC_PSTAT	0x3fc

#define SRC_GPR1_MX7D		0x074
#define SRC_GPR2_MX7D		0x078
#define SRC_A7RCR0		0x004
#define SRC_A7RCR1		0x008

#define BP_SRC_A7RCR0_A7_CORE_RESET0	0
#define BP_SRC_A7RCR1_A7_CORE1_ENABLE	1

#define SNVS_LPCR		0x38
#define BP_SNVS_LPCR_DP_EN	0x20
#define BP_SNVS_LPCR_TOP	0x40

#define CCM_CCGR_SNVS		0x4250

#define CCM_ROOT_WDOG		0xbb80
#define CCM_CCGR_WDOG1		0x49c0

#define MPIDR_AFF0		GENMASK(7, 0)

#define IMX7D_PSCI_NR_CPUS	2
#if IMX7D_PSCI_NR_CPUS > CONFIG_ARMV7_PSCI_NR_CPUS
#error "invalid value for CONFIG_ARMV7_PSCI_NR_CPUS"
#endif

#define imx_cpu_gpr_entry_offset(cpu) \
	(SRC_BASE_ADDR + SRC_GPR1_MX7D + cpu * 8)
#define imx_cpu_gpr_para_offset(cpu) \
	(imx_cpu_gpr_entry_offset(cpu) + 4)

#define IMX_CPU_SYNC_OFF	~0
#define IMX_CPU_SYNC_ON		0

u8 psci_state[IMX7D_PSCI_NR_CPUS] __secure_data = {
	PSCI_AFFINITY_LEVEL_ON,
	PSCI_AFFINITY_LEVEL_OFF};

enum imx_gpc_slot {
	CORE0_A7,
	CORE1_A7,
	SCU_A7,
	FAST_MEGA_MIX,
	MIPI_PHY,
	PCIE_PHY,
	USB_OTG1_PHY,
	USB_OTG2_PHY,
	USB_HSIC_PHY,
	CORE0_M4,
};

enum mxc_cpu_pwr_mode {
	RUN,
	WAIT,
	STOP,
};

extern void psci_system_resume(void);

static inline void psci_set_state(int cpu, u8 state)
{
	psci_state[cpu] = state;
	dsb();
	isb();
}

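/*
 * Enable or disable the Power Gating Controller (PGC) for the power
 * domain whose PGC control register lives at the given offset in the
 * GPC block (GPC_PGC_C0/C1 for the A7 cores, GPC_PGC_SCU for the
 * SCU/platform domain).
 */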
static inline void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
{
	writel(enable, GPC_IPS_BASE_ADDR + offset);
}

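/*
 * Power an A7 core's power domain up or down through the GPC: enable
 * the core's PGC, issue the software power-up/power-down request,
 * busy-wait until the GPC clears the request bit, then disable the
 * PGC again.
 */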
__secure void imx_gpcv2_set_core_power(int cpu, bool pdn)
{
	u32 reg = pdn ? GPC_CPU_PGC_SW_PUP_REQ : GPC_CPU_PGC_SW_PDN_REQ;
	u32 pgc = cpu ? GPC_PGC_C1 : GPC_PGC_C0;
	u32 pdn_pup_req = cpu ? BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7 :
		BM_CPU_PGC_SW_PDN_PUP_REQ_CORE0_A7;
	u32 val;

	imx_gpcv2_set_m_core_pgc(true, pgc);

	val = readl(GPC_IPS_BASE_ADDR + reg);
	val |= pdn_pup_req;
	writel(val, GPC_IPS_BASE_ADDR + reg);

	while ((readl(GPC_IPS_BASE_ADDR + reg) & pdn_pup_req) != 0)
		;

	imx_gpcv2_set_m_core_pgc(false, pgc);
}

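/*
 * Assert or release the core enable control in SRC_A7RCR1. On i.MX7D
 * only core 1 is expected here, as core 0 is the boot core.
 */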
__secure void imx_enable_cpu_ca7(int cpu, bool enable)
{
	u32 mask, val;

	mask = 1 << (BP_SRC_A7RCR1_A7_CORE1_ENABLE + cpu - 1);
	val = readl(SRC_BASE_ADDR + SRC_A7RCR1);
	val = enable ? val | mask : val & ~mask;
	writel(val, SRC_BASE_ADDR + SRC_A7RCR1);
}

__secure void psci_arch_cpu_entry(void)
{
	u32 cpu = psci_get_cpu_id();

	psci_set_state(cpu, PSCI_AFFINITY_LEVEL_ON);
}

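/*
 * PSCI CPU_ON: validate the target MPIDR, save the non-secure entry
 * point and context ID, point the core's SRC GPR at psci_cpu_entry,
 * then power up and release the core.
 */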
__secure s32 psci_cpu_on(u32 __always_unused function_id, u32 mpidr, u32 ep,
			 u32 context_id)
{
	u32 cpu = mpidr & MPIDR_AFF0;

	if (mpidr & ~MPIDR_AFF0)
		return ARM_PSCI_RET_INVAL;

	if (cpu >= IMX7D_PSCI_NR_CPUS)
		return ARM_PSCI_RET_INVAL;

	if (psci_state[cpu] == PSCI_AFFINITY_LEVEL_ON)
		return ARM_PSCI_RET_ALREADY_ON;

	if (psci_state[cpu] == PSCI_AFFINITY_LEVEL_ON_PENDING)
		return ARM_PSCI_RET_ON_PENDING;

	psci_save(cpu, ep, context_id);

	writel((u32)psci_cpu_entry, imx_cpu_gpr_entry_offset(cpu));

	psci_set_state(cpu, PSCI_AFFINITY_LEVEL_ON_PENDING);

	imx_gpcv2_set_core_power(cpu, true);
	imx_enable_cpu_ca7(cpu, true);

	return ARM_PSCI_RET_SUCCESS;
}

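/*
 * PSCI CPU_OFF, executed on the core that is turning itself off: mark
 * the core as off, disable and power-gate it, and signal
 * psci_affinity_info() on the other core via the GPR parameter
 * register before parking in wfi.
 */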
__secure s32 psci_cpu_off(void)
{
	int cpu;

	cpu = psci_get_cpu_id();

	psci_cpu_off_common();
	psci_set_state(cpu, PSCI_AFFINITY_LEVEL_OFF);

	imx_enable_cpu_ca7(cpu, false);
	imx_gpcv2_set_core_power(cpu, false);
	/*
	 * We use the cpu jumping argument register to sync with
	 * psci_affinity_info() which is running on cpu0 to kill the cpu.
	 */
	writel(IMX_CPU_SYNC_OFF, imx_cpu_gpr_para_offset(cpu));

	while (1)
		wfi();
}

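/*
 * PSCI SYSTEM_RESET: ungate the WDOG1 clocks and enable the watchdog
 * so that it expires and resets the SoC, then wait for the reset.
 */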
__secure void psci_system_reset(void)
{
	struct wdog_regs *wdog = (struct wdog_regs *)WDOG1_BASE_ADDR;

	/* make sure WDOG1 clock is enabled */
	writel(0x1 << 28, CCM_BASE_ADDR + CCM_ROOT_WDOG);
	writel(0x3, CCM_BASE_ADDR + CCM_CCGR_WDOG1);
	writew(WCR_WDE, &wdog->wcr);

	while (1)
		wfi();
}

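/*
 * PSCI SYSTEM_OFF: ungate the SNVS clock and set the DP_EN/TOP bits in
 * SNVS_LPCR to request that system power be switched off via the PMIC.
 */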
__secure void psci_system_off(void)
{
	u32 val;

	/* make sure SNVS clock is enabled */
	writel(0x3, CCM_BASE_ADDR + CCM_CCGR_SNVS);

	val = readl(SNVS_BASE_ADDR + SNVS_LPCR);
	val |= BP_SNVS_LPCR_DP_EN | BP_SNVS_LPCR_TOP;
	writel(val, SNVS_BASE_ADDR + SNVS_LPCR);

	while (1)
		wfi();
}

__secure u32 psci_version(void)
{
	return ARM_PSCI_VER_1_0;
}

__secure s32 psci_cpu_suspend(u32 __always_unused function_id, u32 power_state,
			      u32 entry_point_address,
			      u32 context_id)
{
	return ARM_PSCI_RET_INVAL;
}

__secure s32 psci_affinity_info(u32 __always_unused function_id,
				u32 target_affinity,
				u32 lowest_affinity_level)
{
	u32 cpu = target_affinity & MPIDR_AFF0;

	if (lowest_affinity_level > 0)
		return ARM_PSCI_RET_INVAL;

	if (target_affinity & ~MPIDR_AFF0)
		return ARM_PSCI_RET_INVAL;

	if (cpu >= IMX7D_PSCI_NR_CPUS)
		return ARM_PSCI_RET_INVAL;

	/* The CPU is waiting to be killed */
	if (readl(imx_cpu_gpr_para_offset(cpu)) == IMX_CPU_SYNC_OFF) {
		imx_enable_cpu_ca7(cpu, false);
		imx_gpcv2_set_core_power(cpu, false);
		writel(IMX_CPU_SYNC_ON, imx_cpu_gpr_para_offset(cpu));
	}

	return psci_state[cpu];
}

__secure u32 psci_migrate_info_type(void)
{
	/* Trusted OS is either not present or does not require migration */
	return 2;
}

__secure s32 psci_features(u32 __always_unused function_id, u32 psci_fid)
{
	switch (psci_fid) {
	case ARM_PSCI_0_2_FN_PSCI_VERSION:
	case ARM_PSCI_0_2_FN_CPU_OFF:
	case ARM_PSCI_0_2_FN_CPU_ON:
	case ARM_PSCI_0_2_FN_AFFINITY_INFO:
	case ARM_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case ARM_PSCI_0_2_FN_SYSTEM_OFF:
	case ARM_PSCI_0_2_FN_SYSTEM_RESET:
	case ARM_PSCI_1_0_FN_PSCI_FEATURES:
	case ARM_PSCI_1_0_FN_SYSTEM_SUSPEND:
		return 0x0;
	}
	return ARM_PSCI_RET_NI;
}

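/*
 * Configure the GPC low-power mode (RUN/WAIT/STOP) for the A7 platform
 * by programming the LPM field in LPCR_A7_BSC and the DSM/standby bits
 * in SLPCR, including the IRQ #32 masking workaround described below.
 */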
static __secure void imx_gpcv2_set_lpm_mode(enum mxc_cpu_pwr_mode mode)
{
	u32 val1, val2, val3;

	val1 = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC);
	val2 = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/* The LPM settings of all cores must be the same */
	val1 &= ~(BM_LPCR_A7_BSC_LPM0 | BM_LPCR_A7_BSC_LPM1);
	val1 |= BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;

	val2 &= ~(BM_SLPCR_EN_DSM | BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN |
		  BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY);
	/*
	 * GPC: When an improper low-power sequence is used,
	 * the SoC enters low-power mode before the ARM core executes WFI.
	 *
	 * Software workaround:
	 * 1) Software should trigger IRQ #32 (IOMUX) to be always pending
	 *    by setting IOMUX_GPR1_IRQ.
	 * 2) Software should then unmask IRQ #32 in GPC before setting GPC
	 *    Low-Power mode.
	 * 3) Software should mask IRQ #32 right after GPC Low-Power mode
	 *    is set.
	 */
	switch (mode) {
	case RUN:
		val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		val3 &= ~0x1;
		writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		break;
	case WAIT:
		val1 |= A7_LPM_WAIT << BP_LPCR_A7_BSC_LPM0;
		val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
		val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		val3 &= ~0x1;
		writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		break;
	case STOP:
		val1 |= A7_LPM_STOP << BP_LPCR_A7_BSC_LPM0;
		val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
		val2 |= BM_SLPCR_EN_DSM;
		val2 |= BM_SLPCR_SBYOS;
		val2 |= BM_SLPCR_VSTBY;
		val2 |= BM_SLPCR_BYPASS_PMIC_READY;
		val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		val3 |= 0x1;
		writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		break;
	default:
		return;
	}
	writel(val1, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC);
	writel(val2, GPC_IPS_BASE_ADDR + GPC_SLPCR);
}

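/*
 * The two helpers below select what the GPC may power down
 * automatically on low-power mode entry: the A7 platform and L2 cache
 * (imx_gpcv2_set_plat_power_gate_by_lpm) and the individual cores
 * (imx_gpcv2_set_cpu_power_gate_by_lpm).
 */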
static __secure void imx_gpcv2_set_plat_power_gate_by_lpm(bool pdn)
{
	u32 val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);

	val &= ~(BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE);
	if (pdn)
		val |= BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE;

	writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
}

static __secure void imx_gpcv2_set_cpu_power_gate_by_lpm(u32 cpu, bool pdn)
{
	u32 val;

	val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
	if (cpu == 0) {
		if (pdn)
			val |= BM_LPCR_A7_AD_EN_C0_PDN |
				BM_LPCR_A7_AD_EN_C0_PUP;
		else
			val &= ~(BM_LPCR_A7_AD_EN_C0_PDN |
				 BM_LPCR_A7_AD_EN_C0_PUP);
	}
	if (cpu == 1) {
		if (pdn)
			val |= BM_LPCR_A7_AD_EN_C1_PDN |
				BM_LPCR_A7_AD_EN_C1_PUP;
		else
			val &= ~(BM_LPCR_A7_AD_EN_C1_PDN |
				 BM_LPCR_A7_AD_EN_C1_PUP);
	}
	writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
}

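/*
 * Program one of the GPC power up/down sequence time slots: index
 * selects the slot, m_core the power domain, and mode the phase
 * (false = power-down, true = power-up). When ack is set, the
 * domain's own handshake is used as the slot acknowledge instead of
 * the dummy ack.
 */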
static __secure void imx_gpcv2_set_slot_ack(u32 index, enum imx_gpc_slot m_core,
					    bool mode, bool ack)
{
	u32 val;

	if (index >= MAX_SLOT_NUMBER)
		return;

	/* set slot */
	writel(readl(GPC_IPS_BASE_ADDR + GPC_SLOT0_CFG + index * 4) |
	       ((mode + 1) << (m_core * 2)),
	       GPC_IPS_BASE_ADDR + GPC_SLOT0_CFG + index * 4);

	if (ack) {
		/* set ack */
		val = readl(GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7);
		/* clear dummy ack */
		val &= ~(mode ? BM_GPC_PGC_ACK_SEL_A7_PU_DUMMY_ACK :
			 BM_GPC_PGC_ACK_SEL_A7_PD_DUMMY_ACK);
		val |= 1 << (m_core + (mode ? 16 : 0));
		writel(val, GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7);
	}
}

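/*
 * Request a system counter frequency change via CNTCR.FCR0/FCR1:
 * suspend switches the counter to its alternate low-power clock,
 * resume switches it back to the base clock.
 */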
static __secure void imx_system_counter_resume(void)
{
	u32 val;

	val = readl(SYSCNT_CTRL_IPS_BASE_ADDR);
	val &= ~BM_SYS_COUNTER_CNTCR_FCR1;
	val |= BM_SYS_COUNTER_CNTCR_FCR0;
	writel(val, SYSCNT_CTRL_IPS_BASE_ADDR);
}

static __secure void imx_system_counter_suspend(void)
{
	u32 val;

	val = readl(SYSCNT_CTRL_IPS_BASE_ADDR);
	val &= ~BM_SYS_COUNTER_CNTCR_FCR0;
	val |= BM_SYS_COUNTER_CNTCR_FCR1;
	writel(val, SYSCNT_CTRL_IPS_BASE_ADDR);
}

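/*
 * Re-initialise the GIC-400 distributor after the A7 platform (and
 * with it the GIC) lost power during suspend; the remaining GIC/CPU
 * state is restored by _nonsec_init() in the resume path.
 */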
static __secure void gic_resume(void)
{
	u32 itlinesnr, i;
	u32 gic_dist_addr = GIC400_ARB_BASE_ADDR + GIC_DIST_OFFSET;

	/* enable the GIC distributor */
	writel(readl(gic_dist_addr + GICD_CTLR) | 0x03,
	       gic_dist_addr + GICD_CTLR);

	/* TYPER[4:0] contains an encoded number of available interrupts */
	itlinesnr = readl(gic_dist_addr + GICD_TYPER) & 0x1f;

	/*
	 * Set all bits in the GIC group registers to one to allow access
	 * from non-secure state. The first 32 interrupts are private per
	 * CPU and will be set later when enabling the GIC for each core.
	 */
	for (i = 1; i <= itlinesnr; i++)
		writel((u32)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i);
}

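/*
 * Set (suspend) or clear (resume) the Anatop PLL override bits so the
 * low-power mode logic is allowed to take the PLLs down while the SoC
 * is in DSM.
 */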
static inline void imx_pll_suspend(void)
{
	writel(BM_ANATOP_ARM_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_ARM_PLL + REG_SET);
	writel(BM_ANATOP_DDR_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_DDR_PLL + REG_SET);
	writel(BM_ANATOP_SYS_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_SYS_PLL + REG_SET);
	writel(BM_ANATOP_ENET_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_ENET_PLL + REG_SET);
	writel(BM_ANATOP_AUDIO_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_AUDIO_PLL + REG_SET);
	writel(BM_ANATOP_VIDEO_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_VIDEO_PLL + REG_SET);
}

static inline void imx_pll_resume(void)
{
	writel(BM_ANATOP_ARM_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_ARM_PLL + REG_CLR);
	writel(BM_ANATOP_DDR_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_DDR_PLL + REG_CLR);
	writel(BM_ANATOP_SYS_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_SYS_PLL + REG_CLR);
	writel(BM_ANATOP_ENET_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_ENET_PLL + REG_CLR);
	writel(BM_ANATOP_AUDIO_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_AUDIO_PLL + REG_CLR);
	writel(BM_ANATOP_VIDEO_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_VIDEO_PLL + REG_CLR);
}

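/*
 * Busy-wait delay based on the ARM architected timer (CNTFRQ/CNTPCT),
 * used because no timer framework is available in this secure
 * suspend/resume path.
 */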
static inline void imx_udelay(u32 usec)
{
	u32 freq;
	u64 start, end;

	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (freq));
	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (start));
	do {
		asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (end));
		if ((end - start) > usec * (freq / 1000000))
			break;
	} while (1);
}

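/*
 * Self-refresh entry/exit for the DDR controller around DSM: stop new
 * transactions, wait for the ports to go idle, then poll the operating
 * mode in DDRC_STAT while adjusting DDRC_PWRCTL accordingly.
 */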
static inline void imx_ddrc_enter_self_refresh(void)
{
	writel(0, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
	while (readl(DDRC_IPS_BASE_ADDR + DDRC_PSTAT) & 0x10001)
		;

	writel(0x20, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
	while ((readl(DDRC_IPS_BASE_ADDR + DDRC_STAT) & 0x23) != 0x23)
		;
	writel(readl(DDRC_IPS_BASE_ADDR + DDRC_PWRCTL) | 0x8,
	       DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
}

static inline void imx_ddrc_exit_self_refresh(void)
{
	writel(0, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
	while ((readl(DDRC_IPS_BASE_ADDR + DDRC_STAT) & 0x3) == 0x3)
		;
	writel(readl(DDRC_IPS_BASE_ADDR + DDRC_PWRCTL) | 0x1,
	       DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
}

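/*
 * Secure resume path reached through the psci_system_resume stub whose
 * address psci_system_suspend() stored in SRC_GPR1: bring DDR, the
 * system counter, the GPC and the GIC back to run-time state, then
 * re-enter the saved non-secure entry point for CPU0 via
 * psci_cpu_entry().
 */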
__secure void imx_system_resume(void)
{
	unsigned int i, val, imr[4], entry;

	entry = psci_get_target_pc(0);
	imx_ddrc_exit_self_refresh();
	imx_system_counter_resume();
	imx_gpcv2_set_lpm_mode(RUN);
	imx_gpcv2_set_cpu_power_gate_by_lpm(0, false);
	imx_gpcv2_set_plat_power_gate_by_lpm(false);
	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C0);
	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_SCU);

	/*
	 * All interrupts need to be masked in the GPC before
	 * operating on the RBC configuration.
	 */
	for (i = 0; i < 4; i++) {
		imr[i] = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);
		writel(~0, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);
	}

	/* configure RBC enable bit */
	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
	val &= ~BM_SLPCR_RBC_EN;
	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/* configure RBC count */
	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
	val &= ~BM_SLPCR_REG_BYPASS_COUNT;
	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/*
	 * A delay of at least 2 CKIL (32 kHz) cycles, i.e. ~61 us, is
	 * required by the hardware design; use 65 us to be safe.
	 */
	imx_udelay(65);

	/* restore GPC interrupt mask settings */
	for (i = 0; i < 4; i++)
		writel(imr[i], GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);

	/* initialize gic distributor */
	gic_resume();
	_nonsec_init();

	/* save cpu0 entry */
	psci_save(0, entry, 0);
	psci_cpu_entry();
}

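/*
 * PSCI SYSTEM_SUSPEND: save the non-secure resume entry, prepare the
 * PLLs, system counter, GPC LPM/slot configuration and DDR for deep
 * sleep mode (DSM), apply the e10133 RBC workaround, store the secure
 * resume vector and stack pointer in the SRC GPRs and execute wfi to
 * power down.
 */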
__secure void psci_system_suspend(u32 __always_unused function_id,
				  u32 ep, u32 context_id)
{
	u32 gpc_mask[4];
	u32 i, val;

	psci_save(0, ep, context_id);
	/* overwrite PLL to be controlled by low power mode */
	imx_pll_suspend();
	imx_system_counter_suspend();
	/* set CA7 platform to enter STOP mode */
	imx_gpcv2_set_lpm_mode(STOP);
	/* enable core0/scu power down/up with low power mode */
	imx_gpcv2_set_cpu_power_gate_by_lpm(0, true);
	imx_gpcv2_set_plat_power_gate_by_lpm(true);
	/* time slot settings for core0 and scu */
	imx_gpcv2_set_slot_ack(0, CORE0_A7, false, false);
	imx_gpcv2_set_slot_ack(1, SCU_A7, false, true);
	imx_gpcv2_set_slot_ack(5, SCU_A7, true, false);
	imx_gpcv2_set_slot_ack(6, CORE0_A7, true, true);
	imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C0);
	imx_gpcv2_set_m_core_pgc(true, GPC_PGC_SCU);
	psci_v7_flush_dcache_all();

	imx_ddrc_enter_self_refresh();

	/*
	 * e10133: ARM: Boot failure after A7 enters into
	 * low-power idle mode
	 *
	 * Workaround:
	 * If both CPU0/CPU1 are IDLE, the last IDLE CPU should
	 * disable the GIC first, then use REG_BYPASS_COUNTER to
	 * mask wakeup interrupts, and then execute "wfi" to bring
	 * the system into power down processing safely. The counter
	 * must be enabled as close to the "wfi" state as possible.
	 * The following equation can be used to determine the RBC
	 * counter value:
	 * RBC_COUNT * (1/32K RTC frequency) >=
	 * (46 + PDNSCR_SW + PDNSCR_SW2ISO) * (1/IPG_CLK frequency).
	 */

	/* disable GIC distributor */
	writel(0, GIC400_ARB_BASE_ADDR + GIC_DIST_OFFSET);

	for (i = 0; i < 4; i++)
		gpc_mask[i] = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);

	/*
	 * Enable the RBC bypass counter here to hold off the
	 * interrupts. RBC counter = 8 (240 us). With this setting,
	 * the latency from wakeup interrupt to ARM power up
	 * is ~250 us.
	 */
	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
	val &= ~(0x3f << 24);
	val |= (0x8 << 24);
	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/* enable the counter. */
	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
	val |= (1 << 30);
	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/* unmask all the GPC interrupts. */
	for (i = 0; i < 4; i++)
		writel(gpc_mask[i], GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);

	/*
	 * Now delay for a short while (3 us); the ARM core runs at
	 * 1 GHz at this point, so a short loop is enough. This delay
	 * is required to ensure that the RBC counter can start
	 * counting in case an interrupt is already pending or in case
	 * an interrupt arrives just as ARM is about to assert
	 * DSM_request.
	 */
	imx_udelay(3);

	/* save resume entry and sp in CPU0 GPR registers */
	asm volatile("mov %0, sp" : "=r" (val));
	writel((u32)psci_system_resume, SRC_BASE_ADDR + SRC_GPR1_MX7D);
	writel(val, SRC_BASE_ADDR + SRC_GPR2_MX7D);

	/* sleep */
	while (1)
		wfi();
}