// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
 * Copyright 2017 NXP
 */

#include <cpu_func.h>
#include <asm/io.h>
#include <asm/psci.h>
#include <asm/secure.h>
#include <asm/arch/imx-regs.h>
#include <asm/armv7.h>
#include <asm/gic.h>
#include <linux/bitops.h>
#include <common.h>
#include <fsl_wdog.h>

#define GPC_LPCR_A7_BSC 0x0
#define GPC_LPCR_A7_AD 0x4
#define GPC_SLPCR 0x14
#define GPC_PGC_ACK_SEL_A7 0x24
#define GPC_IMR1_CORE0 0x30
#define GPC_SLOT0_CFG 0xb0
#define GPC_CPU_PGC_SW_PUP_REQ 0xf0
#define GPC_CPU_PGC_SW_PDN_REQ 0xfc
#define GPC_PGC_C0 0x800
#define GPC_PGC_C1 0x840
#define GPC_PGC_SCU 0x880

#define BM_LPCR_A7_BSC_CPU_CLK_ON_LPM 0x4000
#define BM_LPCR_A7_BSC_LPM1 0xc
#define BM_LPCR_A7_BSC_LPM0 0x3
#define BP_LPCR_A7_BSC_LPM0 0
#define BM_SLPCR_EN_DSM 0x80000000
#define BM_SLPCR_RBC_EN 0x40000000
#define BM_SLPCR_REG_BYPASS_COUNT 0x3f000000
#define BM_SLPCR_VSTBY 0x4
#define BM_SLPCR_SBYOS 0x2
#define BM_SLPCR_BYPASS_PMIC_READY 0x1
#define BM_LPCR_A7_AD_L2PGE 0x10000
#define BM_LPCR_A7_AD_EN_C1_PUP 0x800
#define BM_LPCR_A7_AD_EN_C0_PUP 0x200
#define BM_LPCR_A7_AD_EN_PLAT_PDN 0x10
#define BM_LPCR_A7_AD_EN_C1_PDN 0x8
#define BM_LPCR_A7_AD_EN_C0_PDN 0x2

#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE0_A7 0x1
#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7 0x2

#define BM_GPC_PGC_ACK_SEL_A7_PD_DUMMY_ACK 0x8000
#define BM_GPC_PGC_ACK_SEL_A7_PU_DUMMY_ACK 0x80000000

#define MAX_SLOT_NUMBER 10
#define A7_LPM_WAIT 0x5
#define A7_LPM_STOP 0xa

#define BM_SYS_COUNTER_CNTCR_FCR1 0x200
#define BM_SYS_COUNTER_CNTCR_FCR0 0x100

#define REG_SET 0x4
#define REG_CLR 0x8

#define ANADIG_ARM_PLL 0x60
#define ANADIG_DDR_PLL 0x70
#define ANADIG_SYS_PLL 0xb0
#define ANADIG_ENET_PLL 0xe0
#define ANADIG_AUDIO_PLL 0xf0
#define ANADIG_VIDEO_PLL 0x130
#define BM_ANATOP_ARM_PLL_OVERRIDE BIT(20)
#define BM_ANATOP_DDR_PLL_OVERRIDE BIT(19)
#define BM_ANATOP_SYS_PLL_OVERRIDE (0x1ff << 17)
#define BM_ANATOP_ENET_PLL_OVERRIDE BIT(13)
#define BM_ANATOP_AUDIO_PLL_OVERRIDE BIT(24)
#define BM_ANATOP_VIDEO_PLL_OVERRIDE BIT(24)

#define DDRC_STAT 0x4
#define DDRC_PWRCTL 0x30
#define DDRC_PSTAT 0x3fc

#define SRC_GPR1_MX7D 0x074
#define SRC_GPR2_MX7D 0x078
#define SRC_A7RCR0 0x004
#define SRC_A7RCR1 0x008

#define BP_SRC_A7RCR0_A7_CORE_RESET0 0
#define BP_SRC_A7RCR1_A7_CORE1_ENABLE 1

#define SNVS_LPCR 0x38
#define BP_SNVS_LPCR_DP_EN 0x20
#define BP_SNVS_LPCR_TOP 0x40

#define CCM_CCGR_SNVS 0x4250

#define CCM_ROOT_WDOG 0xbb80
#define CCM_CCGR_WDOG1 0x49c0

#define MPIDR_AFF0 GENMASK(7, 0)

#define IMX7D_PSCI_NR_CPUS 2
#if IMX7D_PSCI_NR_CPUS > CONFIG_ARMV7_PSCI_NR_CPUS
#error "invalid value for CONFIG_ARMV7_PSCI_NR_CPUS"
#endif

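/*
 * Each A7 core has a pair of 32-bit general purpose registers in the SRC
 * block: the first is programmed with the core's entry point, the second
 * with a parameter word that this driver also reuses as an on/off
 * handshake between a dying core and CPU0 (see psci_cpu_off() and
 * psci_affinity_info() below).
 */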
#define imx_cpu_gpr_entry_offset(cpu) \
        (SRC_BASE_ADDR + SRC_GPR1_MX7D + cpu * 8)
#define imx_cpu_gpr_para_offset(cpu) \
        (imx_cpu_gpr_entry_offset(cpu) + 4)

#define IMX_CPU_SYNC_OFF ~0
#define IMX_CPU_SYNC_ON 0

u8 psci_state[IMX7D_PSCI_NR_CPUS] __secure_data = {
        PSCI_AFFINITY_LEVEL_ON,
        PSCI_AFFINITY_LEVEL_OFF};

enum imx_gpc_slot {
        CORE0_A7,
        CORE1_A7,
        SCU_A7,
        FAST_MEGA_MIX,
        MIPI_PHY,
        PCIE_PHY,
        USB_OTG1_PHY,
        USB_OTG2_PHY,
        USB_HSIC_PHY,
        CORE0_M4,
};

enum mxc_cpu_pwr_mode {
        RUN,
        WAIT,
        STOP,
};

extern void psci_system_resume(void);

static inline void psci_set_state(int cpu, u8 state)
{
        psci_state[cpu] = state;
        dsb();
        isb();
}

static inline void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
{
        writel(enable, GPC_IPS_BASE_ADDR + offset);
}

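/*
 * Request a software power-up or power-down of a core's power gating
 * controller (PGC): enable the core's PGC, set the PUP/PDN request bit
 * and busy-wait until the GPC clears it, then disable the PGC again.
 */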
__secure void imx_gpcv2_set_core_power(int cpu, bool pdn)
{
        u32 reg = pdn ? GPC_CPU_PGC_SW_PUP_REQ : GPC_CPU_PGC_SW_PDN_REQ;
        u32 pgc = cpu ? GPC_PGC_C1 : GPC_PGC_C0;
        u32 pdn_pup_req = cpu ? BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7 :
                                BM_CPU_PGC_SW_PDN_PUP_REQ_CORE0_A7;
        u32 val;

        imx_gpcv2_set_m_core_pgc(true, pgc);

        val = readl(GPC_IPS_BASE_ADDR + reg);
        val |= pdn_pup_req;
        writel(val, GPC_IPS_BASE_ADDR + reg);

        while ((readl(GPC_IPS_BASE_ADDR + reg) & pdn_pup_req) != 0)
                ;

        imx_gpcv2_set_m_core_pgc(false, pgc);
}

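/*
 * Release a secondary Cortex-A7 core from reset (enable) or put it back
 * into reset (disable) via its enable bit in SRC_A7RCR1.
 */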
__secure void imx_enable_cpu_ca7(int cpu, bool enable)
{
        u32 mask, val;

        mask = 1 << (BP_SRC_A7RCR1_A7_CORE1_ENABLE + cpu - 1);
        val = readl(SRC_BASE_ADDR + SRC_A7RCR1);
        val = enable ? val | mask : val & ~mask;
        writel(val, SRC_BASE_ADDR + SRC_A7RCR1);
}

__secure void psci_arch_cpu_entry(void)
{
        u32 cpu = psci_get_cpu_id();

        psci_set_state(cpu, PSCI_AFFINITY_LEVEL_ON);
}

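/*
 * PSCI CPU_ON: validate the target MPIDR, record the entry point and
 * context ID, program the core's entry GPR, mark the core ON_PENDING,
 * then power up its PGC domain and release it from reset.
 */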
__secure s32 psci_cpu_on(u32 __always_unused function_id, u32 mpidr, u32 ep,
                         u32 context_id)
{
        u32 cpu = mpidr & MPIDR_AFF0;

        if (mpidr & ~MPIDR_AFF0)
                return ARM_PSCI_RET_INVAL;

        if (cpu >= IMX7D_PSCI_NR_CPUS)
                return ARM_PSCI_RET_INVAL;

        if (psci_state[cpu] == PSCI_AFFINITY_LEVEL_ON)
                return ARM_PSCI_RET_ALREADY_ON;

        if (psci_state[cpu] == PSCI_AFFINITY_LEVEL_ON_PENDING)
                return ARM_PSCI_RET_ON_PENDING;

        psci_save(cpu, ep, context_id);

        writel((u32)psci_cpu_entry, imx_cpu_gpr_entry_offset(cpu));

        psci_set_state(cpu, PSCI_AFFINITY_LEVEL_ON_PENDING);

        imx_gpcv2_set_core_power(cpu, true);
        imx_enable_cpu_ca7(cpu, true);

        return ARM_PSCI_RET_SUCCESS;
}

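/*
 * PSCI CPU_OFF runs on the core that is going away: it marks itself OFF,
 * puts itself back into reset, powers down its PGC domain and then writes
 * IMX_CPU_SYNC_OFF into its parameter GPR so that CPU0 can finish the
 * teardown from psci_affinity_info().
 */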
__secure s32 psci_cpu_off(void)
{
        int cpu;

        cpu = psci_get_cpu_id();

        psci_cpu_off_common();
        psci_set_state(cpu, PSCI_AFFINITY_LEVEL_OFF);

        imx_enable_cpu_ca7(cpu, false);
        imx_gpcv2_set_core_power(cpu, false);
        /*
         * We use the cpu jumping argument register to sync with
         * psci_affinity_info(), which runs on cpu0 and finishes
         * killing this cpu.
         */
        writel(IMX_CPU_SYNC_OFF, imx_cpu_gpr_para_offset(cpu));

        while (1)
                wfi();
}

__secure void psci_system_reset(void)
{
        struct wdog_regs *wdog = (struct wdog_regs *)WDOG1_BASE_ADDR;

        /* make sure the WDOG1 clock is enabled */
        writel(0x1 << 28, CCM_BASE_ADDR + CCM_ROOT_WDOG);
        writel(0x3, CCM_BASE_ADDR + CCM_CCGR_WDOG1);
        writew(WCR_WDE, &wdog->wcr);

        while (1)
                wfi();
}

__secure void psci_system_off(void)
{
        u32 val;

        /* make sure the SNVS clock is enabled */
        writel(0x3, CCM_BASE_ADDR + CCM_CCGR_SNVS);

        val = readl(SNVS_BASE_ADDR + SNVS_LPCR);
        val |= BP_SNVS_LPCR_DP_EN | BP_SNVS_LPCR_TOP;
        writel(val, SNVS_BASE_ADDR + SNVS_LPCR);

        while (1)
                wfi();
}

__secure u32 psci_version(void)
{
        return ARM_PSCI_VER_1_0;
}

__secure s32 psci_cpu_suspend(u32 __always_unused function_id, u32 power_state,
                              u32 entry_point_address,
                              u32 context_id)
{
        return ARM_PSCI_RET_INVAL;
}

__secure s32 psci_affinity_info(u32 __always_unused function_id,
                                u32 target_affinity,
                                u32 lowest_affinity_level)
{
        u32 cpu = target_affinity & MPIDR_AFF0;

        if (lowest_affinity_level > 0)
                return ARM_PSCI_RET_INVAL;

        if (target_affinity & ~MPIDR_AFF0)
                return ARM_PSCI_RET_INVAL;

        if (cpu >= IMX7D_PSCI_NR_CPUS)
                return ARM_PSCI_RET_INVAL;

        /* The CPU is waiting to be killed */
        if (readl(imx_cpu_gpr_para_offset(cpu)) == IMX_CPU_SYNC_OFF) {
                imx_enable_cpu_ca7(cpu, false);
                imx_gpcv2_set_core_power(cpu, false);
                writel(IMX_CPU_SYNC_ON, imx_cpu_gpr_para_offset(cpu));
        }

        return psci_state[cpu];
}

__secure u32 psci_migrate_info_type(void)
{
        /* Trusted OS is either not present or does not require migration */
        return 2;
}

__secure s32 psci_features(u32 __always_unused function_id, u32 psci_fid)
{
        switch (psci_fid) {
        case ARM_PSCI_0_2_FN_PSCI_VERSION:
        case ARM_PSCI_0_2_FN_CPU_OFF:
        case ARM_PSCI_0_2_FN_CPU_ON:
        case ARM_PSCI_0_2_FN_AFFINITY_INFO:
        case ARM_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case ARM_PSCI_0_2_FN_SYSTEM_OFF:
        case ARM_PSCI_0_2_FN_SYSTEM_RESET:
        case ARM_PSCI_1_0_FN_PSCI_FEATURES:
        case ARM_PSCI_1_0_FN_SYSTEM_SUSPEND:
                return 0x0;
        }
        return ARM_PSCI_RET_NI;
}

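/*
 * Program the GPC low-power mode (RUN/WAIT/STOP) for the A7 platform.
 * WAIT and STOP set the LPM field in LPCR_A7_BSC; STOP additionally
 * enables the DSM/standby handshakes in SLPCR. The IMR1_CORE0 bit 0
 * updates implement the IRQ #32 (IOMUX) workaround described in the
 * comment inside the function.
 */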
static __secure void imx_gpcv2_set_lpm_mode(enum mxc_cpu_pwr_mode mode)
{
        u32 val1, val2, val3;

        val1 = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC);
        val2 = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);

        /* all cores' LPM settings must be the same */
        val1 &= ~(BM_LPCR_A7_BSC_LPM0 | BM_LPCR_A7_BSC_LPM1);
        val1 |= BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;

        val2 &= ~(BM_SLPCR_EN_DSM | BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN |
                  BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY);
        /*
         * GPC: When an improper low-power sequence is used,
         * the SoC enters low-power mode before the ARM core executes WFI.
         *
         * Software workaround:
         * 1) Software should trigger IRQ #32 (IOMUX) to be always pending
         *    by setting IOMUX_GPR1_IRQ.
         * 2) Software should then unmask IRQ #32 in GPC before setting GPC
         *    Low-Power mode.
         * 3) Software should mask IRQ #32 right after GPC Low-Power mode
         *    is set.
         */
        switch (mode) {
        case RUN:
                val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
                val3 &= ~0x1;
                writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
                break;
        case WAIT:
                val1 |= A7_LPM_WAIT << BP_LPCR_A7_BSC_LPM0;
                val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
                val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
                val3 &= ~0x1;
                writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
                break;
        case STOP:
                val1 |= A7_LPM_STOP << BP_LPCR_A7_BSC_LPM0;
                val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
                val2 |= BM_SLPCR_EN_DSM;
                val2 |= BM_SLPCR_SBYOS;
                val2 |= BM_SLPCR_VSTBY;
                val2 |= BM_SLPCR_BYPASS_PMIC_READY;
                val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
                val3 |= 0x1;
                writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
                break;
        default:
                return;
        }
        writel(val1, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC);
        writel(val2, GPC_IPS_BASE_ADDR + GPC_SLPCR);
}

static __secure void imx_gpcv2_set_plat_power_gate_by_lpm(bool pdn)
{
        u32 val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);

        val &= ~(BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE);
        if (pdn)
                val |= BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE;

        writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
}

static __secure void imx_gpcv2_set_cpu_power_gate_by_lpm(u32 cpu, bool pdn)
{
        u32 val;

        val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
        if (cpu == 0) {
                if (pdn)
                        val |= BM_LPCR_A7_AD_EN_C0_PDN |
                               BM_LPCR_A7_AD_EN_C0_PUP;
                else
                        val &= ~(BM_LPCR_A7_AD_EN_C0_PDN |
                                 BM_LPCR_A7_AD_EN_C0_PUP);
        }
        if (cpu == 1) {
                if (pdn)
                        val |= BM_LPCR_A7_AD_EN_C1_PDN |
                               BM_LPCR_A7_AD_EN_C1_PUP;
                else
                        val &= ~(BM_LPCR_A7_AD_EN_C1_PDN |
                                 BM_LPCR_A7_AD_EN_C1_PUP);
        }
        writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
}

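/*
 * Configure one of the GPC power-up/power-down time slots. Each slot
 * register holds a 2-bit field per power domain: this writes 1 (mode ==
 * false) to schedule a power-down of the domain in that slot, or 2
 * (mode == true) for a power-up. If 'ack' is set, the domain's ack is
 * also selected in GPC_PGC_ACK_SEL_A7 (power-up acks sit in the upper
 * half-word) and the corresponding dummy ack is cleared.
 */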
static __secure void imx_gpcv2_set_slot_ack(u32 index, enum imx_gpc_slot m_core,
                                            bool mode, bool ack)
{
        u32 val;

        if (index >= MAX_SLOT_NUMBER)
                return;

        /* set slot */
        writel(readl(GPC_IPS_BASE_ADDR + GPC_SLOT0_CFG + index * 4) |
               ((mode + 1) << (m_core * 2)),
               GPC_IPS_BASE_ADDR + GPC_SLOT0_CFG + index * 4);

        if (ack) {
                /* set ack */
                val = readl(GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7);
                /* clear dummy ack */
                val &= ~(mode ? BM_GPC_PGC_ACK_SEL_A7_PU_DUMMY_ACK :
                                BM_GPC_PGC_ACK_SEL_A7_PD_DUMMY_ACK);
                val |= 1 << (m_core + (mode ? 16 : 0));
                writel(val, GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7);
        }
}

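/*
 * Switch the system counter between its clock sources: FCR1 requests the
 * low-frequency alternate clock for suspend, FCR0 the base clock for
 * normal operation (assuming the usual i.MX7 system counter control
 * register layout).
 */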
static __secure void imx_system_counter_resume(void)
{
        u32 val;

        val = readl(SYSCNT_CTRL_IPS_BASE_ADDR);
        val &= ~BM_SYS_COUNTER_CNTCR_FCR1;
        val |= BM_SYS_COUNTER_CNTCR_FCR0;
        writel(val, SYSCNT_CTRL_IPS_BASE_ADDR);
}

static __secure void imx_system_counter_suspend(void)
{
        u32 val;

        val = readl(SYSCNT_CTRL_IPS_BASE_ADDR);
        val &= ~BM_SYS_COUNTER_CNTCR_FCR0;
        val |= BM_SYS_COUNTER_CNTCR_FCR1;
        writel(val, SYSCNT_CTRL_IPS_BASE_ADDR);
}

static __secure void gic_resume(void)
{
        u32 itlinesnr, i;
        u32 gic_dist_addr = GIC400_ARB_BASE_ADDR + GIC_DIST_OFFSET;

        /* enable the GIC distributor */
        writel(readl(gic_dist_addr + GICD_CTLR) | 0x03,
               gic_dist_addr + GICD_CTLR);

        /* TYPER[4:0] contains an encoded number of available interrupts */
        itlinesnr = readl(gic_dist_addr + GICD_TYPER) & 0x1f;

        /*
         * Set all bits in the GIC group registers to one to allow access
         * from the non-secure state. The first 32 interrupts are private
         * per CPU and will be set later when enabling the GIC for each core.
         */
        for (i = 1; i <= itlinesnr; i++)
                writel((u32)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i);
}

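/*
 * Set the ANATOP PLL override bits before entering DSM so that the
 * low-power logic can take control of the PLLs while suspended;
 * imx_pll_resume() clears the overrides again on wakeup.
 */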
static inline void imx_pll_suspend(void)
{
        writel(BM_ANATOP_ARM_PLL_OVERRIDE,
               ANATOP_BASE_ADDR + ANADIG_ARM_PLL + REG_SET);
        writel(BM_ANATOP_DDR_PLL_OVERRIDE,
               ANATOP_BASE_ADDR + ANADIG_DDR_PLL + REG_SET);
        writel(BM_ANATOP_SYS_PLL_OVERRIDE,
               ANATOP_BASE_ADDR + ANADIG_SYS_PLL + REG_SET);
        writel(BM_ANATOP_ENET_PLL_OVERRIDE,
               ANATOP_BASE_ADDR + ANADIG_ENET_PLL + REG_SET);
        writel(BM_ANATOP_AUDIO_PLL_OVERRIDE,
               ANATOP_BASE_ADDR + ANADIG_AUDIO_PLL + REG_SET);
        writel(BM_ANATOP_VIDEO_PLL_OVERRIDE,
               ANATOP_BASE_ADDR + ANADIG_VIDEO_PLL + REG_SET);
}

static inline void imx_pll_resume(void)
{
        writel(BM_ANATOP_ARM_PLL_OVERRIDE,
               ANATOP_BASE_ADDR + ANADIG_ARM_PLL + REG_CLR);
        writel(BM_ANATOP_DDR_PLL_OVERRIDE,
               ANATOP_BASE_ADDR + ANADIG_DDR_PLL + REG_CLR);
        writel(BM_ANATOP_SYS_PLL_OVERRIDE,
               ANATOP_BASE_ADDR + ANADIG_SYS_PLL + REG_CLR);
        writel(BM_ANATOP_ENET_PLL_OVERRIDE,
               ANATOP_BASE_ADDR + ANADIG_ENET_PLL + REG_CLR);
        writel(BM_ANATOP_AUDIO_PLL_OVERRIDE,
               ANATOP_BASE_ADDR + ANADIG_AUDIO_PLL + REG_CLR);
        writel(BM_ANATOP_VIDEO_PLL_OVERRIDE,
               ANATOP_BASE_ADDR + ANADIG_VIDEO_PLL + REG_CLR);
}

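/*
 * Busy-wait delay based on the ARM generic timer: read CNTFRQ once and
 * spin on CNTPCT until the requested number of microseconds has elapsed.
 * Usable from secure code with interrupts masked.
 */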
static inline void imx_udelay(u32 usec)
{
        u32 freq;
        u64 start, end;

        asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (freq));
        asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (start));
        do {
                asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (end));
                if ((end - start) > usec * (freq / 1000000))
                        break;
        } while (1);
}

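/*
 * Put the DDR controller into self-refresh before DSM: wait for the
 * controller ports to go idle, request software self-refresh, wait until
 * the controller reports self-refresh, then allow the DRAM clock to be
 * stopped. Bit meanings assume the Synopsys DDRC (PWRCTL/PSTAT/STAT)
 * layout used on i.MX7.
 */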
static inline void imx_ddrc_enter_self_refresh(void)
{
        writel(0, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
        while (readl(DDRC_IPS_BASE_ADDR + DDRC_PSTAT) & 0x10001)
                ;

        writel(0x20, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
        while ((readl(DDRC_IPS_BASE_ADDR + DDRC_STAT) & 0x23) != 0x23)
                ;
        writel(readl(DDRC_IPS_BASE_ADDR + DDRC_PWRCTL) | 0x8,
               DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
}

static inline void imx_ddrc_exit_self_refresh(void)
{
        writel(0, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
        while ((readl(DDRC_IPS_BASE_ADDR + DDRC_STAT) & 0x3) == 0x3)
                ;
        writel(readl(DDRC_IPS_BASE_ADDR + DDRC_PWRCTL) | 0x1,
               DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
}

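/*
 * Resume path, entered via the psci_system_resume trampoline stored in
 * SRC_GPR1 after the SoC wakes from DSM: bring DDR out of self-refresh,
 * restore the system counter and GPC RUN mode, clear the RBC settings
 * with the required 32 kHz settling delay, re-init the GIC distributor
 * and the non-secure world, then jump back to the entry point CPU0 saved
 * in psci_system_suspend().
 */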
__secure void imx_system_resume(void)
{
        unsigned int i, val, imr[4], entry;

        entry = psci_get_target_pc(0);
        imx_ddrc_exit_self_refresh();
        imx_system_counter_resume();
        imx_gpcv2_set_lpm_mode(RUN);
        imx_gpcv2_set_cpu_power_gate_by_lpm(0, false);
        imx_gpcv2_set_plat_power_gate_by_lpm(false);
        imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C0);
        imx_gpcv2_set_m_core_pgc(false, GPC_PGC_SCU);

        /*
         * need to mask all interrupts in GPC before
         * changing the RBC configuration
         */
        for (i = 0; i < 4; i++) {
                imr[i] = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);
                writel(~0, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);
        }

        /* configure RBC enable bit */
        val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
        val &= ~BM_SLPCR_RBC_EN;
        writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

        /* configure RBC count */
        val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
        val &= ~BM_SLPCR_REG_BYPASS_COUNT;
        writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

        /*
         * need to delay at least 2 cycles of CKIL (32 kHz)
         * due to a hardware design requirement, which is
         * ~61us; use 65us to be safe
         */
        imx_udelay(65);

        /* restore GPC interrupt mask settings */
        for (i = 0; i < 4; i++)
                writel(imr[i], GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);

        /* initialize gic distributor */
        gic_resume();
        _nonsec_init();

        /* save cpu0 entry */
        psci_save(0, entry, 0);
        psci_cpu_entry();
}

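/*
 * PSCI SYSTEM_SUSPEND: save the return entry point, configure the GPC for
 * STOP/DSM with core0 and the SCU power gated through the time slots,
 * flush the caches, put DDR into self-refresh, apply the e10133 RBC
 * workaround (GIC off, RBC count = 8, short delay), park the resume
 * trampoline and stack pointer in SRC_GPR1/GPR2 and wait in WFI for a
 * wakeup.
 */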
__secure void psci_system_suspend(u32 __always_unused function_id,
                                  u32 ep, u32 context_id)
{
        u32 gpc_mask[4];
        u32 i, val;

        psci_save(0, ep, context_id);
        /* overwrite PLL to be controlled by low power mode */
        imx_pll_suspend();
        imx_system_counter_suspend();
        /* set CA7 platform to enter STOP mode */
        imx_gpcv2_set_lpm_mode(STOP);
        /* enable core0/scu power down/up with low power mode */
        imx_gpcv2_set_cpu_power_gate_by_lpm(0, true);
        imx_gpcv2_set_plat_power_gate_by_lpm(true);
        /* time slot settings for core0 and scu */
        imx_gpcv2_set_slot_ack(0, CORE0_A7, false, false);
        imx_gpcv2_set_slot_ack(1, SCU_A7, false, true);
        imx_gpcv2_set_slot_ack(5, SCU_A7, true, false);
        imx_gpcv2_set_slot_ack(6, CORE0_A7, true, true);
        imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C0);
        imx_gpcv2_set_m_core_pgc(true, GPC_PGC_SCU);
        psci_v7_flush_dcache_all();

        imx_ddrc_enter_self_refresh();

        /*
         * e10133: ARM: Boot failure after A7 enters into
         * low-power idle mode
         *
         * Workaround:
         * If both CPU0/CPU1 are IDLE, the last IDLE CPU should
         * disable the GIC first, then use REG_BYPASS_COUNTER
         * to mask the wakeup INT, and then execute "wfi" to
         * bring the system into power-down processing safely.
         * The counter must be enabled as close to the "wfi" state
         * as possible. The following equation can be used to
         * determine the RBC counter value:
         * RBC_COUNT * (1/32K RTC frequency) >=
         * (46 + PDNSCR_SW + PDNSCR_SW2ISO) * (1/IPG_CLK frequency)
         */

        /* disable GIC distributor */
        writel(0, GIC400_ARB_BASE_ADDR + GIC_DIST_OFFSET);

        for (i = 0; i < 4; i++)
                gpc_mask[i] = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);

        /*
         * Enable the RBC bypass counter here to hold off the interrupts.
         * RBC counter = 8 (240us). With this setting, the latency from
         * wakeup interrupt to ARM power-up is ~250us.
         */
        val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
        val &= ~(0x3f << 24);
        val |= (0x8 << 24);
        writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

        /* enable the counter. */
        val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
        val |= (1 << 30);
        writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

        /* unmask all the GPC interrupts. */
        for (i = 0; i < 4; i++)
                writel(gpc_mask[i], GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);

        /*
         * Now delay for a short while (3us); ARM is at 1 GHz at this
         * point, so a short loop is enough. This delay is required to
         * ensure that the RBC counter can start counting in case an
         * interrupt is already pending or arrives just as ARM is about
         * to assert DSM_request.
         */
        imx_udelay(3);

        /* save resume entry and sp in CPU0 GPR registers */
        asm volatile("mov %0, sp" : "=r" (val));
        writel((u32)psci_system_resume, SRC_BASE_ADDR + SRC_GPR1_MX7D);
        writel(val, SRC_BASE_ADDR + SRC_GPR2_MX7D);

        /* sleep */
        while (1)
                wfi();
}