/*
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * https://spdx.org/licenses
 */

#include <a3700_pm.h>
#include <arch_helpers.h>
#include <armada_common.h>
#include <debug.h>
#include <dram_win.h>
#include <io_addr_dec.h>
#include <mmio.h>
#include <mvebu.h>
#include <mvebu_def.h>
#include <marvell_plat_priv.h>
#include <platform.h>
#include <plat_marvell.h>
#include <psci.h>
#ifdef USE_CCI
#include <cci.h>
#endif

/* Warm reset register */
#define MVEBU_WARM_RESET_REG		(MVEBU_NB_REGS_BASE + 0x840)
#define MVEBU_WARM_RESET_MAGIC		0x1D1E

/* North Bridge GPIO1 SEL register */
#define MVEBU_NB_GPIO1_SEL_REG		(MVEBU_NB_REGS_BASE + 0x830)
 #define MVEBU_NB_GPIO1_UART1_SEL	BIT(19)
 #define MVEBU_NB_GPIO1_GPIO_25_26_EN	BIT(17)
 #define MVEBU_NB_GPIO1_GPIO_19_EN	BIT(14)
 #define MVEBU_NB_GPIO1_GPIO_18_EN	BIT(13)

/* CPU 1 reset register */
#define MVEBU_CPU_1_RESET_VECTOR	(MVEBU_REGS_BASE + 0x14044)
#define MVEBU_CPU_1_RESET_REG		(MVEBU_REGS_BASE + 0xD00C)
#define MVEBU_CPU_1_RESET_BIT		31

/* IRQ register */
#define MVEBU_NB_IRQ_STATUS_1_REG		(MVEBU_NB_SB_IRQ_REG_BASE)
#define MVEBU_NB_IRQ_STATUS_2_REG		(MVEBU_NB_SB_IRQ_REG_BASE + \
						0x10)
#define MVEBU_NB_IRQ_MASK_2_REG			(MVEBU_NB_SB_IRQ_REG_BASE + \
						0x18)
#define MVEBU_SB_IRQ_STATUS_1_REG		(MVEBU_NB_SB_IRQ_REG_BASE + \
						0x40)
#define MVEBU_SB_IRQ_STATUS_2_REG		(MVEBU_NB_SB_IRQ_REG_BASE + \
						0x50)
#define MVEBU_NB_GPIO_IRQ_MASK_1_REG		(MVEBU_NB_SB_IRQ_REG_BASE + \
						0xC8)
#define MVEBU_NB_GPIO_IRQ_MASK_2_REG		(MVEBU_NB_SB_IRQ_REG_BASE + \
						0xD8)
#define MVEBU_SB_GPIO_IRQ_MASK_REG		(MVEBU_NB_SB_IRQ_REG_BASE + \
						0xE8)
#define MVEBU_NB_GPIO_IRQ_EN_LOW_REG		(MVEBU_NB_GPIO_IRQ_REG_BASE)
#define MVEBU_NB_GPIO_IRQ_EN_HIGH_REG		(MVEBU_NB_GPIO_IRQ_REG_BASE + \
						0x04)
#define MVEBU_NB_GPIO_IRQ_STATUS_LOW_REG	(MVEBU_NB_GPIO_IRQ_REG_BASE + \
						0x10)
#define MVEBU_NB_GPIO_IRQ_STATUS_HIGH_REG	(MVEBU_NB_GPIO_IRQ_REG_BASE + \
						0x14)
#define MVEBU_NB_GPIO_IRQ_WK_LOW_REG		(MVEBU_NB_GPIO_IRQ_REG_BASE + \
						0x18)
#define MVEBU_NB_GPIO_IRQ_WK_HIGH_REG		(MVEBU_NB_GPIO_IRQ_REG_BASE + \
						0x1C)
#define MVEBU_SB_GPIO_IRQ_EN_REG		(MVEBU_SB_GPIO_IRQ_REG_BASE)
#define MVEBU_SB_GPIO_IRQ_STATUS_REG		(MVEBU_SB_GPIO_IRQ_REG_BASE + \
						0x10)
#define MVEBU_SB_GPIO_IRQ_WK_REG		(MVEBU_SB_GPIO_IRQ_REG_BASE + \
						0x18)

/* PMU registers */
#define MVEBU_PM_NB_PWR_CTRL_REG	(MVEBU_PMSU_REG_BASE)
 #define MVEBU_PM_PWR_DN_CNT_SEL	BIT(28)
 #define MVEBU_PM_SB_PWR_DWN		BIT(4)
 #define MVEBU_PM_INTERFACE_IDLE	BIT(0)
#define MVEBU_PM_NB_CPU_PWR_CTRL_REG	(MVEBU_PMSU_REG_BASE + 0x4)
 #define MVEBU_PM_L2_FLUSH_EN		BIT(22)
#define MVEBU_PM_NB_PWR_OPTION_REG	(MVEBU_PMSU_REG_BASE + 0x8)
 #define MVEBU_PM_DDR_SR_EN		BIT(29)
 #define MVEBU_PM_DDR_CLK_DIS_EN	BIT(28)
 #define MVEBU_PM_WARM_RESET_EN		BIT(27)
 #define MVEBU_PM_DDRPHY_PWRDWN_EN	BIT(23)
 #define MVEBU_PM_DDRPHY_PAD_PWRDWN_EN	BIT(22)
 #define MVEBU_PM_OSC_OFF_EN		BIT(21)
 #define MVEBU_PM_TBG_OFF_EN		BIT(20)
 #define MVEBU_PM_CPU_VDDV_OFF_EN	BIT(19)
 #define MVEBU_PM_AVS_DISABLE_MODE	BIT(14)
 #define MVEBU_PM_AVS_VDD2_MODE		BIT(13)
 #define MVEBU_PM_AVS_HOLD_MODE		BIT(12)
 #define MVEBU_PM_L2_SRAM_LKG_PD_EN	BIT(8)
 #define MVEBU_PM_EIP_SRAM_LKG_PD_EN	BIT(7)
 #define MVEBU_PM_DDRMC_SRAM_LKG_PD_EN	BIT(6)
 #define MVEBU_PM_MCI_SRAM_LKG_PD_EN	BIT(5)
 #define MVEBU_PM_MMC_SRAM_LKG_PD_EN	BIT(4)
 #define MVEBU_PM_SATA_SRAM_LKG_PD_EN	BIT(3)
 #define MVEBU_PM_DMA_SRAM_LKG_PD_EN	BIT(2)
 #define MVEBU_PM_SEC_SRAM_LKG_PD_EN	BIT(1)
 #define MVEBU_PM_CPU_SRAM_LKG_PD_EN	BIT(0)
 #define MVEBU_PM_NB_SRAM_LKG_PD_EN	(MVEBU_PM_L2_SRAM_LKG_PD_EN |\
	MVEBU_PM_EIP_SRAM_LKG_PD_EN | MVEBU_PM_DDRMC_SRAM_LKG_PD_EN |\
	MVEBU_PM_MCI_SRAM_LKG_PD_EN | MVEBU_PM_MMC_SRAM_LKG_PD_EN |\
	MVEBU_PM_SATA_SRAM_LKG_PD_EN | MVEBU_PM_DMA_SRAM_LKG_PD_EN |\
	MVEBU_PM_SEC_SRAM_LKG_PD_EN | MVEBU_PM_CPU_SRAM_LKG_PD_EN)
#define MVEBU_PM_NB_PWR_DEBUG_REG	(MVEBU_PMSU_REG_BASE + 0xC)
 #define MVEBU_PM_NB_FORCE_CLK_ON	BIT(30)
 #define MVEBU_PM_IGNORE_CM3_SLEEP	BIT(21)
 #define MVEBU_PM_IGNORE_CM3_DEEP	BIT(20)
#define MVEBU_PM_NB_WAKE_UP_EN_REG	(MVEBU_PMSU_REG_BASE + 0x2C)
 #define MVEBU_PM_SB_WKP_NB_EN		BIT(31)
 #define MVEBU_PM_NB_GPIO_WKP_EN	BIT(27)
 #define MVEBU_PM_SOC_TIMER_WKP_EN	BIT(26)
 #define MVEBU_PM_UART_WKP_EN		BIT(25)
 #define MVEBU_PM_UART2_WKP_EN		BIT(19)
 #define MVEBU_PM_CPU_TIMER_WKP_EN	BIT(17)
 #define MVEBU_PM_NB_WKP_EN		BIT(16)
 #define MVEBU_PM_CORE1_FIQ_IRQ_WKP_EN	BIT(13)
 #define MVEBU_PM_CORE0_FIQ_IRQ_WKP_EN	BIT(12)
#define MVEBU_PM_CPU_0_PWR_CTRL_REG	(MVEBU_PMSU_REG_BASE + 0x34)
#define MVEBU_PM_CPU_1_PWR_CTRL_REG	(MVEBU_PMSU_REG_BASE + 0x38)
 #define MVEBU_PM_CORE_SOC_PD		BIT(2)
 #define MVEBU_PM_CORE_PROC_PD		BIT(1)
 #define MVEBU_PM_CORE_PD		BIT(0)
#define MVEBU_PM_CORE_1_RETURN_ADDR_REG	(MVEBU_PMSU_REG_BASE + 0x44)
#define MVEBU_PM_CPU_VDD_OFF_INFO_1_REG	(MVEBU_PMSU_REG_BASE + 0x48)
#define MVEBU_PM_CPU_VDD_OFF_INFO_2_REG	(MVEBU_PMSU_REG_BASE + 0x4C)
 #define MVEBU_PM_LOW_POWER_STATE	BIT(0)
#define MVEBU_PM_CPU_WAKE_UP_CONF_REG	(MVEBU_PMSU_REG_BASE + 0x54)
 #define MVEBU_PM_CORE1_WAKEUP		BIT(13)
 #define MVEBU_PM_CORE0_WAKEUP		BIT(12)
#define MVEBU_PM_WAIT_DDR_RDY_VALUE	(0x15)
#define MVEBU_PM_SB_CPU_PWR_CTRL_REG	(MVEBU_SB_WAKEUP_REG_BASE)
 #define MVEBU_PM_SB_PM_START		BIT(0)
#define MVEBU_PM_SB_PWR_OPTION_REG	(MVEBU_SB_WAKEUP_REG_BASE + 0x4)
 #define MVEBU_PM_SDIO_PHY_PDWN_EN	BIT(17)
 #define MVEBU_PM_SB_VDDV_OFF_EN	BIT(16)
 #define MVEBU_PM_EBM_SRAM_LKG_PD_EN	BIT(11)
 #define MVEBU_PM_PCIE_SRAM_LKG_PD_EN	BIT(10)
 #define MVEBU_PM_GBE1_TX_SRAM_LKG_PD_EN	BIT(9)
 #define MVEBU_PM_GBE1_RX_SRAM_LKG_PD_EN	BIT(8)
 #define MVEBU_PM_GBE1_MIB_SRAM_LKG_PD_EN	BIT(7)
 #define MVEBU_PM_GBE0_TX_SRAM_LKG_PD_EN	BIT(6)
 #define MVEBU_PM_GBE0_RX_SRAM_LKG_PD_EN	BIT(5)
 #define MVEBU_PM_GBE0_MIB_SRAM_LKG_PD_EN	BIT(4)
 #define MVEBU_PM_SDIO_SRAM_LKG_PD_EN	BIT(3)
 #define MVEBU_PM_USB2_SRAM_LKG_PD_EN	BIT(2)
 #define MVEBU_PM_USB3_H_SRAM_LKG_PD_EN	BIT(1)
 #define MVEBU_PM_SB_SRAM_LKG_PD_EN	(MVEBU_PM_EBM_SRAM_LKG_PD_EN |\
	MVEBU_PM_PCIE_SRAM_LKG_PD_EN | MVEBU_PM_GBE1_TX_SRAM_LKG_PD_EN |\
	MVEBU_PM_GBE1_RX_SRAM_LKG_PD_EN | MVEBU_PM_GBE1_MIB_SRAM_LKG_PD_EN |\
	MVEBU_PM_GBE0_TX_SRAM_LKG_PD_EN | MVEBU_PM_GBE0_RX_SRAM_LKG_PD_EN |\
	MVEBU_PM_GBE0_MIB_SRAM_LKG_PD_EN | MVEBU_PM_SDIO_SRAM_LKG_PD_EN |\
	MVEBU_PM_USB2_SRAM_LKG_PD_EN | MVEBU_PM_USB3_H_SRAM_LKG_PD_EN)
#define MVEBU_PM_SB_WK_EN_REG		(MVEBU_SB_WAKEUP_REG_BASE + 0x10)
 #define MVEBU_PM_SB_GPIO_WKP_EN	BIT(24)
 #define MVEBU_PM_SB_WKP_EN		BIT(20)

/* DRAM registers */
#define MVEBU_DRAM_STATS_CH0_REG	(MVEBU_DRAM_REG_BASE + 0x4)
 #define MVEBU_DRAM_WCP_EMPTY		BIT(19)
#define MVEBU_DRAM_CMD_0_REG		(MVEBU_DRAM_REG_BASE + 0x20)
 #define MVEBU_DRAM_CH0_CMD0		BIT(28)
 #define MVEBU_DRAM_CS_CMD0		BIT(24)
 #define MVEBU_DRAM_WCB_DRAIN_REQ	BIT(1)
#define MVEBU_DRAM_PWR_CTRL_REG		(MVEBU_DRAM_REG_BASE + 0x54)
 #define MVEBU_DRAM_PHY_CLK_GATING_EN	BIT(1)
 #define MVEBU_DRAM_PHY_AUTO_AC_OFF_EN	BIT(0)

/* AVS registers */
#define MVEBU_AVS_CTRL_2_REG		(MVEBU_AVS_REG_BASE + 0x8)
 #define MVEBU_LOW_VDD_MODE_EN		BIT(6)

/* Clock registers */
#define MVEBU_NB_CLOCK_SEL_REG		(MVEBU_NB_REGS_BASE + 0x10)
 #define MVEBU_A53_CPU_CLK_SEL		BIT(15)

/* North Bridge Step-Down Registers */
#define MVEBU_NB_STEP_DOWN_INT_EN_REG	MVEBU_NB_STEP_DOWN_REG_BASE
 #define MVEBU_NB_GPIO_INT_WAKE_WCPU_CLK	BIT(8)

#define MVEBU_NB_GPIO_18	18
#define MVEBU_NB_GPIO_19	19
#define MVEBU_NB_GPIO_25	25
#define MVEBU_NB_GPIO_26	26

typedef int (*wake_up_src_func)(union pm_wake_up_src_data *);

struct wake_up_src_func_map {
	enum pm_wake_up_src_type type;
	wake_up_src_func func;
};

void marvell_psci_arch_init(int die_index)
{
}

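/*
 * Acknowledge all pending north bridge, south bridge and GPIO interrupts
 * by writing the pending status bits back to their status registers.
 * Neither a core nor the SoC can be powered down while an IRQ is pending.
 */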
static void a3700_pm_ack_irq(void)
{
	uint32_t reg;

	reg = mmio_read_32(MVEBU_NB_IRQ_STATUS_1_REG);
	if (reg)
		mmio_write_32(MVEBU_NB_IRQ_STATUS_1_REG, reg);

	reg = mmio_read_32(MVEBU_NB_IRQ_STATUS_2_REG);
	if (reg)
		mmio_write_32(MVEBU_NB_IRQ_STATUS_2_REG, reg);

	reg = mmio_read_32(MVEBU_SB_IRQ_STATUS_1_REG);
	if (reg)
		mmio_write_32(MVEBU_SB_IRQ_STATUS_1_REG, reg);

	reg = mmio_read_32(MVEBU_SB_IRQ_STATUS_2_REG);
	if (reg)
		mmio_write_32(MVEBU_SB_IRQ_STATUS_2_REG, reg);

	reg = mmio_read_32(MVEBU_NB_GPIO_IRQ_STATUS_LOW_REG);
	if (reg)
		mmio_write_32(MVEBU_NB_GPIO_IRQ_STATUS_LOW_REG, reg);

	reg = mmio_read_32(MVEBU_NB_GPIO_IRQ_STATUS_HIGH_REG);
	if (reg)
		mmio_write_32(MVEBU_NB_GPIO_IRQ_STATUS_HIGH_REG, reg);

	reg = mmio_read_32(MVEBU_SB_GPIO_IRQ_STATUS_REG);
	if (reg)
		mmio_write_32(MVEBU_SB_GPIO_IRQ_STATUS_REG, reg);
}

/*****************************************************************************
 * A3700 handler called to check the validity of the power state
 * parameter.
 *****************************************************************************
 */
int a3700_validate_power_state(unsigned int power_state,
			       psci_power_state_t *req_state)
{
	ERROR("%s needs to be implemented\n", __func__);
	panic();
}

/*****************************************************************************
 * A3700 handler called when a CPU is about to enter standby.
 *****************************************************************************
 */
void a3700_cpu_standby(plat_local_state_t cpu_state)
{
	ERROR("%s needs to be implemented\n", __func__);
	panic();
}

/*****************************************************************************
 * A3700 handler called when a power domain is about to be turned on. The
 * mpidr determines the CPU to be turned on.
 *****************************************************************************
 */
int a3700_pwr_domain_on(u_register_t mpidr)
{
	/* Set barrier */
	dsbsy();

	/* Set the cpu start address to BL1 entry point */
	mmio_write_32(MVEBU_CPU_1_RESET_VECTOR,
		      PLAT_MARVELL_CPU_ENTRY_ADDR >> 2);

	/* Get the cpu out of reset */
	mmio_clrbits_32(MVEBU_CPU_1_RESET_REG, BIT(MVEBU_CPU_1_RESET_BIT));
	mmio_setbits_32(MVEBU_CPU_1_RESET_REG, BIT(MVEBU_CPU_1_RESET_BIT));

	return 0;
}

/*****************************************************************************
 * A3700 handler called to validate the entry point.
 *****************************************************************************
 */
int a3700_validate_ns_entrypoint(uintptr_t entrypoint)
{
	return PSCI_E_SUCCESS;
}

/*****************************************************************************
 * A3700 handler called when a power domain is about to be turned off. The
 * target_state encodes the power state that each level should transition to.
 *****************************************************************************
 */
void a3700_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint32_t cpu_idx = plat_my_core_pos();

	/* Prevent interrupts from spuriously waking up this cpu */
	plat_marvell_gic_cpuif_disable();

	/*
	 * Enable Core VDD OFF; the core is powered off by the PMU
	 * when the WFI instruction is executed.
	 */
	mmio_setbits_32(MVEBU_PM_CPU_0_PWR_CTRL_REG + 4 * cpu_idx,
			MVEBU_PM_CORE_PD);

	/*
	 * A core cannot be powered down with an IRQ pending,
	 * so acknowledge all pending IRQs first.
	 */
	a3700_pm_ack_irq();
}

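/*
 * Set the generic north bridge and per-core power-off options: enable L2
 * flush, switch the CPU/AXI clock to OSC, enable SRAM leakage power down
 * and allow both cores, the processor domain and the north bridge to be
 * powered down.
 */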
static void a3700_set_gen_pwr_off_option(void)
{
	/* Enable L2 flush -> processor state-machine option */
	mmio_setbits_32(MVEBU_PM_NB_CPU_PWR_CTRL_REG, MVEBU_PM_L2_FLUSH_EN);

	/*
	 * The north bridge cannot be VDD off (it is always on); its low
	 * power modes are handled by the NB state machine.
	 * This bit MUST be set for north bridge power down, e.g.,
	 * OSC input cutoff (not tested), SRAM power down, PMIC, etc.
	 * It is not related to CPU VDD OFF.
	 */
	mmio_clrbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_CPU_VDDV_OFF_EN);

	/*
	 * MUST: switch the CPU/AXI clock to OSC.
	 * The NB state machine clock is always connected to OSC (the slow
	 * clock), but the Core0/Core1/processor state machine clocks are
	 * connected to the AXI clock, which currently takes the TBG as its
	 * source. Running from the AXI clock, the core/processor state
	 * machines may be much faster than the NB state machine, which
	 * causes problems if the cores are released before the north
	 * bridge is ready.
	 */
	mmio_clrbits_32(MVEBU_NB_CLOCK_SEL_REG, MVEBU_A53_CPU_CLK_SEL);

	/*
	 * These register bits trigger the north bridge power-down state
	 * machine regardless of the CM3 status.
	 */
	mmio_setbits_32(MVEBU_PM_NB_PWR_DEBUG_REG, MVEBU_PM_IGNORE_CM3_SLEEP);
	mmio_setbits_32(MVEBU_PM_NB_PWR_DEBUG_REG, MVEBU_PM_IGNORE_CM3_DEEP);

	/*
	 * SRAM leakage power down is controlled by the north bridge state
	 * machine; core VDD OFF is not related to CPU SRAM power down.
	 */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_NB_SRAM_LKG_PD_EN);

	/*
	 * Idle the AXI interface in order to get L2 WFI.
	 * L2 WFI is only asserted after both CORE-0 and CORE-1 have
	 * asserted WFI (only when both cores are in WFI is L2 WFI issued
	 * by the CPU cluster). Once L2 WFI is asserted, this bit is used
	 * to signal the assertion to the AXI I/O masters.
	 */
	mmio_setbits_32(MVEBU_PM_NB_PWR_CTRL_REG, MVEBU_PM_INTERFACE_IDLE);

	/* Enable core0 and core1 VDD_OFF */
	mmio_setbits_32(MVEBU_PM_CPU_0_PWR_CTRL_REG, MVEBU_PM_CORE_PD);
	mmio_setbits_32(MVEBU_PM_CPU_1_PWR_CTRL_REG, MVEBU_PM_CORE_PD);

	/*
	 * Enable north bridge power down.
	 * Both cores MUST set this bit to power down the north bridge.
	 */
	mmio_setbits_32(MVEBU_PM_CPU_0_PWR_CTRL_REG, MVEBU_PM_CORE_SOC_PD);
	mmio_setbits_32(MVEBU_PM_CPU_1_PWR_CTRL_REG, MVEBU_PM_CORE_SOC_PD);

	/* CA53 (processor domain) power down */
	mmio_setbits_32(MVEBU_PM_CPU_0_PWR_CTRL_REG, MVEBU_PM_CORE_PROC_PD);
	mmio_setbits_32(MVEBU_PM_CPU_1_PWR_CTRL_REG, MVEBU_PM_CORE_PROC_PD);
}

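/*
 * Prepare the DRAM for the low-power state: shorten the wake-up counter,
 * enable self-refresh and DDR PHY power down under north bridge control,
 * drain the DRAM write buffer and arm the PHY reset on wake-up.
 */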
static void a3700_en_ddr_self_refresh(void)
{
	/*
	 * Both counters are 16 bits wide and configurable. By default the
	 * osc stable count uses only its lower 12 bits (0xFFF), so the
	 * power-down count is smaller than the osc count. This count is
	 * used when exiting DDR SR mode on a wake-up event, and it also
	 * affects the state changes idle -> count-down -> ...
	 * (power-down, VDD off, etc.).
	 * Make the stable counter shorter here: use the power-down count
	 * value instead of osc_stb_cnt to speed up DDR self-refresh exit.
	 */
	mmio_setbits_32(MVEBU_PM_NB_PWR_CTRL_REG, MVEBU_PM_PWR_DN_CNT_SEL);

	/*
	 * Enable DDR SR mode, which is controlled by the north bridge state
	 * machine; the north bridge must therefore be powered down to
	 * trigger the switch into DDR SR mode.
	 */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_DDR_SR_EN);
	/* Disable the DDR clock, otherwise the DDR will not enter SR mode */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_DDR_CLK_DIS_EN);
	/* Power down the DDR PHY (PAD) */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_DDRPHY_PWRDWN_EN);
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG,
			MVEBU_PM_DDRPHY_PAD_PWRDWN_EN);

	/* Set the wait time for DDR ready used by the ROM code */
	mmio_write_32(MVEBU_PM_CPU_VDD_OFF_INFO_1_REG,
		      MVEBU_PM_WAIT_DDR_RDY_VALUE);

	/* Flush the DRAM write buffer - mandatory */
	mmio_write_32(MVEBU_DRAM_CMD_0_REG, MVEBU_DRAM_CH0_CMD0 |
		      MVEBU_DRAM_CS_CMD0 | MVEBU_DRAM_WCB_DRAIN_REQ);
	while ((mmio_read_32(MVEBU_DRAM_STATS_CH0_REG) &
	       MVEBU_DRAM_WCP_EMPTY) != MVEBU_DRAM_WCP_EMPTY)
		;

	/*
	 * Trigger a PHY reset after the DDR exits self-refresh, i.e.
	 * supply a reset pulse to the DDR PHY after wake-up.
	 */
	mmio_setbits_32(MVEBU_DRAM_PWR_CTRL_REG, MVEBU_DRAM_PHY_CLK_GATING_EN |
			MVEBU_DRAM_PHY_AUTO_AC_OFF_EN);
}

static void a3700_pwr_dn_avs(void)
{
	/*
	 * AVS power down is controlled by the north bridge state machine.
	 * Enable AVS power down by clearing the AVS disable bit.
	 */
	mmio_clrbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_AVS_DISABLE_MODE);
	/*
	 * Bits [13:12] must be set to power down the AVS:
	 * 1. Enable AVS VDD2 mode.
	 * 2. After AVS power down, the AVS output voltage must be held.
	 * 3. A lower VDD can be chosen for AVS power down.
	 */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_AVS_VDD2_MODE);
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_AVS_HOLD_MODE);

	/*
	 * Enable low VDD mode; AVS will set the CPU to the lowest core
	 * VDD, 747 mV.
	 */
	mmio_setbits_32(MVEBU_AVS_CTRL_2_REG, MVEBU_LOW_VDD_MODE_EN);
}

static void a3700_pwr_dn_tbg(void)
{
	/* Power down the TBG */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_TBG_OFF_EN);
}

static void a3700_pwr_dn_sb(void)
{
	/* Enable south bridge power down option */
	mmio_setbits_32(MVEBU_PM_NB_PWR_CTRL_REG, MVEBU_PM_SB_PWR_DWN);

	/* Enable SDIO_PHY_PWRDWN */
	mmio_setbits_32(MVEBU_PM_SB_PWR_OPTION_REG, MVEBU_PM_SDIO_PHY_PDWN_EN);

	/* Enable SRAM LRM on SB */
	mmio_setbits_32(MVEBU_PM_SB_PWR_OPTION_REG, MVEBU_PM_SB_SRAM_LKG_PD_EN);

	/* Enable SB Power Off */
	mmio_setbits_32(MVEBU_PM_SB_PWR_OPTION_REG, MVEBU_PM_SB_VDDV_OFF_EN);

	/* Kick off South Bridge Power Off */
	mmio_setbits_32(MVEBU_PM_SB_CPU_PWR_CTRL_REG, MVEBU_PM_SB_PM_START);
}

static void a3700_set_pwr_off_option(void)
{
	/* Set the general power off options */
	a3700_set_gen_pwr_off_option();

	/* Enable DDR self refresh in low power mode */
	a3700_en_ddr_self_refresh();

	/* Power down AVS */
	a3700_pwr_dn_avs();

	/* Power down TBG */
	a3700_pwr_dn_tbg();

	/*
	 * Power down the south bridge; note that the south bridge
	 * settings must be configured beforehand.
	 */
	a3700_pwr_dn_sb();
}

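/*
 * Enable the wake-up events needed to bring the north bridge and both
 * cores out of low-power mode, and enable warm reset in low power mode.
 */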
static void a3700_set_wake_up_option(void)
{
	/*
	 * Enable the wake-up event for the NB SoC, i.e. enable the
	 * north bridge state machine on a wake-up event.
	 */
	mmio_setbits_32(MVEBU_PM_NB_WAKE_UP_EN_REG, MVEBU_PM_NB_WKP_EN);

	/* Enable both core0 and core1 wake-up on demand */
	mmio_setbits_32(MVEBU_PM_CPU_WAKE_UP_CONF_REG,
			MVEBU_PM_CORE1_WAKEUP | MVEBU_PM_CORE0_WAKEUP);

	/* Enable warm reset in low power mode */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_WARM_RESET_EN);
}

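/*
 * Configure a north bridge GPIO (0-63) as a wake-up source: unmask its
 * interrupt, enable it as a GPIO wake-up interrupt and enable GPIO
 * wake-up events in the north bridge wake-up enable register.
 */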
static void a3700_pm_en_nb_gpio(uint32_t gpio)
{
	/* For GPIO1 interrupt -- North bridge only */
	if (gpio >= 32) {
		/* GPIO int mask */
		mmio_clrbits_32(MVEBU_NB_GPIO_IRQ_MASK_2_REG, BIT(gpio - 32));

		/* NB_CPU_WAKE-up ENABLE GPIO int */
		mmio_setbits_32(MVEBU_NB_GPIO_IRQ_EN_HIGH_REG, BIT(gpio - 32));
	} else {
		/* GPIO int mask */
		mmio_clrbits_32(MVEBU_NB_GPIO_IRQ_MASK_1_REG, BIT(gpio));

		/* NB_CPU_WAKE-up ENABLE GPIO int */
		mmio_setbits_32(MVEBU_NB_GPIO_IRQ_EN_LOW_REG, BIT(gpio));
	}

	mmio_setbits_32(MVEBU_NB_STEP_DOWN_INT_EN_REG,
			MVEBU_NB_GPIO_INT_WAKE_WCPU_CLK);

	/*
	 * Enable using GPIO as wakeup event
	 * (actually not only for north bridge)
	 */
	mmio_setbits_32(MVEBU_PM_NB_WAKE_UP_EN_REG, MVEBU_PM_NB_GPIO_WKP_EN |
			MVEBU_PM_NB_WKP_EN | MVEBU_PM_CORE1_FIQ_IRQ_WKP_EN |
			MVEBU_PM_CORE0_FIQ_IRQ_WKP_EN);
}

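/*
 * Configure a south bridge GPIO as a wake-up source: enable south bridge
 * wake-up in the north bridge wake-up enable register, unmask the GPIO
 * interrupt and enable it in the south bridge GPIO IRQ enable register.
 */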
static void a3700_pm_en_sb_gpio(uint32_t gpio)
{
	/* Enable using GPIO as wakeup event */
	mmio_setbits_32(MVEBU_PM_NB_WAKE_UP_EN_REG, MVEBU_PM_SB_WKP_NB_EN |
			MVEBU_PM_NB_WKP_EN | MVEBU_PM_CORE1_FIQ_IRQ_WKP_EN |
			MVEBU_PM_CORE0_FIQ_IRQ_WKP_EN);

	/* SB GPIO Wake UP | South Bridge Wake Up Enable */
	mmio_setbits_32(MVEBU_PM_SB_WK_EN_REG, MVEBU_PM_SB_GPIO_WKP_EN |
			MVEBU_PM_SB_WKP_EN);

	/* GPIO int mask */
	mmio_clrbits_32(MVEBU_SB_GPIO_IRQ_MASK_REG, BIT(gpio));

	/* NB_CPU_WAKE-up ENABLE GPIO int */
	mmio_setbits_32(MVEBU_SB_GPIO_IRQ_EN_REG, BIT(gpio));
}

int a3700_pm_src_gpio(union pm_wake_up_src_data *src_data)
{
	if (src_data->gpio_data.bank_num == 0)
		/* North Bridge GPIO */
		a3700_pm_en_nb_gpio(src_data->gpio_data.gpio_num);
	else
		a3700_pm_en_sb_gpio(src_data->gpio_data.gpio_num);
	return 0;
}

int a3700_pm_src_uart1(union pm_wake_up_src_data *src_data)
{
	/* Clear UART1 select */
	mmio_clrbits_32(MVEBU_NB_GPIO1_SEL_REG, MVEBU_NB_GPIO1_UART1_SEL);
	/* Set pin 19 to GPIO usage */
	mmio_setbits_32(MVEBU_NB_GPIO1_SEL_REG, MVEBU_NB_GPIO1_GPIO_19_EN);
	/* Enable GPIO wake-up */
	a3700_pm_en_nb_gpio(MVEBU_NB_GPIO_19);
	/* Set pin 18 to GPIO usage */
	mmio_setbits_32(MVEBU_NB_GPIO1_SEL_REG, MVEBU_NB_GPIO1_GPIO_18_EN);
	/* Enable GPIO wake-up */
	a3700_pm_en_nb_gpio(MVEBU_NB_GPIO_18);

	return 0;
}

int a3700_pm_src_uart0(union pm_wake_up_src_data *src_data)
{
	/* Set pins 25/26 to GPIO usage */
	mmio_setbits_32(MVEBU_NB_GPIO1_SEL_REG, MVEBU_NB_GPIO1_GPIO_25_26_EN);
	/* Enable GPIO wake-up */
	a3700_pm_en_nb_gpio(MVEBU_NB_GPIO_25);
	/* Enable GPIO wake-up */
	a3700_pm_en_nb_gpio(MVEBU_NB_GPIO_26);

	return 0;
}

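/*
 * Map each supported wake-up source type to its configuration handler.
 * Entries with a NULL handler are not supported yet.
 */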
struct wake_up_src_func_map src_func_table[WAKE_UP_SRC_MAX] = {
	{WAKE_UP_SRC_GPIO, a3700_pm_src_gpio},
	{WAKE_UP_SRC_UART1, a3700_pm_src_uart1},
	{WAKE_UP_SRC_UART0, a3700_pm_src_uart0},
	/* FOLLOWING SRC NOT SUPPORTED YET */
	{WAKE_UP_SRC_TIMER, NULL}
};

static wake_up_src_func a3700_get_wake_up_src_func(
	enum pm_wake_up_src_type type)
{
	uint32_t loop;

	for (loop = 0; loop < WAKE_UP_SRC_MAX; loop++) {
		if (src_func_table[loop].type == type)
			return src_func_table[loop].func;
	}
	return NULL;
}

static void a3700_set_wake_up_source(void)
{
	struct pm_wake_up_src_config *wake_up_src;
	uint32_t loop;
	wake_up_src_func src_func = NULL;

	wake_up_src = mv_wake_up_src_config_get();
	for (loop = 0; loop < wake_up_src->wake_up_src_num; loop++) {
		src_func = a3700_get_wake_up_src_func(
			   wake_up_src->wake_up_src[loop].wake_up_src_type);
		if (src_func)
			src_func(
				&(wake_up_src->wake_up_src[loop].wake_up_data));
	}
}

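/*
 * The low-power flag in MVEBU_PM_CPU_VDD_OFF_INFO_2_REG records that the
 * system entered the low power (suspend) state. It is checked on wake-up
 * to decide whether the per-CPU IRQ state has to be restored, and cleared
 * once the suspend finisher has run.
 */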
static void a3700_pm_save_lp_flag(void)
{
	/* Save the flag indicating entry into the low power mode */
	mmio_setbits_32(MVEBU_PM_CPU_VDD_OFF_INFO_2_REG,
			MVEBU_PM_LOW_POWER_STATE);
}

static void a3700_pm_clear_lp_flag(void)
{
	/* Clear the flag indicating entry into the low power mode */
	mmio_clrbits_32(MVEBU_PM_CPU_VDD_OFF_INFO_2_REG,
			MVEBU_PM_LOW_POWER_STATE);
}

static uint32_t a3700_pm_get_lp_flag(void)
{
	/* Get the flag indicating entry into the low power mode */
	return mmio_read_32(MVEBU_PM_CPU_VDD_OFF_INFO_2_REG) &
	       MVEBU_PM_LOW_POWER_STATE;
}

/*****************************************************************************
 * A3700 handler called when a power domain is about to be suspended. The
 * target_state encodes the power state that each level should transition to.
 *****************************************************************************
 */
void a3700_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	/* Prevent interrupts from spuriously waking up this cpu */
	plat_marvell_gic_cpuif_disable();

	/* Save IRQ states */
	plat_marvell_gic_irq_save();

	/* Set wake up options */
	a3700_set_wake_up_option();

	/* Set wake up sources */
	a3700_set_wake_up_source();

	/*
	 * The SoC cannot be powered down with an IRQ pending,
	 * so acknowledge all pending IRQs first.
	 */
	a3700_pm_ack_irq();

	/* Set power off options */
	a3700_set_pwr_off_option();

	/* Save the flag indicating entry into the low power mode */
	a3700_pm_save_lp_flag();

	isb();
}

/*****************************************************************************
 * A3700 handler called when a power domain has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from.
 *****************************************************************************
 */
void a3700_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* arch specific configuration */
	marvell_psci_arch_init(0);

	/* Per-CPU interrupt initialization */
	plat_marvell_gic_pcpu_init();
	plat_marvell_gic_cpuif_enable();

	/* Restore the per-cpu IRQ state */
	if (a3700_pm_get_lp_flag())
		plat_marvell_gic_irq_pcpu_restore();
}

/*****************************************************************************
 * A3700 handler called when a power domain has just been powered on after
 * having been suspended earlier. The target_state encodes the low power state
 * that each level has woken up from.
 * TODO: At the moment we reuse the on finisher and reinitialize the secure
 * context. Need to implement a separate suspend finisher.
 *****************************************************************************
 */
void a3700_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	struct dec_win_config *io_dec_map;
	uint32_t dec_win_num;
	struct dram_win_map dram_wins_map;

	/* arch specific configuration */
	marvell_psci_arch_init(0);

	/* Interrupt initialization */
	plat_marvell_gic_init();

	/* Restore IRQ states */
	plat_marvell_gic_irq_restore();

	/*
	 * Initialize CCI for this cluster after resume from suspend state.
	 * No need for locks as no other CPU is active.
	 */
	plat_marvell_interconnect_init();
	/*
	 * Enable CCI coherency for the primary CPU's cluster.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	plat_marvell_interconnect_enter_coherency();

	/* CPU address decoder windows initialization */
	cpu_wins_init();

	/*
	 * Fetch the CPU-DRAM window mapping information by reading the
	 * CPU-DRAM decode windows (only the enabled ones).
	 */
	dram_win_map_build(&dram_wins_map);

	/* Get the IO address decoder windows */
	if (marvell_get_io_dec_win_conf(&io_dec_map, &dec_win_num)) {
		printf("No IO address decoder windows configurations found!\n");
		return;
	}

	/* IO address decoder init */
	if (init_io_addr_dec(&dram_wins_map, io_dec_map, dec_win_num)) {
		printf("IO address decoder windows initialization failed!\n");
		return;
	}

	/* Clear the low power mode flag */
	a3700_pm_clear_lp_flag();
}

/*****************************************************************************
 * This handler is called by the PSCI implementation during the `SYSTEM_SUSPEND`
 * call to get the `power_state` parameter. This allows the platform to encode
 * the appropriate State-ID field within the `power_state` parameter which can
 * be utilized in `pwr_domain_suspend()` to suspend to system affinity level.
 *****************************************************************************
 */
void a3700_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	/* lower affinities use PLAT_MAX_OFF_STATE */
	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
}

/*****************************************************************************
 * A3700 handlers to shutdown/reboot the system
 *****************************************************************************
 */
static void __dead2 a3700_system_off(void)
{
	ERROR("%s needs to be implemented\n", __func__);
	panic();
}

/*****************************************************************************
 * A3700 handlers to reset the system
 *****************************************************************************
 */
static void __dead2 a3700_system_reset(void)
{
	/*
	 * Clear the mailbox magic number so that the next boot
	 * behaves like a cold boot.
	 */
	mmio_write_32(PLAT_MARVELL_MAILBOX_BASE, 0x0);

	dsbsy();

	/* Flush the data cache if the mailbox shared RAM is cached */
#if PLAT_MARVELL_SHARED_RAM_CACHED
	flush_dcache_range((uintptr_t)PLAT_MARVELL_MAILBOX_BASE,
			   2 * sizeof(uint64_t));
#endif

	/* Trigger the warm reset */
	mmio_write_32(MVEBU_WARM_RESET_REG, MVEBU_WARM_RESET_MAGIC);

	/* Shouldn't get to this point */
	panic();
}

/*****************************************************************************
 * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
 * platform layer will take care of registering the handlers with PSCI.
 *****************************************************************************
 */
const plat_psci_ops_t plat_arm_psci_pm_ops = {
	.cpu_standby = a3700_cpu_standby,
	.pwr_domain_on = a3700_pwr_domain_on,
	.pwr_domain_off = a3700_pwr_domain_off,
	.pwr_domain_suspend = a3700_pwr_domain_suspend,
	.pwr_domain_on_finish = a3700_pwr_domain_on_finish,
	.pwr_domain_suspend_finish = a3700_pwr_domain_suspend_finish,
	.get_sys_suspend_power_state = a3700_get_sys_suspend_power_state,
	.system_off = a3700_system_off,
	.system_reset = a3700_system_reset,
	.validate_power_state = a3700_validate_power_state,
	.validate_ns_entrypoint = a3700_validate_ns_entrypoint
};