/*
 * Copyright (c) 2017 - 2020, Broadcom
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/ccn.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <lib/spinlock.h>

#include <brcm_scpi.h>
#include <cmn_plat_util.h>
#include <plat_brcm.h>
#include <platform_def.h>

#include "m0_cfg.h"

/* Accessors for the per-level local power states in a PSCI state array. */
#define CORE_PWR_STATE(state)	((state)->pwr_domain_state[MPIDR_AFFLVL0])
#define CLUSTER_PWR_STATE(state) \
			((state)->pwr_domain_state[MPIDR_AFFLVL1])
#define SYSTEM_PWR_STATE(state)	((state)->pwr_domain_state[MPIDR_AFFLVL2])

/* The vendor-defined reset type is carried in the second nibble. */
#define VENDOR_RST_TYPE_SHIFT	4

#if HW_ASSISTED_COHERENCY
/*
 * Every participating CPU is cache-coherent, so a plain spinlock is
 * sufficient to serialise access to the event-clear register.
 */
spinlock_t event_lock;
#define event_lock_get(_lock) spin_lock(&_lock)
#define event_lock_release(_lock) spin_unlock(&_lock)

#else
/*
 * Not all participants are cache-coherent yet; bakery locks work
 * without relying on hardware coherency, so use those instead.
 */
DEFINE_BAKERY_LOCK(event_lock);
#define event_lock_get(_lock) bakery_lock_get(&_lock)
#define event_lock_release(_lock) bakery_lock_release(&_lock)
#endif
51
52static int brcm_pwr_domain_on(u_register_t mpidr)
53{
54 /*
55 * SCP takes care of powering up parent power domains so we
56 * only need to care about level 0
57 */
58 scpi_set_brcm_power_state(mpidr, scpi_power_on, scpi_power_on,
59 scpi_power_on);
60
61 return PSCI_E_SUCCESS;
62}
63
64/*******************************************************************************
65 * Handler called when a power level has just been powered on after
66 * being turned off earlier. The target_state encodes the low power state that
67 * each level has woken up from. This handler would never be invoked with
68 * the system power domain uninitialized as either the primary would have taken
69 * care of it as part of cold boot or the first core awakened from system
70 * suspend would have already initialized it.
71 ******************************************************************************/
72static void brcm_pwr_domain_on_finish(const psci_power_state_t *target_state)
73{
74 unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr());
75
76 /* Assert that the system power domain need not be initialized */
77 assert(SYSTEM_PWR_STATE(target_state) == PLAT_LOCAL_STATE_RUN);
78
79 assert(CORE_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF);
80
81 /*
82 * Perform the common cluster specific operations i.e enable coherency
83 * if this cluster was off.
84 */
85 if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF) {
86 INFO("Cluster #%lu entering to snoop/dvm domain\n", cluster_id);
87 ccn_enter_snoop_dvm_domain(1 << cluster_id);
88 }
89
90 /* Program the gic per-cpu distributor or re-distributor interface */
91 plat_brcm_gic_pcpu_init();
92
93 /* Enable the gic cpu interface */
94 plat_brcm_gic_cpuif_enable();
95}
96
97static void brcm_power_down_common(void)
98{
99 unsigned int standbywfil2, standbywfi;
100 uint64_t mpidr = read_mpidr_el1();
101
102 switch (MPIDR_AFFLVL1_VAL(mpidr)) {
103 case 0x0:
104 standbywfi = CDRU_PROC_EVENT_CLEAR__IH0_CDRU_STANDBYWFI;
105 standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH0_CDRU_STANDBYWFIL2;
106 break;
107 case 0x1:
108 standbywfi = CDRU_PROC_EVENT_CLEAR__IH1_CDRU_STANDBYWFI;
109 standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH1_CDRU_STANDBYWFIL2;
110 break;
111 case 0x2:
112 standbywfi = CDRU_PROC_EVENT_CLEAR__IH2_CDRU_STANDBYWFI;
113 standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH2_CDRU_STANDBYWFIL2;
114 break;
115 case 0x3:
116 standbywfi = CDRU_PROC_EVENT_CLEAR__IH3_CDRU_STANDBYWFI;
117 standbywfil2 = CDRU_PROC_EVENT_CLEAR__IH3_CDRU_STANDBYWFIL2;
118 break;
119 default:
120 ERROR("Invalid cluster #%llx\n", MPIDR_AFFLVL1_VAL(mpidr));
121 return;
122 }
123 /* Clear the WFI status bit */
124 event_lock_get(event_lock);
125 mmio_setbits_32(CDRU_PROC_EVENT_CLEAR,
126 (1 << (standbywfi + MPIDR_AFFLVL0_VAL(mpidr))) |
127 (1 << standbywfil2));
128 event_lock_release(event_lock);
129}
130
131/*
132 * Helper function to inform power down state to SCP.
133 */
134static void brcm_scp_suspend(const psci_power_state_t *target_state)
135{
136 uint32_t cluster_state = scpi_power_on;
137 uint32_t system_state = scpi_power_on;
138
139 /* Check if power down at system power domain level is requested */
140 if (SYSTEM_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
141 system_state = scpi_power_retention;
142
143 /* Check if Cluster is to be turned off */
144 if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
145 cluster_state = scpi_power_off;
146
147 /*
148 * Ask the SCP to power down the appropriate components depending upon
149 * their state.
150 */
151 scpi_set_brcm_power_state(read_mpidr_el1(),
152 scpi_power_off,
153 cluster_state,
154 system_state);
155}
156
157/*
158 * Helper function to turn off a CPU power domain and its parent power domains
159 * if applicable. Since SCPI doesn't differentiate between OFF and suspend, we
160 * call the suspend helper here.
161 */
162static void brcm_scp_off(const psci_power_state_t *target_state)
163{
164 brcm_scp_suspend(target_state);
165}
166
167static void brcm_pwr_domain_off(const psci_power_state_t *target_state)
168{
169 unsigned long cluster_id = MPIDR_AFFLVL1_VAL(read_mpidr_el1());
170
171 assert(CORE_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF);
172 /* Prevent interrupts from spuriously waking up this cpu */
173 plat_brcm_gic_cpuif_disable();
174
175 /* Turn redistributor off */
176 plat_brcm_gic_redistif_off();
177
178 /* If Cluster is to be turned off, disable coherency */
179 if (CLUSTER_PWR_STATE(target_state) == PLAT_LOCAL_STATE_OFF)
180 ccn_exit_snoop_dvm_domain(1 << cluster_id);
181
182 brcm_power_down_common();
183
184 brcm_scp_off(target_state);
185}
186
187/*******************************************************************************
188 * Handler called when the CPU power domain is about to enter standby.
189 ******************************************************************************/
190static void brcm_cpu_standby(plat_local_state_t cpu_state)
191{
192 unsigned int scr;
193
194 assert(cpu_state == PLAT_LOCAL_STATE_RET);
195
196 scr = read_scr_el3();
197 /*
198 * Enable the Non secure interrupt to wake the CPU.
199 * In GICv3 affinity routing mode, the non secure group1 interrupts use
200 * the PhysicalFIQ at EL3 whereas in GICv2, it uses the PhysicalIRQ.
201 * Enabling both the bits works for both GICv2 mode and GICv3 affinity
202 * routing mode.
203 */
204 write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
205 isb();
206 dsb();
207 wfi();
208
209 /*
210 * Restore SCR to the original value, synchronisation of scr_el3 is
211 * done by eret while el3_exit to save some execution cycles.
212 */
213 write_scr_el3(scr);
214}
215
216/*
217 * Helper function to shutdown the system via SCPI.
218 */
219static void __dead2 brcm_scp_sys_shutdown(void)
220{
221 /*
222 * Disable GIC CPU interface to prevent pending interrupt
223 * from waking up the AP from WFI.
224 */
225 plat_brcm_gic_cpuif_disable();
226
227 /* Flush and invalidate data cache */
228 dcsw_op_all(DCCISW);
229
230 /* Bring Cluster out of coherency domain as its going to die */
231 plat_brcm_interconnect_exit_coherency();
232
233 brcm_power_down_common();
234
235 /* Send the power down request to the SCP */
236 scpi_sys_power_state(scpi_system_shutdown);
237
238 wfi();
239 ERROR("BRCM System Off: operation not handled.\n");
240 panic();
241}
242
243/*
244 * Helper function to reset the system
245 */
246static void __dead2 brcm_scp_sys_reset(unsigned int reset_type)
247{
248 /*
249 * Disable GIC CPU interface to prevent pending interrupt
250 * from waking up the AP from WFI.
251 */
252 plat_brcm_gic_cpuif_disable();
253
254 /* Flush and invalidate data cache */
255 dcsw_op_all(DCCISW);
256
257 /* Bring Cluster out of coherency domain as its going to die */
258 plat_brcm_interconnect_exit_coherency();
259
260 brcm_power_down_common();
261
262 /* Send the system reset request to the SCP
263 *
264 * As per PSCI spec system power state could be
265 * 0-> Shutdown
266 * 1-> Reboot- Board level Reset
267 * 2-> Reset - SoC level Reset
268 *
269 * Spec allocates 8 bits, 2 nibble, for this. One nibble is sufficient
270 * for sending the state hence We are utilizing 2nd nibble for vendor
271 * define reset type.
272 */
273 scpi_sys_power_state((reset_type << VENDOR_RST_TYPE_SHIFT) |
274 scpi_system_reboot);
275
276 wfi();
277 ERROR("BRCM System Reset: operation not handled.\n");
278 panic();
279}
280
281static void __dead2 brcm_system_reset(void)
282{
283 brcm_scp_sys_reset(SOFT_SYS_RESET_L1);
284}
285
286static int brcm_system_reset2(int is_vendor, int reset_type,
287 u_register_t cookie)
288{
289 if (!is_vendor) {
290 /* Architectural warm boot: only warm reset is supported */
291 reset_type = SOFT_RESET_L3;
292 }
293 brcm_scp_sys_reset(reset_type);
294
295 /*
296 * brcm_scp_sys_reset cannot return (it is a __dead function),
297 * but brcm_system_reset2 has to return some value, even in
298 * this case.
299 */
300 return 0;
301}
302
303static int brcm_validate_ns_entrypoint(uintptr_t entrypoint)
304{
305 /*
306 * Check if the non secure entrypoint lies within the non
307 * secure DRAM.
308 */
309 if ((entrypoint >= BRCM_NS_DRAM1_BASE) &&
310 (entrypoint < (BRCM_NS_DRAM1_BASE + BRCM_NS_DRAM1_SIZE)))
311 return PSCI_E_SUCCESS;
312#ifndef AARCH32
313 if ((entrypoint >= BRCM_DRAM2_BASE) &&
314 (entrypoint < (BRCM_DRAM2_BASE + BRCM_DRAM2_SIZE)))
315 return PSCI_E_SUCCESS;
316
317 if ((entrypoint >= BRCM_DRAM3_BASE) &&
318 (entrypoint < (BRCM_DRAM3_BASE + BRCM_DRAM3_SIZE)))
319 return PSCI_E_SUCCESS;
320#endif
321
322 return PSCI_E_INVALID_ADDRESS;
323}
324
325/*******************************************************************************
326 * ARM standard platform handler called to check the validity of the power state
327 * parameter.
328 ******************************************************************************/
329static int brcm_validate_power_state(unsigned int power_state,
330 psci_power_state_t *req_state)
331{
332 int pstate = psci_get_pstate_type(power_state);
333 int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
334 int i;
335
336 assert(req_state);
337
338 if (pwr_lvl > PLAT_MAX_PWR_LVL)
339 return PSCI_E_INVALID_PARAMS;
340
341 /* Sanity check the requested state */
342 if (pstate == PSTATE_TYPE_STANDBY) {
343 /*
344 * It's possible to enter standby only on power level 0
345 * Ignore any other power level.
346 */
347 if (pwr_lvl != MPIDR_AFFLVL0)
348 return PSCI_E_INVALID_PARAMS;
349
350 req_state->pwr_domain_state[MPIDR_AFFLVL0] =
351 PLAT_LOCAL_STATE_RET;
352 } else {
353 for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
354 req_state->pwr_domain_state[i] =
355 PLAT_LOCAL_STATE_OFF;
356 }
357
358 /*
359 * We expect the 'state id' to be zero.
360 */
361 if (psci_get_pstate_id(power_state))
362 return PSCI_E_INVALID_PARAMS;
363
364 return PSCI_E_SUCCESS;
365}
366
367/*******************************************************************************
368 * Export the platform handlers via plat_brcm_psci_pm_ops. The ARM Standard
369 * platform will take care of registering the handlers with PSCI.
370 ******************************************************************************/
371plat_psci_ops_t plat_brcm_psci_pm_ops = {
372 .pwr_domain_on = brcm_pwr_domain_on,
373 .pwr_domain_on_finish = brcm_pwr_domain_on_finish,
374 .pwr_domain_off = brcm_pwr_domain_off,
375 .cpu_standby = brcm_cpu_standby,
376 .system_off = brcm_scp_sys_shutdown,
377 .system_reset = brcm_system_reset,
378 .system_reset2 = brcm_system_reset2,
379 .validate_ns_entrypoint = brcm_validate_ns_entrypoint,
380 .validate_power_state = brcm_validate_power_state,
381};
382
383int plat_setup_psci_ops(uintptr_t sec_entrypoint,
384 const struct plat_psci_ops **psci_ops)
385{
386 *psci_ops = &plat_brcm_psci_pm_ops;
387
388 /* Setup mailbox with entry point. */
389 mmio_write_64(CRMU_CFG_BASE + offsetof(M0CFG, core_cfg.rvbar),
390 sec_entrypoint);
391
392 return 0;
393}