blob: c58f32969aefd910cadf3916a8faf819d9c63a41 [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Roberto Vargas777dd432018-02-12 12:36:17 +00002 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
dp-armfa3cf0b2017-05-03 09:38:09 +01004 * SPDX-License-Identifier: BSD-3-Clause
Achin Gupta4f6ad662013-10-25 09:08:21 +01005 */
6
7#ifndef __PSCI_PRIVATE_H__
8#define __PSCI_PRIVATE_H__
9
Achin Guptaa59caa42013-12-05 14:21:04 +000010#include <arch.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010011#include <bakery_lock.h>
Soby Mathew8595b872015-01-06 15:36:38 +000012#include <bl_common.h>
Soby Mathew981487a2015-07-13 14:10:57 +010013#include <cpu_data.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010014#include <psci.h>
Soby Mathew981487a2015-07-13 14:10:57 +010015#include <spinlock.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010016
#if HW_ASSISTED_COHERENCY

/*
 * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
 * as PSCI participants are cache-coherent, and there's no need for explicit
 * cache maintenance operations or barriers to coordinate their state.
 */
#define psci_flush_dcache_range(addr, size)
#define psci_flush_cpu_data(member)
#define psci_inv_cpu_data(member)

/* Likewise, no barrier is needed to publish state between participants. */
#define psci_dsbish()

/*
 * On systems where participant CPUs are cache-coherent, we can use spinlocks
 * instead of bakery locks.
 */
#define DEFINE_PSCI_LOCK(_name)	spinlock_t _name
#define DECLARE_PSCI_LOCK(_name)	extern DEFINE_PSCI_LOCK(_name)

/* One lock per non-CPU power domain node, indexed via 'lock_index'. */
#define psci_lock_get(non_cpu_pd_node) \
	spin_lock(&psci_locks[(non_cpu_pd_node)->lock_index])
#define psci_lock_release(non_cpu_pd_node) \
	spin_unlock(&psci_locks[(non_cpu_pd_node)->lock_index])

Jeenu Viswambharan0b56d6f2017-01-06 14:58:11 +000042#else
Jeenu Viswambharan346bfd82017-01-05 11:01:02 +000043
Jeenu Viswambharan0b56d6f2017-01-06 14:58:11 +000044/*
45 * If not all PSCI participants are cache-coherent, perform cache maintenance
46 * and issue barriers wherever required to coordinate state.
47 */
48#define psci_flush_dcache_range(addr, size) flush_dcache_range(addr, size)
49#define psci_flush_cpu_data(member) flush_cpu_data(member)
50#define psci_inv_cpu_data(member) inv_cpu_data(member)
51
52#define psci_dsbish() dsbish()
Jeenu Viswambharan0b56d6f2017-01-06 14:58:11 +000053
Soby Mathew523d6332015-01-08 18:02:19 +000054/*
Jeenu Viswambharan346bfd82017-01-05 11:01:02 +000055 * Use bakery locks for state coordination as not all PSCI participants are
56 * cache coherent.
Soby Mathew523d6332015-01-08 18:02:19 +000057 */
Jeenu Viswambharan346bfd82017-01-05 11:01:02 +000058#define DEFINE_PSCI_LOCK(_name) DEFINE_BAKERY_LOCK(_name)
59#define DECLARE_PSCI_LOCK(_name) DECLARE_BAKERY_LOCK(_name)
60
Soby Mathew981487a2015-07-13 14:10:57 +010061#define psci_lock_get(non_cpu_pd_node) \
Andrew Thoelkee466c9f2015-09-10 11:39:36 +010062 bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
Soby Mathew981487a2015-07-13 14:10:57 +010063#define psci_lock_release(non_cpu_pd_node) \
Andrew Thoelkee466c9f2015-09-10 11:39:36 +010064 bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])
Andrew Thoelke56f44702014-06-20 00:36:14 +010065
Jeenu Viswambharan346bfd82017-01-05 11:01:02 +000066#endif
67
/*
 * Initialise the lock index of a non-CPU power domain node: node 'idx' in
 * the 'non_cpu_pd_node' array is assigned lock psci_locks[idx].
 */
#define psci_lock_init(non_cpu_pd_node, idx) \
	((non_cpu_pd_node)[(idx)].lock_index = (idx))
70
/*
 * The PSCI capabilities which are provided by the generic code and do not
 * depend on the platform or SPD capabilities.
 */
#define PSCI_GENERIC_CAP	\
			(define_psci_cap(PSCI_VERSION) |		\
			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
			define_psci_cap(PSCI_FEATURES))

/*
 * The PSCI capabilities mask for 64 bit functions.
 */
#define PSCI_CAP_64BIT_MASK	\
			(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
			define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
			define_psci_cap(PSCI_MIG_AARCH64) |		\
			define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
			define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) |	\
			define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |	\
			define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |	\
			define_psci_cap(PSCI_STAT_COUNT_AARCH64) |	\
			define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64) |	\
			define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64))
Soby Mathew6cdddaf2015-01-07 11:10:22 +000095
/*
 * Helper macros to get/set the fields of PSCI per-cpu data.
 */
#define psci_set_aff_info_state(aff_state) \
		set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
#define psci_get_aff_info_state() \
		get_cpu_data(psci_svc_cpu_data.aff_info_state)
#define psci_get_aff_info_state_by_idx(idx) \
		get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
#define psci_set_aff_info_state_by_idx(idx, aff_state) \
		set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
					aff_state)
#define psci_get_suspend_pwrlvl() \
		get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
#define psci_set_suspend_pwrlvl(target_lvl) \
		set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
#define psci_set_cpu_local_state(state) \
		set_cpu_data(psci_svc_cpu_data.local_state, state)
#define psci_get_cpu_local_state() \
		get_cpu_data(psci_svc_cpu_data.local_state)
#define psci_get_cpu_local_state_by_idx(idx) \
		get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)

/*
 * Helper macros for the CPU level spinlocks (the 'cpu_lock' member of
 * psci_cpu_pd_nodes[]).
 */
#define psci_spin_lock_cpu(idx)	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
#define psci_spin_unlock_cpu(idx)	spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)

/*
 * Helper macro to identify a CPU standby request in PSCI Suspend call:
 * standby means no power-down is requested and the retention level is 0.
 */
#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
		(((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
Soby Mathew6cdddaf2015-01-07 11:10:22 +0000128
/*******************************************************************************
 * The following two data structures implement the power domain tree. The tree
 * is used to track the state of all the nodes i.e. power domain instances
 * described by the platform. The tree consists of nodes that describe CPU power
 * domains i.e. leaf nodes and all other power domains which are parents of a
 * CPU power domain i.e. non-leaf nodes.
 ******************************************************************************/
typedef struct non_cpu_pwr_domain_node {
	/*
	 * Index of the first CPU power domain node level 0 which has this node
	 * as its parent.
	 */
	unsigned int cpu_start_idx;

	/*
	 * Number of CPU power domains which are siblings of the domain indexed
	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
	 * -> cpu_start_idx + ncpus' have this node as their parent.
	 */
	unsigned int ncpus;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/* Local power state of this power domain. */
	plat_local_state_t local_state;

	/* Level of this node in the power domain tree. */
	unsigned char level;

	/* For indexing the psci_lock array*/
	unsigned char lock_index;
} non_cpu_pd_node_t;
163
typedef struct cpu_pwr_domain_node {
	/* MPIDR of the CPU represented by this leaf node. */
	u_register_t mpidr;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/*
	 * A CPU power domain does not require state coordination like its
	 * parent power domains. Hence this node does not include a bakery
	 * lock. A spinlock is required by the CPU_ON handler to prevent a race
	 * when multiple CPUs try to turn ON the same target CPU.
	 */
	spinlock_t cpu_lock;
} cpu_pd_node_t;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100181
/*******************************************************************************
 * Data prototypes
 ******************************************************************************/
/* Platform-registered PSCI power management handlers. */
extern const plat_psci_ops_t *psci_plat_pm_ops;
/* Non-leaf and leaf nodes of the power domain tree (see structs above). */
extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
/* Bitmask of supported PSCI calls, built from define_psci_cap() values. */
extern unsigned int psci_caps;

/* One lock is required per non-CPU power domain node */
DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

/*******************************************************************************
 * SPD's power management hooks registered with PSCI
 ******************************************************************************/
extern const spd_pm_ops_t *psci_spd_pm;
Achin Gupta607084e2014-02-09 18:24:19 +0000197
/*******************************************************************************
 * Function prototypes
 ******************************************************************************/
/* Private exported functions from psci_common.c */
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info);
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
int psci_validate_mpidr(u_register_t mpidr);
void psci_init_req_local_pwr_states(void);
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state);
int psci_validate_entry_point(entry_point_info_t *ep,
			uintptr_t entrypoint, u_register_t context_id);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
				      unsigned int end_lvl,
				      unsigned int node_index[]);
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info);
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
				   unsigned int cpu_idx);
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
				   unsigned int cpu_idx);
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
void psci_print_power_domain_map(void);
unsigned int psci_is_last_on_cpu(void);
int psci_spd_migrate_info(u_register_t *mpidr);
void psci_do_pwrdown_sequence(unsigned int power_level);

/*
 * CPU power down is directly called only when HW_ASSISTED_COHERENCY is
 * available. Otherwise, this needs post-call stack maintenance, which is
 * handled in assembly.
 */
void prepare_cpu_pwr_dwn(unsigned int power_level);

/* Private exported functions from psci_on.c */
int psci_cpu_on_start(u_register_t target_cpu,
		      entry_point_info_t *ep);

void psci_cpu_on_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info);

/* Private exported functions from psci_off.c */
int psci_do_cpu_off(unsigned int end_pwrlvl);

/* Private exported functions from psci_suspend.c */
void psci_cpu_suspend_start(entry_point_info_t *ep,
			unsigned int end_pwrlvl,
			psci_power_state_t *state_info,
			unsigned int is_power_down_state);

void psci_cpu_suspend_finish(unsigned int cpu_idx,
			psci_power_state_t *state_info);

/* Private exported functions from psci_helpers.S (assembly helpers) */
void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
void psci_do_pwrup_cache_maintenance(void);

/* Private exported functions from psci_system_off.c (these do not return) */
void __dead2 psci_system_off(void);
void __dead2 psci_system_reset(void);
int psci_system_reset2(uint32_t reset_type, u_register_t cookie);

/* Private exported functions from psci_stat.c */
void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
			const psci_power_state_t *state_info);
void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
			const psci_power_state_t *state_info);
u_register_t psci_stat_residency(u_register_t target_cpu,
			unsigned int power_state);
u_register_t psci_stat_count(u_register_t target_cpu,
			unsigned int power_state);

/* Private exported functions from psci_mem_protect.c */
int psci_mem_protect(unsigned int enable);
int psci_mem_chk_range(uintptr_t base, u_register_t length);
Achin Gupta4f6ad662013-10-25 09:08:21 +0100279#endif /* __PSCI_PRIVATE_H__ */