/*
 * Copyright (c) 2018-2020, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2021-2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

9#include <assert.h>
10
11#include <common/debug.h>
Michal Simekdc708ac2022-09-19 13:52:54 +020012#include <common/runtime_svc.h>
Michal Simek91794362022-08-31 16:45:14 +020013#include <lib/mmio.h>
14#include <lib/psci/psci.h>
15#include <plat/arm/common/plat_arm.h>
16#include <plat/common/platform.h>
17#include <plat_arm.h>
18
19#include <plat_private.h>
Jay Buddhabhatti5b9f3912023-02-02 22:34:03 -080020#include <pm_defs.h>
Michal Simek91794362022-08-31 16:45:14 +020021
/* PM return code: the requested feature/API is not supported by this handler. */
#define PM_RET_ERROR_NOFEATURE U(19)

/* Secure warm-boot entry point; programmed into each core's RVBAR on power-on. */
static uintptr_t versal_net_sec_entry;
25
/*
 * Put the calling core into standby: drain outstanding memory accesses,
 * then wait for an interrupt. cpu_state is the PSCI-requested local state;
 * it is not consulted here because only one standby flavour is implemented.
 */
static void zynqmp_cpu_standby(plat_local_state_t cpu_state)
{
	/* Barrier before WFI so all prior writes complete before sleeping. */
	dsb();
	wfi();
}
31
32static int32_t zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
33{
34 uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);
35 uint32_t cpu = cpu_id % PLATFORM_CORE_COUNT_PER_CLUSTER;
36 uint32_t cluster = cpu_id / PLATFORM_CORE_COUNT_PER_CLUSTER;
37 uintptr_t apu_cluster_base = 0, apu_pcli_base, apu_pcli_cluster = 0;
38 uintptr_t rst_apu_cluster = PSX_CRF + RST_APU0_OFFSET + (cluster * 0x4);
39
40 VERBOSE("%s: mpidr: 0x%lx, cpuid: %x, cpu: %x, cluster: %x\n",
41 __func__, mpidr, cpu_id, cpu, cluster);
42
43 if (cpu_id == -1) {
44 return PSCI_E_INTERN_FAIL;
45 }
46
47 if (platform_id == VERSAL_NET_SPP && cluster > 1) {
48 panic();
49 }
50
51 if (cluster > 3) {
52 panic();
53 }
54
55 apu_pcli_cluster = APU_PCLI + APU_PCLI_CLUSTER_OFFSET + (cluster * APU_PCLI_CLUSTER_STEP);
56 apu_cluster_base = APU_CLUSTER0 + (cluster * APU_CLUSTER_STEP);
57
58 /* Enable clock */
59 mmio_setbits_32(PSX_CRF + ACPU0_CLK_CTRL + (cluster * 0x4), ACPU_CLK_CTRL_CLKACT);
60
61 /* Enable cluster states */
62 mmio_setbits_32(apu_pcli_cluster + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_SET);
63 mmio_setbits_32(apu_pcli_cluster + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);
64
65 /* assert core reset */
66 mmio_setbits_32(rst_apu_cluster, ((RST_APU_COLD_RESET|RST_APU_WARN_RESET) << cpu));
67
68 /* program RVBAR */
69 mmio_write_32(apu_cluster_base + APU_RVBAR_L_0 + (cpu << 3),
70 (uint32_t)versal_net_sec_entry);
71 mmio_write_32(apu_cluster_base + APU_RVBAR_H_0 + (cpu << 3),
72 versal_net_sec_entry >> 32);
73
74 /* de-assert core reset */
75 mmio_clrbits_32(rst_apu_cluster, ((RST_APU_COLD_RESET|RST_APU_WARN_RESET) << cpu));
76
77 /* clear cluster resets */
78 mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_WARM_RESET);
79 mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_COLD_RESET);
80
81 apu_pcli_base = APU_PCLI + (APU_PCLI_CPU_STEP * cpu) +
82 (APU_PCLI_CLUSTER_CPU_STEP * cluster);
83
84 mmio_write_32(apu_pcli_base + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_CLEAR);
85 mmio_write_32(apu_pcli_base + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);
86
87 return PSCI_E_SUCCESS;
88}
89
/*
 * Core power-off hook. Intentionally empty: without PMC firmware there is
 * no agent to actually gate the core's power, so PSCI bookkeeping is the
 * only effect of CPU_OFF on this platform.
 */
static void zynqmp_nopmu_pwr_domain_off(const psci_power_state_t *target_state)
{
}
93
94static void __dead2 zynqmp_nopmu_system_reset(void)
95{
96 while (1)
97 wfi();
98}
99
/*
 * Validate a non-secure entry point passed by the NS world.
 * NOTE(review): currently accepts every address unconditionally — no range
 * check against secure/NS memory is performed here; confirm this is the
 * intended policy for this platform.
 */
static int32_t zynqmp_validate_ns_entrypoint(uint64_t ns_entrypoint)
{
	return PSCI_E_SUCCESS;
}
104
/*
 * Suspend hook. Intentionally empty: no platform state is saved and no
 * power-down request is issued on this no-PMC configuration.
 */
static void zynqmp_pwr_domain_suspend(const psci_power_state_t *target_state)
{
}
108
/*
 * Runs on a core after it has been powered on: re-initialize the per-CPU
 * GIC state and enable this core's GIC CPU interface so it can take
 * interrupts.
 */
static void zynqmp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	plat_versal_net_gic_pcpu_init();
	plat_versal_net_gic_cpuif_enable();
}
114
/*
 * Resume hook after suspend. Intentionally empty — nothing was saved in
 * zynqmp_pwr_domain_suspend(), so there is nothing to restore.
 */
static void zynqmp_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
}
118
119static void __dead2 zynqmp_system_off(void)
120{
121 while (1)
122 wfi();
123}
124
/*
 * Validate a CPU_SUSPEND power_state parameter. Every requested state is
 * accepted as-is; req_state is not populated here — presumably the generic
 * PSCI defaults are relied upon (TODO confirm against framework usage).
 */
static int32_t zynqmp_validate_power_state(uint32_t power_state, psci_power_state_t *req_state)
{
	return PSCI_E_SUCCESS;
}
129
130static void zynqmp_get_sys_suspend_power_state(psci_power_state_t *req_state)
131{
132 req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_MAX_OFF_STATE;
133 req_state->pwr_domain_state[1] = PLAT_MAX_OFF_STATE;
134}
135
/* PSCI handler table used when no PMC/PMU firmware is available. */
static const struct plat_psci_ops versal_net_nopmc_psci_ops = {
	.cpu_standby = zynqmp_cpu_standby,
	.pwr_domain_on = zynqmp_nopmu_pwr_domain_on,
	.pwr_domain_off = zynqmp_nopmu_pwr_domain_off,
	.system_reset = zynqmp_nopmu_system_reset,
	.validate_ns_entrypoint = zynqmp_validate_ns_entrypoint,
	.pwr_domain_suspend = zynqmp_pwr_domain_suspend,
	.pwr_domain_on_finish = zynqmp_pwr_domain_on_finish,
	.pwr_domain_suspend_finish = zynqmp_pwr_domain_suspend_finish,
	.system_off = zynqmp_system_off,
	.validate_power_state = zynqmp_validate_power_state,
	.get_sys_suspend_power_state = zynqmp_get_sys_suspend_power_state,
};
149
150/*******************************************************************************
151 * Export the platform specific power ops.
152 ******************************************************************************/
153int32_t plat_setup_psci_ops(uintptr_t sec_entrypoint,
154 const struct plat_psci_ops **psci_ops)
155{
156 versal_net_sec_entry = sec_entrypoint;
157
158 VERBOSE("Setting up entry point %lx\n", versal_net_sec_entry);
159
160 *psci_ops = &versal_net_nopmc_psci_ops;
161
162 return 0;
163}
Michal Simekdc708ac2022-09-19 13:52:54 +0200164
/* SiP service one-time setup hook; nothing to initialize on this platform. */
int sip_svc_setup_init(void)
{
	return 0;
}
169
170static int32_t no_pm_ioctl(uint32_t device_id, uint32_t ioctl_id,
171 uint32_t arg1, uint32_t arg2)
172{
173 VERBOSE("%s: ioctl_id: %x, arg1: %x\n", __func__, ioctl_id, arg1);
174 if (ioctl_id == IOCTL_OSPI_MUX_SELECT) {
175 mmio_write_32(SLCR_OSPI_QSPI_IOU_AXI_MUX_SEL, arg1);
176 return 0;
177 }
178 return PM_RET_ERROR_NOFEATURE;
179}
180
/*
 * Minimal PM SMC dispatcher for the no-PMC-firmware configuration.
 * Unpacks the two 64-bit SMC registers x1/x2 into four 32-bit PM
 * arguments, extracts the API id from the function id, and services
 * PM_IOCTL and PM_GET_CHIPID locally. All other calls return SMC_UNK.
 * Note: the SMC_RET* macros return from this function directly.
 */
static uint64_t no_pm_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			      uint64_t x4, void *cookie, void *handle, uint64_t flags)
{
	int32_t ret;
	uint32_t arg[4], api_id;

	/* Each 64-bit register carries two packed 32-bit PM arguments. */
	arg[0] = (uint32_t)x1;
	arg[1] = (uint32_t)(x1 >> 32);
	arg[2] = (uint32_t)x2;
	arg[3] = (uint32_t)(x2 >> 32);

	api_id = smc_fid & FUNCID_NUM_MASK;
	VERBOSE("%s: smc_fid: %x, api_id=0x%x\n", __func__, smc_fid, api_id);

	switch (api_id) {
	case PM_IOCTL:
	{
		ret = no_pm_ioctl(arg[0], arg[1], arg[2], arg[3]);
		SMC_RET1(handle, (uint64_t)ret);
	}
	case PM_GET_CHIPID:
	{
		uint32_t idcode, version;

		/* Read IDCODE and silicon version from the PMC TAP registers. */
		idcode = mmio_read_32(PMC_TAP);
		version = mmio_read_32(PMC_TAP_VERSION);
		/* IDCODE in the upper half of the first return register. */
		SMC_RET2(handle, ((uint64_t)idcode << 32), version);
	}
	default:
		WARN("Unimplemented PM Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
214
/* Top-level SiP SMC entry point: forwards every call to the no-PM handler. */
uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4,
		     void *cookie, void *handle, uint64_t flags)
{
	return no_pm_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
}
219}