/*
 * Copyright (c) 2018-2020, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2021-2022, Xilinx, Inc. All rights reserved.
 * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <plat_arm.h>

#include <plat_private.h>
#include <pm_defs.h>

#define PM_RET_ERROR_NOFEATURE	U(19)
#define ALWAYSTRUE		true

/* Secure entry point for powered-on cores, set in plat_setup_psci_ops() */
static uintptr_t _sec_entry;

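/*
 * Standard WFI-based standby: complete outstanding memory accesses,
 * then wait for an interrupt to wake the core.
 */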
static void zynqmp_cpu_standby(plat_local_state_t cpu_state)
{
	dsb();
	wfi();
}

#define MPIDR_MT_BIT	(24)

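/*
 * Power up the requested core without platform management firmware:
 * enable the cluster's ACPU clock, request the cluster power state via
 * its PCLI interface, hold the core in reset while RVBAR is programmed
 * with the secure entry point, release the core and cluster resets and
 * finally request the core power-state change via the per-CPU PCLI
 * interface.
 */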
static int32_t zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
{
	int32_t core_pos = plat_core_pos_by_mpidr(mpidr);
	uint32_t cpu_id, cpu, cluster;
	uintptr_t apu_cluster_base, apu_pcli_base, apu_pcli_cluster;
	uintptr_t rst_apu_cluster;

	/* Validate the linear core position before deriving anything from it */
	if (core_pos < 0) {
		return PSCI_E_INTERN_FAIL;
	}

	cpu_id = (uint32_t)core_pos & ~BIT(MPIDR_MT_BIT);
	cpu = cpu_id % PLATFORM_CORE_COUNT_PER_CLUSTER;
	cluster = cpu_id / PLATFORM_CORE_COUNT_PER_CLUSTER;
	rst_apu_cluster = PSX_CRF + RST_APU0_OFFSET + ((uint64_t)cluster * 0x4U);

	VERBOSE("%s: mpidr: 0x%lx, cpuid: %x, cpu: %x, cluster: %x\n",
		__func__, mpidr, cpu_id, cpu, cluster);

	if (cluster > 3U) {
		panic();
	}

	apu_pcli_cluster = APU_PCLI + APU_PCLI_CLUSTER_OFFSET + ((uint64_t)cluster * APU_PCLI_CLUSTER_STEP);
	apu_cluster_base = APU_CLUSTER0 + ((uint64_t)cluster * APU_CLUSTER_STEP);

	/* Enable clock */
	mmio_setbits_32(PSX_CRF + ACPU0_CLK_CTRL + ((uint64_t)cluster * 0x4U), ACPU_CLK_CTRL_CLKACT);

	/* Enable cluster states */
	mmio_setbits_32(apu_pcli_cluster + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_SET);
	mmio_setbits_32(apu_pcli_cluster + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);

	/* Assert core reset */
	mmio_setbits_32(rst_apu_cluster, ((RST_APU_COLD_RESET | RST_APU_WARN_RESET) << cpu));

	/* Program RVBAR with the secure entry point */
	mmio_write_32(apu_cluster_base + APU_RVBAR_L_0 + (cpu << 3),
		      (uint32_t)_sec_entry);
	mmio_write_32(apu_cluster_base + APU_RVBAR_H_0 + (cpu << 3),
		      (uint32_t)(_sec_entry >> 32));

	/* De-assert core reset */
	mmio_clrbits_32(rst_apu_cluster, ((RST_APU_COLD_RESET | RST_APU_WARN_RESET) << cpu));

	/* Clear cluster resets */
	mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_WARM_RESET);
	mmio_clrbits_32(rst_apu_cluster, RST_APU_CLUSTER_COLD_RESET);

	apu_pcli_base = APU_PCLI + (APU_PCLI_CPU_STEP * cpu) +
			(APU_PCLI_CLUSTER_CPU_STEP * cluster);

	mmio_write_32(apu_pcli_base + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_CLEAR);
	mmio_write_32(apu_pcli_base + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);

	return PSCI_E_SUCCESS;
}

static void zynqmp_nopmu_pwr_domain_off(const psci_power_state_t *target_state)
{
	plat_gic_cpuif_disable();
}

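/*
 * There is no firmware interface here to request a reset, so simply
 * park the calling core in WFI.
 */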
static void __dead2 zynqmp_nopmu_system_reset(void)
{
	while (ALWAYSTRUE) {
		wfi();
	}
}

static int32_t zynqmp_validate_ns_entrypoint(uint64_t ns_entrypoint)
{
	VERBOSE("Validate ns_entrypoint %lx\n", ns_entrypoint);

	if (ns_entrypoint != 0U) {
		return PSCI_E_SUCCESS;
	} else {
		return PSCI_E_INVALID_ADDRESS;
	}
}

static void zynqmp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	plat_gic_pcpu_init();
	plat_gic_cpuif_enable();
}

static void __dead2 zynqmp_system_off(void)
{
	while (ALWAYSTRUE) {
		wfi();
	}
}

static int32_t zynqmp_validate_power_state(uint32_t power_state, psci_power_state_t *req_state)
{
	return PSCI_E_SUCCESS;
}

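/* PSCI handlers for this platform, registered via plat_setup_psci_ops() */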
static const struct plat_psci_ops _nopmc_psci_ops = {
	.cpu_standby = zynqmp_cpu_standby,
	.pwr_domain_on = zynqmp_nopmu_pwr_domain_on,
	.pwr_domain_off = zynqmp_nopmu_pwr_domain_off,
	.system_reset = zynqmp_nopmu_system_reset,
	.validate_ns_entrypoint = zynqmp_validate_ns_entrypoint,
	.pwr_domain_on_finish = zynqmp_pwr_domain_on_finish,
	.system_off = zynqmp_system_off,
	.validate_power_state = zynqmp_validate_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops.
 ******************************************************************************/
int32_t plat_setup_psci_ops(uintptr_t sec_entrypoint,
			    const struct plat_psci_ops **psci_ops)
{
	_sec_entry = sec_entrypoint;

	VERBOSE("Setting up entry point %lx\n", _sec_entry);

	*psci_ops = &_nopmc_psci_ops;

	return 0;
}

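/* No SiP service specific setup is needed for this platform. */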
int sip_svc_setup_init(void)
{
	return 0;
}

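/*
 * Service the subset of PM IOCTLs that can be handled locally with
 * plain register accesses (OSPI/QSPI mux select and UFS configuration
 * reads/writes); IOCTL_USB_SET_STATE is accepted as a no-op and any
 * other request is reported as unsupported.
 */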
static int32_t no_pm_ioctl(uint32_t device_id, uint32_t ioctl_id,
			   uint32_t arg1, uint32_t arg2)
{
	int32_t ret = 0;

	VERBOSE("%s: ioctl_id: %x, arg1: %x\n", __func__, ioctl_id, arg1);

	switch (ioctl_id) {
	case IOCTL_OSPI_MUX_SELECT:
		/* Select the OSPI/QSPI IOU AXI mux */
		mmio_write_32(SLCR_OSPI_QSPI_IOU_AXI_MUX_SEL, arg1);
		break;
	case IOCTL_UFS_TXRX_CFGRDY_GET:
		ret = (int32_t)mmio_read_32(PMXC_IOU_SLCR_TX_RX_CONFIG_RDY);
		break;
	case IOCTL_UFS_SRAM_CSR_SEL:
		/* arg1 selects a read (1) or a write (0) of the UFS SRAM CSR */
		if (arg1 == 1U) {
			ret = (int32_t)mmio_read_32(PMXC_IOU_SLCR_SRAM_CSR);
		} else if (arg1 == 0U) {
			mmio_write_32(PMXC_IOU_SLCR_SRAM_CSR, arg2);
		}
		break;
	case IOCTL_USB_SET_STATE:
		/* Nothing to do; accepted as a no-op */
		break;
	default:
		ret = PM_RET_ERROR_NOFEATURE;
		break;
	}

	return ret;
}

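/*
 * Minimal PM SMC handler used when no platform management firmware is
 * present. The 32-bit PM arguments are packed two per 64-bit SMC
 * register; for PM_IOCTL:
 *   x1[31:0] - device_id    x1[63:32] - ioctl_id
 *   x2[31:0] - arg1         x2[63:32] - arg2
 */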
static uint64_t no_pm_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			      uint64_t x4, void *cookie, void *handle, uint64_t flags)
{
	int32_t ret;
	uint32_t arg[4], api_id;

	arg[0] = (uint32_t)x1;
	arg[1] = (uint32_t)(x1 >> 32);
	arg[2] = (uint32_t)x2;
	arg[3] = (uint32_t)(x2 >> 32);

	api_id = smc_fid & FUNCID_NUM_MASK;
	VERBOSE("%s: smc_fid: %x, api_id=0x%x\n", __func__, smc_fid, api_id);

	switch (api_id) {
	case PM_IOCTL:
	{
		ret = no_pm_ioctl(arg[0], arg[1], arg[2], arg[3]);
		/*
		 * The firmware driver expects the return code in the upper
		 * 32 bits and the status in the lower 32 bits. The status is
		 * always SUCCESS (0) for these low-level mmio register
		 * read/write calls, and the return code is the value returned
		 * by no_pm_ioctl().
		 */
		SMC_RET1(handle, ((uint64_t)ret << 32));
	}
	case PM_GET_CHIPID:
	{
		uint32_t idcode, version;

		idcode = mmio_read_32(PMC_TAP);
		version = mmio_read_32(PMC_TAP_VERSION);
		SMC_RET2(handle, ((uint64_t)idcode << 32), version);
	}
	default:
		WARN("Unimplemented PM Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

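/* Platform SMC handler; all PM service calls are forwarded to no_pm_handler(). */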
uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4,
		     void *cookie, void *handle, uint64_t flags)
{
	return no_pm_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
}
235}