blob: 6ccd6fd178ae7b17866119809ca7c7f28c64121e [file] [log] [blame]
Jacky Bai9a6f62f2019-11-25 14:43:26 +08001/*
2 * Copyright 2019-2022 NXP
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
Jacky Baid746daa12019-11-25 13:19:37 +08007#include <bl31/interrupt_mgmt.h>
8#include <common/runtime_svc.h>
Jacky Bai9a6f62f2019-11-25 14:43:26 +08009#include <lib/mmio.h>
Jacky Baid746daa12019-11-25 13:19:37 +080010#include <lib/spinlock.h>
11#include <plat/common/platform.h>
Jacky Bai9a6f62f2019-11-25 14:43:26 +080012
13#include <dram.h>
14
Jacky Baid746daa12019-11-25 13:19:37 +080015#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT 0x10
16#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO 0x11
17
/* Runtime view of the DDR subsystem: type, rank, FSP table, cached MR values. */
struct dram_info dram_info;

/* lock used for DDR DVFS */
spinlock_t dfs_lock;

/*
 * Bitmap of cores parked in WFE during a frequency switch; each core sets
 * bit (cpu_id * 8) from the SGI handler and the initiator clears the whole
 * word when the switch is done.
 */
static volatile uint32_t wfe_done;
/* True while a DDRC frequency change is in flight; cleared by the initiator. */
static volatile bool wait_ddrc_hwffc_done = true;
/* Device-side FSP currently in use; toggled 0/1 on each LPDDR4 switch. */
static unsigned int dev_fsp = 0x1;

/*
 * INIT3/4/6/7 register addresses for the three frequency set-points.
 * Each register packs two mode-register values (high/low 16 bits),
 * unpacked by get_mr_values() below.
 */
static uint32_t fsp_init_reg[3][4] = {
	{ DDRC_INIT3(0), DDRC_INIT4(0), DDRC_INIT6(0), DDRC_INIT7(0) },
	{ DDRC_FREQ1_INIT3(0), DDRC_FREQ1_INIT4(0), DDRC_FREQ1_INIT6(0), DDRC_FREQ1_INIT7(0) },
	{ DDRC_FREQ2_INIT3(0), DDRC_FREQ2_INIT4(0), DDRC_FREQ2_INIT6(0), DDRC_FREQ2_INIT7(0) },
};
32
33static void get_mr_values(uint32_t (*mr_value)[8])
34{
35 uint32_t init_val;
36 unsigned int i, fsp_index;
37
38 for (fsp_index = 0U; fsp_index < 3U; fsp_index++) {
39 for (i = 0U; i < 4U; i++) {
40 init_val = mmio_read_32(fsp_init_reg[fsp_index][i]);
41 mr_value[fsp_index][2*i] = init_val >> 16;
42 mr_value[fsp_index][2*i + 1] = init_val & 0xFFFF;
43 }
44 }
45}
46
Jacky Bai9a6f62f2019-11-25 14:43:26 +080047/* Restore the ddrc configs */
48void dram_umctl2_init(struct dram_timing_info *timing)
49{
50 struct dram_cfg_param *ddrc_cfg = timing->ddrc_cfg;
51 unsigned int i;
52
53 for (i = 0U; i < timing->ddrc_cfg_num; i++) {
54 mmio_write_32(ddrc_cfg->reg, ddrc_cfg->val);
55 ddrc_cfg++;
56 }
57
58 /* set the default fsp to P0 */
59 mmio_write_32(DDRC_MSTR2(0), 0x0);
60}
61
62/* Restore the dram PHY config */
63void dram_phy_init(struct dram_timing_info *timing)
64{
65 struct dram_cfg_param *cfg = timing->ddrphy_cfg;
66 unsigned int i;
67
68 /* Restore the PHY init config */
69 cfg = timing->ddrphy_cfg;
70 for (i = 0U; i < timing->ddrphy_cfg_num; i++) {
71 dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
72 cfg++;
73 }
74
75 /* Restore the DDR PHY CSRs */
76 cfg = timing->ddrphy_trained_csr;
77 for (i = 0U; i < timing->ddrphy_trained_csr_num; i++) {
78 dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
79 cfg++;
80 }
81
82 /* Load the PIE image */
83 cfg = timing->ddrphy_pie;
84 for (i = 0U; i < timing->ddrphy_pie_num; i++) {
85 dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
86 cfg++;
87 }
Jacky Baid746daa12019-11-25 13:19:37 +080088}
89
90/* EL3 SGI-8 IPI handler for DDR Dynamic frequency scaling */
91static uint64_t waiting_dvfs(uint32_t id, uint32_t flags,
92 void *handle, void *cookie)
93{
94 uint64_t mpidr = read_mpidr_el1();
95 unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
96 uint32_t irq;
97
98 irq = plat_ic_acknowledge_interrupt();
99 if (irq < 1022U) {
100 plat_ic_end_of_interrupt(irq);
101 }
102
103 /* set the WFE done status */
104 spin_lock(&dfs_lock);
105 wfe_done |= (1 << cpu_id * 8);
106 dsb();
107 spin_unlock(&dfs_lock);
108
109 while (1) {
110 /* ddr frequency change done */
111 if (!wait_ddrc_hwffc_done)
112 break;
113
114 wfe();
115 }
116
117 return 0;
Jacky Bai9a6f62f2019-11-25 14:43:26 +0800118}
119
120void dram_info_init(unsigned long dram_timing_base)
121{
122 uint32_t ddrc_mstr, current_fsp;
Jacky Baid746daa12019-11-25 13:19:37 +0800123 uint32_t flags = 0;
124 uint32_t rc;
125 unsigned int i;
Jacky Bai9a6f62f2019-11-25 14:43:26 +0800126
127 /* Get the dram type & rank */
128 ddrc_mstr = mmio_read_32(DDRC_MSTR(0));
129
130 dram_info.dram_type = ddrc_mstr & DDR_TYPE_MASK;
131 dram_info.num_rank = (ddrc_mstr >> 24) & ACTIVE_RANK_MASK;
132
133 /* Get current fsp info */
134 current_fsp = mmio_read_32(DDRC_DFIMISC(0)) & 0xf;
135 dram_info.boot_fsp = current_fsp;
136 dram_info.current_fsp = current_fsp;
137
Jacky Baid746daa12019-11-25 13:19:37 +0800138 get_mr_values(dram_info.mr_table);
139
Jacky Bai9a6f62f2019-11-25 14:43:26 +0800140 dram_info.timing_info = (struct dram_timing_info *)dram_timing_base;
Jacky Baid746daa12019-11-25 13:19:37 +0800141
142 /* get the num of supported fsp */
143 for (i = 0U; i < 4U; ++i) {
144 if (!dram_info.timing_info->fsp_table[i]) {
145 break;
146 }
147 }
148 dram_info.num_fsp = i;
149
150 /* check if has bypass mode support */
151 if (dram_info.timing_info->fsp_table[i-1] < 666) {
152 dram_info.bypass_mode = true;
153 } else {
154 dram_info.bypass_mode = false;
155 }
156
157 /* Register the EL3 handler for DDR DVFS */
158 set_interrupt_rm_flag(flags, NON_SECURE);
159 rc = register_interrupt_type_handler(INTR_TYPE_EL3, waiting_dvfs, flags);
160 if (rc != 0) {
161 panic();
162 }
163}
164
165
166/*
167 * For each freq return the following info:
168 *
169 * r1: data rate
170 * r2: 1 + dram_core parent
171 * r3: 1 + dram_alt parent index
172 * r4: 1 + dram_apb parent index
173 *
174 * The parent indices can be used by an OS who manages source clocks to enabled
175 * them ahead of the switch.
176 *
177 * A parent value of "0" means "don't care".
178 *
179 * Current implementation of freq switch is hardcoded in
180 * plat/imx/common/imx8m/clock.c but in theory this can be enhanced to support
181 * a wide variety of rates.
182 */
183int dram_dvfs_get_freq_info(void *handle, u_register_t index)
184{
185 switch (index) {
186 case 0:
187 SMC_RET4(handle, dram_info.timing_info->fsp_table[0],
188 1, 0, 5);
189 case 1:
190 if (!dram_info.bypass_mode) {
191 SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
192 1, 0, 0);
193 }
194 SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
195 2, 2, 4);
196 case 2:
197 if (!dram_info.bypass_mode) {
198 SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
199 1, 0, 0);
200 }
201 SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
202 2, 3, 3);
203 case 3:
204 SMC_RET4(handle, dram_info.timing_info->fsp_table[3],
205 1, 0, 0);
206 default:
207 SMC_RET1(handle, -3);
208 }
209}
210
211int dram_dvfs_handler(uint32_t smc_fid, void *handle,
212 u_register_t x1, u_register_t x2, u_register_t x3)
213{
214 uint64_t mpidr = read_mpidr_el1();
215 unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
216 unsigned int fsp_index = x1;
217 uint32_t online_cores = x2;
218
219 if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_COUNT) {
220 SMC_RET1(handle, dram_info.num_fsp);
221 } else if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_INFO) {
222 return dram_dvfs_get_freq_info(handle, x2);
223 } else if (x1 < 4) {
224 wait_ddrc_hwffc_done = true;
225 dsb();
226
227 /* trigger the SGI IPI to info other cores */
228 for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
229 if (cpu_id != i && (online_cores & (0x1 << (i * 8)))) {
230 plat_ic_raise_el3_sgi(0x8, i);
231 }
232 }
233
234 /* make sure all the core in WFE */
235 online_cores &= ~(0x1 << (cpu_id * 8));
236 while (1) {
237 if (online_cores == wfe_done) {
238 break;
239 }
240 }
241
242 /* flush the L1/L2 cache */
243 dcsw_op_all(DCCSW);
244
245 if (dram_info.dram_type == DDRC_LPDDR4) {
246 lpddr4_swffc(&dram_info, dev_fsp, fsp_index);
247 dev_fsp = (~dev_fsp) & 0x1;
248 } else if (dram_info.dram_type == DDRC_DDR4) {
249 ddr4_swffc(&dram_info, fsp_index);
250 }
251
252 dram_info.current_fsp = fsp_index;
253 wait_ddrc_hwffc_done = false;
254 wfe_done = 0;
255 dsb();
256 sev();
257 isb();
258 }
259
260 SMC_RET1(handle, 0);
Jacky Bai9a6f62f2019-11-25 14:43:26 +0800261}