/*
 * Copyright 2018-2023 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <drivers/delay_timer.h>
#include <lib/mmio.h>

#include <dram.h>

/*
 * Issue a mode-register (MR) access through the uMCTL2 software MR
 * interface (MRCTRL0/MRCTRL1/MRSTAT).
 *
 * mr:        mode register index written to MRCTRL0.mr_addr.
 * data:      MR value written to MRCTRL1.mr_data.
 * mr_type:   MRCTRL0.mr_type field (callers here pass 0, i.e. MR write).
 * rank:      value placed in MRCTRL0 bits [5:4]; rank == 0x2 selects the
 *            second rank and enables the address-mirroring path below.
 * dram_type: DDRC_DDR4 selects the wider DDR4 mirroring swap set;
 *            any other value uses the shorter (DDR3-style) swap set.
 */
void ddr4_mr_write(uint32_t mr, uint32_t data, uint32_t mr_type,
		   uint32_t rank, uint32_t dram_type)
{
	uint32_t val, mr_mirror, data_mirror;

	/*
	 * 1. Poll MRSTAT.mr_wr_busy until it is 0 to make sure
	 * that there is no outstanding MR transaction.
	 */
	while (mmio_read_32(DDRC_MRSTAT(0)) & 0x1) {
		;
	}

	/*
	 * 2. Write the MRCTRL0.mr_type, MRCTRL0.mr_addr, MRCTRL0.mr_rank
	 * and (for MRWs) MRCTRL1.mr_data to define the MR transaction.
	 *
	 * When DIMMCTL bit 1 (address mirroring) is set and the target is
	 * the second rank, the MR index and MR data must be pre-swizzled
	 * so the mirrored rank latches the intended values.
	 */
	val = mmio_read_32(DDRC_DIMMCTL(0));
	if ((val & 0x2) && (rank == 0x2)) {
		mr_mirror = (mr & 0x4) | ((mr & 0x1) << 1) | ((mr & 0x2) >> 1); /* BA0, BA1 swap */
		if (dram_type == DDRC_DDR4) {
			/* Swap data bit pairs 3/4, 5/6, 7/8 and additionally 11/13. */
			data_mirror = (data & 0x1607) | ((data & 0x8) << 1) | ((data & 0x10) >> 1) |
					((data & 0x20) << 1) | ((data & 0x40) >> 1) | ((data & 0x80) << 1) |
					((data & 0x100) >> 1) | ((data & 0x800) << 2) | ((data & 0x2000) >> 2) ;
		} else {
			/* Swap data bit pairs 3/4, 5/6 and 7/8 only. */
			data_mirror = (data & 0xfe07) | ((data & 0x8) << 1) | ((data & 0x10) >> 1) |
					((data & 0x20) << 1) | ((data & 0x40) >> 1) | ((data & 0x80) << 1) |
					((data & 0x100) >> 1);
		}
	} else {
		mr_mirror = mr;
		data_mirror = data;
	}

	mmio_write_32(DDRC_MRCTRL0(0), mr_type | (mr_mirror << 12) | (rank << 4));
	mmio_write_32(DDRC_MRCTRL1(0), data_mirror);

	/*
	 * 3. In a separate APB transaction, write the MRCTRL0.mr_wr to 1.
	 * This bit is self-clearing, and triggers the MR transaction.
	 * The uMCTL2 then asserts the MRSTAT.mr_wr_busy while it performs
	 * the MR transaction to SDRAM, and no further accesses can be
	 * initiated until it is deasserted.
	 */
	mmio_setbits_32(DDRC_MRCTRL0(0), BIT(31));

	/*
	 * Wait for the whole MRSTAT register to read zero (unlike step 1,
	 * which masks only mr_wr_busy) before returning.
	 */
	while (mmio_read_32(DDRC_MRSTAT(0))) {
		;
	}
}
62
63void dram_cfg_all_mr(struct dram_info *info, uint32_t pstate)
64{
65 uint32_t num_rank = info->num_rank;
Jacky Baid148bae2020-04-22 21:26:13 +080066 uint32_t dram_type = info->dram_type;
Jacky Baid746daa2019-11-25 13:19:37 +080067 /*
68 * 15. Perform MRS commands as required to re-program
69 * timing registers in the SDRAM for the new frequency
70 * (in particular, CL, CWL and WR may need to be changed).
71 */
72
73 for (int i = 1; i <= num_rank; i++) {
74 for (int j = 0; j < 6; j++) {
Jacky Baid148bae2020-04-22 21:26:13 +080075 ddr4_mr_write(j, info->mr_table[pstate][j], 0, i, dram_type);
Jacky Baid746daa2019-11-25 13:19:37 +080076 }
Jacky Baid148bae2020-04-22 21:26:13 +080077 ddr4_mr_write(6, info->mr_table[pstate][7], 0, i, dram_type);
Jacky Baid746daa2019-11-25 13:19:37 +080078 }
79}
80
81void sw_pstate(uint32_t pstate, uint32_t drate)
82{
83 uint32_t val;
84
85 mmio_write_32(DDRC_SWCTL(0), 0x0);
86
87 /*
88 * Update any registers which may be required to
89 * change for the new frequency.
90 */
91 mmio_write_32(DDRC_MSTR2(0), pstate);
92 mmio_setbits_32(DDRC_MSTR(0), (0x1 << 29));
93
94 /*
95 * Toggle RFSHCTL3.refresh_update_level to allow the
96 * new refresh-related register values to propagate
97 * to the refresh logic.
98 */
99 val = mmio_read_32(DDRC_RFSHCTL3(0));
100 if (val & 0x2) {
101 mmio_write_32(DDRC_RFSHCTL3(0), val & 0xFFFFFFFD);
102 } else {
103 mmio_write_32(DDRC_RFSHCTL3(0), val | 0x2);
104 }
105
106 /*
107 * 19. If required, trigger the initialization in the PHY.
108 * If using the gen2 multiPHY, PLL initialization should
109 * be triggered at this point. See the PHY databook for
110 * details about the frequency change procedure.
111 */
112 mmio_write_32(DDRC_DFIMISC(0), 0x00000000 | (pstate << 8));
113 mmio_write_32(DDRC_DFIMISC(0), 0x00000020 | (pstate << 8));
114
115 /* wait DFISTAT.dfi_init_complete to 0 */
116 while (mmio_read_32(DDRC_DFISTAT(0)) & 0x1) {
117 ;
118 }
119
120 /* change the clock to the target frequency */
121 dram_clock_switch(drate, false);
122
123 mmio_write_32(DDRC_DFIMISC(0), 0x00000000 | (pstate << 8));
124
125 /* wait DFISTAT.dfi_init_complete to 1 */
126 while (!(mmio_read_32(DDRC_DFISTAT(0)) & 0x1)) {
127 ;
128 }
129
130 /*
131 * When changing frequencies the controller may violate the JEDEC
132 * requirement that no more than 16 refreshes should be issued within
133 * 2*tREFI. These extra refreshes are not expected to cause a problem
134 * in the SDRAM. This issue can be avoided by waiting for at least 2*tREFI
135 * before exiting self-refresh in step 19.
136 */
137 udelay(14);
138
139 /* 14. Exit the self-refresh state by setting PWRCTL.selfref_sw = 0. */
140 mmio_clrbits_32(DDRC_PWRCTL(0), (1 << 5));
141
142 while ((mmio_read_32(DDRC_STAT(0)) & 0x3f) == 0x23) {
143 ;
144 }
145}
146
/*
 * DDR4 software fast frequency change (SWFFC).
 *
 * Drains and blocks all traffic, puts the controller into software
 * self-refresh, switches the controller/PHY/clock to the requested
 * P-state via sw_pstate(), re-programs the SDRAM mode registers, and
 * finally unblocks traffic and re-enables quasi-dynamic programming.
 *
 * info:   DRAM configuration; the target data rate is read from
 *         info->timing_info->fsp_table[pstate].
 * pstate: index of the target frequency set-point.
 */
void ddr4_swffc(struct dram_info *info, unsigned int pstate)
{
	uint32_t drate = info->timing_info->fsp_table[pstate];

	/*
	 * 1. set SWCTL.sw_done to disable quasi-dynamic register
	 * programming outside reset.
	 */
	mmio_write_32(DDRC_SWCTL(0), 0x0);

	/*
	 * 2. Write 0 to PCTRL_n.port_en. This blocks AXI port(s)
	 * from taking any transaction (blocks traffic on AXI ports).
	 */
	mmio_write_32(DDRC_PCTRL_0(0), 0x0);

	/*
	 * 3. Poll PSTAT.rd_port_busy_n=0 and PSTAT.wr_port_busy_n=0.
	 * Wait until all AXI ports are idle (the uMCTL2 core has to
	 * be idle).
	 */
	while (mmio_read_32(DDRC_PSTAT(0)) & 0x10001) {
		;
	}

	/*
	 * 4. Write 0 to SBRCTL.scrub_en. Disable SBR, required only if
	 * SBR instantiated.
	 * 5. Poll SBRSTAT.scrub_busy=0.
	 * 6. Set DERATEEN.derate_enable = 0, if DERATEEN.derate_enable = 1
	 * and the read latency (RL) value needs to change after the frequency
	 * change (LPDDR2/3/4 only).
	 * 7. Set DBG1.dis_hif=1 so that no new commands will be accepted by the uMCTL2.
	 */
	mmio_setbits_32(DDRC_DBG1(0), (0x1 << 1));

	/*
	 * 8. Poll DBGCAM.dbg_wr_q_empty and DBGCAM.dbg_rd_q_empty to ensure
	 * that write and read data buffers are empty.
	 */
	while ((mmio_read_32(DDRC_DBGCAM(0)) & 0x06000000) != 0x06000000) {
		;
	}

	/*
	 * 9. For DDR4, update MR6 with the new tDLLK value via the Mode
	 * Register Write signals
	 * 10. Set DFILPCFG0.dfi_lp_en_sr = 0, if DFILPCFG0.dfi_lp_en_sr = 1,
	 * and wait until DFISTAT.dfi_lp_ack
	 * 11. If DFI PHY Master interface is active in uMCTL2, then disable it
	 * 12. Wait until STAT.operating_mode[1:0]!=11 indicating that the
	 * controller is not in self-refresh mode.
	 *
	 * NOTE(review): the check below only logs when the controller is
	 * already in self-refresh; it does not wait for it to leave as the
	 * step-12 text suggests — confirm this is intentional.
	 */
	if ((mmio_read_32(DDRC_STAT(0)) & 0x3) == 0x3) {
		VERBOSE("DRAM is in Self Refresh\n");
	}

	/*
	 * 13. Assert PWRCTL.selfref_sw for the DWC_ddr_umctl2 core to enter
	 * the self-refresh mode.
	 */
	mmio_setbits_32(DDRC_PWRCTL(0), (1 << 5));

	/*
	 * 14. Wait until STAT.operating_mode[1:0]==11 indicating that the
	 * controller core is in self-refresh mode.
	 */
	while ((mmio_read_32(DDRC_STAT(0)) & 0x3f) != 0x23) {
		;
	}

	/* Perform the actual frequency switch, then refresh all MR values. */
	sw_pstate(pstate, drate);
	dram_cfg_all_mr(info, pstate);

	/* 23. Enable HIF commands by setting DBG1.dis_hif=0. */
	mmio_clrbits_32(DDRC_DBG1(0), (0x1 << 1));

	/*
	 * 24. Reset DERATEEN.derate_enable = 1 if DERATEEN.derate_enable
	 * has been set to 0 in step 6.
	 * 25. If DFI PHY Master interface was active before step 11 then
	 * enable it back by programming DFIPHYMSTR.phymstr_en = 1'b1.
	 * 26. Write 1 to PCTRL_n.port_en. AXI port(s) are no longer blocked
	 * from taking transactions (Re-enable traffic on AXI ports)
	 */
	mmio_write_32(DDRC_PCTRL_0(0), 0x1);

	/*
	 * 27. Write 1 to SBRCTL.scrub_en. Enable SBR if desired, only
	 * required if SBR instantiated.
	 */

	/*
	 * set SWCTL.sw_done to enable quasi-dynamic register programming
	 * outside reset.
	 */
	mmio_write_32(DDRC_SWCTL(0), 0x1);

	/* wait SWSTAT.sw_done_ack to 1 */
	while (!(mmio_read_32(DDRC_SWSTAT(0)) & 0x1)) {
		;
	}
}
249}